code (string, 66–870k chars) | docstring (string, 19–26.7k chars) | func_name (string, 1–138 chars) | language (1 class) | repo (string, 7–68 chars) | path (string, 5–324 chars) | url (string, 46–389 chars) | license (7 classes)
---|---|---|---|---|---|---|---
def inverse_transform(self, X):
"""Reverse the transformation operation.
Parameters
----------
X : array of shape [n_samples, n_selected_features]
The input samples.
Returns
-------
X_original : array of shape [n_samples, n_original_features]
`X` with columns of zeros inserted where features would have
been removed by :meth:`transform`.
"""
if issparse(X):
X = X.tocsc()
# insert additional entries in indptr:
# e.g. if transform changed indptr from [0 2 6 7] to [0 2 3]
# col_nonzeros here will be [2 0 1] so indptr becomes [0 2 2 3]
it = self.inverse_transform(np.diff(X.indptr).reshape(1, -1))
col_nonzeros = it.ravel()
indptr = np.concatenate([[0], np.cumsum(col_nonzeros)])
Xt = csc_matrix(
(X.data, X.indices, indptr),
shape=(X.shape[0], len(indptr) - 1),
dtype=X.dtype,
)
return Xt
support = self.get_support()
X = check_array(X, dtype=None)
if support.sum() != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
if X.ndim == 1:
X = X[None, :]
Xt = np.zeros((X.shape[0], support.size), dtype=X.dtype)
Xt[:, support] = X
return Xt
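A minimal usage sketch (not part of the original source) of the round trip that `inverse_transform` performs; SelectKBest and the iris data are illustrative assumptions.
import numpy as np
from sklearn.datasets import load_iris
from sklearn.feature_selection import SelectKBest, f_classif

X, y = load_iris(return_X_y=True)                  # (150, 4)
selector = SelectKBest(f_classif, k=2).fit(X, y)
X_reduced = selector.transform(X)                  # (150, 2)
X_back = selector.inverse_transform(X_reduced)     # (150, 4)

# Dropped features come back as all-zero columns.
print(np.all(X_back[:, ~selector.get_support()] == 0))   # True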
|
Reverse the transformation operation.
Parameters
----------
X : array of shape [n_samples, n_selected_features]
The input samples.
Returns
-------
X_original : array of shape [n_samples, n_original_features]
`X` with columns of zeros inserted where features would have
been removed by :meth:`transform`.
|
inverse_transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/_base.py
|
BSD-3-Clause
|
def get_feature_names_out(self, input_features=None):
"""Mask feature names according to selected features.
Parameters
----------
input_features : array-like of str or None, default=None
Input features.
- If `input_features` is `None`, then `feature_names_in_` is
used as feature names in. If `feature_names_in_` is not defined,
then the following input feature names are generated:
`["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
- If `input_features` is an array-like, then `input_features` must
match `feature_names_in_` if `feature_names_in_` is defined.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
"""
check_is_fitted(self)
input_features = _check_feature_names_in(self, input_features)
return input_features[self.get_support()]
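A short, hedged usage sketch: when the selector is fitted on a pandas DataFrame, `feature_names_in_` is populated and `get_feature_names_out` masks those names. The SelectKBest/iris setup below is an illustrative assumption.
from sklearn.datasets import load_iris
from sklearn.feature_selection import SelectKBest, f_classif

X, y = load_iris(return_X_y=True, as_frame=True)   # X is a DataFrame
selector = SelectKBest(f_classif, k=2).fit(X, y)

# Names come from feature_names_in_ because X carried column names.
print(selector.get_feature_names_out())
# expected: the two petal columns, e.g. ['petal length (cm)' 'petal width (cm)']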
|
Mask feature names according to selected features.
Parameters
----------
input_features : array-like of str or None, default=None
Input features.
- If `input_features` is `None`, then `feature_names_in_` is
used as feature names in. If `feature_names_in_` is not defined,
then the following input feature names are generated:
`["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
- If `input_features` is an array-like, then `input_features` must
match `feature_names_in_` if `feature_names_in_` is defined.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
|
get_feature_names_out
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/_base.py
|
BSD-3-Clause
|
def _get_feature_importances(estimator, getter, transform_func=None, norm_order=1):
"""
Retrieve and aggregate (ndim > 1) the feature importances
from an estimator. Also optionally applies transformation.
Parameters
----------
estimator : estimator
A scikit-learn estimator from which we want to get the feature
importances.
getter : "auto", str or callable
An attribute or a callable to get the feature importance. If `"auto"`,
`estimator` is expected to expose `coef_` or `feature_importances_`.
transform_func : {"norm", "square"}, default=None
The transform to apply to the feature importances. By default (`None`)
no transformation is applied.
norm_order : int, default=1
The norm order to apply when `transform_func="norm"`. Only applied
when `importances.ndim > 1`.
Returns
-------
importances : ndarray of shape (n_features,)
The feature importances, optionally transformed.
"""
if isinstance(getter, str):
if getter == "auto":
if hasattr(estimator, "coef_"):
getter = attrgetter("coef_")
elif hasattr(estimator, "feature_importances_"):
getter = attrgetter("feature_importances_")
else:
raise ValueError(
"when `importance_getter=='auto'`, the underlying "
f"estimator {estimator.__class__.__name__} should have "
"`coef_` or `feature_importances_` attribute. Either "
"pass a fitted estimator to feature selector or call fit "
"before calling transform."
)
else:
getter = attrgetter(getter)
elif not callable(getter):
raise ValueError("`importance_getter` has to be a string or `callable`")
importances = getter(estimator)
if transform_func is None:
return importances
elif transform_func == "norm":
if importances.ndim == 1:
importances = np.abs(importances)
else:
importances = np.linalg.norm(importances, axis=0, ord=norm_order)
elif transform_func == "square":
if importances.ndim == 1:
importances = safe_sqr(importances)
else:
importances = safe_sqr(importances).sum(axis=0)
else:
raise ValueError(
"Valid values for `transform_func` are "
"None, 'norm' and 'square'. Those two "
"transformation are only supported now"
)
return importances
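An illustrative sketch (my addition, with the caveat that `_get_feature_importances` is a private helper, so the import path may change between releases): it shows how a multiclass `coef_` of shape (n_classes, n_features) is collapsed to one importance per feature with `transform_func="norm"`.
import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection._base import _get_feature_importances  # private helper

X, y = load_iris(return_X_y=True)
est = LogisticRegression(max_iter=1000).fit(X, y)   # coef_ has shape (3, 4)

imp = _get_feature_importances(est, getter="auto", transform_func="norm", norm_order=1)
print(imp.shape)                                    # (4,) -- one value per feature
# Equivalent to an L1 norm taken over the class axis of coef_:
print(np.allclose(imp, np.linalg.norm(est.coef_, axis=0, ord=1)))   # True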
|
Retrieve and aggregate (ndim > 1) the feature importances
from an estimator. Also optionally applies transformation.
Parameters
----------
estimator : estimator
A scikit-learn estimator from which we want to get the feature
importances.
getter : "auto", str or callable
An attribute or a callable to get the feature importance. If `"auto"`,
`estimator` is expected to expose `coef_` or `feature_importances_`.
transform_func : {"norm", "square"}, default=None
The transform to apply to the feature importances. By default (`None`)
no transformation is applied.
norm_order : int, default=1
The norm order to apply when `transform_func="norm"`. Only applied
when `importances.ndim > 1`.
Returns
-------
importances : ndarray of shape (n_features,)
The feature importances, optionally transformed.
|
_get_feature_importances
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/_base.py
|
BSD-3-Clause
|
def fit(self, X, y=None, **fit_params):
"""Fit the SelectFromModel meta-transformer.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The training input samples.
y : array-like of shape (n_samples,), default=None
The target values (integers that correspond to classes in
classification, real numbers in regression).
**fit_params : dict
- If `enable_metadata_routing=False` (default): Parameters directly passed
to the `fit` method of the sub-estimator. They are ignored if
`prefit=True`.
- If `enable_metadata_routing=True`: Parameters safely routed to the `fit`
method of the sub-estimator. They are ignored if `prefit=True`.
.. versionchanged:: 1.4
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
self : object
Fitted estimator.
"""
self._check_max_features(X)
if self.prefit:
try:
check_is_fitted(self.estimator)
except NotFittedError as exc:
raise NotFittedError(
"When `prefit=True`, `estimator` is expected to be a fitted "
"estimator."
) from exc
self.estimator_ = deepcopy(self.estimator)
else:
if _routing_enabled():
routed_params = process_routing(self, "fit", **fit_params)
self.estimator_ = clone(self.estimator)
self.estimator_.fit(X, y, **routed_params.estimator.fit)
else:
# TODO(SLEP6): remove when metadata routing cannot be disabled.
self.estimator_ = clone(self.estimator)
self.estimator_.fit(X, y, **fit_params)
if hasattr(self.estimator_, "feature_names_in_"):
self.feature_names_in_ = self.estimator_.feature_names_in_
else:
_check_feature_names(self, X, reset=True)
return self
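A hedged usage sketch for this `fit` path (metadata routing disabled, `prefit=False`); the L1-penalised logistic regression and the synthetic data are illustrative assumptions.
from sklearn.datasets import make_classification
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import LogisticRegression

X, y = make_classification(n_samples=200, n_features=20, n_informative=3,
                           random_state=0)
selector = SelectFromModel(LogisticRegression(penalty="l1", solver="liblinear"))
selector.fit(X, y)                       # clones and fits the sub-estimator

print(selector.get_support().sum())      # number of features above the threshold
print(selector.transform(X).shape)       # (200, n_selected_features)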
|
Fit the SelectFromModel meta-transformer.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The training input samples.
y : array-like of shape (n_samples,), default=None
The target values (integers that correspond to classes in
classification, real numbers in regression).
**fit_params : dict
- If `enable_metadata_routing=False` (default): Parameters directly passed
to the `fit` method of the sub-estimator. They are ignored if
`prefit=True`.
- If `enable_metadata_routing=True`: Parameters safely routed to the `fit`
method of the sub-estimator. They are ignored if `prefit=True`.
.. versionchanged:: 1.4
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
self : object
Fitted estimator.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/_from_model.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/_from_model.py
|
BSD-3-Clause
|
def threshold_(self):
"""Threshold value used for feature selection."""
scores = _get_feature_importances(
estimator=self.estimator_,
getter=self.importance_getter,
transform_func="norm",
norm_order=self.norm_order,
)
return _calculate_threshold(self.estimator, scores, self.threshold)
|
Threshold value used for feature selection.
|
threshold_
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/_from_model.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/_from_model.py
|
BSD-3-Clause
|
def partial_fit(self, X, y=None, **partial_fit_params):
"""Fit the SelectFromModel meta-transformer only once.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The training input samples.
y : array-like of shape (n_samples,), default=None
The target values (integers that correspond to classes in
classification, real numbers in regression).
**partial_fit_params : dict
- If `enable_metadata_routing=False` (default): Parameters directly passed
to the `partial_fit` method of the sub-estimator.
- If `enable_metadata_routing=True`: Parameters passed to the `partial_fit`
method of the sub-estimator. They are ignored if `prefit=True`.
.. versionchanged:: 1.4
`**partial_fit_params` are routed to the sub-estimator, if
`enable_metadata_routing=True` is set via
:func:`~sklearn.set_config`, which allows for aliasing.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
self : object
Fitted estimator.
"""
first_call = not hasattr(self, "estimator_")
if first_call:
self._check_max_features(X)
if self.prefit:
if first_call:
try:
check_is_fitted(self.estimator)
except NotFittedError as exc:
raise NotFittedError(
"When `prefit=True`, `estimator` is expected to be a fitted "
"estimator."
) from exc
self.estimator_ = deepcopy(self.estimator)
return self
if first_call:
self.estimator_ = clone(self.estimator)
if _routing_enabled():
routed_params = process_routing(self, "partial_fit", **partial_fit_params)
self.estimator_ = clone(self.estimator)
self.estimator_.partial_fit(X, y, **routed_params.estimator.partial_fit)
else:
# TODO(SLEP6): remove when metadata routing cannot be disabled.
self.estimator_.partial_fit(X, y, **partial_fit_params)
if hasattr(self.estimator_, "feature_names_in_"):
self.feature_names_in_ = self.estimator_.feature_names_in_
else:
_check_feature_names(self, X, reset=first_call)
return self
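A hedged sketch of the incremental path: the sub-estimator is cloned on the first `partial_fit` call only and then updated batch by batch. SGDClassifier, the batch split, and `threshold="mean"` are assumptions for illustration; extra keyword arguments such as `classes` are forwarded to the sub-estimator when metadata routing is disabled.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import SGDClassifier

X, y = make_classification(n_samples=300, n_features=15, random_state=0)
selector = SelectFromModel(SGDClassifier(random_state=0), threshold="mean")

classes = np.unique(y)
for batch in np.array_split(np.arange(X.shape[0]), 3):
    selector.partial_fit(X[batch], y[batch], classes=classes)

print(selector.transform(X).shape)       # (300, n_selected_features)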
|
Fit the SelectFromModel meta-transformer only once.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The training input samples.
y : array-like of shape (n_samples,), default=None
The target values (integers that correspond to classes in
classification, real numbers in regression).
**partial_fit_params : dict
- If `enable_metadata_routing=False` (default): Parameters directly passed
to the `partial_fit` method of the sub-estimator.
- If `enable_metadata_routing=True`: Parameters passed to the `partial_fit`
method of the sub-estimator. They are ignored if `prefit=True`.
.. versionchanged:: 1.4
`**partial_fit_params` are routed to the sub-estimator, if
`enable_metadata_routing=True` is set via
:func:`~sklearn.set_config`, which allows for aliasing.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
self : object
Fitted estimator.
|
partial_fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/_from_model.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/_from_model.py
|
BSD-3-Clause
|
def n_features_in_(self):
"""Number of features seen during `fit`."""
# For consistency with other estimators we raise an AttributeError so
# that hasattr() fails if the estimator isn't fitted.
try:
check_is_fitted(self)
except NotFittedError as nfe:
raise AttributeError(
"{} object has no n_features_in_ attribute.".format(
self.__class__.__name__
)
) from nfe
return self.estimator_.n_features_in_
|
Number of features seen during `fit`.
|
n_features_in_
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/_from_model.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/_from_model.py
|
BSD-3-Clause
|
def get_metadata_routing(self):
"""Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.4
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
"""
router = MetadataRouter(owner=self.__class__.__name__).add(
estimator=self.estimator,
method_mapping=MethodMapping()
.add(caller="partial_fit", callee="partial_fit")
.add(caller="fit", callee="fit"),
)
return router
|
Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.4
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
|
get_metadata_routing
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/_from_model.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/_from_model.py
|
BSD-3-Clause
|
def _compute_mi_cc(x, y, n_neighbors):
"""Compute mutual information between two continuous variables.
Parameters
----------
x, y : ndarray, shape (n_samples,)
Samples of two continuous random variables, must have an identical
shape.
n_neighbors : int
Number of nearest neighbors to search for each point, see [1]_.
Returns
-------
mi : float
Estimated mutual information in nat units. If the estimate turns out
to be negative, it is replaced by 0.
Notes
-----
True mutual information can't be negative. If its estimate by a numerical
method is negative, it means (providing the method is adequate) that the
mutual information is close to 0 and replacing it by 0 is a reasonable
strategy.
References
----------
.. [1] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
information". Phys. Rev. E 69, 2004.
"""
n_samples = x.size
x = x.reshape((-1, 1))
y = y.reshape((-1, 1))
xy = np.hstack((x, y))
# Here we rely on NearestNeighbors to select the fastest algorithm.
nn = NearestNeighbors(metric="chebyshev", n_neighbors=n_neighbors)
nn.fit(xy)
radius = nn.kneighbors()[0]
radius = np.nextafter(radius[:, -1], 0)
# KDTree is explicitly fit to allow for the querying of number of
# neighbors within a specified radius
kd = KDTree(x, metric="chebyshev")
nx = kd.query_radius(x, radius, count_only=True, return_distance=False)
nx = np.array(nx) - 1.0
kd = KDTree(y, metric="chebyshev")
ny = kd.query_radius(y, radius, count_only=True, return_distance=False)
ny = np.array(ny) - 1.0
mi = (
digamma(n_samples)
+ digamma(n_neighbors)
- np.mean(digamma(nx + 1))
- np.mean(digamma(ny + 1))
)
return max(0, mi)
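An illustrative check (my addition): the Kraskov k-NN estimator implemented here is reached through the public `mutual_info_regression`, and for a bivariate Gaussian the true MI is -0.5 * log(1 - rho**2) nats, so the estimate should land near the analytic value. The sample size and correlation are arbitrary assumptions.
import numpy as np
from sklearn.feature_selection import mutual_info_regression

rng = np.random.RandomState(0)
rho = 0.8
x = rng.standard_normal(5000)
y = rho * x + np.sqrt(1 - rho**2) * rng.standard_normal(5000)

mi_est = mutual_info_regression(x.reshape(-1, 1), y, n_neighbors=3,
                                random_state=0)[0]
mi_true = -0.5 * np.log(1 - rho**2)
print(mi_est, mi_true)        # both roughly 0.51 nats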
|
Compute mutual information between two continuous variables.
Parameters
----------
x, y : ndarray, shape (n_samples,)
Samples of two continuous random variables, must have an identical
shape.
n_neighbors : int
Number of nearest neighbors to search for each point, see [1]_.
Returns
-------
mi : float
Estimated mutual information in nat units. If the estimate turns out
to be negative, it is replaced by 0.
Notes
-----
True mutual information can't be negative. If its estimate by a numerical
method is negative, it means (providing the method is adequate) that the
mutual information is close to 0 and replacing it by 0 is a reasonable
strategy.
References
----------
.. [1] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
information". Phys. Rev. E 69, 2004.
|
_compute_mi_cc
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/_mutual_info.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/_mutual_info.py
|
BSD-3-Clause
|
def _compute_mi_cd(c, d, n_neighbors):
"""Compute mutual information between continuous and discrete variables.
Parameters
----------
c : ndarray, shape (n_samples,)
Samples of a continuous random variable.
d : ndarray, shape (n_samples,)
Samples of a discrete random variable.
n_neighbors : int
Number of nearest neighbors to search for each point, see [1]_.
Returns
-------
mi : float
Estimated mutual information in nat units. If the estimate turns out
to be negative, it is replaced by 0.
Notes
-----
True mutual information can't be negative. If its estimate by a numerical
method is negative, it means (providing the method is adequate) that the
mutual information is close to 0 and replacing it by 0 is a reasonable
strategy.
References
----------
.. [1] B. C. Ross "Mutual Information between Discrete and Continuous
Data Sets". PLoS ONE 9(2), 2014.
"""
n_samples = c.shape[0]
c = c.reshape((-1, 1))
radius = np.empty(n_samples)
label_counts = np.empty(n_samples)
k_all = np.empty(n_samples)
nn = NearestNeighbors()
for label in np.unique(d):
mask = d == label
count = np.sum(mask)
if count > 1:
k = min(n_neighbors, count - 1)
nn.set_params(n_neighbors=k)
nn.fit(c[mask])
r = nn.kneighbors()[0]
radius[mask] = np.nextafter(r[:, -1], 0)
k_all[mask] = k
label_counts[mask] = count
# Ignore points with unique labels.
mask = label_counts > 1
n_samples = np.sum(mask)
label_counts = label_counts[mask]
k_all = k_all[mask]
c = c[mask]
radius = radius[mask]
kd = KDTree(c)
m_all = kd.query_radius(c, radius, count_only=True, return_distance=False)
m_all = np.array(m_all)
mi = (
digamma(n_samples)
+ np.mean(digamma(k_all))
- np.mean(digamma(label_counts))
- np.mean(digamma(m_all))
)
return max(0, mi)
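A brief illustrative sketch (not from the source): this continuous-vs-discrete estimator is what `mutual_info_classif` dispatches to for a dense continuous column and a discrete target; the synthetic informative/noise columns are assumptions.
import numpy as np
from sklearn.feature_selection import mutual_info_classif

rng = np.random.RandomState(0)
y = rng.randint(0, 2, size=2000)                  # discrete target
informative = y + 0.5 * rng.standard_normal(2000)
noise = rng.standard_normal(2000)

X = np.column_stack([informative, noise])
mi = mutual_info_classif(X, y, discrete_features=False, random_state=0)
print(mi)     # first entry clearly positive, second close to 0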
|
Compute mutual information between continuous and discrete variables.
Parameters
----------
c : ndarray, shape (n_samples,)
Samples of a continuous random variable.
d : ndarray, shape (n_samples,)
Samples of a discrete random variable.
n_neighbors : int
Number of nearest neighbors to search for each point, see [1]_.
Returns
-------
mi : float
Estimated mutual information in nat units. If the estimate turns out
to be negative, it is replaced by 0.
Notes
-----
True mutual information can't be negative. If its estimate by a numerical
method is negative, it means (providing the method is adequate) that the
mutual information is close to 0 and replacing it by 0 is a reasonable
strategy.
References
----------
.. [1] B. C. Ross "Mutual Information between Discrete and Continuous
Data Sets". PLoS ONE 9(2), 2014.
|
_compute_mi_cd
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/_mutual_info.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/_mutual_info.py
|
BSD-3-Clause
|
def _compute_mi(x, y, x_discrete, y_discrete, n_neighbors=3):
"""Compute mutual information between two variables.
This is a simple wrapper which selects a proper function to call based on
whether `x` and `y` are discrete or not.
"""
if x_discrete and y_discrete:
return mutual_info_score(x, y)
elif x_discrete and not y_discrete:
return _compute_mi_cd(y, x, n_neighbors)
elif not x_discrete and y_discrete:
return _compute_mi_cd(x, y, n_neighbors)
else:
return _compute_mi_cc(x, y, n_neighbors)
|
Compute mutual information between two variables.
This is a simple wrapper which selects a proper function to call based on
whether `x` and `y` are discrete or not.
|
_compute_mi
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/_mutual_info.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/_mutual_info.py
|
BSD-3-Clause
|
def _iterate_columns(X, columns=None):
"""Iterate over columns of a matrix.
Parameters
----------
X : ndarray or csc_matrix, shape (n_samples, n_features)
Matrix over which to iterate.
columns : iterable or None, default=None
Indices of columns to iterate over. If None, iterate over all columns.
Yields
------
x : ndarray, shape (n_samples,)
Columns of `X` in dense format.
"""
if columns is None:
columns = range(X.shape[1])
if issparse(X):
for i in columns:
x = np.zeros(X.shape[0])
start_ptr, end_ptr = X.indptr[i], X.indptr[i + 1]
x[X.indices[start_ptr:end_ptr]] = X.data[start_ptr:end_ptr]
yield x
else:
for i in columns:
yield X[:, i]
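An illustrative sketch (with the caveat that `_iterate_columns` is private, so importing it is an assumption about the module layout shown above): dense and CSC inputs yield the same densified columns one at a time.
import numpy as np
from scipy.sparse import csc_matrix
from sklearn.feature_selection._mutual_info import _iterate_columns  # private helper

X_dense = np.array([[0.0, 1.0, 2.0],
                    [3.0, 0.0, 4.0]])
X_sparse = csc_matrix(X_dense)

for col_d, col_s in zip(_iterate_columns(X_dense), _iterate_columns(X_sparse)):
    assert np.array_equal(col_d, col_s)
print("dense and sparse column iteration agree")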
|
Iterate over columns of a matrix.
Parameters
----------
X : ndarray or csc_matrix, shape (n_samples, n_features)
Matrix over which to iterate.
columns : iterable or None, default=None
Indices of columns to iterate over. If None, iterate over all columns.
Yields
------
x : ndarray, shape (n_samples,)
Columns of `X` in dense format.
|
_iterate_columns
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/_mutual_info.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/_mutual_info.py
|
BSD-3-Clause
|
def _estimate_mi(
X,
y,
*,
discrete_features="auto",
discrete_target=False,
n_neighbors=3,
copy=True,
random_state=None,
n_jobs=None,
):
"""Estimate mutual information between the features and the target.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Feature matrix.
y : array-like of shape (n_samples,)
Target vector.
discrete_features : {'auto', bool, array-like}, default='auto'
If bool, then determines whether to consider all features discrete
or continuous. If array, then it should be either a boolean mask
with shape (n_features,) or array with indices of discrete features.
If 'auto', it is assigned to False for dense `X` and to True for
sparse `X`.
discrete_target : bool, default=False
Whether to consider `y` as a discrete variable.
n_neighbors : int, default=3
Number of neighbors to use for MI estimation for continuous variables,
see [1]_ and [2]_. Higher values reduce variance of the estimation, but
could introduce a bias.
copy : bool, default=True
Whether to make a copy of the given data. If set to False, the initial
data will be overwritten.
random_state : int, RandomState instance or None, default=None
Determines random number generation for adding small noise to
continuous variables in order to remove repeated values.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
n_jobs : int, default=None
The number of jobs to use for computing the mutual information.
The parallelization is done on the columns of `X`.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionadded:: 1.5
Returns
-------
mi : ndarray, shape (n_features,)
Estimated mutual information between each feature and the target in
nat units. A negative value will be replaced by 0.
References
----------
.. [1] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
information". Phys. Rev. E 69, 2004.
.. [2] B. C. Ross "Mutual Information between Discrete and Continuous
Data Sets". PLoS ONE 9(2), 2014.
"""
X, y = check_X_y(X, y, accept_sparse="csc", y_numeric=not discrete_target)
n_samples, n_features = X.shape
if isinstance(discrete_features, (str, bool)):
if isinstance(discrete_features, str):
if discrete_features == "auto":
discrete_features = issparse(X)
else:
raise ValueError("Invalid string value for discrete_features.")
discrete_mask = np.empty(n_features, dtype=bool)
discrete_mask.fill(discrete_features)
else:
discrete_features = check_array(discrete_features, ensure_2d=False)
if discrete_features.dtype != "bool":
discrete_mask = np.zeros(n_features, dtype=bool)
discrete_mask[discrete_features] = True
else:
discrete_mask = discrete_features
continuous_mask = ~discrete_mask
if np.any(continuous_mask) and issparse(X):
raise ValueError("Sparse matrix `X` can't have continuous features.")
rng = check_random_state(random_state)
if np.any(continuous_mask):
X = X.astype(np.float64, copy=copy)
X[:, continuous_mask] = scale(
X[:, continuous_mask], with_mean=False, copy=False
)
# Add small noise to continuous features as advised in Kraskov et al.
means = np.maximum(1, np.mean(np.abs(X[:, continuous_mask]), axis=0))
X[:, continuous_mask] += (
1e-10
* means
* rng.standard_normal(size=(n_samples, np.sum(continuous_mask)))
)
if not discrete_target:
y = scale(y, with_mean=False)
y += (
1e-10
* np.maximum(1, np.mean(np.abs(y)))
* rng.standard_normal(size=n_samples)
)
mi = Parallel(n_jobs=n_jobs)(
delayed(_compute_mi)(x, y, discrete_feature, discrete_target, n_neighbors)
for x, discrete_feature in zip(_iterate_columns(X), discrete_mask)
)
return np.array(mi)
|
Estimate mutual information between the features and the target.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Feature matrix.
y : array-like of shape (n_samples,)
Target vector.
discrete_features : {'auto', bool, array-like}, default='auto'
If bool, then determines whether to consider all features discrete
or continuous. If array, then it should be either a boolean mask
with shape (n_features,) or array with indices of discrete features.
If 'auto', it is assigned to False for dense `X` and to True for
sparse `X`.
discrete_target : bool, default=False
Whether to consider `y` as a discrete variable.
n_neighbors : int, default=3
Number of neighbors to use for MI estimation for continuous variables,
see [1]_ and [2]_. Higher values reduce variance of the estimation, but
could introduce a bias.
copy : bool, default=True
Whether to make a copy of the given data. If set to False, the initial
data will be overwritten.
random_state : int, RandomState instance or None, default=None
Determines random number generation for adding small noise to
continuous variables in order to remove repeated values.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
n_jobs : int, default=None
The number of jobs to use for computing the mutual information.
The parallelization is done on the columns of `X`.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionadded:: 1.5
Returns
-------
mi : ndarray, shape (n_features,)
Estimated mutual information between each feature and the target in
nat units. A negative value will be replaced by 0.
References
----------
.. [1] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
information". Phys. Rev. E 69, 2004.
.. [2] B. C. Ross "Mutual Information between Discrete and Continuous
Data Sets". PLoS ONE 9(2), 2014.
|
_estimate_mi
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/_mutual_info.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/_mutual_info.py
|
BSD-3-Clause
|
def mutual_info_regression(
X,
y,
*,
discrete_features="auto",
n_neighbors=3,
copy=True,
random_state=None,
n_jobs=None,
):
"""Estimate mutual information for a continuous target variable.
Mutual information (MI) [1]_ between two random variables is a non-negative
value, which measures the dependency between the variables. It is equal
to zero if and only if two random variables are independent, and higher
values mean higher dependency.
The function relies on nonparametric methods based on entropy estimation
from k-nearest neighbors distances as described in [2]_ and [3]_. Both
methods are based on the idea originally proposed in [4]_.
It can be used for univariate feature selection; read more in the
:ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Feature matrix.
y : array-like of shape (n_samples,)
Target vector.
discrete_features : {'auto', bool, array-like}, default='auto'
If bool, then determines whether to consider all features discrete
or continuous. If array, then it should be either a boolean mask
with shape (n_features,) or array with indices of discrete features.
If 'auto', it is assigned to False for dense `X` and to True for
sparse `X`.
n_neighbors : int, default=3
Number of neighbors to use for MI estimation for continuous variables,
see [2]_ and [3]_. Higher values reduce variance of the estimation, but
could introduce a bias.
copy : bool, default=True
Whether to make a copy of the given data. If set to False, the initial
data will be overwritten.
random_state : int, RandomState instance or None, default=None
Determines random number generation for adding small noise to
continuous variables in order to remove repeated values.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
n_jobs : int, default=None
The number of jobs to use for computing the mutual information.
The parallelization is done on the columns of `X`.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionadded:: 1.5
Returns
-------
mi : ndarray, shape (n_features,)
Estimated mutual information between each feature and the target in
nat units.
Notes
-----
1. The term "discrete features" is used instead of naming them
"categorical", because it describes the essence more accurately.
For example, pixel intensities of an image are discrete features
(but hardly categorical) and you will get better results if you mark them
as such. Also note that treating a continuous variable as discrete and
vice versa will usually give incorrect results, so be attentive about
that.
2. True mutual information can't be negative. If its estimate turns out
to be negative, it is replaced by zero.
References
----------
.. [1] `Mutual Information
<https://en.wikipedia.org/wiki/Mutual_information>`_
on Wikipedia.
.. [2] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
information". Phys. Rev. E 69, 2004.
.. [3] B. C. Ross "Mutual Information between Discrete and Continuous
Data Sets". PLoS ONE 9(2), 2014.
.. [4] L. F. Kozachenko, N. N. Leonenko, "Sample Estimate of the Entropy
of a Random Vector", Probl. Peredachi Inf., 23:2 (1987), 9-16
Examples
--------
>>> from sklearn.datasets import make_regression
>>> from sklearn.feature_selection import mutual_info_regression
>>> X, y = make_regression(
... n_samples=50, n_features=3, n_informative=1, noise=1e-4, random_state=42
... )
>>> mutual_info_regression(X, y)
array([0.117, 2.645, 0.0287])
"""
return _estimate_mi(
X,
y,
discrete_features=discrete_features,
discrete_target=False,
n_neighbors=n_neighbors,
copy=copy,
random_state=random_state,
n_jobs=n_jobs,
)
|
Estimate mutual information for a continuous target variable.
Mutual information (MI) [1]_ between two random variables is a non-negative
value, which measures the dependency between the variables. It is equal
to zero if and only if two random variables are independent, and higher
values mean higher dependency.
The function relies on nonparametric methods based on entropy estimation
from k-nearest neighbors distances as described in [2]_ and [3]_. Both
methods are based on the idea originally proposed in [4]_.
It can be used for univariate feature selection; read more in the
:ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Feature matrix.
y : array-like of shape (n_samples,)
Target vector.
discrete_features : {'auto', bool, array-like}, default='auto'
If bool, then determines whether to consider all features discrete
or continuous. If array, then it should be either a boolean mask
with shape (n_features,) or array with indices of discrete features.
If 'auto', it is assigned to False for dense `X` and to True for
sparse `X`.
n_neighbors : int, default=3
Number of neighbors to use for MI estimation for continuous variables,
see [2]_ and [3]_. Higher values reduce variance of the estimation, but
could introduce a bias.
copy : bool, default=True
Whether to make a copy of the given data. If set to False, the initial
data will be overwritten.
random_state : int, RandomState instance or None, default=None
Determines random number generation for adding small noise to
continuous variables in order to remove repeated values.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
n_jobs : int, default=None
The number of jobs to use for computing the mutual information.
The parallelization is done on the columns of `X`.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionadded:: 1.5
Returns
-------
mi : ndarray, shape (n_features,)
Estimated mutual information between each feature and the target in
nat units.
Notes
-----
1. The term "discrete features" is used instead of naming them
"categorical", because it describes the essence more accurately.
For example, pixel intensities of an image are discrete features
(but hardly categorical) and you will get better results if you mark them
as such. Also note that treating a continuous variable as discrete and
vice versa will usually give incorrect results, so be attentive about
that.
2. True mutual information can't be negative. If its estimate turns out
to be negative, it is replaced by zero.
References
----------
.. [1] `Mutual Information
<https://en.wikipedia.org/wiki/Mutual_information>`_
on Wikipedia.
.. [2] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
information". Phys. Rev. E 69, 2004.
.. [3] B. C. Ross "Mutual Information between Discrete and Continuous
Data Sets". PLoS ONE 9(2), 2014.
.. [4] L. F. Kozachenko, N. N. Leonenko, "Sample Estimate of the Entropy
of a Random Vector", Probl. Peredachi Inf., 23:2 (1987), 9-16
Examples
--------
>>> from sklearn.datasets import make_regression
>>> from sklearn.feature_selection import mutual_info_regression
>>> X, y = make_regression(
... n_samples=50, n_features=3, n_informative=1, noise=1e-4, random_state=42
... )
>>> mutual_info_regression(X, y)
array([0.117, 2.645, 0.0287])
|
mutual_info_regression
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/_mutual_info.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/_mutual_info.py
|
BSD-3-Clause
|
def mutual_info_classif(
X,
y,
*,
discrete_features="auto",
n_neighbors=3,
copy=True,
random_state=None,
n_jobs=None,
):
"""Estimate mutual information for a discrete target variable.
Mutual information (MI) [1]_ between two random variables is a non-negative
value, which measures the dependency between the variables. It is equal
to zero if and only if two random variables are independent, and higher
values mean higher dependency.
The function relies on nonparametric methods based on entropy estimation
from k-nearest neighbors distances as described in [2]_ and [3]_. Both
methods are based on the idea originally proposed in [4]_.
It can be used for univariate feature selection; read more in the
:ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Feature matrix.
y : array-like of shape (n_samples,)
Target vector.
discrete_features : 'auto', bool or array-like, default='auto'
If bool, then determines whether to consider all features discrete
or continuous. If array, then it should be either a boolean mask
with shape (n_features,) or array with indices of discrete features.
If 'auto', it is assigned to False for dense `X` and to True for
sparse `X`.
n_neighbors : int, default=3
Number of neighbors to use for MI estimation for continuous variables,
see [2]_ and [3]_. Higher values reduce variance of the estimation, but
could introduce a bias.
copy : bool, default=True
Whether to make a copy of the given data. If set to False, the initial
data will be overwritten.
random_state : int, RandomState instance or None, default=None
Determines random number generation for adding small noise to
continuous variables in order to remove repeated values.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
n_jobs : int, default=None
The number of jobs to use for computing the mutual information.
The parallelization is done on the columns of `X`.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionadded:: 1.5
Returns
-------
mi : ndarray, shape (n_features,)
Estimated mutual information between each feature and the target in
nat units.
Notes
-----
1. The term "discrete features" is used instead of naming them
"categorical", because it describes the essence more accurately.
For example, pixel intensities of an image are discrete features
(but hardly categorical) and you will get better results if you mark them
as such. Also note that treating a continuous variable as discrete and
vice versa will usually give incorrect results, so be attentive about
that.
2. True mutual information can't be negative. If its estimate turns out
to be negative, it is replaced by zero.
References
----------
.. [1] `Mutual Information
<https://en.wikipedia.org/wiki/Mutual_information>`_
on Wikipedia.
.. [2] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
information". Phys. Rev. E 69, 2004.
.. [3] B. C. Ross "Mutual Information between Discrete and Continuous
Data Sets". PLoS ONE 9(2), 2014.
.. [4] L. F. Kozachenko, N. N. Leonenko, "Sample Estimate of the Entropy
of a Random Vector", Probl. Peredachi Inf., 23:2 (1987), 9-16
Examples
--------
>>> from sklearn.datasets import make_classification
>>> from sklearn.feature_selection import mutual_info_classif
>>> X, y = make_classification(
... n_samples=100, n_features=10, n_informative=2, n_clusters_per_class=1,
... shuffle=False, random_state=42
... )
>>> mutual_info_classif(X, y)
array([0.589, 0.107, 0.196, 0.0968, 0.,
       0., 0., 0., 0., 0.])
"""
check_classification_targets(y)
return _estimate_mi(
X,
y,
discrete_features=discrete_features,
discrete_target=True,
n_neighbors=n_neighbors,
copy=copy,
random_state=random_state,
n_jobs=n_jobs,
)
|
Estimate mutual information for a discrete target variable.
Mutual information (MI) [1]_ between two random variables is a non-negative
value, which measures the dependency between the variables. It is equal
to zero if and only if two random variables are independent, and higher
values mean higher dependency.
The function relies on nonparametric methods based on entropy estimation
from k-nearest neighbors distances as described in [2]_ and [3]_. Both
methods are based on the idea originally proposed in [4]_.
It can be used for univariate feature selection; read more in the
:ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Feature matrix.
y : array-like of shape (n_samples,)
Target vector.
discrete_features : 'auto', bool or array-like, default='auto'
If bool, then determines whether to consider all features discrete
or continuous. If array, then it should be either a boolean mask
with shape (n_features,) or array with indices of discrete features.
If 'auto', it is assigned to False for dense `X` and to True for
sparse `X`.
n_neighbors : int, default=3
Number of neighbors to use for MI estimation for continuous variables,
see [2]_ and [3]_. Higher values reduce variance of the estimation, but
could introduce a bias.
copy : bool, default=True
Whether to make a copy of the given data. If set to False, the initial
data will be overwritten.
random_state : int, RandomState instance or None, default=None
Determines random number generation for adding small noise to
continuous variables in order to remove repeated values.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
n_jobs : int, default=None
The number of jobs to use for computing the mutual information.
The parallelization is done on the columns of `X`.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionadded:: 1.5
Returns
-------
mi : ndarray, shape (n_features,)
Estimated mutual information between each feature and the target in
nat units.
Notes
-----
1. The term "discrete features" is used instead of naming them
"categorical", because it describes the essence more accurately.
For example, pixel intensities of an image are discrete features
(but hardly categorical) and you will get better results if you mark them
as such. Also note that treating a continuous variable as discrete and
vice versa will usually give incorrect results, so be attentive about
that.
2. True mutual information can't be negative. If its estimate turns out
to be negative, it is replaced by zero.
References
----------
.. [1] `Mutual Information
<https://en.wikipedia.org/wiki/Mutual_information>`_
on Wikipedia.
.. [2] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
information". Phys. Rev. E 69, 2004.
.. [3] B. C. Ross "Mutual Information between Discrete and Continuous
Data Sets". PLoS ONE 9(2), 2014.
.. [4] L. F. Kozachenko, N. N. Leonenko, "Sample Estimate of the Entropy
of a Random Vector", Probl. Peredachi Inf., 23:2 (1987), 9-16
Examples
--------
>>> from sklearn.datasets import make_classification
>>> from sklearn.feature_selection import mutual_info_classif
>>> X, y = make_classification(
... n_samples=100, n_features=10, n_informative=2, n_clusters_per_class=1,
... shuffle=False, random_state=42
... )
>>> mutual_info_classif(X, y)
array([0.589, 0.107, 0.196, 0.0968, 0.,
       0., 0., 0., 0., 0.])
|
mutual_info_classif
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/_mutual_info.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/_mutual_info.py
|
BSD-3-Clause
|
def _rfe_single_fit(rfe, estimator, X, y, train, test, scorer, routed_params):
"""
Return the score and n_features per step for a fit across one fold.
"""
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
fit_params = _check_method_params(
X, params=routed_params.estimator.fit, indices=train
)
score_params = _check_method_params(
X=X, params=routed_params.scorer.score, indices=test
)
rfe._fit(
X_train,
y_train,
lambda estimator, features: _score(
estimator,
X_test[:, features],
y_test,
scorer,
score_params=score_params,
),
**fit_params,
)
return rfe.step_scores_, rfe.step_support_, rfe.step_ranking_, rfe.step_n_features_
|
Return the score and n_features per step for a fit across one fold.
|
_rfe_single_fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/_rfe.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/_rfe.py
|
BSD-3-Clause
|
def fit(self, X, y, **fit_params):
"""Fit the RFE model and then the underlying estimator on the selected features.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples.
y : array-like of shape (n_samples,)
The target values.
**fit_params : dict
- If `enable_metadata_routing=False` (default): Parameters directly passed
to the ``fit`` method of the underlying estimator.
- If `enable_metadata_routing=True`: Parameters safely routed to the ``fit``
method of the underlying estimator.
.. versionchanged:: 1.6
See :ref:`Metadata Routing User Guide <metadata_routing>`
for more details.
Returns
-------
self : object
Fitted estimator.
"""
if _routing_enabled():
routed_params = process_routing(self, "fit", **fit_params)
else:
routed_params = Bunch(estimator=Bunch(fit=fit_params))
return self._fit(X, y, **routed_params.estimator.fit)
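A hedged usage sketch for this `fit` (routing disabled, so `fit_params` go straight to the sub-estimator); the Friedman regression data and linear SVR mirror common RFE usage but are assumptions here.
from sklearn.datasets import make_friedman1
from sklearn.feature_selection import RFE
from sklearn.svm import SVR

X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
rfe = RFE(estimator=SVR(kernel="linear"), n_features_to_select=2, step=1)
rfe.fit(X, y)

print(rfe.support_)   # boolean mask of the two retained features
print(rfe.ranking_)   # 1 marks kept features; higher ranks were eliminated earlier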
|
Fit the RFE model and then the underlying estimator on the selected features.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples.
y : array-like of shape (n_samples,)
The target values.
**fit_params : dict
- If `enable_metadata_routing=False` (default): Parameters directly passed
to the ``fit`` method of the underlying estimator.
- If `enable_metadata_routing=True`: Parameters safely routed to the ``fit``
method of the underlying estimator.
.. versionchanged:: 1.6
See :ref:`Metadata Routing User Guide <metadata_routing>`
for more details.
Returns
-------
self : object
Fitted estimator.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/_rfe.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/_rfe.py
|
BSD-3-Clause
|
def predict(self, X, **predict_params):
"""Reduce X to the selected features and predict using the estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
**predict_params : dict
Parameters to route to the ``predict`` method of the
underlying estimator.
.. versionadded:: 1.6
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>`
for more details.
Returns
-------
y : array of shape [n_samples]
The predicted target values.
"""
_raise_for_params(predict_params, self, "predict")
check_is_fitted(self)
if _routing_enabled():
routed_params = process_routing(self, "predict", **predict_params)
else:
routed_params = Bunch(estimator=Bunch(predict={}))
return self.estimator_.predict(
self.transform(X), **routed_params.estimator.predict
)
|
Reduce X to the selected features and predict using the estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
**predict_params : dict
Parameters to route to the ``predict`` method of the
underlying estimator.
.. versionadded:: 1.6
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>`
for more details.
Returns
-------
y : array of shape [n_samples]
The predicted target values.
|
predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/_rfe.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/_rfe.py
|
BSD-3-Clause
|
def score(self, X, y, **score_params):
"""Reduce X to the selected features and return the score of the estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The target values.
**score_params : dict
- If `enable_metadata_routing=False` (default): Parameters directly passed
to the ``score`` method of the underlying estimator.
- If `enable_metadata_routing=True`: Parameters safely routed to the `score`
method of the underlying estimator.
.. versionadded:: 1.0
.. versionchanged:: 1.6
See :ref:`Metadata Routing User Guide <metadata_routing>`
for more details.
Returns
-------
score : float
Score of the underlying base estimator computed with the selected
features returned by `rfe.transform(X)` and `y`.
"""
check_is_fitted(self)
if _routing_enabled():
routed_params = process_routing(self, "score", **score_params)
else:
routed_params = Bunch(estimator=Bunch(score=score_params))
return self.estimator_.score(
self.transform(X), y, **routed_params.estimator.score
)
|
Reduce X to the selected features and return the score of the estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The target values.
**score_params : dict
- If `enable_metadata_routing=False` (default): Parameters directly passed
to the ``score`` method of the underlying estimator.
- If `enable_metadata_routing=True`: Parameters safely routed to the `score`
method of the underlying estimator.
.. versionadded:: 1.0
.. versionchanged:: 1.6
See :ref:`Metadata Routing User Guide <metadata_routing>`
for more details.
Returns
-------
score : float
Score of the underlying base estimator computed with the selected
features returned by `rfe.transform(X)` and `y`.
|
score
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/_rfe.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/_rfe.py
|
BSD-3-Clause
|
def get_metadata_routing(self):
"""Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.6
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
"""
router = MetadataRouter(owner=self.__class__.__name__).add(
estimator=self.estimator,
method_mapping=MethodMapping()
.add(caller="fit", callee="fit")
.add(caller="predict", callee="predict")
.add(caller="score", callee="score"),
)
return router
|
Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.6
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
|
get_metadata_routing
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/_rfe.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/_rfe.py
|
BSD-3-Clause
|
def fit(self, X, y, *, groups=None, **params):
"""Fit the RFE model and automatically tune the number of selected features.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the total number of features.
y : array-like of shape (n_samples,)
Target values (integers for classification, real numbers for
regression).
groups : array-like of shape (n_samples,) or None, default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`~sklearn.model_selection.GroupKFold`).
.. versionadded:: 0.20
**params : dict of str -> object
Parameters passed to the ``fit`` method of the estimator,
the scorer, and the CV splitter.
.. versionadded:: 1.6
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>`
for more details.
Returns
-------
self : object
Fitted estimator.
"""
_raise_for_params(params, self, "fit")
X, y = validate_data(
self,
X,
y,
accept_sparse="csr",
ensure_min_features=2,
ensure_all_finite=False,
multi_output=True,
)
if _routing_enabled():
if groups is not None:
params.update({"groups": groups})
routed_params = process_routing(self, "fit", **params)
else:
routed_params = Bunch(
estimator=Bunch(fit={}),
splitter=Bunch(split={"groups": groups}),
scorer=Bunch(score={}),
)
# Initialization
cv = check_cv(self.cv, y, classifier=is_classifier(self.estimator))
scorer = self._get_scorer()
# Build an RFE object, which will evaluate and score each possible
# feature count, down to self.min_features_to_select
n_features = X.shape[1]
if self.min_features_to_select > n_features:
warnings.warn(
(
f"Found min_features_to_select={self.min_features_to_select} > "
f"{n_features=}. There will be no feature selection and all "
"features will be kept."
),
UserWarning,
)
rfe = RFE(
estimator=self.estimator,
n_features_to_select=min(self.min_features_to_select, n_features),
importance_getter=self.importance_getter,
step=self.step,
verbose=self.verbose,
)
# Determine the number of subsets of features by fitting across
# the train folds and choosing the "features_to_select" parameter
# that gives the least averaged error across all folds.
# Note that joblib raises a non-picklable error for bound methods
# even if n_jobs is set to 1 with the default multiprocessing
# backend.
# This branching is done to make sure that user code that sets n_jobs to 1
# and provides bound methods as scorers is not broken with the
# addition of the n_jobs parameter in version 0.18.
if effective_n_jobs(self.n_jobs) == 1:
parallel, func = list, _rfe_single_fit
else:
parallel = Parallel(n_jobs=self.n_jobs)
func = delayed(_rfe_single_fit)
step_results = parallel(
func(clone(rfe), self.estimator, X, y, train, test, scorer, routed_params)
for train, test in cv.split(X, y, **routed_params.splitter.split)
)
scores, supports, rankings, step_n_features = zip(*step_results)
step_n_features_rev = np.array(step_n_features[0])[::-1]
scores = np.array(scores)
rankings = np.array(rankings)
supports = np.array(supports)
# Reverse order such that lowest number of features is selected in case of tie.
scores_sum_rev = np.sum(scores, axis=0)[::-1]
n_features_to_select = step_n_features_rev[np.argmax(scores_sum_rev)]
# Re-execute an elimination with best_k over the whole set
rfe = RFE(
estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step,
importance_getter=self.importance_getter,
verbose=self.verbose,
)
rfe.fit(X, y, **routed_params.estimator.fit)
# Set final attributes
self.support_ = rfe.support_
self.n_features_ = rfe.n_features_
self.ranking_ = rfe.ranking_
self.estimator_ = clone(self.estimator)
self.estimator_.fit(self._transform(X), y, **routed_params.estimator.fit)
# reverse to stay consistent with before
scores_rev = scores[:, ::-1]
supports_rev = supports[:, ::-1]
rankings_rev = rankings[:, ::-1]
self.cv_results_ = {
"mean_test_score": np.mean(scores_rev, axis=0),
"std_test_score": np.std(scores_rev, axis=0),
**{f"split{i}_test_score": scores_rev[i] for i in range(scores.shape[0])},
**{f"split{i}_ranking": rankings_rev[i] for i in range(rankings.shape[0])},
**{f"split{i}_support": supports_rev[i] for i in range(supports.shape[0])},
"n_features": step_n_features_rev,
}
return self
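A hedged end-to-end sketch of this cross-validated fit; the classifier, CV splitter, and scoring choice are illustrative assumptions, and `cv_results_` is read back exactly as assembled at the end of the method above.
from sklearn.datasets import make_classification
from sklearn.feature_selection import RFECV
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold

X, y = make_classification(n_samples=300, n_features=12, n_informative=3,
                           random_state=0)
rfecv = RFECV(
    estimator=LogisticRegression(max_iter=1000),
    step=1,
    cv=StratifiedKFold(5),
    scoring="accuracy",
    min_features_to_select=1,
)
rfecv.fit(X, y)

print(rfecv.n_features_)                      # tuned number of selected features
print(rfecv.cv_results_["mean_test_score"])   # one score per candidate feature count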
|
Fit the RFE model and automatically tune the number of selected features.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the total number of features.
y : array-like of shape (n_samples,)
Target values (integers for classification, real numbers for
regression).
groups : array-like of shape (n_samples,) or None, default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`~sklearn.model_selection.GroupKFold`).
.. versionadded:: 0.20
**params : dict of str -> object
Parameters passed to the ``fit`` method of the estimator,
the scorer, and the CV splitter.
.. versionadded:: 1.6
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>`
for more details.
Returns
-------
self : object
Fitted estimator.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/_rfe.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/_rfe.py
|
BSD-3-Clause
|
def score(self, X, y, **score_params):
"""Score using the `scoring` option on the given test data and labels.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test samples.
y : array-like of shape (n_samples,)
True labels for X.
**score_params : dict
Parameters to pass to the `score` method of the underlying scorer.
.. versionadded:: 1.6
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>`
for more details.
Returns
-------
score : float
Score of self.predict(X) w.r.t. y defined by `scoring`.
"""
_raise_for_params(score_params, self, "score")
scoring = self._get_scorer()
if _routing_enabled():
routed_params = process_routing(self, "score", **score_params)
else:
routed_params = Bunch()
routed_params.scorer = Bunch(score={})
return scoring(self, X, y, **routed_params.scorer.score)
|
Score using the `scoring` option on the given test data and labels.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test samples.
y : array-like of shape (n_samples,)
True labels for X.
**score_params : dict
Parameters to pass to the `score` method of the underlying scorer.
.. versionadded:: 1.6
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>`
for more details.
Returns
-------
score : float
Score of self.predict(X) w.r.t. y defined by `scoring`.
|
score
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/_rfe.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/_rfe.py
|
BSD-3-Clause
|
def get_metadata_routing(self):
"""Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.6
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
"""
router = MetadataRouter(owner=self.__class__.__name__)
router.add(
estimator=self.estimator,
method_mapping=MethodMapping().add(caller="fit", callee="fit"),
)
router.add(
splitter=check_cv(self.cv),
method_mapping=MethodMapping().add(
caller="fit",
callee="split",
),
)
router.add(
scorer=self._get_scorer(),
method_mapping=MethodMapping()
.add(caller="fit", callee="score")
.add(caller="score", callee="score"),
)
return router
|
Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.6
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
|
get_metadata_routing
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/_rfe.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/_rfe.py
|
BSD-3-Clause
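A sketch of how this routing is used in practice, assuming scikit-learn >= 1.6 with metadata routing enabled; here ``sample_weight`` is requested by the inner estimator and forwarded through ``RFECV.fit`` (data are synthetic):

import numpy as np
import sklearn
from sklearn.datasets import make_classification
from sklearn.feature_selection import RFECV
from sklearn.linear_model import LogisticRegression

sklearn.set_config(enable_metadata_routing=True)

X, y = make_classification(random_state=0)
sample_weight = np.ones_like(y, dtype=float)

# The inner estimator must request sample_weight for its fit method.
estimator = LogisticRegression().set_fit_request(sample_weight=True)
selector = RFECV(estimator=estimator, cv=3).fit(X, y, sample_weight=sample_weight)

sklearn.set_config(enable_metadata_routing=False)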
|
def fit(self, X, y=None, **params):
"""Learn the features to select from X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of predictors.
y : array-like of shape (n_samples,), default=None
Target values. This parameter may be ignored for
unsupervised learning.
**params : dict, default=None
Parameters to be passed to the underlying `estimator`, `cv`
and `scorer` objects.
.. versionadded:: 1.6
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
self : object
Returns the instance itself.
"""
_raise_for_params(params, self, "fit")
tags = self.__sklearn_tags__()
X = validate_data(
self,
X,
accept_sparse="csc",
ensure_min_features=2,
ensure_all_finite=not tags.input_tags.allow_nan,
)
n_features = X.shape[1]
if self.n_features_to_select == "auto":
if self.tol is not None:
# With auto feature selection, `n_features_to_select_` will be updated
# to `support_.sum()` after features are selected.
self.n_features_to_select_ = n_features - 1
else:
self.n_features_to_select_ = n_features // 2
elif isinstance(self.n_features_to_select, Integral):
if self.n_features_to_select >= n_features:
raise ValueError("n_features_to_select must be < n_features.")
self.n_features_to_select_ = self.n_features_to_select
elif isinstance(self.n_features_to_select, Real):
self.n_features_to_select_ = int(n_features * self.n_features_to_select)
if self.tol is not None and self.tol < 0 and self.direction == "forward":
raise ValueError(
"tol must be strictly positive when doing forward selection"
)
cv = check_cv(self.cv, y, classifier=is_classifier(self.estimator))
cloned_estimator = clone(self.estimator)
# the current mask corresponds to the set of features:
# - that we have already *selected* if we do forward selection
# - that we have already *excluded* if we do backward selection
current_mask = np.zeros(shape=n_features, dtype=bool)
n_iterations = (
self.n_features_to_select_
if self.n_features_to_select == "auto" or self.direction == "forward"
else n_features - self.n_features_to_select_
)
old_score = -np.inf
is_auto_select = self.tol is not None and self.n_features_to_select == "auto"
# We only need to verify the routing here and not use the routed params
# because internally the actual routing will also take place inside the
# `cross_val_score` function.
if _routing_enabled():
process_routing(self, "fit", **params)
for _ in range(n_iterations):
new_feature_idx, new_score = self._get_best_new_feature_score(
cloned_estimator, X, y, cv, current_mask, **params
)
if is_auto_select and ((new_score - old_score) < self.tol):
break
old_score = new_score
current_mask[new_feature_idx] = True
if self.direction == "backward":
current_mask = ~current_mask
self.support_ = current_mask
self.n_features_to_select_ = self.support_.sum()
return self
|
Learn the features to select from X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of predictors.
y : array-like of shape (n_samples,), default=None
Target values. This parameter may be ignored for
unsupervised learning.
**params : dict, default=None
Parameters to be passed to the underlying `estimator`, `cv`
and `scorer` objects.
.. versionadded:: 1.6
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
self : object
Returns the instance itself.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/_sequential.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/_sequential.py
|
BSD-3-Clause
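An illustrative run of the automatic stopping behaviour described above (``n_features_to_select="auto"`` combined with ``tol``); the data and estimator are synthetic placeholders:

from sklearn.datasets import make_regression
from sklearn.feature_selection import SequentialFeatureSelector
from sklearn.linear_model import LinearRegression

X, y = make_regression(n_samples=100, n_features=10, n_informative=3, random_state=0)
sfs = SequentialFeatureSelector(
    LinearRegression(), n_features_to_select="auto", tol=1e-3, direction="forward", cv=3
)
sfs.fit(X, y)

print(sfs.n_features_to_select_)      # number of features actually kept
print(sfs.get_support(indices=True))  # indices of the selected features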
|
def get_metadata_routing(self):
"""Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.6
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
"""
router = MetadataRouter(owner=self.__class__.__name__)
router.add(
estimator=self.estimator,
method_mapping=MethodMapping().add(caller="fit", callee="fit"),
)
router.add(
splitter=check_cv(self.cv, classifier=is_classifier(self.estimator)),
method_mapping=MethodMapping().add(caller="fit", callee="split"),
)
router.add(
scorer=check_scoring(self.estimator, scoring=self.scoring),
method_mapping=MethodMapping().add(caller="fit", callee="score"),
)
return router
|
Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.6
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
|
get_metadata_routing
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/_sequential.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/_sequential.py
|
BSD-3-Clause
|
def _clean_nans(scores):
"""
Fixes Issue #1240: NaNs can't be properly compared, so change them to the
smallest value of the scores' dtype. -inf seems to be unreliable.
"""
# XXX where should this function be called? fit? scoring functions
# themselves?
scores = as_float_array(scores, copy=True)
scores[np.isnan(scores)] = np.finfo(scores.dtype).min
return scores
|
Fixes Issue #1240: NaNs can't be properly compared, so change them to the
smallest value of the scores' dtype. -inf seems to be unreliable.
|
_clean_nans
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/_univariate_selection.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/_univariate_selection.py
|
BSD-3-Clause
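The replacement performed by this helper, reproduced with plain NumPy for exposition (the score values are illustrative):

import numpy as np

scores = np.array([0.5, np.nan, 2.0])
scores = scores.astype(np.float64, copy=True)
scores[np.isnan(scores)] = np.finfo(scores.dtype).min  # NaN -> smallest finite value
print(scores)  # the NaN becomes roughly -1.8e308, so it sorts below every real score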
|
def f_oneway(*args):
"""Perform a 1-way ANOVA.
The one-way ANOVA tests the null hypothesis that 2 or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
*args : {array-like, sparse matrix}
Sample1, sample2... The sample measurements should be given as
arguments.
Returns
-------
f_statistic : float
The computed F-value of the test.
p_value : float
The associated p-value from the F-distribution.
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent
2. Each sample is from a normally distributed population
3. The population standard deviations of the groups are all equal. This
property is known as homoscedasticity.
If these assumptions are not true for a given set of data, it may still be
possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`_) although
with some loss of power.
The algorithm is from Heiman[2], pp.394-7.
See ``scipy.stats.f_oneway``, which should give the same results while
being less efficient.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 14.
http://vassarstats.net/textbook
.. [2] Heiman, G.W. Research Methods in Statistics. 2002.
"""
n_classes = len(args)
args = [as_float_array(a) for a in args]
n_samples_per_class = np.array([a.shape[0] for a in args])
n_samples = np.sum(n_samples_per_class)
ss_alldata = sum(safe_sqr(a).sum(axis=0) for a in args)
sums_args = [np.asarray(a.sum(axis=0)) for a in args]
square_of_sums_alldata = sum(sums_args) ** 2
square_of_sums_args = [s**2 for s in sums_args]
sstot = ss_alldata - square_of_sums_alldata / float(n_samples)
ssbn = 0.0
for k, _ in enumerate(args):
ssbn += square_of_sums_args[k] / n_samples_per_class[k]
ssbn -= square_of_sums_alldata / float(n_samples)
sswn = sstot - ssbn
dfbn = n_classes - 1
dfwn = n_samples - n_classes
msb = ssbn / float(dfbn)
msw = sswn / float(dfwn)
constant_features_idx = np.where(msw == 0.0)[0]
if np.nonzero(msb)[0].size != msb.size and constant_features_idx.size:
warnings.warn("Features %s are constant." % constant_features_idx, UserWarning)
f = msb / msw
# flatten matrix to vector in sparse case
f = np.asarray(f).ravel()
prob = special.fdtrc(dfbn, dfwn, f)
return f, prob
|
Perform a 1-way ANOVA.
The one-way ANOVA tests the null hypothesis that 2 or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
*args : {array-like, sparse matrix}
Sample1, sample2... The sample measurements should be given as
arguments.
Returns
-------
f_statistic : float
The computed F-value of the test.
p_value : float
The associated p-value from the F-distribution.
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent
2. Each sample is from a normally distributed population
3. The population standard deviations of the groups are all equal. This
property is known as homoscedasticity.
If these assumptions are not true for a given set of data, it may still be
possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`_) although
with some loss of power.
The algorithm is from Heiman[2], pp.394-7.
See ``scipy.stats.f_oneway``, which should give the same results while
being less efficient.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 14.
http://vassarstats.net/textbook
.. [2] Heiman, G.W. Research Methods in Statistics. 2002.
|
f_oneway
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/_univariate_selection.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/_univariate_selection.py
|
BSD-3-Clause
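A small usage sketch with two synthetic groups of samples; each argument is a (n_samples_in_group, n_features) array, which is how ``f_classif`` calls this function internally. If ``f_oneway`` is not re-exported by your scikit-learn version, ``scipy.stats.f_oneway`` gives the same results.

import numpy as np
from sklearn.feature_selection import f_oneway

rng = np.random.RandomState(0)
group_a = rng.normal(loc=0.0, size=(20, 3))
group_b = rng.normal(loc=1.0, size=(20, 3))  # shifted mean in every feature

f_statistic, p_values = f_oneway(group_a, group_b)
print(f_statistic.shape)  # one F-value per feature -> (3,)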
|
def f_classif(X, y):
"""Compute the ANOVA F-value for the provided sample.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The set of regressors that will be tested sequentially.
y : array-like of shape (n_samples,)
The target vector.
Returns
-------
f_statistic : ndarray of shape (n_features,)
F-statistic for each feature.
p_values : ndarray of shape (n_features,)
P-values associated with the F-statistic.
See Also
--------
chi2 : Chi-squared stats of non-negative features for classification tasks.
f_regression : F-value between label/feature for regression tasks.
Examples
--------
>>> from sklearn.datasets import make_classification
>>> from sklearn.feature_selection import f_classif
>>> X, y = make_classification(
... n_samples=100, n_features=10, n_informative=2, n_clusters_per_class=1,
... shuffle=False, random_state=42
... )
>>> f_statistic, p_values = f_classif(X, y)
>>> f_statistic
array([2.21e+02, 7.02e-01, 1.70e+00, 9.31e-01,
5.41e+00, 3.25e-01, 4.71e-02, 5.72e-01,
7.54e-01, 8.90e-02])
>>> p_values
array([7.14e-27, 4.04e-01, 1.96e-01, 3.37e-01,
2.21e-02, 5.70e-01, 8.29e-01, 4.51e-01,
3.87e-01, 7.66e-01])
"""
X, y = check_X_y(X, y, accept_sparse=["csr", "csc", "coo"])
args = [X[safe_mask(X, y == k)] for k in np.unique(y)]
return f_oneway(*args)
|
Compute the ANOVA F-value for the provided sample.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The set of regressors that will be tested sequentially.
y : array-like of shape (n_samples,)
The target vector.
Returns
-------
f_statistic : ndarray of shape (n_features,)
F-statistic for each feature.
p_values : ndarray of shape (n_features,)
P-values associated with the F-statistic.
See Also
--------
chi2 : Chi-squared stats of non-negative features for classification tasks.
f_regression : F-value between label/feature for regression tasks.
Examples
--------
>>> from sklearn.datasets import make_classification
>>> from sklearn.feature_selection import f_classif
>>> X, y = make_classification(
... n_samples=100, n_features=10, n_informative=2, n_clusters_per_class=1,
... shuffle=False, random_state=42
... )
>>> f_statistic, p_values = f_classif(X, y)
>>> f_statistic
array([2.21e+02, 7.02e-01, 1.70e+00, 9.31e-01,
5.41e+00, 3.25e-01, 4.71e-02, 5.72e-01,
7.54e-01, 8.90e-02])
>>> p_values
array([7.14e-27, 4.04e-01, 1.96e-01, 3.37e-01,
2.21e-02, 5.70e-01, 8.29e-01, 4.51e-01,
3.87e-01, 7.66e-01])
|
f_classif
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/_univariate_selection.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/_univariate_selection.py
|
BSD-3-Clause
|
def _chisquare(f_obs, f_exp):
"""Fast replacement for scipy.stats.chisquare.
Version from https://github.com/scipy/scipy/pull/2525 with additional
optimizations.
"""
f_obs = np.asarray(f_obs, dtype=np.float64)
k = len(f_obs)
# Reuse f_obs for chi-squared statistics
chisq = f_obs
chisq -= f_exp
chisq **= 2
with np.errstate(invalid="ignore"):
chisq /= f_exp
chisq = chisq.sum(axis=0)
return chisq, special.chdtrc(k - 1, chisq)
|
Fast replacement for scipy.stats.chisquare.
Version from https://github.com/scipy/scipy/pull/2525 with additional
optimizations.
|
_chisquare
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/_univariate_selection.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/_univariate_selection.py
|
BSD-3-Clause
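What the helper computes, reproduced for a single feature with ``scipy.stats.chisquare`` as a reference (the counts below are made up); note that the private helper mutates ``f_obs`` in place and sums over axis 0 so that ``chi2`` can evaluate all features in one vectorised call.

import numpy as np
from scipy import stats

f_obs = np.array([16.0, 18.0, 16.0, 14.0, 12.0, 12.0])
f_exp = np.full_like(f_obs, f_obs.mean())

chisq, p_value = stats.chisquare(f_obs, f_exp)
print(chisq, p_value)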
|
def chi2(X, y):
"""Compute chi-squared stats between each non-negative feature and class.
This score can be used to select the `n_features` features with the
highest values for the test chi-squared statistic from X, which must
contain only **non-negative integer feature values** such as booleans or frequencies
(e.g., term counts in document classification), relative to the classes.
If some of your features are continuous, you need to bin them, for
example by using :class:`~sklearn.preprocessing.KBinsDiscretizer`.
Recall that the chi-square test measures dependence between stochastic
variables, so using this function "weeds out" the features that are the
most likely to be independent of class and therefore irrelevant for
classification.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Sample vectors.
y : array-like of shape (n_samples,)
Target vector (class labels).
Returns
-------
chi2 : ndarray of shape (n_features,)
Chi2 statistics for each feature.
p_values : ndarray of shape (n_features,)
P-values for each feature.
See Also
--------
f_classif : ANOVA F-value between label/feature for classification tasks.
f_regression : F-value between label/feature for regression tasks.
Notes
-----
Complexity of this algorithm is O(n_classes * n_features).
Examples
--------
>>> import numpy as np
>>> from sklearn.feature_selection import chi2
>>> X = np.array([[1, 1, 3],
... [0, 1, 5],
... [5, 4, 1],
... [6, 6, 2],
... [1, 4, 0],
... [0, 0, 0]])
>>> y = np.array([1, 1, 0, 0, 2, 2])
>>> chi2_stats, p_values = chi2(X, y)
>>> chi2_stats
array([15.3, 6.5 , 8.9])
>>> p_values
array([0.000456, 0.0387, 0.0116 ])
"""
# XXX: we might want to do some of the following in logspace instead for
# numerical stability.
# Converting X to float allows getting better performance for the
# safe_sparse_dot call made below.
X = check_array(X, accept_sparse="csr", dtype=(np.float64, np.float32))
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative.")
# Use a sparse representation for Y by default to reduce memory usage when
# y has many unique classes.
Y = LabelBinarizer(sparse_output=True).fit_transform(y)
if Y.shape[1] == 1:
Y = Y.toarray()
Y = np.append(1 - Y, Y, axis=1)
observed = safe_sparse_dot(Y.T, X) # n_classes * n_features
if issparse(observed):
# convert back to a dense array before calling _chisquare
# XXX: could _chisquare be reimplement to accept sparse matrices for
# cases where both n_classes and n_features are large (and X is
# sparse)?
observed = observed.toarray()
feature_count = X.sum(axis=0).reshape(1, -1)
class_prob = Y.mean(axis=0).reshape(1, -1)
expected = np.dot(class_prob.T, feature_count)
return _chisquare(observed, expected)
|
Compute chi-squared stats between each non-negative feature and class.
This score can be used to select the `n_features` features with the
highest values for the test chi-squared statistic from X, which must
contain only **non-negative integer feature values** such as booleans or frequencies
(e.g., term counts in document classification), relative to the classes.
If some of your features are continuous, you need to bin them, for
example by using :class:`~sklearn.preprocessing.KBinsDiscretizer`.
Recall that the chi-square test measures dependence between stochastic
variables, so using this function "weeds out" the features that are the
most likely to be independent of class and therefore irrelevant for
classification.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Sample vectors.
y : array-like of shape (n_samples,)
Target vector (class labels).
Returns
-------
chi2 : ndarray of shape (n_features,)
Chi2 statistics for each feature.
p_values : ndarray of shape (n_features,)
P-values for each feature.
See Also
--------
f_classif : ANOVA F-value between label/feature for classification tasks.
f_regression : F-value between label/feature for regression tasks.
Notes
-----
Complexity of this algorithm is O(n_classes * n_features).
Examples
--------
>>> import numpy as np
>>> from sklearn.feature_selection import chi2
>>> X = np.array([[1, 1, 3],
... [0, 1, 5],
... [5, 4, 1],
... [6, 6, 2],
... [1, 4, 0],
... [0, 0, 0]])
>>> y = np.array([1, 1, 0, 0, 2, 2])
>>> chi2_stats, p_values = chi2(X, y)
>>> chi2_stats
array([15.3, 6.5 , 8.9])
>>> p_values
array([0.000456, 0.0387, 0.0116 ])
|
chi2
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/_univariate_selection.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/_univariate_selection.py
|
BSD-3-Clause
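A typical way to use ``chi2`` is as the score function of a univariate selector; the counts reuse the example above and must be non-negative:

import numpy as np
from sklearn.feature_selection import SelectKBest, chi2

X = np.array([[1, 1, 3], [0, 1, 5], [5, 4, 1], [6, 6, 2], [1, 4, 0], [0, 0, 0]])
y = np.array([1, 1, 0, 0, 2, 2])

selector = SelectKBest(chi2, k=2).fit(X, y)
print(selector.get_support(indices=True))  # indices of the two highest-scoring features
X_new = selector.transform(X)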
|
def r_regression(X, y, *, center=True, force_finite=True):
"""Compute Pearson's r for each features and the target.
Pearson's r is also known as the Pearson correlation coefficient.
Linear model for testing the individual effect of each of many regressors.
This is a scoring function to be used in a feature selection procedure, not
a free standing feature selection procedure.
The cross correlation between each regressor and the target is computed
as::
E[(X[:, i] - mean(X[:, i])) * (y - mean(y))] / (std(X[:, i]) * std(y))
For more on usage see the :ref:`User Guide <univariate_feature_selection>`.
.. versionadded:: 1.0
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data matrix.
y : array-like of shape (n_samples,)
The target vector.
center : bool, default=True
Whether or not to center the data matrix `X` and the target vector `y`.
By default, `X` and `y` will be centered.
force_finite : bool, default=True
Whether or not to force the Pearson's R correlation to be finite.
In the particular case where some features in `X` or the target `y`
are constant, the Pearson's R correlation is not defined. When
`force_finite=False`, a correlation of `np.nan` is returned to
acknowledge this case. When `force_finite=True`, this value will be
forced to a minimal correlation of `0.0`.
.. versionadded:: 1.1
Returns
-------
correlation_coefficient : ndarray of shape (n_features,)
Pearson's R correlation coefficients of features.
See Also
--------
f_regression: Univariate linear regression tests returning f-statistic
and p-values.
mutual_info_regression: Mutual information for a continuous target.
f_classif: ANOVA F-value between label/feature for classification tasks.
chi2: Chi-squared stats of non-negative features for classification tasks.
Examples
--------
>>> from sklearn.datasets import make_regression
>>> from sklearn.feature_selection import r_regression
>>> X, y = make_regression(
... n_samples=50, n_features=3, n_informative=1, noise=1e-4, random_state=42
... )
>>> r_regression(X, y)
array([-0.157, 1. , -0.229])
"""
X, y = check_X_y(X, y, accept_sparse=["csr", "csc", "coo"], dtype=np.float64)
n_samples = X.shape[0]
# Compute centered values
# Note that E[(x - mean(x))*(y - mean(y))] = E[x*(y - mean(y))], so we
# need not center X
if center:
y = y - np.mean(y)
# TODO: for Scipy <= 1.10, `isspmatrix(X)` returns `True` for sparse arrays.
# Here, we check the output of the `.mean` operation that returns a `np.matrix`
# for sparse matrices while a `np.array` for dense and sparse arrays.
# We can reconsider using `isspmatrix` when the minimum version is
# SciPy >= 1.11
X_means = X.mean(axis=0)
X_means = X_means.getA1() if isinstance(X_means, np.matrix) else X_means
# Compute the scaled standard deviations via moments
X_norms = np.sqrt(row_norms(X.T, squared=True) - n_samples * X_means**2)
else:
X_norms = row_norms(X.T)
correlation_coefficient = safe_sparse_dot(y, X)
with np.errstate(divide="ignore", invalid="ignore"):
correlation_coefficient /= X_norms
correlation_coefficient /= np.linalg.norm(y)
if force_finite and not np.isfinite(correlation_coefficient).all():
# case where the target or some features are constant
# the correlation coefficient(s) is/are set to the minimum (i.e. 0.0)
nan_mask = np.isnan(correlation_coefficient)
correlation_coefficient[nan_mask] = 0.0
return correlation_coefficient
|
Compute Pearson's r for each feature and the target.
Pearson's r is also known as the Pearson correlation coefficient.
Linear model for testing the individual effect of each of many regressors.
This is a scoring function to be used in a feature selection procedure, not
a free standing feature selection procedure.
The cross correlation between each regressor and the target is computed
as::
E[(X[:, i] - mean(X[:, i])) * (y - mean(y))] / (std(X[:, i]) * std(y))
For more on usage see the :ref:`User Guide <univariate_feature_selection>`.
.. versionadded:: 1.0
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data matrix.
y : array-like of shape (n_samples,)
The target vector.
center : bool, default=True
Whether or not to center the data matrix `X` and the target vector `y`.
By default, `X` and `y` will be centered.
force_finite : bool, default=True
Whether or not to force the Pearson's R correlation to be finite.
In the particular case where some features in `X` or the target `y`
are constant, the Pearson's R correlation is not defined. When
`force_finite=False`, a correlation of `np.nan` is returned to
acknowledge this case. When `force_finite=True`, this value will be
forced to a minimal correlation of `0.0`.
.. versionadded:: 1.1
Returns
-------
correlation_coefficient : ndarray of shape (n_features,)
Pearson's R correlation coefficients of features.
See Also
--------
f_regression: Univariate linear regression tests returning f-statistic
and p-values.
mutual_info_regression: Mutual information for a continuous target.
f_classif: ANOVA F-value between label/feature for classification tasks.
chi2: Chi-squared stats of non-negative features for classification tasks.
Examples
--------
>>> from sklearn.datasets import make_regression
>>> from sklearn.feature_selection import r_regression
>>> X, y = make_regression(
... n_samples=50, n_features=3, n_informative=1, noise=1e-4, random_state=42
... )
>>> r_regression(X, y)
array([-0.157, 1. , -0.229])
|
r_regression
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/_univariate_selection.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/_univariate_selection.py
|
BSD-3-Clause
|
def f_regression(X, y, *, center=True, force_finite=True):
"""Univariate linear regression tests returning F-statistic and p-values.
Quick linear model for testing the effect of a single regressor,
sequentially for many regressors.
This is done in 2 steps:
1. The cross correlation between each regressor and the target is computed
using :func:`r_regression` as::
E[(X[:, i] - mean(X[:, i])) * (y - mean(y))] / (std(X[:, i]) * std(y))
2. It is converted to an F score and then to a p-value.
:func:`f_regression` is derived from :func:`r_regression` and will rank
features in the same order if all the features are positively correlated
with the target.
Note however that contrary to :func:`f_regression`, :func:`r_regression`
values lie in [-1, 1] and can thus be negative. :func:`f_regression` is
therefore recommended as a feature selection criterion to identify
potentially predictive features for a downstream classifier, irrespective of
the sign of the association with the target variable.
Furthermore :func:`f_regression` returns p-values while
:func:`r_regression` does not.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data matrix.
y : array-like of shape (n_samples,)
The target vector.
center : bool, default=True
Whether or not to center the data matrix `X` and the target vector `y`.
By default, `X` and `y` will be centered.
force_finite : bool, default=True
Whether or not to force the F-statistics and associated p-values to
be finite. There are two cases where the F-statistic is expected to not
be finite:
- when the target `y` or some features in `X` are constant. In this
case, the Pearson's R correlation is not defined leading to obtain
`np.nan` values in the F-statistic and p-value. When
`force_finite=True`, the F-statistic is set to `0.0` and the
associated p-value is set to `1.0`.
- when a feature in `X` is perfectly correlated (or
anti-correlated) with the target `y`. In this case, the F-statistic
is expected to be `np.inf`. When `force_finite=True`, the F-statistic
is set to `np.finfo(dtype).max` and the associated p-value is set to
`0.0`.
.. versionadded:: 1.1
Returns
-------
f_statistic : ndarray of shape (n_features,)
F-statistic for each feature.
p_values : ndarray of shape (n_features,)
P-values associated with the F-statistic.
See Also
--------
r_regression: Pearson's R between label/feature for regression tasks.
f_classif: ANOVA F-value between label/feature for classification tasks.
chi2: Chi-squared stats of non-negative features for classification tasks.
SelectKBest: Select features based on the k highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFdr: Select features based on an estimated false discovery rate.
SelectFwe: Select features based on family-wise error rate.
SelectPercentile: Select features based on percentile of the highest
scores.
Examples
--------
>>> from sklearn.datasets import make_regression
>>> from sklearn.feature_selection import f_regression
>>> X, y = make_regression(
... n_samples=50, n_features=3, n_informative=1, noise=1e-4, random_state=42
... )
>>> f_statistic, p_values = f_regression(X, y)
>>> f_statistic
array([1.21, 2.67e13, 2.66])
>>> p_values
array([0.276, 1.54e-283, 0.11])
"""
correlation_coefficient = r_regression(
X, y, center=center, force_finite=force_finite
)
deg_of_freedom = y.size - (2 if center else 1)
corr_coef_squared = correlation_coefficient**2
with np.errstate(divide="ignore", invalid="ignore"):
f_statistic = corr_coef_squared / (1 - corr_coef_squared) * deg_of_freedom
p_values = stats.f.sf(f_statistic, 1, deg_of_freedom)
if force_finite and not np.isfinite(f_statistic).all():
# case where there is a perfect (anti-)correlation
# f-statistics can be set to the maximum and p-values to zero
mask_inf = np.isinf(f_statistic)
f_statistic[mask_inf] = np.finfo(f_statistic.dtype).max
# case where the target or some features are constant
# f-statistics would be minimum and thus p-values large
mask_nan = np.isnan(f_statistic)
f_statistic[mask_nan] = 0.0
p_values[mask_nan] = 1.0
return f_statistic, p_values
|
Univariate linear regression tests returning F-statistic and p-values.
Quick linear model for testing the effect of a single regressor,
sequentially for many regressors.
This is done in 2 steps:
1. The cross correlation between each regressor and the target is computed
using :func:`r_regression` as::
E[(X[:, i] - mean(X[:, i])) * (y - mean(y))] / (std(X[:, i]) * std(y))
2. It is converted to an F score and then to a p-value.
:func:`f_regression` is derived from :func:`r_regression` and will rank
features in the same order if all the features are positively correlated
with the target.
Note however that contrary to :func:`f_regression`, :func:`r_regression`
values lie in [-1, 1] and can thus be negative. :func:`f_regression` is
therefore recommended as a feature selection criterion to identify
potentially predictive features for a downstream classifier, irrespective of
the sign of the association with the target variable.
Furthermore :func:`f_regression` returns p-values while
:func:`r_regression` does not.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data matrix.
y : array-like of shape (n_samples,)
The target vector.
center : bool, default=True
Whether or not to center the data matrix `X` and the target vector `y`.
By default, `X` and `y` will be centered.
force_finite : bool, default=True
Whether or not to force the F-statistics and associated p-values to
be finite. There are two cases where the F-statistic is expected to not
be finite:
- when the target `y` or some features in `X` are constant. In this
case, the Pearson's R correlation is not defined leading to obtain
`np.nan` values in the F-statistic and p-value. When
`force_finite=True`, the F-statistic is set to `0.0` and the
associated p-value is set to `1.0`.
- when a feature in `X` is perfectly correlated (or
anti-correlated) with the target `y`. In this case, the F-statistic
is expected to be `np.inf`. When `force_finite=True`, the F-statistic
is set to `np.finfo(dtype).max` and the associated p-value is set to
`0.0`.
.. versionadded:: 1.1
Returns
-------
f_statistic : ndarray of shape (n_features,)
F-statistic for each feature.
p_values : ndarray of shape (n_features,)
P-values associated with the F-statistic.
See Also
--------
r_regression: Pearson's R between label/feature for regression tasks.
f_classif: ANOVA F-value between label/feature for classification tasks.
chi2: Chi-squared stats of non-negative features for classification tasks.
SelectKBest: Select features based on the k highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFdr: Select features based on an estimated false discovery rate.
SelectFwe: Select features based on family-wise error rate.
SelectPercentile: Select features based on percentile of the highest
scores.
Examples
--------
>>> from sklearn.datasets import make_regression
>>> from sklearn.feature_selection import f_regression
>>> X, y = make_regression(
... n_samples=50, n_features=3, n_informative=1, noise=1e-4, random_state=42
... )
>>> f_statistic, p_values = f_regression(X, y)
>>> f_statistic
array([1.21, 2.67e13, 2.66])
>>> p_values
array([0.276, 1.54e-283, 0.11])
|
f_regression
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/_univariate_selection.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/_univariate_selection.py
|
BSD-3-Clause
|
def fit(self, X, y=None):
"""Run score function on (X, y) and get the appropriate features.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The training input samples.
y : array-like of shape (n_samples,) or None
The target values (class labels in classification, real numbers in
regression). If the selector is unsupervised then `y` can be set to `None`.
Returns
-------
self : object
Returns the instance itself.
"""
if y is None:
X = validate_data(self, X, accept_sparse=["csr", "csc"])
else:
X, y = validate_data(
self, X, y, accept_sparse=["csr", "csc"], multi_output=True
)
self._check_params(X, y)
score_func_ret = self.score_func(X, y)
if isinstance(score_func_ret, (list, tuple)):
self.scores_, self.pvalues_ = score_func_ret
self.pvalues_ = np.asarray(self.pvalues_)
else:
self.scores_ = score_func_ret
self.pvalues_ = None
self.scores_ = np.asarray(self.scores_)
return self
|
Run score function on (X, y) and get the appropriate features.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The training input samples.
y : array-like of shape (n_samples,) or None
The target values (class labels in classification, real numbers in
regression). If the selector is unsupervised then `y` can be set to `None`.
Returns
-------
self : object
Returns the instance itself.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/_univariate_selection.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/_univariate_selection.py
|
BSD-3-Clause
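After ``fit``, the univariate selectors expose the raw scores and p-values returned by the score function; a short illustration with ``SelectPercentile`` and ``f_classif`` on synthetic data:

from sklearn.datasets import make_classification
from sklearn.feature_selection import SelectPercentile, f_classif

X, y = make_classification(n_samples=100, n_features=8, n_informative=3, random_state=0)
selector = SelectPercentile(f_classif, percentile=25).fit(X, y)

print(selector.scores_)   # one F-statistic per feature
print(selector.pvalues_)  # matching p-values (None for score functions that return only scores)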
|
def fit(self, X, y=None):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Data from which to compute variances, where `n_samples` is
the number of samples and `n_features` is the number of features.
y : any, default=None
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self : object
Returns the instance itself.
"""
X = validate_data(
self,
X,
accept_sparse=("csr", "csc"),
dtype=np.float64,
ensure_all_finite="allow-nan",
)
if hasattr(X, "toarray"): # sparse matrix
_, self.variances_ = mean_variance_axis(X, axis=0)
if self.threshold == 0:
mins, maxes = min_max_axis(X, axis=0)
peak_to_peaks = maxes - mins
else:
self.variances_ = np.nanvar(X, axis=0)
if self.threshold == 0:
peak_to_peaks = np.ptp(X, axis=0)
if self.threshold == 0:
# Use peak-to-peak to avoid numeric precision issues
# for constant features
compare_arr = np.array([self.variances_, peak_to_peaks])
self.variances_ = np.nanmin(compare_arr, axis=0)
if np.all(~np.isfinite(self.variances_) | (self.variances_ <= self.threshold)):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
|
Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Data from which to compute variances, where `n_samples` is
the number of samples and `n_features` is the number of features.
y : any, default=None
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self : object
Returns the instance itself.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/_variance_threshold.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/_variance_threshold.py
|
BSD-3-Clause
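An illustrative call showing the fitted ``variances_`` attribute and the removal of a constant column (the data are made up):

import numpy as np
from sklearn.feature_selection import VarianceThreshold

X = np.array([[0.0, 2.0, 0.3],
              [0.0, 1.0, 0.5],
              [0.0, 3.0, 0.1]])

selector = VarianceThreshold(threshold=0.0)
X_reduced = selector.fit_transform(X)
print(selector.variances_)  # the first feature has zero variance and is dropped
print(X_reduced.shape)      # (3, 2)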
|
def test_output_dataframe():
"""Check output dtypes for dataframes is consistent with the input dtypes."""
pd = pytest.importorskip("pandas")
X = pd.DataFrame(
{
"a": pd.Series([1.0, 2.4, 4.5], dtype=np.float32),
"b": pd.Series(["a", "b", "a"], dtype="category"),
"c": pd.Series(["j", "b", "b"], dtype="category"),
"d": pd.Series([3.0, 2.4, 1.2], dtype=np.float64),
}
)
for step in [2, 3]:
sel = StepSelector(step=step).set_output(transform="pandas")
sel.fit(X)
output = sel.transform(X)
for name, dtype in output.dtypes.items():
assert dtype == X.dtypes[name]
# step=0 will select nothing
sel0 = StepSelector(step=0).set_output(transform="pandas")
sel0.fit(X, y)
msg = "No features were selected"
with pytest.warns(UserWarning, match=msg):
output0 = sel0.transform(X)
assert_array_equal(output0.index, X.index)
assert output0.shape == (X.shape[0], 0)
|
Check that output dtypes for dataframes are consistent with the input dtypes.
|
test_output_dataframe
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/tests/test_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/tests/test_base.py
|
BSD-3-Clause
|
def test_r_regression_force_finite(X, y, expected_corr_coef, force_finite):
"""Check the behaviour of `force_finite` for some corner cases with `r_regression`.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/15672
"""
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
corr_coef = r_regression(X, y, force_finite=force_finite)
np.testing.assert_array_almost_equal(corr_coef, expected_corr_coef)
|
Check the behaviour of `force_finite` for some corner cases with `r_regression`.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/15672
|
test_r_regression_force_finite
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/tests/test_feature_select.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/tests/test_feature_select.py
|
BSD-3-Clause
|
def test_f_regression_corner_case(
X, y, expected_f_statistic, expected_p_values, force_finite
):
"""Check the behaviour of `force_finite` for some corner cases with `f_regression`.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/15672
"""
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
f_statistic, p_values = f_regression(X, y, force_finite=force_finite)
np.testing.assert_array_almost_equal(f_statistic, expected_f_statistic)
np.testing.assert_array_almost_equal(p_values, expected_p_values)
|
Check the behaviour of `force_finite` for some corner cases with `f_regression`.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/15672
|
test_f_regression_corner_case
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/tests/test_feature_select.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/tests/test_feature_select.py
|
BSD-3-Clause
|
def test_dataframe_output_dtypes():
"""Check that the output datafarme dtypes are the same as the input.
Non-regression test for gh-24860.
"""
pd = pytest.importorskip("pandas")
X, y = load_iris(return_X_y=True, as_frame=True)
X = X.astype(
{
"petal length (cm)": np.float32,
"petal width (cm)": np.float64,
}
)
X["petal_width_binned"] = pd.cut(X["petal width (cm)"], bins=10)
column_order = X.columns
def selector(X, y):
ranking = {
"sepal length (cm)": 1,
"sepal width (cm)": 2,
"petal length (cm)": 3,
"petal width (cm)": 4,
"petal_width_binned": 5,
}
return np.asarray([ranking[name] for name in column_order])
univariate_filter = SelectKBest(selector, k=3).set_output(transform="pandas")
output = univariate_filter.fit_transform(X, y)
assert_array_equal(
output.columns, ["petal length (cm)", "petal width (cm)", "petal_width_binned"]
)
for name, dtype in output.dtypes.items():
assert dtype == X.dtypes[name]
|
Check that the output dataframe dtypes are the same as the input.
Non-regression test for gh-24860.
|
test_dataframe_output_dtypes
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/tests/test_feature_select.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/tests/test_feature_select.py
|
BSD-3-Clause
|
def test_unsupervised_filter(selector):
"""Check support for unsupervised feature selection for the filter that could
require only `X`.
"""
rng = np.random.RandomState(0)
X = rng.randn(10, 5)
def score_func(X, y=None):
return np.array([1, 1, 1, 1, 0])
selector.set_params(score_func=score_func)
selector.fit(X)
X_trans = selector.transform(X)
assert_allclose(X_trans, X[:, :4])
X_trans = selector.fit_transform(X)
assert_allclose(X_trans, X[:, :4])
|
Check support for unsupervised feature selection with filters whose score
function only requires `X`.
|
test_unsupervised_filter
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/tests/test_feature_select.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/tests/test_feature_select.py
|
BSD-3-Clause
|
def test_inferred_max_features_integer(max_features):
"""Check max_features_ and output shape for integer max_features."""
clf = RandomForestClassifier(n_estimators=5, random_state=0)
transformer = SelectFromModel(
estimator=clf, max_features=max_features, threshold=-np.inf
)
X_trans = transformer.fit_transform(data, y)
if max_features is not None:
assert transformer.max_features_ == max_features
assert X_trans.shape[1] == transformer.max_features_
else:
assert not hasattr(transformer, "max_features_")
assert X_trans.shape[1] == data.shape[1]
|
Check max_features_ and output shape for integer max_features.
|
test_inferred_max_features_integer
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/tests/test_from_model.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/tests/test_from_model.py
|
BSD-3-Clause
|
def test_inferred_max_features_callable(max_features):
"""Check max_features_ and output shape for callable max_features."""
clf = RandomForestClassifier(n_estimators=5, random_state=0)
transformer = SelectFromModel(
estimator=clf, max_features=max_features, threshold=-np.inf
)
X_trans = transformer.fit_transform(data, y)
assert transformer.max_features_ == max_features(data)
assert X_trans.shape[1] == transformer.max_features_
|
Check max_features_ and output shape for callable max_features.
|
test_inferred_max_features_callable
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/tests/test_from_model.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/tests/test_from_model.py
|
BSD-3-Clause
|
def test_max_features_callable_data(max_features):
"""Tests that the callable passed to `fit` is called on X."""
clf = RandomForestClassifier(n_estimators=50, random_state=0)
m = Mock(side_effect=max_features)
transformer = SelectFromModel(estimator=clf, max_features=m, threshold=-np.inf)
transformer.fit_transform(data, y)
m.assert_called_with(data)
|
Tests that the callable passed to `fit` is called on X.
|
test_max_features_callable_data
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/tests/test_from_model.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/tests/test_from_model.py
|
BSD-3-Clause
|
def test_prefit_max_features():
"""Check the interaction between `prefit` and `max_features`."""
# case 1: an error should be raised at `transform` if `fit` was not called to
# validate the attributes
estimator = RandomForestClassifier(n_estimators=5, random_state=0)
estimator.fit(data, y)
model = SelectFromModel(estimator, prefit=True, max_features=lambda X: X.shape[1])
err_msg = (
"When `prefit=True` and `max_features` is a callable, call `fit` "
"before calling `transform`."
)
with pytest.raises(NotFittedError, match=err_msg):
model.transform(data)
# case 2: `max_features` is not validated and different from an integer
# FIXME: we cannot validate the upper bound of the attribute at transform
# and we should force calling `fit` if we intend to force the attribute
# to have such an upper bound.
max_features = 2.5
model.set_params(max_features=max_features)
with pytest.raises(ValueError, match="`max_features` must be an integer"):
model.transform(data)
|
Check the interaction between `prefit` and `max_features`.
|
test_prefit_max_features
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/tests/test_from_model.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/tests/test_from_model.py
|
BSD-3-Clause
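For reference, a sketch of the prefit workflow that the test above exercises: the estimator is fitted separately and wrapped without refitting (data and threshold are illustrative):

from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectFromModel

X, y = make_classification(n_samples=100, n_features=10, random_state=0)
clf = RandomForestClassifier(n_estimators=10, random_state=0).fit(X, y)

# With prefit=True the wrapped estimator is not refitted; transform can be
# called directly as long as max_features is not a callable.
model = SelectFromModel(clf, prefit=True, threshold="median")
X_new = model.transform(X)
print(X_new.shape)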
|
def test_get_feature_names_out_elasticnetcv():
"""Check if ElasticNetCV works with a list of floats.
Non-regression test for #30936."""
X, y = make_regression(n_features=5, n_informative=3, random_state=0)
estimator = ElasticNetCV(l1_ratio=[0.25, 0.5, 0.75], random_state=0)
selector = SelectFromModel(estimator=estimator)
selector.fit(X, y)
names_out = selector.get_feature_names_out()
mask = selector.get_support()
expected = np.array([f"x{i}" for i in range(X.shape[1])])[mask]
assert_array_equal(names_out, expected)
|
Check if ElasticNetCV works with a list of floats.
Non-regression test for #30936.
|
test_get_feature_names_out_elasticnetcv
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/tests/test_from_model.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/tests/test_from_model.py
|
BSD-3-Clause
|
def test_prefit_get_feature_names_out():
"""Check the interaction between prefit and the feature names."""
clf = RandomForestClassifier(n_estimators=2, random_state=0)
clf.fit(data, y)
model = SelectFromModel(clf, prefit=True, max_features=1)
name = type(model).__name__
err_msg = (
f"This {name} instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this estimator."
)
with pytest.raises(NotFittedError, match=err_msg):
model.get_feature_names_out()
model.fit(data, y)
feature_names = model.get_feature_names_out()
assert feature_names == ["x3"]
|
Check the interaction between prefit and the feature names.
|
test_prefit_get_feature_names_out
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/tests/test_from_model.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/tests/test_from_model.py
|
BSD-3-Clause
|
def test_select_from_model_pls(PLSEstimator):
"""Check the behaviour of SelectFromModel with PLS estimators.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/12410
"""
X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
estimator = PLSEstimator(n_components=1)
model = make_pipeline(SelectFromModel(estimator), estimator).fit(X, y)
assert model.score(X, y) > 0.5
|
Check the behaviour of SelectFromModel with PLS estimators.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/12410
|
test_select_from_model_pls
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/tests/test_from_model.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/tests/test_from_model.py
|
BSD-3-Clause
|
def test_estimator_does_not_support_feature_names():
"""SelectFromModel works with estimators that do not support feature_names_in_.
Non-regression test for #21949.
"""
pytest.importorskip("pandas")
X, y = datasets.load_iris(as_frame=True, return_X_y=True)
all_feature_names = set(X.columns)
def importance_getter(estimator):
return np.arange(X.shape[1])
selector = SelectFromModel(
MinimalClassifier(), importance_getter=importance_getter
).fit(X, y)
# selector learns the feature names itself
assert_array_equal(selector.feature_names_in_, X.columns)
feature_names_out = set(selector.get_feature_names_out())
assert feature_names_out < all_feature_names
with warnings.catch_warnings():
warnings.simplefilter("error", UserWarning)
selector.transform(X.iloc[1:3])
|
SelectFromModel works with estimators that do not support feature_names_in_.
Non-regression test for #21949.
|
test_estimator_does_not_support_feature_names
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/tests/test_from_model.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/tests/test_from_model.py
|
BSD-3-Clause
|
def test_from_model_estimator_attribute_error():
"""Check that we raise the proper AttributeError when the estimator
does not implement the `partial_fit` method, which is decorated with
`available_if`.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/28108
"""
# `LinearRegression` does not implement 'partial_fit' and should raise an
# AttributeError
from_model = SelectFromModel(estimator=LinearRegression())
outer_msg = "This 'SelectFromModel' has no attribute 'partial_fit'"
inner_msg = "'LinearRegression' object has no attribute 'partial_fit'"
with pytest.raises(AttributeError, match=outer_msg) as exec_info:
from_model.fit(data, y).partial_fit(data)
assert isinstance(exec_info.value.__cause__, AttributeError)
assert inner_msg in str(exec_info.value.__cause__)
|
Check that we raise the proper AttributeError when the estimator
does not implement the `partial_fit` method, which is decorated with
`available_if`.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/28108
|
test_from_model_estimator_attribute_error
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/tests/test_from_model.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/tests/test_from_model.py
|
BSD-3-Clause
|
def test_mutual_information_symmetry_classif_regression(correlated, global_random_seed):
"""Check that `mutual_info_classif` and `mutual_info_regression` are
symmetric by switching the target `y` as `feature` in `X` and vice
versa.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/23720
"""
rng = np.random.RandomState(global_random_seed)
n = 100
d = rng.randint(10, size=n)
if correlated:
c = d.astype(np.float64)
else:
c = rng.normal(0, 1, size=n)
mi_classif = mutual_info_classif(
c[:, None], d, discrete_features=[False], random_state=global_random_seed
)
mi_regression = mutual_info_regression(
d[:, None], c, discrete_features=[True], random_state=global_random_seed
)
assert mi_classif == pytest.approx(mi_regression)
|
Check that `mutual_info_classif` and `mutual_info_regression` are
symmetric by switching the target `y` as `feature` in `X` and vice
versa.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/23720
|
test_mutual_information_symmetry_classif_regression
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/tests/test_mutual_info.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/tests/test_mutual_info.py
|
BSD-3-Clause
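For context, basic calls to the two estimators compared by the test; ``discrete_features`` and ``random_state`` control the nearest-neighbour based estimation (data are synthetic):

import numpy as np
from sklearn.feature_selection import mutual_info_classif, mutual_info_regression

rng = np.random.RandomState(0)
X = rng.normal(size=(200, 3))
y_class = (X[:, 0] > 0).astype(int)
y_reg = X[:, 1] + 0.1 * rng.normal(size=200)

mi_class = mutual_info_classif(X, y_class, random_state=0)  # one MI value per feature
mi_reg = mutual_info_regression(X, y_reg, random_state=0)
print(mi_class, mi_reg)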
|
def test_mutual_info_regression_X_int_dtype(global_random_seed):
"""Check that results agree when X is integer dtype and float dtype.
Non-regression test for Issue #26696.
"""
rng = np.random.RandomState(global_random_seed)
X = rng.randint(100, size=(100, 10))
X_float = X.astype(np.float64, copy=True)
y = rng.randint(100, size=100)
expected = mutual_info_regression(X_float, y, random_state=global_random_seed)
result = mutual_info_regression(X, y, random_state=global_random_seed)
assert_allclose(result, expected)
|
Check that results agree when X is integer dtype and float dtype.
Non-regression test for Issue #26696.
|
test_mutual_info_regression_X_int_dtype
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/tests/test_mutual_info.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/tests/test_mutual_info.py
|
BSD-3-Clause
|
def test_mutual_info_n_jobs(global_random_seed, mutual_info_func, data_generator):
"""Check that results are consistent with different `n_jobs`."""
X, y = data_generator(random_state=global_random_seed)
single_job = mutual_info_func(X, y, random_state=global_random_seed, n_jobs=1)
multi_job = mutual_info_func(X, y, random_state=global_random_seed, n_jobs=2)
assert_allclose(single_job, multi_job)
|
Check that results are consistent with different `n_jobs`.
|
test_mutual_info_n_jobs
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/tests/test_mutual_info.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/tests/test_mutual_info.py
|
BSD-3-Clause
|
def test_pipeline_with_nans(ClsRFE):
"""Check that RFE works with pipeline that accept nans.
Non-regression test for gh-21743.
"""
X, y = load_iris(return_X_y=True)
X[0, 0] = np.nan
pipe = make_pipeline(
SimpleImputer(),
StandardScaler(),
LogisticRegression(),
)
fs = ClsRFE(
estimator=pipe,
importance_getter="named_steps.logisticregression.coef_",
)
fs.fit(X, y)
|
Check that RFE works with pipelines that accept NaNs.
Non-regression test for gh-21743.
|
test_pipeline_with_nans
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/tests/test_rfe.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/tests/test_rfe.py
|
BSD-3-Clause
|
def test_rfe_pls(ClsRFE, PLSEstimator):
"""Check the behaviour of RFE with PLS estimators.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/12410
"""
X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
estimator = PLSEstimator(n_components=1)
selector = ClsRFE(estimator, step=1).fit(X, y)
assert selector.score(X, y) > 0.5
|
Check the behaviour of RFE with PLS estimators.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/12410
|
test_rfe_pls
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/tests/test_rfe.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/tests/test_rfe.py
|
BSD-3-Clause
|
def test_rfe_estimator_attribute_error():
"""Check that we raise the proper AttributeError when the estimator
does not implement the `decision_function` method, which is decorated with
`available_if`.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/28108
"""
iris = load_iris()
# `LinearRegression` does not implement 'decision_function' and should raise an
# AttributeError
rfe = RFE(estimator=LinearRegression())
outer_msg = "This 'RFE' has no attribute 'decision_function'"
inner_msg = "'LinearRegression' object has no attribute 'decision_function'"
with pytest.raises(AttributeError, match=outer_msg) as exec_info:
rfe.fit(iris.data, iris.target).decision_function(iris.data)
assert isinstance(exec_info.value.__cause__, AttributeError)
assert inner_msg in str(exec_info.value.__cause__)
|
Check that we raise the proper AttributeError when the estimator
does not implement the `decision_function` method, which is decorated with
`available_if`.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/28108
|
test_rfe_estimator_attribute_error
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/tests/test_rfe.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/tests/test_rfe.py
|
BSD-3-Clause
|
def test_rfe_n_features_to_select_warning(ClsRFE, param):
"""Check if the correct warning is raised when trying to initialize a RFE
object with a n_features_to_select attribute larger than the number of
features present in the X variable that is passed to the fit method
"""
X, y = make_classification(n_features=20, random_state=0)
with pytest.warns(UserWarning, match=f"{param}=21 > n_features=20"):
# Create RFE/RFECV with n_features_to_select/min_features_to_select
# larger than the number of features present in the X variable
clsrfe = ClsRFE(estimator=LogisticRegression(), **{param: 21})
clsrfe.fit(X, y)
|
Check that the correct warning is raised when initializing an RFE
object with an n_features_to_select value larger than the number of
features present in the X that is passed to the fit method.
|
test_rfe_n_features_to_select_warning
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/tests/test_rfe.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/tests/test_rfe.py
|
BSD-3-Clause
|
def test_rfe_with_sample_weight():
"""Test that `RFE` works correctly with sample weights."""
X, y = make_classification(random_state=0)
n_samples = X.shape[0]
# Assign the first half of the samples with twice the weight
sample_weight = np.ones_like(y)
sample_weight[: n_samples // 2] = 2
# Duplicate the first half of the data samples to replicate the effect
# of sample weights for comparison
X2 = np.concatenate([X, X[: n_samples // 2]], axis=0)
y2 = np.concatenate([y, y[: n_samples // 2]])
estimator = SVC(kernel="linear")
rfe_sw = RFE(estimator=estimator, step=0.1)
rfe_sw.fit(X, y, sample_weight=sample_weight)
rfe = RFE(estimator=estimator, step=0.1)
rfe.fit(X2, y2)
assert_array_equal(rfe_sw.ranking_, rfe.ranking_)
# Also verify that when sample weights are not doubled the results
# are different from the duplicated data
rfe_sw_2 = RFE(estimator=estimator, step=0.1)
sample_weight_2 = np.ones_like(y)
rfe_sw_2.fit(X, y, sample_weight=sample_weight_2)
assert not np.array_equal(rfe_sw_2.ranking_, rfe.ranking_)
|
Test that `RFE` works correctly with sample weights.
|
test_rfe_with_sample_weight
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/tests/test_rfe.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/tests/test_rfe.py
|
BSD-3-Clause
|
def test_results_per_cv_in_rfecv(global_random_seed):
"""
Test that the results of RFECV are consistent across the different folds
in terms of length of the arrays.
"""
X, y = make_classification(random_state=global_random_seed)
clf = LogisticRegression()
rfecv = RFECV(
estimator=clf,
n_jobs=2,
cv=5,
)
rfecv.fit(X, y)
assert len(rfecv.cv_results_["split1_test_score"]) == len(
rfecv.cv_results_["split2_test_score"]
)
assert len(rfecv.cv_results_["split1_support"]) == len(
rfecv.cv_results_["split2_support"]
)
assert len(rfecv.cv_results_["split1_ranking"]) == len(
rfecv.cv_results_["split2_ranking"]
)
|
Test that the results of RFECV are consistent across the different folds
in terms of length of the arrays.
|
test_results_per_cv_in_rfecv
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/tests/test_rfe.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/tests/test_rfe.py
|
BSD-3-Clause
|
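A small usage sketch of the `cv_results_` arrays the test compares; the estimator and synthetic data are placeholders:
from sklearn.datasets import make_classification
from sklearn.feature_selection import RFECV
from sklearn.linear_model import LogisticRegression

X, y = make_classification(random_state=0)
rfecv = RFECV(estimator=LogisticRegression(), cv=5).fit(X, y)
# one entry per candidate number of features, in every per-split array
print(rfecv.cv_results_["mean_test_score"].shape)
print(rfecv.cv_results_["split0_test_score"].shape)
print(rfecv.n_features_)  # number of features selected on the full data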
def test_n_features_to_select_auto(direction):
"""Check the behaviour of `n_features_to_select="auto"` with different
values for the parameter `tol`.
"""
n_features = 10
tol = 1e-3
X, y = make_regression(n_features=n_features, random_state=0)
sfs = SequentialFeatureSelector(
LinearRegression(),
n_features_to_select="auto",
tol=tol,
direction=direction,
cv=2,
)
sfs.fit(X, y)
max_features_to_select = n_features - 1
assert sfs.get_support(indices=True).shape[0] <= max_features_to_select
assert sfs.n_features_to_select_ <= max_features_to_select
assert sfs.transform(X).shape[1] <= max_features_to_select
assert sfs.get_support(indices=True).shape[0] == sfs.n_features_to_select_
|
Check the behaviour of `n_features_to_select="auto"` with different
values for the parameter `tol`.
|
test_n_features_to_select_auto
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/tests/test_sequential.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/tests/test_sequential.py
|
BSD-3-Clause
|
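For reference, a minimal sketch of how `n_features_to_select="auto"` combines with `tol` in ordinary use (synthetic data, arbitrary tol):
from sklearn.datasets import make_regression
from sklearn.feature_selection import SequentialFeatureSelector
from sklearn.linear_model import LinearRegression

X, y = make_regression(n_features=10, random_state=0)
sfs = SequentialFeatureSelector(
    LinearRegression(), n_features_to_select="auto", tol=1e-3, cv=2
).fit(X, y)
# selection stops once adding another feature no longer improves the CV score by tol
print(sfs.n_features_to_select_, sfs.get_support(indices=True))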
def test_n_features_to_select_stopping_criterion(direction):
"""Check the behaviour stopping criterion for feature selection
depending on the values of `n_features_to_select` and `tol`.
When `direction` is `'forward'`, select a new features at random
among those not currently selected in selector.support_,
build a new version of the data that includes all the features
in selector.support_ + this newly selected feature.
And check that the cross-validation score of the model trained on
this new dataset variant is lower than the model with
the selected forward selected features or at least does not improve
by more than the tol margin.
When `direction` is `'backward'`, instead of adding a new feature
to selector.support_, try to remove one of those selected features at random
And check that the cross-validation score is either decreasing or
not improving by more than the tol margin.
"""
X, y = make_regression(n_features=50, n_informative=10, random_state=0)
tol = 1e-3
sfs = SequentialFeatureSelector(
LinearRegression(),
n_features_to_select="auto",
tol=tol,
direction=direction,
cv=2,
)
sfs.fit(X, y)
selected_X = sfs.transform(X)
rng = np.random.RandomState(0)
added_candidates = list(set(range(X.shape[1])) - set(sfs.get_support(indices=True)))
added_X = np.hstack(
[
selected_X,
(X[:, rng.choice(added_candidates)])[:, np.newaxis],
]
)
removed_candidate = rng.choice(list(range(sfs.n_features_to_select_)))
removed_X = np.delete(selected_X, removed_candidate, axis=1)
plain_cv_score = cross_val_score(LinearRegression(), X, y, cv=2).mean()
sfs_cv_score = cross_val_score(LinearRegression(), selected_X, y, cv=2).mean()
added_cv_score = cross_val_score(LinearRegression(), added_X, y, cv=2).mean()
removed_cv_score = cross_val_score(LinearRegression(), removed_X, y, cv=2).mean()
assert sfs_cv_score >= plain_cv_score
if direction == "forward":
assert (sfs_cv_score - added_cv_score) <= tol
assert (sfs_cv_score - removed_cv_score) >= tol
else:
assert (added_cv_score - sfs_cv_score) <= tol
assert (removed_cv_score - sfs_cv_score) <= tol
|
Check the behaviour of the stopping criterion for feature selection
depending on the values of `n_features_to_select` and `tol`.
When `direction` is `'forward'`, select a new feature at random
among those not currently selected in selector.support_ and
build a new version of the data that includes all the features
in selector.support_ plus this newly selected feature.
Then check that the cross-validation score of the model trained on
this new dataset variant is lower than that of the model with
the forward-selected features, or at least does not improve
by more than the tol margin.
When `direction` is `'backward'`, instead of adding a new feature
to selector.support_, try to remove one of the selected features at random,
and check that the cross-validation score either decreases or
does not improve by more than the tol margin.
|
test_n_features_to_select_stopping_criterion
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/tests/test_sequential.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/tests/test_sequential.py
|
BSD-3-Clause
|
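The tol rule the test exercises, written out as plain arithmetic; the scores below are made-up numbers, and the comparison is a simplification of the actual implementation:
tol = 1e-3
current_score = 0.842      # CV score with the currently selected features
candidate_score = 0.8425   # CV score after adding one more candidate feature
improvement = candidate_score - current_score
keep_adding = improvement > tol   # 0.0005 <= 0.001, so forward selection stops here
print(improvement, keep_adding)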
def test_forward_neg_tol_error():
"""Check that we raise an error when tol<0 and direction='forward'"""
X, y = make_regression(n_features=10, random_state=0)
sfs = SequentialFeatureSelector(
LinearRegression(),
n_features_to_select="auto",
direction="forward",
tol=-1e-3,
)
with pytest.raises(ValueError, match="tol must be strictly positive"):
sfs.fit(X, y)
|
Check that we raise an error when tol<0 and direction='forward'
|
test_forward_neg_tol_error
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/tests/test_sequential.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/tests/test_sequential.py
|
BSD-3-Clause
|
def test_backward_neg_tol():
"""Check that SequentialFeatureSelector works negative tol
non-regression test for #25525
"""
X, y = make_regression(n_features=10, random_state=0)
lr = LinearRegression()
initial_score = lr.fit(X, y).score(X, y)
sfs = SequentialFeatureSelector(
lr,
n_features_to_select="auto",
direction="backward",
tol=-1e-3,
)
Xr = sfs.fit_transform(X, y)
new_score = lr.fit(Xr, y).score(Xr, y)
assert 0 < sfs.get_support().sum() < X.shape[1]
assert new_score < initial_score
|
Check that SequentialFeatureSelector works with a negative tol.
Non-regression test for #25525.
|
test_backward_neg_tol
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/tests/test_sequential.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/tests/test_sequential.py
|
BSD-3-Clause
|
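Taken together, the two tests above pin down the sign convention for `tol`: a negative value is rejected for forward selection but allowed for backward selection, where it lets the score drop slightly at each removal. A minimal sketch of the allowed case:
from sklearn.datasets import make_regression
from sklearn.feature_selection import SequentialFeatureSelector
from sklearn.linear_model import LinearRegression

X, y = make_regression(n_features=10, random_state=0)
backward = SequentialFeatureSelector(
    LinearRegression(), n_features_to_select="auto", direction="backward", tol=-1e-3
).fit(X, y)
print(backward.get_support().sum())  # some features removed, but not all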
def test_cv_generator_support():
"""Check that no exception raised when cv is generator
non-regression test for #25957
"""
X, y = make_classification(random_state=0)
groups = np.zeros_like(y, dtype=int)
groups[y.size // 2 :] = 1
cv = LeaveOneGroupOut()
splits = cv.split(X, y, groups=groups)
knc = KNeighborsClassifier(n_neighbors=5)
sfs = SequentialFeatureSelector(knc, n_features_to_select=5, cv=splits)
sfs.fit(X, y)
|
Check that no exception is raised when cv is a generator.
Non-regression test for #25957.
|
test_cv_generator_support
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/tests/test_sequential.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/tests/test_sequential.py
|
BSD-3-Clause
|
def _estimator_has(attr):
"""Check that final_estimator has `attr`.
Used together with `available_if`.
"""
def check(self):
# raise original `AttributeError` if `attr` does not exist
getattr(self.estimator, attr)
return True
return check
|
Check that final_estimator has `attr`.
Used together with `available_if`.
|
_estimator_has
|
python
|
scikit-learn/scikit-learn
|
sklearn/frozen/_frozen.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/frozen/_frozen.py
|
BSD-3-Clause
|
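A hypothetical illustration of how a helper like `_estimator_has` pairs with `available_if`: the decorated method only appears on the wrapper when the inner estimator exposes it. The `Wrapper` class below is an illustrative stand-in, not `FrozenEstimator` itself:
from sklearn.base import BaseEstimator
from sklearn.utils.metaestimators import available_if


def _estimator_has(attr):
    def check(self):
        getattr(self.estimator, attr)  # raises AttributeError if missing
        return True
    return check


class Wrapper(BaseEstimator):
    def __init__(self, estimator):
        self.estimator = estimator

    @available_if(_estimator_has("predict_proba"))
    def predict_proba(self, X):
        return self.estimator.predict_proba(X)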
def set_params(self, **kwargs):
"""Set the parameters of this estimator.
The only valid key here is `estimator`. You cannot set the parameters of the
inner estimator.
Parameters
----------
**kwargs : dict
Estimator parameters.
Returns
-------
self : FrozenEstimator
This estimator.
"""
estimator = kwargs.pop("estimator", None)
if estimator is not None:
self.estimator = estimator
if kwargs:
raise ValueError(
"You cannot set parameters of the inner estimator in a frozen "
"estimator since calling `fit` has no effect. You can use "
"`frozenestimator.estimator.set_params` to set parameters of the inner "
"estimator."
)
|
Set the parameters of this estimator.
The only valid key here is `estimator`. You cannot set the parameters of the
inner estimator.
Parameters
----------
**kwargs : dict
Estimator parameters.
Returns
-------
self : FrozenEstimator
This estimator.
|
set_params
|
python
|
scikit-learn/scikit-learn
|
sklearn/frozen/_frozen.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/frozen/_frozen.py
|
BSD-3-Clause
|
def test_frozen_methods(estimator, dataset, request, method):
"""Test that frozen.fit doesn't do anything, and that all other methods are
exposed by the frozen estimator and return the same values as the estimator.
"""
X, y = request.getfixturevalue(dataset)
set_random_state(estimator)
estimator.fit(X, y)
frozen = FrozenEstimator(estimator)
# this should be no-op
frozen.fit([[1]], [1])
if hasattr(estimator, method):
assert_array_equal(getattr(estimator, method)(X), getattr(frozen, method)(X))
assert is_classifier(estimator) == is_classifier(frozen)
assert is_regressor(estimator) == is_regressor(frozen)
assert is_clusterer(estimator) == is_clusterer(frozen)
assert is_outlier_detector(estimator) == is_outlier_detector(frozen)
|
Test that frozen.fit doesn't do anything, and that all other methods are
exposed by the frozen estimator and return the same values as the estimator.
|
test_frozen_methods
|
python
|
scikit-learn/scikit-learn
|
sklearn/frozen/tests/test_frozen.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/frozen/tests/test_frozen.py
|
BSD-3-Clause
|
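A minimal sketch of the behaviour checked above: wrapping a fitted estimator in FrozenEstimator makes `fit` a no-op while prediction methods pass straight through to the inner model:
from sklearn.datasets import make_classification
from sklearn.frozen import FrozenEstimator
from sklearn.linear_model import LogisticRegression

X, y = make_classification(random_state=0)
clf = LogisticRegression().fit(X, y)
frozen = FrozenEstimator(clf)
frozen.fit([[1]], [1])  # no-op: the inner model is left untouched
assert (frozen.predict(X) == clf.predict(X)).all()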
def test_frozen_metadata_routing(regression_dataset):
"""Test that metadata routing works with frozen estimators."""
class ConsumesMetadata(BaseEstimator):
def __init__(self, on_fit=None, on_predict=None):
self.on_fit = on_fit
self.on_predict = on_predict
def fit(self, X, y, metadata=None):
if self.on_fit:
assert metadata is not None
self.fitted_ = True
return self
def predict(self, X, metadata=None):
if self.on_predict:
assert metadata is not None
return np.ones(len(X))
X, y = regression_dataset
pipeline = make_pipeline(
ConsumesMetadata(on_fit=True, on_predict=True)
.set_fit_request(metadata=True)
.set_predict_request(metadata=True)
)
pipeline.fit(X, y, metadata="test")
frozen = FrozenEstimator(pipeline)
pipeline.predict(X, metadata="test")
frozen.predict(X, metadata="test")
frozen["consumesmetadata"].set_predict_request(metadata=False)
with pytest.raises(
TypeError,
match=re.escape(
"Pipeline.predict got unexpected argument(s) {'metadata'}, which are not "
"routed to any object."
),
):
frozen.predict(X, metadata="test")
frozen["consumesmetadata"].set_predict_request(metadata=None)
with pytest.raises(UnsetMetadataPassedError):
frozen.predict(X, metadata="test")
|
Test that metadata routing works with frozen estimators.
|
test_frozen_metadata_routing
|
python
|
scikit-learn/scikit-learn
|
sklearn/frozen/tests/test_frozen.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/frozen/tests/test_frozen.py
|
BSD-3-Clause
|
def test_composite_fit(classification_dataset):
"""Test that calling fit_transform and fit_predict doesn't call fit."""
class Estimator(BaseEstimator):
def fit(self, X, y):
try:
self._fit_counter += 1
except AttributeError:
self._fit_counter = 1
return self
def fit_transform(self, X, y=None):
# only here to test that it doesn't get called
... # pragma: no cover
def fit_predict(self, X, y=None):
# only here to test that it doesn't get called
... # pragma: no cover
X, y = classification_dataset
est = Estimator().fit(X, y)
frozen = FrozenEstimator(est)
with pytest.raises(AttributeError):
frozen.fit_predict(X, y)
with pytest.raises(AttributeError):
frozen.fit_transform(X, y)
assert frozen._fit_counter == 1
|
Test that calling fit_transform and fit_predict doesn't call fit.
|
test_composite_fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/frozen/tests/test_frozen.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/frozen/tests/test_frozen.py
|
BSD-3-Clause
|
def test_clone_frozen(regression_dataset):
"""Test that cloning a frozen estimator keeps the frozen state."""
X, y = regression_dataset
estimator = LinearRegression().fit(X, y)
frozen = FrozenEstimator(estimator)
cloned = clone(frozen)
assert cloned.estimator is estimator
|
Test that cloning a frozen estimator keeps the frozen state.
|
test_clone_frozen
|
python
|
scikit-learn/scikit-learn
|
sklearn/frozen/tests/test_frozen.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/frozen/tests/test_frozen.py
|
BSD-3-Clause
|
def test_check_is_fitted(regression_dataset):
"""Test that check_is_fitted works on frozen estimators."""
X, y = regression_dataset
estimator = LinearRegression()
frozen = FrozenEstimator(estimator)
with pytest.raises(NotFittedError):
check_is_fitted(frozen)
estimator = LinearRegression().fit(X, y)
frozen = FrozenEstimator(estimator)
check_is_fitted(frozen)
|
Test that check_is_fitted works on frozen estimators.
|
test_check_is_fitted
|
python
|
scikit-learn/scikit-learn
|
sklearn/frozen/tests/test_frozen.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/frozen/tests/test_frozen.py
|
BSD-3-Clause
|
def test_frozen_tags():
"""Test that frozen estimators have the same tags as the original estimator
except for the skip_test tag."""
class Estimator(BaseEstimator):
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.categorical = True
return tags
estimator = Estimator()
frozen = FrozenEstimator(estimator)
frozen_tags = frozen.__sklearn_tags__()
estimator_tags = estimator.__sklearn_tags__()
assert frozen_tags._skip_test is True
assert estimator_tags._skip_test is False
assert estimator_tags.input_tags.categorical is True
assert frozen_tags.input_tags.categorical is True
|
Test that frozen estimators have the same tags as the original estimator
except for the skip_test tag.
|
test_frozen_tags
|
python
|
scikit-learn/scikit-learn
|
sklearn/frozen/tests/test_frozen.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/frozen/tests/test_frozen.py
|
BSD-3-Clause
|
def test_frozen_params():
"""Test that FrozenEstimator only exposes the estimator parameter."""
est = LogisticRegression()
frozen = FrozenEstimator(est)
with pytest.raises(ValueError, match="You cannot set parameters of the inner"):
frozen.set_params(estimator__C=1)
assert frozen.get_params() == {"estimator": est}
other_est = LocalOutlierFactor()
frozen.set_params(estimator=other_est)
assert frozen.get_params() == {"estimator": other_est}
|
Test that FrozenEstimator only exposes the estimator parameter.
|
test_frozen_params
|
python
|
scikit-learn/scikit-learn
|
sklearn/frozen/tests/test_frozen.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/frozen/tests/test_frozen.py
|
BSD-3-Clause
|
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep : bool, default=True
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : dict
Parameter names mapped to their values.
"""
params = dict()
# introspect the constructor arguments to find the model parameters
# to represent
cls = self.__class__
init = getattr(cls.__init__, "deprecated_original", cls.__init__)
init_sign = signature(init)
args, varargs = [], []
for parameter in init_sign.parameters.values():
if parameter.kind != parameter.VAR_KEYWORD and parameter.name != "self":
args.append(parameter.name)
if parameter.kind == parameter.VAR_POSITIONAL:
varargs.append(parameter.name)
if len(varargs) != 0:
raise RuntimeError(
"scikit-learn kernels should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s doesn't follow this convention." % (cls,)
)
for arg in args:
params[arg] = getattr(self, arg)
return params
|
Get parameters of this kernel.
Parameters
----------
deep : bool, default=True
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : dict
Parameter names mapped to their values.
|
get_params
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/kernels.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/kernels.py
|
BSD-3-Clause
|
def set_params(self, **params):
"""Set the parameters of this kernel.
The method works on simple kernels as well as on nested kernels.
The latter have parameters of the form ``<component>__<parameter>``
so that it's possible to update each component of a nested object.
Returns
-------
self
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in params.items():
split = key.split("__", 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError(
"Invalid parameter %s for kernel %s. "
"Check the list of available parameters "
"with `kernel.get_params().keys()`." % (name, self)
)
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError(
"Invalid parameter %s for kernel %s. "
"Check the list of available parameters "
"with `kernel.get_params().keys()`."
% (key, self.__class__.__name__)
)
setattr(self, key, value)
return self
|
Set the parameters of this kernel.
The method works on simple kernels as well as on nested kernels.
The latter have parameters of the form ``<component>__<parameter>``
so that it's possible to update each component of a nested object.
Returns
-------
self
|
set_params
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/kernels.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/kernels.py
|
BSD-3-Clause
|
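A short sketch of the nested ``<component>__<parameter>`` syntax described above, using a compound kernel (the values are arbitrary):
from sklearn.gaussian_process.kernels import RBF, WhiteKernel

kernel = RBF(length_scale=1.0) + WhiteKernel(noise_level=1.0)
kernel.set_params(k1__length_scale=2.0, k2__noise_level=0.5)
print(kernel.get_params()["k1__length_scale"])  # 2.0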
def clone_with_theta(self, theta):
"""Returns a clone of self with given hyperparameters theta.
Parameters
----------
theta : ndarray of shape (n_dims,)
The hyperparameters
"""
cloned = clone(self)
cloned.theta = theta
return cloned
|
Returns a clone of self with given hyperparameters theta.
Parameters
----------
theta : ndarray of shape (n_dims,)
The hyperparameters
|
clone_with_theta
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/kernels.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/kernels.py
|
BSD-3-Clause
|
def hyperparameters(self):
"""Returns a list of all hyperparameter specifications."""
r = [
getattr(self, attr)
for attr in dir(self)
if attr.startswith("hyperparameter_")
]
return r
|
Returns a list of all hyperparameter specifications.
|
hyperparameters
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/kernels.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/kernels.py
|
BSD-3-Clause
|
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : ndarray of shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
theta = []
params = self.get_params()
for hyperparameter in self.hyperparameters:
if not hyperparameter.fixed:
theta.append(params[hyperparameter.name])
if len(theta) > 0:
return np.log(np.hstack(theta))
else:
return np.array([])
|
Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : ndarray of shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
|
theta
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/kernels.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/kernels.py
|
BSD-3-Clause
|
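A worked example of the log transform described above: `theta` holds the log of each non-fixed hyperparameter, and assigning to it exponentiates on the way back:
import numpy as np
from sklearn.gaussian_process.kernels import RBF

kernel = RBF(length_scale=2.0)
print(kernel.theta)           # [log(2.0)] ~= [0.693]
kernel.theta = np.log([0.5])  # sets length_scale back to 0.5
print(kernel.length_scale)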
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : ndarray of shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
params = self.get_params()
i = 0
for hyperparameter in self.hyperparameters:
if hyperparameter.fixed:
continue
if hyperparameter.n_elements > 1:
# vector-valued parameter
params[hyperparameter.name] = np.exp(
theta[i : i + hyperparameter.n_elements]
)
i += hyperparameter.n_elements
else:
params[hyperparameter.name] = np.exp(theta[i])
i += 1
if i != len(theta):
raise ValueError(
"theta has not the correct number of entries."
" Should be %d; given are %d" % (i, len(theta))
)
self.set_params(**params)
|
Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : ndarray of shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
|
theta
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/kernels.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/kernels.py
|
BSD-3-Clause
|
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : ndarray of shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
bounds = [
hyperparameter.bounds
for hyperparameter in self.hyperparameters
if not hyperparameter.fixed
]
if len(bounds) > 0:
return np.log(np.vstack(bounds))
else:
return np.array([])
|
Returns the log-transformed bounds on the theta.
Returns
-------
bounds : ndarray of shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
|
bounds
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/kernels.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/kernels.py
|
BSD-3-Clause
|
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array-like of shape (n_samples,)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : ndarray of shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
|
Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array-like of shape (n_samples,)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : ndarray of shape (n_samples_X,)
Diagonal of kernel k(X, X)
|
diag
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/kernels.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/kernels.py
|
BSD-3-Clause
|
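The equivalence stated in the docstring, checked numerically for an RBF kernel on a small random input:
import numpy as np
from sklearn.gaussian_process.kernels import RBF

X = np.random.RandomState(0).rand(5, 2)
kernel = RBF(length_scale=1.0)
assert np.allclose(kernel.diag(X), np.diag(kernel(X)))  # both are all ones for RBF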
def _check_bounds_params(self):
"""Called after fitting to warn if bounds may have been too tight."""
list_close = np.isclose(self.bounds, np.atleast_2d(self.theta).T)
idx = 0
for hyp in self.hyperparameters:
if hyp.fixed:
continue
for dim in range(hyp.n_elements):
if list_close[idx, 0]:
warnings.warn(
"The optimal value found for "
"dimension %s of parameter %s is "
"close to the specified lower "
"bound %s. Decreasing the bound and"
" calling fit again may find a "
"better value." % (dim, hyp.name, hyp.bounds[dim][0]),
ConvergenceWarning,
)
elif list_close[idx, 1]:
warnings.warn(
"The optimal value found for "
"dimension %s of parameter %s is "
"close to the specified upper "
"bound %s. Increasing the bound and"
" calling fit again may find a "
"better value." % (dim, hyp.name, hyp.bounds[dim][1]),
ConvergenceWarning,
)
idx += 1
|
Called after fitting to warn if bounds may have been too tight.
|
_check_bounds_params
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/kernels.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/kernels.py
|
BSD-3-Clause
|
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array of shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
k_dims = self.k1.n_dims
for i, kernel in enumerate(self.kernels):
kernel.theta = theta[i * k_dims : (i + 1) * k_dims]
|
Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array of shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
|
theta
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/kernels.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/kernels.py
|
BSD-3-Clause
|
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Note that this compound kernel returns the results of all simple kernels
stacked along an additional axis.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object, \
default=None
Left argument of the returned kernel k(X, Y)
Y : array-like of shape (n_samples_X, n_features) or list of object, \
default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of the
kernel hyperparameter is computed.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y, n_kernels)
Kernel k(X, Y)
K_gradient : ndarray of shape \
(n_samples_X, n_samples_X, n_dims, n_kernels), optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
"""
if eval_gradient:
K = []
K_grad = []
for kernel in self.kernels:
K_single, K_grad_single = kernel(X, Y, eval_gradient)
K.append(K_single)
K_grad.append(K_grad_single[..., np.newaxis])
return np.dstack(K), np.concatenate(K_grad, 3)
else:
return np.dstack([kernel(X, Y, eval_gradient) for kernel in self.kernels])
|
Return the kernel k(X, Y) and optionally its gradient.
Note that this compound kernel returns the results of all simple kernels
stacked along an additional axis.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object, default=None
Left argument of the returned kernel k(X, Y)
Y : array-like of shape (n_samples_X, n_features) or list of object, default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of the
kernel hyperparameter is computed.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y, n_kernels)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims, n_kernels), optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
|
__call__
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/kernels.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/kernels.py
|
BSD-3-Clause
|
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep : bool, default=True
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : dict
Parameter names mapped to their values.
"""
params = dict(k1=self.k1, k2=self.k2)
if deep:
deep_items = self.k1.get_params().items()
params.update(("k1__" + k, val) for k, val in deep_items)
deep_items = self.k2.get_params().items()
params.update(("k2__" + k, val) for k, val in deep_items)
return params
|
Get parameters of this kernel.
Parameters
----------
deep : bool, default=True
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : dict
Parameter names mapped to their values.
|
get_params
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/kernels.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/kernels.py
|
BSD-3-Clause
|
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : ndarray of shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
k1_dims = self.k1.n_dims
self.k1.theta = theta[:k1_dims]
self.k2.theta = theta[k1_dims:]
|
Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : ndarray of shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
|
theta
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/kernels.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/kernels.py
|
BSD-3-Clause
|
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : ndarray of shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
if self.k1.bounds.size == 0:
return self.k2.bounds
if self.k2.bounds.size == 0:
return self.k1.bounds
return np.vstack((self.k1.bounds, self.k2.bounds))
|
Returns the log-transformed bounds on the theta.
Returns
-------
bounds : ndarray of shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
|
bounds
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/kernels.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/kernels.py
|
BSD-3-Clause
|
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Left argument of the returned kernel k(X, Y)
Y : array-like of shape (n_samples_X, n_features) or list of object,\
default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\
optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
"""
if eval_gradient:
K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
return K1 + K2, np.dstack((K1_gradient, K2_gradient))
else:
return self.k1(X, Y) + self.k2(X, Y)
|
Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Left argument of the returned kernel k(X, Y)
Y : array-like of shape (n_samples_X, n_features) or list of object, default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
|
__call__
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/kernels.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/kernels.py
|
BSD-3-Clause
|
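A numerical sanity check of the sum rule implemented above: the compound kernel matrix is the elementwise sum of its parts, and the gradient slices of both parts are stacked along the last axis:
import numpy as np
from sklearn.gaussian_process.kernels import RBF, WhiteKernel

X = np.random.RandomState(0).rand(4, 3)
k1, k2 = RBF(length_scale=1.5), WhiteKernel(noise_level=0.1)
K, K_grad = (k1 + k2)(X, eval_gradient=True)
assert np.allclose(K, k1(X) + k2(X))
print(K_grad.shape)  # (4, 4, 2): one slice per non-fixed hyperparameter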
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Left argument of the returned kernel k(X, Y)
Y : array-like of shape (n_samples_Y, n_features) or list of object,\
default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
"""
if eval_gradient:
K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
return K1 * K2, np.dstack(
(K1_gradient * K2[:, :, np.newaxis], K2_gradient * K1[:, :, np.newaxis])
)
else:
return self.k1(X, Y) * self.k2(X, Y)
|
Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Left argument of the returned kernel k(X, Y)
Y : array-like of shape (n_samples_Y, n_features) or list of object, default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
|
__call__
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/kernels.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/kernels.py
|
BSD-3-Clause
|
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep : bool, default=True
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : dict
Parameter names mapped to their values.
"""
params = dict(kernel=self.kernel, exponent=self.exponent)
if deep:
deep_items = self.kernel.get_params().items()
params.update(("kernel__" + k, val) for k, val in deep_items)
return params
|
Get parameters of this kernel.
Parameters
----------
deep : bool, default=True
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : dict
Parameter names mapped to their values.
|
get_params
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/kernels.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/kernels.py
|
BSD-3-Clause
|
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Left argument of the returned kernel k(X, Y)
Y : array-like of shape (n_samples_Y, n_features) or list of object,\
default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\
optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
"""
if eval_gradient:
K, K_gradient = self.kernel(X, Y, eval_gradient=True)
K_gradient *= self.exponent * K[:, :, np.newaxis] ** (self.exponent - 1)
return K**self.exponent, K_gradient
else:
K = self.kernel(X, Y, eval_gradient=False)
return K**self.exponent
|
Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Left argument of the returned kernel k(X, Y)
Y : array-like of shape (n_samples_Y, n_features) or list of object, default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
|
__call__
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/kernels.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/kernels.py
|
BSD-3-Clause
|
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Left argument of the returned kernel k(X, Y)
Y : array-like of shape (n_samples_X, n_features) or list of object, \
default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if Y is None:
Y = X
elif eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
K = np.full(
(_num_samples(X), _num_samples(Y)),
self.constant_value,
dtype=np.array(self.constant_value).dtype,
)
if eval_gradient:
if not self.hyperparameter_constant_value.fixed:
return (
K,
np.full(
(_num_samples(X), _num_samples(X), 1),
self.constant_value,
dtype=np.array(self.constant_value).dtype,
),
)
else:
return K, np.empty((_num_samples(X), _num_samples(X), 0))
else:
return K
|
Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Left argument of the returned kernel k(X, Y)
Y : array-like of shape (n_samples_X, n_features) or list of object, default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
|
__call__
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/kernels.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/kernels.py
|
BSD-3-Clause
|
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Argument to the kernel.
Returns
-------
K_diag : ndarray of shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return np.full(
_num_samples(X),
self.constant_value,
dtype=np.array(self.constant_value).dtype,
)
|
Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Argument to the kernel.
Returns
-------
K_diag : ndarray of shape (n_samples_X,)
Diagonal of kernel k(X, X)
|
diag
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/kernels.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/kernels.py
|
BSD-3-Clause
|
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Left argument of the returned kernel k(X, Y)
Y : array-like of shape (n_samples_X, n_features) or list of object,\
default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\
optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if Y is not None and eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
if Y is None:
K = self.noise_level * np.eye(_num_samples(X))
if eval_gradient:
if not self.hyperparameter_noise_level.fixed:
return (
K,
self.noise_level * np.eye(_num_samples(X))[:, :, np.newaxis],
)
else:
return K, np.empty((_num_samples(X), _num_samples(X), 0))
else:
return K
else:
return np.zeros((_num_samples(X), _num_samples(Y)))
|
Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Left argument of the returned kernel k(X, Y)
Y : array-like of shape (n_samples_X, n_features) or list of object, default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
|
__call__
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/kernels.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/kernels.py
|
BSD-3-Clause
|
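A small sketch of the two branches above: on the training inputs the white kernel is a scaled identity, while any cross-kernel k(X, Y) is identically zero:
import numpy as np
from sklearn.gaussian_process.kernels import WhiteKernel

X = np.random.RandomState(0).rand(3, 2)
Y = np.random.RandomState(1).rand(2, 2)
kernel = WhiteKernel(noise_level=0.5)
assert np.allclose(kernel(X), 0.5 * np.eye(3))
assert np.allclose(kernel(X, Y), np.zeros((3, 2)))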
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Argument to the kernel.
Returns
-------
K_diag : ndarray of shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return np.full(
_num_samples(X), self.noise_level, dtype=np.array(self.noise_level).dtype
)
|
Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Argument to the kernel.
Returns
-------
K_diag : ndarray of shape (n_samples_X,)
Diagonal of kernel k(X, X)
|
diag
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/kernels.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/kernels.py
|
BSD-3-Clause
|
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : ndarray of shape (n_samples_Y, n_features), default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
"""
X = np.atleast_2d(X)
length_scale = _check_length_scale(X, self.length_scale)
if Y is None:
dists = pdist(X / length_scale, metric="sqeuclidean")
K = np.exp(-0.5 * dists)
# convert from upper-triangular matrix to square matrix
K = squareform(K)
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
dists = cdist(X / length_scale, Y / length_scale, metric="sqeuclidean")
K = np.exp(-0.5 * dists)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
# Hyperparameter l kept fixed
return K, np.empty((X.shape[0], X.shape[0], 0))
elif not self.anisotropic or length_scale.shape[0] == 1:
K_gradient = (K * squareform(dists))[:, :, np.newaxis]
return K, K_gradient
elif self.anisotropic:
# We need to recompute the pairwise dimension-wise distances
K_gradient = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 / (
length_scale**2
)
K_gradient *= K[..., np.newaxis]
return K, K_gradient
else:
return K
|
Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : ndarray of shape (n_samples_Y, n_features), default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
|
__call__
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/kernels.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/kernels.py
|
BSD-3-Clause
|
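The squared-exponential formula the code above evaluates, checked against a direct pairwise computation, k(x, y) = exp(-||x - y||^2 / (2 * length_scale^2)):
import numpy as np
from sklearn.gaussian_process.kernels import RBF

rng = np.random.RandomState(0)
X = rng.rand(4, 2)
length_scale = 1.3
K = RBF(length_scale=length_scale)(X)
diff = X[:, None, :] - X[None, :, :]
K_manual = np.exp(-0.5 * (diff**2).sum(-1) / length_scale**2)
assert np.allclose(K, K_manual)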
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : ndarray of shape (n_samples_Y, n_features), default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
"""
X = np.atleast_2d(X)
length_scale = _check_length_scale(X, self.length_scale)
if Y is None:
dists = pdist(X / length_scale, metric="euclidean")
else:
if eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
dists = cdist(X / length_scale, Y / length_scale, metric="euclidean")
if self.nu == 0.5:
K = np.exp(-dists)
elif self.nu == 1.5:
K = dists * math.sqrt(3)
K = (1.0 + K) * np.exp(-K)
elif self.nu == 2.5:
K = dists * math.sqrt(5)
K = (1.0 + K + K**2 / 3.0) * np.exp(-K)
elif self.nu == np.inf:
K = np.exp(-(dists**2) / 2.0)
else: # general case; expensive to evaluate
K = dists
K[K == 0.0] += np.finfo(float).eps # strict zeros result in nan
tmp = math.sqrt(2 * self.nu) * K
K.fill((2 ** (1.0 - self.nu)) / gamma(self.nu))
K *= tmp**self.nu
K *= kv(self.nu, tmp)
if Y is None:
# convert from upper-triangular matrix to square matrix
K = squareform(K)
np.fill_diagonal(K, 1)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
# Hyperparameter l kept fixed
K_gradient = np.empty((X.shape[0], X.shape[0], 0))
return K, K_gradient
# We need to recompute the pairwise dimension-wise distances
if self.anisotropic:
D = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 / (length_scale**2)
else:
D = squareform(dists**2)[:, :, np.newaxis]
if self.nu == 0.5:
denominator = np.sqrt(D.sum(axis=2))[:, :, np.newaxis]
divide_result = np.zeros_like(D)
np.divide(
D,
denominator,
out=divide_result,
where=denominator != 0,
)
K_gradient = K[..., np.newaxis] * divide_result
elif self.nu == 1.5:
K_gradient = 3 * D * np.exp(-np.sqrt(3 * D.sum(-1)))[..., np.newaxis]
elif self.nu == 2.5:
tmp = np.sqrt(5 * D.sum(-1))[..., np.newaxis]
K_gradient = 5.0 / 3.0 * D * (tmp + 1) * np.exp(-tmp)
elif self.nu == np.inf:
K_gradient = D * K[..., np.newaxis]
else:
# approximate gradient numerically
def f(theta): # helper function
return self.clone_with_theta(theta)(X, Y)
return K, _approx_fprime(self.theta, f, 1e-10)
if not self.anisotropic:
return K, K_gradient[:, :].sum(-1)[:, :, np.newaxis]
else:
return K, K_gradient
else:
return K
|
Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : ndarray of shape (n_samples_Y, n_features), default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
|
__call__
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/kernels.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/kernels.py
|
BSD-3-Clause
|
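The closed-form special cases handled above, checked numerically: nu=0.5 gives the absolute-exponential kernel and nu=inf recovers the RBF kernel:
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.gaussian_process.kernels import RBF, Matern

X = np.random.RandomState(0).rand(5, 2)
dists = cdist(X, X)
assert np.allclose(Matern(length_scale=1.0, nu=0.5)(X), np.exp(-dists))
assert np.allclose(Matern(length_scale=1.0, nu=np.inf)(X), RBF(length_scale=1.0)(X))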
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : ndarray of shape (n_samples_Y, n_features), default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if len(np.atleast_1d(self.length_scale)) > 1:
raise AttributeError(
"RationalQuadratic kernel only supports isotropic version, "
"please use a single scalar for length_scale"
)
X = np.atleast_2d(X)
if Y is None:
dists = squareform(pdist(X, metric="sqeuclidean"))
tmp = dists / (2 * self.alpha * self.length_scale**2)
base = 1 + tmp
K = base**-self.alpha
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
dists = cdist(X, Y, metric="sqeuclidean")
K = (1 + dists / (2 * self.alpha * self.length_scale**2)) ** -self.alpha
if eval_gradient:
# gradient with respect to length_scale
if not self.hyperparameter_length_scale.fixed:
length_scale_gradient = dists * K / (self.length_scale**2 * base)
length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
else: # l is kept fixed
length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
# gradient with respect to alpha
if not self.hyperparameter_alpha.fixed:
alpha_gradient = K * (
-self.alpha * np.log(base)
+ dists / (2 * self.length_scale**2 * base)
)
alpha_gradient = alpha_gradient[:, :, np.newaxis]
else: # alpha is kept fixed
alpha_gradient = np.empty((K.shape[0], K.shape[1], 0))
return K, np.dstack((alpha_gradient, length_scale_gradient))
else:
return K
|
Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : ndarray of shape (n_samples_Y, n_features), default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
|
__call__
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/kernels.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/kernels.py
|
BSD-3-Clause
|
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : ndarray of shape (n_samples_Y, n_features), default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \
optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
"""
X = np.atleast_2d(X)
if Y is None:
dists = squareform(pdist(X, metric="euclidean"))
arg = np.pi * dists / self.periodicity
sin_of_arg = np.sin(arg)
K = np.exp(-2 * (sin_of_arg / self.length_scale) ** 2)
else:
if eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
dists = cdist(X, Y, metric="euclidean")
K = np.exp(
-2 * (np.sin(np.pi / self.periodicity * dists) / self.length_scale) ** 2
)
if eval_gradient:
cos_of_arg = np.cos(arg)
# gradient with respect to length_scale
if not self.hyperparameter_length_scale.fixed:
length_scale_gradient = 4 / self.length_scale**2 * sin_of_arg**2 * K
length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
else: # length_scale is kept fixed
length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
# gradient with respect to p
if not self.hyperparameter_periodicity.fixed:
periodicity_gradient = (
4 * arg / self.length_scale**2 * cos_of_arg * sin_of_arg * K
)
periodicity_gradient = periodicity_gradient[:, :, np.newaxis]
else: # p is kept fixed
periodicity_gradient = np.empty((K.shape[0], K.shape[1], 0))
return K, np.dstack((length_scale_gradient, periodicity_gradient))
else:
return K
|
Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : ndarray of shape (n_samples_Y, n_features), default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
|
__call__
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/kernels.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/kernels.py
|
BSD-3-Clause
|
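A short illustrative sketch for the ExpSineSquared record above (the period and inputs below are made-up values): points separated by exactly one period are perfectly correlated, because sin(pi * d / periodicity) vanishes.

import numpy as np
from sklearn.gaussian_process.kernels import ExpSineSquared

kernel = ExpSineSquared(length_scale=1.0, periodicity=2.0)
X = np.array([[0.0], [1.0], [2.0]])            # 2.0 lies one full period from 0.0

K = kernel(X)
print(np.isclose(K[0, 2], 1.0))                # True: exp(-2 * (sin(pi) / l)**2) == 1
K2, K_grad = kernel(X, eval_gradient=True)
print(K_grad.shape)                            # (3, 3, 2): log(length_scale), log(periodicity)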
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : ndarray of shape (n_samples_Y, n_features), default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\
optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
"""
X = np.atleast_2d(X)
if Y is None:
K = np.inner(X, X) + self.sigma_0**2
else:
if eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
K = np.inner(X, Y) + self.sigma_0**2
if eval_gradient:
if not self.hyperparameter_sigma_0.fixed:
K_gradient = np.empty((K.shape[0], K.shape[1], 1))
K_gradient[..., 0] = 2 * self.sigma_0**2
return K, K_gradient
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
|
Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : ndarray of shape (n_samples_Y, n_features), default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
|
__call__
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/kernels.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/kernels.py
|
BSD-3-Clause
|
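A brief sketch for the DotProduct record above (example inputs are invented): k(x, y) = sigma_0**2 + x . y, and the gradient with respect to log(sigma_0) is the constant 2 * sigma_0**2, matching K_gradient[..., 0] in the code.

import numpy as np
from sklearn.gaussian_process.kernels import DotProduct

kernel = DotProduct(sigma_0=2.0)
X = np.array([[1.0, 0.0], [0.0, 3.0]])

K = kernel(X)
print(K)                                       # expected [[5. 4.], [4. 13.]]
K2, K_grad = kernel(X, eval_gradient=True)
print(K_grad[..., 0])                          # every entry equals 2 * sigma_0**2 == 8.0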
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : ndarray of shape (n_samples_Y, n_features), default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\
optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
"""
pairwise_kernels_kwargs = self.pairwise_kernels_kwargs
if self.pairwise_kernels_kwargs is None:
pairwise_kernels_kwargs = {}
X = np.atleast_2d(X)
K = pairwise_kernels(
X,
Y,
metric=self.metric,
gamma=self.gamma,
filter_params=True,
**pairwise_kernels_kwargs,
)
if eval_gradient:
if self.hyperparameter_gamma.fixed:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
# approximate gradient numerically
def f(gamma): # helper function
return pairwise_kernels(
X,
Y,
metric=self.metric,
gamma=np.exp(gamma),
filter_params=True,
**pairwise_kernels_kwargs,
)
return K, _approx_fprime(self.theta, f, 1e-10)
else:
return K
|
Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : ndarray of shape (n_samples_Y, n_features), default=None
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool, default=False
Determines whether the gradient with respect to the log of
the kernel hyperparameter is computed.
Only supported when Y is None.
Returns
-------
K : ndarray of shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), optional
The gradient of the kernel k(X, X) with respect to the log of the
hyperparameter of the kernel. Only returned when `eval_gradient`
is True.
|
__call__
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/kernels.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/kernels.py
|
BSD-3-Clause
|
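An illustrative sketch for the PairwiseKernel record above (metric, gamma, and data are chosen for the example): the kernel delegates to sklearn.metrics.pairwise.pairwise_kernels, so with metric="rbf" and gamma = 1 / (2 * length_scale**2) it should agree with an RBF kernel, while the gamma gradient is approximated numerically as the code shows.

import numpy as np
from sklearn.gaussian_process.kernels import PairwiseKernel, RBF

X = np.array([[0.0], [1.0], [2.0]])
pw = PairwiseKernel(metric="rbf", gamma=0.5)

K = pw(X)
print(np.allclose(K, RBF(length_scale=1.0)(X)))   # True: gamma 0.5 matches length_scale 1.0
K2, K_grad = pw(X, eval_gradient=True)
print(K_grad.shape)                                # (3, 3, 1): numerical d/d log(gamma)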
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : ndarray of shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
# We have to fall back to slow way of computing diagonal
return np.apply_along_axis(self, 1, X).ravel()
|
Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : ndarray of shape (n_samples_X,)
Diagonal of kernel k(X, X)
|
diag
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/kernels.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/kernels.py
|
BSD-3-Clause
|
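A small check for the diag record above (data made up for illustration): the generic fallback evaluates the kernel one sample at a time and agrees with taking the diagonal of the full kernel matrix.

import numpy as np
from sklearn.gaussian_process.kernels import PairwiseKernel

X = np.array([[0.0], [1.0], [2.0]])
kernel = PairwiseKernel(metric="laplacian", gamma=1.0)

print(np.allclose(kernel.diag(X), np.diag(kernel(X))))   # True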