code (string, length 66–870k) | docstring (string, length 19–26.7k) | func_name (string, length 1–138) | language (string, 1 class) | repo (string, length 7–68) | path (string, length 5–324) | url (string, length 46–389) | license (string, 7 classes)
---|---|---|---|---|---|---|---|
def fit(self, X, y):
"""Fit Gaussian process classification model.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Feature vectors or other representations of training data.
y : array-like of shape (n_samples,)
Target values, must be binary.
Returns
-------
self : returns an instance of self.
"""
if self.kernel is None: # Use an RBF kernel as default
self.kernel_ = C(1.0, constant_value_bounds="fixed") * RBF(
1.0, length_scale_bounds="fixed"
)
else:
self.kernel_ = clone(self.kernel)
self.rng = check_random_state(self.random_state)
self.X_train_ = np.copy(X) if self.copy_X_train else X
# Encode class labels and check that it is a binary classification
# problem
label_encoder = LabelEncoder()
self.y_train_ = label_encoder.fit_transform(y)
self.classes_ = label_encoder.classes_
if self.classes_.size > 2:
raise ValueError(
"%s supports only binary classification. y contains classes %s"
% (self.__class__.__name__, self.classes_)
)
elif self.classes_.size == 1:
raise ValueError(
"{0:s} requires 2 classes; got {1:d} class".format(
self.__class__.__name__, self.classes_.size
)
)
if self.optimizer is not None and self.kernel_.n_dims > 0:
# Choose hyperparameters based on maximizing the log-marginal
# likelihood (potentially starting from several initial values)
def obj_func(theta, eval_gradient=True):
if eval_gradient:
lml, grad = self.log_marginal_likelihood(
theta, eval_gradient=True, clone_kernel=False
)
return -lml, -grad
else:
return -self.log_marginal_likelihood(theta, clone_kernel=False)
# First optimize starting from theta specified in kernel
optima = [
self._constrained_optimization(
obj_func, self.kernel_.theta, self.kernel_.bounds
)
]
# Additional runs are performed from log-uniform chosen initial
# theta
if self.n_restarts_optimizer > 0:
if not np.isfinite(self.kernel_.bounds).all():
raise ValueError(
"Multiple optimizer restarts (n_restarts_optimizer>0) "
"requires that all bounds are finite."
)
bounds = self.kernel_.bounds
for iteration in range(self.n_restarts_optimizer):
theta_initial = np.exp(self.rng.uniform(bounds[:, 0], bounds[:, 1]))
optima.append(
self._constrained_optimization(obj_func, theta_initial, bounds)
)
# Select result from run with minimal (negative) log-marginal
# likelihood
lml_values = list(map(itemgetter(1), optima))
self.kernel_.theta = optima[np.argmin(lml_values)][0]
self.kernel_._check_bounds_params()
self.log_marginal_likelihood_value_ = -np.min(lml_values)
else:
self.log_marginal_likelihood_value_ = self.log_marginal_likelihood(
self.kernel_.theta
)
# Precompute quantities required for predictions which are independent
# of actual query points
K = self.kernel_(self.X_train_)
_, (self.pi_, self.W_sr_, self.L_, _, _) = self._posterior_mode(
K, return_temporaries=True
)
return self
|
Fit Gaussian process classification model.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Feature vectors or other representations of training data.
y : array-like of shape (n_samples,)
Target values, must be binary.
Returns
-------
self : returns an instance of self.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/_gpc.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/_gpc.py
|
BSD-3-Clause
|
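A minimal usage sketch for the binary classifier whose `fit` is shown above (the toy data and variable names are illustrative, not from the source; the public entry point is `GaussianProcessClassifier`, which wraps this Laplace-approximation estimator):

import numpy as np
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C

# Illustrative binary toy data (not from the source).
rng = np.random.RandomState(0)
X = rng.randn(40, 2)
y = (X[:, 0] + X[:, 1] > 0).astype(int)

# kernel=None would fall back to the fixed 1.0 * RBF(1.0) default shown above;
# passing an explicit kernel lets the optimizer tune its hyperparameters.
gpc = GaussianProcessClassifier(
    kernel=C(1.0) * RBF(1.0), n_restarts_optimizer=2, random_state=0
)
gpc.fit(X, y)
print(gpc.classes_)                        # [0 1]
print(gpc.kernel_)                         # kernel with optimized theta
print(gpc.log_marginal_likelihood_value_)  # LML of the selected theta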
def predict(self, X):
"""Perform classification on an array of test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Query points where the GP is evaluated for classification.
Returns
-------
C : ndarray of shape (n_samples,)
Predicted target values for X, values are from ``classes_``
"""
check_is_fitted(self)
# As discussed in Section 3.4.2 of GPML, for making hard binary
# decisions, it is enough to compute the MAP of the posterior and
# pass it through the link function
K_star = self.kernel_(self.X_train_, X)  # K_star = k(x_star)
f_star = K_star.T.dot(self.y_train_ - self.pi_)  # Algorithm 3.2, Line 4
return np.where(f_star > 0, self.classes_[1], self.classes_[0])
|
Perform classification on an array of test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Query points where the GP is evaluated for classification.
Returns
-------
C : ndarray of shape (n_samples,)
Predicted target values for X, values are from ``classes_``
|
predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/_gpc.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/_gpc.py
|
BSD-3-Clause
|
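As a reading aid (not from the source), the hard decision above can be written, for training labels y_i in {0, 1} and the fitted posterior sigmoid values \hat{\pi}:

\bar{f}_* = k(x_*)^\top (\mathbf{y} - \hat{\boldsymbol{\pi}}), \qquad
\hat{y}_* = \mathrm{classes\_}[1] \;\text{iff}\; \bar{f}_* > 0,

which is GPML Algorithm 3.2, Line 4, followed by thresholding, since \sigma(\bar{f}_*) > 1/2 \iff \bar{f}_* > 0.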
def predict_proba(self, X):
"""Return probability estimates for the test vector X.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Query points where the GP is evaluated for classification.
Returns
-------
C : array-like of shape (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute ``classes_``.
"""
check_is_fitted(self)
# Compute the mean and variance of the latent function
# (Lines 4-6 of Algorithm 3.2 of GPML)
latent_mean, latent_var = self.latent_mean_and_variance(X)
# Line 7:
# Approximate \int sigmoid(z) * N(z | f_star, var_f_star) dz
# Approximation is due to Williams & Barber, "Bayesian Classification
# with Gaussian Processes", Appendix A: Approximate the logistic
# sigmoid by a linear combination of 5 error functions.
# For information on how this integral can be computed see
# blitiri.blogspot.de/2012/11/gaussian-integral-of-error-function.html
alpha = 1 / (2 * latent_var)
gamma = LAMBDAS * latent_mean
integrals = (
np.sqrt(np.pi / alpha)
* erf(gamma * np.sqrt(alpha / (alpha + LAMBDAS**2)))
/ (2 * np.sqrt(latent_var * 2 * np.pi))
)
pi_star = (COEFS * integrals).sum(axis=0) + 0.5 * COEFS.sum()
return np.vstack((1 - pi_star, pi_star)).T
|
Return probability estimates for the test vector X.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Query points where the GP is evaluated for classification.
Returns
-------
C : array-like of shape (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute ``classes_``.
|
predict_proba
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/_gpc.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/_gpc.py
|
BSD-3-Clause
|
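One way to read the integral approximation above (c_i and \lambda_i denote the module-level COEFS and LAMBDAS constants, which are not shown in this excerpt):

\bar{\pi}_* = \int \sigma(f_*)\, \mathcal{N}(f_* \mid \mu_*, \sigma_*^2)\, df_*, \qquad
\sigma(z) \approx \sum_{i=1}^{5} c_i \, \frac{1 + \operatorname{erf}(\lambda_i z)}{2},

so each term reduces to the closed-form Gaussian integral
\int \operatorname{erf}(\lambda z)\, \mathcal{N}(z \mid \mu, s^2)\, dz = \operatorname{erf}\!\big(\lambda \mu / \sqrt{1 + 2 \lambda^2 s^2}\big),
which is what the `alpha`, `gamma`, and `integrals` expressions compute; the trailing `0.5 * COEFS.sum()` is the constant part of the sigmoid approximation.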
def log_marginal_likelihood(
self, theta=None, eval_gradient=False, clone_kernel=True
):
"""Returns log-marginal likelihood of theta for training data.
Parameters
----------
theta : array-like of shape (n_kernel_params,), default=None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default=False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. If True, theta must not be None.
clone_kernel : bool, default=True
If True, the kernel attribute is copied. If False, the kernel
attribute is modified, but may result in a performance improvement.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : ndarray of shape (n_kernel_params,), \
optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when `eval_gradient` is True.
"""
if theta is None:
if eval_gradient:
raise ValueError("Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
if clone_kernel:
kernel = self.kernel_.clone_with_theta(theta)
else:
kernel = self.kernel_
kernel.theta = theta
if eval_gradient:
K, K_gradient = kernel(self.X_train_, eval_gradient=True)
else:
K = kernel(self.X_train_)
# Compute log-marginal-likelihood Z and also store some temporaries
# which can be reused for computing Z's gradient
Z, (pi, W_sr, L, b, a) = self._posterior_mode(K, return_temporaries=True)
if not eval_gradient:
return Z
# Compute gradient based on Algorithm 5.1 of GPML
d_Z = np.empty(theta.shape[0])
# XXX: Get rid of the np.diag() in the next line
R = W_sr[:, np.newaxis] * cho_solve((L, True), np.diag(W_sr)) # Line 7
C = solve(L, W_sr[:, np.newaxis] * K) # Line 8
# Line 9: (use einsum to compute np.diag(C.T.dot(C)))
s_2 = (
-0.5
* (np.diag(K) - np.einsum("ij, ij -> j", C, C))
* (pi * (1 - pi) * (1 - 2 * pi))
) # third derivative
for j in range(d_Z.shape[0]):
C = K_gradient[:, :, j] # Line 11
# Line 12: (R.T.ravel().dot(C.ravel()) = np.trace(R.dot(C)))
s_1 = 0.5 * a.T.dot(C).dot(a) - 0.5 * R.T.ravel().dot(C.ravel())
b = C.dot(self.y_train_ - pi) # Line 13
s_3 = b - K.dot(R.dot(b)) # Line 14
d_Z[j] = s_1 + s_2.T.dot(s_3) # Line 15
return Z, d_Z
|
Returns log-marginal likelihood of theta for training data.
Parameters
----------
theta : array-like of shape (n_kernel_params,), default=None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default=False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. If True, theta must not be None.
clone_kernel : bool, default=True
If True, the kernel attribute is copied. If False, the kernel
attribute is modified, but may result in a performance improvement.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : ndarray of shape (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when `eval_gradient` is True.
|
log_marginal_likelihood
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/_gpc.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/_gpc.py
|
BSD-3-Clause
|
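As a reading aid (not from the source), the gradient loop above follows the decomposition of GPML Algorithm 5.1 into an explicit and an implicit term:

\frac{\partial}{\partial \theta_j} \log q(\mathbf{y} \mid X, \theta)
= \underbrace{\tfrac{1}{2}\, \mathbf{a}^\top \frac{\partial K}{\partial \theta_j}\, \mathbf{a}
  - \tfrac{1}{2}\, \operatorname{tr}\!\Big(R\, \frac{\partial K}{\partial \theta_j}\Big)}_{s_1}
+ \underbrace{\mathbf{s}_2^\top \mathbf{s}_3}_{\text{implicit, via } \hat{\mathbf{f}}(\theta)},
\qquad R = W^{1/2} B^{-1} W^{1/2},

where s_2 (Line 9) involves the third derivative of the log-likelihood and s_3 (Lines 13-14) propagates the dependence of the posterior mode \hat{\mathbf{f}} on \theta.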
def latent_mean_and_variance(self, X):
"""Compute the mean and variance of the latent function values.
Based on algorithm 3.2 of [RW2006]_, this function returns the latent
mean (Line 4) and variance (Line 6) of the Gaussian process
classification model.
Note that this function is only supported for binary classification.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Query points where the GP is evaluated for classification.
Returns
-------
latent_mean : array-like of shape (n_samples,)
Mean of the latent function values at the query points.
latent_var : array-like of shape (n_samples,)
Variance of the latent function values at the query points.
"""
check_is_fitted(self)
# Based on Algorithm 3.2 of GPML
K_star = self.kernel_(self.X_train_, X)  # K_star = k(x_star)
latent_mean = K_star.T.dot(self.y_train_ - self.pi_) # Line 4
v = solve(self.L_, self.W_sr_[:, np.newaxis] * K_star) # Line 5
# Line 6 (compute np.diag(v.T.dot(v)) via einsum)
latent_var = self.kernel_.diag(X) - np.einsum("ij,ij->j", v, v)
return latent_mean, latent_var
|
Compute the mean and variance of the latent function values.
Based on algorithm 3.2 of [RW2006]_, this function returns the latent
mean (Line 4) and variance (Line 6) of the Gaussian process
classification model.
Note that this function is only supported for binary classification.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Query points where the GP is evaluated for classification.
Returns
-------
latent_mean : array-like of shape (n_samples,)
Mean of the latent function values at the query points.
latent_var : array-like of shape (n_samples,)
Variance of the latent function values at the query points.
|
latent_mean_and_variance
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/_gpc.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/_gpc.py
|
BSD-3-Clause
|
def _posterior_mode(self, K, return_temporaries=False):
"""Mode-finding for binary Laplace GPC and fixed kernel.
This approximates the posterior of the latent function values for given
inputs and target observations with a Gaussian approximation and uses
Newton's iteration to find the mode of this approximation.
"""
# Based on Algorithm 3.1 of GPML
# If warm_start is enabled, we reuse the last solution for the
# posterior mode as initialization; otherwise, we initialize with 0
if (
self.warm_start
and hasattr(self, "f_cached")
and self.f_cached.shape == self.y_train_.shape
):
f = self.f_cached
else:
f = np.zeros_like(self.y_train_, dtype=np.float64)
# Use Newton's iteration method to find mode of Laplace approximation
log_marginal_likelihood = -np.inf
for _ in range(self.max_iter_predict):
# Line 4
pi = expit(f)
W = pi * (1 - pi)
# Line 5
W_sr = np.sqrt(W)
W_sr_K = W_sr[:, np.newaxis] * K
B = np.eye(W.shape[0]) + W_sr_K * W_sr
L = cholesky(B, lower=True)
# Line 6
b = W * f + (self.y_train_ - pi)
# Line 7
a = b - W_sr * cho_solve((L, True), W_sr_K.dot(b))
# Line 8
f = K.dot(a)
# Line 10: Compute log marginal likelihood in loop and use as
# convergence criterion
lml = (
-0.5 * a.T.dot(f)
- np.log1p(np.exp(-(self.y_train_ * 2 - 1) * f)).sum()
- np.log(np.diag(L)).sum()
)
# Check if we have converged (log marginal likelihood does
# not decrease)
# XXX: more complex convergence criterion
if lml - log_marginal_likelihood < 1e-10:
break
log_marginal_likelihood = lml
self.f_cached = f # Remember solution for later warm-starts
if return_temporaries:
return log_marginal_likelihood, (pi, W_sr, L, b, a)
else:
return log_marginal_likelihood
|
Mode-finding for binary Laplace GPC and fixed kernel.
This approximates the posterior of the latent function values for given
inputs and target observations with a Gaussian approximation and uses
Newton's iteration to find the mode of this approximation.
|
_posterior_mode
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/_gpc.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/_gpc.py
|
BSD-3-Clause
|
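As a reading aid (not from the source), the quantity tracked as `lml` inside the Newton loop is the Laplace approximation of the log marginal likelihood at the current mode \hat{\mathbf{f}} = K \mathbf{a} (GPML, Algorithm 3.1):

\log q(\mathbf{y} \mid X, \theta) = -\tfrac{1}{2}\, \mathbf{a}^\top \hat{\mathbf{f}}
+ \log p(\mathbf{y} \mid \hat{\mathbf{f}}) - \tfrac{1}{2} \log\lvert B \rvert,
\qquad B = I + W^{1/2} K W^{1/2},

with \log p(\mathbf{y} \mid \hat{\mathbf{f}}) = -\sum_i \log\!\big(1 + e^{-(2 y_i - 1)\hat{f}_i}\big) for y_i in {0, 1}, and \tfrac{1}{2}\log\lvert B\rvert = \sum_i \log L_{ii} for the Cholesky factor B = L L^\top, matching the three terms of `lml` above.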
def fit(self, X, y):
"""Fit Gaussian process classification model.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Feature vectors or other representations of training data.
y : array-like of shape (n_samples,)
Target values, must be binary.
Returns
-------
self : object
Returns an instance of self.
"""
if isinstance(self.kernel, CompoundKernel):
raise ValueError("kernel cannot be a CompoundKernel")
if self.kernel is None or self.kernel.requires_vector_input:
X, y = validate_data(
self, X, y, multi_output=False, ensure_2d=True, dtype="numeric"
)
else:
X, y = validate_data(
self, X, y, multi_output=False, ensure_2d=False, dtype=None
)
self.base_estimator_ = _BinaryGaussianProcessClassifierLaplace(
kernel=self.kernel,
optimizer=self.optimizer,
n_restarts_optimizer=self.n_restarts_optimizer,
max_iter_predict=self.max_iter_predict,
warm_start=self.warm_start,
copy_X_train=self.copy_X_train,
random_state=self.random_state,
)
self.classes_ = np.unique(y)
self.n_classes_ = self.classes_.size
if self.n_classes_ == 1:
raise ValueError(
"GaussianProcessClassifier requires 2 or more "
"distinct classes; got %d class (only class %s "
"is present)" % (self.n_classes_, self.classes_[0])
)
if self.n_classes_ > 2:
if self.multi_class == "one_vs_rest":
self.base_estimator_ = OneVsRestClassifier(
self.base_estimator_, n_jobs=self.n_jobs
)
elif self.multi_class == "one_vs_one":
self.base_estimator_ = OneVsOneClassifier(
self.base_estimator_, n_jobs=self.n_jobs
)
else:
raise ValueError("Unknown multi-class mode %s" % self.multi_class)
self.base_estimator_.fit(X, y)
if self.n_classes_ > 2:
self.log_marginal_likelihood_value_ = np.mean(
[
estimator.log_marginal_likelihood()
for estimator in self.base_estimator_.estimators_
]
)
else:
self.log_marginal_likelihood_value_ = (
self.base_estimator_.log_marginal_likelihood()
)
return self
|
Fit Gaussian process classification model.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Feature vectors or other representations of training data.
y : array-like of shape (n_samples,)
Target values, must be binary.
Returns
-------
self : object
Returns an instance of self.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/_gpc.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/_gpc.py
|
BSD-3-Clause
|
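A short, illustrative sketch of the multi-class path above (the toy data and labels are made up; `multi_class="one_vs_rest"` is also the default):

import numpy as np
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF

# Illustrative 3-class toy data (not from the source).
rng = np.random.RandomState(0)
X = rng.randn(60, 2)
y = np.argmax(X @ rng.randn(2, 3), axis=1)

gpc = GaussianProcessClassifier(
    kernel=RBF(1.0), multi_class="one_vs_rest", random_state=0
)
gpc.fit(X, y)
print(gpc.n_classes_)                      # 3
# For more than two classes this is the mean LML of the one-vs-rest estimators.
print(gpc.log_marginal_likelihood_value_)
print(gpc.predict_proba(X[:3]).shape)      # (3, 3); one_vs_one would raise here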
def predict(self, X):
"""Perform classification on an array of test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Query points where the GP is evaluated for classification.
Returns
-------
C : ndarray of shape (n_samples,)
Predicted target values for X, values are from ``classes_``.
"""
check_is_fitted(self)
if self.kernel is None or self.kernel.requires_vector_input:
X = validate_data(self, X, ensure_2d=True, dtype="numeric", reset=False)
else:
X = validate_data(self, X, ensure_2d=False, dtype=None, reset=False)
return self.base_estimator_.predict(X)
|
Perform classification on an array of test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Query points where the GP is evaluated for classification.
Returns
-------
C : ndarray of shape (n_samples,)
Predicted target values for X, values are from ``classes_``.
|
predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/_gpc.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/_gpc.py
|
BSD-3-Clause
|
def predict_proba(self, X):
"""Return probability estimates for the test vector X.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Query points where the GP is evaluated for classification.
Returns
-------
C : array-like of shape (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute :term:`classes_`.
"""
check_is_fitted(self)
if self.n_classes_ > 2 and self.multi_class == "one_vs_one":
raise ValueError(
"one_vs_one multi-class mode does not support "
"predicting probability estimates. Use "
"one_vs_rest mode instead."
)
if self.kernel is None or self.kernel.requires_vector_input:
X = validate_data(self, X, ensure_2d=True, dtype="numeric", reset=False)
else:
X = validate_data(self, X, ensure_2d=False, dtype=None, reset=False)
return self.base_estimator_.predict_proba(X)
|
Return probability estimates for the test vector X.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Query points where the GP is evaluated for classification.
Returns
-------
C : array-like of shape (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute :term:`classes_`.
|
predict_proba
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/_gpc.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/_gpc.py
|
BSD-3-Clause
|
def kernel_(self):
"""Return the kernel of the base estimator."""
if self.n_classes_ == 2:
return self.base_estimator_.kernel_
else:
return CompoundKernel(
[estimator.kernel_ for estimator in self.base_estimator_.estimators_]
)
|
Return the kernel of the base estimator.
|
kernel_
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/_gpc.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/_gpc.py
|
BSD-3-Clause
|
def log_marginal_likelihood(
self, theta=None, eval_gradient=False, clone_kernel=True
):
"""Return log-marginal likelihood of theta for training data.
In the case of multi-class classification, the mean log-marginal
likelihood of the one-versus-rest classifiers is returned.
Parameters
----------
theta : array-like of shape (n_kernel_params,), default=None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. In the case of multi-class classification, theta may
be the hyperparameters of the compound kernel or of an individual
kernel. In the latter case, all individual kernels get assigned the
same theta values. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default=False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. Note that gradient computation is not supported
for non-binary classification. If True, theta must not be None.
clone_kernel : bool, default=True
If True, the kernel attribute is copied. If False, the kernel
attribute is modified, but may result in a performance improvement.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : ndarray of shape (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when `eval_gradient` is True.
"""
check_is_fitted(self)
if theta is None:
if eval_gradient:
raise ValueError("Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
theta = np.asarray(theta)
if self.n_classes_ == 2:
return self.base_estimator_.log_marginal_likelihood(
theta, eval_gradient, clone_kernel=clone_kernel
)
else:
if eval_gradient:
raise NotImplementedError(
"Gradient of log-marginal-likelihood not implemented for "
"multi-class GPC."
)
estimators = self.base_estimator_.estimators_
n_dims = estimators[0].kernel_.n_dims
if theta.shape[0] == n_dims: # use same theta for all sub-kernels
return np.mean(
[
estimator.log_marginal_likelihood(
theta, clone_kernel=clone_kernel
)
for i, estimator in enumerate(estimators)
]
)
elif theta.shape[0] == n_dims * self.classes_.shape[0]:
# theta for compound kernel
return np.mean(
[
estimator.log_marginal_likelihood(
theta[n_dims * i : n_dims * (i + 1)],
clone_kernel=clone_kernel,
)
for i, estimator in enumerate(estimators)
]
)
else:
raise ValueError(
"Shape of theta must be either %d or %d. "
"Obtained theta with shape %d."
% (n_dims, n_dims * self.classes_.shape[0], theta.shape[0])
)
|
Return log-marginal likelihood of theta for training data.
In the case of multi-class classification, the mean log-marginal
likelihood of the one-versus-rest classifiers is returned.
Parameters
----------
theta : array-like of shape (n_kernel_params,), default=None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. In the case of multi-class classification, theta may
be the hyperparameters of the compound kernel or of an individual
kernel. In the latter case, all individual kernels get assigned the
same theta values. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default=False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. Note that gradient computation is not supported
for non-binary classification. If True, theta must not be None.
clone_kernel : bool, default=True
If True, the kernel attribute is copied. If False, the kernel
attribute is modified, but may result in a performance improvement.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : ndarray of shape (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when `eval_gradient` is True.
|
log_marginal_likelihood
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/_gpc.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/_gpc.py
|
BSD-3-Clause
|
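An illustrative sketch of the two accepted `theta` shapes in the multi-class branch above (toy data made up; `base_estimator_.estimators_` and `kernel_.n_dims` are used exactly as in the code; theta is in log-space, so zeros correspond to length_scale=1.0):

import numpy as np
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF

rng = np.random.RandomState(0)
X = rng.randn(60, 2)
y = rng.randint(0, 3, size=60)             # illustrative 3-class labels

gpc = GaussianProcessClassifier(kernel=RBF(1.0)).fit(X, y)
n_dims = gpc.base_estimator_.estimators_[0].kernel_.n_dims   # 1 for an isotropic RBF

# Shared theta: one block, reused for every one-vs-rest sub-kernel.
print(gpc.log_marginal_likelihood(np.zeros(n_dims)))
# Compound theta: one block per class, sliced as theta[n_dims * i : n_dims * (i + 1)].
print(gpc.log_marginal_likelihood(np.zeros(n_dims * gpc.n_classes_)))
# Any other length raises ValueError; eval_gradient=True raises NotImplementedError here.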
def latent_mean_and_variance(self, X):
"""Compute the mean and variance of the latent function.
Based on algorithm 3.2 of [RW2006]_, this function returns the latent
mean (Line 4) and variance (Line 6) of the Gaussian process
classification model.
Note that this function is only supported for binary classification.
.. versionadded:: 1.7
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Query points where the GP is evaluated for classification.
Returns
-------
latent_mean : array-like of shape (n_samples,)
Mean of the latent function values at the query points.
latent_var : array-like of shape (n_samples,)
Variance of the latent function values at the query points.
"""
if self.n_classes_ > 2:
raise ValueError(
"Returning the mean and variance of the latent function f "
"is only supported for binary classification, received "
f"{self.n_classes_} classes."
)
check_is_fitted(self)
if self.kernel is None or self.kernel.requires_vector_input:
X = validate_data(self, X, ensure_2d=True, dtype="numeric", reset=False)
else:
X = validate_data(self, X, ensure_2d=False, dtype=None, reset=False)
return self.base_estimator_.latent_mean_and_variance(X)
|
Compute the mean and variance of the latent function.
Based on algorithm 3.2 of [RW2006]_, this function returns the latent
mean (Line 4) and variance (Line 6) of the Gaussian process
classification model.
Note that this function is only supported for binary classification.
.. versionadded:: 1.7
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Query points where the GP is evaluated for classification.
Returns
-------
latent_mean : array-like of shape (n_samples,)
Mean of the latent function values at the query points.
latent_var : array-like of shape (n_samples,)
Variance of the latent function values at the query points.
|
latent_mean_and_variance
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/_gpc.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/_gpc.py
|
BSD-3-Clause
|
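A small sketch of the public `latent_mean_and_variance` above (available from scikit-learn 1.7 per the `versionadded` note; the toy data is illustrative):

import numpy as np
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF

rng = np.random.RandomState(0)
X = rng.randn(30, 2)
y = (X[:, 0] > 0).astype(int)               # illustrative binary labels

gpc = GaussianProcessClassifier(kernel=RBF(1.0)).fit(X, y)
latent_mean, latent_var = gpc.latent_mean_and_variance(X)
print(latent_mean.shape, latent_var.shape)  # (30,) (30,)
# predict() thresholds the same latent mean at 0, so the two agree:
assert np.array_equal(gpc.predict(X), gpc.classes_[(latent_mean > 0).astype(int)])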
def fit(self, X, y):
"""Fit Gaussian process regression model.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Feature vectors or other representations of training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
self : object
GaussianProcessRegressor class instance.
"""
if self.kernel is None: # Use an RBF kernel as default
self.kernel_ = C(1.0, constant_value_bounds="fixed") * RBF(
1.0, length_scale_bounds="fixed"
)
else:
self.kernel_ = clone(self.kernel)
self._rng = check_random_state(self.random_state)
if self.kernel_.requires_vector_input:
dtype, ensure_2d = "numeric", True
else:
dtype, ensure_2d = None, False
X, y = validate_data(
self,
X,
y,
multi_output=True,
y_numeric=True,
ensure_2d=ensure_2d,
dtype=dtype,
)
n_targets_seen = y.shape[1] if y.ndim > 1 else 1
if self.n_targets is not None and n_targets_seen != self.n_targets:
raise ValueError(
"The number of targets seen in `y` is different from the parameter "
f"`n_targets`. Got {n_targets_seen} != {self.n_targets}."
)
# Normalize target value
if self.normalize_y:
self._y_train_mean = np.mean(y, axis=0)
self._y_train_std = _handle_zeros_in_scale(np.std(y, axis=0), copy=False)
# Remove mean and make unit variance
y = (y - self._y_train_mean) / self._y_train_std
else:
shape_y_stats = (y.shape[1],) if y.ndim == 2 else 1
self._y_train_mean = np.zeros(shape=shape_y_stats)
self._y_train_std = np.ones(shape=shape_y_stats)
if np.iterable(self.alpha) and self.alpha.shape[0] != y.shape[0]:
if self.alpha.shape[0] == 1:
self.alpha = self.alpha[0]
else:
raise ValueError(
"alpha must be a scalar or an array with same number of "
f"entries as y. ({self.alpha.shape[0]} != {y.shape[0]})"
)
self.X_train_ = np.copy(X) if self.copy_X_train else X
self.y_train_ = np.copy(y) if self.copy_X_train else y
if self.optimizer is not None and self.kernel_.n_dims > 0:
# Choose hyperparameters based on maximizing the log-marginal
# likelihood (potentially starting from several initial values)
def obj_func(theta, eval_gradient=True):
if eval_gradient:
lml, grad = self.log_marginal_likelihood(
theta, eval_gradient=True, clone_kernel=False
)
return -lml, -grad
else:
return -self.log_marginal_likelihood(theta, clone_kernel=False)
# First optimize starting from theta specified in kernel
optima = [
(
self._constrained_optimization(
obj_func, self.kernel_.theta, self.kernel_.bounds
)
)
]
# Additional runs are performed from log-uniform chosen initial
# theta
if self.n_restarts_optimizer > 0:
if not np.isfinite(self.kernel_.bounds).all():
raise ValueError(
"Multiple optimizer restarts (n_restarts_optimizer>0) "
"requires that all bounds are finite."
)
bounds = self.kernel_.bounds
for iteration in range(self.n_restarts_optimizer):
theta_initial = self._rng.uniform(bounds[:, 0], bounds[:, 1])
optima.append(
self._constrained_optimization(obj_func, theta_initial, bounds)
)
# Select result from run with minimal (negative) log-marginal
# likelihood
lml_values = list(map(itemgetter(1), optima))
self.kernel_.theta = optima[np.argmin(lml_values)][0]
self.kernel_._check_bounds_params()
self.log_marginal_likelihood_value_ = -np.min(lml_values)
else:
self.log_marginal_likelihood_value_ = self.log_marginal_likelihood(
self.kernel_.theta, clone_kernel=False
)
# Precompute quantities required for predictions which are independent
# of actual query points
# Alg. 2.1, page 19, line 2 -> L = cholesky(K + sigma^2 I)
K = self.kernel_(self.X_train_)
K[np.diag_indices_from(K)] += self.alpha
try:
self.L_ = cholesky(K, lower=GPR_CHOLESKY_LOWER, check_finite=False)
except np.linalg.LinAlgError as exc:
exc.args = (
(
f"The kernel, {self.kernel_}, is not returning a positive "
"definite matrix. Try gradually increasing the 'alpha' "
"parameter of your GaussianProcessRegressor estimator."
),
) + exc.args
raise
# Alg 2.1, page 19, line 3 -> alpha = L^T \ (L \ y)
self.alpha_ = cho_solve(
(self.L_, GPR_CHOLESKY_LOWER),
self.y_train_,
check_finite=False,
)
return self
|
Fit Gaussian process regression model.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Feature vectors or other representations of training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
self : object
GaussianProcessRegressor class instance.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/_gpr.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/_gpr.py
|
BSD-3-Clause
|
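A minimal usage sketch of the regressor's `fit` above (illustrative data; the WhiteKernel term is one common way to learn a noise level, while `alpha` adds a fixed jitter to the diagonal of K):

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel, ConstantKernel as C

# Illustrative 1-D regression data (not from the source).
rng = np.random.RandomState(0)
X = rng.uniform(0, 5, size=(30, 1))
y = np.sin(X).ravel() + 0.1 * rng.randn(30)

kernel = C(1.0) * RBF(length_scale=1.0) + WhiteKernel(noise_level=0.1)
gpr = GaussianProcessRegressor(
    kernel=kernel, normalize_y=True, n_restarts_optimizer=3, random_state=0
)
gpr.fit(X, y)
print(gpr.kernel_)                          # kernel with optimized hyperparameters
print(gpr.log_marginal_likelihood_value_)   # LML of the selected theta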
def predict(self, X, return_std=False, return_cov=False):
"""Predict using the Gaussian process regression model.
We can also predict based on an unfitted model by using the GP prior.
In addition to the mean of the predictive distribution, optionally also
returns its standard deviation (`return_std=True`) or covariance
(`return_cov=True`). Note that at most one of the two can be requested.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Query points where the GP is evaluated.
return_std : bool, default=False
If True, the standard-deviation of the predictive distribution at
the query points is returned along with the mean.
return_cov : bool, default=False
If True, the covariance of the joint predictive distribution at
the query points is returned along with the mean.
Returns
-------
y_mean : ndarray of shape (n_samples,) or (n_samples, n_targets)
Mean of predictive distribution at query points.
y_std : ndarray of shape (n_samples,) or (n_samples, n_targets), optional
Standard deviation of predictive distribution at query points.
Only returned when `return_std` is True.
y_cov : ndarray of shape (n_samples, n_samples) or \
(n_samples, n_samples, n_targets), optional
Covariance of joint predictive distribution at query points.
Only returned when `return_cov` is True.
"""
if return_std and return_cov:
raise RuntimeError(
"At most one of return_std or return_cov can be requested."
)
if self.kernel is None or self.kernel.requires_vector_input:
dtype, ensure_2d = "numeric", True
else:
dtype, ensure_2d = None, False
X = validate_data(self, X, ensure_2d=ensure_2d, dtype=dtype, reset=False)
if not hasattr(self, "X_train_"):  # Unfitted; predict based on GP prior
if self.kernel is None:
kernel = C(1.0, constant_value_bounds="fixed") * RBF(
1.0, length_scale_bounds="fixed"
)
else:
kernel = self.kernel
n_targets = self.n_targets if self.n_targets is not None else 1
y_mean = np.zeros(shape=(X.shape[0], n_targets)).squeeze()
if return_cov:
y_cov = kernel(X)
if n_targets > 1:
y_cov = np.repeat(
np.expand_dims(y_cov, -1), repeats=n_targets, axis=-1
)
return y_mean, y_cov
elif return_std:
y_var = kernel.diag(X)
if n_targets > 1:
y_var = np.repeat(
np.expand_dims(y_var, -1), repeats=n_targets, axis=-1
)
return y_mean, np.sqrt(y_var)
else:
return y_mean
else: # Predict based on GP posterior
# Alg 2.1, page 19, line 4 -> f*_bar = K(X_test, X_train) . alpha
K_trans = self.kernel_(X, self.X_train_)
y_mean = K_trans @ self.alpha_
# undo normalisation
y_mean = self._y_train_std * y_mean + self._y_train_mean
# if y_mean has shape (n_samples, 1), reshape to (n_samples,)
if y_mean.ndim > 1 and y_mean.shape[1] == 1:
y_mean = np.squeeze(y_mean, axis=1)
# Alg 2.1, page 19, line 5 -> v = L \ K(X_test, X_train)^T
V = solve_triangular(
self.L_, K_trans.T, lower=GPR_CHOLESKY_LOWER, check_finite=False
)
if return_cov:
# Alg 2.1, page 19, line 6 -> K(X_test, X_test) - v^T. v
y_cov = self.kernel_(X) - V.T @ V
# undo normalisation
y_cov = np.outer(y_cov, self._y_train_std**2).reshape(*y_cov.shape, -1)
# if y_cov has shape (n_samples, n_samples, 1), reshape to
# (n_samples, n_samples)
if y_cov.shape[2] == 1:
y_cov = np.squeeze(y_cov, axis=2)
return y_mean, y_cov
elif return_std:
# Compute variance of predictive distribution
# Use einsum to avoid explicitly forming the large matrix
# V^T @ V just to extract its diagonal afterward.
y_var = self.kernel_.diag(X).copy()
y_var -= np.einsum("ij,ji->i", V.T, V)
# Check if any of the variances is negative because of
# numerical issues. If yes: set the variance to 0.
y_var_negative = y_var < 0
if np.any(y_var_negative):
warnings.warn(
"Predicted variances smaller than 0. "
"Setting those variances to 0."
)
y_var[y_var_negative] = 0.0
# undo normalisation
y_var = np.outer(y_var, self._y_train_std**2).reshape(*y_var.shape, -1)
# if y_var has shape (n_samples, 1), reshape to (n_samples,)
if y_var.shape[1] == 1:
y_var = np.squeeze(y_var, axis=1)
return y_mean, np.sqrt(y_var)
else:
return y_mean
|
Predict using the Gaussian process regression model.
We can also predict based on an unfitted model by using the GP prior.
In addition to the mean of the predictive distribution, optionally also
returns its standard deviation (`return_std=True`) or covariance
(`return_cov=True`). Note that at most one of the two can be requested.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Query points where the GP is evaluated.
return_std : bool, default=False
If True, the standard-deviation of the predictive distribution at
the query points is returned along with the mean.
return_cov : bool, default=False
If True, the covariance of the joint predictive distribution at
the query points is returned along with the mean.
Returns
-------
y_mean : ndarray of shape (n_samples,) or (n_samples, n_targets)
Mean of predictive distribution at query points.
y_std : ndarray of shape (n_samples,) or (n_samples, n_targets), optional
Standard deviation of predictive distribution at query points.
Only returned when `return_std` is True.
y_cov : ndarray of shape (n_samples, n_samples) or (n_samples, n_samples, n_targets), optional
Covariance of joint predictive distribution at query points.
Only returned when `return_cov` is True.
|
predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/_gpr.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/_gpr.py
|
BSD-3-Clause
|
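An illustrative sketch of `predict` above, covering both the prior branch (before `fit`) and the posterior branch (after `fit`); data and shapes are made up:

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF

rng = np.random.RandomState(0)
X = rng.uniform(0, 5, size=(30, 1))
y = np.sin(X).ravel()
X_new = np.linspace(0, 5, 100).reshape(-1, 1)

gpr = GaussianProcessRegressor(kernel=RBF(1.0))
# Before fit: the zero-mean GP prior is returned.
prior_mean, prior_std = gpr.predict(X_new, return_std=True)

gpr.fit(X, y)
y_mean, y_std = gpr.predict(X_new, return_std=True)
_, y_cov = gpr.predict(X_new, return_cov=True)
print(y_mean.shape, y_std.shape, y_cov.shape)   # (100,) (100,) (100, 100)
# Requesting return_std and return_cov together raises RuntimeError.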
def sample_y(self, X, n_samples=1, random_state=0):
"""Draw samples from Gaussian process and evaluate at X.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Query points where the GP is evaluated.
n_samples : int, default=1
Number of samples drawn from the Gaussian process per query point.
random_state : int, RandomState instance or None, default=0
Determines random number generation to randomly draw samples.
Pass an int for reproducible results across multiple function
calls.
See :term:`Glossary <random_state>`.
Returns
-------
y_samples : ndarray of shape (n_samples_X, n_samples), or \
(n_samples_X, n_targets, n_samples)
Values of n_samples samples drawn from Gaussian process and
evaluated at query points.
"""
rng = check_random_state(random_state)
y_mean, y_cov = self.predict(X, return_cov=True)
if y_mean.ndim == 1:
y_samples = rng.multivariate_normal(y_mean, y_cov, n_samples).T
else:
y_samples = [
rng.multivariate_normal(
y_mean[:, target], y_cov[..., target], n_samples
).T[:, np.newaxis]
for target in range(y_mean.shape[1])
]
y_samples = np.hstack(y_samples)
return y_samples
|
Draw samples from Gaussian process and evaluate at X.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Query points where the GP is evaluated.
n_samples : int, default=1
Number of samples drawn from the Gaussian process per query point.
random_state : int, RandomState instance or None, default=0
Determines random number generation to randomly draw samples.
Pass an int for reproducible results across multiple function
calls.
See :term:`Glossary <random_state>`.
Returns
-------
y_samples : ndarray of shape (n_samples_X, n_samples), or (n_samples_X, n_targets, n_samples)
Values of n_samples samples drawn from Gaussian process and
evaluated at query points.
|
sample_y
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/_gpr.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/_gpr.py
|
BSD-3-Clause
|
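A short sketch of `sample_y` above (illustrative data; output shapes follow the docstring):

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF

rng = np.random.RandomState(0)
X = rng.uniform(0, 5, size=(20, 1))
y = np.sin(X).ravel()
X_new = np.linspace(0, 5, 50).reshape(-1, 1)

gpr = GaussianProcessRegressor(kernel=RBF(1.0)).fit(X, y)
# Draw 5 functions from the posterior, evaluated at the 50 query points.
y_samples = gpr.sample_y(X_new, n_samples=5, random_state=0)
print(y_samples.shape)   # (50, 5); multi-output y gives (n_samples_X, n_targets, n_samples)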
def log_marginal_likelihood(
self, theta=None, eval_gradient=False, clone_kernel=True
):
"""Return log-marginal likelihood of theta for training data.
Parameters
----------
theta : array-like of shape (n_kernel_params,) default=None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default=False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. If True, theta must not be None.
clone_kernel : bool, default=True
If True, the kernel attribute is copied. If False, the kernel
attribute is modified, but may result in a performance improvement.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : ndarray of shape (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
if theta is None:
if eval_gradient:
raise ValueError("Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
if clone_kernel:
kernel = self.kernel_.clone_with_theta(theta)
else:
kernel = self.kernel_
kernel.theta = theta
if eval_gradient:
K, K_gradient = kernel(self.X_train_, eval_gradient=True)
else:
K = kernel(self.X_train_)
# Alg. 2.1, page 19, line 2 -> L = cholesky(K + sigma^2 I)
K[np.diag_indices_from(K)] += self.alpha
try:
L = cholesky(K, lower=GPR_CHOLESKY_LOWER, check_finite=False)
except np.linalg.LinAlgError:
return (-np.inf, np.zeros_like(theta)) if eval_gradient else -np.inf
# Support multi-dimensional output of self.y_train_
y_train = self.y_train_
if y_train.ndim == 1:
y_train = y_train[:, np.newaxis]
# Alg 2.1, page 19, line 3 -> alpha = L^T \ (L \ y)
alpha = cho_solve((L, GPR_CHOLESKY_LOWER), y_train, check_finite=False)
# Alg 2.1, page 19, line 7
# -0.5 . y^T . alpha - sum(log(diag(L))) - n_samples / 2 log(2*pi)
# y is originally thought to be a (1, n_samples) row vector. However,
# in multioutputs, y is of shape (n_samples, n_outputs) and we need to compute
# y^T . alpha for each output, independently using einsum. Thus, it
# is equivalent to:
# for output_idx in range(n_outputs):
# log_likelihood_dims[output_idx] = (
# y_train[:, [output_idx]] @ alpha[:, [output_idx]]
# )
log_likelihood_dims = -0.5 * np.einsum("ik,ik->k", y_train, alpha)
log_likelihood_dims -= np.log(np.diag(L)).sum()
log_likelihood_dims -= K.shape[0] / 2 * np.log(2 * np.pi)
# the log likelihood is sum-up across the outputs
log_likelihood = log_likelihood_dims.sum(axis=-1)
if eval_gradient:
# Eq. 5.9, p. 114, and footnote 5 in p. 114
# 0.5 * trace((alpha . alpha^T - K^-1) . K_gradient)
# alpha is supposed to be a vector of (n_samples,) elements. With
# multioutputs, alpha is a matrix of size (n_samples, n_outputs).
# Therefore, we want to construct a matrix of
# (n_samples, n_samples, n_outputs) equivalent to
# for output_idx in range(n_outputs):
# output_alpha = alpha[:, [output_idx]]
# inner_term[..., output_idx] = output_alpha @ output_alpha.T
inner_term = np.einsum("ik,jk->ijk", alpha, alpha)
# compute K^-1 of shape (n_samples, n_samples)
K_inv = cho_solve(
(L, GPR_CHOLESKY_LOWER), np.eye(K.shape[0]), check_finite=False
)
# create a new axis to use broadcasting between inner_term and
# K_inv
inner_term -= K_inv[..., np.newaxis]
# Since we are interested about the trace of
# inner_term @ K_gradient, we don't explicitly compute the
# matrix-by-matrix operation and instead use an einsum. Therefore
# it is equivalent to:
# for param_idx in range(n_kernel_params):
# for output_idx in range(n_output):
# log_likehood_gradient_dims[param_idx, output_idx] = (
# inner_term[..., output_idx] @
# K_gradient[..., param_idx]
# )
log_likelihood_gradient_dims = 0.5 * np.einsum(
"ijl,jik->kl", inner_term, K_gradient
)
# the log likelihood gradient is the sum-up across the outputs
log_likelihood_gradient = log_likelihood_gradient_dims.sum(axis=-1)
if eval_gradient:
return log_likelihood, log_likelihood_gradient
else:
return log_likelihood
|
Return log-marginal likelihood of theta for training data.
Parameters
----------
theta : array-like of shape (n_kernel_params,) default=None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default=False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. If True, theta must not be None.
clone_kernel : bool, default=True
If True, the kernel attribute is copied. If False, the kernel
attribute is modified, but may result in a performance improvement.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : ndarray of shape (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
|
log_marginal_likelihood
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/_gpr.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/_gpr.py
|
BSD-3-Clause
|
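As a reading aid (not from the source), the quantity computed above is GPML Eq. 2.30 with \tilde{K} = K + \alpha I (the estimator's `alpha` playing the role of \sigma_n^2):

\log p(\mathbf{y} \mid X, \theta)
= -\tfrac{1}{2}\, \mathbf{y}^\top \tilde{K}^{-1} \mathbf{y}
- \tfrac{1}{2} \log\lvert \tilde{K} \rvert
- \tfrac{n}{2} \log 2\pi,
\qquad \tfrac{1}{2}\log\lvert \tilde{K}\rvert = \sum_i \log L_{ii} \;\; (\tilde{K} = L L^\top),

and the gradient block implements GPML Eq. 5.9,

\frac{\partial}{\partial \theta_j} \log p(\mathbf{y} \mid X, \theta)
= \tfrac{1}{2} \operatorname{tr}\!\Big( \big(\boldsymbol{\alpha}\boldsymbol{\alpha}^\top - \tilde{K}^{-1}\big)\, \frac{\partial K}{\partial \theta_j} \Big),
\qquad \boldsymbol{\alpha} = \tilde{K}^{-1}\mathbf{y},

evaluated per output and summed, which is what the `inner_term`/`K_gradient` einsum computes.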
def test_gpc_fit_error(params, error_type, err_msg):
"""Check that the expected errors are raised during fit."""
gpc = GaussianProcessClassifier(**params)
with pytest.raises(error_type, match=err_msg):
gpc.fit(X, y)
|
Check that the expected errors are raised during fit.
|
test_gpc_fit_error
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/tests/test_gpc.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/tests/test_gpc.py
|
BSD-3-Clause
|
def test_gpc_latent_mean_and_variance_shape(kernel):
"""Checks that the latent mean and variance have the right shape."""
gpc = GaussianProcessClassifier(kernel=kernel)
gpc.fit(X, y)
# Check that the latent mean and variance have the right shape
latent_mean, latent_variance = gpc.latent_mean_and_variance(X)
assert latent_mean.shape == (X.shape[0],)
assert latent_variance.shape == (X.shape[0],)
|
Checks that the latent mean and variance have the right shape.
|
test_gpc_latent_mean_and_variance_shape
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/tests/test_gpc.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/tests/test_gpc.py
|
BSD-3-Clause
|
def test_gpc_latent_mean_and_variance_complain_on_more_than_2_classes():
"""Check that requesting the latent mean and variance raises an error for more than two classes."""
gpc = GaussianProcessClassifier(kernel=RBF())
gpc.fit(X, y_mc)
# Requesting the latent mean and variance must raise for more than two classes
with pytest.raises(
ValueError,
match="Returning the mean and variance of the latent function f "
"is only supported for binary classification",
):
gpc.latent_mean_and_variance(X)
|
Check that requesting the latent mean and variance raises an error for more than two classes.
|
test_gpc_latent_mean_and_variance_complain_on_more_than_2_classes
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/tests/test_gpc.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/tests/test_gpc.py
|
BSD-3-Clause
|
def test_y_normalization(kernel):
"""
Test normalization of the target values in GP
Fitting non-normalizing GP on normalized y and fitting normalizing GP
on unnormalized y should yield identical results. Note that, here,
'normalized y' refers to y that has been made zero mean and unit
variance.
"""
y_mean = np.mean(y)
y_std = np.std(y)
y_norm = (y - y_mean) / y_std
# Fit non-normalizing GP on normalized y
gpr = GaussianProcessRegressor(kernel=kernel)
gpr.fit(X, y_norm)
# Fit normalizing GP on unnormalized y
gpr_norm = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
gpr_norm.fit(X, y)
# Compare predicted mean, std-devs and covariances
y_pred, y_pred_std = gpr.predict(X2, return_std=True)
y_pred = y_pred * y_std + y_mean
y_pred_std = y_pred_std * y_std
y_pred_norm, y_pred_std_norm = gpr_norm.predict(X2, return_std=True)
assert_almost_equal(y_pred, y_pred_norm)
assert_almost_equal(y_pred_std, y_pred_std_norm)
_, y_cov = gpr.predict(X2, return_cov=True)
y_cov = y_cov * y_std**2
_, y_cov_norm = gpr_norm.predict(X2, return_cov=True)
assert_almost_equal(y_cov, y_cov_norm)
|
Test normalization of the target values in GP
Fitting non-normalizing GP on normalized y and fitting normalizing GP
on unnormalized y should yield identical results. Note that, here,
'normalized y' refers to y that has been made zero mean and unit
variance.
|
test_y_normalization
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/tests/test_gpr.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/tests/test_gpr.py
|
BSD-3-Clause
|
def test_large_variance_y():
"""
Here we test that, when normalize_y=True, our GP can produce a
sensible fit to training data whose variance is significantly
larger than unity. This test was made in response to issue #15612.
GP predictions are verified against predictions that were made
using GPy which, here, is treated as the 'gold standard'. Note that we
only investigate the RBF kernel here, as that is what was used in the
GPy implementation.
The following code can be used to recreate the GPy data:
--------------------------------------------------------------------------
import GPy
kernel_gpy = GPy.kern.RBF(input_dim=1, lengthscale=1.)
gpy = GPy.models.GPRegression(X, np.vstack(y_large), kernel_gpy)
gpy.optimize()
y_pred_gpy, y_var_gpy = gpy.predict(X2)
y_pred_std_gpy = np.sqrt(y_var_gpy)
--------------------------------------------------------------------------
"""
# Here we utilise a larger variance version of the training data
y_large = 10 * y
# Standard GP with normalize_y=True
RBF_params = {"length_scale": 1.0}
kernel = RBF(**RBF_params)
gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
gpr.fit(X, y_large)
y_pred, y_pred_std = gpr.predict(X2, return_std=True)
# 'Gold standard' mean predictions from GPy
y_pred_gpy = np.array(
[15.16918303, -27.98707845, -39.31636019, 14.52605515, 69.18503589]
)
# 'Gold standard' std predictions from GPy
y_pred_std_gpy = np.array(
[7.78860962, 3.83179178, 0.63149951, 0.52745188, 0.86170042]
)
# Based on numerical experiments, it's reasonable to expect our
# GP's mean predictions to get within 7% of predictions of those
# made by GPy.
assert_allclose(y_pred, y_pred_gpy, rtol=0.07, atol=0)
# Based on numerical experiments, it's reasonable to expect our
# GP's std predictions to get within 15% of predictions of those
# made by GPy.
assert_allclose(y_pred_std, y_pred_std_gpy, rtol=0.15, atol=0)
|
Here we test that, when normalize_y=True, our GP can produce a
sensible fit to training data whose variance is significantly
larger than unity. This test was made in response to issue #15612.
GP predictions are verified against predictions that were made
using GPy which, here, is treated as the 'gold standard'. Note that we
only investigate the RBF kernel here, as that is what was used in the
GPy implementation.
The following code can be used to recreate the GPy data:
--------------------------------------------------------------------------
import GPy
kernel_gpy = GPy.kern.RBF(input_dim=1, lengthscale=1.)
gpy = GPy.models.GPRegression(X, np.vstack(y_large), kernel_gpy)
gpy.optimize()
y_pred_gpy, y_var_gpy = gpy.predict(X2)
y_pred_std_gpy = np.sqrt(y_var_gpy)
--------------------------------------------------------------------------
|
test_large_variance_y
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/tests/test_gpr.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/tests/test_gpr.py
|
BSD-3-Clause
|
def test_constant_target(kernel):
"""Check that the std. dev. is set to 1 when normalizing a constant
target.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/18318
NaN values were assigned to the target when scaling, due to the null std. dev.
of a constant target.
"""
y_constant = np.ones(X.shape[0], dtype=np.float64)
gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
gpr.fit(X, y_constant)
assert gpr._y_train_std == pytest.approx(1.0)
y_pred, y_cov = gpr.predict(X, return_cov=True)
assert_allclose(y_pred, y_constant)
# set atol because we compare to zero
assert_allclose(np.diag(y_cov), 0.0, atol=1e-9)
# Test multi-target data
n_samples, n_targets = X.shape[0], 2
rng = np.random.RandomState(0)
y = np.concatenate(
[
rng.normal(size=(n_samples, 1)), # non-constant target
np.full(shape=(n_samples, 1), fill_value=2), # constant target
],
axis=1,
)
gpr.fit(X, y)
Y_pred, Y_cov = gpr.predict(X, return_cov=True)
assert_allclose(Y_pred[:, 1], 2)
assert_allclose(np.diag(Y_cov[..., 1]), 0.0, atol=1e-9)
assert Y_pred.shape == (n_samples, n_targets)
assert Y_cov.shape == (n_samples, n_samples, n_targets)
|
Check that the std. dev. is set to 1 when normalizing a constant
target.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/18318
NaN values were assigned to the target when scaling, due to the null std. dev.
of a constant target.
|
test_constant_target
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/tests/test_gpr.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/tests/test_gpr.py
|
BSD-3-Clause
|
def test_gpr_consistency_std_cov_non_invertible_kernel():
"""Check the consistency between the returned std. dev. and the covariance.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/19936
Inconsistencies were observed when the kernel cannot be inverted (or is not
numerically stable).
"""
kernel = C(8.98576054e05, (1e-12, 1e12)) * RBF(
[5.91326520e02, 1.32584051e03], (1e-12, 1e12)
) + WhiteKernel(noise_level=1e-5)
gpr = GaussianProcessRegressor(kernel=kernel, alpha=0, optimizer=None)
X_train = np.array(
[
[0.0, 0.0],
[1.54919334, -0.77459667],
[-1.54919334, 0.0],
[0.0, -1.54919334],
[0.77459667, 0.77459667],
[-0.77459667, 1.54919334],
]
)
y_train = np.array(
[
[-2.14882017e-10],
[-4.66975823e00],
[4.01823986e00],
[-1.30303674e00],
[-1.35760156e00],
[3.31215668e00],
]
)
gpr.fit(X_train, y_train)
X_test = np.array(
[
[-1.93649167, -1.93649167],
[1.93649167, -1.93649167],
[-1.93649167, 1.93649167],
[1.93649167, 1.93649167],
]
)
pred1, std = gpr.predict(X_test, return_std=True)
pred2, cov = gpr.predict(X_test, return_cov=True)
assert_allclose(std, np.sqrt(np.diagonal(cov)), rtol=1e-5)
|
Check the consistency between the returned std. dev. and the covariance.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/19936
Inconsistencies were observed when the kernel cannot be inverted (or is not
numerically stable).
|
test_gpr_consistency_std_cov_non_invertible_kernel
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/tests/test_gpr.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/tests/test_gpr.py
|
BSD-3-Clause
|
def test_gpr_fit_error(params, error_type, err_msg):
"""Check that the expected errors are raised during fit."""
gpr = GaussianProcessRegressor(**params)
with pytest.raises(error_type, match=err_msg):
gpr.fit(X, y)
|
Check that the expected errors are raised during fit.
|
test_gpr_fit_error
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/tests/test_gpr.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/tests/test_gpr.py
|
BSD-3-Clause
|
def test_gpr_lml_error():
"""Check that we raise the proper error in the LML method."""
gpr = GaussianProcessRegressor(kernel=RBF()).fit(X, y)
err_msg = "Gradient can only be evaluated for theta!=None"
with pytest.raises(ValueError, match=err_msg):
gpr.log_marginal_likelihood(eval_gradient=True)
|
Check that we raise the proper error in the LML method.
|
test_gpr_lml_error
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/tests/test_gpr.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/tests/test_gpr.py
|
BSD-3-Clause
|
def test_gpr_predict_error():
"""Check that we raise the proper error during predict."""
gpr = GaussianProcessRegressor(kernel=RBF()).fit(X, y)
err_msg = "At most one of return_std or return_cov can be requested."
with pytest.raises(RuntimeError, match=err_msg):
gpr.predict(X, return_cov=True, return_std=True)
|
Check that we raise the proper error during predict.
|
test_gpr_predict_error
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/tests/test_gpr.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/tests/test_gpr.py
|
BSD-3-Clause
|
def test_predict_shapes(normalize_y, n_targets):
"""Check the shapes of y_mean, y_std, and y_cov in single-output
(n_targets=None) and multi-output settings, including the edge case when
n_targets=1, where the sklearn convention is to squeeze the predictions.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/17394
https://github.com/scikit-learn/scikit-learn/issues/18065
https://github.com/scikit-learn/scikit-learn/issues/22174
"""
rng = np.random.RandomState(1234)
n_features, n_samples_train, n_samples_test = 6, 9, 7
y_train_shape = (n_samples_train,)
if n_targets is not None:
y_train_shape = y_train_shape + (n_targets,)
# By convention single-output data is squeezed upon prediction
y_test_shape = (n_samples_test,)
if n_targets is not None and n_targets > 1:
y_test_shape = y_test_shape + (n_targets,)
X_train = rng.randn(n_samples_train, n_features)
X_test = rng.randn(n_samples_test, n_features)
y_train = rng.randn(*y_train_shape)
model = GaussianProcessRegressor(normalize_y=normalize_y)
model.fit(X_train, y_train)
y_pred, y_std = model.predict(X_test, return_std=True)
_, y_cov = model.predict(X_test, return_cov=True)
assert y_pred.shape == y_test_shape
assert y_std.shape == y_test_shape
assert y_cov.shape == (n_samples_test,) + y_test_shape
|
Check the shapes of y_mean, y_std, and y_cov in single-output
(n_targets=None) and multi-output settings, including the edge case when
n_targets=1, where the sklearn convention is to squeeze the predictions.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/17394
https://github.com/scikit-learn/scikit-learn/issues/18065
https://github.com/scikit-learn/scikit-learn/issues/22174
|
test_predict_shapes
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/tests/test_gpr.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/tests/test_gpr.py
|
BSD-3-Clause
|
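The shape conventions exercised by the test above can be reproduced with the public estimator. A minimal sketch, not part of the source file; the toy data and sizes are illustrative assumptions only:

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor

rng = np.random.RandomState(0)
X_train, X_test = rng.randn(9, 6), rng.randn(7, 6)
y_train = rng.randn(9, 3)  # three targets

gpr = GaussianProcessRegressor().fit(X_train, y_train)
y_mean, y_std = gpr.predict(X_test, return_std=True)
_, y_cov = gpr.predict(X_test, return_cov=True)
# Expected shapes: (7, 3), (7, 3) and (7, 7, 3)
print(y_mean.shape, y_std.shape, y_cov.shape)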
def test_sample_y_shapes(normalize_y, n_targets):
"""Check the shapes of y_samples in single-output (n_targets=0) and
multi-output settings, including the edge case when n_targets=1, where the
sklearn convention is to squeeze the predictions.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/22175
"""
rng = np.random.RandomState(1234)
n_features, n_samples_train = 6, 9
# Number of spatial locations to predict at
n_samples_X_test = 7
# Number of sample predictions per test point
n_samples_y_test = 5
y_train_shape = (n_samples_train,)
if n_targets is not None:
y_train_shape = y_train_shape + (n_targets,)
# By convention single-output data is squeezed upon prediction
if n_targets is not None and n_targets > 1:
y_test_shape = (n_samples_X_test, n_targets, n_samples_y_test)
else:
y_test_shape = (n_samples_X_test, n_samples_y_test)
X_train = rng.randn(n_samples_train, n_features)
X_test = rng.randn(n_samples_X_test, n_features)
y_train = rng.randn(*y_train_shape)
model = GaussianProcessRegressor(normalize_y=normalize_y)
# FIXME: before fitting, the estimator does not have information regarding
# the number of targets and default to 1. This is inconsistent with the shape
# provided after `fit`. This assert should be made once the following issue
# is fixed:
# https://github.com/scikit-learn/scikit-learn/issues/22430
# y_samples = model.sample_y(X_test, n_samples=n_samples_y_test)
# assert y_samples.shape == y_test_shape
model.fit(X_train, y_train)
y_samples = model.sample_y(X_test, n_samples=n_samples_y_test)
assert y_samples.shape == y_test_shape
|
Check the shapes of y_samples in single-output (n_targets=None) and
multi-output settings, including the edge case when n_targets=1, where the
sklearn convention is to squeeze the predictions.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/22175
|
test_sample_y_shapes
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/tests/test_gpr.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/tests/test_gpr.py
|
BSD-3-Clause
|
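A minimal usage sketch of `sample_y` showing the single-output shape convention checked above (data invented for illustration):

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor

rng = np.random.RandomState(0)
X_train, X_test = rng.randn(9, 6), rng.randn(7, 6)
y_train = rng.randn(9)  # single output

gpr = GaussianProcessRegressor().fit(X_train, y_train)
samples = gpr.sample_y(X_test, n_samples=5, random_state=0)
print(samples.shape)  # (7, 5): (n_test_points, n_sampled_functions)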
def test_sample_y_shape_with_prior(n_targets, n_samples):
"""Check the output shape of `sample_y` is consistent before and after `fit`."""
rng = np.random.RandomState(1024)
X = rng.randn(10, 3)
y = rng.randn(10, n_targets if n_targets is not None else 1)
model = GaussianProcessRegressor(n_targets=n_targets)
shape_before_fit = model.sample_y(X, n_samples=n_samples).shape
model.fit(X, y)
shape_after_fit = model.sample_y(X, n_samples=n_samples).shape
assert shape_before_fit == shape_after_fit
|
Check the output shape of `sample_y` is consistent before and after `fit`.
|
test_sample_y_shape_with_prior
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/tests/test_gpr.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/tests/test_gpr.py
|
BSD-3-Clause
|
def test_predict_shape_with_prior(n_targets):
"""Check the output shape of `predict` with prior distribution."""
rng = np.random.RandomState(1024)
n_sample = 10
X = rng.randn(n_sample, 3)
y = rng.randn(n_sample, n_targets if n_targets is not None else 1)
model = GaussianProcessRegressor(n_targets=n_targets)
mean_prior, cov_prior = model.predict(X, return_cov=True)
_, std_prior = model.predict(X, return_std=True)
model.fit(X, y)
mean_post, cov_post = model.predict(X, return_cov=True)
_, std_post = model.predict(X, return_std=True)
assert mean_prior.shape == mean_post.shape
assert cov_prior.shape == cov_post.shape
assert std_prior.shape == std_post.shape
|
Check the output shape of `predict` with prior distribution.
|
test_predict_shape_with_prior
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/tests/test_gpr.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/tests/test_gpr.py
|
BSD-3-Clause
|
def test_n_targets_error():
"""Check that an error is raised when the number of targets seen at fit is
inconsistent with n_targets.
"""
rng = np.random.RandomState(0)
X = rng.randn(10, 3)
y = rng.randn(10, 2)
model = GaussianProcessRegressor(n_targets=1)
with pytest.raises(ValueError, match="The number of targets seen in `y`"):
model.fit(X, y)
|
Check that an error is raised when the number of targets seen at fit is
inconsistent with n_targets.
|
test_n_targets_error
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/tests/test_gpr.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/tests/test_gpr.py
|
BSD-3-Clause
|
def test_gpr_predict_input_not_modified():
"""
Check that the input X is not modified by the predict method of the
GaussianProcessRegressor when setting return_std=True.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/24340
"""
gpr = GaussianProcessRegressor(kernel=CustomKernel()).fit(X, y)
X2_copy = np.copy(X2)
_, _ = gpr.predict(X2, return_std=True)
assert_allclose(X2, X2_copy)
|
Check that the input X is not modified by the predict method of the
GaussianProcessRegressor when setting return_std=True.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/24340
|
test_gpr_predict_input_not_modified
|
python
|
scikit-learn/scikit-learn
|
sklearn/gaussian_process/tests/test_gpr.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/gaussian_process/tests/test_gpr.py
|
BSD-3-Clause
|
def _most_frequent(array, extra_value, n_repeat):
"""Compute the most frequent value in a 1d array extended with
[extra_value] * n_repeat, where extra_value is assumed to be not part
of the array."""
# Compute the most frequent value in array only
if array.size > 0:
if array.dtype == object:
# scipy.stats.mode is slow with object dtype array.
# Python Counter is more efficient
counter = Counter(array)
most_frequent_count = counter.most_common(1)[0][1]
# tie breaking similarly to scipy.stats.mode
most_frequent_value = min(
value
for value, count in counter.items()
if count == most_frequent_count
)
else:
mode = _mode(array)
most_frequent_value = mode[0][0]
most_frequent_count = mode[1][0]
else:
most_frequent_value = 0
most_frequent_count = 0
# Compare to array + [extra_value] * n_repeat
if most_frequent_count == 0 and n_repeat == 0:
return np.nan
elif most_frequent_count < n_repeat:
return extra_value
elif most_frequent_count > n_repeat:
return most_frequent_value
elif most_frequent_count == n_repeat:
# tie breaking similarly to scipy.stats.mode
return min(most_frequent_value, extra_value)
|
Compute the most frequent value in a 1d array extended with
[extra_value] * n_repeat, where extra_value is assumed to be not part
of the array.
|
_most_frequent
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/_base.py
|
BSD-3-Clause
|
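The tie-breaking rule described above (the smallest value wins, mirroring scipy.stats.mode) is observable through the public SimpleImputer API. A small illustrative sketch, not taken from the source:

import numpy as np
from sklearn.impute import SimpleImputer

# The column contains 1.0 twice and 2.0 twice: the tie is broken towards
# the smaller value, so the missing entry is filled with 1.0.
X = np.array([[1.0], [2.0], [2.0], [1.0], [np.nan]])
imputer = SimpleImputer(strategy="most_frequent")
print(imputer.fit_transform(X).ravel())  # [1. 2. 2. 1. 1.]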
def _transform_indicator(self, X):
"""Compute the indicator mask.'
Note that X must be the original data as passed to the imputer before
any imputation, since imputation may be done inplace in some cases.
"""
if self.add_indicator:
if not hasattr(self, "indicator_"):
raise ValueError(
"Make sure to call _fit_indicator before _transform_indicator"
)
return self.indicator_.transform(X)
|
Compute the indicator mask.
Note that X must be the original data as passed to the imputer before
any imputation, since imputation may be done inplace in some cases.
|
_transform_indicator
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/_base.py
|
BSD-3-Clause
|
def _concatenate_indicator(self, X_imputed, X_indicator):
"""Concatenate indicator mask with the imputed data."""
if not self.add_indicator:
return X_imputed
if sp.issparse(X_imputed):
# sp.hstack may result in different formats between sparse arrays and
# matrices; specify the format to keep consistent behavior
hstack = partial(sp.hstack, format=X_imputed.format)
else:
hstack = np.hstack
if X_indicator is None:
raise ValueError(
"Data from the missing indicator are not provided. Call "
"_fit_indicator and _transform_indicator in the imputer "
"implementation."
)
return hstack((X_imputed, X_indicator))
|
Concatenate indicator mask with the imputed data.
|
_concatenate_indicator
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/_base.py
|
BSD-3-Clause
|
def fit(self, X, y=None):
"""Fit the imputer on `X`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self : object
Fitted estimator.
"""
X = self._validate_input(X, in_fit=True)
# default fill_value is 0 for numerical input and "missing_value"
# otherwise
if self.fill_value is None:
if X.dtype.kind in ("i", "u", "f"):
fill_value = 0
else:
fill_value = "missing_value"
else:
fill_value = self.fill_value
if sp.issparse(X):
self.statistics_ = self._sparse_fit(
X, self.strategy, self.missing_values, fill_value
)
else:
self.statistics_ = self._dense_fit(
X, self.strategy, self.missing_values, fill_value
)
return self
|
Fit the imputer on `X`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self : object
Fitted estimator.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/_base.py
|
BSD-3-Clause
|
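A minimal sketch of the default `fill_value` behaviour handled in `fit` above: 0 for numerical input, or an explicit value when one is given. The arrays are illustrative only:

import numpy as np
from sklearn.impute import SimpleImputer

X = np.array([[1.0, np.nan], [np.nan, 3.0]])
# Default fill_value for numerical data is 0
print(SimpleImputer(strategy="constant").fit_transform(X))
# [[1. 0.]
#  [0. 3.]]
# An explicit fill_value overrides the default
print(SimpleImputer(strategy="constant", fill_value=-1).fit_transform(X))
# [[ 1. -1.]
#  [-1.  3.]]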
def _sparse_fit(self, X, strategy, missing_values, fill_value):
"""Fit the transformer on sparse data."""
missing_mask = _get_mask(X, missing_values)
mask_data = missing_mask.data
n_implicit_zeros = X.shape[0] - np.diff(X.indptr)
statistics = np.empty(X.shape[1])
if strategy == "constant":
# TODO(1.8): Remove FutureWarning and add `np.nan` as a statistic
# for empty features to drop them later.
if not self.keep_empty_features and any(
[all(missing_mask[:, i].data) for i in range(missing_mask.shape[1])]
):
warnings.warn(
"Currently, when `keep_empty_feature=False` and "
'`strategy="constant"`, empty features are not dropped. '
"This behaviour will change in version 1.8. Set "
"`keep_empty_feature=True` to preserve this behaviour.",
FutureWarning,
)
# for constant strategy, self.statistics_ is used to store
# fill_value in each column
statistics.fill(fill_value)
else:
for i in range(X.shape[1]):
column = X.data[X.indptr[i] : X.indptr[i + 1]]
mask_column = mask_data[X.indptr[i] : X.indptr[i + 1]]
column = column[~mask_column]
# combine explicit and implicit zeros
mask_zeros = _get_mask(column, 0)
column = column[~mask_zeros]
n_explicit_zeros = mask_zeros.sum()
n_zeros = n_implicit_zeros[i] + n_explicit_zeros
if len(column) == 0 and self.keep_empty_features:
# in case we want to keep columns with only missing values.
statistics[i] = 0
else:
if strategy == "mean":
s = column.size + n_zeros
statistics[i] = np.nan if s == 0 else column.sum() / s
elif strategy == "median":
statistics[i] = _get_median(column, n_zeros)
elif strategy == "most_frequent":
statistics[i] = _most_frequent(column, 0, n_zeros)
elif isinstance(strategy, Callable):
statistics[i] = self.strategy(column)
super()._fit_indicator(missing_mask)
return statistics
|
Fit the transformer on sparse data.
|
_sparse_fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/_base.py
|
BSD-3-Clause
|
def _dense_fit(self, X, strategy, missing_values, fill_value):
"""Fit the transformer on dense data."""
missing_mask = _get_mask(X, missing_values)
masked_X = ma.masked_array(X, mask=missing_mask)
super()._fit_indicator(missing_mask)
# Mean
if strategy == "mean":
mean_masked = np.ma.mean(masked_X, axis=0)
# Avoid the warning "Warning: converting a masked element to nan."
mean = np.ma.getdata(mean_masked)
mean[np.ma.getmask(mean_masked)] = 0 if self.keep_empty_features else np.nan
return mean
# Median
elif strategy == "median":
median_masked = np.ma.median(masked_X, axis=0)
# Avoid the warning "Warning: converting a masked element to nan."
median = np.ma.getdata(median_masked)
median[np.ma.getmaskarray(median_masked)] = (
0 if self.keep_empty_features else np.nan
)
return median
# Most frequent
elif strategy == "most_frequent":
# Avoid use of scipy.stats.mstats.mode due to the required
# additional overhead and slow benchmarking performance.
# See Issue 14325 and PR 14399 for full discussion.
            # To be able to access the elements by columns
X = X.transpose()
mask = missing_mask.transpose()
if X.dtype.kind == "O":
most_frequent = np.empty(X.shape[0], dtype=object)
else:
most_frequent = np.empty(X.shape[0])
for i, (row, row_mask) in enumerate(zip(X[:], mask[:])):
row_mask = np.logical_not(row_mask).astype(bool)
row = row[row_mask]
if len(row) == 0 and self.keep_empty_features:
most_frequent[i] = 0
else:
most_frequent[i] = _most_frequent(row, np.nan, 0)
return most_frequent
# Constant
elif strategy == "constant":
# TODO(1.8): Remove FutureWarning and add `np.nan` as a statistic
# for empty features to drop them later.
if not self.keep_empty_features and ma.getmask(masked_X).all(axis=0).any():
warnings.warn(
"Currently, when `keep_empty_feature=False` and "
'`strategy="constant"`, empty features are not dropped. '
"This behaviour will change in version 1.8. Set "
"`keep_empty_feature=True` to preserve this behaviour.",
FutureWarning,
)
            # for constant strategy, self.statistics_ is used to store
# fill_value in each column
return np.full(X.shape[1], fill_value, dtype=X.dtype)
# Custom
elif isinstance(strategy, Callable):
statistics = np.empty(masked_X.shape[1])
for i in range(masked_X.shape[1]):
statistics[i] = self.strategy(masked_X[:, i].compressed())
return statistics
|
Fit the transformer on dense data.
|
_dense_fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/_base.py
|
BSD-3-Clause
|
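The per-column statistics computed above are exposed as `statistics_` after `fit`. An illustrative sketch using the public API (toy data assumed):

import numpy as np
from sklearn.impute import SimpleImputer

X = np.array([[1.0, 10.0], [np.nan, 30.0], [7.0, np.nan]])
for strategy in ("mean", "median", "most_frequent"):
    imp = SimpleImputer(strategy=strategy).fit(X)
    print(strategy, imp.statistics_)
# mean          -> [ 4. 20.]
# median        -> [ 4. 20.]
# most_frequent -> [ 1. 10.]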
def transform(self, X):
"""Impute all missing values in `X`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data to complete.
Returns
-------
X_imputed : {ndarray, sparse matrix} of shape \
(n_samples, n_features_out)
`X` with imputed values.
"""
check_is_fitted(self)
X = self._validate_input(X, in_fit=False)
statistics = self.statistics_
if X.shape[1] != statistics.shape[0]:
raise ValueError(
"X has %d features per sample, expected %d"
% (X.shape[1], self.statistics_.shape[0])
)
# compute mask before eliminating invalid features
missing_mask = _get_mask(X, self.missing_values)
# Decide whether to keep missing features
if self.strategy == "constant" or self.keep_empty_features:
valid_statistics = statistics
valid_statistics_indexes = None
else:
# same as np.isnan but also works for object dtypes
invalid_mask = _get_mask(statistics, np.nan)
valid_mask = np.logical_not(invalid_mask)
valid_statistics = statistics[valid_mask]
valid_statistics_indexes = np.flatnonzero(valid_mask)
if invalid_mask.any():
invalid_features = np.arange(X.shape[1])[invalid_mask]
# use feature names warning if features are provided
if hasattr(self, "feature_names_in_"):
invalid_features = self.feature_names_in_[invalid_features]
warnings.warn(
"Skipping features without any observed values:"
f" {invalid_features}. At least one non-missing value is needed"
f" for imputation with strategy='{self.strategy}'."
)
X = X[:, valid_statistics_indexes]
# Do actual imputation
if sp.issparse(X):
if self.missing_values == 0:
raise ValueError(
"Imputation not possible when missing_values "
"== 0 and input is sparse. Provide a dense "
"array instead."
)
else:
# if no invalid statistics are found, use the mask computed
# before, else recompute mask
if valid_statistics_indexes is None:
mask = missing_mask.data
else:
mask = _get_mask(X.data, self.missing_values)
indexes = np.repeat(
np.arange(len(X.indptr) - 1, dtype=int), np.diff(X.indptr)
)[mask]
X.data[mask] = valid_statistics[indexes].astype(X.dtype, copy=False)
else:
# use mask computed before eliminating invalid mask
if valid_statistics_indexes is None:
mask_valid_features = missing_mask
else:
mask_valid_features = missing_mask[:, valid_statistics_indexes]
n_missing = np.sum(mask_valid_features, axis=0)
values = np.repeat(valid_statistics, n_missing)
coordinates = np.where(mask_valid_features.transpose())[::-1]
X[coordinates] = values
X_indicator = super()._transform_indicator(missing_mask)
return super()._concatenate_indicator(X, X_indicator)
|
Impute all missing values in `X`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data to complete.
Returns
-------
X_imputed : {ndarray, sparse matrix} of shape (n_samples, n_features_out)
`X` with imputed values.
|
transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/_base.py
|
BSD-3-Clause
|
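When a feature has no observed values at `fit` time and `keep_empty_features=False` (the default), `transform` drops it and emits the warning shown above. A small sketch with invented data:

import numpy as np
from sklearn.impute import SimpleImputer

X_train = np.array([[1.0, np.nan], [3.0, np.nan]])  # second column is empty
X_test = np.array([[np.nan, 5.0]])

imp = SimpleImputer(strategy="mean").fit(X_train)
print(imp.statistics_)        # [ 2. nan]
# Emits "Skipping features without any observed values" and drops column 1
print(imp.transform(X_test))  # [[2.]]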
def inverse_transform(self, X):
"""Convert the data back to the original representation.
Inverts the `transform` operation performed on an array.
This operation can only be performed after :class:`SimpleImputer` is
instantiated with `add_indicator=True`.
Note that `inverse_transform` can only invert the transform in
features that have binary indicators for missing values. If a feature
has no missing values at `fit` time, the feature won't have a binary
indicator, and the imputation done at `transform` time won't be
inverted.
.. versionadded:: 0.24
Parameters
----------
X : array-like of shape \
(n_samples, n_features + n_features_missing_indicator)
The imputed data to be reverted to original data. It has to be
an augmented array of imputed data and the missing indicator mask.
Returns
-------
X_original : ndarray of shape (n_samples, n_features)
The original `X` with missing values as it was prior
to imputation.
"""
check_is_fitted(self)
if not self.add_indicator:
raise ValueError(
"'inverse_transform' works only when "
"'SimpleImputer' is instantiated with "
"'add_indicator=True'. "
f"Got 'add_indicator={self.add_indicator}' "
"instead."
)
n_features_missing = len(self.indicator_.features_)
non_empty_feature_count = X.shape[1] - n_features_missing
array_imputed = X[:, :non_empty_feature_count].copy()
missing_mask = X[:, non_empty_feature_count:].astype(bool)
n_features_original = len(self.statistics_)
shape_original = (X.shape[0], n_features_original)
X_original = np.zeros(shape_original)
X_original[:, self.indicator_.features_] = missing_mask
full_mask = X_original.astype(bool)
imputed_idx, original_idx = 0, 0
while imputed_idx < len(array_imputed.T):
if not np.all(X_original[:, original_idx]):
X_original[:, original_idx] = array_imputed.T[imputed_idx]
imputed_idx += 1
original_idx += 1
else:
original_idx += 1
X_original[full_mask] = self.missing_values
return X_original
|
Convert the data back to the original representation.
Inverts the `transform` operation performed on an array.
This operation can only be performed after :class:`SimpleImputer` is
instantiated with `add_indicator=True`.
Note that `inverse_transform` can only invert the transform in
features that have binary indicators for missing values. If a feature
has no missing values at `fit` time, the feature won't have a binary
indicator, and the imputation done at `transform` time won't be
inverted.
.. versionadded:: 0.24
Parameters
----------
X : array-like of shape (n_samples, n_features + n_features_missing_indicator)
The imputed data to be reverted to original data. It has to be
an augmented array of imputed data and the missing indicator mask.
Returns
-------
X_original : ndarray of shape (n_samples, n_features)
The original `X` with missing values as it was prior
to imputation.
|
inverse_transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/_base.py
|
BSD-3-Clause
|
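A round-trip sketch of `inverse_transform`, which requires `add_indicator=True` as stated above (toy data, illustrative only):

import numpy as np
from sklearn.impute import SimpleImputer

X = np.array([[1.0, 2.0], [np.nan, 4.0], [5.0, np.nan]])
imp = SimpleImputer(strategy="mean", add_indicator=True)
Xt = imp.fit_transform(X)         # imputed columns + indicator columns
X_back = imp.inverse_transform(Xt)
print(np.isnan(X_back))           # the original missing entries are back as NaN
# [[False False]
#  [ True False]
#  [False  True]]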
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Input features.
- If `input_features` is `None`, then `feature_names_in_` is
used as feature names in. If `feature_names_in_` is not defined,
then the following input feature names are generated:
`["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
- If `input_features` is an array-like, then `input_features` must
match `feature_names_in_` if `feature_names_in_` is defined.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
"""
check_is_fitted(self, "n_features_in_")
input_features = _check_feature_names_in(self, input_features)
non_missing_mask = np.logical_not(_get_mask(self.statistics_, np.nan))
names = input_features[non_missing_mask]
return self._concatenate_indicator_feature_names_out(names, input_features)
|
Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Input features.
- If `input_features` is `None`, then `feature_names_in_` is
used as feature names in. If `feature_names_in_` is not defined,
then the following input feature names are generated:
`["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
- If `input_features` is an array-like, then `input_features` must
match `feature_names_in_` if `feature_names_in_` is defined.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
|
get_feature_names_out
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/_base.py
|
BSD-3-Clause
|
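An illustrative call to `get_feature_names_out`, showing the generated input names and the appended indicator names (assumed toy data):

import numpy as np
from sklearn.impute import SimpleImputer

X = np.array([[1.0, np.nan, 3.0], [4.0, 5.0, np.nan]])
imp = SimpleImputer(strategy="mean", add_indicator=True).fit(X)
print(imp.get_feature_names_out())
# ['x0' 'x1' 'x2' 'missingindicator_x1' 'missingindicator_x2']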
def _get_missing_features_info(self, X):
"""Compute the imputer mask and the indices of the features
containing missing values.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The input data with missing values. Note that `X` has been
checked in :meth:`fit` and :meth:`transform` before to call this
function.
Returns
-------
imputer_mask : {ndarray, sparse matrix} of shape \
(n_samples, n_features)
The imputer mask of the original data.
features_with_missing : ndarray of shape (n_features_with_missing)
The features containing missing values.
"""
if not self._precomputed:
imputer_mask = _get_mask(X, self.missing_values)
else:
imputer_mask = X
if sp.issparse(X):
imputer_mask.eliminate_zeros()
if self.features == "missing-only":
                # count the number of True values in each column (feature).
n_missing = imputer_mask.sum(axis=0)
if self.sparse is False:
imputer_mask = imputer_mask.toarray()
elif imputer_mask.format == "csr":
imputer_mask = imputer_mask.tocsc()
else:
if not self._precomputed:
imputer_mask = _get_mask(X, self.missing_values)
else:
imputer_mask = X
if self.features == "missing-only":
n_missing = imputer_mask.sum(axis=0)
if self.sparse is True:
imputer_mask = sp.csc_matrix(imputer_mask)
if self.features == "all":
features_indices = np.arange(X.shape[1])
else:
features_indices = np.flatnonzero(n_missing)
return imputer_mask, features_indices
|
Compute the imputer mask and the indices of the features
containing missing values.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The input data with missing values. Note that `X` has been
checked in :meth:`fit` and :meth:`transform` before to call this
function.
Returns
-------
imputer_mask : {ndarray, sparse matrix} of shape (n_samples, n_features)
The imputer mask of the original data.
features_with_missing : ndarray of shape (n_features_with_missing)
The features containing missing values.
|
_get_missing_features_info
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/_base.py
|
BSD-3-Clause
|
def _fit(self, X, y=None, precomputed=False):
"""Fit the transformer on `X`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
If `precomputed=True`, then `X` is a mask of the input data.
precomputed : bool
Whether the input data is a mask.
Returns
-------
imputer_mask : {ndarray, sparse matrix} of shape (n_samples, \
n_features)
The imputer mask of the original data.
"""
if precomputed:
if not (hasattr(X, "dtype") and X.dtype.kind == "b"):
raise ValueError("precomputed is True but the input data is not a mask")
self._precomputed = True
else:
self._precomputed = False
# Need not validate X again as it would have already been validated
# in the Imputer calling MissingIndicator
if not self._precomputed:
X = self._validate_input(X, in_fit=True)
else:
# only create `n_features_in_` in the precomputed case
_check_n_features(self, X, reset=True)
self._n_features = X.shape[1]
missing_features_info = self._get_missing_features_info(X)
self.features_ = missing_features_info[1]
return missing_features_info[0]
|
Fit the transformer on `X`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
If `precomputed=True`, then `X` is a mask of the input data.
precomputed : bool
Whether the input data is a mask.
Returns
-------
imputer_mask : {ndarray, sparse matrix} of shape (n_samples, n_features)
The imputer mask of the original data.
|
_fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/_base.py
|
BSD-3-Clause
|
def transform(self, X):
"""Generate missing values indicator for `X`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data to complete.
Returns
-------
Xt : {ndarray, sparse matrix} of shape (n_samples, n_features) \
or (n_samples, n_features_with_missing)
The missing indicator for input data. The data type of `Xt`
will be boolean.
"""
check_is_fitted(self)
# Need not validate X again as it would have already been validated
# in the Imputer calling MissingIndicator
if not self._precomputed:
X = self._validate_input(X, in_fit=False)
else:
if not (hasattr(X, "dtype") and X.dtype.kind == "b"):
raise ValueError("precomputed is True but the input data is not a mask")
imputer_mask, features = self._get_missing_features_info(X)
if self.features == "missing-only":
features_diff_fit_trans = np.setdiff1d(features, self.features_)
if self.error_on_new and features_diff_fit_trans.size > 0:
raise ValueError(
"The features {} have missing values "
"in transform but have no missing values "
"in fit.".format(features_diff_fit_trans)
)
if self.features_.size < self._n_features:
imputer_mask = imputer_mask[:, self.features_]
return imputer_mask
|
Generate missing values indicator for `X`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data to complete.
Returns
-------
Xt : {ndarray, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_features_with_missing)
The missing indicator for input data. The data type of `Xt`
will be boolean.
|
transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/_base.py
|
BSD-3-Clause
|
def fit_transform(self, X, y=None):
"""Generate missing values indicator for `X`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data to complete.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
Xt : {ndarray, sparse matrix} of shape (n_samples, n_features) \
or (n_samples, n_features_with_missing)
The missing indicator for input data. The data type of `Xt`
will be boolean.
"""
imputer_mask = self._fit(X, y)
if self.features_.size < self._n_features:
imputer_mask = imputer_mask[:, self.features_]
return imputer_mask
|
Generate missing values indicator for `X`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data to complete.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
Xt : {ndarray, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_features_with_missing)
The missing indicator for input data. The data type of `Xt`
will be boolean.
|
fit_transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/_base.py
|
BSD-3-Clause
|
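A minimal usage sketch of the public `MissingIndicator` transformer (toy data assumed):

import numpy as np
from sklearn.impute import MissingIndicator

X = np.array([[np.nan, 1.0, 3.0], [4.0, 0.0, np.nan], [8.0, 1.0, 0.0]])
indicator = MissingIndicator()   # features="missing-only" by default
mask = indicator.fit_transform(X)
print(indicator.features_)       # [0 2] -- columns that had missing values
print(mask)
# [[ True False]
#  [False  True]
#  [False False]]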
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Input features.
- If `input_features` is `None`, then `feature_names_in_` is
used as feature names in. If `feature_names_in_` is not defined,
then the following input feature names are generated:
`["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
- If `input_features` is an array-like, then `input_features` must
match `feature_names_in_` if `feature_names_in_` is defined.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
"""
check_is_fitted(self, "n_features_in_")
input_features = _check_feature_names_in(self, input_features)
prefix = self.__class__.__name__.lower()
return np.asarray(
[
f"{prefix}_{feature_name}"
for feature_name in input_features[self.features_]
],
dtype=object,
)
|
Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Input features.
- If `input_features` is `None`, then `feature_names_in_` is
used as feature names in. If `feature_names_in_` is not defined,
then the following input feature names are generated:
`["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
- If `input_features` is an array-like, then `input_features` must
match `feature_names_in_` if `feature_names_in_` is defined.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
|
get_feature_names_out
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/_base.py
|
BSD-3-Clause
|
def _assign_where(X1, X2, cond):
"""Assign X2 to X1 where cond is True.
Parameters
----------
X1 : ndarray or dataframe of shape (n_samples, n_features)
Data.
X2 : ndarray of shape (n_samples, n_features)
Data to be assigned.
cond : ndarray of shape (n_samples, n_features)
Boolean mask to assign data.
"""
if hasattr(X1, "mask"): # pandas dataframes
X1.mask(cond=cond, other=X2, inplace=True)
else: # ndarrays
X1[cond] = X2[cond]
|
Assign X2 to X1 where cond is True.
Parameters
----------
X1 : ndarray or dataframe of shape (n_samples, n_features)
Data.
X2 : ndarray of shape (n_samples, n_features)
Data to be assigned.
cond : ndarray of shape (n_samples, n_features)
Boolean mask to assign data.
|
_assign_where
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/_iterative.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/_iterative.py
|
BSD-3-Clause
|
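The helper above is a thin wrapper around boolean-mask assignment; for ndarrays it is equivalent to the following sketch (illustrative only):

import numpy as np

X1 = np.array([[0.0, 0.0], [0.0, 0.0]])
X2 = np.array([[1.0, 2.0], [3.0, 4.0]])
cond = np.array([[True, False], [False, True]])

X1[cond] = X2[cond]  # same effect as _assign_where(X1, X2, cond) for ndarrays
print(X1)
# [[1. 0.]
#  [0. 4.]]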
def _impute_one_feature(
self,
X_filled,
mask_missing_values,
feat_idx,
neighbor_feat_idx,
estimator=None,
fit_mode=True,
params=None,
):
"""Impute a single feature from the others provided.
This function predicts the missing values of one of the features using
the current estimates of all the other features. The `estimator` must
support `return_std=True` in its `predict` method for this function
to work.
Parameters
----------
X_filled : ndarray
Input data with the most recent imputations.
mask_missing_values : ndarray
Input data's missing indicator matrix.
feat_idx : int
Index of the feature currently being imputed.
neighbor_feat_idx : ndarray
Indices of the features to be used in imputing `feat_idx`.
estimator : object
The estimator to use at this step of the round-robin imputation.
If `sample_posterior=True`, the estimator must support
`return_std` in its `predict` method.
If None, it will be cloned from self._estimator.
fit_mode : boolean, default=True
Whether to fit and predict with the estimator or just predict.
params : dict
Additional params routed to the individual estimator.
Returns
-------
X_filled : ndarray
Input data with `X_filled[missing_row_mask, feat_idx]` updated.
estimator : estimator with sklearn API
The fitted estimator used to impute
`X_filled[missing_row_mask, feat_idx]`.
"""
if estimator is None and fit_mode is False:
raise ValueError(
"If fit_mode is False, then an already-fitted "
"estimator should be passed in."
)
if estimator is None:
estimator = clone(self._estimator)
missing_row_mask = mask_missing_values[:, feat_idx]
if fit_mode:
X_train = _safe_indexing(
_safe_indexing(X_filled, neighbor_feat_idx, axis=1),
~missing_row_mask,
axis=0,
)
y_train = _safe_indexing(
_safe_indexing(X_filled, feat_idx, axis=1),
~missing_row_mask,
axis=0,
)
estimator.fit(X_train, y_train, **params)
# if no missing values, don't predict
if np.sum(missing_row_mask) == 0:
return X_filled, estimator
# get posterior samples if there is at least one missing value
X_test = _safe_indexing(
_safe_indexing(X_filled, neighbor_feat_idx, axis=1),
missing_row_mask,
axis=0,
)
if self.sample_posterior:
mus, sigmas = estimator.predict(X_test, return_std=True)
imputed_values = np.zeros(mus.shape, dtype=X_filled.dtype)
# two types of problems: (1) non-positive sigmas
# (2) mus outside legal range of min_value and max_value
# (results in inf sample)
positive_sigmas = sigmas > 0
imputed_values[~positive_sigmas] = mus[~positive_sigmas]
mus_too_low = mus < self._min_value[feat_idx]
imputed_values[mus_too_low] = self._min_value[feat_idx]
mus_too_high = mus > self._max_value[feat_idx]
imputed_values[mus_too_high] = self._max_value[feat_idx]
# the rest can be sampled without statistical issues
inrange_mask = positive_sigmas & ~mus_too_low & ~mus_too_high
mus = mus[inrange_mask]
sigmas = sigmas[inrange_mask]
a = (self._min_value[feat_idx] - mus) / sigmas
b = (self._max_value[feat_idx] - mus) / sigmas
truncated_normal = stats.truncnorm(a=a, b=b, loc=mus, scale=sigmas)
imputed_values[inrange_mask] = truncated_normal.rvs(
random_state=self.random_state_
)
else:
imputed_values = estimator.predict(X_test)
imputed_values = np.clip(
imputed_values, self._min_value[feat_idx], self._max_value[feat_idx]
)
# update the feature
_safe_assign(
X_filled,
imputed_values,
row_indexer=missing_row_mask,
column_indexer=feat_idx,
)
return X_filled, estimator
|
Impute a single feature from the others provided.
This function predicts the missing values of one of the features using
the current estimates of all the other features. The `estimator` must
support `return_std=True` in its `predict` method for this function
to work.
Parameters
----------
X_filled : ndarray
Input data with the most recent imputations.
mask_missing_values : ndarray
Input data's missing indicator matrix.
feat_idx : int
Index of the feature currently being imputed.
neighbor_feat_idx : ndarray
Indices of the features to be used in imputing `feat_idx`.
estimator : object
The estimator to use at this step of the round-robin imputation.
If `sample_posterior=True`, the estimator must support
`return_std` in its `predict` method.
If None, it will be cloned from self._estimator.
fit_mode : boolean, default=True
Whether to fit and predict with the estimator or just predict.
params : dict
Additional params routed to the individual estimator.
Returns
-------
X_filled : ndarray
Input data with `X_filled[missing_row_mask, feat_idx]` updated.
estimator : estimator with sklearn API
The fitted estimator used to impute
`X_filled[missing_row_mask, feat_idx]`.
|
_impute_one_feature
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/_iterative.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/_iterative.py
|
BSD-3-Clause
|
def _get_neighbor_feat_idx(self, n_features, feat_idx, abs_corr_mat):
"""Get a list of other features to predict `feat_idx`.
        If `self.n_nearest_features` is strictly less than the total number of
        features, then use a probability proportional to the absolute
correlation between `feat_idx` and each other feature to randomly
choose a subsample of the other features (without replacement).
Parameters
----------
n_features : int
Number of features in `X`.
feat_idx : int
Index of the feature currently being imputed.
abs_corr_mat : ndarray, shape (n_features, n_features)
Absolute correlation matrix of `X`. The diagonal has been zeroed
out and each feature has been normalized to sum to 1. Can be None.
Returns
-------
neighbor_feat_idx : array-like
The features to use to impute `feat_idx`.
"""
if self.n_nearest_features is not None and self.n_nearest_features < n_features:
p = abs_corr_mat[:, feat_idx]
neighbor_feat_idx = self.random_state_.choice(
np.arange(n_features), self.n_nearest_features, replace=False, p=p
)
else:
inds_left = np.arange(feat_idx)
inds_right = np.arange(feat_idx + 1, n_features)
neighbor_feat_idx = np.concatenate((inds_left, inds_right))
return neighbor_feat_idx
|
Get a list of other features to predict `feat_idx`.
If `self.n_nearest_features` is strictly less than the total number of
features, then use a probability proportional to the absolute
correlation between `feat_idx` and each other feature to randomly
choose a subsample of the other features (without replacement).
Parameters
----------
n_features : int
Number of features in `X`.
feat_idx : int
Index of the feature currently being imputed.
abs_corr_mat : ndarray, shape (n_features, n_features)
Absolute correlation matrix of `X`. The diagonal has been zeroed
out and each feature has been normalized to sum to 1. Can be None.
Returns
-------
neighbor_feat_idx : array-like
The features to use to impute `feat_idx`.
|
_get_neighbor_feat_idx
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/_iterative.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/_iterative.py
|
BSD-3-Clause
|
def _get_ordered_idx(self, mask_missing_values):
"""Decide in what order we will update the features.
As a homage to the MICE R package, we will have 4 main options of
how to order the updates, and use a random order if anything else
is specified.
Also, this function skips features which have no missing values.
Parameters
----------
mask_missing_values : array-like, shape (n_samples, n_features)
Input data's missing indicator matrix, where `n_samples` is the
number of samples and `n_features` is the number of features.
Returns
-------
ordered_idx : ndarray, shape (n_features,)
The order in which to impute the features.
"""
frac_of_missing_values = mask_missing_values.mean(axis=0)
if self.skip_complete:
missing_values_idx = np.flatnonzero(frac_of_missing_values)
else:
missing_values_idx = np.arange(np.shape(frac_of_missing_values)[0])
if self.imputation_order == "roman":
ordered_idx = missing_values_idx
elif self.imputation_order == "arabic":
ordered_idx = missing_values_idx[::-1]
elif self.imputation_order == "ascending":
n = len(frac_of_missing_values) - len(missing_values_idx)
ordered_idx = np.argsort(frac_of_missing_values, kind="mergesort")[n:]
elif self.imputation_order == "descending":
n = len(frac_of_missing_values) - len(missing_values_idx)
ordered_idx = np.argsort(frac_of_missing_values, kind="mergesort")[n:][::-1]
elif self.imputation_order == "random":
ordered_idx = missing_values_idx
self.random_state_.shuffle(ordered_idx)
return ordered_idx
|
Decide in what order we will update the features.
As a homage to the MICE R package, we will have 4 main options of
how to order the updates, and use a random order if anything else
is specified.
Also, this function skips features which have no missing values.
Parameters
----------
mask_missing_values : array-like, shape (n_samples, n_features)
Input data's missing indicator matrix, where `n_samples` is the
number of samples and `n_features` is the number of features.
Returns
-------
ordered_idx : ndarray, shape (n_features,)
The order in which to impute the features.
|
_get_ordered_idx
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/_iterative.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/_iterative.py
|
BSD-3-Clause
|
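The `imputation_order` values handled above map directly to the public `IterativeImputer` parameter, and the resulting order can be inspected through the documented `imputation_sequence_` attribute. A usage sketch (requires the experimental import; the toy data are assumptions):

import numpy as np
from sklearn.experimental import enable_iterative_imputer  # noqa: F401
from sklearn.impute import IterativeImputer

X = np.array(
    [[1.0, 2.0, np.nan], [3.0, np.nan, 6.0], [np.nan, 5.0, 9.0], [7.0, 8.0, 10.0]]
)
for order in ("ascending", "descending", "roman", "arabic", "random"):
    imp = IterativeImputer(imputation_order=order, random_state=0, max_iter=2)
    imp.fit_transform(X)
    # feat_idx of the first imputation round reflects the chosen order
    print(order, [t.feat_idx for t in imp.imputation_sequence_[:3]])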
def _get_abs_corr_mat(self, X_filled, tolerance=1e-6):
"""Get absolute correlation matrix between features.
Parameters
----------
X_filled : ndarray, shape (n_samples, n_features)
Input data with the most recent imputations.
tolerance : float, default=1e-6
`abs_corr_mat` can have nans, which will be replaced
with `tolerance`.
Returns
-------
abs_corr_mat : ndarray, shape (n_features, n_features)
Absolute correlation matrix of `X` at the beginning of the
current round. The diagonal has been zeroed out and each feature's
absolute correlations with all others have been normalized to sum
to 1.
"""
n_features = X_filled.shape[1]
if self.n_nearest_features is None or self.n_nearest_features >= n_features:
return None
with np.errstate(invalid="ignore"):
# if a feature in the neighborhood has only a single value
# (e.g., categorical feature), the std. dev. will be null and
# np.corrcoef will raise a warning due to a division by zero
abs_corr_mat = np.abs(np.corrcoef(X_filled.T))
# np.corrcoef is not defined for features with zero std
abs_corr_mat[np.isnan(abs_corr_mat)] = tolerance
# ensures exploration, i.e. at least some probability of sampling
np.clip(abs_corr_mat, tolerance, None, out=abs_corr_mat)
# features are not their own neighbors
np.fill_diagonal(abs_corr_mat, 0)
# needs to sum to 1 for np.random.choice sampling
abs_corr_mat = normalize(abs_corr_mat, norm="l1", axis=0, copy=False)
return abs_corr_mat
|
Get absolute correlation matrix between features.
Parameters
----------
X_filled : ndarray, shape (n_samples, n_features)
Input data with the most recent imputations.
tolerance : float, default=1e-6
`abs_corr_mat` can have nans, which will be replaced
with `tolerance`.
Returns
-------
abs_corr_mat : ndarray, shape (n_features, n_features)
Absolute correlation matrix of `X` at the beginning of the
current round. The diagonal has been zeroed out and each feature's
absolute correlations with all others have been normalized to sum
to 1.
|
_get_abs_corr_mat
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/_iterative.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/_iterative.py
|
BSD-3-Clause
|
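The correlation-based neighbour selection above is activated via `n_nearest_features`. A sketch of the public usage (illustrative data and sizes):

import numpy as np
from sklearn.experimental import enable_iterative_imputer  # noqa: F401
from sklearn.impute import IterativeImputer

rng = np.random.RandomState(0)
X = rng.randn(50, 10)
X[rng.rand(50, 10) < 0.1] = np.nan  # roughly 10% missing at random

# Each feature is imputed from at most 3 other features, sampled with
# probability proportional to the absolute correlations computed above.
imp = IterativeImputer(n_nearest_features=3, random_state=0)
X_filled = imp.fit_transform(X)
print(np.isnan(X_filled).any())  # False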
def _initial_imputation(self, X, in_fit=False):
"""Perform initial imputation for input `X`.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
in_fit : bool, default=False
Whether function is called in :meth:`fit`.
Returns
-------
Xt : ndarray of shape (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
X_filled : ndarray of shape (n_samples, n_features)
Input data with the most recent imputations.
mask_missing_values : ndarray of shape (n_samples, n_features)
Input data's missing indicator matrix, where `n_samples` is the
number of samples and `n_features` is the number of features,
masked by non-missing features.
X_missing_mask : ndarray, shape (n_samples, n_features)
Input data's mask matrix indicating missing datapoints, where
`n_samples` is the number of samples and `n_features` is the
number of features.
"""
if is_scalar_nan(self.missing_values):
ensure_all_finite = "allow-nan"
else:
ensure_all_finite = True
X = validate_data(
self,
X,
dtype=FLOAT_DTYPES,
order="F",
reset=in_fit,
ensure_all_finite=ensure_all_finite,
)
_check_inputs_dtype(X, self.missing_values)
X_missing_mask = _get_mask(X, self.missing_values)
mask_missing_values = X_missing_mask.copy()
# TODO (1.8): remove this once the deprecation is removed. In the meantime,
# we need to catch the warning to avoid false positives.
catch_warning = (
self.initial_strategy == "constant" and not self.keep_empty_features
)
if self.initial_imputer_ is None:
self.initial_imputer_ = SimpleImputer(
missing_values=self.missing_values,
strategy=self.initial_strategy,
fill_value=self.fill_value,
keep_empty_features=self.keep_empty_features,
).set_output(transform="default")
# TODO (1.8): remove this once the deprecation is removed to keep only
# the code in the else case.
if catch_warning:
with warnings.catch_warnings():
warnings.simplefilter("ignore", FutureWarning)
X_filled = self.initial_imputer_.fit_transform(X)
else:
X_filled = self.initial_imputer_.fit_transform(X)
else:
# TODO (1.8): remove this once the deprecation is removed to keep only
# the code in the else case.
if catch_warning:
with warnings.catch_warnings():
warnings.simplefilter("ignore", FutureWarning)
X_filled = self.initial_imputer_.transform(X)
else:
X_filled = self.initial_imputer_.transform(X)
if in_fit:
self._is_empty_feature = np.all(mask_missing_values, axis=0)
if not self.keep_empty_features:
# drop empty features
Xt = X[:, ~self._is_empty_feature]
mask_missing_values = mask_missing_values[:, ~self._is_empty_feature]
if self.initial_imputer_.get_params()["strategy"] == "constant":
# The constant strategy has a specific behavior and preserve empty
# features even with ``keep_empty_features=False``. We need to drop
# the column for consistency.
# TODO (1.8): remove this `if` branch once the following issue is
# addressed:
# https://github.com/scikit-learn/scikit-learn/issues/29827
X_filled = X_filled[:, ~self._is_empty_feature]
else:
# mark empty features as not missing and keep the original
# imputation
mask_missing_values[:, self._is_empty_feature] = False
Xt = X
Xt[:, self._is_empty_feature] = X_filled[:, self._is_empty_feature]
return Xt, X_filled, mask_missing_values, X_missing_mask
|
Perform initial imputation for input `X`.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
in_fit : bool, default=False
Whether function is called in :meth:`fit`.
Returns
-------
Xt : ndarray of shape (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
X_filled : ndarray of shape (n_samples, n_features)
Input data with the most recent imputations.
mask_missing_values : ndarray of shape (n_samples, n_features)
Input data's missing indicator matrix, where `n_samples` is the
number of samples and `n_features` is the number of features,
masked by non-missing features.
X_missing_mask : ndarray, shape (n_samples, n_features)
Input data's mask matrix indicating missing datapoints, where
`n_samples` is the number of samples and `n_features` is the
number of features.
|
_initial_imputation
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/_iterative.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/_iterative.py
|
BSD-3-Clause
|
def fit_transform(self, X, y=None, **params):
"""Fit the imputer on `X` and return the transformed `X`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
**params : dict
Parameters routed to the `fit` method of the sub-estimator via the
metadata routing API.
.. versionadded:: 1.5
Only available if
`sklearn.set_config(enable_metadata_routing=True)` is set. See
:ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
Xt : array-like, shape (n_samples, n_features)
The imputed input data.
"""
_raise_for_params(params, self, "fit")
routed_params = process_routing(
self,
"fit",
**params,
)
self.random_state_ = getattr(
self, "random_state_", check_random_state(self.random_state)
)
if self.estimator is None:
from ..linear_model import BayesianRidge
self._estimator = BayesianRidge()
else:
self._estimator = clone(self.estimator)
self.imputation_sequence_ = []
self.initial_imputer_ = None
X, Xt, mask_missing_values, complete_mask = self._initial_imputation(
X, in_fit=True
)
super()._fit_indicator(complete_mask)
X_indicator = super()._transform_indicator(complete_mask)
if self.max_iter == 0 or np.all(mask_missing_values):
self.n_iter_ = 0
return super()._concatenate_indicator(Xt, X_indicator)
# Edge case: a single feature, we return the initial imputation.
if Xt.shape[1] == 1:
self.n_iter_ = 0
return super()._concatenate_indicator(Xt, X_indicator)
self._min_value = self._validate_limit(
self.min_value,
"min",
X.shape[1],
self._is_empty_feature,
self.keep_empty_features,
)
self._max_value = self._validate_limit(
self.max_value,
"max",
X.shape[1],
self._is_empty_feature,
self.keep_empty_features,
)
if not np.all(np.greater(self._max_value, self._min_value)):
raise ValueError("One (or more) features have min_value >= max_value.")
# order in which to impute
# note this is probably too slow for large feature data (d > 100000)
# and a better way would be good.
# see: https://goo.gl/KyCNwj and subsequent comments
ordered_idx = self._get_ordered_idx(mask_missing_values)
self.n_features_with_missing_ = len(ordered_idx)
abs_corr_mat = self._get_abs_corr_mat(Xt)
n_samples, n_features = Xt.shape
if self.verbose > 0:
print("[IterativeImputer] Completing matrix with shape %s" % (X.shape,))
start_t = time()
if not self.sample_posterior:
Xt_previous = Xt.copy()
normalized_tol = self.tol * np.max(np.abs(X[~mask_missing_values]))
for self.n_iter_ in range(1, self.max_iter + 1):
if self.imputation_order == "random":
ordered_idx = self._get_ordered_idx(mask_missing_values)
for feat_idx in ordered_idx:
neighbor_feat_idx = self._get_neighbor_feat_idx(
n_features, feat_idx, abs_corr_mat
)
Xt, estimator = self._impute_one_feature(
Xt,
mask_missing_values,
feat_idx,
neighbor_feat_idx,
estimator=None,
fit_mode=True,
params=routed_params.estimator.fit,
)
estimator_triplet = _ImputerTriplet(
feat_idx, neighbor_feat_idx, estimator
)
self.imputation_sequence_.append(estimator_triplet)
if self.verbose > 1:
print(
"[IterativeImputer] Ending imputation round "
"%d/%d, elapsed time %0.2f"
% (self.n_iter_, self.max_iter, time() - start_t)
)
if not self.sample_posterior:
inf_norm = np.linalg.norm(Xt - Xt_previous, ord=np.inf, axis=None)
if self.verbose > 0:
print(
"[IterativeImputer] Change: {}, scaled tolerance: {} ".format(
inf_norm, normalized_tol
)
)
if inf_norm < normalized_tol:
if self.verbose > 0:
print("[IterativeImputer] Early stopping criterion reached.")
break
Xt_previous = Xt.copy()
else:
if not self.sample_posterior:
warnings.warn(
"[IterativeImputer] Early stopping criterion not reached.",
ConvergenceWarning,
)
_assign_where(Xt, X, cond=~mask_missing_values)
return super()._concatenate_indicator(Xt, X_indicator)
|
Fit the imputer on `X` and return the transformed `X`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
**params : dict
Parameters routed to the `fit` method of the sub-estimator via the
metadata routing API.
.. versionadded:: 1.5
Only available if
`sklearn.set_config(enable_metadata_routing=True)` is set. See
:ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
Xt : array-like, shape (n_samples, n_features)
The imputed input data.
|
fit_transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/_iterative.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/_iterative.py
|
BSD-3-Clause
|
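A minimal end-to-end sketch of `fit_transform` with the default estimator (toy data invented for illustration):

import numpy as np
from sklearn.experimental import enable_iterative_imputer  # noqa: F401
from sklearn.impute import IterativeImputer

X = np.array([[1.0, 2.0], [3.0, 6.0], [4.0, 8.0], [np.nan, 3.0], [7.0, np.nan]])
imp = IterativeImputer(max_iter=10, random_state=0)
print(imp.fit_transform(X))
# The missing entries are replaced by round-robin regression estimates;
# imp.n_iter_ reports how many rounds ran before the tolerance was reached.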
def transform(self, X):
"""Impute all missing values in `X`.
        Note that this is stochastic, and that if `random_state` is not fixed,
        repeated calls or permuted input will yield different results.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input data to complete.
Returns
-------
Xt : array-like, shape (n_samples, n_features)
The imputed input data.
"""
check_is_fitted(self)
X, Xt, mask_missing_values, complete_mask = self._initial_imputation(
X, in_fit=False
)
X_indicator = super()._transform_indicator(complete_mask)
if self.n_iter_ == 0 or np.all(mask_missing_values):
return super()._concatenate_indicator(Xt, X_indicator)
imputations_per_round = len(self.imputation_sequence_) // self.n_iter_
i_rnd = 0
if self.verbose > 0:
print("[IterativeImputer] Completing matrix with shape %s" % (X.shape,))
start_t = time()
for it, estimator_triplet in enumerate(self.imputation_sequence_):
Xt, _ = self._impute_one_feature(
Xt,
mask_missing_values,
estimator_triplet.feat_idx,
estimator_triplet.neighbor_feat_idx,
estimator=estimator_triplet.estimator,
fit_mode=False,
)
if not (it + 1) % imputations_per_round:
if self.verbose > 1:
print(
"[IterativeImputer] Ending imputation round "
"%d/%d, elapsed time %0.2f"
% (i_rnd + 1, self.n_iter_, time() - start_t)
)
i_rnd += 1
_assign_where(Xt, X, cond=~mask_missing_values)
return super()._concatenate_indicator(Xt, X_indicator)
|
Impute all missing values in `X`.
Note that this is stochastic, and that if `random_state` is not fixed,
repeated calls or permuted input will yield different results.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input data to complete.
Returns
-------
Xt : array-like, shape (n_samples, n_features)
The imputed input data.
|
transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/_iterative.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/_iterative.py
|
BSD-3-Clause
|
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Input features.
- If `input_features` is `None`, then `feature_names_in_` is
used as feature names in. If `feature_names_in_` is not defined,
then the following input feature names are generated:
`["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
- If `input_features` is an array-like, then `input_features` must
match `feature_names_in_` if `feature_names_in_` is defined.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
"""
check_is_fitted(self, "n_features_in_")
input_features = _check_feature_names_in(self, input_features)
names = self.initial_imputer_.get_feature_names_out(input_features)
return self._concatenate_indicator_feature_names_out(names, input_features)
|
Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Input features.
- If `input_features` is `None`, then `feature_names_in_` is
used as feature names in. If `feature_names_in_` is not defined,
then the following input feature names are generated:
`["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
- If `input_features` is an array-like, then `input_features` must
match `feature_names_in_` if `feature_names_in_` is defined.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
|
get_feature_names_out
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/_iterative.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/_iterative.py
|
BSD-3-Clause
|
def get_metadata_routing(self):
"""Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.5
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
"""
router = MetadataRouter(owner=self.__class__.__name__).add(
estimator=self.estimator,
method_mapping=MethodMapping().add(callee="fit", caller="fit"),
)
return router
|
Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.5
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
|
get_metadata_routing
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/_iterative.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/_iterative.py
|
BSD-3-Clause
|
def _calc_impute(self, dist_pot_donors, n_neighbors, fit_X_col, mask_fit_X_col):
"""Helper function to impute a single column.
Parameters
----------
dist_pot_donors : ndarray of shape (n_receivers, n_potential_donors)
Distance matrix between the receivers and potential donors from
training set. There must be at least one non-nan distance between
a receiver and a potential donor.
n_neighbors : int
Number of neighbors to consider.
fit_X_col : ndarray of shape (n_potential_donors,)
Column of potential donors from training set.
mask_fit_X_col : ndarray of shape (n_potential_donors,)
Missing mask for fit_X_col.
Returns
-------
imputed_values : ndarray of shape (n_receivers,)
Imputed values for the receivers.
"""
# Get donors
donors_idx = np.argpartition(dist_pot_donors, n_neighbors - 1, axis=1)[
:, :n_neighbors
]
# Get weight matrix from distance matrix
donors_dist = dist_pot_donors[
np.arange(donors_idx.shape[0])[:, None], donors_idx
]
weight_matrix = _get_weights(donors_dist, self.weights)
# fill nans with zeros
if weight_matrix is not None:
weight_matrix[np.isnan(weight_matrix)] = 0.0
else:
weight_matrix = np.ones_like(donors_dist)
weight_matrix[np.isnan(donors_dist)] = 0.0
# Retrieve donor values and calculate kNN average
donors = fit_X_col.take(donors_idx)
donors_mask = mask_fit_X_col.take(donors_idx)
donors = np.ma.array(donors, mask=donors_mask)
return np.ma.average(donors, axis=1, weights=weight_matrix).data
|
Helper function to impute a single column.
Parameters
----------
dist_pot_donors : ndarray of shape (n_receivers, n_potential_donors)
Distance matrix between the receivers and potential donors from
training set. There must be at least one non-nan distance between
a receiver and a potential donor.
n_neighbors : int
Number of neighbors to consider.
fit_X_col : ndarray of shape (n_potential_donors,)
Column of potential donors from training set.
mask_fit_X_col : ndarray of shape (n_potential_donors,)
Missing mask for fit_X_col.
Returns
-------
imputed_values : ndarray of shape (n_receivers,)
Imputed values for the receivers.
|
_calc_impute
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/_knn.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/_knn.py
|
BSD-3-Clause
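An illustrative standalone numpy sketch of the donor logic above (hypothetical toy data, not the private helper itself): select the `n_neighbors` closest donors per receiver and take a masked, weight-aware average of their column values.

import numpy as np

# Toy inputs: 2 receivers, 3 potential donors.
dist = np.array([[1.0, 3.0, 2.0], [4.0, 1.0, np.nan]])  # (n_receivers, n_potential_donors)
fit_col = np.array([10.0, 20.0, 30.0])                  # donor values for one column
donor_missing = np.array([False, False, True])          # donors missing in that column
n_neighbors = 2

# Indices of the n_neighbors smallest distances per receiver (NaN sorts last).
donors_idx = np.argpartition(dist, n_neighbors - 1, axis=1)[:, :n_neighbors]
donors = np.ma.array(fit_col.take(donors_idx), mask=donor_missing.take(donors_idx))

# Uniform weights, zeroed wherever the distance itself is undefined.
weights = np.ones(donors_idx.shape)
weights[np.isnan(np.take_along_axis(dist, donors_idx, axis=1))] = 0.0

print(np.ma.average(donors, axis=1, weights=weights).data)  # one imputed value per receiver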
|
def fit(self, X, y=None):
"""Fit the imputer on X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self : object
The fitted `KNNImputer` class instance.
"""
# Check data integrity and calling arguments
if not is_scalar_nan(self.missing_values):
ensure_all_finite = True
else:
ensure_all_finite = "allow-nan"
X = validate_data(
self,
X,
accept_sparse=False,
dtype=FLOAT_DTYPES,
ensure_all_finite=ensure_all_finite,
copy=self.copy,
)
self._fit_X = X
self._mask_fit_X = _get_mask(self._fit_X, self.missing_values)
self._valid_mask = ~np.all(self._mask_fit_X, axis=0)
super()._fit_indicator(self._mask_fit_X)
return self
|
Fit the imputer on X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self : object
The fitted `KNNImputer` class instance.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/_knn.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/_knn.py
|
BSD-3-Clause
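A minimal usage sketch (toy data): fitting only validates and stores the training matrix and its missing-value mask; the neighbour search and imputation happen in `transform`.

import numpy as np
from sklearn.impute import KNNImputer

X_train = np.array([[1.0, 2.0, np.nan], [3.0, 4.0, 3.0], [np.nan, 6.0, 5.0], [8.0, 8.0, 7.0]])
imputer = KNNImputer(n_neighbors=2).fit(X_train)
print(imputer.n_features_in_)  # 3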
|
def transform(self, X):
"""Impute all missing values in X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input data to complete.
Returns
-------
X : array-like of shape (n_samples, n_output_features)
The imputed dataset. `n_output_features` is the number of features
that are not always missing during `fit`.
"""
check_is_fitted(self)
if not is_scalar_nan(self.missing_values):
ensure_all_finite = True
else:
ensure_all_finite = "allow-nan"
X = validate_data(
self,
X,
accept_sparse=False,
dtype=FLOAT_DTYPES,
force_writeable=True,
ensure_all_finite=ensure_all_finite,
copy=self.copy,
reset=False,
)
mask = _get_mask(X, self.missing_values)
mask_fit_X = self._mask_fit_X
valid_mask = self._valid_mask
X_indicator = super()._transform_indicator(mask)
# Removes columns where the training data is all nan
if not np.any(mask[:, valid_mask]):
# No missing values in X
if self.keep_empty_features:
Xc = X
Xc[:, ~valid_mask] = 0
else:
Xc = X[:, valid_mask]
# Even if there are no missing values in X, we still concatenate Xc
# with the missing value indicator matrix, X_indicator.
# This is to ensure that the output maintains consistency in terms
# of columns, regardless of whether missing values exist in X or not.
return super()._concatenate_indicator(Xc, X_indicator)
row_missing_idx = np.flatnonzero(mask[:, valid_mask].any(axis=1))
non_missing_fix_X = np.logical_not(mask_fit_X)
# Maps from indices from X to indices in dist matrix
dist_idx_map = np.zeros(X.shape[0], dtype=int)
dist_idx_map[row_missing_idx] = np.arange(row_missing_idx.shape[0])
def process_chunk(dist_chunk, start):
row_missing_chunk = row_missing_idx[start : start + len(dist_chunk)]
# Find and impute missing by column
for col in range(X.shape[1]):
if not valid_mask[col]:
# column was all missing during training
continue
col_mask = mask[row_missing_chunk, col]
if not np.any(col_mask):
# column has no missing values
continue
(potential_donors_idx,) = np.nonzero(non_missing_fix_X[:, col])
# receivers_idx are indices in X
receivers_idx = row_missing_chunk[np.flatnonzero(col_mask)]
# distances for samples that needed imputation for column
dist_subset = dist_chunk[dist_idx_map[receivers_idx] - start][
:, potential_donors_idx
]
# receivers with all nan distances impute with mean
all_nan_dist_mask = np.isnan(dist_subset).all(axis=1)
all_nan_receivers_idx = receivers_idx[all_nan_dist_mask]
if all_nan_receivers_idx.size:
col_mean = np.ma.array(
self._fit_X[:, col], mask=mask_fit_X[:, col]
).mean()
X[all_nan_receivers_idx, col] = col_mean
if len(all_nan_receivers_idx) == len(receivers_idx):
# all receivers imputed with mean
continue
# receivers with at least one defined distance
receivers_idx = receivers_idx[~all_nan_dist_mask]
dist_subset = dist_chunk[dist_idx_map[receivers_idx] - start][
:, potential_donors_idx
]
n_neighbors = min(self.n_neighbors, len(potential_donors_idx))
value = self._calc_impute(
dist_subset,
n_neighbors,
self._fit_X[potential_donors_idx, col],
mask_fit_X[potential_donors_idx, col],
)
X[receivers_idx, col] = value
# process in fixed-memory chunks
gen = pairwise_distances_chunked(
X[row_missing_idx, :],
self._fit_X,
metric=self.metric,
missing_values=self.missing_values,
ensure_all_finite=ensure_all_finite,
reduce_func=process_chunk,
)
for chunk in gen:
# process_chunk modifies X in place. No return value.
pass
if self.keep_empty_features:
Xc = X
Xc[:, ~valid_mask] = 0
else:
Xc = X[:, valid_mask]
return super()._concatenate_indicator(Xc, X_indicator)
|
Impute all missing values in X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input data to complete.
Returns
-------
X : array-like of shape (n_samples, n_output_features)
The imputed dataset. `n_output_features` is the number of features
that are not always missing during `fit`.
|
transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/_knn.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/_knn.py
|
BSD-3-Clause
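A usage sketch mirroring the class-level KNNImputer example from the scikit-learn docs: each missing entry is replaced by the (optionally distance-weighted) mean of its nearest neighbours' values in that column.

import numpy as np
from sklearn.impute import KNNImputer

X = np.array([[1, 2, np.nan], [3, 4, 3], [np.nan, 6, 5], [8, 8, 7]])
imputer = KNNImputer(n_neighbors=2)
print(imputer.fit_transform(X))
# Expected (per the class docstring):
# [[1.  2.  4. ]
#  [3.  4.  3. ]
#  [5.5 6.  5. ]
#  [8.  8.  7. ]]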
|
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Input features.
- If `input_features` is `None`, then `feature_names_in_` is
used as feature names in. If `feature_names_in_` is not defined,
then the following input feature names are generated:
`["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
- If `input_features` is an array-like, then `input_features` must
match `feature_names_in_` if `feature_names_in_` is defined.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
"""
check_is_fitted(self, "n_features_in_")
input_features = _check_feature_names_in(self, input_features)
names = input_features[self._valid_mask]
return self._concatenate_indicator_feature_names_out(names, input_features)
|
Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Input features.
- If `input_features` is `None`, then `feature_names_in_` is
used as feature names in. If `feature_names_in_` is not defined,
then the following input feature names are generated:
`["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
- If `input_features` is an array-like, then `input_features` must
match `feature_names_in_` if `feature_names_in_` is defined.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
|
get_feature_names_out
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/_knn.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/_knn.py
|
BSD-3-Clause
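An illustrative sketch (assumed column names, expected output hedged): a column that is entirely missing during `fit` is dropped from the output names, while indicator names are appended when `add_indicator=True`.

import numpy as np
import pandas as pd
from sklearn.impute import KNNImputer

X = pd.DataFrame({"a": [1.0, np.nan, 3.0], "b": [np.nan, np.nan, np.nan], "c": [1.0, 2.0, 3.0]})
imp = KNNImputer(add_indicator=True).fit(X)
print(imp.get_feature_names_out())
# Expected something like: ['a' 'c' 'missingindicator_a' 'missingindicator_b']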
|
def test_assign_where(X1_type):
"""Check the behaviour of the private helpers `_assign_where`."""
rng = np.random.RandomState(0)
n_samples, n_features = 10, 5
X1 = _convert_container(rng.randn(n_samples, n_features), constructor_name=X1_type)
X2 = rng.randn(n_samples, n_features)
mask = rng.randint(0, 2, size=(n_samples, n_features)).astype(bool)
_assign_where(X1, X2, mask)
if X1_type == "dataframe":
X1 = X1.to_numpy()
assert_allclose(X1[mask], X2[mask])
|
Check the behaviour of the private helpers `_assign_where`.
|
test_assign_where
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/tests/test_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/tests/test_base.py
|
BSD-3-Clause
|
def test_imputers_feature_names_out_pandas(imputer, add_indicator):
"""Check feature names out for imputers."""
pd = pytest.importorskip("pandas")
marker = np.nan
imputer = imputer.set_params(add_indicator=add_indicator, missing_values=marker)
X = np.array(
[
[marker, 1, 5, 3, marker, 1],
[2, marker, 1, 4, marker, 2],
[6, 3, 7, marker, marker, 3],
[1, 2, 9, 8, marker, 4],
]
)
X_df = pd.DataFrame(X, columns=["a", "b", "c", "d", "e", "f"])
imputer.fit(X_df)
names = imputer.get_feature_names_out()
if add_indicator:
expected_names = [
"a",
"b",
"c",
"d",
"f",
"missingindicator_a",
"missingindicator_b",
"missingindicator_d",
"missingindicator_e",
]
assert_array_equal(expected_names, names)
else:
expected_names = ["a", "b", "c", "d", "f"]
assert_array_equal(expected_names, names)
|
Check feature names out for imputers.
|
test_imputers_feature_names_out_pandas
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/tests/test_common.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/tests/test_common.py
|
BSD-3-Clause
|
def test_keep_empty_features(imputer, keep_empty_features):
"""Check that the imputer keeps features with only missing values."""
X = np.array([[np.nan, 1], [np.nan, 2], [np.nan, 3]])
imputer = imputer.set_params(
add_indicator=False, keep_empty_features=keep_empty_features
)
for method in ["fit_transform", "transform"]:
X_imputed = getattr(imputer, method)(X)
if keep_empty_features:
assert X_imputed.shape == X.shape
else:
assert X_imputed.shape == (X.shape[0], X.shape[1] - 1)
|
Check that the imputer keeps features with only missing values.
|
test_keep_empty_features
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/tests/test_common.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/tests/test_common.py
|
BSD-3-Clause
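A standalone sketch of the behaviour exercised by this test, using SimpleImputer as one concrete example (toy data): an all-missing column is dropped by default and kept as zeros when `keep_empty_features=True`.

import numpy as np
from sklearn.impute import SimpleImputer

X = np.array([[np.nan, 1.0], [np.nan, 2.0], [np.nan, 3.0]])

print(SimpleImputer().fit_transform(X).shape)                    # (3, 1): empty column dropped
print(SimpleImputer(keep_empty_features=True).fit_transform(X))  # empty column kept, filled with 0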
|
def test_imputation_adds_missing_indicator_if_add_indicator_is_true(
imputer, missing_value_test
):
"""Check that missing indicator always exists when add_indicator=True.
Non-regression test for gh-26590.
"""
X_train = np.array([[0, np.nan], [1, 2]])
# Test data where missing_value_test variable can be set to np.nan or 1.
X_test = np.array([[0, missing_value_test], [1, 2]])
imputer.set_params(add_indicator=True)
imputer.fit(X_train)
X_test_imputed_with_indicator = imputer.transform(X_test)
assert X_test_imputed_with_indicator.shape == (2, 3)
imputer.set_params(add_indicator=False)
imputer.fit(X_train)
X_test_imputed_without_indicator = imputer.transform(X_test)
assert X_test_imputed_without_indicator.shape == (2, 2)
assert_allclose(
X_test_imputed_with_indicator[:, :-1], X_test_imputed_without_indicator
)
if np.isnan(missing_value_test):
expected_missing_indicator = [1, 0]
else:
expected_missing_indicator = [0, 0]
assert_allclose(X_test_imputed_with_indicator[:, -1], expected_missing_indicator)
|
Check that missing indicator always exists when add_indicator=True.
Non-regression test for gh-26590.
|
test_imputation_adds_missing_indicator_if_add_indicator_is_true
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/tests/test_common.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/tests/test_common.py
|
BSD-3-Clause
|
def _check_statistics(
X, X_true, strategy, statistics, missing_values, sparse_container
):
"""Utility function for testing imputation for a given strategy.
Test with dense and sparse arrays
Check that:
- the statistics (mean, median, mode) are correct
- the missing values are imputed correctly"""
err_msg = "Parameters: strategy = %s, missing_values = %s, sparse = {0}" % (
strategy,
missing_values,
)
assert_ae = assert_array_equal
if X.dtype.kind == "f" or X_true.dtype.kind == "f":
assert_ae = assert_array_almost_equal
# Normal matrix
imputer = SimpleImputer(missing_values=missing_values, strategy=strategy)
X_trans = imputer.fit(X).transform(X.copy())
assert_ae(imputer.statistics_, statistics, err_msg=err_msg.format(False))
assert_ae(X_trans, X_true, err_msg=err_msg.format(False))
# Sparse matrix
imputer = SimpleImputer(missing_values=missing_values, strategy=strategy)
imputer.fit(sparse_container(X))
X_trans = imputer.transform(sparse_container(X.copy()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_ae(imputer.statistics_, statistics, err_msg=err_msg.format(True))
assert_ae(X_trans, X_true, err_msg=err_msg.format(True))
|
Utility function for testing imputation for a given strategy.
Test with dense and sparse arrays
Check that:
- the statistics (mean, median, mode) are correct
- the missing values are imputed correctly
|
_check_statistics
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/tests/test_impute.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/tests/test_impute.py
|
BSD-3-Clause
|
def test_iterative_imputer_keep_empty_features(initial_strategy):
"""Check the behaviour of the iterative imputer with different initial strategy
and keeping empty features (i.e. features containing only missing values).
"""
X = np.array([[1, np.nan, 2], [3, np.nan, np.nan]])
imputer = IterativeImputer(
initial_strategy=initial_strategy, keep_empty_features=True
)
X_imputed = imputer.fit_transform(X)
assert_allclose(X_imputed[:, 1], 0)
X_imputed = imputer.transform(X)
assert_allclose(X_imputed[:, 1], 0)
|
Check the behaviour of the iterative imputer with different initial strategy
and keeping empty features (i.e. features containing only missing values).
|
test_iterative_imputer_keep_empty_features
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/tests/test_impute.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/tests/test_impute.py
|
BSD-3-Clause
|
def test_iterative_imputer_constant_fill_value():
"""Check that we propagate properly the parameter `fill_value`."""
X = np.array([[-1, 2, 3, -1], [4, -1, 5, -1], [6, 7, -1, -1], [8, 9, 0, -1]])
fill_value = 100
imputer = IterativeImputer(
missing_values=-1,
initial_strategy="constant",
fill_value=fill_value,
max_iter=0,
keep_empty_features=True,
)
imputer.fit_transform(X)
assert_array_equal(imputer.initial_imputer_.statistics_, fill_value)
|
Check that we propagate properly the parameter `fill_value`.
|
test_iterative_imputer_constant_fill_value
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/tests/test_impute.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/tests/test_impute.py
|
BSD-3-Clause
|
def test_iterative_imputer_min_max_value_remove_empty():
"""Check that we properly apply the empty feature mask to `min_value` and
`max_value`.
Non-regression test for https://github.com/scikit-learn/scikit-learn/issues/29355
"""
# Intentionally make column 2 as a missing column, then the bound of the imputed
# value of column 3 should be (4, 5)
X = np.array(
[
[1, 2, np.nan, np.nan],
[4, 5, np.nan, 6],
[7, 8, np.nan, np.nan],
[10, 11, np.nan, 12],
]
)
min_value = [-np.inf, -np.inf, -np.inf, 4]
max_value = [np.inf, np.inf, np.inf, 5]
X_imputed = IterativeImputer(
min_value=min_value,
max_value=max_value,
keep_empty_features=False,
).fit_transform(X)
X_without_missing_column = np.delete(X, 2, axis=1)
assert X_imputed.shape == X_without_missing_column.shape
assert np.min(X_imputed[np.isnan(X_without_missing_column)]) == pytest.approx(4)
assert np.max(X_imputed[np.isnan(X_without_missing_column)]) == pytest.approx(5)
# Intentionally make column 3 as a missing column, then the bound of the imputed
# value of column 2 should be (3.5, 6)
X = np.array(
[
[1, 2, np.nan, np.nan],
[4, 5, 6, np.nan],
[7, 8, np.nan, np.nan],
[10, 11, 12, np.nan],
]
)
min_value = [-np.inf, -np.inf, 3.5, -np.inf]
max_value = [np.inf, np.inf, 6, np.inf]
X_imputed = IterativeImputer(
min_value=min_value,
max_value=max_value,
keep_empty_features=False,
).fit_transform(X)
X_without_missing_column = X[:, :3]
assert X_imputed.shape == X_without_missing_column.shape
assert np.min(X_imputed[np.isnan(X_without_missing_column)]) == pytest.approx(3.5)
assert np.max(X_imputed[np.isnan(X_without_missing_column)]) == pytest.approx(6)
|
Check that we properly apply the empty feature mask to `min_value` and
`max_value`.
Non-regression test for https://github.com/scikit-learn/scikit-learn/issues/29355
|
test_iterative_imputer_min_max_value_remove_empty
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/tests/test_impute.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/tests/test_impute.py
|
BSD-3-Clause
|
def test_knn_imputer_keep_empty_features(keep_empty_features):
"""Check the behaviour of `keep_empty_features` for `KNNImputer`."""
X = np.array([[1, np.nan, 2], [3, np.nan, np.nan]])
imputer = KNNImputer(keep_empty_features=keep_empty_features)
for method in ["fit_transform", "transform"]:
X_imputed = getattr(imputer, method)(X)
if keep_empty_features:
assert X_imputed.shape == X.shape
assert_array_equal(X_imputed[:, 1], 0)
else:
assert X_imputed.shape == (X.shape[0], X.shape[1] - 1)
|
Check the behaviour of `keep_empty_features` for `KNNImputer`.
|
test_knn_imputer_keep_empty_features
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/tests/test_impute.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/tests/test_impute.py
|
BSD-3-Clause
|
def test_missing_indicator_feature_names_out():
"""Check that missing indicator return the feature names with a prefix."""
pd = pytest.importorskip("pandas")
missing_values = np.nan
X = pd.DataFrame(
[
[missing_values, missing_values, 1, missing_values],
[4, missing_values, 2, 10],
],
columns=["a", "b", "c", "d"],
)
indicator = MissingIndicator(missing_values=missing_values).fit(X)
feature_names = indicator.get_feature_names_out()
expected_names = ["missingindicator_a", "missingindicator_b", "missingindicator_d"]
assert_array_equal(expected_names, feature_names)
|
Check that missing indicator return the feature names with a prefix.
|
test_missing_indicator_feature_names_out
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/tests/test_impute.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/tests/test_impute.py
|
BSD-3-Clause
|
def test_imputer_lists_fit_transform():
"""Check transform uses object dtype when fitted on an object dtype.
Non-regression test for #19572.
"""
X = [["a", "b"], ["c", "b"], ["a", "a"]]
imp_frequent = SimpleImputer(strategy="most_frequent").fit(X)
X_trans = imp_frequent.transform([[np.nan, np.nan]])
assert X_trans.dtype == object
assert_array_equal(X_trans, [["a", "b"]])
|
Check transform uses object dtype when fitted on an object dtype.
Non-regression test for #19572.
|
test_imputer_lists_fit_transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/tests/test_impute.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/tests/test_impute.py
|
BSD-3-Clause
|
def test_imputer_transform_preserves_numeric_dtype(dtype_test):
"""Check transform preserves numeric dtype independent of fit dtype."""
X = np.asarray(
[[1.2, 3.4, np.nan], [np.nan, 1.2, 1.3], [4.2, 2, 1]], dtype=np.float64
)
imp = SimpleImputer().fit(X)
X_test = np.asarray([[np.nan, np.nan, np.nan]], dtype=dtype_test)
X_trans = imp.transform(X_test)
assert X_trans.dtype == dtype_test
|
Check transform preserves numeric dtype independent of fit dtype.
|
test_imputer_transform_preserves_numeric_dtype
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/tests/test_impute.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/tests/test_impute.py
|
BSD-3-Clause
|
def test_simple_imputer_constant_keep_empty_features(array_type, keep_empty_features):
"""Check the behaviour of `keep_empty_features` with `strategy='constant'.
For backward compatibility, a column full of missing values will always be
fill and never dropped.
"""
X = np.array([[np.nan, 2], [np.nan, 3], [np.nan, 6]])
X = _convert_container(X, array_type)
fill_value = 10
imputer = SimpleImputer(
strategy="constant",
fill_value=fill_value,
keep_empty_features=keep_empty_features,
)
for method in ["fit_transform", "transform"]:
# TODO(1.8): Remove the condition and still call getattr(imputer, method)(X)
if method.startswith("fit") and not keep_empty_features:
warn_msg = '`strategy="constant"`, empty features are not dropped. '
with pytest.warns(FutureWarning, match=warn_msg):
X_imputed = getattr(imputer, method)(X)
else:
X_imputed = getattr(imputer, method)(X)
assert X_imputed.shape == X.shape
constant_feature = (
X_imputed[:, 0].toarray() if array_type == "sparse" else X_imputed[:, 0]
)
assert_array_equal(constant_feature, fill_value)
|
Check the behaviour of `keep_empty_features` with `strategy='constant'`.
For backward compatibility, a column full of missing values will always be
filled and never dropped.
|
test_simple_imputer_constant_keep_empty_features
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/tests/test_impute.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/tests/test_impute.py
|
BSD-3-Clause
|
def test_simple_imputer_keep_empty_features(strategy, array_type, keep_empty_features):
"""Check the behaviour of `keep_empty_features` with all strategies but
'constant'.
"""
X = np.array([[np.nan, 2], [np.nan, 3], [np.nan, 6]])
X = _convert_container(X, array_type)
imputer = SimpleImputer(strategy=strategy, keep_empty_features=keep_empty_features)
for method in ["fit_transform", "transform"]:
X_imputed = getattr(imputer, method)(X)
if keep_empty_features:
assert X_imputed.shape == X.shape
constant_feature = (
X_imputed[:, 0].toarray() if array_type == "sparse" else X_imputed[:, 0]
)
assert_array_equal(constant_feature, 0)
else:
assert X_imputed.shape == (X.shape[0], X.shape[1] - 1)
|
Check the behaviour of `keep_empty_features` with all strategies but
'constant'.
|
test_simple_imputer_keep_empty_features
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/tests/test_impute.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/tests/test_impute.py
|
BSD-3-Clause
|
def test_simple_imputer_constant_fill_value_casting():
"""Check that we raise a proper error message when we cannot cast the fill value
to the input data type. Otherwise, check that the casting is done properly.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/28309
"""
# cannot cast fill_value at fit
fill_value = 1.5
X_int64 = np.array([[1, 2, 3], [2, 3, 4]], dtype=np.int64)
imputer = SimpleImputer(
strategy="constant", fill_value=fill_value, missing_values=2
)
err_msg = f"fill_value={fill_value!r} (of type {type(fill_value)!r}) cannot be cast"
with pytest.raises(ValueError, match=re.escape(err_msg)):
imputer.fit(X_int64)
# cannot cast fill_value at transform
X_float64 = np.array([[1, 2, 3], [2, 3, 4]], dtype=np.float64)
imputer.fit(X_float64)
err_msg = (
f"The dtype of the filling value (i.e. {imputer.statistics_.dtype!r}) "
"cannot be cast"
)
with pytest.raises(ValueError, match=re.escape(err_msg)):
imputer.transform(X_int64)
# check that no error is raised when having the same kind of dtype
fill_value_list = [np.float64(1.5), 1.5, 1]
X_float32 = X_float64.astype(np.float32)
for fill_value in fill_value_list:
imputer = SimpleImputer(
strategy="constant", fill_value=fill_value, missing_values=2
)
X_trans = imputer.fit_transform(X_float32)
assert X_trans.dtype == X_float32.dtype
|
Check that we raise a proper error message when we cannot cast the fill value
to the input data type. Otherwise, check that the casting is done properly.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/28309
|
test_simple_imputer_constant_fill_value_casting
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/tests/test_impute.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/tests/test_impute.py
|
BSD-3-Clause
|
def test_iterative_imputer_no_empty_features(strategy):
"""Check the behaviour of `keep_empty_features` with no empty features.
With no-empty features, we should get the same imputation whatever the
parameter `keep_empty_features`.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/29375
"""
X = np.array([[np.nan, 0, 1], [2, np.nan, 3], [4, 5, np.nan]])
imputer_drop_empty_features = IterativeImputer(
initial_strategy=strategy, fill_value=1, keep_empty_features=False
)
imputer_keep_empty_features = IterativeImputer(
initial_strategy=strategy, fill_value=1, keep_empty_features=True
)
assert_allclose(
imputer_drop_empty_features.fit_transform(X),
imputer_keep_empty_features.fit_transform(X),
)
|
Check the behaviour of `keep_empty_features` with no empty features.
With no-empty features, we should get the same imputation whatever the
parameter `keep_empty_features`.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/29375
|
test_iterative_imputer_no_empty_features
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/tests/test_impute.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/tests/test_impute.py
|
BSD-3-Clause
|
def test_iterative_imputer_with_empty_features(strategy, X_test):
"""Check the behaviour of `keep_empty_features` in the presence of empty features.
With `keep_empty_features=True`, the empty feature will be imputed with the value
defined by the initial imputation.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/29375
"""
X_train = np.array(
[[np.nan, np.nan, 0, 1], [np.nan, 2, np.nan, 3], [np.nan, 4, 5, np.nan]]
)
imputer_drop_empty_features = IterativeImputer(
initial_strategy=strategy, fill_value=0, keep_empty_features=False
)
X_train_drop_empty_features = imputer_drop_empty_features.fit_transform(X_train)
X_test_drop_empty_features = imputer_drop_empty_features.transform(X_test)
imputer_keep_empty_features = IterativeImputer(
initial_strategy=strategy, fill_value=0, keep_empty_features=True
)
X_train_keep_empty_features = imputer_keep_empty_features.fit_transform(X_train)
X_test_keep_empty_features = imputer_keep_empty_features.transform(X_test)
assert_allclose(X_train_drop_empty_features, X_train_keep_empty_features[:, 1:])
assert_allclose(X_train_keep_empty_features[:, 0], 0)
assert X_train_drop_empty_features.shape[1] == X_test_drop_empty_features.shape[1]
assert X_train_keep_empty_features.shape[1] == X_test_keep_empty_features.shape[1]
|
Check the behaviour of `keep_empty_features` in the presence of empty features.
With `keep_empty_features=True`, the empty feature will be imputed with the value
defined by the initial imputation.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/29375
|
test_iterative_imputer_with_empty_features
|
python
|
scikit-learn/scikit-learn
|
sklearn/impute/tests/test_impute.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/impute/tests/test_impute.py
|
BSD-3-Clause
|
def _grid_from_X(X, percentiles, is_categorical, grid_resolution, custom_values):
"""Generate a grid of points based on the percentiles of X.
The grid is a cartesian product between the columns of ``values``. The
jth column of ``values`` consists of ``grid_resolution`` equally spaced
points between the percentiles of the jth column of X.
If ``grid_resolution`` is bigger than the number of unique values in the
jth column of X, or if the feature is a categorical feature (according to
`is_categorical`), then those unique values are used instead.
Parameters
----------
X : array-like of shape (n_samples, n_target_features)
The data.
percentiles : tuple of float
The percentiles which are used to construct the extreme values of
the grid. Must be in [0, 1].
is_categorical : list of bool
For each feature, tells whether it is categorical or not. If a feature
is categorical, then the values used will be the unique ones
(i.e. categories) instead of the percentiles.
grid_resolution : int
The number of equally spaced points to be placed on the grid for each
feature.
custom_values : dict
Mapping from column index of X to an array-like of values where
the partial dependence should be calculated for that feature.
Returns
-------
grid : ndarray of shape (n_points, n_target_features)
A value for each feature at each point in the grid. ``n_points`` is
always ``<= grid_resolution ** X.shape[1]``.
values : list of 1d ndarrays
The values with which the grid has been created. The size of each
array ``values[j]`` is either ``grid_resolution`` or the number of
unique values in ``X[:, j]`` if j is not in ``custom_values``.
If j is in ``custom_values``, then it is the length of ``custom_values[j]``.
"""
if not isinstance(percentiles, Iterable) or len(percentiles) != 2:
raise ValueError("'percentiles' must be a sequence of 2 elements.")
if not all(0 <= x <= 1 for x in percentiles):
raise ValueError("'percentiles' values must be in [0, 1].")
if percentiles[0] >= percentiles[1]:
raise ValueError("percentiles[0] must be strictly less than percentiles[1].")
if grid_resolution <= 1:
raise ValueError("'grid_resolution' must be strictly greater than 1.")
def _convert_custom_values(values):
# Convert custom types such that object types are always used for string arrays
dtype = object if any(isinstance(v, str) for v in values) else None
return np.asarray(values, dtype=dtype)
custom_values = {k: _convert_custom_values(v) for k, v in custom_values.items()}
if any(v.ndim != 1 for v in custom_values.values()):
error_string = ", ".join(
f"Feature {k}: {v.ndim} dimensions"
for k, v in custom_values.items()
if v.ndim != 1
)
raise ValueError(
"The custom grid for some features is not a one-dimensional array. "
f"{error_string}"
)
values = []
# TODO: we should handle missing values (i.e. `np.nan`) specifically and store them
# in a different Bunch attribute.
for feature, is_cat in enumerate(is_categorical):
if feature in custom_values:
# Use values in the custom range
axis = custom_values[feature]
else:
try:
uniques = np.unique(_safe_indexing(X, feature, axis=1))
except TypeError as exc:
# `np.unique` will fail in the presence of `np.nan` and `str` categories
# due to sorting. Temporarily, we reraise an error explaining the problem.
raise ValueError(
f"The column #{feature} contains mixed data types. Finding unique "
"categories fail due to sorting. It usually means that the column "
"contains `np.nan` values together with `str` categories. Such use "
"case is not yet supported in scikit-learn."
) from exc
if is_cat or uniques.shape[0] < grid_resolution:
# Use the unique values either because:
# - the feature has fewer unique values than grid_resolution
# - feature is categorical
axis = uniques
else:
# create axis based on percentiles and grid resolution
emp_percentiles = mquantiles(
_safe_indexing(X, feature, axis=1), prob=percentiles, axis=0
)
if np.allclose(emp_percentiles[0], emp_percentiles[1]):
raise ValueError(
"percentiles are too close to each other, "
"unable to build the grid. Please choose percentiles "
"that are further apart."
)
axis = np.linspace(
emp_percentiles[0],
emp_percentiles[1],
num=grid_resolution,
endpoint=True,
)
values.append(axis)
return cartesian(values), values
|
Generate a grid of points based on the percentiles of X.
The grid is a cartesian product between the columns of ``values``. The
jth column of ``values`` consists of ``grid_resolution`` equally spaced
points between the percentiles of the jth column of X.
If ``grid_resolution`` is bigger than the number of unique values in the
jth column of X, or if the feature is a categorical feature (according to
`is_categorical`), then those unique values are used instead.
Parameters
----------
X : array-like of shape (n_samples, n_target_features)
The data.
percentiles : tuple of float
The percentiles which are used to construct the extreme values of
the grid. Must be in [0, 1].
is_categorical : list of bool
For each feature, tells whether it is categorical or not. If a feature
is categorical, then the values used will be the unique ones
(i.e. categories) instead of the percentiles.
grid_resolution : int
The number of equally spaced points to be placed on the grid for each
feature.
custom_values : dict
Mapping from column index of X to an array-like of values where
the partial dependence should be calculated for that feature.
Returns
-------
grid : ndarray of shape (n_points, n_target_features)
A value for each feature at each point in the grid. ``n_points`` is
always ``<= grid_resolution ** X.shape[1]``.
values : list of 1d ndarrays
The values with which the grid has been created. The size of each
array ``values[j]`` is either ``grid_resolution`` or the number of
unique values in ``X[:, j]`` if j is not in ``custom_values``.
If j is in ``custom_values``, then it is the length of ``custom_values[j]``.
|
_grid_from_X
|
python
|
scikit-learn/scikit-learn
|
sklearn/inspection/_partial_dependence.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/inspection/_partial_dependence.py
|
BSD-3-Clause
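An illustrative standalone sketch of the grid construction for one numeric column (toy data, not the private helper): take `grid_resolution` equally spaced points between the empirical percentiles, falling back to the unique values when the column has fewer of them.

import numpy as np
from scipy.stats.mstats import mquantiles

rng = np.random.RandomState(0)
col = rng.lognormal(size=200)
grid_resolution, percentiles = 10, (0.05, 0.95)

uniques = np.unique(col)
if uniques.shape[0] < grid_resolution:
    axis = uniques  # low-cardinality (or categorical) column: use its unique values
else:
    lo, hi = mquantiles(col, prob=percentiles)
    axis = np.linspace(lo, hi, num=grid_resolution, endpoint=True)

print(axis.shape)  # (10,) here, since the column has 200 distinct values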
|
def _partial_dependence_recursion(est, grid, features):
"""Calculate partial dependence via the recursion method.
The recursion method is in particular enabled for tree-based estimators.
For each `grid` value, a weighted tree traversal is performed: if a split node
involves an input feature of interest, the corresponding left or right branch
is followed; otherwise both branches are followed, each branch being weighted
by the fraction of training samples that entered that branch. Finally, the
partial dependence is given by a weighted average of all the visited leaves
values.
This method is more efficient in terms of speed than the `'brute'` method
(:func:`~sklearn.inspection._partial_dependence._partial_dependence_brute`).
However, here, the partial dependence computation is done explicitly with the
`X` used during training of `est`.
Parameters
----------
est : BaseEstimator
A fitted estimator object implementing :term:`predict` or
:term:`decision_function`. Multioutput-multiclass classifiers are not
supported. Note that `'recursion'` is only supported for some tree-based
estimators (namely
:class:`~sklearn.ensemble.GradientBoostingClassifier`,
:class:`~sklearn.ensemble.GradientBoostingRegressor`,
:class:`~sklearn.ensemble.HistGradientBoostingClassifier`,
:class:`~sklearn.ensemble.HistGradientBoostingRegressor`,
:class:`~sklearn.tree.DecisionTreeRegressor`,
:class:`~sklearn.ensemble.RandomForestRegressor`,
).
grid : array-like of shape (n_points, n_target_features)
The grid of feature values for which the partial dependence is calculated.
Note that `n_points` is the number of points in the grid and `n_target_features`
is the number of features you are doing partial dependence at.
features : array-like of {int, str}
The feature (e.g. `[0]`) or pair of interacting features
(e.g. `[(0, 1)]`) for which the partial dependency should be computed.
Returns
-------
averaged_predictions : array-like of shape (n_targets, n_points)
The averaged predictions for the given `grid` of features values.
Note that `n_targets` is the number of targets (e.g. 1 for binary
classification, `n_tasks` for multi-output regression, and `n_classes` for
multiclass classification) and `n_points` is the number of points in the `grid`.
"""
averaged_predictions = est._compute_partial_dependence_recursion(grid, features)
if averaged_predictions.ndim == 1:
# reshape to (1, n_points) for consistency with
# _partial_dependence_brute
averaged_predictions = averaged_predictions.reshape(1, -1)
return averaged_predictions
|
Calculate partial dependence via the recursion method.
The recursion method is in particular enabled for tree-based estimators.
For each `grid` value, a weighted tree traversal is performed: if a split node
involves an input feature of interest, the corresponding left or right branch
is followed; otherwise both branches are followed, each branch being weighted
by the fraction of training samples that entered that branch. Finally, the
partial dependence is given by a weighted average of all the visited leaves
values.
This method is more efficient in terms of speed than the `'brute'` method
(:func:`~sklearn.inspection._partial_dependence._partial_dependence_brute`).
However, here, the partial dependence computation is done explicitly with the
`X` used during training of `est`.
Parameters
----------
est : BaseEstimator
A fitted estimator object implementing :term:`predict` or
:term:`decision_function`. Multioutput-multiclass classifiers are not
supported. Note that `'recursion'` is only supported for some tree-based
estimators (namely
:class:`~sklearn.ensemble.GradientBoostingClassifier`,
:class:`~sklearn.ensemble.GradientBoostingRegressor`,
:class:`~sklearn.ensemble.HistGradientBoostingClassifier`,
:class:`~sklearn.ensemble.HistGradientBoostingRegressor`,
:class:`~sklearn.tree.DecisionTreeRegressor`,
:class:`~sklearn.ensemble.RandomForestRegressor`,
).
grid : array-like of shape (n_points, n_target_features)
The grid of feature values for which the partial dependence is calculated.
Note that `n_points` is the number of points in the grid and `n_target_features`
is the number of features you are doing partial dependence at.
features : array-like of {int, str}
The feature (e.g. `[0]`) or pair of interacting features
(e.g. `[(0, 1)]`) for which the partial dependency should be computed.
Returns
-------
averaged_predictions : array-like of shape (n_targets, n_points)
The averaged predictions for the given `grid` of features values.
Note that `n_targets` is the number of targets (e.g. 1 for binary
classification, `n_tasks` for multi-output regression, and `n_classes` for
multiclass classification) and `n_points` is the number of points in the `grid`.
|
_partial_dependence_recursion
|
python
|
scikit-learn/scikit-learn
|
sklearn/inspection/_partial_dependence.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/inspection/_partial_dependence.py
|
BSD-3-Clause
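A hedged usage sketch of the recursion path through the public `partial_dependence` function, on an estimator that supports it (synthetic data):

from sklearn.datasets import make_regression
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.inspection import partial_dependence

X, y = make_regression(n_samples=200, n_features=4, random_state=0)
est = HistGradientBoostingRegressor(random_state=0).fit(X, y)

# 'recursion' performs the weighted tree traversal described above; it only
# supports kind='average' and ignores sample_weight.
res = partial_dependence(est, X, features=[0], method="recursion", kind="average")
print(res["average"].shape)  # (1, n_grid_points); 100 points with the default grid_resolution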
|
def _partial_dependence_brute(
est, grid, features, X, response_method, sample_weight=None
):
"""Calculate partial dependence via the brute force method.
The brute method explicitly averages the predictions of an estimator over a
grid of feature values.
For each `grid` value, all the samples from `X` have their variables of
interest replaced by that specific `grid` value. The predictions are then made
and averaged across the samples.
This method is slower than the `'recursion'`
(:func:`~sklearn.inspection._partial_dependence._partial_dependence_recursion`)
version for estimators with this second option. However, with the `'brute'`
force method, the average will be done with the given `X` and not the `X`
used during training, as it is done in the `'recursion'` version. Therefore
the average can always accept `sample_weight` (even when the estimator was
fitted without).
Parameters
----------
est : BaseEstimator
A fitted estimator object implementing :term:`predict`,
:term:`predict_proba`, or :term:`decision_function`.
Multioutput-multiclass classifiers are not supported.
grid : array-like of shape (n_points, n_target_features)
The grid of feature values for which the partial dependence is calculated.
Note that `n_points` is the number of points in the grid and `n_target_features`
is the number of features you are doing partial dependence at.
features : array-like of {int, str}
The feature (e.g. `[0]`) or pair of interacting features
(e.g. `[(0, 1)]`) for which the partial dependency should be computed.
X : array-like of shape (n_samples, n_features)
`X` is used to generate values for the complement features. That is, for
each value in `grid`, the method will average the prediction of each
sample from `X` having that grid value for `features`.
response_method : {'auto', 'predict_proba', 'decision_function'}, \
default='auto'
Specifies whether to use :term:`predict_proba` or
:term:`decision_function` as the target response. For regressors
this parameter is ignored and the response is always the output of
:term:`predict`. By default, :term:`predict_proba` is tried first
and we revert to :term:`decision_function` if it doesn't exist.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights are used to calculate weighted means when averaging the
model output. If `None`, then samples are equally weighted. Note that
`sample_weight` does not change the individual predictions.
Returns
-------
averaged_predictions : array-like of shape (n_targets, n_points)
The averaged predictions for the given `grid` of features values.
Note that `n_targets` is the number of targets (e.g. 1 for binary
classification, `n_tasks` for multi-output regression, and `n_classes` for
multiclass classification) and `n_points` is the number of points in the `grid`.
predictions : array-like
The predictions for the given `grid` of features values over the samples
from `X`. For non-multioutput regression and binary classification the
shape is `(n_instances, n_points)` and for multi-output regression and
multiclass classification the shape is `(n_targets, n_instances, n_points)`,
where `n_targets` is the number of targets (`n_tasks` for multi-output
regression, and `n_classes` for multiclass classification), `n_instances`
is the number of instances in `X`, and `n_points` is the number of points
in the `grid`.
"""
predictions = []
averaged_predictions = []
if response_method == "auto":
response_method = (
"predict" if is_regressor(est) else ["predict_proba", "decision_function"]
)
X_eval = X.copy()
for new_values in grid:
for i, variable in enumerate(features):
_safe_assign(X_eval, new_values[i], column_indexer=variable)
# Note: predictions is of shape
# (n_points,) for non-multioutput regressors
# (n_points, n_tasks) for multioutput regressors
# (n_points, 1) for the regressors in cross_decomposition (I think)
# (n_points, 1) for binary classification (positive class already selected)
# (n_points, n_classes) for multiclass classification
pred, _ = _get_response_values(est, X_eval, response_method=response_method)
predictions.append(pred)
# average over samples
averaged_predictions.append(np.average(pred, axis=0, weights=sample_weight))
n_samples = X.shape[0]
# reshape to (n_targets, n_instances, n_points) where n_targets is:
# - 1 for non-multioutput regression and binary classification (shape is
# already correct in those cases)
# - n_tasks for multi-output regression
# - n_classes for multiclass classification.
predictions = np.array(predictions).T
if is_regressor(est) and predictions.ndim == 2:
# non-multioutput regression, shape is (n_instances, n_points,)
predictions = predictions.reshape(n_samples, -1)
elif is_classifier(est) and predictions.shape[0] == 2:
# Binary classification, shape is (2, n_instances, n_points).
# we output the effect of **positive** class
predictions = predictions[1]
predictions = predictions.reshape(n_samples, -1)
# reshape averaged_predictions to (n_targets, n_points) where n_targets is:
# - 1 for non-multioutput regression and binary classification (shape is
# already correct in those cases)
# - n_tasks for multi-output regression
# - n_classes for multiclass classification.
averaged_predictions = np.array(averaged_predictions).T
if averaged_predictions.ndim == 1:
# reshape to (1, n_points) for consistency with
# _partial_dependence_recursion
averaged_predictions = averaged_predictions.reshape(1, -1)
return averaged_predictions, predictions
|
Calculate partial dependence via the brute force method.
The brute method explicitly averages the predictions of an estimator over a
grid of feature values.
For each `grid` value, all the samples from `X` have their variables of
interest replaced by that specific `grid` value. The predictions are then made
and averaged across the samples.
This method is slower than the `'recursion'`
(:func:`~sklearn.inspection._partial_dependence._partial_dependence_recursion`)
version for estimators with this second option. However, with the `'brute'`
force method, the average will be done with the given `X` and not the `X`
used during training, as it is done in the `'recursion'` version. Therefore
the average can always accept `sample_weight` (even when the estimator was
fitted without).
Parameters
----------
est : BaseEstimator
A fitted estimator object implementing :term:`predict`,
:term:`predict_proba`, or :term:`decision_function`.
Multioutput-multiclass classifiers are not supported.
grid : array-like of shape (n_points, n_target_features)
The grid of feature values for which the partial dependence is calculated.
Note that `n_points` is the number of points in the grid and `n_target_features`
is the number of features you are doing partial dependence at.
features : array-like of {int, str}
The feature (e.g. `[0]`) or pair of interacting features
(e.g. `[(0, 1)]`) for which the partial dependency should be computed.
X : array-like of shape (n_samples, n_features)
`X` is used to generate values for the complement features. That is, for
each value in `grid`, the method will average the prediction of each
sample from `X` having that grid value for `features`.
response_method : {'auto', 'predict_proba', 'decision_function'}, default='auto'
Specifies whether to use :term:`predict_proba` or
:term:`decision_function` as the target response. For regressors
this parameter is ignored and the response is always the output of
:term:`predict`. By default, :term:`predict_proba` is tried first
and we revert to :term:`decision_function` if it doesn't exist.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights are used to calculate weighted means when averaging the
model output. If `None`, then samples are equally weighted. Note that
`sample_weight` does not change the individual predictions.
Returns
-------
averaged_predictions : array-like of shape (n_targets, n_points)
The averaged predictions for the given `grid` of features values.
Note that `n_targets` is the number of targets (e.g. 1 for binary
classification, `n_tasks` for multi-output regression, and `n_classes` for
multiclass classification) and `n_points` is the number of points in the `grid`.
predictions : array-like
The predictions for the given `grid` of features values over the samples
from `X`. For non-multioutput regression and binary classification the
shape is `(n_instances, n_points)` and for multi-output regression and
multiclass classification the shape is `(n_targets, n_instances, n_points)`,
where `n_targets` is the number of targets (`n_tasks` for multi-output
regression, and `n_classes` for multiclass classification), `n_instances`
is the number of instances in `X`, and `n_points` is the number of points
in the `grid`.
|
_partial_dependence_brute
|
python
|
scikit-learn/scikit-learn
|
sklearn/inspection/_partial_dependence.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/inspection/_partial_dependence.py
|
BSD-3-Clause
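A hedged usage sketch of the brute path through the public API (synthetic data): besides the averaged curve it also returns the per-sample ICE predictions, which only `'brute'` can provide.

from sklearn.datasets import make_regression
from sklearn.inspection import partial_dependence
from sklearn.linear_model import LinearRegression

X, y = make_regression(n_samples=50, n_features=3, random_state=0)
est = LinearRegression().fit(X, y)

res = partial_dependence(
    est, X, features=[0], method="brute", kind="both", grid_resolution=5
)
print(res["average"].shape)     # (1, 5): one averaged curve over 5 grid points
print(res["individual"].shape)  # (1, 50, 5): one ICE curve per sample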
|
def partial_dependence(
estimator,
X,
features,
*,
sample_weight=None,
categorical_features=None,
feature_names=None,
response_method="auto",
percentiles=(0.05, 0.95),
grid_resolution=100,
custom_values=None,
method="auto",
kind="average",
):
"""Partial dependence of ``features``.
Partial dependence of a feature (or a set of features) corresponds to
the average response of an estimator for each possible value of the
feature.
Read more in
:ref:`sphx_glr_auto_examples_inspection_plot_partial_dependence.py`
and the :ref:`User Guide <partial_dependence>`.
.. warning::
For :class:`~sklearn.ensemble.GradientBoostingClassifier` and
:class:`~sklearn.ensemble.GradientBoostingRegressor`, the
`'recursion'` method (used by default) will not account for the `init`
predictor of the boosting process. In practice, this will produce
the same values as `'brute'` up to a constant offset in the target
response, provided that `init` is a constant estimator (which is the
default). However, if `init` is not a constant estimator, the
partial dependence values are incorrect for `'recursion'` because the
offset will be sample-dependent. It is preferable to use the `'brute'`
method. Note that this only applies to
:class:`~sklearn.ensemble.GradientBoostingClassifier` and
:class:`~sklearn.ensemble.GradientBoostingRegressor`, not to
:class:`~sklearn.ensemble.HistGradientBoostingClassifier` and
:class:`~sklearn.ensemble.HistGradientBoostingRegressor`.
Parameters
----------
estimator : BaseEstimator
A fitted estimator object implementing :term:`predict`,
:term:`predict_proba`, or :term:`decision_function`.
Multioutput-multiclass classifiers are not supported.
X : {array-like, sparse matrix or dataframe} of shape (n_samples, n_features)
``X`` is used to generate a grid of values for the target
``features`` (where the partial dependence will be evaluated), and
also to generate values for the complement features when the
`method` is 'brute'.
features : array-like of {int, str, bool} or int or str
The feature (e.g. `[0]`) or pair of interacting features
(e.g. `[(0, 1)]`) for which the partial dependency should be computed.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights are used to calculate weighted means when averaging the
model output. If `None`, then samples are equally weighted. If
`sample_weight` is not `None`, then `method` will be set to `'brute'`.
Note that `sample_weight` is ignored for `kind='individual'`.
.. versionadded:: 1.3
categorical_features : array-like of shape (n_features,) or shape \
(n_categorical_features,), dtype={bool, int, str}, default=None
Indicates the categorical features.
- `None`: no feature will be considered categorical;
- boolean array-like: boolean mask of shape `(n_features,)`
indicating which features are categorical. Thus, this array has
a length equal to `X.shape[1]`;
- integer or string array-like: integer indices or strings
indicating categorical features.
.. versionadded:: 1.2
feature_names : array-like of shape (n_features,), dtype=str, default=None
Name of each feature; `feature_names[i]` holds the name of the feature
with index `i`.
By default, the name of the feature corresponds to their numerical
index for NumPy array and their column name for pandas dataframe.
.. versionadded:: 1.2
response_method : {'auto', 'predict_proba', 'decision_function'}, \
default='auto'
Specifies whether to use :term:`predict_proba` or
:term:`decision_function` as the target response. For regressors
this parameter is ignored and the response is always the output of
:term:`predict`. By default, :term:`predict_proba` is tried first
and we revert to :term:`decision_function` if it doesn't exist. If
``method`` is 'recursion', the response is always the output of
:term:`decision_function`.
percentiles : tuple of float, default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the grid. Must be in [0, 1].
This parameter is overridden by `custom_values` if that parameter is set.
grid_resolution : int, default=100
The number of equally spaced points on the grid, for each target
feature.
This parameter is overridden by `custom_values` if that parameter is set.
custom_values : dict, default=None
A dictionary mapping the index of an element of `features` to an array
of values where the partial dependence should be calculated
for that feature. Setting a range of values for a feature overrides
`grid_resolution` and `percentiles`.
See :ref:`how to use partial_dependence
<plt_partial_dependence_custom_values>` for an example of how this parameter can
be used.
.. versionadded:: 1.7
method : {'auto', 'recursion', 'brute'}, default='auto'
The method used to calculate the averaged predictions:
- `'recursion'` is only supported for some tree-based estimators
(namely
:class:`~sklearn.ensemble.GradientBoostingClassifier`,
:class:`~sklearn.ensemble.GradientBoostingRegressor`,
:class:`~sklearn.ensemble.HistGradientBoostingClassifier`,
:class:`~sklearn.ensemble.HistGradientBoostingRegressor`,
:class:`~sklearn.tree.DecisionTreeRegressor`,
:class:`~sklearn.ensemble.RandomForestRegressor`,
) when `kind='average'`.
This is more efficient in terms of speed.
With this method, the target response of a
classifier is always the decision function, not the predicted
probabilities. Since the `'recursion'` method implicitly computes
the average of the Individual Conditional Expectation (ICE) by
design, it is not compatible with ICE and thus `kind` must be
`'average'`.
- `'brute'` is supported for any estimator, but is more
computationally intensive.
- `'auto'`: the `'recursion'` is used for estimators that support it,
and `'brute'` is used otherwise. If `sample_weight` is not `None`,
then `'brute'` is used regardless of the estimator.
Please see :ref:`this note <pdp_method_differences>` for
differences between the `'brute'` and `'recursion'` method.
kind : {'average', 'individual', 'both'}, default='average'
Whether to return the partial dependence averaged across all the
samples in the dataset or one value per sample or both.
See Returns below.
Note that the fast `method='recursion'` option is only available for
`kind='average'` and `sample_weight=None`. Computing individual
dependencies and doing weighted averages requires using the slower
`method='brute'`.
.. versionadded:: 0.24
Returns
-------
predictions : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
individual : ndarray of shape (n_outputs, n_instances, \
len(values[0]), len(values[1]), ...)
The predictions for all the points in the grid for all
samples in X. This is also known as Individual
Conditional Expectation (ICE).
Only available when `kind='individual'` or `kind='both'`.
average : ndarray of shape (n_outputs, len(values[0]), \
len(values[1]), ...)
The predictions for all the points in the grid, averaged
over all samples in X (or over the training data if
`method` is 'recursion').
Only available when `kind='average'` or `kind='both'`.
grid_values : seq of 1d ndarrays
The values with which the grid has been created. The generated
grid is a cartesian product of the arrays in `grid_values` where
`len(grid_values) == len(features)`. The size of each array
`grid_values[j]` is either `grid_resolution`, or the number of
unique values in `X[:, j]`, whichever is smaller.
.. versionadded:: 1.3
`n_outputs` corresponds to the number of classes in a multi-class
setting, or to the number of tasks for multi-output regression.
For classical regression and binary classification `n_outputs==1`.
`n_values_feature_j` corresponds to the size of `grid_values[j]`.
See Also
--------
PartialDependenceDisplay.from_estimator : Plot Partial Dependence.
PartialDependenceDisplay : Partial Dependence visualization.
Examples
--------
>>> X = [[0, 0, 2], [1, 0, 0]]
>>> y = [0, 1]
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> gb = GradientBoostingClassifier(random_state=0).fit(X, y)
>>> partial_dependence(gb, features=[0], X=X, percentiles=(0, 1),
... grid_resolution=2) # doctest: +SKIP
(array([[-4.52, 4.52]]), [array([ 0., 1.])])
"""
check_is_fitted(estimator)
if not (is_classifier(estimator) or is_regressor(estimator)):
raise ValueError("'estimator' must be a fitted regressor or classifier.")
if is_classifier(estimator) and isinstance(estimator.classes_[0], np.ndarray):
raise ValueError("Multiclass-multioutput estimators are not supported")
# Use check_array only on lists and other non-array-likes / sparse. Do not
# convert DataFrame into a NumPy array.
if not (hasattr(X, "__array__") or sparse.issparse(X)):
X = check_array(X, ensure_all_finite="allow-nan", dtype=object)
if is_regressor(estimator) and response_method != "auto":
raise ValueError(
"The response_method parameter is ignored for regressors and "
"must be 'auto'."
)
if kind != "average":
if method == "recursion":
raise ValueError(
"The 'recursion' method only applies when 'kind' is set to 'average'"
)
method = "brute"
if method == "recursion" and sample_weight is not None:
raise ValueError(
"The 'recursion' method can only be applied when sample_weight is None."
)
if method == "auto":
if sample_weight is not None:
method = "brute"
elif isinstance(estimator, BaseGradientBoosting) and estimator.init is None:
method = "recursion"
elif isinstance(
estimator,
(BaseHistGradientBoosting, DecisionTreeRegressor, RandomForestRegressor),
):
method = "recursion"
else:
method = "brute"
if method == "recursion":
if not isinstance(
estimator,
(
BaseGradientBoosting,
BaseHistGradientBoosting,
DecisionTreeRegressor,
RandomForestRegressor,
),
):
supported_classes_recursion = (
"GradientBoostingClassifier",
"GradientBoostingRegressor",
"HistGradientBoostingClassifier",
"HistGradientBoostingRegressor",
"HistGradientBoostingRegressor",
"DecisionTreeRegressor",
"RandomForestRegressor",
)
raise ValueError(
"Only the following estimators support the 'recursion' "
"method: {}. Try using method='brute'.".format(
", ".join(supported_classes_recursion)
)
)
if response_method == "auto":
response_method = "decision_function"
if response_method != "decision_function":
raise ValueError(
"With the 'recursion' method, the response_method must be "
"'decision_function'. Got {}.".format(response_method)
)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
if _determine_key_type(features, accept_slice=False) == "int":
# _get_column_indices() supports negative indexing. Here, we limit
# the indexing to be positive. The upper bound will be checked
# by _get_column_indices()
if np.any(np.less(features, 0)):
raise ValueError("all features must be in [0, {}]".format(X.shape[1] - 1))
features_indices = np.asarray(
_get_column_indices(X, features), dtype=np.intp, order="C"
).ravel()
feature_names = _check_feature_names(X, feature_names)
n_features = X.shape[1]
if categorical_features is None:
is_categorical = [False] * len(features_indices)
else:
categorical_features = np.asarray(categorical_features)
if categorical_features.size == 0:
raise ValueError(
"Passing an empty list (`[]`) to `categorical_features` is not "
"supported. Use `None` instead to indicate that there are no "
"categorical features."
)
if categorical_features.dtype.kind == "b":
# categorical features provided as a list of boolean
if categorical_features.size != n_features:
raise ValueError(
"When `categorical_features` is a boolean array-like, "
"the array should be of shape (n_features,). Got "
f"{categorical_features.size} elements while `X` contains "
f"{n_features} features."
)
is_categorical = [categorical_features[idx] for idx in features_indices]
elif categorical_features.dtype.kind in ("i", "O", "U"):
# categorical features provided as a list of indices or feature names
categorical_features_idx = [
_get_feature_index(cat, feature_names=feature_names)
for cat in categorical_features
]
is_categorical = [
idx in categorical_features_idx for idx in features_indices
]
else:
raise ValueError(
"Expected `categorical_features` to be an array-like of boolean,"
f" integer, or string. Got {categorical_features.dtype} instead."
)
custom_values = custom_values or {}
if isinstance(features, (str, int)):
features = [features]
for feature_idx, feature, is_cat in zip(features_indices, features, is_categorical):
if is_cat:
continue
if _safe_indexing(X, feature_idx, axis=1).dtype.kind in "iu":
# TODO(1.9): raise a ValueError instead.
warnings.warn(
f"The column {feature!r} contains integer data. Partial "
"dependence plots are not supported for integer data: this "
"can lead to implicit rounding with NumPy arrays or even errors "
"with newer pandas versions. Please convert numerical features"
"to floating point dtypes ahead of time to avoid problems. "
"This will raise ValueError in scikit-learn 1.9.",
FutureWarning,
)
# Do not warn again for other features to avoid spamming the caller.
break
X_subset = _safe_indexing(X, features_indices, axis=1)
custom_values_for_X_subset = {
index: custom_values.get(feature)
for index, feature in enumerate(features)
if feature in custom_values
}
grid, values = _grid_from_X(
X_subset,
percentiles,
is_categorical,
grid_resolution,
custom_values_for_X_subset,
)
if method == "brute":
averaged_predictions, predictions = _partial_dependence_brute(
estimator, grid, features_indices, X, response_method, sample_weight
)
# reshape predictions to
# (n_outputs, n_instances, n_values_feature_0, n_values_feature_1, ...)
predictions = predictions.reshape(
-1, X.shape[0], *[val.shape[0] for val in values]
)
else:
averaged_predictions = _partial_dependence_recursion(
estimator, grid, features_indices
)
# reshape averaged_predictions to
# (n_outputs, n_values_feature_0, n_values_feature_1, ...)
averaged_predictions = averaged_predictions.reshape(
-1, *[val.shape[0] for val in values]
)
pdp_results = Bunch(grid_values=values)
if kind == "average":
pdp_results["average"] = averaged_predictions
elif kind == "individual":
pdp_results["individual"] = predictions
else: # kind='both'
pdp_results["average"] = averaged_predictions
pdp_results["individual"] = predictions
return pdp_results
|
Partial dependence of ``features``.
Partial dependence of a feature (or a set of features) corresponds to
the average response of an estimator for each possible value of the
feature.
Read more in
:ref:`sphx_glr_auto_examples_inspection_plot_partial_dependence.py`
and the :ref:`User Guide <partial_dependence>`.
.. warning::
For :class:`~sklearn.ensemble.GradientBoostingClassifier` and
:class:`~sklearn.ensemble.GradientBoostingRegressor`, the
`'recursion'` method (used by default) will not account for the `init`
predictor of the boosting process. In practice, this will produce
the same values as `'brute'` up to a constant offset in the target
response, provided that `init` is a constant estimator (which is the
default). However, if `init` is not a constant estimator, the
partial dependence values are incorrect for `'recursion'` because the
offset will be sample-dependent. It is preferable to use the `'brute'`
method. Note that this only applies to
:class:`~sklearn.ensemble.GradientBoostingClassifier` and
:class:`~sklearn.ensemble.GradientBoostingRegressor`, not to
:class:`~sklearn.ensemble.HistGradientBoostingClassifier` and
:class:`~sklearn.ensemble.HistGradientBoostingRegressor`.
Parameters
----------
estimator : BaseEstimator
A fitted estimator object implementing :term:`predict`,
:term:`predict_proba`, or :term:`decision_function`.
Multioutput-multiclass classifiers are not supported.
X : {array-like, sparse matrix or dataframe} of shape (n_samples, n_features)
``X`` is used to generate a grid of values for the target
``features`` (where the partial dependence will be evaluated), and
also to generate values for the complement features when the
`method` is 'brute'.
features : array-like of {int, str, bool} or int or str
The feature (e.g. `[0]`) or pair of interacting features
(e.g. `[(0, 1)]`) for which the partial dependency should be computed.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights are used to calculate weighted means when averaging the
model output. If `None`, then samples are equally weighted. If
`sample_weight` is not `None`, then `method` will be set to `'brute'`.
Note that `sample_weight` is ignored for `kind='individual'`.
.. versionadded:: 1.3
categorical_features : array-like of shape (n_features,) or shape (n_categorical_features,), dtype={bool, int, str}, default=None
Indicates the categorical features.
- `None`: no feature will be considered categorical;
- boolean array-like: boolean mask of shape `(n_features,)`
indicating which features are categorical. Thus, this array has
length `X.shape[1]`;
- integer or string array-like: integer indices or strings
indicating categorical features.
.. versionadded:: 1.2
feature_names : array-like of shape (n_features,), dtype=str, default=None
Name of each feature; `feature_names[i]` holds the name of the feature
with index `i`.
By default, the names of the features correspond to their numerical
indices for NumPy arrays and to their column names for pandas dataframes.
.. versionadded:: 1.2
response_method : {'auto', 'predict_proba', 'decision_function'}, default='auto'
Specifies whether to use :term:`predict_proba` or
:term:`decision_function` as the target response. For regressors
this parameter is ignored and the response is always the output of
:term:`predict`. By default, :term:`predict_proba` is tried first
and we revert to :term:`decision_function` if it doesn't exist. If
``method`` is 'recursion', the response is always the output of
:term:`decision_function`.
percentiles : tuple of float, default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the grid. Must be in [0, 1].
This parameter is overridden by `custom_values` if that parameter is set.
grid_resolution : int, default=100
The number of equally spaced points on the grid, for each target
feature.
This parameter is overridden by `custom_values` if that parameter is set.
custom_values : dict
A dictionary mapping a target feature (specified as in `features`, i.e.
by column index or name) to an array of values at which the partial
dependence should be calculated for that feature. Setting a range of
values for a feature overrides `grid_resolution` and `percentiles`.
See :ref:`how to use partial_dependence
<plt_partial_dependence_custom_values>` for an example of how this parameter can
be used.
.. versionadded:: 1.7
method : {'auto', 'recursion', 'brute'}, default='auto'
The method used to calculate the averaged predictions:
- `'recursion'` is only supported for some tree-based estimators
(namely
:class:`~sklearn.ensemble.GradientBoostingClassifier`,
:class:`~sklearn.ensemble.GradientBoostingRegressor`,
:class:`~sklearn.ensemble.HistGradientBoostingClassifier`,
:class:`~sklearn.ensemble.HistGradientBoostingRegressor`,
:class:`~sklearn.tree.DecisionTreeRegressor`,
:class:`~sklearn.ensemble.RandomForestRegressor`,
) when `kind='average'`.
This is more efficient in terms of speed.
With this method, the target response of a
classifier is always the decision function, not the predicted
probabilities. Since the `'recursion'` method implicitly computes
the average of the Individual Conditional Expectation (ICE) by
design, it is not compatible with ICE and thus `kind` must be
`'average'`.
- `'brute'` is supported for any estimator, but is more
computationally intensive.
- `'auto'`: the `'recursion'` method is used for estimators that support it,
and `'brute'` is used otherwise. If `sample_weight` is not `None`,
then `'brute'` is used regardless of the estimator.
Please see :ref:`this note <pdp_method_differences>` for
differences between the `'brute'` and `'recursion'` methods.
kind : {'average', 'individual', 'both'}, default='average'
Whether to return the partial dependence averaged across all the
samples in the dataset or one value per sample or both.
See Returns below.
Note that the fast `method='recursion'` option is only available for
`kind='average'` and `sample_weight=None`. Computing individual
dependencies and doing weighted averages requires using the slower
`method='brute'`.
.. versionadded:: 0.24
Returns
-------
predictions : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
individual : ndarray of shape (n_outputs, n_instances, len(values[0]), len(values[1]), ...)
The predictions for all the points in the grid for all
samples in X. This is also known as Individual
Conditional Expectation (ICE).
Only available when `kind='individual'` or `kind='both'`.
average : ndarray of shape (n_outputs, len(values[0]), len(values[1]), ...)
The predictions for all the points in the grid, averaged
over all samples in X (or over the training data if
`method` is 'recursion').
Only available when `kind='average'` or `kind='both'`.
grid_values : seq of 1d ndarrays
The values with which the grid has been created. The generated
grid is a cartesian product of the arrays in `grid_values` where
`len(grid_values) == len(features)`. The size of each array
`grid_values[j]` is either `grid_resolution`, or the number of
unique values in `X[:, j]`, whichever is smaller.
.. versionadded:: 1.3
`n_outputs` corresponds to the number of classes in a multi-class
setting, or to the number of tasks for multi-output regression.
For classical regression and binary classification `n_outputs==1`.
`n_values_feature_j` corresponds to the size of `grid_values[j]`.
See Also
--------
PartialDependenceDisplay.from_estimator : Plot Partial Dependence.
PartialDependenceDisplay : Partial Dependence visualization.
Examples
--------
>>> X = [[0, 0, 2], [1, 0, 0]]
>>> y = [0, 1]
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> gb = GradientBoostingClassifier(random_state=0).fit(X, y)
>>> partial_dependence(gb, features=[0], X=X, percentiles=(0, 1),
... grid_resolution=2) # doctest: +SKIP
(array([[-4.52, 4.52]]), [array([ 0., 1.])])
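As an editorial illustration (not part of the upstream docstring), the following sketch shows how the returned Bunch is typically consumed for both averaged and per-sample (ICE) results; it assumes a small synthetic regression problem and a scikit-learn version that exposes the `grid_values` key (1.3 or later).

# Illustrative sketch only: consuming the Bunch returned by partial_dependence.
from sklearn.datasets import make_friedman1
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.inspection import partial_dependence

X, y = make_friedman1(n_samples=200, random_state=0)
est = GradientBoostingRegressor(random_state=0).fit(X, y)

# Averaged partial dependence of feature 0 on a coarse 5-point grid
# ('recursion' is selected automatically for this estimator).
pd_avg = partial_dependence(est, X, features=[0], kind="average",
                            grid_resolution=5)
print(pd_avg["average"].shape)     # (1, 5): one output, 5 grid points
print(pd_avg["grid_values"][0])    # the 5 grid values used for feature 0

# ICE curves for the same feature: one curve per sample ('brute' is used).
pd_ice = partial_dependence(est, X, features=[0], kind="individual",
                            grid_resolution=5)
print(pd_ice["individual"].shape)  # (1, 200, 5)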
|
partial_dependence
|
python
|
scikit-learn/scikit-learn
|
sklearn/inspection/_partial_dependence.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/inspection/_partial_dependence.py
|
BSD-3-Clause
|
def _check_feature_names(X, feature_names=None):
"""Check feature names.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
feature_names : None or array-like of shape (n_names,), dtype=str
Feature names to check or `None`.
Returns
-------
feature_names : list of str
Feature names validated. If `feature_names` is `None`, then a list of
feature names is provided, i.e. the column names of a pandas dataframe
or a generic list of feature names (e.g. `["x0", "x1", ...]`) for a
NumPy array.
"""
if feature_names is None:
if hasattr(X, "columns") and hasattr(X.columns, "tolist"):
# get the column names for a pandas dataframe
feature_names = X.columns.tolist()
else:
# define a list of numbered indices for a numpy array
feature_names = [f"x{i}" for i in range(X.shape[1])]
elif hasattr(feature_names, "tolist"):
# convert numpy array or pandas index to a list
feature_names = feature_names.tolist()
if len(set(feature_names)) != len(feature_names):
raise ValueError("feature_names should not contain duplicates.")
return feature_names
|
Check feature names.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
feature_names : None or array-like of shape (n_names,), dtype=str
Feature names to check or `None`.
Returns
-------
feature_names : list of str
Feature names validated. If `feature_names` is `None`, then a list of
feature names is provided, i.e. the column names of a pandas dataframe
or a generic list of feature names (e.g. `["x0", "x1", ...]`) for a
NumPy array.
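A small behavioural sketch added for illustration; `_check_feature_names` is a private helper, so the import path below (taken from the record above) may change between scikit-learn versions.

# Behavioural sketch of the private helper (illustrative only).
import numpy as np
from sklearn.inspection._pd_utils import _check_feature_names

X = np.zeros((4, 3))
print(_check_feature_names(X))                   # ['x0', 'x1', 'x2']
print(_check_feature_names(X, ["a", "b", "c"]))  # ['a', 'b', 'c']

try:
    _check_feature_names(X, ["a", "a", "c"])     # duplicates are rejected
except ValueError as exc:
    print(exc)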
|
_check_feature_names
|
python
|
scikit-learn/scikit-learn
|
sklearn/inspection/_pd_utils.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/inspection/_pd_utils.py
|
BSD-3-Clause
|
def _get_feature_index(fx, feature_names=None):
"""Get feature index.
Parameters
----------
fx : int or str
Feature index or name.
feature_names : list of str, default=None
All feature names from which to search the indices.
Returns
-------
idx : int
Feature index.
"""
if isinstance(fx, str):
if feature_names is None:
raise ValueError(
f"Cannot plot partial dependence for feature {fx!r} since "
"the list of feature names was not provided, neither as "
"column names of a pandas data-frame nor via the feature_names "
"parameter."
)
try:
return feature_names.index(fx)
except ValueError as e:
raise ValueError(f"Feature {fx!r} not in feature_names") from e
return fx
|
Get feature index.
Parameters
----------
fx : int or str
Feature index or name.
feature_names : list of str, default=None
All feature names from which to search the indices.
Returns
-------
idx : int
Feature index.
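For illustration, a short sketch of the lookup behaviour; again a private helper whose import path is taken from the record above and may change between versions.

# Lookup sketch for the private helper (illustrative only).
from sklearn.inspection._pd_utils import _get_feature_index

names = ["age", "income", "height"]
print(_get_feature_index("income", feature_names=names))  # 1
print(_get_feature_index(2, feature_names=names))         # 2, integers pass through

try:
    _get_feature_index("weight", feature_names=names)     # unknown name
except ValueError as exc:
    print(exc)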
|
_get_feature_index
|
python
|
scikit-learn/scikit-learn
|
sklearn/inspection/_pd_utils.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/inspection/_pd_utils.py
|
BSD-3-Clause
|
def _create_importances_bunch(baseline_score, permuted_score):
"""Compute the importances as the decrease in score.
Parameters
----------
baseline_score : float
The baseline score, computed without any permutation.
permuted_score : ndarray of shape (n_features, n_repeats)
The permuted scores for the `n_repeats` repetitions.
Returns
-------
importances : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
importances_mean : ndarray, shape (n_features, )
Mean of feature importance over `n_repeats`.
importances_std : ndarray, shape (n_features, )
Standard deviation over `n_repeats`.
importances : ndarray, shape (n_features, n_repeats)
Raw permutation importance scores.
"""
importances = baseline_score - permuted_score
return Bunch(
importances_mean=np.mean(importances, axis=1),
importances_std=np.std(importances, axis=1),
importances=importances,
)
|
Compute the importances as the decrease in score.
Parameters
----------
baseline_score : float
The baseline score, computed without any permutation.
permuted_score : ndarray of shape (n_features, n_repeats)
The permuted scores for the `n_repeats` repetitions.
Returns
-------
importances : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
importances_mean : ndarray, shape (n_features, )
Mean of feature importance over `n_repeats`.
importances_std : ndarray, shape (n_features, )
Standard deviation over `n_repeats`.
importances : ndarray, shape (n_features, n_repeats)
Raw permutation importance scores.
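An illustrative computation with made-up scores, assuming the private import path shown above; it only demonstrates how the three attributes of the Bunch relate to the inputs.

# Made-up scores to show how the Bunch attributes are derived (sketch only).
import numpy as np
from sklearn.inspection._permutation_importance import _create_importances_bunch

baseline_score = 0.90                            # score without permutation
permuted_score = np.array([[0.70, 0.72, 0.71],   # feature 0, 3 repeats
                           [0.89, 0.90, 0.91]])  # feature 1, 3 repeats

result = _create_importances_bunch(baseline_score, permuted_score)
print(result.importances.shape)   # (2, 3): importance per feature and repeat
print(result.importances_mean)    # roughly [0.19, 0.0]
print(result.importances_std)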
|
_create_importances_bunch
|
python
|
scikit-learn/scikit-learn
|
sklearn/inspection/_permutation_importance.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/inspection/_permutation_importance.py
|
BSD-3-Clause
|
def permutation_importance(
estimator,
X,
y,
*,
scoring=None,
n_repeats=5,
n_jobs=None,
random_state=None,
sample_weight=None,
max_samples=1.0,
):
"""Permutation importance for feature evaluation [BRE]_.
The :term:`estimator` is required to be a fitted estimator. `X` can be the
data set used to train the estimator or a hold-out set. The permutation
importance of a feature is calculated as follows. First, a baseline metric,
defined by :term:`scoring`, is evaluated on a (potentially different)
dataset defined by `X`. Next, a feature column of this dataset
is permuted and the metric is evaluated again. The permutation importance
is defined to be the difference between the baseline metric and the metric
obtained after permuting the feature column.
Read more in the :ref:`User Guide <permutation_importance>`.
Parameters
----------
estimator : object
An estimator that has already been :term:`fitted` and is compatible
with :term:`scorer`.
X : ndarray or DataFrame, shape (n_samples, n_features)
Data on which permutation importance will be computed.
y : array-like or None, shape (n_samples, ) or (n_samples, n_classes)
Targets for supervised or `None` for unsupervised.
scoring : str, callable, list, tuple, or dict, default=None
Scorer to use.
If `scoring` represents a single score, one can use:
- str: see :ref:`scoring_string_names` for options.
- callable: a scorer callable object (e.g., function) with signature
``scorer(estimator, X, y)``. See :ref:`scoring_callable` for details.
- `None`: the `estimator`'s
:ref:`default evaluation criterion <scoring_api_overview>` is used.
If `scoring` represents multiple scores, one can use:
- a list or tuple of unique strings;
- a callable returning a dictionary where the keys are the metric
names and the values are the metric scores;
- a dictionary with metric names as keys and callables as values.
Passing multiple scores to `scoring` is more efficient than calling
`permutation_importance` for each of the scores as it reuses
predictions to avoid redundant computation.
n_repeats : int, default=5
Number of times to permute a feature.
n_jobs : int or None, default=None
Number of jobs to run in parallel. The permutation score is computed
for each column and the computation is parallelized over the columns.
`None` means 1 unless in a :obj:`joblib.parallel_backend` context.
`-1` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
random_state : int, RandomState instance, default=None
Pseudo-random number generator to control the permutations of each
feature.
Pass an int to get reproducible results across function calls.
See :term:`Glossary <random_state>`.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights used in scoring.
.. versionadded:: 0.24
max_samples : int or float, default=1.0
The number of samples to draw from X to compute feature importance
in each repeat (without replacement).
- If int, then draw `max_samples` samples.
- If float, then draw `max_samples * X.shape[0]` samples.
- If `max_samples` is equal to `1.0` or `X.shape[0]`, all samples
will be used.
While using this option may provide less accurate importance estimates,
it keeps the method tractable when evaluating feature importance on
large datasets. In combination with `n_repeats`, this allows controlling
the trade-off between computational speed and statistical accuracy of this method.
.. versionadded:: 1.0
Returns
-------
result : :class:`~sklearn.utils.Bunch` or dict of such instances
Dictionary-like object, with the following attributes.
importances_mean : ndarray of shape (n_features, )
Mean of feature importance over `n_repeats`.
importances_std : ndarray of shape (n_features, )
Standard deviation over `n_repeats`.
importances : ndarray of shape (n_features, n_repeats)
Raw permutation importance scores.
If there are multiple scoring metrics in the scoring parameter
`result` is a dict with scorer names as keys (e.g. 'roc_auc') and
`Bunch` objects like above as values.
References
----------
.. [BRE] :doi:`L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32,
2001. <10.1023/A:1010933404324>`
Examples
--------
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.inspection import permutation_importance
>>> X = [[1, 9, 9],[1, 9, 9],[1, 9, 9],
... [0, 9, 9],[0, 9, 9],[0, 9, 9]]
>>> y = [1, 1, 1, 0, 0, 0]
>>> clf = LogisticRegression().fit(X, y)
>>> result = permutation_importance(clf, X, y, n_repeats=10,
... random_state=0)
>>> result.importances_mean
array([0.4666, 0. , 0. ])
>>> result.importances_std
array([0.2211, 0. , 0. ])
"""
if not hasattr(X, "iloc"):
X = check_array(X, ensure_all_finite="allow-nan", dtype=None)
# Precompute random seed from the random state to be used
# to get a fresh independent RandomState instance for each
# parallel call to _calculate_permutation_scores, irrespective of
# the fact that variables are shared or not depending on the active
# joblib backend (sequential, thread-based or process-based).
random_state = check_random_state(random_state)
random_seed = random_state.randint(np.iinfo(np.int32).max + 1)
if not isinstance(max_samples, numbers.Integral):
max_samples = int(max_samples * X.shape[0])
elif max_samples > X.shape[0]:
raise ValueError("max_samples must be <= n_samples")
scorer = check_scoring(estimator, scoring=scoring)
baseline_score = _weights_scorer(scorer, estimator, X, y, sample_weight)
scores = Parallel(n_jobs=n_jobs)(
delayed(_calculate_permutation_scores)(
estimator,
X,
y,
sample_weight,
col_idx,
random_seed,
n_repeats,
scorer,
max_samples,
)
for col_idx in range(X.shape[1])
)
if isinstance(baseline_score, dict):
return {
name: _create_importances_bunch(
baseline_score[name],
# unpack the permuted scores
np.array([scores[col_idx][name] for col_idx in range(X.shape[1])]),
)
for name in baseline_score
}
else:
return _create_importances_bunch(baseline_score, np.array(scores))
|
Permutation importance for feature evaluation [BRE]_.
The :term:`estimator` is required to be a fitted estimator. `X` can be the
data set used to train the estimator or a hold-out set. The permutation
importance of a feature is calculated as follows. First, a baseline metric,
defined by :term:`scoring`, is evaluated on a (potentially different)
dataset defined by `X`. Next, a feature column of this dataset
is permuted and the metric is evaluated again. The permutation importance
is defined to be the difference between the baseline metric and the metric
obtained after permuting the feature column.
Read more in the :ref:`User Guide <permutation_importance>`.
Parameters
----------
estimator : object
An estimator that has already been :term:`fitted` and is compatible
with :term:`scorer`.
X : ndarray or DataFrame, shape (n_samples, n_features)
Data on which permutation importance will be computed.
y : array-like or None, shape (n_samples, ) or (n_samples, n_classes)
Targets for supervised or `None` for unsupervised.
scoring : str, callable, list, tuple, or dict, default=None
Scorer to use.
If `scoring` represents a single score, one can use:
- str: see :ref:`scoring_string_names` for options.
- callable: a scorer callable object (e.g., function) with signature
``scorer(estimator, X, y)``. See :ref:`scoring_callable` for details.
- `None`: the `estimator`'s
:ref:`default evaluation criterion <scoring_api_overview>` is used.
If `scoring` represents multiple scores, one can use:
- a list or tuple of unique strings;
- a callable returning a dictionary where the keys are the metric
names and the values are the metric scores;
- a dictionary with metric names as keys and callables as values.
Passing multiple scores to `scoring` is more efficient than calling
`permutation_importance` for each of the scores as it reuses
predictions to avoid redundant computation.
n_repeats : int, default=5
Number of times to permute a feature.
n_jobs : int or None, default=None
Number of jobs to run in parallel. The permutation score is computed
for each column and the computation is parallelized over the columns.
`None` means 1 unless in a :obj:`joblib.parallel_backend` context.
`-1` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
random_state : int, RandomState instance, default=None
Pseudo-random number generator to control the permutations of each
feature.
Pass an int to get reproducible results across function calls.
See :term:`Glossary <random_state>`.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights used in scoring.
.. versionadded:: 0.24
max_samples : int or float, default=1.0
The number of samples to draw from X to compute feature importance
in each repeat (without replacement).
- If int, then draw `max_samples` samples.
- If float, then draw `max_samples * X.shape[0]` samples.
- If `max_samples` is equal to `1.0` or `X.shape[0]`, all samples
will be used.
While using this option may provide less accurate importance estimates,
it keeps the method tractable when evaluating feature importance on
large datasets. In combination with `n_repeats`, this allows controlling
the trade-off between computational speed and statistical accuracy of this method.
.. versionadded:: 1.0
Returns
-------
result : :class:`~sklearn.utils.Bunch` or dict of such instances
Dictionary-like object, with the following attributes.
importances_mean : ndarray of shape (n_features, )
Mean of feature importance over `n_repeats`.
importances_std : ndarray of shape (n_features, )
Standard deviation over `n_repeats`.
importances : ndarray of shape (n_features, n_repeats)
Raw permutation importance scores.
If there are multiple scoring metrics in the scoring parameter
`result` is a dict with scorer names as keys (e.g. 'roc_auc') and
`Bunch` objects like above as values.
References
----------
.. [BRE] :doi:`L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32,
2001. <10.1023/A:1010933404324>`
Examples
--------
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.inspection import permutation_importance
>>> X = [[1, 9, 9],[1, 9, 9],[1, 9, 9],
... [0, 9, 9],[0, 9, 9],[0, 9, 9]]
>>> y = [1, 1, 1, 0, 0, 0]
>>> clf = LogisticRegression().fit(X, y)
>>> result = permutation_importance(clf, X, y, n_repeats=10,
... random_state=0)
>>> result.importances_mean
array([0.4666, 0. , 0. ])
>>> result.importances_std
array([0.2211, 0. , 0. ])
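The doctest above covers the single-metric case; the following editorial sketch additionally shows the multi-metric and `max_samples` options described in the Parameters section (the dataset and scorer choices are illustrative).

# Sketch: multiple scorers return one Bunch per metric; max_samples subsamples rows.
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.inspection import permutation_importance

X, y = make_classification(n_samples=300, n_features=5, random_state=0)
clf = RandomForestClassifier(random_state=0).fit(X, y)

result = permutation_importance(
    clf, X, y,
    scoring=["accuracy", "roc_auc"],  # a dict of Bunch objects is returned
    n_repeats=5,
    max_samples=0.5,                  # permute on half of the rows in each repeat
    random_state=0,
)
for metric, bunch in result.items():
    print(metric, bunch.importances_mean.round(3))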
|
permutation_importance
|
python
|
scikit-learn/scikit-learn
|
sklearn/inspection/_permutation_importance.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/inspection/_permutation_importance.py
|
BSD-3-Clause
|
def test_grid_from_X_with_categorical(grid_resolution):
"""Check that `_grid_from_X` always sample from categories and does not
depend from the percentiles.
"""
pd = pytest.importorskip("pandas")
percentiles = (0.05, 0.95)
is_categorical = [True]
X = pd.DataFrame({"cat_feature": ["A", "B", "C", "A", "B", "D", "E"]})
grid, axes = _grid_from_X(
X,
percentiles,
is_categorical,
grid_resolution=grid_resolution,
custom_values={},
)
assert grid.shape == (5, X.shape[1])
assert axes[0].shape == (5,)
|
Check that `_grid_from_X` always samples from the categories and does not
depend on the percentiles.
|
test_grid_from_X_with_categorical
|
python
|
scikit-learn/scikit-learn
|
sklearn/inspection/tests/test_partial_dependence.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/inspection/tests/test_partial_dependence.py
|
BSD-3-Clause
|
def test_grid_from_X_heterogeneous_type(grid_resolution):
"""Check that `_grid_from_X` always sample from categories and does not
depend from the percentiles.
"""
pd = pytest.importorskip("pandas")
percentiles = (0.05, 0.95)
is_categorical = [True, False]
X = pd.DataFrame(
{
"cat": ["A", "B", "C", "A", "B", "D", "E", "A", "B", "D"],
"num": [1, 1, 1, 2, 5, 6, 6, 6, 6, 8],
}
)
nunique = X.nunique()
grid, axes = _grid_from_X(
X,
percentiles,
is_categorical,
grid_resolution=grid_resolution,
custom_values={},
)
if grid_resolution == 3:
assert grid.shape == (15, 2)
assert axes[0].shape[0] == nunique["cat"]  # all categories are kept
assert axes[1].shape[0] == grid_resolution
else:
assert grid.shape == (25, 2)
assert axes[0].shape[0] == nunique["cat"]
assert axes[1].shape[0] == nunique["num"]  # fewer unique values than grid_resolution
|
Check that `_grid_from_X` always samples from the categories and does not
depend on the percentiles, also when categorical and numerical columns are mixed.
|
test_grid_from_X_heterogeneous_type
|
python
|
scikit-learn/scikit-learn
|
sklearn/inspection/tests/test_partial_dependence.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/inspection/tests/test_partial_dependence.py
|
BSD-3-Clause
|
def test_partial_dependence_kind_individual_ignores_sample_weight(Estimator, data):
"""Check that `sample_weight` does not have any effect on reported ICE."""
est = Estimator()
(X, y), n_targets = data
sample_weight = np.arange(X.shape[0])
est.fit(X, y)
pdp_nsw = partial_dependence(est, X=X, features=[1, 2], kind="individual")
pdp_sw = partial_dependence(
est, X=X, features=[1, 2], kind="individual", sample_weight=sample_weight
)
assert_allclose(pdp_nsw["individual"], pdp_sw["individual"])
assert_allclose(pdp_nsw["grid_values"], pdp_sw["grid_values"])
|
Check that `sample_weight` does not have any effect on reported ICE.
|
test_partial_dependence_kind_individual_ignores_sample_weight
|
python
|
scikit-learn/scikit-learn
|
sklearn/inspection/tests/test_partial_dependence.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/inspection/tests/test_partial_dependence.py
|
BSD-3-Clause
|
def test_partial_dependence_non_null_weight_idx(estimator, non_null_weight_idx):
"""Check that if we pass a `sample_weight` of zeros with only one index with
sample weight equals one, then the average `partial_dependence` with this
`sample_weight` is equal to the individual `partial_dependence` of the
corresponding index.
"""
X, y = iris.data, iris.target
preprocessor = make_column_transformer(
(StandardScaler(), [0, 2]), (RobustScaler(), [1, 3])
)
pipe = make_pipeline(preprocessor, clone(estimator)).fit(X, y)
sample_weight = np.zeros_like(y)
sample_weight[non_null_weight_idx] = 1
pdp_sw = partial_dependence(
pipe,
X,
[2, 3],
kind="average",
sample_weight=sample_weight,
grid_resolution=10,
)
pdp_ind = partial_dependence(pipe, X, [2, 3], kind="individual", grid_resolution=10)
output_dim = 1 if is_regressor(pipe) else len(np.unique(y))
for i in range(output_dim):
assert_allclose(
pdp_ind["individual"][i][non_null_weight_idx],
pdp_sw["average"][i],
)
|
Check that if we pass a `sample_weight` of zeros with a single entry equal
to one, then the average `partial_dependence` computed with this
`sample_weight` is equal to the individual `partial_dependence` of the
corresponding sample.
|
test_partial_dependence_non_null_weight_idx
|
python
|
scikit-learn/scikit-learn
|
sklearn/inspection/tests/test_partial_dependence.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/inspection/tests/test_partial_dependence.py
|
BSD-3-Clause
|
def test_partial_dependence_equivalence_equal_sample_weight(Estimator, data):
"""Check that `sample_weight=None` is equivalent to having equal weights."""
est = Estimator()
(X, y), n_targets = data
est.fit(X, y)
sample_weight, params = None, {"X": X, "features": [1, 2], "kind": "average"}
pdp_sw_none = partial_dependence(est, **params, sample_weight=sample_weight)
sample_weight = np.ones(len(y))
pdp_sw_unit = partial_dependence(est, **params, sample_weight=sample_weight)
assert_allclose(pdp_sw_none["average"], pdp_sw_unit["average"])
sample_weight = 2 * np.ones(len(y))
pdp_sw_doubling = partial_dependence(est, **params, sample_weight=sample_weight)
assert_allclose(pdp_sw_none["average"], pdp_sw_doubling["average"])
|
Check that `sample_weight=None` is equivalent to having equal weights.
|
test_partial_dependence_equivalence_equal_sample_weight
|
python
|
scikit-learn/scikit-learn
|
sklearn/inspection/tests/test_partial_dependence.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/inspection/tests/test_partial_dependence.py
|
BSD-3-Clause
|
def test_partial_dependence_sample_weight_size_error():
"""Check that we raise an error when the size of `sample_weight` is not
consistent with `X` and `y`.
"""
est = LogisticRegression()
(X, y), n_targets = binary_classification_data
sample_weight = np.ones_like(y)
est.fit(X, y)
with pytest.raises(ValueError, match="sample_weight.shape =="):
partial_dependence(
est, X, features=[0], sample_weight=sample_weight[1:], grid_resolution=10
)
|
Check that we raise an error when the size of `sample_weight` is not
consistent with `X` and `y`.
|
test_partial_dependence_sample_weight_size_error
|
python
|
scikit-learn/scikit-learn
|
sklearn/inspection/tests/test_partial_dependence.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/inspection/tests/test_partial_dependence.py
|
BSD-3-Clause
|
def test_partial_dependence_sample_weight_with_recursion():
"""Check that we raise an error when `sample_weight` is provided with
`"recursion"` method.
"""
est = RandomForestRegressor()
(X, y), n_targets = regression_data
sample_weight = np.ones_like(y)
est.fit(X, y, sample_weight=sample_weight)
with pytest.raises(ValueError, match="'recursion' method can only be applied when"):
partial_dependence(
est, X, features=[0], method="recursion", sample_weight=sample_weight
)
|
Check that we raise an error when `sample_weight` is provided with the
`"recursion"` method.
|
test_partial_dependence_sample_weight_with_recursion
|
python
|
scikit-learn/scikit-learn
|
sklearn/inspection/tests/test_partial_dependence.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/inspection/tests/test_partial_dependence.py
|
BSD-3-Clause
|
def test_mixed_type_categorical():
"""Check that we raise a proper error when a column has mixed types and
the sorting of `np.unique` will fail."""
X = np.array(["A", "B", "C", np.nan], dtype=object).reshape(-1, 1)
y = np.array([0, 1, 0, 1])
from sklearn.preprocessing import OrdinalEncoder
clf = make_pipeline(
OrdinalEncoder(encoded_missing_value=-1),
LogisticRegression(),
).fit(X, y)
with pytest.raises(ValueError, match="The column #0 contains mixed data types"):
partial_dependence(clf, X, features=[0])
|
Check that we raise a proper error when a column has mixed types and
the sorting of `np.unique` will fail.
|
test_mixed_type_categorical
|
python
|
scikit-learn/scikit-learn
|
sklearn/inspection/tests/test_partial_dependence.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/inspection/tests/test_partial_dependence.py
|
BSD-3-Clause
|
def test_partial_dependence_empty_categorical_features():
"""Check that we raise the proper exception when `categorical_features`
is an empty list"""
clf = make_pipeline(StandardScaler(), LogisticRegression())
clf.fit(iris.data, iris.target)
with pytest.raises(
ValueError,
match=re.escape(
"Passing an empty list (`[]`) to `categorical_features` is not "
"supported. Use `None` instead to indicate that there are no "
"categorical features."
),
):
partial_dependence(
estimator=clf, X=iris.data, features=[0], categorical_features=[]
)
|
Check that we raise the proper exception when `categorical_features`
is an empty list
|
test_partial_dependence_empty_categorical_features
|
python
|
scikit-learn/scikit-learn
|
sklearn/inspection/tests/test_partial_dependence.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/inspection/tests/test_partial_dependence.py
|
BSD-3-Clause
|
def test_permutation_importance_max_samples_error():
"""Check that a proper error message is raised when `max_samples` is not
set to a valid input value.
"""
X = np.array([(1.0, 2.0, 3.0, 4.0)]).T
y = np.array([0, 1, 0, 1])
clf = LogisticRegression()
clf.fit(X, y)
err_msg = r"max_samples must be <= n_samples"
with pytest.raises(ValueError, match=err_msg):
permutation_importance(clf, X, y, max_samples=5)
|
Check that a proper error message is raised when `max_samples` is not
set to a valid input value.
|
test_permutation_importance_max_samples_error
|
python
|
scikit-learn/scikit-learn
|
sklearn/inspection/tests/test_permutation_importance.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/inspection/tests/test_permutation_importance.py
|
BSD-3-Clause
|
def _check_boundary_response_method(estimator, response_method, class_of_interest):
"""Validate the response methods to be used with the fitted estimator.
Parameters
----------
estimator : object
Fitted estimator to check.
response_method : {'auto', 'decision_function', 'predict_proba', 'predict'}
Specifies whether to use :term:`decision_function`, :term:`predict_proba`,
or :term:`predict` as the target response. If set to 'auto', the response
methods are tried in the order listed above.
class_of_interest : int, float, bool, str or None
The class considered when plotting the decision. Cannot be None if
multiclass and `response_method` is 'predict_proba' or 'decision_function'.
.. versionadded:: 1.4
Returns
-------
prediction_method : list of str or str
The name or list of names of the response methods to use.
"""
has_classes = hasattr(estimator, "classes_")
if has_classes and _is_arraylike_not_scalar(estimator.classes_[0]):
msg = "Multi-label and multi-output multi-class classifiers are not supported"
raise ValueError(msg)
if response_method == "auto":
if is_regressor(estimator):
prediction_method = "predict"
else:
prediction_method = ["decision_function", "predict_proba", "predict"]
else:
prediction_method = response_method
return prediction_method
|
Validate the response methods to be used with the fitted estimator.
Parameters
----------
estimator : object
Fitted estimator to check.
response_method : {'auto', 'decision_function', 'predict_proba', 'predict'}
Specifies whether to use :term:`decision_function`, :term:`predict_proba`,
or :term:`predict` as the target response. If set to 'auto', the response
methods are tried in the order listed above.
class_of_interest : int, float, bool, str or None
The class considered when plotting the decision. Cannot be None if
multiclass and `response_method` is 'predict_proba' or 'decision_function'.
.. versionadded:: 1.4
Returns
-------
prediction_method : list of str or str
The name or list of names of the response methods to use.
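For illustration, a sketch of how 'auto' is resolved for classifiers and regressors; the private import path is taken from the record above and may change between versions.

# Sketch of the 'auto' resolution logic (private helper; illustrative only).
from sklearn.datasets import load_iris
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.inspection._plot.decision_boundary import _check_boundary_response_method

X, y = load_iris(return_X_y=True)
clf = LogisticRegression().fit(X, y)
reg = LinearRegression().fit(X, y)

# Classifiers: decision_function, then predict_proba, then predict.
print(_check_boundary_response_method(clf, "auto", None))
# Regressors: always predict.
print(_check_boundary_response_method(reg, "auto", None))
# Anything other than 'auto' is passed through unchanged.
print(_check_boundary_response_method(clf, "predict_proba", None))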
|
_check_boundary_response_method
|
python
|
scikit-learn/scikit-learn
|
sklearn/inspection/_plot/decision_boundary.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/inspection/_plot/decision_boundary.py
|
BSD-3-Clause
|
def plot(self, plot_method="contourf", ax=None, xlabel=None, ylabel=None, **kwargs):
"""Plot visualization.
Parameters
----------
plot_method : {'contourf', 'contour', 'pcolormesh'}, default='contourf'
Plotting method to call when plotting the response. Please refer
to the following matplotlib documentation for details:
:func:`contourf <matplotlib.pyplot.contourf>`,
:func:`contour <matplotlib.pyplot.contour>`,
:func:`pcolormesh <matplotlib.pyplot.pcolormesh>`.
ax : Matplotlib axes, default=None
Axes object to plot on. If `None`, a new figure and axes is
created.
xlabel : str, default=None
Overwrite the x-axis label.
ylabel : str, default=None
Overwrite the y-axis label.
**kwargs : dict
Additional keyword arguments to be passed to the `plot_method`.
Returns
-------
display: :class:`~sklearn.inspection.DecisionBoundaryDisplay`
Object that stores computed values.
"""
check_matplotlib_support("DecisionBoundaryDisplay.plot")
import matplotlib as mpl
import matplotlib.pyplot as plt
if plot_method not in ("contourf", "contour", "pcolormesh"):
raise ValueError(
"plot_method must be 'contourf', 'contour', or 'pcolormesh'. "
f"Got {plot_method} instead."
)
if ax is None:
_, ax = plt.subplots()
plot_func = getattr(ax, plot_method)
if self.response.ndim == 2:
self.surface_ = plot_func(self.xx0, self.xx1, self.response, **kwargs)
else: # self.response.ndim == 3
n_responses = self.response.shape[-1]
if (
isinstance(self.multiclass_colors, str)
or self.multiclass_colors is None
):
if isinstance(self.multiclass_colors, str):
cmap = self.multiclass_colors
else:
if n_responses <= 10:
cmap = "tab10"
else:
cmap = "gist_rainbow"
# Special case for the tab10 and tab20 colormaps that encode a
# discrete set of colors that are easily distinguishable
# contrary to other colormaps that are continuous.
if cmap == "tab10" and n_responses <= 10:
colors = plt.get_cmap("tab10", 10).colors[:n_responses]
elif cmap == "tab20" and n_responses <= 20:
colors = plt.get_cmap("tab20", 20).colors[:n_responses]
else:
colors = plt.get_cmap(cmap, n_responses).colors
elif isinstance(self.multiclass_colors, str):
colors = plt.get_cmap(
self.multiclass_colors, n_responses
).colors
else:
colors = [mpl.colors.to_rgba(color) for color in self.multiclass_colors]
self.multiclass_colors_ = colors
multiclass_cmaps = [
mpl.colors.LinearSegmentedColormap.from_list(
f"colormap_{class_idx}", [(1.0, 1.0, 1.0, 1.0), (r, g, b, 1.0)]
)
for class_idx, (r, g, b, _) in enumerate(colors)
]
self.surface_ = []
for class_idx, cmap in enumerate(multiclass_cmaps):
response = np.ma.array(
self.response[:, :, class_idx],
mask=~(self.response.argmax(axis=2) == class_idx),
)
# `cmap` should not be in kwargs
safe_kwargs = kwargs.copy()
if "cmap" in safe_kwargs:
del safe_kwargs["cmap"]
warnings.warn(
"Plotting max class of multiclass 'decision_function' or "
"'predict_proba', thus 'multiclass_colors' used and "
"'cmap' kwarg ignored."
)
self.surface_.append(
plot_func(self.xx0, self.xx1, response, cmap=cmap, **safe_kwargs)
)
if xlabel is not None or not ax.get_xlabel():
xlabel = self.xlabel if xlabel is None else xlabel
ax.set_xlabel(xlabel)
if ylabel is not None or not ax.get_ylabel():
ylabel = self.ylabel if ylabel is None else ylabel
ax.set_ylabel(ylabel)
self.ax_ = ax
self.figure_ = ax.figure
return self
|
Plot visualization.
Parameters
----------
plot_method : {'contourf', 'contour', 'pcolormesh'}, default='contourf'
Plotting method to call when plotting the response. Please refer
to the following matplotlib documentation for details:
:func:`contourf <matplotlib.pyplot.contourf>`,
:func:`contour <matplotlib.pyplot.contour>`,
:func:`pcolormesh <matplotlib.pyplot.pcolormesh>`.
ax : Matplotlib axes, default=None
Axes object to plot on. If `None`, a new figure and axes is
created.
xlabel : str, default=None
Overwrite the x-axis label.
ylabel : str, default=None
Overwrite the y-axis label.
**kwargs : dict
Additional keyword arguments to be passed to the `plot_method`.
Returns
-------
display: :class:`~sklearn.inspection.DecisionBoundaryDisplay`
Object that stores computed values.
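Typically the display is built through `from_estimator`, but as an editorial sketch the following constructs one by hand from a precomputed 2-D response and then calls `plot`; the grid and response values are made up and matplotlib is required.

# Hand-built display to illustrate `plot` (sketch only; needs matplotlib).
import numpy as np
from sklearn.inspection import DecisionBoundaryDisplay

xx0, xx1 = np.meshgrid(np.linspace(-3, 3, 100), np.linspace(-3, 3, 100))
response = (xx0**2 + xx1**2 < 4).astype(float)  # arbitrary 2-D response surface

display = DecisionBoundaryDisplay(xx0=xx0, xx1=xx1, response=response,
                                  xlabel="x0", ylabel="x1")
display.plot(plot_method="contourf", alpha=0.5)
display.ax_.set_title("Hand-built response surface")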
|
plot
|
python
|
scikit-learn/scikit-learn
|
sklearn/inspection/_plot/decision_boundary.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/inspection/_plot/decision_boundary.py
|
BSD-3-Clause
|
def from_estimator(
cls,
estimator,
X,
*,
grid_resolution=100,
eps=1.0,
plot_method="contourf",
response_method="auto",
class_of_interest=None,
multiclass_colors=None,
xlabel=None,
ylabel=None,
ax=None,
**kwargs,
):
"""Plot decision boundary given an estimator.
Read more in the :ref:`User Guide <visualizations>`.
Parameters
----------
estimator : object
Trained estimator used to plot the decision boundary.
X : {array-like, sparse matrix, dataframe} of shape (n_samples, 2)
Input data that should be only 2-dimensional.
grid_resolution : int, default=100
Number of grid points to use for plotting decision boundary.
Higher values will make the plot look nicer but be slower to
render.
eps : float, default=1.0
Extends the minimum and maximum values of X for evaluating the
response function.
plot_method : {'contourf', 'contour', 'pcolormesh'}, default='contourf'
Plotting method to call when plotting the response. Please refer
to the following matplotlib documentation for details:
:func:`contourf <matplotlib.pyplot.contourf>`,
:func:`contour <matplotlib.pyplot.contour>`,
:func:`pcolormesh <matplotlib.pyplot.pcolormesh>`.
response_method : {'auto', 'decision_function', 'predict_proba', \
'predict'}, default='auto'
Specifies whether to use :term:`decision_function`,
:term:`predict_proba` or :term:`predict` as the target response.
If set to 'auto', the response method is tried in the order as
listed above.
.. versionchanged:: 1.6
For multiclass problems, 'auto' no longer defaults to 'predict'.
class_of_interest : int, float, bool or str, default=None
The class to be plotted when `response_method` is 'predict_proba'
or 'decision_function'. If None, `estimator.classes_[1]` is considered
the positive class for binary classifiers. For multiclass
classifiers, if None, all classes will be represented in the
decision boundary plot; the class with the highest response value
at each point is plotted. The color of each class can be set via
`multiclass_colors`.
.. versionadded:: 1.4
multiclass_colors : list of str, or str, default=None
Specifies how to color each class when plotting multiclass
'predict_proba' or 'decision_function' and `class_of_interest` is
None. Ignored in all other cases.
Possible inputs are:
* list: list of Matplotlib
`color <https://matplotlib.org/stable/users/explain/colors/colors.html#colors-def>`_
strings, of length `n_classes`
* str: name of :class:`matplotlib.colors.Colormap`
* None: 'tab10' colormap is used to sample colors if the number of
classes is less than or equal to 10, otherwise 'gist_rainbow'
colormap.
Single color colormaps will be generated from the colors in the list or
colors taken from the colormap, and passed to the `cmap` parameter of
the `plot_method`.
.. versionadded:: 1.7
xlabel : str, default=None
The label used for the x-axis. If `None`, an attempt is made to
extract a label from `X` if it is a dataframe, otherwise an empty
string is used.
ylabel : str, default=None
The label used for the y-axis. If `None`, an attempt is made to
extract a label from `X` if it is a dataframe, otherwise an empty
string is used.
ax : Matplotlib axes, default=None
Axes object to plot on. If `None`, a new figure and axes is
created.
**kwargs : dict
Additional keyword arguments to be passed to the
`plot_method`.
Returns
-------
display : :class:`~sklearn.inspection.DecisionBoundaryDisplay`
Object that stores the result.
See Also
--------
DecisionBoundaryDisplay : Decision boundary visualization.
sklearn.metrics.ConfusionMatrixDisplay.from_estimator : Plot the
confusion matrix given an estimator, the data, and the label.
sklearn.metrics.ConfusionMatrixDisplay.from_predictions : Plot the
confusion matrix given the true and predicted labels.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sklearn.datasets import load_iris
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.inspection import DecisionBoundaryDisplay
>>> iris = load_iris()
>>> X = iris.data[:, :2]
>>> classifier = LogisticRegression().fit(X, iris.target)
>>> disp = DecisionBoundaryDisplay.from_estimator(
... classifier, X, response_method="predict",
... xlabel=iris.feature_names[0], ylabel=iris.feature_names[1],
... alpha=0.5,
... )
>>> disp.ax_.scatter(X[:, 0], X[:, 1], c=iris.target, edgecolor="k")
<...>
>>> plt.show()
"""
check_matplotlib_support(f"{cls.__name__}.from_estimator")
check_is_fitted(estimator)
import matplotlib as mpl
if not grid_resolution > 1:
raise ValueError(
"grid_resolution must be greater than 1. Got"
f" {grid_resolution} instead."
)
if not eps >= 0:
raise ValueError(
f"eps must be greater than or equal to 0. Got {eps} instead."
)
possible_plot_methods = ("contourf", "contour", "pcolormesh")
if plot_method not in possible_plot_methods:
available_methods = ", ".join(possible_plot_methods)
raise ValueError(
f"plot_method must be one of {available_methods}. "
f"Got {plot_method} instead."
)
num_features = _num_features(X)
if num_features != 2:
raise ValueError(
f"n_features must be equal to 2. Got {num_features} instead."
)
if (
response_method in ("predict_proba", "decision_function", "auto")
and multiclass_colors is not None
and hasattr(estimator, "classes_")
and (n_classes := len(estimator.classes_)) > 2
):
if isinstance(multiclass_colors, list):
if len(multiclass_colors) != n_classes:
raise ValueError(
"When 'multiclass_colors' is a list, it must be of the same "
f"length as 'estimator.classes_' ({n_classes}), got: "
f"{len(multiclass_colors)}."
)
elif any(
not mpl.colors.is_color_like(col) for col in multiclass_colors
):
raise ValueError(
"When 'multiclass_colors' is a list, it can only contain valid"
f" Matplotlib color names. Got: {multiclass_colors}"
)
if isinstance(multiclass_colors, str):
if multiclass_colors not in mpl.pyplot.colormaps():
raise ValueError(
"When 'multiclass_colors' is a string, it must be a valid "
f"Matplotlib colormap. Got: {multiclass_colors}"
)
x0, x1 = _safe_indexing(X, 0, axis=1), _safe_indexing(X, 1, axis=1)
x0_min, x0_max = x0.min() - eps, x0.max() + eps
x1_min, x1_max = x1.min() - eps, x1.max() + eps
xx0, xx1 = np.meshgrid(
np.linspace(x0_min, x0_max, grid_resolution),
np.linspace(x1_min, x1_max, grid_resolution),
)
X_grid = np.c_[xx0.ravel(), xx1.ravel()]
if _is_pandas_df(X) or _is_polars_df(X):
adapter = _get_adapter_from_container(X)
X_grid = adapter.create_container(
X_grid,
X_grid,
columns=X.columns,
)
prediction_method = _check_boundary_response_method(
estimator, response_method, class_of_interest
)
try:
response, _, response_method_used = _get_response_values(
estimator,
X_grid,
response_method=prediction_method,
pos_label=class_of_interest,
return_response_method_used=True,
)
except ValueError as exc:
if "is not a valid label" in str(exc):
# re-raise a more informative error message since `pos_label` is unknown
# to our user when interacting with
# `DecisionBoundaryDisplay.from_estimator`
raise ValueError(
f"class_of_interest={class_of_interest} is not a valid label: It "
f"should be one of {estimator.classes_}"
) from exc
raise
# convert classes predictions into integers
if response_method_used == "predict" and hasattr(estimator, "classes_"):
encoder = LabelEncoder()
encoder.classes_ = estimator.classes_
response = encoder.transform(response)
if response.ndim == 1:
response = response.reshape(*xx0.shape)
else:
if is_regressor(estimator):
raise ValueError("Multi-output regressors are not supported")
if class_of_interest is not None:
# For the multiclass case, `_get_response_values` returns the response
# as-is. Thus, we have a column per class and we need to select the
# column corresponding to the positive class.
col_idx = np.flatnonzero(estimator.classes_ == class_of_interest)[0]
response = response[:, col_idx].reshape(*xx0.shape)
else:
response = response.reshape(*xx0.shape, response.shape[-1])
if xlabel is None:
xlabel = X.columns[0] if hasattr(X, "columns") else ""
if ylabel is None:
ylabel = X.columns[1] if hasattr(X, "columns") else ""
display = cls(
xx0=xx0,
xx1=xx1,
response=response,
multiclass_colors=multiclass_colors,
xlabel=xlabel,
ylabel=ylabel,
)
return display.plot(ax=ax, plot_method=plot_method, **kwargs)
|
Plot decision boundary given an estimator.
Read more in the :ref:`User Guide <visualizations>`.
Parameters
----------
estimator : object
Trained estimator used to plot the decision boundary.
X : {array-like, sparse matrix, dataframe} of shape (n_samples, 2)
Input data that should be only 2-dimensional.
grid_resolution : int, default=100
Number of grid points to use for plotting decision boundary.
Higher values will make the plot look nicer but be slower to
render.
eps : float, default=1.0
Extends the minimum and maximum values of X for evaluating the
response function.
plot_method : {'contourf', 'contour', 'pcolormesh'}, default='contourf'
Plotting method to call when plotting the response. Please refer
to the following matplotlib documentation for details:
:func:`contourf <matplotlib.pyplot.contourf>`,
:func:`contour <matplotlib.pyplot.contour>`,
:func:`pcolormesh <matplotlib.pyplot.pcolormesh>`.
response_method : {'auto', 'decision_function', 'predict_proba', 'predict'}, default='auto'
Specifies whether to use :term:`decision_function`,
:term:`predict_proba` or :term:`predict` as the target response.
If set to 'auto', the response method is tried in the order as
listed above.
.. versionchanged:: 1.6
For multiclass problems, 'auto' no longer defaults to 'predict'.
class_of_interest : int, float, bool or str, default=None
The class to be plotted when `response_method` is 'predict_proba'
or 'decision_function'. If None, `estimator.classes_[1]` is considered
the positive class for binary classifiers. For multiclass
classifiers, if None, all classes will be represented in the
decision boundary plot; the class with the highest response value
at each point is plotted. The color of each class can be set via
`multiclass_colors`.
.. versionadded:: 1.4
multiclass_colors : list of str, or str, default=None
Specifies how to color each class when plotting multiclass
'predict_proba' or 'decision_function' and `class_of_interest` is
None. Ignored in all other cases.
Possible inputs are:
* list: list of Matplotlib
`color <https://matplotlib.org/stable/users/explain/colors/colors.html#colors-def>`_
strings, of length `n_classes`
* str: name of :class:`matplotlib.colors.Colormap`
* None: 'tab10' colormap is used to sample colors if the number of
classes is less than or equal to 10, otherwise 'gist_rainbow'
colormap.
Single color colormaps will be generated from the colors in the list or
colors taken from the colormap, and passed to the `cmap` parameter of
the `plot_method`.
.. versionadded:: 1.7
xlabel : str, default=None
The label used for the x-axis. If `None`, an attempt is made to
extract a label from `X` if it is a dataframe, otherwise an empty
string is used.
ylabel : str, default=None
The label used for the y-axis. If `None`, an attempt is made to
extract a label from `X` if it is a dataframe, otherwise an empty
string is used.
ax : Matplotlib axes, default=None
Axes object to plot on. If `None`, a new figure and axes is
created.
**kwargs : dict
Additional keyword arguments to be passed to the
`plot_method`.
Returns
-------
display : :class:`~sklearn.inspection.DecisionBoundaryDisplay`
Object that stores the result.
See Also
--------
DecisionBoundaryDisplay : Decision boundary visualization.
sklearn.metrics.ConfusionMatrixDisplay.from_estimator : Plot the
confusion matrix given an estimator, the data, and the label.
sklearn.metrics.ConfusionMatrixDisplay.from_predictions : Plot the
confusion matrix given the true and predicted labels.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sklearn.datasets import load_iris
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.inspection import DecisionBoundaryDisplay
>>> iris = load_iris()
>>> X = iris.data[:, :2]
>>> classifier = LogisticRegression().fit(X, iris.target)
>>> disp = DecisionBoundaryDisplay.from_estimator(
... classifier, X, response_method="predict",
... xlabel=iris.feature_names[0], ylabel=iris.feature_names[1],
... alpha=0.5,
... )
>>> disp.ax_.scatter(X[:, 0], X[:, 1], c=iris.target, edgecolor="k")
<...>
>>> plt.show()
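Beyond the doctest above, the `multiclass_colors` behaviour described earlier can be sketched as follows (editorial example; it assumes a scikit-learn version that supports `multiclass_colors`, i.e. 1.7 or later, and requires matplotlib).

# Sketch: colouring all classes of a multiclass problem.
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.inspection import DecisionBoundaryDisplay

iris = load_iris()
X = iris.data[:, :2]
clf = LogisticRegression().fit(X, iris.target)

disp = DecisionBoundaryDisplay.from_estimator(
    clf, X,
    response_method="predict_proba",
    multiclass_colors="tab10",   # one single-colour colormap per class
    alpha=0.5,
)
disp.ax_.scatter(X[:, 0], X[:, 1], c=iris.target, edgecolor="k")
plt.show()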
|
from_estimator
|
python
|
scikit-learn/scikit-learn
|
sklearn/inspection/_plot/decision_boundary.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/inspection/_plot/decision_boundary.py
|
BSD-3-Clause
|
def from_estimator(
cls,
estimator,
X,
features,
*,
sample_weight=None,
categorical_features=None,
feature_names=None,
target=None,
response_method="auto",
n_cols=3,
grid_resolution=100,
percentiles=(0.05, 0.95),
custom_values=None,
method="auto",
n_jobs=None,
verbose=0,
line_kw=None,
ice_lines_kw=None,
pd_line_kw=None,
contour_kw=None,
ax=None,
kind="average",
centered=False,
subsample=1000,
random_state=None,
):
"""Partial dependence (PD) and individual conditional expectation (ICE) plots.
Partial dependence plots, individual conditional expectation plots, or an
overlay of both can be plotted by setting the `kind` parameter.
This method generates one plot for each entry in `features`. The plots
are arranged in a grid with `n_cols` columns. For one-way partial
dependence plots, the deciles of the feature values are shown on the
x-axis. For two-way plots, the deciles are shown on both axes and PDPs
are contour plots.
For general information regarding `scikit-learn` visualization tools, see
the :ref:`Visualization Guide <visualizations>`.
For guidance on interpreting these plots, refer to the
:ref:`Inspection Guide <partial_dependence>`.
For an example on how to use this class method, see
:ref:`sphx_glr_auto_examples_inspection_plot_partial_dependence.py`.
.. note::
:func:`PartialDependenceDisplay.from_estimator` does not support using the
same axes with multiple calls. To plot the partial dependence for
multiple estimators, please pass the axes created by the first call to the
second call::
>>> from sklearn.inspection import PartialDependenceDisplay
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.linear_model import LinearRegression
>>> from sklearn.ensemble import RandomForestRegressor
>>> X, y = make_friedman1()
>>> est1 = LinearRegression().fit(X, y)
>>> est2 = RandomForestRegressor().fit(X, y)
>>> disp1 = PartialDependenceDisplay.from_estimator(est1, X,
... [1, 2])
>>> disp2 = PartialDependenceDisplay.from_estimator(est2, X, [1, 2],
... ax=disp1.axes_)
.. warning::
For :class:`~sklearn.ensemble.GradientBoostingClassifier` and
:class:`~sklearn.ensemble.GradientBoostingRegressor`, the
`'recursion'` method (used by default) will not account for the `init`
predictor of the boosting process. In practice, this will produce
the same values as `'brute'` up to a constant offset in the target
response, provided that `init` is a constant estimator (which is the
default). However, if `init` is not a constant estimator, the
partial dependence values are incorrect for `'recursion'` because the
offset will be sample-dependent. It is preferable to use the `'brute'`
method. Note that this only applies to
:class:`~sklearn.ensemble.GradientBoostingClassifier` and
:class:`~sklearn.ensemble.GradientBoostingRegressor`, not to
:class:`~sklearn.ensemble.HistGradientBoostingClassifier` and
:class:`~sklearn.ensemble.HistGradientBoostingRegressor`.
.. versionadded:: 1.0
Parameters
----------
estimator : BaseEstimator
A fitted estimator object implementing :term:`predict`,
:term:`predict_proba`, or :term:`decision_function`.
Multioutput-multiclass classifiers are not supported.
X : {array-like, dataframe} of shape (n_samples, n_features)
``X`` is used to generate a grid of values for the target
``features`` (where the partial dependence will be evaluated), and
also to generate values for the complement features when the
`method` is `'brute'`.
features : list of {int, str, pair of int, pair of str}
The target features for which to create the PDPs.
If `features[i]` is an integer or a string, a one-way PDP is created;
if `features[i]` is a tuple, a two-way PDP is created (only supported
with `kind='average'`). Each tuple must be of size 2.
If any entry is a string, then it must be in ``feature_names``.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights are used to calculate weighted means when averaging the
model output. If `None`, then samples are equally weighted. If
`sample_weight` is not `None`, then `method` will be set to `'brute'`.
Note that `sample_weight` is ignored for `kind='individual'`.
.. versionadded:: 1.3
categorical_features : array-like of shape (n_features,) or shape \
(n_categorical_features,), dtype={bool, int, str}, default=None
Indicates the categorical features.
- `None`: no feature will be considered categorical;
- boolean array-like: boolean mask of shape `(n_features,)`
indicating which features are categorical. Thus, this array has
a length equal to `X.shape[1]`;
- integer or string array-like: integer indices or strings
indicating categorical features.
.. versionadded:: 1.2
feature_names : array-like of shape (n_features,), dtype=str, default=None
Name of each feature; `feature_names[i]` holds the name of the feature
with index `i`.
By default, the name of each feature corresponds to its numerical
index for a NumPy array and its column name for a pandas dataframe.
target : int, default=None
- In a multiclass setting, specifies the class for which the PDPs
should be computed. Note that for binary classification, the
positive class (index 1) is always used.
- In a multioutput setting, specifies the task for which the PDPs
should be computed.
Ignored in binary classification or classical regression settings.
response_method : {'auto', 'predict_proba', 'decision_function'}, \
default='auto'
Specifies whether to use :term:`predict_proba` or
:term:`decision_function` as the target response. For regressors
this parameter is ignored and the response is always the output of
:term:`predict`. By default, :term:`predict_proba` is tried first
and we revert to :term:`decision_function` if it doesn't exist. If
``method`` is `'recursion'`, the response is always the output of
:term:`decision_function`.
n_cols : int, default=3
The maximum number of columns in the grid plot. Only active when `ax`
is a single axis or `None`.
grid_resolution : int, default=100
The number of equally spaced points on the axes of the plots, for each
target feature.
This parameter is overridden by `custom_values` if that parameter is set.
percentiles : tuple of float, default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the PDP axes. Must be in [0, 1].
This parameter is overridden by `custom_values` if that parameter is set.
custom_values : dict
A dictionary mapping the index of an element of `features` to an
array of values where the partial dependence should be calculated
for that feature. Setting a range of values for a feature overrides
`grid_resolution` and `percentiles`.
.. versionadded:: 1.7
method : str, default='auto'
The method used to calculate the averaged predictions:
- `'recursion'` is only supported for some tree-based estimators
(namely
:class:`~sklearn.ensemble.GradientBoostingClassifier`,
:class:`~sklearn.ensemble.GradientBoostingRegressor`,
:class:`~sklearn.ensemble.HistGradientBoostingClassifier`,
:class:`~sklearn.ensemble.HistGradientBoostingRegressor`,
:class:`~sklearn.tree.DecisionTreeRegressor`,
:class:`~sklearn.ensemble.RandomForestRegressor`)
but is more efficient in terms of speed.
With this method, the target response of a
classifier is always the decision function, not the predicted
probabilities. Since the `'recursion'` method implicitly computes
the average of the ICEs by design, it is not compatible with ICE and
thus `kind` must be `'average'`.
- `'brute'` is supported for any estimator, but is more
computationally intensive.
- `'auto'`: `'recursion'` is used for estimators that support it,
and `'brute'` is used otherwise. If `sample_weight` is not `None`,
then `'brute'` is used regardless of the estimator.
Please see :ref:`this note <pdp_method_differences>` for
differences between the `'brute'` and `'recursion'` methods.
n_jobs : int, default=None
The number of CPUs to use to compute the partial dependences.
Computation is parallelized over features specified by the `features`
parameter.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : int, default=0
Verbose output during PD computations.
line_kw : dict, default=None
Dict with keywords passed to the ``matplotlib.pyplot.plot`` call.
For one-way partial dependence plots. It can be used to define common
properties for both `ice_lines_kw` and `pd_line_kw`.
ice_lines_kw : dict, default=None
Dictionary with keywords passed to the `matplotlib.pyplot.plot` call.
For ICE lines in the one-way partial dependence plots.
The key-value pairs defined in `ice_lines_kw` take priority over
`line_kw`.
pd_line_kw : dict, default=None
Dictionary with keywords passed to the `matplotlib.pyplot.plot` call.
For partial dependence in one-way partial dependence plots.
The key-value pairs defined in `pd_line_kw` take priority over
`line_kw`.
contour_kw : dict, default=None
Dict with keywords passed to the ``matplotlib.pyplot.contourf`` call.
For two-way partial dependence plots.
ax : Matplotlib axes or array-like of Matplotlib axes, default=None
- If a single axis is passed in, it is treated as a bounding axes
and a grid of partial dependence plots will be drawn within
these bounds. The `n_cols` parameter controls the number of
columns in the grid.
- If an array-like of axes is passed in, the partial dependence
plots will be drawn directly into these axes.
- If `None`, a figure and a bounding axes are created and treated
as the single axes case.
kind : {'average', 'individual', 'both'}, default='average'
Whether to plot the partial dependence averaged across all the samples
in the dataset, one line per sample, or both.
- ``kind='average'`` results in the traditional PD plot;
- ``kind='individual'`` results in the ICE plot.
Note that the fast `method='recursion'` option is only available for
`kind='average'` and `sample_weight=None`. Computing individual
dependencies and doing weighted averages requires using the slower
`method='brute'`.
centered : bool, default=False
If `True`, the ICE and PD lines will start at the origin of the
y-axis. By default, no centering is done.
.. versionadded:: 1.1
subsample : float, int or None, default=1000
Sampling for ICE curves when `kind` is 'individual' or 'both'.
If `float`, should be between 0.0 and 1.0 and represent the proportion
of the dataset to be used to plot ICE curves. If `int`, represents the
absolute number of samples to use.
Note that the full dataset is still used to calculate averaged partial
dependence when `kind='both'`.
random_state : int, RandomState instance or None, default=None
Controls the randomness of the selected samples when `subsample` is not
`None` and `kind` is either `'both'` or `'individual'`.
See :term:`Glossary <random_state>` for details.
Returns
-------
display : :class:`~sklearn.inspection.PartialDependenceDisplay`
See Also
--------
partial_dependence : Compute Partial Dependence values.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> from sklearn.inspection import PartialDependenceDisplay
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> PartialDependenceDisplay.from_estimator(clf, X, [0, (0, 1)])
<...>
>>> plt.show()
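As an additional sketch (not part of the upstream example), the
`kind='both'` and `centered` options described above can be combined,
reusing the `clf` and `X` fitted just before; `centered` assumes
version 1.1 or later:
>>> disp = PartialDependenceDisplay.from_estimator(
...     clf, X, [0, 1], kind="both", centered=True,
...     subsample=50, random_state=0,
... )
>>> plt.show()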
"""
check_matplotlib_support(f"{cls.__name__}.from_estimator")
import matplotlib.pyplot as plt
# set target_idx for multi-class estimators
if hasattr(estimator, "classes_") and np.size(estimator.classes_) > 2:
if target is None:
raise ValueError("target must be specified for multi-class")
target_idx = np.searchsorted(estimator.classes_, target)
if (
not (0 <= target_idx < len(estimator.classes_))
or estimator.classes_[target_idx] != target
):
raise ValueError("target not in est.classes_, got {}".format(target))
else:
# regression and binary classification
target_idx = 0
# Use check_array only on lists and other non-array-likes / sparse. Do not
# convert DataFrame into a NumPy array.
if not (hasattr(X, "__array__") or sparse.issparse(X)):
X = check_array(X, ensure_all_finite="allow-nan", dtype=object)
n_features = X.shape[1]
feature_names = _check_feature_names(X, feature_names)
# expand kind to always be a list of str
kind_ = [kind] * len(features) if isinstance(kind, str) else kind
if len(kind_) != len(features):
raise ValueError(
"When `kind` is provided as a list of strings, it should contain "
f"as many elements as `features`. `kind` contains {len(kind_)} "
f"element(s) and `features` contains {len(features)} element(s)."
)
# convert features into a seq of int tuples
tmp_features, ice_for_two_way_pd = [], []
for kind_plot, fxs in zip(kind_, features):
if isinstance(fxs, (numbers.Integral, str)):
fxs = (fxs,)
try:
fxs = tuple(
_get_feature_index(fx, feature_names=feature_names) for fx in fxs
)
except TypeError as e:
raise ValueError(
"Each entry in features must be either an int, "
"a string, or an iterable of size at most 2."
) from e
if not 1 <= np.size(fxs) <= 2:
raise ValueError(
"Each entry in features must be either an int, "
"a string, or an iterable of size at most 2."
)
# store the information if 2-way PD was requested with ICE to later
# raise a ValueError with an exhaustive list of problematic
# settings.
ice_for_two_way_pd.append(kind_plot != "average" and np.size(fxs) > 1)
tmp_features.append(fxs)
if any(ice_for_two_way_pd):
# raise an error and be specific regarding the parameter values
# when 1- and 2-way PD were requested
kind_ = [
"average" if forcing_average else kind_plot
for forcing_average, kind_plot in zip(ice_for_two_way_pd, kind_)
]
raise ValueError(
"ICE plot cannot be rendered for 2-way feature interactions. "
"2-way feature interactions mandates PD plots using the "
"'average' kind: "
f"features={features!r} should be configured to use "
f"kind={kind_!r} explicitly."
)
features = tmp_features
if categorical_features is None:
is_categorical = [
(False,) if len(fxs) == 1 else (False, False) for fxs in features
]
else:
# we need to create a boolean indicator of which features are
# categorical from the categorical_features list.
categorical_features = np.asarray(categorical_features)
if categorical_features.dtype.kind == "b":
# categorical features provided as a list of boolean
if categorical_features.size != n_features:
raise ValueError(
"When `categorical_features` is a boolean array-like, "
"the array should be of shape (n_features,). Got "
f"{categorical_features.size} elements while `X` contains "
f"{n_features} features."
)
is_categorical = [
tuple(categorical_features[fx] for fx in fxs) for fxs in features
]
elif categorical_features.dtype.kind in ("i", "O", "U"):
# categorical features provided as a list of indices or feature names
categorical_features_idx = [
_get_feature_index(cat, feature_names=feature_names)
for cat in categorical_features
]
is_categorical = [
tuple([idx in categorical_features_idx for idx in fxs])
for fxs in features
]
else:
raise ValueError(
"Expected `categorical_features` to be an array-like of boolean,"
f" integer, or string. Got {categorical_features.dtype} instead."
)
for cats in is_categorical:
if np.size(cats) == 2 and (cats[0] != cats[1]):
raise ValueError(
"Two-way partial dependence plots are not supported for pairs"
" of continuous and categorical features."
)
# collect the indices of the categorical features targeted by the partial
# dependence computation
categorical_features_targeted = set(
[
fx
for fxs, cats in zip(features, is_categorical)
for fx in fxs
if any(cats)
]
)
if categorical_features_targeted:
min_n_cats = min(
[
len(_unique(_safe_indexing(X, idx, axis=1)))
for idx in categorical_features_targeted
]
)
if grid_resolution < min_n_cats:
raise ValueError(
"The resolution of the computed grid is less than the "
"minimum number of categories in the targeted categorical "
"features. Expect the `grid_resolution` to be greater than "
f"{min_n_cats}. Got {grid_resolution} instead."
)
for is_cat, kind_plot in zip(is_categorical, kind_):
if any(is_cat) and kind_plot != "average":
raise ValueError(
"It is not possible to display individual effects for"
" categorical features."
)
# Early exit if the axes does not have the correct number of axes
if ax is not None and not isinstance(ax, plt.Axes):
axes = np.asarray(ax, dtype=object)
if axes.size != len(features):
raise ValueError(
"Expected ax to have {} axes, got {}".format(
len(features), axes.size
)
)
for i in chain.from_iterable(features):
if i >= len(feature_names):
raise ValueError(
"All entries of features must be less than "
"len(feature_names) = {0}, got {1}.".format(len(feature_names), i)
)
if isinstance(subsample, numbers.Integral):
if subsample <= 0:
raise ValueError(
f"When an integer, subsample={subsample} should be positive."
)
elif isinstance(subsample, numbers.Real):
if subsample <= 0 or subsample >= 1:
raise ValueError(
f"When a floating-point, subsample={subsample} should be in "
"the (0, 1) range."
)
# compute predictions and/or averaged predictions
pd_results = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(
estimator,
X,
fxs,
sample_weight=sample_weight,
feature_names=feature_names,
categorical_features=categorical_features,
response_method=response_method,
method=method,
grid_resolution=grid_resolution,
percentiles=percentiles,
kind=kind_plot,
custom_values=custom_values,
)
for kind_plot, fxs in zip(kind_, features)
)
# For multioutput regression, we can only check the validity of target
# now that we have the predictions.
# Also note: as multiclass-multioutput classifiers are not supported,
# multiclass and multioutput scenario are mutually exclusive. So there is
# no risk of overwriting target_idx here.
pd_result = pd_results[0] # checking the first result is enough
n_tasks = (
pd_result.average.shape[0]
if kind_[0] == "average"
else pd_result.individual.shape[0]
)
if is_regressor(estimator) and n_tasks > 1:
if target is None:
raise ValueError("target must be specified for multi-output regressors")
if not 0 <= target <= n_tasks:
raise ValueError(
"target must be in [0, n_tasks], got {}.".format(target)
)
target_idx = target
deciles = {}
for fxs, cats in zip(features, is_categorical):
for fx, cat in zip(fxs, cats):
if not cat and fx not in deciles:
X_col = _safe_indexing(X, fx, axis=1)
deciles[fx] = mquantiles(X_col, prob=np.arange(0.1, 1.0, 0.1))
display = cls(
pd_results=pd_results,
features=features,
feature_names=feature_names,
target_idx=target_idx,
deciles=deciles,
kind=kind,
subsample=subsample,
random_state=random_state,
is_categorical=is_categorical,
)
return display.plot(
ax=ax,
n_cols=n_cols,
line_kw=line_kw,
ice_lines_kw=ice_lines_kw,
pd_line_kw=pd_line_kw,
contour_kw=contour_kw,
centered=centered,
)
|
Partial dependence (PD) and individual conditional expectation (ICE) plots.
Partial dependence plots, individual conditional expectation plots, or an
overlay of both can be plotted by setting the `kind` parameter.
This method generates one plot for each entry in `features`. The plots
are arranged in a grid with `n_cols` columns. For one-way partial
dependence plots, the deciles of the feature values are shown on the
x-axis. For two-way plots, the deciles are shown on both axes and PDPs
are contour plots.
For general information regarding `scikit-learn` visualization tools, see
the :ref:`Visualization Guide <visualizations>`.
For guidance on interpreting these plots, refer to the
:ref:`Inspection Guide <partial_dependence>`.
For an example on how to use this class method, see
:ref:`sphx_glr_auto_examples_inspection_plot_partial_dependence.py`.
.. note::
:func:`PartialDependenceDisplay.from_estimator` does not support using the
same axes with multiple calls. To plot the partial dependence for
multiple estimators, please pass the axes created by the first call to the
second call::
>>> from sklearn.inspection import PartialDependenceDisplay
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.linear_model import LinearRegression
>>> from sklearn.ensemble import RandomForestRegressor
>>> X, y = make_friedman1()
>>> est1 = LinearRegression().fit(X, y)
>>> est2 = RandomForestRegressor().fit(X, y)
>>> disp1 = PartialDependenceDisplay.from_estimator(est1, X,
... [1, 2])
>>> disp2 = PartialDependenceDisplay.from_estimator(est2, X, [1, 2],
... ax=disp1.axes_)
.. warning::
For :class:`~sklearn.ensemble.GradientBoostingClassifier` and
:class:`~sklearn.ensemble.GradientBoostingRegressor`, the
`'recursion'` method (used by default) will not account for the `init`
predictor of the boosting process. In practice, this will produce
the same values as `'brute'` up to a constant offset in the target
response, provided that `init` is a constant estimator (which is the
default). However, if `init` is not a constant estimator, the
partial dependence values are incorrect for `'recursion'` because the
offset will be sample-dependent. It is preferable to use the `'brute'`
method. Note that this only applies to
:class:`~sklearn.ensemble.GradientBoostingClassifier` and
:class:`~sklearn.ensemble.GradientBoostingRegressor`, not to
:class:`~sklearn.ensemble.HistGradientBoostingClassifier` and
:class:`~sklearn.ensemble.HistGradientBoostingRegressor`.
.. versionadded:: 1.0
Parameters
----------
estimator : BaseEstimator
A fitted estimator object implementing :term:`predict`,
:term:`predict_proba`, or :term:`decision_function`.
Multioutput-multiclass classifiers are not supported.
X : {array-like, dataframe} of shape (n_samples, n_features)
``X`` is used to generate a grid of values for the target
``features`` (where the partial dependence will be evaluated), and
also to generate values for the complement features when the
`method` is `'brute'`.
features : list of {int, str, pair of int, pair of str}
The target features for which to create the PDPs.
If `features[i]` is an integer or a string, a one-way PDP is created;
if `features[i]` is a tuple, a two-way PDP is created (only supported
with `kind='average'`). Each tuple must be of size 2.
If any entry is a string, then it must be in ``feature_names``.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights are used to calculate weighted means when averaging the
model output. If `None`, then samples are equally weighted. If
`sample_weight` is not `None`, then `method` will be set to `'brute'`.
Note that `sample_weight` is ignored for `kind='individual'`.
.. versionadded:: 1.3
categorical_features : array-like of shape (n_features,) or shape (n_categorical_features,), dtype={bool, int, str}, default=None
Indicates the categorical features.
- `None`: no feature will be considered categorical;
- boolean array-like: boolean mask of shape `(n_features,)`
indicating which features are categorical. Thus, this array has
a length equal to `X.shape[1]`;
- integer or string array-like: integer indices or strings
indicating categorical features.
.. versionadded:: 1.2
feature_names : array-like of shape (n_features,), dtype=str, default=None
Name of each feature; `feature_names[i]` holds the name of the feature
with index `i`.
By default, the name of each feature corresponds to its numerical
index for a NumPy array and its column name for a pandas dataframe.
target : int, default=None
- In a multiclass setting, specifies the class for which the PDPs
should be computed. Note that for binary classification, the
positive class (index 1) is always used.
- In a multioutput setting, specifies the task for which the PDPs
should be computed.
Ignored in binary classification or classical regression settings.
response_method : {'auto', 'predict_proba', 'decision_function'}, default='auto'
Specifies whether to use :term:`predict_proba` or
:term:`decision_function` as the target response. For regressors
this parameter is ignored and the response is always the output of
:term:`predict`. By default, :term:`predict_proba` is tried first
and we revert to :term:`decision_function` if it doesn't exist. If
``method`` is `'recursion'`, the response is always the output of
:term:`decision_function`.
n_cols : int, default=3
The maximum number of columns in the grid plot. Only active when `ax`
is a single axis or `None`.
grid_resolution : int, default=100
The number of equally spaced points on the axes of the plots, for each
target feature.
This parameter is overridden by `custom_values` if that parameter is set.
percentiles : tuple of float, default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the PDP axes. Must be in [0, 1].
This parameter is overridden by `custom_values` if that parameter is set.
custom_values : dict
A dictionary mapping the index of an element of `features` to an
array of values where the partial dependence should be calculated
for that feature. Setting a range of values for a feature overrides
`grid_resolution` and `percentiles`.
.. versionadded:: 1.7
method : str, default='auto'
The method used to calculate the averaged predictions:
- `'recursion'` is only supported for some tree-based estimators
(namely
:class:`~sklearn.ensemble.GradientBoostingClassifier`,
:class:`~sklearn.ensemble.GradientBoostingRegressor`,
:class:`~sklearn.ensemble.HistGradientBoostingClassifier`,
:class:`~sklearn.ensemble.HistGradientBoostingRegressor`,
:class:`~sklearn.tree.DecisionTreeRegressor`,
:class:`~sklearn.ensemble.RandomForestRegressor`)
but is more efficient in terms of speed.
With this method, the target response of a
classifier is always the decision function, not the predicted
probabilities. Since the `'recursion'` method implicitly computes
the average of the ICEs by design, it is not compatible with ICE and
thus `kind` must be `'average'`.
- `'brute'` is supported for any estimator, but is more
computationally intensive.
- `'auto'`: `'recursion'` is used for estimators that support it,
and `'brute'` is used otherwise. If `sample_weight` is not `None`,
then `'brute'` is used regardless of the estimator.
Please see :ref:`this note <pdp_method_differences>` for
differences between the `'brute'` and `'recursion'` methods.
n_jobs : int, default=None
The number of CPUs to use to compute the partial dependences.
Computation is parallelized over features specified by the `features`
parameter.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : int, default=0
Verbose output during PD computations.
line_kw : dict, default=None
Dict with keywords passed to the ``matplotlib.pyplot.plot`` call.
For one-way partial dependence plots. It can be used to define common
properties for both `ice_lines_kw` and `pd_line_kw`.
ice_lines_kw : dict, default=None
Dictionary with keywords passed to the `matplotlib.pyplot.plot` call.
For ICE lines in the one-way partial dependence plots.
The key-value pairs defined in `ice_lines_kw` take priority over
`line_kw`.
pd_line_kw : dict, default=None
Dictionary with keywords passed to the `matplotlib.pyplot.plot` call.
For partial dependence in one-way partial dependence plots.
The key-value pairs defined in `pd_line_kw` take priority over
`line_kw`.
contour_kw : dict, default=None
Dict with keywords passed to the ``matplotlib.pyplot.contourf`` call.
For two-way partial dependence plots.
ax : Matplotlib axes or array-like of Matplotlib axes, default=None
- If a single axis is passed in, it is treated as a bounding axes
and a grid of partial dependence plots will be drawn within
these bounds. The `n_cols` parameter controls the number of
columns in the grid.
- If an array-like of axes is passed in, the partial dependence
plots will be drawn directly into these axes.
- If `None`, a figure and a bounding axes are created and treated
as the single axes case.
kind : {'average', 'individual', 'both'}, default='average'
Whether to plot the partial dependence averaged across all the samples
in the dataset, one line per sample, or both.
- ``kind='average'`` results in the traditional PD plot;
- ``kind='individual'`` results in the ICE plot.
Note that the fast `method='recursion'` option is only available for
`kind='average'` and `sample_weight=None`. Computing individual
dependencies and doing weighted averages requires using the slower
`method='brute'`.
centered : bool, default=False
If `True`, the ICE and PD lines will start at the origin of the
y-axis. By default, no centering is done.
.. versionadded:: 1.1
subsample : float, int or None, default=1000
Sampling for ICE curves when `kind` is 'individual' or 'both'.
If `float`, should be between 0.0 and 1.0 and represent the proportion
of the dataset to be used to plot ICE curves. If `int`, represents the
absolute number of samples to use.
Note that the full dataset is still used to calculate averaged partial
dependence when `kind='both'`.
random_state : int, RandomState instance or None, default=None
Controls the randomness of the selected samples when `subsample` is not
`None` and `kind` is either `'both'` or `'individual'`.
See :term:`Glossary <random_state>` for details.
Returns
-------
display : :class:`~sklearn.inspection.PartialDependenceDisplay`
See Also
--------
partial_dependence : Compute Partial Dependence values.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> from sklearn.inspection import PartialDependenceDisplay
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> PartialDependenceDisplay.from_estimator(clf, X, [0, (0, 1)])
<...>
>>> plt.show()
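As an additional sketch (not part of the upstream example), `ice_lines_kw`
and `pd_line_kw` can restyle a combined ICE/PD plot, again reusing the
`clf` and `X` from above:
>>> disp = PartialDependenceDisplay.from_estimator(
...     clf, X, [0], kind="both", subsample=50, random_state=0,
...     ice_lines_kw={"color": "tab:blue", "alpha": 0.2},
...     pd_line_kw={"color": "tab:orange", "linestyle": "--"},
... )
>>> plt.show()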
|
from_estimator
|
python
|
scikit-learn/scikit-learn
|
sklearn/inspection/_plot/partial_dependence.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/inspection/_plot/partial_dependence.py
|
BSD-3-Clause
|
def _get_sample_count(self, n_samples):
"""Compute the number of samples as an integer."""
if isinstance(self.subsample, numbers.Integral):
if self.subsample < n_samples:
return self.subsample
return n_samples
elif isinstance(self.subsample, numbers.Real):
return ceil(n_samples * self.subsample)
return n_samples
|
Compute the number of samples as an integer.
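For illustration only, a standalone re-statement of this branching (the
helper name `sample_count` is hypothetical, not part of scikit-learn's
API): an integer `subsample` is capped at `n_samples`, a float is treated
as a proportion and rounded up, and any other value falls through to
`n_samples`.
>>> import numbers
>>> from math import ceil
>>> def sample_count(subsample, n_samples):
...     # mirrors the branching of _get_sample_count above
...     if isinstance(subsample, numbers.Integral):
...         return min(subsample, n_samples)   # never exceed the data size
...     if isinstance(subsample, numbers.Real):
...         return ceil(n_samples * subsample)  # proportion of the data
...     return n_samples                        # e.g. subsample=None
>>> sample_count(500, 1000), sample_count(0.25, 1000), sample_count(None, 1000)
(500, 250, 1000)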
|
_get_sample_count
|
python
|
scikit-learn/scikit-learn
|
sklearn/inspection/_plot/partial_dependence.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/inspection/_plot/partial_dependence.py
|
BSD-3-Clause
|