code: string (lengths 66–870k) | docstring: string (lengths 19–26.7k) | func_name: string (lengths 1–138) | language: string (1 class) | repo: string (lengths 7–68) | path: string (lengths 5–324) | url: string (lengths 46–389) | license: string (7 classes) |
---|---|---|---|---|---|---|---|
def test_variance_correctness(copy):
"""Check the accuracy of PCA's internal variance calculation"""
rng = np.random.RandomState(0)
X = rng.randn(1000, 200)
pca = PCA().fit(X)
pca_var = pca.explained_variance_ / pca.explained_variance_ratio_
true_var = np.var(X, ddof=1, axis=0).sum()
np.testing.assert_allclose(pca_var, true_var)
|
Check the accuracy of PCA's internal variance calculation
|
test_variance_correctness
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_pca.py
|
BSD-3-Clause
|
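The test above relies on the identity `explained_variance_ratio_ = explained_variance_ / total_variance`. A minimal standalone sketch of the same check (smaller data, not part of the test suite) looks like this:

```python
# Sketch: the element-wise ratio of explained_variance_ to
# explained_variance_ratio_ should equal the total sample variance.
import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
X = rng.randn(200, 20)

pca = PCA().fit(X)
total_variance = np.var(X, ddof=1, axis=0).sum()
np.testing.assert_allclose(
    pca.explained_variance_ / pca.explained_variance_ratio_, total_variance
)
```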
def test_spca_feature_names_out(SPCA):
"""Check feature names out for *SparsePCA."""
rng = np.random.RandomState(0)
n_samples, n_features = 12, 10
X = rng.randn(n_samples, n_features)
model = SPCA(n_components=4).fit(X)
names = model.get_feature_names_out()
estimator_name = SPCA.__name__.lower()
assert_array_equal([f"{estimator_name}{i}" for i in range(4)], names)
|
Check feature names out for *SparsePCA.
|
test_spca_feature_names_out
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_sparse_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_sparse_pca.py
|
BSD-3-Clause
|
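A short usage sketch of the behavior this test exercises (assuming a recent scikit-learn release where `SparsePCA` exposes `get_feature_names_out`):

```python
# Sketch: SparsePCA names its output features "<estimator name><index>".
import numpy as np
from sklearn.decomposition import SparsePCA

rng = np.random.RandomState(0)
X = rng.randn(12, 10)

model = SparsePCA(n_components=4, random_state=0).fit(X)
print(model.get_feature_names_out())
# expected: ['sparsepca0' 'sparsepca1' 'sparsepca2' 'sparsepca3']
```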
def test_spca_early_stopping(global_random_seed):
"""Check that `tol` and `max_no_improvement` act as early stopping."""
rng = np.random.RandomState(global_random_seed)
n_samples, n_features = 50, 10
X = rng.randn(n_samples, n_features)
# vary the tolerance to force the early stopping of one of the models
model_early_stopped = MiniBatchSparsePCA(
max_iter=100, tol=0.5, random_state=global_random_seed
).fit(X)
model_not_early_stopped = MiniBatchSparsePCA(
max_iter=100, tol=1e-3, random_state=global_random_seed
).fit(X)
assert model_early_stopped.n_iter_ < model_not_early_stopped.n_iter_
# vary `max_no_improvement`: a small value should trigger early stopping,
# while a large value should not
model_early_stopped = MiniBatchSparsePCA(
max_iter=100, tol=1e-6, max_no_improvement=2, random_state=global_random_seed
).fit(X)
model_not_early_stopped = MiniBatchSparsePCA(
max_iter=100, tol=1e-6, max_no_improvement=100, random_state=global_random_seed
).fit(X)
assert model_early_stopped.n_iter_ < model_not_early_stopped.n_iter_
|
Check that `tol` and `max_no_improvement` act as early stopping.
|
test_spca_early_stopping
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_sparse_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_sparse_pca.py
|
BSD-3-Clause
|
def test_equivalence_components_pca_spca(global_random_seed):
"""Check the equivalence of the components found by PCA and SparsePCA.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/23932
"""
rng = np.random.RandomState(global_random_seed)
X = rng.randn(50, 4)
n_components = 2
pca = PCA(
n_components=n_components,
svd_solver="randomized",
random_state=0,
).fit(X)
spca = SparsePCA(
n_components=n_components,
method="lars",
ridge_alpha=0,
alpha=0,
random_state=0,
).fit(X)
assert_allclose(pca.components_, spca.components_)
|
Check the equivalence of the components found by PCA and SparsePCA.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/23932
|
test_equivalence_components_pca_spca
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_sparse_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_sparse_pca.py
|
BSD-3-Clause
|
def test_sparse_pca_inverse_transform(global_random_seed):
"""Check that `inverse_transform` in `SparsePCA` and `PCA` are similar."""
rng = np.random.RandomState(global_random_seed)
n_samples, n_features = 10, 5
X = rng.randn(n_samples, n_features)
n_components = 2
spca = SparsePCA(
n_components=n_components,
alpha=1e-12,
ridge_alpha=1e-12,
random_state=global_random_seed,
)
pca = PCA(n_components=n_components, random_state=global_random_seed)
X_trans_spca = spca.fit_transform(X)
X_trans_pca = pca.fit_transform(X)
assert_allclose(
spca.inverse_transform(X_trans_spca), pca.inverse_transform(X_trans_pca)
)
|
Check that `inverse_transform` in `SparsePCA` and `PCA` are similar.
|
test_sparse_pca_inverse_transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_sparse_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_sparse_pca.py
|
BSD-3-Clause
|
def test_transform_inverse_transform_round_trip(SPCA, global_random_seed):
"""Check the `transform` and `inverse_transform` round trip with no loss of
information.
"""
rng = np.random.RandomState(global_random_seed)
n_samples, n_features = 10, 5
X = rng.randn(n_samples, n_features)
n_components = n_features
spca = SPCA(
n_components=n_components,
alpha=1e-12,
ridge_alpha=1e-12,
random_state=global_random_seed,
)
X_trans_spca = spca.fit_transform(X)
assert_allclose(spca.inverse_transform(X_trans_spca), X)
|
Check the `transform` and `inverse_transform` round trip with no loss of
information.
|
test_transform_inverse_transform_round_trip
|
python
|
scikit-learn/scikit-learn
|
sklearn/decomposition/tests/test_sparse_pca.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/decomposition/tests/test_sparse_pca.py
|
BSD-3-Clause
|
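The round-trip property checked by these tests also holds for plain `PCA` when all components are kept; a standalone sketch (not part of the test suite):

```python
# Sketch: with n_components == n_features, transform followed by
# inverse_transform reconstructs X up to numerical noise.
import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
X = rng.randn(10, 5)

pca = PCA(n_components=X.shape[1]).fit(X)
X_roundtrip = pca.inverse_transform(pca.transform(X))
np.testing.assert_allclose(X_roundtrip, X)
```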
def _generate_bagging_indices(
random_state,
bootstrap_features,
bootstrap_samples,
n_features,
n_samples,
max_features,
max_samples,
):
"""Randomly draw feature and sample indices."""
# Get valid random state
random_state = check_random_state(random_state)
# Draw indices
feature_indices = _generate_indices(
random_state, bootstrap_features, n_features, max_features
)
sample_indices = _generate_indices(
random_state, bootstrap_samples, n_samples, max_samples
)
return feature_indices, sample_indices
|
Randomly draw feature and sample indices.
|
_generate_bagging_indices
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_bagging.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_bagging.py
|
BSD-3-Clause
|
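`_generate_indices`, which `_generate_bagging_indices` delegates to, is not shown above. A hedged NumPy sketch of the two sampling modes it is documented to implement (with and without replacement), not the exact scikit-learn implementation:

```python
# Sketch of index drawing: bootstrap draws with replacement, otherwise a
# subset of distinct indices is drawn.
import numpy as np

def draw_indices(random_state, bootstrap, n_population, n_samples):
    """Draw `n_samples` indices out of `n_population`."""
    if bootstrap:
        return random_state.randint(0, n_population, n_samples)
    return random_state.permutation(n_population)[:n_samples]

rng = np.random.RandomState(0)
print(draw_indices(rng, True, 10, 6))   # may contain repeated indices
print(draw_indices(rng, False, 10, 6))  # distinct indices
```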
def _parallel_build_estimators(
n_estimators,
ensemble,
X,
y,
seeds,
total_n_estimators,
verbose,
check_input,
fit_params,
):
"""Private function used to build a batch of estimators within a job."""
# Retrieve settings
n_samples, n_features = X.shape
max_features = ensemble._max_features
max_samples = ensemble._max_samples
bootstrap = ensemble.bootstrap
bootstrap_features = ensemble.bootstrap_features
has_check_input = has_fit_parameter(ensemble.estimator_, "check_input")
requires_feature_indexing = bootstrap_features or max_features != n_features
# Build estimators
estimators = []
estimators_features = []
# TODO: (slep6) remove if condition for unrouted sample_weight when metadata
# routing can't be disabled.
support_sample_weight = has_fit_parameter(ensemble.estimator_, "sample_weight")
if not _routing_enabled() and (
not support_sample_weight and fit_params.get("sample_weight") is not None
):
raise ValueError(
"The base estimator doesn't support sample weight, but sample_weight is "
"passed to the fit method."
)
for i in range(n_estimators):
if verbose > 1:
print(
"Building estimator %d of %d for this parallel run (total %d)..."
% (i + 1, n_estimators, total_n_estimators)
)
random_state = seeds[i]
estimator = ensemble._make_estimator(append=False, random_state=random_state)
if has_check_input:
estimator_fit = partial(estimator.fit, check_input=check_input)
else:
estimator_fit = estimator.fit
# Draw random feature, sample indices
features, indices = _generate_bagging_indices(
random_state,
bootstrap_features,
bootstrap,
n_features,
n_samples,
max_features,
max_samples,
)
fit_params_ = fit_params.copy()
# TODO(SLEP6): remove if condition for unrouted sample_weight when metadata
# routing can't be disabled.
# 1. If routing is enabled, we will check if the routing supports sample
# weight and use it if it does.
# 2. If routing is not enabled, we will check if the base
# estimator supports sample_weight and use it if it does.
# Note: Row sampling can be achieved either through setting sample_weight or
# by indexing. The former is more efficient. Therefore, use this method
# if possible, otherwise use indexing.
if _routing_enabled():
request_or_router = get_routing_for_object(ensemble.estimator_)
consumes_sample_weight = request_or_router.consumes(
"fit", ("sample_weight",)
)
else:
consumes_sample_weight = support_sample_weight
if consumes_sample_weight:
# Draw sub samples, using sample weights, and then fit
curr_sample_weight = _check_sample_weight(
fit_params_.pop("sample_weight", None), X
).copy()
if bootstrap:
sample_counts = np.bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
else:
not_indices_mask = ~indices_to_mask(indices, n_samples)
curr_sample_weight[not_indices_mask] = 0
fit_params_["sample_weight"] = curr_sample_weight
X_ = X[:, features] if requires_feature_indexing else X
estimator_fit(X_, y, **fit_params_)
else:
# cannot use sample_weight, so use indexing
y_ = _safe_indexing(y, indices)
X_ = _safe_indexing(X, indices)
fit_params_ = _check_method_params(X, params=fit_params_, indices=indices)
if requires_feature_indexing:
X_ = X_[:, features]
estimator_fit(X_, y_, **fit_params_)
estimators.append(estimator)
estimators_features.append(features)
return estimators, estimators_features
|
Private function used to build a batch of estimators within a job.
|
_parallel_build_estimators
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_bagging.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_bagging.py
|
BSD-3-Clause
|
def _parallel_predict_proba(
estimators,
estimators_features,
X,
n_classes,
predict_params=None,
predict_proba_params=None,
):
"""Private function used to compute (proba-)predictions within a job."""
n_samples = X.shape[0]
proba = np.zeros((n_samples, n_classes))
for estimator, features in zip(estimators, estimators_features):
if hasattr(estimator, "predict_proba"):
proba_estimator = estimator.predict_proba(
X[:, features], **(predict_params or {})
)
if n_classes == len(estimator.classes_):
proba += proba_estimator
else:
proba[:, estimator.classes_] += proba_estimator[
:, range(len(estimator.classes_))
]
else:
# Resort to voting
predictions = estimator.predict(
X[:, features], **(predict_proba_params or {})
)
for i in range(n_samples):
proba[i, predictions[i]] += 1
return proba
|
Private function used to compute (proba-)predictions within a job.
|
_parallel_predict_proba
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_bagging.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_bagging.py
|
BSD-3-Clause
|
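A small sketch of the class-alignment step performed above when an estimator was fitted on only a subset of the classes (assuming integer-encoded class indices, as in the bagging code):

```python
# Sketch: probability columns of a sub-estimator are accumulated into the
# global probability array at the positions given by estimator.classes_.
import numpy as np

n_classes = 4
proba = np.zeros((3, n_classes))

estimator_classes = np.array([0, 2])          # estimator only saw classes 0 and 2
proba_estimator = np.array([[0.7, 0.3],
                            [0.2, 0.8],
                            [0.5, 0.5]])

proba[:, estimator_classes] += proba_estimator
print(proba)
```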
def _parallel_predict_log_proba(estimators, estimators_features, X, n_classes, params):
"""Private function used to compute log probabilities within a job."""
n_samples = X.shape[0]
log_proba = np.empty((n_samples, n_classes))
log_proba.fill(-np.inf)
all_classes = np.arange(n_classes, dtype=int)
for estimator, features in zip(estimators, estimators_features):
log_proba_estimator = estimator.predict_log_proba(X[:, features], **params)
if n_classes == len(estimator.classes_):
log_proba = np.logaddexp(log_proba, log_proba_estimator)
else:
log_proba[:, estimator.classes_] = np.logaddexp(
log_proba[:, estimator.classes_],
log_proba_estimator[:, range(len(estimator.classes_))],
)
missing = np.setdiff1d(all_classes, estimator.classes_)
log_proba[:, missing] = np.logaddexp(log_proba[:, missing], -np.inf)
return log_proba
|
Private function used to compute log probabilities within a job.
|
_parallel_predict_log_proba
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_bagging.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_bagging.py
|
BSD-3-Clause
|
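The `logaddexp` accumulation used above corresponds to averaging probabilities in log space: summing with `np.logaddexp` and later subtracting `log(n_estimators)` yields the log of the mean probability. A standalone sketch:

```python
# Sketch: logaddexp-accumulate two log-probability arrays, then subtract
# log(2) to obtain the log of their mean.
import numpy as np

log_p1 = np.log(np.array([[0.6, 0.4]]))
log_p2 = np.log(np.array([[0.2, 0.8]]))

log_mean = np.logaddexp(log_p1, log_p2) - np.log(2)
np.testing.assert_allclose(np.exp(log_mean), np.array([[0.4, 0.6]]))
```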
def _parallel_decision_function(estimators, estimators_features, X, params):
"""Private function used to compute decisions within a job."""
return sum(
estimator.decision_function(X[:, features], **params)
for estimator, features in zip(estimators, estimators_features)
)
|
Private function used to compute decisions within a job.
|
_parallel_decision_function
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_bagging.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_bagging.py
|
BSD-3-Clause
|
def _parallel_predict_regression(estimators, estimators_features, X, params):
"""Private function used to compute predictions within a job."""
return sum(
estimator.predict(X[:, features], **params)
for estimator, features in zip(estimators, estimators_features)
)
|
Private function used to compute predictions within a job.
|
_parallel_predict_regression
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_bagging.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_bagging.py
|
BSD-3-Clause
|
def fit(self, X, y, sample_weight=None, **fit_params):
"""Build a Bagging ensemble of estimators from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
y : array-like of shape (n_samples,)
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Note that this is supported only if the base estimator supports
sample weighting.
**fit_params : dict
Parameters to pass to the underlying estimators.
.. versionadded:: 1.5
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
self : object
Fitted estimator.
"""
_raise_for_params(fit_params, self, "fit")
# Convert data (X is required to be 2d and indexable)
X, y = validate_data(
self,
X,
y,
accept_sparse=["csr", "csc"],
dtype=None,
ensure_all_finite=False,
multi_output=True,
)
return self._fit(
X,
y,
max_samples=self.max_samples,
sample_weight=sample_weight,
**fit_params,
)
|
Build a Bagging ensemble of estimators from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
y : array-like of shape (n_samples,)
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Note that this is supported only if the base estimator supports
sample weighting.
**fit_params : dict
Parameters to pass to the underlying estimators.
.. versionadded:: 1.5
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
self : object
Fitted estimator.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_bagging.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_bagging.py
|
BSD-3-Clause
|
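A minimal usage sketch of this `fit` signature (standalone, assuming scikit-learn >= 1.2 where the parameter is named `estimator`); per-sample weights are forwarded to the base estimator when it supports `sample_weight`:

```python
# Sketch: fit a BaggingClassifier with sample weights.
import numpy as np
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier

rng = np.random.RandomState(0)
X = rng.randn(100, 4)
y = (X[:, 0] > 0).astype(int)
weights = rng.uniform(0.5, 1.5, size=100)

clf = BaggingClassifier(
    estimator=DecisionTreeClassifier(), n_estimators=10, random_state=0
)
clf.fit(X, y, sample_weight=weights)
print(clf.predict(X[:5]))
```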
def _fit(
self,
X,
y,
max_samples=None,
max_depth=None,
check_input=True,
sample_weight=None,
**fit_params,
):
"""Build a Bagging ensemble of estimators from the training
set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
y : array-like of shape (n_samples,)
The target values (class labels in classification, real numbers in
regression).
max_samples : int or float, default=None
Argument to use instead of self.max_samples.
max_depth : int, default=None
Override value used when constructing base estimator. Only
supported if the base estimator has a max_depth parameter.
check_input : bool, default=True
Override value used when fitting base estimator. Only supported
if the base estimator has a check_input parameter for fit function.
If the meta-estimator already checks the input, set this value to
False to prevent redundant input validation.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Note that this is supported only if the base estimator supports
sample weighting.
**fit_params : dict, default=None
Parameters to pass to the :term:`fit` method of the underlying
estimator.
Returns
-------
self : object
Fitted estimator.
"""
random_state = check_random_state(self.random_state)
# Remap output
n_samples = X.shape[0]
self._n_samples = n_samples
y = self._validate_y(y)
# Check parameters
self._validate_estimator(self._get_estimator())
if sample_weight is not None:
fit_params["sample_weight"] = sample_weight
if _routing_enabled():
routed_params = process_routing(self, "fit", **fit_params)
else:
routed_params = Bunch()
routed_params.estimator = Bunch(fit=fit_params)
if "sample_weight" in fit_params:
routed_params.estimator.fit["sample_weight"] = fit_params[
"sample_weight"
]
if max_depth is not None:
self.estimator_.max_depth = max_depth
# Validate max_samples
if max_samples is None:
max_samples = self.max_samples
elif not isinstance(max_samples, numbers.Integral):
max_samples = int(max_samples * X.shape[0])
if max_samples > X.shape[0]:
raise ValueError("max_samples must be <= n_samples")
# Store validated integer row sampling value
self._max_samples = max_samples
# Validate max_features
if isinstance(self.max_features, numbers.Integral):
max_features = self.max_features
elif isinstance(self.max_features, float):
max_features = int(self.max_features * self.n_features_in_)
if max_features > self.n_features_in_:
raise ValueError("max_features must be <= n_features")
max_features = max(1, int(max_features))
# Store validated integer feature sampling value
self._max_features = max_features
# Other checks
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available if bootstrap=True")
if self.warm_start and self.oob_score:
raise ValueError("Out of bag estimate only available if warm_start=False")
if hasattr(self, "oob_score_") and self.warm_start:
del self.oob_score_
if not self.warm_start or not hasattr(self, "estimators_"):
# Free allocated memory, if any
self.estimators_ = []
self.estimators_features_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
raise ValueError(
"n_estimators=%d must be larger or equal to "
"len(estimators_)=%d when warm_start==True"
% (self.n_estimators, len(self.estimators_))
)
elif n_more_estimators == 0:
warn(
"Warm-start fitting without increasing n_estimators does not "
"fit new trees."
)
return self
# Parallel loop
n_jobs, n_estimators, starts = _partition_estimators(
n_more_estimators, self.n_jobs
)
total_n_estimators = sum(n_estimators)
# Advance random state to state after training
# the first n_estimators
if self.warm_start and len(self.estimators_) > 0:
random_state.randint(MAX_INT, size=len(self.estimators_))
seeds = random_state.randint(MAX_INT, size=n_more_estimators)
self._seeds = seeds
all_results = Parallel(
n_jobs=n_jobs, verbose=self.verbose, **self._parallel_args()
)(
delayed(_parallel_build_estimators)(
n_estimators[i],
self,
X,
y,
seeds[starts[i] : starts[i + 1]],
total_n_estimators,
verbose=self.verbose,
check_input=check_input,
fit_params=routed_params.estimator.fit,
)
for i in range(n_jobs)
)
# Reduce
self.estimators_ += list(
itertools.chain.from_iterable(t[0] for t in all_results)
)
self.estimators_features_ += list(
itertools.chain.from_iterable(t[1] for t in all_results)
)
if self.oob_score:
self._set_oob_score(X, y)
return self
|
Build a Bagging ensemble of estimators from the training
set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
y : array-like of shape (n_samples,)
The target values (class labels in classification, real numbers in
regression).
max_samples : int or float, default=None
Argument to use instead of self.max_samples.
max_depth : int, default=None
Override value used when constructing base estimator. Only
supported if the base estimator has a max_depth parameter.
check_input : bool, default=True
Override value used when fitting base estimator. Only supported
if the base estimator has a check_input parameter for fit function.
If the meta-estimator already checks the input, set this value to
False to prevent redundant input validation.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Note that this is supported only if the base estimator supports
sample weighting.
**fit_params : dict, default=None
Parameters to pass to the :term:`fit` method of the underlying
estimator.
Returns
-------
self : object
Fitted estimator.
|
_fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_bagging.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_bagging.py
|
BSD-3-Clause
|
def get_metadata_routing(self):
"""Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.5
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
"""
router = MetadataRouter(owner=self.__class__.__name__)
method_mapping = MethodMapping()
method_mapping.add(caller="fit", callee="fit").add(
caller="decision_function", callee="decision_function"
)
# the router needs to be built depending on whether the sub-estimator has a
# `predict_proba` method (as BaggingClassifier decides dynamically at runtime):
if hasattr(self._get_estimator(), "predict_proba"):
(
method_mapping.add(caller="predict", callee="predict_proba").add(
caller="predict_proba", callee="predict_proba"
)
)
else:
(
method_mapping.add(caller="predict", callee="predict").add(
caller="predict_proba", callee="predict"
)
)
# the router needs to be built depending on whether the sub-estimator has a
# `predict_log_proba` method (as BaggingClassifier decides dynamically at
# runtime):
if hasattr(self._get_estimator(), "predict_log_proba"):
method_mapping.add(caller="predict_log_proba", callee="predict_log_proba")
else:
# if `predict_log_proba` is not available in BaggingClassifier's
# sub-estimator, the routing should go to its `predict_proba` if it is
# available or else to its `predict` method; according to how
# `sample_weight` is passed to the respective methods dynamically at
# runtime:
if hasattr(self._get_estimator(), "predict_proba"):
method_mapping.add(caller="predict_log_proba", callee="predict_proba")
else:
method_mapping.add(caller="predict_log_proba", callee="predict")
router.add(estimator=self._get_estimator(), method_mapping=method_mapping)
return router
|
Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.5
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
|
get_metadata_routing
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_bagging.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_bagging.py
|
BSD-3-Clause
|
def _get_estimator(self):
"""Resolve which estimator to return (default is DecisionTreeClassifier)"""
if self.estimator is None:
return DecisionTreeClassifier()
return self.estimator
|
Resolve which estimator to return (default is DecisionTreeClassifier)
|
_get_estimator
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_bagging.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_bagging.py
|
BSD-3-Clause
|
def predict(self, X, **params):
"""Predict class for X.
The predicted class of an input sample is computed as the class with
the highest mean predicted probability. If base estimators do not
implement a ``predict_proba`` method, then it resorts to voting.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
**params : dict
Parameters routed to the `predict_proba` (if available) or the `predict`
method (otherwise) of the sub-estimators via the metadata routing API.
.. versionadded:: 1.7
Only available if
`sklearn.set_config(enable_metadata_routing=True)` is set. See
:ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
y : ndarray of shape (n_samples,)
The predicted classes.
"""
_raise_for_params(params, self, "predict")
predicted_proba = self.predict_proba(X, **params)
return self.classes_.take((np.argmax(predicted_proba, axis=1)), axis=0)
|
Predict class for X.
The predicted class of an input sample is computed as the class with
the highest mean predicted probability. If base estimators do not
implement a ``predict_proba`` method, then it resorts to voting.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
**params : dict
Parameters routed to the `predict_proba` (if available) or the `predict`
method (otherwise) of the sub-estimators via the metadata routing API.
.. versionadded:: 1.7
Only available if
`sklearn.set_config(enable_metadata_routing=True)` is set. See
:ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
y : ndarray of shape (n_samples,)
The predicted classes.
|
predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_bagging.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_bagging.py
|
BSD-3-Clause
|
def predict_proba(self, X, **params):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the mean predicted class probabilities of the base estimators in the
ensemble. If base estimators do not implement a ``predict_proba``
method, then it resorts to voting and the predicted class probabilities
of an input sample represent the proportion of estimators predicting
each class.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
**params : dict
Parameters routed to the `predict_proba` (if available) or the `predict`
method (otherwise) of the sub-estimators via the metadata routing API.
.. versionadded:: 1.7
Only available if
`sklearn.set_config(enable_metadata_routing=True)` is set. See
:ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
p : ndarray of shape (n_samples, n_classes)
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
"""
_raise_for_params(params, self, "predict_proba")
check_is_fitted(self)
# Check data
X = validate_data(
self,
X,
accept_sparse=["csr", "csc"],
dtype=None,
ensure_all_finite=False,
reset=False,
)
if _routing_enabled():
routed_params = process_routing(self, "predict_proba", **params)
else:
routed_params = Bunch()
routed_params.estimator = Bunch(predict_proba=Bunch())
# Parallel loop
n_jobs, _, starts = _partition_estimators(self.n_estimators, self.n_jobs)
all_proba = Parallel(
n_jobs=n_jobs, verbose=self.verbose, **self._parallel_args()
)(
delayed(_parallel_predict_proba)(
self.estimators_[starts[i] : starts[i + 1]],
self.estimators_features_[starts[i] : starts[i + 1]],
X,
self.n_classes_,
predict_params=routed_params.estimator.get("predict", None),
predict_proba_params=routed_params.estimator.get("predict_proba", None),
)
for i in range(n_jobs)
)
# Reduce
proba = sum(all_proba) / self.n_estimators
return proba
|
Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the mean predicted class probabilities of the base estimators in the
ensemble. If base estimators do not implement a ``predict_proba``
method, then it resorts to voting and the predicted class probabilities
of an input sample represent the proportion of estimators predicting
each class.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
**params : dict
Parameters routed to the `predict_proba` (if available) or the `predict`
method (otherwise) of the sub-estimators via the metadata routing API.
.. versionadded:: 1.7
Only available if
`sklearn.set_config(enable_metadata_routing=True)` is set. See
:ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
p : ndarray of shape (n_samples, n_classes)
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
|
predict_proba
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_bagging.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_bagging.py
|
BSD-3-Clause
|
def predict_log_proba(self, X, **params):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample are computed as
the log of the mean predicted class probabilities of the base
estimators in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
**params : dict
Parameters routed to the `predict_log_proba`, the `predict_proba` or the
`predict` method of the sub-estimators via the metadata routing API. The
routing is tried in the mentioned order depending on whether this method is
available on the sub-estimator.
.. versionadded:: 1.7
Only available if
`sklearn.set_config(enable_metadata_routing=True)` is set. See
:ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
p : ndarray of shape (n_samples, n_classes)
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
"""
_raise_for_params(params, self, "predict_log_proba")
check_is_fitted(self)
if hasattr(self.estimator_, "predict_log_proba"):
# Check data
X = validate_data(
self,
X,
accept_sparse=["csr", "csc"],
dtype=None,
ensure_all_finite=False,
reset=False,
)
if _routing_enabled():
routed_params = process_routing(self, "predict_log_proba", **params)
else:
routed_params = Bunch()
routed_params.estimator = Bunch(predict_log_proba=Bunch())
# Parallel loop
n_jobs, _, starts = _partition_estimators(self.n_estimators, self.n_jobs)
all_log_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
delayed(_parallel_predict_log_proba)(
self.estimators_[starts[i] : starts[i + 1]],
self.estimators_features_[starts[i] : starts[i + 1]],
X,
self.n_classes_,
params=routed_params.estimator.predict_log_proba,
)
for i in range(n_jobs)
)
# Reduce
log_proba = all_log_proba[0]
for j in range(1, len(all_log_proba)):
log_proba = np.logaddexp(log_proba, all_log_proba[j])
log_proba -= np.log(self.n_estimators)
else:
log_proba = np.log(self.predict_proba(X, **params))
return log_proba
|
Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample are computed as
the log of the mean predicted class probabilities of the base
estimators in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
**params : dict
Parameters routed to the `predict_log_proba`, the `predict_proba` or the
`predict` method of the sub-estimators via the metadata routing API. The
routing is tried in the mentioned order depending on whether this method is
available on the sub-estimator.
.. versionadded:: 1.7
Only available if
`sklearn.set_config(enable_metadata_routing=True)` is set. See
:ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
p : ndarray of shape (n_samples, n_classes)
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
|
predict_log_proba
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_bagging.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_bagging.py
|
BSD-3-Clause
|
def decision_function(self, X, **params):
"""Average of the decision functions of the base classifiers.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
**params : dict
Parameters routed to the `decision_function` method of the sub-estimators
via the metadata routing API.
.. versionadded:: 1.7
Only available if
`sklearn.set_config(enable_metadata_routing=True)` is set. See
:ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
score : ndarray of shape (n_samples, k)
The decision function of the input samples. The columns correspond
to the classes in sorted order, as they appear in the attribute
``classes_``. Regression and binary classification are special
cases with ``k == 1``, otherwise ``k==n_classes``.
"""
_raise_for_params(params, self, "decision_function")
check_is_fitted(self)
# Check data
X = validate_data(
self,
X,
accept_sparse=["csr", "csc"],
dtype=None,
ensure_all_finite=False,
reset=False,
)
if _routing_enabled():
routed_params = process_routing(self, "decision_function", **params)
else:
routed_params = Bunch()
routed_params.estimator = Bunch(decision_function=Bunch())
# Parallel loop
n_jobs, _, starts = _partition_estimators(self.n_estimators, self.n_jobs)
all_decisions = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
delayed(_parallel_decision_function)(
self.estimators_[starts[i] : starts[i + 1]],
self.estimators_features_[starts[i] : starts[i + 1]],
X,
params=routed_params.estimator.decision_function,
)
for i in range(n_jobs)
)
# Reduce
decisions = sum(all_decisions) / self.n_estimators
return decisions
|
Average of the decision functions of the base classifiers.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
**params : dict
Parameters routed to the `decision_function` method of the sub-estimators
via the metadata routing API.
.. versionadded:: 1.7
Only available if
`sklearn.set_config(enable_metadata_routing=True)` is set. See
:ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
score : ndarray of shape (n_samples, k)
The decision function of the input samples. The columns correspond
to the classes in sorted order, as they appear in the attribute
``classes_``. Regression and binary classification are special
cases with ``k == 1``, otherwise ``k==n_classes``.
|
decision_function
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_bagging.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_bagging.py
|
BSD-3-Clause
|
def predict(self, X, **params):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the estimators in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
**params : dict
Parameters routed to the `predict` method of the sub-estimators via the
metadata routing API.
.. versionadded:: 1.7
Only available if
`sklearn.set_config(enable_metadata_routing=True)` is set. See
:ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
y : ndarray of shape (n_samples,)
The predicted values.
"""
_raise_for_params(params, self, "predict")
check_is_fitted(self)
# Check data
X = validate_data(
self,
X,
accept_sparse=["csr", "csc"],
dtype=None,
ensure_all_finite=False,
reset=False,
)
if _routing_enabled():
routed_params = process_routing(self, "predict", **params)
else:
routed_params = Bunch()
routed_params.estimator = Bunch(predict=Bunch())
# Parallel loop
n_jobs, _, starts = _partition_estimators(self.n_estimators, self.n_jobs)
all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
delayed(_parallel_predict_regression)(
self.estimators_[starts[i] : starts[i + 1]],
self.estimators_features_[starts[i] : starts[i + 1]],
X,
params=routed_params.estimator.predict,
)
for i in range(n_jobs)
)
# Reduce
y_hat = sum(all_y_hat) / self.n_estimators
return y_hat
|
Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the estimators in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
**params : dict
Parameters routed to the `predict` method of the sub-estimators via the
metadata routing API.
.. versionadded:: 1.7
Only available if
`sklearn.set_config(enable_metadata_routing=True)` is set. See
:ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
y : ndarray of shape (n_samples,)
The predicted values.
|
predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_bagging.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_bagging.py
|
BSD-3-Clause
|
def _get_estimator(self):
"""Resolve which estimator to return (default is DecisionTreeClassifier)"""
if self.estimator is None:
return DecisionTreeRegressor()
return self.estimator
|
Resolve which estimator to return (default is DecisionTreeRegressor)
|
_get_estimator
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_bagging.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_bagging.py
|
BSD-3-Clause
|
def _fit_single_estimator(
estimator, X, y, fit_params, message_clsname=None, message=None
):
"""Private function used to fit an estimator within a job."""
# TODO(SLEP6): remove if-condition for unrouted sample_weight when metadata
# routing can't be disabled.
if not _routing_enabled() and "sample_weight" in fit_params:
try:
with _print_elapsed_time(message_clsname, message):
estimator.fit(X, y, sample_weight=fit_params["sample_weight"])
except TypeError as exc:
if "unexpected keyword argument 'sample_weight'" in str(exc):
raise TypeError(
"Underlying estimator {} does not support sample weights.".format(
estimator.__class__.__name__
)
) from exc
raise
else:
with _print_elapsed_time(message_clsname, message):
estimator.fit(X, y, **fit_params)
return estimator
|
Private function used to fit an estimator within a job.
|
_fit_single_estimator
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_base.py
|
BSD-3-Clause
|
def _set_random_states(estimator, random_state=None):
"""Set fixed random_state parameters for an estimator.
Finds all parameters ending ``random_state`` and sets them to integers
derived from ``random_state``.
Parameters
----------
estimator : estimator supporting get/set_params
Estimator with potential randomness managed by random_state
parameters.
random_state : int, RandomState instance or None, default=None
Pseudo-random number generator to control the generation of the random
integers. Pass an int for reproducible output across multiple function
calls.
See :term:`Glossary <random_state>`.
Notes
-----
This does not necessarily set *all* ``random_state`` attributes that
control an estimator's randomness, only those accessible through
``estimator.get_params()``. ``random_state``s not controlled include
those belonging to:
* cross-validation splitters
* ``scipy.stats`` rvs
"""
random_state = check_random_state(random_state)
to_set = {}
for key in sorted(estimator.get_params(deep=True)):
if key == "random_state" or key.endswith("__random_state"):
to_set[key] = random_state.randint(np.iinfo(np.int32).max)
if to_set:
estimator.set_params(**to_set)
|
Set fixed random_state parameters for an estimator.
Finds all parameters ending ``random_state`` and sets them to integers
derived from ``random_state``.
Parameters
----------
estimator : estimator supporting get/set_params
Estimator with potential randomness managed by random_state
parameters.
random_state : int, RandomState instance or None, default=None
Pseudo-random number generator to control the generation of the random
integers. Pass an int for reproducible output across multiple function
calls.
See :term:`Glossary <random_state>`.
Notes
-----
This does not necessarily set *all* ``random_state`` attributes that
control an estimator's randomness, only those accessible through
``estimator.get_params()``. ``random_state``s not controlled include
those belonging to:
* cross-validation splitters
* ``scipy.stats`` rvs
|
_set_random_states
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_base.py
|
BSD-3-Clause
|
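A sketch of the idea behind `_set_random_states`, expressed with only the public `get_params` / `set_params` API (standalone, not part of the module above): every parameter named `random_state`, at any nesting level, is replaced by an integer drawn from a single seed source.

```python
# Sketch: seed all nested random_state parameters from one RandomState.
import numpy as np
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import check_random_state

est = BaggingClassifier(estimator=DecisionTreeClassifier())
rs = check_random_state(42)

to_set = {
    key: rs.randint(np.iinfo(np.int32).max)
    for key in sorted(est.get_params(deep=True))
    if key == "random_state" or key.endswith("__random_state")
}
est.set_params(**to_set)
print(to_set)  # e.g. {'estimator__random_state': ..., 'random_state': ...}
```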
def _validate_estimator(self, default=None):
"""Check the base estimator.
Sets the `estimator_` attributes.
"""
if self.estimator is not None:
self.estimator_ = self.estimator
else:
self.estimator_ = default
|
Check the base estimator.
Sets the `estimator_` attributes.
|
_validate_estimator
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_base.py
|
BSD-3-Clause
|
def _make_estimator(self, append=True, random_state=None):
"""Make and configure a copy of the `estimator_` attribute.
Warning: This method should be used to properly instantiate new
sub-estimators.
"""
estimator = clone(self.estimator_)
estimator.set_params(**{p: getattr(self, p) for p in self.estimator_params})
if random_state is not None:
_set_random_states(estimator, random_state)
if append:
self.estimators_.append(estimator)
return estimator
|
Make and configure a copy of the `estimator_` attribute.
Warning: This method should be used to properly instantiate new
sub-estimators.
|
_make_estimator
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_base.py
|
BSD-3-Clause
|
def _partition_estimators(n_estimators, n_jobs):
"""Private function used to partition estimators between jobs."""
# Compute the number of jobs
n_jobs = min(effective_n_jobs(n_jobs), n_estimators)
# Partition estimators between jobs
n_estimators_per_job = np.full(n_jobs, n_estimators // n_jobs, dtype=int)
n_estimators_per_job[: n_estimators % n_jobs] += 1
starts = np.cumsum(n_estimators_per_job)
return n_jobs, n_estimators_per_job.tolist(), [0] + starts.tolist()
|
Private function used to partition estimators between jobs.
|
_partition_estimators
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_base.py
|
BSD-3-Clause
|
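A worked example of the partitioning logic above, recomputed with NumPy rather than importing the private helper: 10 estimators split over 3 jobs.

```python
# Sketch: 10 estimators over 3 jobs -> per-job counts [4, 3, 3],
# start offsets [0, 4, 7, 10].
import numpy as np

n_estimators, n_jobs = 10, 3
per_job = np.full(n_jobs, n_estimators // n_jobs, dtype=int)
per_job[: n_estimators % n_jobs] += 1
starts = [0] + np.cumsum(per_job).tolist()
print(per_job.tolist(), starts)
```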
def _get_n_samples_bootstrap(n_samples, max_samples):
"""
Get the number of samples in a bootstrap sample.
Parameters
----------
n_samples : int
Number of samples in the dataset.
max_samples : int or float
The maximum number of samples to draw from the total available:
- if float, this indicates a fraction of the total and should be
the interval `(0.0, 1.0]`;
- if int, this indicates the exact number of samples;
- if None, this indicates the total number of samples.
Returns
-------
n_samples_bootstrap : int
The total number of samples to draw for the bootstrap sample.
"""
if max_samples is None:
return n_samples
if isinstance(max_samples, Integral):
if max_samples > n_samples:
msg = "`max_samples` must be <= n_samples={} but got value {}"
raise ValueError(msg.format(n_samples, max_samples))
return max_samples
if isinstance(max_samples, Real):
return max(round(n_samples * max_samples), 1)
|
Get the number of samples in a bootstrap sample.
Parameters
----------
n_samples : int
Number of samples in the dataset.
max_samples : int or float
The maximum number of samples to draw from the total available:
- if float, this indicates a fraction of the total and should be
the interval `(0.0, 1.0]`;
- if int, this indicates the exact number of samples;
- if None, this indicates the total number of samples.
Returns
-------
n_samples_bootstrap : int
The total number of samples to draw for the bootstrap sample.
|
_get_n_samples_bootstrap
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_forest.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_forest.py
|
BSD-3-Clause
|
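Worked values for the three `max_samples` conventions described above, mirrored in a small standalone function (an illustration of the documented behavior, not an import of the private helper):

```python
# Sketch mirroring the documented behavior of _get_n_samples_bootstrap.
from numbers import Integral, Real

def n_samples_bootstrap(n_samples, max_samples):
    if max_samples is None:            # None -> use all samples
        return n_samples
    if isinstance(max_samples, Integral):  # int -> exact count
        if max_samples > n_samples:
            raise ValueError(f"`max_samples` must be <= n_samples={n_samples}")
        return max_samples
    if isinstance(max_samples, Real):  # float -> fraction, at least 1 sample
        return max(round(n_samples * max_samples), 1)

print(n_samples_bootstrap(100, None))   # 100
print(n_samples_bootstrap(100, 37))     # 37
print(n_samples_bootstrap(100, 0.25))   # 25
```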
def _generate_unsampled_indices(random_state, n_samples, n_samples_bootstrap):
"""
Private function used by the forest._set_oob_score function."""
sample_indices = _generate_sample_indices(
random_state, n_samples, n_samples_bootstrap
)
sample_counts = np.bincount(sample_indices, minlength=n_samples)
unsampled_mask = sample_counts == 0
indices_range = np.arange(n_samples)
unsampled_indices = indices_range[unsampled_mask]
return unsampled_indices
|
Private function used by the forest._set_oob_score function.
|
_generate_unsampled_indices
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_forest.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_forest.py
|
BSD-3-Clause
|
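A small sketch of the out-of-bag logic above: samples whose bootstrap count is zero are the unsampled (OOB) indices.

```python
# Sketch: OOB indices are those never drawn in the bootstrap sample.
import numpy as np

rng = np.random.RandomState(0)
n_samples = 10
sample_indices = rng.randint(0, n_samples, n_samples)   # bootstrap draw

sample_counts = np.bincount(sample_indices, minlength=n_samples)
unsampled = np.arange(n_samples)[sample_counts == 0]
print(sample_indices, unsampled)
```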
def _parallel_build_trees(
tree,
bootstrap,
X,
y,
sample_weight,
tree_idx,
n_trees,
verbose=0,
class_weight=None,
n_samples_bootstrap=None,
missing_values_in_feature_mask=None,
):
"""
Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = _generate_sample_indices(
tree.random_state, n_samples, n_samples_bootstrap
)
sample_counts = np.bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
if class_weight == "subsample":
with catch_warnings():
simplefilter("ignore", DeprecationWarning)
curr_sample_weight *= compute_sample_weight("auto", y, indices=indices)
elif class_weight == "balanced_subsample":
curr_sample_weight *= compute_sample_weight("balanced", y, indices=indices)
tree._fit(
X,
y,
sample_weight=curr_sample_weight,
check_input=False,
missing_values_in_feature_mask=missing_values_in_feature_mask,
)
else:
tree._fit(
X,
y,
sample_weight=sample_weight,
check_input=False,
missing_values_in_feature_mask=missing_values_in_feature_mask,
)
return tree
|
Private function used to fit a single tree in parallel.
|
_parallel_build_trees
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_forest.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_forest.py
|
BSD-3-Clause
|
def apply(self, X):
"""
Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
X_leaves : ndarray of shape (n_samples, n_estimators)
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
"""
X = self._validate_X_predict(X)
results = Parallel(
n_jobs=self.n_jobs,
verbose=self.verbose,
prefer="threads",
)(delayed(tree.apply)(X, check_input=False) for tree in self.estimators_)
return np.array(results).T
|
Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
X_leaves : ndarray of shape (n_samples, n_estimators)
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
|
apply
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_forest.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_forest.py
|
BSD-3-Clause
|
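A minimal usage sketch of `apply` on a fitted forest (standalone, small synthetic data): it returns, for each sample, the leaf index reached in every tree.

```python
# Sketch: forest.apply(X) has shape (n_samples, n_estimators).
import numpy as np
from sklearn.ensemble import RandomForestClassifier

rng = np.random.RandomState(0)
X = rng.randn(50, 4)
y = (X[:, 0] > 0).astype(int)

forest = RandomForestClassifier(n_estimators=5, random_state=0).fit(X, y)
leaves = forest.apply(X)
print(leaves.shape)  # (50, 5): one leaf index per sample and per tree
```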
def decision_path(self, X):
"""
Return the decision path in the forest.
.. versionadded:: 0.18
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
indicator : sparse matrix of shape (n_samples, n_nodes)
Return a node indicator matrix where non-zero elements indicate
that the sample goes through the nodes. The matrix is in CSR
format.
n_nodes_ptr : ndarray of shape (n_estimators + 1,)
The columns from indicator[n_nodes_ptr[i]:n_nodes_ptr[i+1]]
give the indicator values for the i-th estimator.
"""
X = self._validate_X_predict(X)
indicators = Parallel(
n_jobs=self.n_jobs,
verbose=self.verbose,
prefer="threads",
)(
delayed(tree.decision_path)(X, check_input=False)
for tree in self.estimators_
)
n_nodes = [0]
n_nodes.extend([i.shape[1] for i in indicators])
n_nodes_ptr = np.array(n_nodes).cumsum()
return sparse_hstack(indicators).tocsr(), n_nodes_ptr
|
Return the decision path in the forest.
.. versionadded:: 0.18
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
indicator : sparse matrix of shape (n_samples, n_nodes)
Return a node indicator matrix where non-zero elements indicate
that the sample goes through the nodes. The matrix is in CSR
format.
n_nodes_ptr : ndarray of shape (n_estimators + 1,)
The columns from indicator[n_nodes_ptr[i]:n_nodes_ptr[i+1]]
give the indicator values for the i-th estimator.
|
decision_path
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_forest.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_forest.py
|
BSD-3-Clause
|
def fit(self, X, y, sample_weight=None):
"""
Build a forest of trees from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Internally, its dtype will be converted
to ``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csc_matrix``.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Fitted estimator.
"""
# Validate or convert input data
if issparse(y):
raise ValueError("sparse multilabel-indicator for y is not supported.")
X, y = validate_data(
self,
X,
y,
multi_output=True,
accept_sparse="csc",
dtype=DTYPE,
ensure_all_finite=False,
)
# _compute_missing_values_in_feature_mask checks if X has missing values and
# will raise an error if the underlying tree base estimator can't handle missing
# values. Only the criterion is required to determine if the tree supports
# missing values.
estimator = type(self.estimator)(criterion=self.criterion)
missing_values_in_feature_mask = (
estimator._compute_missing_values_in_feature_mask(
X, estimator_name=self.__class__.__name__
)
)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
y = np.atleast_1d(y)
if y.ndim == 2 and y.shape[1] == 1:
warn(
(
"A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples,), for example using ravel()."
),
DataConversionWarning,
stacklevel=2,
)
if y.ndim == 1:
# reshape is necessary to preserve data contiguity, unlike
# [:, np.newaxis] which does not.
y = np.reshape(y, (-1, 1))
if self.criterion == "poisson":
if np.any(y < 0):
raise ValueError(
"Some value(s) of y are negative which is "
"not allowed for Poisson regression."
)
if np.sum(y) <= 0:
raise ValueError(
"Sum of y is not strictly positive which "
"is necessary for Poisson regression."
)
self._n_samples, self.n_outputs_ = y.shape
y, expanded_class_weight = self._validate_y_class_weight(y)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
if not self.bootstrap and self.max_samples is not None:
raise ValueError(
"`max_sample` cannot be set if `bootstrap=False`. "
"Either switch to `bootstrap=True` or set "
"`max_sample=None`."
)
elif self.bootstrap:
n_samples_bootstrap = _get_n_samples_bootstrap(
n_samples=X.shape[0], max_samples=self.max_samples
)
else:
n_samples_bootstrap = None
self._n_samples_bootstrap = n_samples_bootstrap
self._validate_estimator()
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available if bootstrap=True")
random_state = check_random_state(self.random_state)
if not self.warm_start or not hasattr(self, "estimators_"):
# Free allocated memory, if any
self.estimators_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
raise ValueError(
"n_estimators=%d must be larger or equal to "
"len(estimators_)=%d when warm_start==True"
% (self.n_estimators, len(self.estimators_))
)
elif n_more_estimators == 0:
warn(
"Warm-start fitting without increasing n_estimators does not "
"fit new trees."
)
else:
if self.warm_start and len(self.estimators_) > 0:
# We draw from the random state to get the random state we
# would have got if we hadn't used a warm_start.
random_state.randint(MAX_INT, size=len(self.estimators_))
trees = [
self._make_estimator(append=False, random_state=random_state)
for i in range(n_more_estimators)
]
# Parallel loop: we prefer the threading backend as the Cython code
# for fitting the trees is internally releasing the Python GIL
# making threading more efficient than multiprocessing in
# that case. However, for joblib 0.12+ we respect any
# parallel_backend contexts set at a higher level,
# since correctness does not rely on using threads.
trees = Parallel(
n_jobs=self.n_jobs,
verbose=self.verbose,
prefer="threads",
)(
delayed(_parallel_build_trees)(
t,
self.bootstrap,
X,
y,
sample_weight,
i,
len(trees),
verbose=self.verbose,
class_weight=self.class_weight,
n_samples_bootstrap=n_samples_bootstrap,
missing_values_in_feature_mask=missing_values_in_feature_mask,
)
for i, t in enumerate(trees)
)
# Collect newly grown trees
self.estimators_.extend(trees)
if self.oob_score and (
n_more_estimators > 0 or not hasattr(self, "oob_score_")
):
y_type = type_of_target(y)
if y_type == "unknown" or (
is_classifier(self) and y_type == "multiclass-multioutput"
):
# FIXME: we could consider to support multiclass-multioutput if
# we introduce or reuse a constructor parameter (e.g.
# oob_score) allowing our user to pass a callable defining the
# scoring strategy on OOB sample.
raise ValueError(
"The type of target cannot be used to compute OOB "
f"estimates. Got {y_type} while only the following are "
"supported: continuous, continuous-multioutput, binary, "
"multiclass, multilabel-indicator."
)
if callable(self.oob_score):
self._set_oob_score_and_attributes(
X, y, scoring_function=self.oob_score
)
else:
self._set_oob_score_and_attributes(X, y)
# Decapsulate classes_ attributes
if hasattr(self, "classes_") and self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
|
Build a forest of trees from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Internally, its dtype will be converted
to ``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csc_matrix``.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Fitted estimator.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_forest.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_forest.py
|
BSD-3-Clause
|
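A minimal sketch of the warm-start path handled by the fit method above: calling fit again after raising n_estimators grows only the missing trees and then refreshes the OOB score. The dataset and parameter values are illustrative assumptions, not part of the source.

from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier

# illustrative data; any binary classification set works here
X, y = make_classification(n_samples=200, n_features=8, random_state=0)

clf = RandomForestClassifier(
    n_estimators=50, warm_start=True, oob_score=True, random_state=0
)
clf.fit(X, y)                  # grows the first 50 trees
clf.set_params(n_estimators=100)
clf.fit(X, y)                  # adds 50 more trees, keeping the existing ones
print(len(clf.estimators_))    # 100
print(clf.oob_score_)          # OOB accuracy recomputed with all 100 trees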
def _set_oob_score_and_attributes(self, X, y, scoring_function=None):
"""Compute and set the OOB score and attributes.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data matrix.
y : ndarray of shape (n_samples, n_outputs)
The target matrix.
scoring_function : callable, default=None
Scoring function for OOB score. Default depends on whether
this is a regression (R2 score) or classification problem
(accuracy score).
"""
|
Compute and set the OOB score and attributes.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data matrix.
y : ndarray of shape (n_samples, n_outputs)
The target matrix.
scoring_function : callable, default=None
Scoring function for OOB score. Default depends on whether
this is a regression (R2 score) or classification problem
(accuracy score).
|
_set_oob_score_and_attributes
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_forest.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_forest.py
|
BSD-3-Clause
|
def _compute_oob_predictions(self, X, y):
"""Compute and set the OOB score.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data matrix.
y : ndarray of shape (n_samples, n_outputs)
The target matrix.
Returns
-------
oob_pred : ndarray of shape (n_samples, n_classes, n_outputs) or \
(n_samples, 1, n_outputs)
The OOB predictions.
"""
# Prediction requires X to be in CSR format
if issparse(X):
X = X.tocsr()
n_samples = y.shape[0]
n_outputs = self.n_outputs_
if is_classifier(self) and hasattr(self, "n_classes_"):
# n_classes_ is a ndarray at this stage
# all the supported type of target will have the same number of
# classes in all outputs
oob_pred_shape = (n_samples, self.n_classes_[0], n_outputs)
else:
# for regression, n_classes_ does not exist and we create an empty
# axis to be consistent with the classification case and make
# the array operations compatible with the 2 settings
oob_pred_shape = (n_samples, 1, n_outputs)
oob_pred = np.zeros(shape=oob_pred_shape, dtype=np.float64)
n_oob_pred = np.zeros((n_samples, n_outputs), dtype=np.int64)
n_samples_bootstrap = _get_n_samples_bootstrap(
n_samples,
self.max_samples,
)
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state,
n_samples,
n_samples_bootstrap,
)
y_pred = self._get_oob_predictions(estimator, X[unsampled_indices, :])
oob_pred[unsampled_indices, ...] += y_pred
n_oob_pred[unsampled_indices, :] += 1
for k in range(n_outputs):
if (n_oob_pred == 0).any():
warn(
(
"Some inputs do not have OOB scores. This probably means "
"too few trees were used to compute any reliable OOB "
"estimates."
),
UserWarning,
)
n_oob_pred[n_oob_pred == 0] = 1
oob_pred[..., k] /= n_oob_pred[..., [k]]
return oob_pred
|
Compute and set the OOB score.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data matrix.
y : ndarray of shape (n_samples, n_outputs)
The target matrix.
Returns
-------
oob_pred : ndarray of shape (n_samples, n_classes, n_outputs) or (n_samples, 1, n_outputs)
The OOB predictions.
|
_compute_oob_predictions
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_forest.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_forest.py
|
BSD-3-Clause
|
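A small numpy-only sketch of the accumulate-and-normalize pattern used by _compute_oob_predictions above; the per-tree OOB indices and probabilities are made-up values for illustration.

import numpy as np

n_samples, n_classes = 5, 2
oob_pred = np.zeros((n_samples, n_classes))
n_oob_pred = np.zeros(n_samples, dtype=int)

# pretend two trees each left a few samples out of their bootstrap sample
per_tree_oob = [
    (np.array([0, 2, 4]), np.array([[0.9, 0.1], [0.2, 0.8], [0.5, 0.5]])),
    (np.array([1, 2]), np.array([[0.3, 0.7], [0.4, 0.6]])),
]
for unsampled_indices, y_pred in per_tree_oob:
    oob_pred[unsampled_indices] += y_pred
    n_oob_pred[unsampled_indices] += 1

n_oob_pred[n_oob_pred == 0] = 1          # avoid division by zero, as above
oob_pred /= n_oob_pred[:, np.newaxis]    # average the per-tree probabilities
print(oob_pred)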
def _validate_X_predict(self, X):
"""
Validate X whenever one tries to predict, apply, predict_proba."""
check_is_fitted(self)
if self.estimators_[0]._support_missing_values(X):
ensure_all_finite = "allow-nan"
else:
ensure_all_finite = True
X = validate_data(
self,
X,
dtype=DTYPE,
accept_sparse="csr",
reset=False,
ensure_all_finite=ensure_all_finite,
)
if issparse(X) and (X.indices.dtype != np.intc or X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based sparse matrices")
return X
|
Validate X whenever one tries to predict, apply, predict_proba.
|
_validate_X_predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_forest.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_forest.py
|
BSD-3-Clause
|
def feature_importances_(self):
"""
The impurity-based feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance.
Warning: impurity-based feature importances can be misleading for
high cardinality features (many unique values). See
:func:`sklearn.inspection.permutation_importance` as an alternative.
Returns
-------
feature_importances_ : ndarray of shape (n_features,)
The values of this array sum to 1, unless all trees are single node
trees consisting of only the root node, in which case it will be an
array of zeros.
"""
check_is_fitted(self)
all_importances = Parallel(n_jobs=self.n_jobs, prefer="threads")(
delayed(getattr)(tree, "feature_importances_")
for tree in self.estimators_
if tree.tree_.node_count > 1
)
if not all_importances:
return np.zeros(self.n_features_in_, dtype=np.float64)
all_importances = np.mean(all_importances, axis=0, dtype=np.float64)
return all_importances / np.sum(all_importances)
|
The impurity-based feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance.
Warning: impurity-based feature importances can be misleading for
high cardinality features (many unique values). See
:func:`sklearn.inspection.permutation_importance` as an alternative.
Returns
-------
feature_importances_ : ndarray of shape (n_features,)
The values of this array sum to 1, unless all trees are single node
trees consisting of only the root node, in which case it will be an
array of zeros.
|
feature_importances_
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_forest.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_forest.py
|
BSD-3-Clause
|
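A short sketch contrasting the impurity-based importances computed above with permutation_importance, the alternative the docstring points to; the regression dataset is an illustrative assumption.

from sklearn.datasets import make_regression
from sklearn.ensemble import RandomForestRegressor
from sklearn.inspection import permutation_importance

X, y = make_regression(n_samples=300, n_features=5, n_informative=3, random_state=0)
forest = RandomForestRegressor(n_estimators=100, random_state=0).fit(X, y)

print(forest.feature_importances_)     # impurity-based, sums to 1
result = permutation_importance(forest, X, y, n_repeats=5, random_state=0)
print(result.importances_mean)         # model-agnostic alternative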
def _accumulate_prediction(predict, X, out, lock):
"""
This is a utility function for joblib's Parallel.
It can't go locally in ForestClassifier or ForestRegressor, because joblib
complains that it cannot pickle it when placed there.
"""
prediction = predict(X, check_input=False)
with lock:
if len(out) == 1:
out[0] += prediction
else:
for i in range(len(out)):
out[i] += prediction[i]
|
This is a utility function for joblib's Parallel.
It can't go locally in ForestClassifier or ForestRegressor, because joblib
complains that it cannot pickle it when placed there.
|
_accumulate_prediction
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_forest.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_forest.py
|
BSD-3-Clause
|
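A self-contained sketch of the shared-memory accumulation pattern that _accumulate_prediction implements: several workers add into one buffer under a lock. The stand-in predictors are hypothetical callables, not real trees.

import threading

import numpy as np
from joblib import Parallel, delayed

out = [np.zeros(4)]
lock = threading.Lock()

def accumulate(predict, out, lock):
    prediction = predict()
    with lock:                 # serialize the in-place addition
        out[0] += prediction

predictors = [lambda i=i: np.full(4, float(i)) for i in range(3)]
Parallel(n_jobs=2, require="sharedmem")(
    delayed(accumulate)(p, out, lock) for p in predictors
)
print(out[0])                  # elementwise sum 0 + 1 + 2 -> [3. 3. 3. 3.]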
def _get_oob_predictions(tree, X):
"""Compute the OOB predictions for an individual tree.
Parameters
----------
tree : DecisionTreeClassifier object
A single decision tree classifier.
X : ndarray of shape (n_samples, n_features)
The OOB samples.
Returns
-------
y_pred : ndarray of shape (n_samples, n_classes, n_outputs)
The OOB associated predictions.
"""
y_pred = tree.predict_proba(X, check_input=False)
y_pred = np.asarray(y_pred)
if y_pred.ndim == 2:
# binary and multiclass
y_pred = y_pred[..., np.newaxis]
else:
# Roll the first `n_outputs` axis to the last axis. We will reshape
# from a shape of (n_outputs, n_samples, n_classes) to a shape of
# (n_samples, n_classes, n_outputs).
y_pred = np.rollaxis(y_pred, axis=0, start=3)
return y_pred
|
Compute the OOB predictions for an individual tree.
Parameters
----------
tree : DecisionTreeClassifier object
A single decision tree classifier.
X : ndarray of shape (n_samples, n_features)
The OOB samples.
Returns
-------
y_pred : ndarray of shape (n_samples, n_classes, n_outputs)
The OOB associated predictions.
|
_get_oob_predictions
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_forest.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_forest.py
|
BSD-3-Clause
|
def _set_oob_score_and_attributes(self, X, y, scoring_function=None):
"""Compute and set the OOB score and attributes.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data matrix.
y : ndarray of shape (n_samples, n_outputs)
The target matrix.
scoring_function : callable, default=None
Scoring function for OOB score. Defaults to `accuracy_score`.
"""
self.oob_decision_function_ = super()._compute_oob_predictions(X, y)
if self.oob_decision_function_.shape[-1] == 1:
# drop the n_outputs axis if there is a single output
self.oob_decision_function_ = self.oob_decision_function_.squeeze(axis=-1)
if scoring_function is None:
scoring_function = accuracy_score
self.oob_score_ = scoring_function(
y, np.argmax(self.oob_decision_function_, axis=1)
)
|
Compute and set the OOB score and attributes.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data matrix.
y : ndarray of shape (n_samples, n_outputs)
The target matrix.
scoring_function : callable, default=None
Scoring function for OOB score. Defaults to `accuracy_score`.
|
_set_oob_score_and_attributes
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_forest.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_forest.py
|
BSD-3-Clause
|
def predict(self, X):
"""
Predict class for X.
The predicted class of an input sample is a vote by the trees in
the forest, weighted by their probability estimates. That is,
the predicted class is the one with highest mean probability
estimate across the trees.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : ndarray of shape (n_samples,) or (n_samples, n_outputs)
The predicted classes.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
n_samples = proba[0].shape[0]
# all dtypes should be the same, so just take the first
class_type = self.classes_[0].dtype
predictions = np.empty((n_samples, self.n_outputs_), dtype=class_type)
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[k], axis=1), axis=0
)
return predictions
|
Predict class for X.
The predicted class of an input sample is a vote by the trees in
the forest, weighted by their probability estimates. That is,
the predicted class is the one with highest mean probability
estimate across the trees.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : ndarray of shape (n_samples,) or (n_samples, n_outputs)
The predicted classes.
|
predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_forest.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_forest.py
|
BSD-3-Clause
|
def predict_proba(self, X):
"""
Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the mean predicted class probabilities of the trees in the forest.
The class probability of a single tree is the fraction of samples of
the same class in a leaf.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : ndarray of shape (n_samples, n_classes), or a list of such arrays
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
"""
check_is_fitted(self)
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# avoid storing the output of every estimator by summing them here
all_proba = [
np.zeros((X.shape[0], j), dtype=np.float64)
for j in np.atleast_1d(self.n_classes_)
]
lock = threading.Lock()
Parallel(n_jobs=n_jobs, verbose=self.verbose, require="sharedmem")(
delayed(_accumulate_prediction)(e.predict_proba, X, all_proba, lock)
for e in self.estimators_
)
for proba in all_proba:
proba /= len(self.estimators_)
if len(all_proba) == 1:
return all_proba[0]
else:
return all_proba
|
Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the mean predicted class probabilities of the trees in the forest.
The class probability of a single tree is the fraction of samples of
the same class in a leaf.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : ndarray of shape (n_samples, n_classes), or a list of such arrays
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
|
predict_proba
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_forest.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_forest.py
|
BSD-3-Clause
|
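A quick check, on an illustrative dataset, that predict() above is the argmax of the averaged tree probabilities returned by predict_proba().

import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier

X, y = make_classification(n_samples=100, n_features=4, random_state=0)
clf = RandomForestClassifier(n_estimators=25, random_state=0).fit(X, y)

proba = clf.predict_proba(X[:5])
manual = clf.classes_.take(np.argmax(proba, axis=1), axis=0)
assert np.array_equal(manual, clf.predict(X[:5]))
print(proba.round(2))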
def predict_log_proba(self, X):
"""
Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample are computed as
the log of the mean predicted class probabilities of the trees in the
forest.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : ndarray of shape (n_samples, n_classes), or a list of such arrays
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
|
Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample are computed as
the log of the mean predicted class probabilities of the trees in the
forest.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : ndarray of shape (n_samples, n_classes), or a list of such arrays
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
|
predict_log_proba
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_forest.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_forest.py
|
BSD-3-Clause
|
def predict(self, X):
"""
Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : ndarray of shape (n_samples,) or (n_samples, n_outputs)
The predicted values.
"""
check_is_fitted(self)
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# avoid storing the output of every estimator by summing them here
if self.n_outputs_ > 1:
y_hat = np.zeros((X.shape[0], self.n_outputs_), dtype=np.float64)
else:
y_hat = np.zeros((X.shape[0]), dtype=np.float64)
# Parallel loop
lock = threading.Lock()
Parallel(n_jobs=n_jobs, verbose=self.verbose, require="sharedmem")(
delayed(_accumulate_prediction)(e.predict, X, [y_hat], lock)
for e in self.estimators_
)
y_hat /= len(self.estimators_)
return y_hat
|
Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : ndarray of shape (n_samples,) or (n_samples, n_outputs)
The predicted values.
|
predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_forest.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_forest.py
|
BSD-3-Clause
|
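A quick check, again on illustrative data, that the forest regressor's prediction equals the mean of the individual tree predictions accumulated in predict() above.

import numpy as np
from sklearn.datasets import make_regression
from sklearn.ensemble import RandomForestRegressor

X, y = make_regression(n_samples=200, n_features=6, n_informative=4, random_state=0)
reg = RandomForestRegressor(n_estimators=20, random_state=0).fit(X, y)

per_tree = np.stack([tree.predict(X[:3]) for tree in reg.estimators_])
assert np.allclose(per_tree.mean(axis=0), reg.predict(X[:3]))
print(reg.predict(X[:3]))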
def _get_oob_predictions(tree, X):
"""Compute the OOB predictions for an individual tree.
Parameters
----------
tree : DecisionTreeRegressor object
A single decision tree regressor.
X : ndarray of shape (n_samples, n_features)
The OOB samples.
Returns
-------
y_pred : ndarray of shape (n_samples, 1, n_outputs)
The OOB associated predictions.
"""
y_pred = tree.predict(X, check_input=False)
if y_pred.ndim == 1:
# single output regression
y_pred = y_pred[:, np.newaxis, np.newaxis]
else:
# multioutput regression
y_pred = y_pred[:, np.newaxis, :]
return y_pred
|
Compute the OOB predictions for an individual tree.
Parameters
----------
tree : DecisionTreeRegressor object
A single decision tree regressor.
X : ndarray of shape (n_samples, n_features)
The OOB samples.
Returns
-------
y_pred : ndarray of shape (n_samples, 1, n_outputs)
The OOB associated predictions.
|
_get_oob_predictions
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_forest.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_forest.py
|
BSD-3-Clause
|
def _set_oob_score_and_attributes(self, X, y, scoring_function=None):
"""Compute and set the OOB score and attributes.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data matrix.
y : ndarray of shape (n_samples, n_outputs)
The target matrix.
scoring_function : callable, default=None
Scoring function for OOB score. Defaults to `r2_score`.
"""
self.oob_prediction_ = super()._compute_oob_predictions(X, y).squeeze(axis=1)
if self.oob_prediction_.shape[-1] == 1:
# drop the n_outputs axis if there is a single output
self.oob_prediction_ = self.oob_prediction_.squeeze(axis=-1)
if scoring_function is None:
scoring_function = r2_score
self.oob_score_ = scoring_function(y, self.oob_prediction_)
|
Compute and set the OOB score and attributes.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data matrix.
y : ndarray of shape (n_samples, n_outputs)
The target matrix.
scoring_function : callable, default=None
Scoring function for OOB score. Defaults to `r2_score`.
|
_set_oob_score_and_attributes
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_forest.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_forest.py
|
BSD-3-Clause
|
def _compute_partial_dependence_recursion(self, grid, target_features):
"""Fast partial dependence computation.
Parameters
----------
grid : ndarray of shape (n_samples, n_target_features), dtype=DTYPE
The grid points on which the partial dependence should be
evaluated.
target_features : ndarray of shape (n_target_features), dtype=np.intp
The set of target features for which the partial dependence
should be evaluated.
Returns
-------
averaged_predictions : ndarray of shape (n_samples,)
The value of the partial dependence function on each grid point.
"""
grid = np.asarray(grid, dtype=DTYPE, order="C")
target_features = np.asarray(target_features, dtype=np.intp, order="C")
averaged_predictions = np.zeros(
shape=grid.shape[0], dtype=np.float64, order="C"
)
for tree in self.estimators_:
# Note: we don't sum in parallel because the GIL isn't released in
# the fast method.
tree.tree_.compute_partial_dependence(
grid, target_features, averaged_predictions
)
# Average over the forest
averaged_predictions /= len(self.estimators_)
return averaged_predictions
|
Fast partial dependence computation.
Parameters
----------
grid : ndarray of shape (n_samples, n_target_features), dtype=DTYPE
The grid points on which the partial dependence should be
evaluated.
target_features : ndarray of shape (n_target_features), dtype=np.intp
The set of target features for which the partial dependence
should be evaluated.
Returns
-------
averaged_predictions : ndarray of shape (n_samples,)
The value of the partial dependence function on each grid point.
|
_compute_partial_dependence_recursion
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_forest.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_forest.py
|
BSD-3-Clause
|
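A sketch of the public entry point that ends up calling the fast recursion computation above; the dataset and feature index are illustrative assumptions.

from sklearn.datasets import make_regression
from sklearn.ensemble import RandomForestRegressor
from sklearn.inspection import partial_dependence

X, y = make_regression(n_samples=200, n_features=5, n_informative=3, random_state=0)
reg = RandomForestRegressor(n_estimators=30, random_state=0).fit(X, y)

result = partial_dependence(reg, X, features=[0], method="recursion")
print(result["average"].shape)   # averaged predictions over the grid for feature 0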
def fit(self, X, y=None, sample_weight=None):
"""
Fit estimator.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
y : Ignored
Not used, present for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Returns the instance itself.
"""
# Parameters are validated in fit_transform
self.fit_transform(X, y, sample_weight=sample_weight)
return self
|
Fit estimator.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
y : Ignored
Not used, present for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Returns the instance itself.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_forest.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_forest.py
|
BSD-3-Clause
|
def fit_transform(self, X, y=None, sample_weight=None):
"""
Fit estimator and transform dataset.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data used to build forests. Use ``dtype=np.float32`` for
maximum efficiency.
y : Ignored
Not used, present for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
X_transformed : sparse matrix of shape (n_samples, n_out)
Transformed dataset.
"""
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=_num_samples(X))
super().fit(X, y, sample_weight=sample_weight)
self.one_hot_encoder_ = OneHotEncoder(sparse_output=self.sparse_output)
output = self.one_hot_encoder_.fit_transform(self.apply(X))
self._n_features_out = output.shape[1]
return output
|
Fit estimator and transform dataset.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data used to build forests. Use ``dtype=np.float32`` for
maximum efficiency.
y : Ignored
Not used, present for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
X_transformed : sparse matrix of shape (n_samples, n_out)
Transformed dataset.
|
fit_transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_forest.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_forest.py
|
BSD-3-Clause
|
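A brief usage sketch of the transform implemented above: each sample becomes a sparse one-hot encoding of the leaf it reaches in every tree. Data and sizes are illustrative.

from sklearn.datasets import make_blobs
from sklearn.ensemble import RandomTreesEmbedding

X, _ = make_blobs(n_samples=100, centers=3, random_state=0)
embedder = RandomTreesEmbedding(n_estimators=10, random_state=0)
X_sparse = embedder.fit_transform(X)

print(X_sparse.shape)     # (100, total number of leaves across the 10 trees)
print(X_sparse[0].nnz)    # exactly one active leaf per tree -> 10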
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Only used to validate feature names with the names seen in :meth:`fit`.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names, in the format of
`randomtreesembedding_{tree}_{leaf}`, where `tree` is the tree used
to generate the leaf and `leaf` is the index of a leaf node
in that tree. Note that the node indexing scheme is used to
index both nodes with children (split nodes) and leaf nodes.
Only the latter can be present as output features.
As a consequence, there are missing indices in the output
feature names.
"""
check_is_fitted(self, "_n_features_out")
_check_feature_names_in(
self, input_features=input_features, generate_names=False
)
feature_names = [
f"randomtreesembedding_{tree}_{leaf}"
for tree in range(self.n_estimators)
for leaf in self.one_hot_encoder_.categories_[tree]
]
return np.asarray(feature_names, dtype=object)
|
Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Only used to validate feature names with the names seen in :meth:`fit`.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names, in the format of
`randomtreesembedding_{tree}_{leaf}`, where `tree` is the tree used
to generate the leaf and `leaf` is the index of a leaf node
in that tree. Note that the node indexing scheme is used to
index both nodes with children (split nodes) and leaf nodes.
Only the latter can be present as output features.
As a consequence, there are missing indices in the output
feature names.
|
get_feature_names_out
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_forest.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_forest.py
|
BSD-3-Clause
|
def _safe_divide(numerator, denominator):
"""Prevents overflow and division by zero."""
# This is used for classifiers where the denominator might become zero exactly.
# For instance for log loss, HalfBinomialLoss, if proba=0 or proba=1 exactly, then
# denominator = hessian = 0, and we should set the node value in the line search to
# zero as there is no improvement of the loss possible.
# For numerical safety, we do this already for extremely tiny values.
if abs(denominator) < 1e-150:
return 0.0
else:
# Cast to Python float to trigger Python errors, e.g. ZeroDivisionError,
# without relying on `np.errstate` that is not supported by Pyodide.
result = float(numerator) / float(denominator)
if math.isinf(result):
warnings.warn("overflow encountered in _safe_divide", RuntimeWarning)
return result
|
Prevents overflow and division by zero.
|
_safe_divide
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_gb.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_gb.py
|
BSD-3-Clause
|
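A tiny sketch exercising the guard above, using a stand-alone re-implementation of the same logic (not the library function itself): a (near-)zero denominator yields a zero update instead of an overflow or a ZeroDivisionError.

import math
import warnings

def safe_divide(numerator, denominator):
    # same guard as _safe_divide above
    if abs(denominator) < 1e-150:
        return 0.0
    result = float(numerator) / float(denominator)
    if math.isinf(result):
        warnings.warn("overflow encountered in safe_divide", RuntimeWarning)
    return result

print(safe_divide(1.0, 0.0))      # 0.0, no exception
print(safe_divide(1.0, 1e-200))   # 0.0, treated as exactly zero
print(safe_divide(3.0, 2.0))      # 1.5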
def _init_raw_predictions(X, estimator, loss, use_predict_proba):
"""Return the initial raw predictions.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
The data array.
estimator : object
The estimator to use to compute the predictions.
loss : BaseLoss
An instance of a loss function class.
use_predict_proba : bool
Whether estimator.predict_proba is used instead of estimator.predict.
Returns
-------
raw_predictions : ndarray of shape (n_samples, K)
The initial raw predictions. K is equal to 1 for binary
classification and regression, and equal to the number of classes
for multiclass classification. ``raw_predictions`` is cast
to float64.
"""
# TODO: Use loss.fit_intercept_only where appropriate instead of
# DummyRegressor which is the default given by the `init` parameter,
# see also _init_state.
if use_predict_proba:
# Our parameter validation, set via _fit_context and _parameter_constraints
# already guarantees that estimator has a predict_proba method.
predictions = estimator.predict_proba(X)
if not loss.is_multiclass:
predictions = predictions[:, 1] # probability of positive class
eps = np.finfo(np.float32).eps # FIXME: This is quite large!
predictions = np.clip(predictions, eps, 1 - eps, dtype=np.float64)
else:
predictions = estimator.predict(X).astype(np.float64)
if predictions.ndim == 1:
return loss.link.link(predictions).reshape(-1, 1)
else:
return loss.link.link(predictions)
|
Return the initial raw predictions.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
The data array.
estimator : object
The estimator to use to compute the predictions.
loss : BaseLoss
An instance of a loss function class.
use_predict_proba : bool
Whether estimator.predict_proba is used instead of estimator.predict.
Returns
-------
raw_predictions : ndarray of shape (n_samples, K)
The initial raw predictions. K is equal to 1 for binary
classification and regression, and equal to the number of classes
for multiclass classification. ``raw_predictions`` is cast
to float64.
|
_init_raw_predictions
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_gb.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_gb.py
|
BSD-3-Clause
|
def _update_terminal_regions(
loss,
tree,
X,
y,
neg_gradient,
raw_prediction,
sample_weight,
sample_mask,
learning_rate=0.1,
k=0,
):
"""Update the leaf values to be predicted by the tree and raw_prediction.
The current raw predictions of the model (of this stage) are updated.
Additionally, the terminal regions (=leaves) of the given tree are updated as well.
This corresponds to the line search step in "Greedy Function Approximation" by
Friedman, Algorithm 1 step 5.
Update equals:
argmin_{x} loss(y_true, raw_prediction_old + x * tree.value)
For non-trivial cases like the Binomial loss, the update has no closed formula and
is an approximation, again, see the Friedman paper.
Also note that the update formula for the SquaredError is the identity. Therefore,
in this case, the leaf values don't need an update and only the raw_predictions are
updated (with the learning rate included).
Parameters
----------
loss : BaseLoss
tree : tree.Tree
The tree object.
X : ndarray of shape (n_samples, n_features)
The data array.
y : ndarray of shape (n_samples,)
The target labels.
neg_gradient : ndarray of shape (n_samples,)
The negative gradient.
raw_prediction : ndarray of shape (n_samples, n_trees_per_iteration)
The raw predictions (i.e. values from the tree leaves) of the
tree ensemble at iteration ``i - 1``.
sample_weight : ndarray of shape (n_samples,)
The weight of each sample.
sample_mask : ndarray of shape (n_samples,)
The sample mask to be used.
learning_rate : float, default=0.1
Learning rate shrinks the contribution of each tree by
``learning_rate``.
k : int, default=0
The index of the estimator being updated.
"""
# compute leaf for each sample in ``X``.
terminal_regions = tree.apply(X)
if not isinstance(loss, HalfSquaredError):
# mask all which are not in sample mask.
masked_terminal_regions = terminal_regions.copy()
masked_terminal_regions[~sample_mask] = -1
if isinstance(loss, HalfBinomialLoss):
def compute_update(y_, indices, neg_gradient, raw_prediction, k):
# Make a single Newton-Raphson step, see "Additive Logistic Regression:
# A Statistical View of Boosting" FHT00 and note that we use a slightly
# different version (factor 2) of "F" with proba=expit(raw_prediction).
# Our node estimate is given by:
# sum(w * (y - prob)) / sum(w * prob * (1 - prob))
# we take advantage that: y - prob = neg_gradient
neg_g = neg_gradient.take(indices, axis=0)
prob = y_ - neg_g
# numerator = negative gradient = y - prob
numerator = np.average(neg_g, weights=sw)
# denominator = hessian = prob * (1 - prob)
denominator = np.average(prob * (1 - prob), weights=sw)
return _safe_divide(numerator, denominator)
elif isinstance(loss, HalfMultinomialLoss):
def compute_update(y_, indices, neg_gradient, raw_prediction, k):
# we take advantage that: y - prob = neg_gradient
neg_g = neg_gradient.take(indices, axis=0)
prob = y_ - neg_g
K = loss.n_classes
# numerator = negative gradient * (k - 1) / k
# Note: The factor (k - 1)/k appears in the original papers "Greedy
# Function Approximation" by Friedman and "Additive Logistic
# Regression" by Friedman, Hastie, Tibshirani. This factor is, however,
# wrong or at least arbitrary as it directly multiplies the
# learning_rate. We keep it for backward compatibility.
numerator = np.average(neg_g, weights=sw)
numerator *= (K - 1) / K
# denominator = (diagonal) hessian = prob * (1 - prob)
denominator = np.average(prob * (1 - prob), weights=sw)
return _safe_divide(numerator, denominator)
elif isinstance(loss, ExponentialLoss):
def compute_update(y_, indices, neg_gradient, raw_prediction, k):
neg_g = neg_gradient.take(indices, axis=0)
# numerator = negative gradient = y * exp(-raw) - (1-y) * exp(raw)
numerator = np.average(neg_g, weights=sw)
# denominator = hessian = y * exp(-raw) + (1-y) * exp(raw)
# if y=0: hessian = exp(raw) = -neg_g
# y=1: hessian = exp(-raw) = neg_g
hessian = neg_g.copy()
hessian[y_ == 0] *= -1
denominator = np.average(hessian, weights=sw)
return _safe_divide(numerator, denominator)
else:
def compute_update(y_, indices, neg_gradient, raw_prediction, k):
return loss.fit_intercept_only(
y_true=y_ - raw_prediction[indices, k],
sample_weight=sw,
)
# update each leaf (= perform line search)
for leaf in np.nonzero(tree.children_left == TREE_LEAF)[0]:
indices = np.nonzero(masked_terminal_regions == leaf)[
0
] # of terminal regions
y_ = y.take(indices, axis=0)
sw = None if sample_weight is None else sample_weight[indices]
update = compute_update(y_, indices, neg_gradient, raw_prediction, k)
# TODO: Multiply here by learning rate instead of everywhere else.
tree.value[leaf, 0, 0] = update
# update predictions (both in-bag and out-of-bag)
raw_prediction[:, k] += learning_rate * tree.value[:, 0, 0].take(
terminal_regions, axis=0
)
|
Update the leaf values to be predicted by the tree and raw_prediction.
The current raw predictions of the model (of this stage) are updated.
Additionally, the terminal regions (=leaves) of the given tree are updated as well.
This corresponds to the line search step in "Greedy Function Approximation" by
Friedman, Algorithm 1 step 5.
Update equals:
argmin_{x} loss(y_true, raw_prediction_old + x * tree.value)
For non-trivial cases like the Binomial loss, the update has no closed formula and
is an approximation, again, see the Friedman paper.
Also note that the update formula for the SquaredError is the identity. Therefore,
in this case, the leaf values don't need an update and only the raw_predictions are
updated (with the learning rate included).
Parameters
----------
loss : BaseLoss
tree : tree.Tree
The tree object.
X : ndarray of shape (n_samples, n_features)
The data array.
y : ndarray of shape (n_samples,)
The target labels.
neg_gradient : ndarray of shape (n_samples,)
The negative gradient.
raw_prediction : ndarray of shape (n_samples, n_trees_per_iteration)
The raw predictions (i.e. values from the tree leaves) of the
tree ensemble at iteration ``i - 1``.
sample_weight : ndarray of shape (n_samples,)
The weight of each sample.
sample_mask : ndarray of shape (n_samples,)
The sample mask to be used.
learning_rate : float, default=0.1
Learning rate shrinks the contribution of each tree by
``learning_rate``.
k : int, default=0
The index of the estimator being updated.
|
_update_terminal_regions
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_gb.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_gb.py
|
BSD-3-Clause
|
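A worked numpy sketch of the per-leaf Newton step for the binomial case described above: numerator = mean(y - p), denominator = mean(p * (1 - p)). The leaf targets and probabilities are made-up values.

import numpy as np

y_leaf = np.array([1.0, 0.0, 1.0, 1.0])   # targets of the samples in one leaf
p_leaf = np.array([0.6, 0.4, 0.7, 0.5])   # current predicted probabilities

neg_gradient = y_leaf - p_leaf             # y - prob
numerator = neg_gradient.mean()
denominator = (p_leaf * (1 - p_leaf)).mean()
leaf_value = numerator / denominator       # value that would go into tree.value[leaf]
print(leaf_value)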
def set_huber_delta(loss, y_true, raw_prediction, sample_weight=None):
"""Calculate and set self.closs.delta based on self.quantile."""
abserr = np.abs(y_true - raw_prediction.squeeze())
# sample_weight is always a ndarray, never None.
delta = _weighted_percentile(abserr, sample_weight, 100 * loss.quantile)
loss.closs.delta = float(delta)
|
Calculate and set self.closs.delta based on self.quantile.
|
set_huber_delta
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_gb.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_gb.py
|
BSD-3-Clause
|
def init(self, est, begin_at_stage=0):
"""Initialize reporter
Parameters
----------
est : Estimator
The estimator
begin_at_stage : int, default=0
stage at which to begin reporting
"""
# header fields and line format str
header_fields = ["Iter", "Train Loss"]
verbose_fmt = ["{iter:>10d}", "{train_score:>16.4f}"]
# do oob?
if est.subsample < 1:
header_fields.append("OOB Improve")
verbose_fmt.append("{oob_impr:>16.4f}")
header_fields.append("Remaining Time")
verbose_fmt.append("{remaining_time:>16s}")
# print the header line
print(("%10s " + "%16s " * (len(header_fields) - 1)) % tuple(header_fields))
self.verbose_fmt = " ".join(verbose_fmt)
# plot verbose info each time i % verbose_mod == 0
self.verbose_mod = 1
self.start_time = time()
self.begin_at_stage = begin_at_stage
|
Initialize reporter
Parameters
----------
est : Estimator
The estimator
begin_at_stage : int, default=0
stage at which to begin reporting
|
init
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_gb.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_gb.py
|
BSD-3-Clause
|
def update(self, j, est):
"""Update reporter with new iteration.
Parameters
----------
j : int
The new iteration.
est : Estimator
The estimator.
"""
do_oob = est.subsample < 1
# we need to take into account if we fit additional estimators.
i = j - self.begin_at_stage # iteration relative to the start iter
if (i + 1) % self.verbose_mod == 0:
oob_impr = est.oob_improvement_[j] if do_oob else 0
remaining_time = (
(est.n_estimators - (j + 1)) * (time() - self.start_time) / float(i + 1)
)
if remaining_time > 60:
remaining_time = "{0:.2f}m".format(remaining_time / 60.0)
else:
remaining_time = "{0:.2f}s".format(remaining_time)
print(
self.verbose_fmt.format(
iter=j + 1,
train_score=est.train_score_[j],
oob_impr=oob_impr,
remaining_time=remaining_time,
)
)
if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0):
# adjust verbose frequency (powers of 10)
self.verbose_mod *= 10
|
Update reporter with new iteration.
Parameters
----------
j : int
The new iteration.
est : Estimator
The estimator.
|
update
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_gb.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_gb.py
|
BSD-3-Clause
|
def _init_state(self):
"""Initialize model state and allocate model state data structures."""
self.init_ = self.init
if self.init_ is None:
if is_classifier(self):
self.init_ = DummyClassifier(strategy="prior")
elif isinstance(self._loss, (AbsoluteError, HuberLoss)):
self.init_ = DummyRegressor(strategy="quantile", quantile=0.5)
elif isinstance(self._loss, PinballLoss):
self.init_ = DummyRegressor(strategy="quantile", quantile=self.alpha)
else:
self.init_ = DummyRegressor(strategy="mean")
self.estimators_ = np.empty(
(self.n_estimators, self.n_trees_per_iteration_), dtype=object
)
self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
# do oob?
if self.subsample < 1.0:
self.oob_improvement_ = np.zeros((self.n_estimators), dtype=np.float64)
self.oob_scores_ = np.zeros((self.n_estimators), dtype=np.float64)
self.oob_score_ = np.nan
|
Initialize model state and allocate model state data structures.
|
_init_state
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_gb.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_gb.py
|
BSD-3-Clause
|
def _clear_state(self):
"""Clear the state of the gradient boosting model."""
if hasattr(self, "estimators_"):
self.estimators_ = np.empty((0, 0), dtype=object)
if hasattr(self, "train_score_"):
del self.train_score_
if hasattr(self, "oob_improvement_"):
del self.oob_improvement_
if hasattr(self, "oob_scores_"):
del self.oob_scores_
if hasattr(self, "oob_score_"):
del self.oob_score_
if hasattr(self, "init_"):
del self.init_
if hasattr(self, "_rng"):
del self._rng
|
Clear the state of the gradient boosting model.
|
_clear_state
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_gb.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_gb.py
|
BSD-3-Clause
|
def _resize_state(self):
"""Add additional ``n_estimators`` entries to all attributes."""
# self.n_estimators is the number of additional est to fit
total_n_estimators = self.n_estimators
if total_n_estimators < self.estimators_.shape[0]:
raise ValueError(
"resize with smaller n_estimators %d < %d"
% (total_n_estimators, self.estimators_.shape[0])
)
self.estimators_ = np.resize(
self.estimators_, (total_n_estimators, self.n_trees_per_iteration_)
)
self.train_score_ = np.resize(self.train_score_, total_n_estimators)
if self.subsample < 1 or hasattr(self, "oob_improvement_"):
# if do oob resize arrays or create new if not available
if hasattr(self, "oob_improvement_"):
self.oob_improvement_ = np.resize(
self.oob_improvement_, total_n_estimators
)
self.oob_scores_ = np.resize(self.oob_scores_, total_n_estimators)
self.oob_score_ = np.nan
else:
self.oob_improvement_ = np.zeros(
(total_n_estimators,), dtype=np.float64
)
self.oob_scores_ = np.zeros((total_n_estimators,), dtype=np.float64)
self.oob_score_ = np.nan
|
Add additional ``n_estimators`` entries to all attributes.
|
_resize_state
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_gb.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_gb.py
|
BSD-3-Clause
|
def fit(self, X, y, sample_weight=None, monitor=None):
"""Fit the gradient boosting model.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
y : array-like of shape (n_samples,)
Target values (strings or integers in classification, real numbers
in regression).
For classification, labels must correspond to classes.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
monitor : callable, default=None
The monitor is called after each iteration with the current
iteration, a reference to the estimator and the local variables of
``_fit_stages`` as keyword arguments ``callable(i, self,
locals())``. If the callable returns ``True`` the fitting procedure
is stopped. The monitor can be used for various things such as
computing held-out estimates, early stopping, model introspection, and
snapshotting.
Returns
-------
self : object
Fitted estimator.
"""
if not self.warm_start:
self._clear_state()
# Check input
# Since check_array converts both X and y to the same dtype, but the
# trees use different types for X and y, checking them separately.
X, y = validate_data(
self,
X,
y,
accept_sparse=["csr", "csc", "coo"],
dtype=DTYPE,
multi_output=True,
)
sample_weight_is_none = sample_weight is None
sample_weight = _check_sample_weight(sample_weight, X)
if sample_weight_is_none:
y = self._encode_y(y=y, sample_weight=None)
else:
y = self._encode_y(y=y, sample_weight=sample_weight)
y = column_or_1d(y, warn=True) # TODO: Is this still required?
self._set_max_features()
# self.loss is guaranteed to be a string
self._loss = self._get_loss(sample_weight=sample_weight)
if self.n_iter_no_change is not None:
stratify = y if is_classifier(self) else None
(
X_train,
X_val,
y_train,
y_val,
sample_weight_train,
sample_weight_val,
) = train_test_split(
X,
y,
sample_weight,
random_state=self.random_state,
test_size=self.validation_fraction,
stratify=stratify,
)
if is_classifier(self):
if self.n_classes_ != np.unique(y_train).shape[0]:
# We choose to error here. The problem is that the init
# estimator would be trained on y, which has some missing
# classes now, so its predictions would not have the
# correct shape.
raise ValueError(
"The training data after the early stopping split "
"is missing some classes. Try using another random "
"seed."
)
else:
X_train, y_train, sample_weight_train = X, y, sample_weight
X_val = y_val = sample_weight_val = None
n_samples = X_train.shape[0]
# First time calling fit.
if not self._is_fitted():
# init state
self._init_state()
# fit initial model and initialize raw predictions
if self.init_ == "zero":
raw_predictions = np.zeros(
shape=(n_samples, self.n_trees_per_iteration_),
dtype=np.float64,
)
else:
# XXX clean this once we have a support_sample_weight tag
if sample_weight_is_none:
self.init_.fit(X_train, y_train)
else:
msg = (
"The initial estimator {} does not support sample "
"weights.".format(self.init_.__class__.__name__)
)
try:
self.init_.fit(
X_train, y_train, sample_weight=sample_weight_train
)
except TypeError as e:
if "unexpected keyword argument 'sample_weight'" in str(e):
# regular estimator without SW support
raise ValueError(msg) from e
else: # regular estimator whose input checking failed
raise
except ValueError as e:
if (
"pass parameters to specific steps of "
"your pipeline using the "
"stepname__parameter" in str(e)
): # pipeline
raise ValueError(msg) from e
else: # regular estimator whose input checking failed
raise
raw_predictions = _init_raw_predictions(
X_train, self.init_, self._loss, is_classifier(self)
)
begin_at_stage = 0
# The rng state must be preserved if warm_start is True
self._rng = check_random_state(self.random_state)
# warm start: this is not the first time fit was called
else:
# add more estimators to fitted model
# invariant: warm_start = True
if self.n_estimators < self.estimators_.shape[0]:
raise ValueError(
"n_estimators=%d must be larger or equal to "
"estimators_.shape[0]=%d when "
"warm_start==True" % (self.n_estimators, self.estimators_.shape[0])
)
begin_at_stage = self.estimators_.shape[0]
# The requirements of _raw_predict
# are more constrained than fit. It accepts only CSR
# matrices. Finite values have already been checked in _validate_data.
X_train = check_array(
X_train,
dtype=DTYPE,
order="C",
accept_sparse="csr",
ensure_all_finite=False,
)
raw_predictions = self._raw_predict(X_train)
self._resize_state()
# fit the boosting stages
n_stages = self._fit_stages(
X_train,
y_train,
raw_predictions,
sample_weight_train,
self._rng,
X_val,
y_val,
sample_weight_val,
begin_at_stage,
monitor,
)
# change shape of arrays after fit (early-stopping or additional ests)
if n_stages != self.estimators_.shape[0]:
self.estimators_ = self.estimators_[:n_stages]
self.train_score_ = self.train_score_[:n_stages]
if hasattr(self, "oob_improvement_"):
# OOB scores were computed
self.oob_improvement_ = self.oob_improvement_[:n_stages]
self.oob_scores_ = self.oob_scores_[:n_stages]
self.oob_score_ = self.oob_scores_[-1]
self.n_estimators_ = n_stages
return self
|
Fit the gradient boosting model.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
y : array-like of shape (n_samples,)
Target values (strings or integers in classification, real numbers
in regression).
For classification, labels must correspond to classes.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
monitor : callable, default=None
The monitor is called after each iteration with the current
iteration, a reference to the estimator and the local variables of
``_fit_stages`` as keyword arguments ``callable(i, self,
locals())``. If the callable returns ``True`` the fitting procedure
is stopped. The monitor can be used for various things such as
computing held-out estimates, early stopping, model introspection, and
snapshotting.
Returns
-------
self : object
Fitted estimator.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_gb.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_gb.py
|
BSD-3-Clause
|
def _fit_stages(
self,
X,
y,
raw_predictions,
sample_weight,
random_state,
X_val,
y_val,
sample_weight_val,
begin_at_stage=0,
monitor=None,
):
"""Iteratively fits the stages.
For each stage it computes the progress (OOB, train score)
and delegates to ``_fit_stage``.
Returns the number of stages fit; might differ from ``n_estimators``
due to early stopping.
"""
n_samples = X.shape[0]
do_oob = self.subsample < 1.0
sample_mask = np.ones((n_samples,), dtype=bool)
n_inbag = max(1, int(self.subsample * n_samples))
if self.verbose:
verbose_reporter = VerboseReporter(verbose=self.verbose)
verbose_reporter.init(self, begin_at_stage)
X_csc = csc_matrix(X) if issparse(X) else None
X_csr = csr_matrix(X) if issparse(X) else None
if self.n_iter_no_change is not None:
loss_history = np.full(self.n_iter_no_change, np.inf)
# We create a generator to get the predictions for X_val after
# the addition of each successive stage
y_val_pred_iter = self._staged_raw_predict(X_val, check_input=False)
# Older versions of GBT had its own loss functions. With the new common
# private loss function submodule _loss, we often are a factor of 2
# away from the old version. Here we keep backward compatibility for
# oob_scores_ and oob_improvement_, even if the old way is quite
# inconsistent (sometimes the gradient is half the gradient, sometimes
# not).
if isinstance(
self._loss,
(
HalfSquaredError,
HalfBinomialLoss,
),
):
factor = 2
else:
factor = 1
# perform boosting iterations
i = begin_at_stage
for i in range(begin_at_stage, self.n_estimators):
# subsampling
if do_oob:
sample_mask = _random_sample_mask(n_samples, n_inbag, random_state)
y_oob_masked = y[~sample_mask]
sample_weight_oob_masked = sample_weight[~sample_mask]
if i == 0: # store the initial loss to compute the OOB score
initial_loss = factor * self._loss(
y_true=y_oob_masked,
raw_prediction=raw_predictions[~sample_mask],
sample_weight=sample_weight_oob_masked,
)
# fit next stage of trees
raw_predictions = self._fit_stage(
i,
X,
y,
raw_predictions,
sample_weight,
sample_mask,
random_state,
X_csc=X_csc,
X_csr=X_csr,
)
# track loss
if do_oob:
self.train_score_[i] = factor * self._loss(
y_true=y[sample_mask],
raw_prediction=raw_predictions[sample_mask],
sample_weight=sample_weight[sample_mask],
)
self.oob_scores_[i] = factor * self._loss(
y_true=y_oob_masked,
raw_prediction=raw_predictions[~sample_mask],
sample_weight=sample_weight_oob_masked,
)
previous_loss = initial_loss if i == 0 else self.oob_scores_[i - 1]
self.oob_improvement_[i] = previous_loss - self.oob_scores_[i]
self.oob_score_ = self.oob_scores_[-1]
else:
# no need to fancy index w/ no subsampling
self.train_score_[i] = factor * self._loss(
y_true=y,
raw_prediction=raw_predictions,
sample_weight=sample_weight,
)
if self.verbose > 0:
verbose_reporter.update(i, self)
if monitor is not None:
early_stopping = monitor(i, self, locals())
if early_stopping:
break
# We also provide an early stopping based on the score from
# validation set (X_val, y_val), if n_iter_no_change is set
if self.n_iter_no_change is not None:
# By calling next(y_val_pred_iter), we get the predictions
# for X_val after the addition of the current stage
validation_loss = factor * self._loss(
y_val, next(y_val_pred_iter), sample_weight_val
)
# Require validation_score to be better (less) than at least
# one of the last n_iter_no_change evaluations
if np.any(validation_loss + self.tol < loss_history):
loss_history[i % len(loss_history)] = validation_loss
else:
break
return i + 1
|
Iteratively fits the stages.
For each stage it computes the progress (OOB, train score)
and delegates to ``_fit_stage``.
Returns the number of stages fit; might differ from ``n_estimators``
due to early stopping.
|
_fit_stages
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_gb.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_gb.py
|
BSD-3-Clause
|
def _raw_predict_init(self, X):
"""Check input and compute raw predictions of the init estimator."""
self._check_initialized()
X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
if self.init_ == "zero":
raw_predictions = np.zeros(
shape=(X.shape[0], self.n_trees_per_iteration_), dtype=np.float64
)
else:
raw_predictions = _init_raw_predictions(
X, self.init_, self._loss, is_classifier(self)
)
return raw_predictions
|
Check input and compute raw predictions of the init estimator.
|
_raw_predict_init
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_gb.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_gb.py
|
BSD-3-Clause
|
def _raw_predict(self, X):
"""Return the sum of the trees raw predictions (+ init estimator)."""
check_is_fitted(self)
raw_predictions = self._raw_predict_init(X)
predict_stages(self.estimators_, X, self.learning_rate, raw_predictions)
return raw_predictions
|
Return the sum of the trees raw predictions (+ init estimator).
|
_raw_predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_gb.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_gb.py
|
BSD-3-Clause
|
def _staged_raw_predict(self, X, check_input=True):
"""Compute raw predictions of ``X`` for each iteration.
This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : bool, default=True
If False, the input arrays X will not be checked.
Returns
-------
raw_predictions : generator of ndarray of shape (n_samples, k)
The raw predictions of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
if check_input:
X = validate_data(
self, X, dtype=DTYPE, order="C", accept_sparse="csr", reset=False
)
raw_predictions = self._raw_predict_init(X)
for i in range(self.estimators_.shape[0]):
predict_stage(self.estimators_, i, X, self.learning_rate, raw_predictions)
yield raw_predictions.copy()
|
Compute raw predictions of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : bool, default=True
If False, the input arrays X will not be checked.
Returns
-------
raw_predictions : generator of ndarray of shape (n_samples, k)
The raw predictions of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k == n_classes``.
|
_staged_raw_predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_gb.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_gb.py
|
BSD-3-Clause
|
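A usage sketch (synthetic data assumed) of the public staged-prediction API built on `_staged_raw_predict`: it lets you measure held-out error after every boosting stage without refitting.
from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=400, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = GradientBoostingClassifier(n_estimators=100, random_state=0)
clf.fit(X_train, y_train)

# One prediction array per stage; the error typically drops, then flattens.
test_errors = [
    1.0 - (y_pred == y_test).mean() for y_pred in clf.staged_predict(X_test)
]
best_stage = int(min(range(len(test_errors)), key=test_errors.__getitem__)) + 1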
def feature_importances_(self):
"""The impurity-based feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance.
Warning: impurity-based feature importances can be misleading for
high cardinality features (many unique values). See
:func:`sklearn.inspection.permutation_importance` as an alternative.
Returns
-------
feature_importances_ : ndarray of shape (n_features,)
The values of this array sum to 1, unless all trees are single node
trees consisting of only the root node, in which case it will be an
array of zeros.
"""
self._check_initialized()
relevant_trees = [
tree
for stage in self.estimators_
for tree in stage
if tree.tree_.node_count > 1
]
if not relevant_trees:
# degenerate case where all trees have only one node
return np.zeros(shape=self.n_features_in_, dtype=np.float64)
relevant_feature_importances = [
tree.tree_.compute_feature_importances(normalize=False)
for tree in relevant_trees
]
avg_feature_importances = np.mean(
relevant_feature_importances, axis=0, dtype=np.float64
)
return avg_feature_importances / np.sum(avg_feature_importances)
|
The impurity-based feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance.
Warning: impurity-based feature importances can be misleading for
high cardinality features (many unique values). See
:func:`sklearn.inspection.permutation_importance` as an alternative.
Returns
-------
feature_importances_ : ndarray of shape (n_features,)
The values of this array sum to 1, unless all trees are single node
trees consisting of only the root node, in which case it will be an
array of zeros.
|
feature_importances_
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_gb.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_gb.py
|
BSD-3-Clause
|
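A short sketch (synthetic data, illustration only) contrasting the impurity-based `feature_importances_` computed above with the permutation importances that the docstring recommends for high-cardinality features.
from sklearn.datasets import make_regression
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.inspection import permutation_importance

X, y = make_regression(n_samples=300, n_features=5, random_state=0)
reg = GradientBoostingRegressor(random_state=0).fit(X, y)

impurity_importances = reg.feature_importances_      # normalized, sums to 1
perm = permutation_importance(reg, X, y, n_repeats=5, random_state=0)
permutation_importances = perm.importances_mean      # not normalized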
def _compute_partial_dependence_recursion(self, grid, target_features):
"""Fast partial dependence computation.
Parameters
----------
grid : ndarray of shape (n_samples, n_target_features), dtype=np.float32
The grid points on which the partial dependence should be
evaluated.
target_features : ndarray of shape (n_target_features,), dtype=np.intp
The set of target features for which the partial dependence
should be evaluated.
Returns
-------
averaged_predictions : ndarray of shape \
(n_trees_per_iteration_, n_samples)
The value of the partial dependence function on each grid point.
"""
if self.init is not None:
warnings.warn(
"Using recursion method with a non-constant init predictor "
"will lead to incorrect partial dependence values. "
"Got init=%s." % self.init,
UserWarning,
)
grid = np.asarray(grid, dtype=DTYPE, order="C")
n_estimators, n_trees_per_stage = self.estimators_.shape
averaged_predictions = np.zeros(
(n_trees_per_stage, grid.shape[0]), dtype=np.float64, order="C"
)
target_features = np.asarray(target_features, dtype=np.intp, order="C")
for stage in range(n_estimators):
for k in range(n_trees_per_stage):
tree = self.estimators_[stage, k].tree_
tree.compute_partial_dependence(
grid, target_features, averaged_predictions[k]
)
averaged_predictions *= self.learning_rate
return averaged_predictions
|
Fast partial dependence computation.
Parameters
----------
grid : ndarray of shape (n_samples, n_target_features), dtype=np.float32
The grid points on which the partial dependence should be
evaluated.
target_features : ndarray of shape (n_target_features,), dtype=np.intp
The set of target features for which the partial dependence
should be evaluated.
Returns
-------
averaged_predictions : ndarray of shape (n_trees_per_iteration_, n_samples)
The value of the partial dependence function on each grid point.
|
_compute_partial_dependence_recursion
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_gb.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_gb.py
|
BSD-3-Clause
|
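A usage sketch (synthetic data assumed) of the public entry point that ends up calling `_compute_partial_dependence_recursion` when `method="recursion"` is requested on a gradient boosting model.
from sklearn.datasets import make_regression
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.inspection import partial_dependence

X, y = make_regression(n_samples=300, n_features=4, random_state=0)
reg = GradientBoostingRegressor(random_state=0).fit(X, y)

# The recursion method walks the fitted trees directly instead of
# predicting on modified copies of X (the "brute" method).
pd_result = partial_dependence(reg, X, features=[0], method="recursion")
averaged = pd_result["average"]   # one row per output, one column per grid point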
def apply(self, X):
"""Apply trees in the ensemble to X, return leaf indices.
.. versionadded:: 0.17
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will
be converted to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array-like of shape (n_samples, n_estimators, n_classes)
For each datapoint x in X and for each tree in the ensemble,
return the index of the leaf x ends up in each estimator.
In the case of binary classification n_classes is 1.
"""
self._check_initialized()
X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
# n_classes will be equal to 1 in the binary classification or the
# regression case.
n_estimators, n_classes = self.estimators_.shape
leaves = np.zeros((X.shape[0], n_estimators, n_classes))
for i in range(n_estimators):
for j in range(n_classes):
estimator = self.estimators_[i, j]
leaves[:, i, j] = estimator.apply(X, check_input=False)
return leaves
|
Apply trees in the ensemble to X, return leaf indices.
.. versionadded:: 0.17
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will
be converted to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array-like of shape (n_samples, n_estimators, n_classes)
For each datapoint x in X and for each tree in the ensemble,
return the index of the leaf x ends up in each estimator.
In the case of binary classification n_classes is 1.
|
apply
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_gb.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_gb.py
|
BSD-3-Clause
|
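A sketch (synthetic data; a common usage pattern rather than an API shown above) that uses `apply` to turn each sample into its per-tree leaf indices, for example to feed one-hot encoded leaves to a linear model.
from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.preprocessing import OneHotEncoder

X, y = make_classification(n_samples=300, random_state=0)
gbc = GradientBoostingClassifier(n_estimators=20, random_state=0).fit(X, y)

leaves = gbc.apply(X)                       # (n_samples, n_estimators, n_classes)
leaves_2d = leaves.reshape(X.shape[0], -1)  # flatten trees before encoding
leaf_features = OneHotEncoder().fit_transform(leaves_2d)   # sparse matrix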
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
score : ndarray of shape (n_samples, n_classes) or (n_samples,)
The decision function of the input samples, which corresponds to
the raw values predicted from the trees of the ensemble. The
order of the classes corresponds to that in the attribute
:term:`classes_`. Regression and binary classification produce an
array of shape (n_samples,).
"""
X = validate_data(
self, X, dtype=DTYPE, order="C", accept_sparse="csr", reset=False
)
raw_predictions = self._raw_predict(X)
if raw_predictions.shape[1] == 1:
return raw_predictions.ravel()
return raw_predictions
|
Compute the decision function of ``X``.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
score : ndarray of shape (n_samples, n_classes) or (n_samples,)
The decision function of the input samples, which corresponds to
the raw values predicted from the trees of the ensemble. The
order of the classes corresponds to that in the attribute
:term:`classes_`. Regression and binary classification produce an
array of shape (n_samples,).
|
decision_function
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_gb.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_gb.py
|
BSD-3-Clause
|
def predict(self, X):
"""Predict class for X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : ndarray of shape (n_samples,)
The predicted values.
"""
raw_predictions = self.decision_function(X)
if raw_predictions.ndim == 1: # decision_function already squeezed it
encoded_classes = (raw_predictions >= 0).astype(int)
else:
encoded_classes = np.argmax(raw_predictions, axis=1)
return self.classes_[encoded_classes]
|
Predict class for X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : ndarray of shape (n_samples,)
The predicted values.
|
predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_gb.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_gb.py
|
BSD-3-Clause
|
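A tiny numpy sketch (illustrative values only) of the binary decision rule used in `predict` above: a raw score of at least zero maps to the positive class.
import numpy as np

classes = np.array(["neg", "pos"])
raw_scores = np.array([-1.3, 0.0, 2.1])      # e.g. decision_function output
encoded = (raw_scores >= 0).astype(int)      # threshold at 0
print(classes[encoded])                      # ['neg' 'pos' 'pos']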
def staged_predict(self, X):
"""Predict class at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Yields
------
y : generator of ndarray of shape (n_samples,)
The predicted value of the input samples.
"""
if self.n_classes_ == 2: # n_trees_per_iteration_ = 1
for raw_predictions in self._staged_raw_predict(X):
encoded_classes = (raw_predictions.squeeze() >= 0).astype(int)
yield self.classes_.take(encoded_classes, axis=0)
else:
for raw_predictions in self._staged_raw_predict(X):
encoded_classes = np.argmax(raw_predictions, axis=1)
yield self.classes_.take(encoded_classes, axis=0)
|
Predict class at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Yields
------
y : generator of ndarray of shape (n_samples,)
The predicted value of the input samples.
|
staged_predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_gb.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_gb.py
|
BSD-3-Clause
|
def staged_predict_proba(self, X):
"""Predict class probabilities at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Yields
------
y : generator of ndarray of shape (n_samples,)
The predicted value of the input samples.
"""
try:
for raw_predictions in self._staged_raw_predict(X):
yield self._loss.predict_proba(raw_predictions)
except NotFittedError:
raise
except AttributeError as e:
raise AttributeError(
"loss=%r does not support predict_proba" % self.loss
) from e
|
Predict class probabilities at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Yields
------
y : generator of ndarray of shape (n_samples,)
The predicted value of the input samples.
|
staged_predict_proba
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_gb.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_gb.py
|
BSD-3-Clause
|
def predict(self, X):
"""Predict regression target for X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : ndarray of shape (n_samples,)
The predicted values.
"""
X = validate_data(
self, X, dtype=DTYPE, order="C", accept_sparse="csr", reset=False
)
# In regression we can directly return the raw value from the trees.
return self._raw_predict(X).ravel()
|
Predict regression target for X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : ndarray of shape (n_samples,)
The predicted values.
|
predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_gb.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_gb.py
|
BSD-3-Clause
|
def apply(self, X):
"""Apply trees in the ensemble to X, return leaf indices.
.. versionadded:: 0.17
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will
be converted to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array-like of shape (n_samples, n_estimators)
For each datapoint x in X and for each tree in the ensemble,
return the index of the leaf x ends up in each estimator.
"""
leaves = super().apply(X)
leaves = leaves.reshape(X.shape[0], self.estimators_.shape[0])
return leaves
|
Apply trees in the ensemble to X, return leaf indices.
.. versionadded:: 0.17
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will
be converted to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array-like of shape (n_samples, n_estimators)
For each datapoint x in X and for each tree in the ensemble,
return the index of the leaf x ends up in each estimator.
|
apply
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_gb.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_gb.py
|
BSD-3-Clause
|
def _parallel_compute_tree_depths(
tree,
X,
features,
tree_decision_path_lengths,
tree_avg_path_lengths,
depths,
lock,
):
"""Parallel computation of isolation tree depth."""
if features is None:
X_subset = X
else:
X_subset = X[:, features]
leaves_index = tree.apply(X_subset, check_input=False)
with lock:
depths += (
tree_decision_path_lengths[leaves_index]
+ tree_avg_path_lengths[leaves_index]
- 1.0
)
|
Parallel computation of isolation tree depth.
|
_parallel_compute_tree_depths
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_iforest.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_iforest.py
|
BSD-3-Clause
|
def fit(self, X, y=None, sample_weight=None):
"""
Fit estimator.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
y : Ignored
Not used, present for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Fitted estimator.
"""
X = validate_data(
self, X, accept_sparse=["csc"], dtype=tree_dtype, ensure_all_finite=False
)
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
# ensure that max_sample is in [1, n_samples]:
n_samples = X.shape[0]
if isinstance(self.max_samples, str) and self.max_samples == "auto":
max_samples = min(256, n_samples)
elif isinstance(self.max_samples, numbers.Integral):
if self.max_samples > n_samples:
warn(
"max_samples (%s) is greater than the "
"total number of samples (%s). max_samples "
"will be set to n_samples for estimation."
% (self.max_samples, n_samples)
)
max_samples = n_samples
else:
max_samples = self.max_samples
else: # max_samples is float
max_samples = int(self.max_samples * X.shape[0])
self.max_samples_ = max_samples
max_depth = int(np.ceil(np.log2(max(max_samples, 2))))
super()._fit(
X,
y,
max_samples,
max_depth=max_depth,
sample_weight=sample_weight,
check_input=False,
)
self._average_path_length_per_tree, self._decision_path_lengths = zip(
*[
(
_average_path_length(tree.tree_.n_node_samples),
tree.tree_.compute_node_depths(),
)
for tree in self.estimators_
]
)
if self.contamination == "auto":
# 0.5 plays a special role as described in the original paper.
# we take the opposite as we consider the opposite of their score.
self.offset_ = -0.5
return self
# Else, define offset_ wrt contamination parameter
# To avoid performing input validation a second time we call
# _score_samples rather than score_samples.
# _score_samples expects a CSR matrix, so we convert if necessary.
if issparse(X):
X = X.tocsr()
self.offset_ = np.percentile(self._score_samples(X), 100.0 * self.contamination)
return self
|
Fit estimator.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
y : Ignored
Not used, present for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Fitted estimator.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_iforest.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_iforest.py
|
BSD-3-Clause
|
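A usage sketch (synthetic Gaussian data, illustration only) showing how the `contamination` parameter handled in `fit` above determines `offset_` and, in turn, roughly what fraction of training points get flagged.
import numpy as np
from sklearn.ensemble import IsolationForest

rng = np.random.RandomState(0)
X = rng.randn(200, 2)

iso_auto = IsolationForest(random_state=0).fit(X)
print(iso_auto.offset_)                      # -0.5 when contamination="auto"

iso_10 = IsolationForest(contamination=0.1, random_state=0).fit(X)
flagged = (iso_10.predict(X) == -1).mean()   # close to 0.1 on the training data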
def predict(self, X):
"""
Predict if a particular sample is an outlier or not.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
is_inlier : ndarray of shape (n_samples,)
For each observation, tells whether or not (+1 or -1) it should
be considered as an inlier according to the fitted model.
Notes
-----
The predict method can be parallelized by setting a joblib context. This
inherently does NOT use the ``n_jobs`` parameter initialized in the class,
which is used during ``fit``. This is because predict may actually be faster
without parallelization for a small number of samples,
such as for 1000 samples or less. The user can set the
number of jobs in the joblib context to control the number of parallel jobs.
.. code-block:: python
from joblib import parallel_backend
# Note, we use threading here as the predict method is not CPU bound.
with parallel_backend("threading", n_jobs=4):
model.predict(X)
"""
check_is_fitted(self)
decision_func = self.decision_function(X)
is_inlier = np.ones_like(decision_func, dtype=int)
is_inlier[decision_func < 0] = -1
return is_inlier
|
Predict if a particular sample is an outlier or not.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
is_inlier : ndarray of shape (n_samples,)
For each observation, tells whether or not (+1 or -1) it should
be considered as an inlier according to the fitted model.
Notes
-----
The predict method can be parallelized by setting a joblib context. This
inherently does NOT use the ``n_jobs`` parameter initialized in the class,
which is used during ``fit``. This is because predict may actually be faster
without parallelization for a small number of samples,
such as for 1000 samples or less. The user can set the
number of jobs in the joblib context to control the number of parallel jobs.
.. code-block:: python
from joblib import parallel_backend
# Note, we use threading here as the predict method is not CPU bound.
with parallel_backend("threading", n_jobs=4):
model.predict(X)
|
predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_iforest.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_iforest.py
|
BSD-3-Clause
|
def decision_function(self, X):
"""
Average anomaly score of X of the base classifiers.
The anomaly score of an input sample is computed as
the mean anomaly score of the trees in the forest.
The measure of normality of an observation given a tree is the depth
of the leaf containing this observation, which is equivalent to
the number of splittings required to isolate this point. In case of
several observations n_left in the leaf, the average path length of
an n_left-sample isolation tree is added.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
scores : ndarray of shape (n_samples,)
The anomaly score of the input samples.
The lower, the more abnormal. Negative scores represent outliers,
positive scores represent inliers.
Notes
-----
The decision_function method can be parallelized by setting a joblib context.
This inherently does NOT use the ``n_jobs`` parameter initialized in the class,
which is used during ``fit``. This is because calculating the score may
actually be faster without parallelization for a small number of samples,
such as for 1000 samples or less.
The user can set the number of jobs in the joblib context to control the
number of parallel jobs.
.. code-block:: python
from joblib import parallel_backend
# Note, we use threading here as the decision_function method is
# not CPU bound.
with parallel_backend("threading", n_jobs=4):
model.decision_function(X)
"""
# We subtract self.offset_ to make 0 be the threshold value for being
# an outlier:
return self.score_samples(X) - self.offset_
|
Average anomaly score of X of the base classifiers.
The anomaly score of an input sample is computed as
the mean anomaly score of the trees in the forest.
The measure of normality of an observation given a tree is the depth
of the leaf containing this observation, which is equivalent to
the number of splittings required to isolate this point. In case of
several observations n_left in the leaf, the average path length of
an n_left-sample isolation tree is added.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
scores : ndarray of shape (n_samples,)
The anomaly score of the input samples.
The lower, the more abnormal. Negative scores represent outliers,
positive scores represent inliers.
Notes
-----
The decision_function method can be parallelized by setting a joblib context.
This inherently does NOT use the ``n_jobs`` parameter initialized in the class,
which is used during ``fit``. This is because calculating the score may
actually be faster without parallelization for a small number of samples,
such as for 1000 samples or less.
The user can set the number of jobs in the joblib context to control the
number of parallel jobs.
.. code-block:: python
from joblib import parallel_backend
# Note, we use threading here as the decision_function method is
# not CPU bound.
with parallel_backend("threading", n_jobs=4):
model.decision_function(X)
|
decision_function
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_iforest.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_iforest.py
|
BSD-3-Clause
|
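A small check (synthetic data) of the relationship implemented above: `decision_function` is just `score_samples` shifted by `offset_`, so that 0 becomes the inlier/outlier threshold.
import numpy as np
from sklearn.ensemble import IsolationForest

X = np.random.RandomState(0).randn(100, 2)
iso = IsolationForest(random_state=0).fit(X)

np.testing.assert_allclose(
    iso.decision_function(X), iso.score_samples(X) - iso.offset_
)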
def score_samples(self, X):
"""
Opposite of the anomaly score defined in the original paper.
The anomaly score of an input sample is computed as
the mean anomaly score of the trees in the forest.
The measure of normality of an observation given a tree is the depth
of the leaf containing this observation, which is equivalent to
the number of splittings required to isolate this point. In case of
several observations n_left in the leaf, the average path length of
an n_left-sample isolation tree is added.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples.
Returns
-------
scores : ndarray of shape (n_samples,)
The anomaly score of the input samples.
The lower, the more abnormal.
Notes
-----
The score function method can be parallelized by setting a joblib context. This
inherently does NOT use the ``n_jobs`` parameter initialized in the class,
which is used during ``fit``. This is because calculating the score may
actually be faster without parallelization for a small number of samples,
such as for 1000 samples or less.
The user can set the number of jobs in the joblib context to control the
number of parallel jobs.
.. code-block:: python
from joblib import parallel_backend
# Note, we use threading here as the score_samples method is not CPU bound.
with parallel_backend("threading", n_jobs=4):
model.score_samples(X)
"""
# Check data
X = validate_data(
self,
X,
accept_sparse="csr",
dtype=tree_dtype,
reset=False,
ensure_all_finite=False,
)
return self._score_samples(X)
|
Opposite of the anomaly score defined in the original paper.
The anomaly score of an input sample is computed as
the mean anomaly score of the trees in the forest.
The measure of normality of an observation given a tree is the depth
of the leaf containing this observation, which is equivalent to
the number of splittings required to isolate this point. In case of
several observations n_left in the leaf, the average path length of
an n_left-sample isolation tree is added.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples.
Returns
-------
scores : ndarray of shape (n_samples,)
The anomaly score of the input samples.
The lower, the more abnormal.
Notes
-----
The score function method can be parallelized by setting a joblib context. This
inherently does NOT use the ``n_jobs`` parameter initialized in the class,
which is used during ``fit``. This is because calculating the score may
actually be faster without parallelization for a small number of samples,
such as for 1000 samples or less.
The user can set the number of jobs in the joblib context to control the
number of parallel jobs.
.. code-block:: python
from joblib import parallel_backend
# Note, we use threading here as the score_samples method is not CPU bound.
with parallel_backend("threading", n_jobs=4):
model.score_samples(X)
|
score_samples
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_iforest.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_iforest.py
|
BSD-3-Clause
|
def _score_samples(self, X):
"""Private version of score_samples without input validation.
Input validation would remove feature names, so we disable it.
"""
# Code structure from ForestClassifier/predict_proba
check_is_fitted(self)
# Take the opposite of the scores as bigger is better (here less abnormal)
return -self._compute_chunked_score_samples(X)
|
Private version of score_samples without input validation.
Input validation would remove feature names, so we disable it.
|
_score_samples
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_iforest.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_iforest.py
|
BSD-3-Clause
|
def _compute_score_samples(self, X, subsample_features):
"""
Compute the score of each sample in X by passing it through the extra trees.
Parameters
----------
X : array-like or sparse matrix
Data matrix.
subsample_features : bool
Whether features should be subsampled.
Returns
-------
scores : ndarray of shape (n_samples,)
The score of each sample in X.
"""
n_samples = X.shape[0]
depths = np.zeros(n_samples, order="f")
average_path_length_max_samples = _average_path_length([self._max_samples])
# Note: we use default n_jobs value, i.e. sequential computation, which
# we expect to be more performant than parallelizing for a small number
# of samples, e.g. < 1k samples. The default n_jobs value can be overridden
# by using joblib.parallel_backend context manager around
# ._compute_score_samples. Using a higher n_jobs may speed up the
# computation of the scores, e.g. for > 1k samples. See
# https://github.com/scikit-learn/scikit-learn/pull/28622 for more
# details.
lock = threading.Lock()
Parallel(
verbose=self.verbose,
require="sharedmem",
)(
delayed(_parallel_compute_tree_depths)(
tree,
X,
features if subsample_features else None,
self._decision_path_lengths[tree_idx],
self._average_path_length_per_tree[tree_idx],
depths,
lock,
)
for tree_idx, (tree, features) in enumerate(
zip(self.estimators_, self.estimators_features_)
)
)
denominator = len(self.estimators_) * average_path_length_max_samples
scores = 2 ** (
# For a single training sample, denominator and depth are 0.
# Therefore, we set the score manually to 1.
-np.divide(
depths, denominator, out=np.ones_like(depths), where=denominator != 0
)
)
return scores
|
Compute the score of each sample in X by passing it through the extra trees.
Parameters
----------
X : array-like or sparse matrix
Data matrix.
subsample_features : bool
Whether features should be subsampled.
Returns
-------
scores : ndarray of shape (n_samples,)
The score of each sample in X.
|
_compute_score_samples
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_iforest.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_iforest.py
|
BSD-3-Clause
|
def _average_path_length(n_samples_leaf):
"""
The average path length in a n_samples iTree, which is equal to
the average path length of an unsuccessful BST search since the
latter has the same structure as an isolation tree.
Parameters
----------
n_samples_leaf : array-like of shape (n_samples,)
The number of training samples in each test sample leaf, for
each estimator.
Returns
-------
average_path_length : ndarray of shape (n_samples,)
"""
n_samples_leaf = check_array(n_samples_leaf, ensure_2d=False)
n_samples_leaf_shape = n_samples_leaf.shape
n_samples_leaf = n_samples_leaf.reshape((1, -1))
average_path_length = np.zeros(n_samples_leaf.shape)
mask_1 = n_samples_leaf <= 1
mask_2 = n_samples_leaf == 2
not_mask = ~np.logical_or(mask_1, mask_2)
average_path_length[mask_1] = 0.0
average_path_length[mask_2] = 1.0
average_path_length[not_mask] = (
2.0 * (np.log(n_samples_leaf[not_mask] - 1.0) + np.euler_gamma)
- 2.0 * (n_samples_leaf[not_mask] - 1.0) / n_samples_leaf[not_mask]
)
return average_path_length.reshape(n_samples_leaf_shape)
|
The average path length in a n_samples iTree, which is equal to
the average path length of an unsuccessful BST search since the
latter has the same structure as an isolation tree.
Parameters
----------
n_samples_leaf : array-like of shape (n_samples,)
The number of training samples in each test sample leaf, for
each estimator.
Returns
-------
average_path_length : ndarray of shape (n_samples,)
|
_average_path_length
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_iforest.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_iforest.py
|
BSD-3-Clause
|
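A standalone sketch (illustration only) of the normaliser computed by `_average_path_length` and of how `_compute_score_samples` combines it with the observed depths into the anomaly score 2 ** (-depth / c(max_samples)).
import numpy as np

def c(n):
    # Average path length of an unsuccessful BST search over n points (n > 2).
    n = np.asarray(n, dtype=float)
    return 2.0 * (np.log(n - 1.0) + np.euler_gamma) - 2.0 * (n - 1.0) / n

print(c([4, 16, 256]))          # grows roughly like 2 * ln(n)

# Shallower-than-average isolation depth -> score close to 1 (anomalous),
# average depth -> score around 0.5, deeper -> score closer to 0.
mean_depth, max_samples = 3.0, 256
score = 2.0 ** (-mean_depth / c([max_samples])[0])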
def _concatenate_predictions(self, X, predictions):
"""Concatenate the predictions of each first layer learner and
possibly the input dataset `X`.
If `X` is sparse and `self.passthrough` is False, the output of
`transform` will be dense (the predictions). If `X` is sparse
and `self.passthrough` is True, the output of `transform` will
be sparse.
This helper is in charge of ensuring the predictions are 2D arrays and
it will drop one of the probability columns when using probabilities
in the binary case, since p(y|c=0) = 1 - p(y|c=1).
When the `y` type is `"multilabel-indicator"` and the method used is
`predict_proba`, `preds` can be either an `ndarray` of shape
`(n_samples, n_classes)` or, for some estimators, a list of `ndarray`.
This function will drop one of the probability columns in this situation as well.
"""
X_meta = []
for est_idx, preds in enumerate(predictions):
if isinstance(preds, list):
# `preds` is here a list of `n_targets` 2D ndarrays of
# `n_classes` columns. The k-th column contains the
# probabilities of the samples belonging to the k-th class.
#
# Since those probabilities must sum to one for each sample,
# we can work with probabilities of `n_classes - 1` classes.
# Hence we drop the first column.
for pred in preds:
X_meta.append(pred[:, 1:])
elif preds.ndim == 1:
# Some estimators return a 1D array for predictions
# which must be 2-dimensional arrays.
X_meta.append(preds.reshape(-1, 1))
elif (
self.stack_method_[est_idx] == "predict_proba"
and len(self.classes_) == 2
):
# Remove the first column when using probabilities in binary
# classification because the two columns of `preds` are perfectly
# collinear.
X_meta.append(preds[:, 1:])
else:
X_meta.append(preds)
self._n_feature_outs = [pred.shape[1] for pred in X_meta]
if self.passthrough:
X_meta.append(X)
if sparse.issparse(X):
return sparse.hstack(X_meta, format=X.format)
return np.hstack(X_meta)
|
Concatenate the predictions of each first layer learner and
possibly the input dataset `X`.
If `X` is sparse and `self.passthrough` is False, the output of
`transform` will be dense (the predictions). If `X` is sparse
and `self.passthrough` is True, the output of `transform` will
be sparse.
This helper is in charge of ensuring the predictions are 2D arrays and
it will drop one of the probability columns when using probabilities
in the binary case, since p(y|c=0) = 1 - p(y|c=1).
When the `y` type is `"multilabel-indicator"` and the method used is
`predict_proba`, `preds` can be either an `ndarray` of shape
`(n_samples, n_classes)` or, for some estimators, a list of `ndarray`.
This function will drop one of the probability columns in this situation as well.
|
_concatenate_predictions
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_stacking.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_stacking.py
|
BSD-3-Clause
|
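A small numpy sketch (illustrative values) of why `_concatenate_predictions` keeps only one probability column per binary estimator: the two columns sum to one, so the second column carries all the information.
import numpy as np

proba = np.array([[0.2, 0.8],
                  [0.7, 0.3],
                  [0.1, 0.9]])      # e.g. predict_proba output, binary case
meta_feature = proba[:, 1:]         # drop the redundant first column
assert np.allclose(proba[:, 0], 1.0 - meta_feature[:, 0])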
def fit(self, X, y, **fit_params):
"""Fit the estimators.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values.
**fit_params : dict
Dict of metadata, potentially containing sample_weight as a
key-value pair. If sample_weight is not present, then samples are
equally weighted. Note that sample_weight is supported only if all
underlying estimators support sample weights.
.. versionadded:: 1.6
Returns
-------
self : object
"""
# all_estimators contains all estimators, the one to be fitted and the
# 'drop' string.
names, all_estimators = self._validate_estimators()
self._validate_final_estimator()
stack_method = [self.stack_method] * len(all_estimators)
if _routing_enabled():
routed_params = process_routing(self, "fit", **fit_params)
else:
routed_params = Bunch()
for name in names:
routed_params[name] = Bunch(fit={})
if "sample_weight" in fit_params:
routed_params[name].fit["sample_weight"] = fit_params[
"sample_weight"
]
if self.cv == "prefit":
self.estimators_ = []
for estimator in all_estimators:
if estimator != "drop":
check_is_fitted(estimator)
self.estimators_.append(estimator)
else:
# Fit the base estimators on the whole training data. Those
# base estimators will be used in transform, predict, and
# predict_proba. They are exposed publicly.
self.estimators_ = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_single_estimator)(
clone(est), X, y, routed_params[name]["fit"]
)
for name, est in zip(names, all_estimators)
if est != "drop"
)
self.named_estimators_ = Bunch()
est_fitted_idx = 0
for name_est, org_est in zip(names, all_estimators):
if org_est != "drop":
current_estimator = self.estimators_[est_fitted_idx]
self.named_estimators_[name_est] = current_estimator
est_fitted_idx += 1
if hasattr(current_estimator, "feature_names_in_"):
self.feature_names_in_ = current_estimator.feature_names_in_
else:
self.named_estimators_[name_est] = "drop"
self.stack_method_ = [
self._method_name(name, est, meth)
for name, est, meth in zip(names, all_estimators, stack_method)
]
if self.cv == "prefit":
# Generate predictions from prefit models
predictions = [
getattr(estimator, predict_method)(X)
for estimator, predict_method in zip(all_estimators, self.stack_method_)
if estimator != "drop"
]
else:
# To train the meta-classifier using as much data as possible, we use
# a cross-validation to obtain the output of the stacked estimators.
# To ensure that the data provided to each estimator are the same,
# we need to set the random state of the cv if there is one and we
# need to take a copy.
cv = check_cv(self.cv, y=y, classifier=is_classifier(self))
if hasattr(cv, "random_state") and cv.random_state is None:
cv.random_state = np.random.RandomState()
predictions = Parallel(n_jobs=self.n_jobs)(
delayed(cross_val_predict)(
clone(est),
X,
y,
cv=deepcopy(cv),
method=meth,
n_jobs=self.n_jobs,
params=routed_params[name]["fit"],
verbose=self.verbose,
)
for name, est, meth in zip(names, all_estimators, self.stack_method_)
if est != "drop"
)
# Only estimators that are not None and not 'drop' will be used in
# transform. Remove the corresponding entries from the methods as well.
self.stack_method_ = [
meth
for (meth, est) in zip(self.stack_method_, all_estimators)
if est != "drop"
]
X_meta = self._concatenate_predictions(X, predictions)
_fit_single_estimator(self.final_estimator_, X_meta, y, fit_params=fit_params)
return self
|
Fit the estimators.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values.
**fit_params : dict
Dict of metadata, potentially containing sample_weight as a
key-value pair. If sample_weight is not present, then samples are
equally weighted. Note that sample_weight is supported only if all
underlying estimators support sample weights.
.. versionadded:: 1.6
Returns
-------
self : object
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_stacking.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_stacking.py
|
BSD-3-Clause
|
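A usage sketch (synthetic data; estimator names and 5-fold CV are assumptions for illustration) of the `fit` logic above: base estimators are refit on all of X, while the meta-features passed to the final estimator come from out-of-fold `cross_val_predict` outputs.
from sklearn.datasets import make_classification
from sklearn.ensemble import StackingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier

X, y = make_classification(n_samples=200, random_state=0)
stack = StackingClassifier(
    estimators=[("lr", LogisticRegression()), ("tree", DecisionTreeClassifier())],
    final_estimator=LogisticRegression(),
    cv=5,
).fit(X, y)

# transform(X) returns the meta-features fed to the final estimator:
# one probability column per non-dropped estimator in the binary case.
print(stack.transform(X).shape)     # (200, 2)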
def n_features_in_(self):
"""Number of features seen during :term:`fit`."""
try:
check_is_fitted(self)
except NotFittedError as nfe:
raise AttributeError(
f"{self.__class__.__name__} object has no attribute n_features_in_"
) from nfe
return self.estimators_[0].n_features_in_
|
Number of features seen during :term:`fit`.
|
n_features_in_
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_stacking.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_stacking.py
|
BSD-3-Clause
|
def _transform(self, X):
"""Concatenate and return the predictions of the estimators."""
check_is_fitted(self)
predictions = [
getattr(est, meth)(X)
for est, meth in zip(self.estimators_, self.stack_method_)
if est != "drop"
]
return self._concatenate_predictions(X, predictions)
|
Concatenate and return the predictions of the estimators.
|
_transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_stacking.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_stacking.py
|
BSD-3-Clause
|
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Input features. The input feature names are only used when `passthrough` is
`True`.
- If `input_features` is `None`, then `feature_names_in_` is
used as feature names in. If `feature_names_in_` is not defined,
then names are generated: `[x0, x1, ..., x(n_features_in_ - 1)]`.
- If `input_features` is an array-like, then `input_features` must
match `feature_names_in_` if `feature_names_in_` is defined.
If `passthrough` is `False`, then only the names of `estimators` are used
to generate the output feature names.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
"""
check_is_fitted(self, "n_features_in_")
input_features = _check_feature_names_in(
self, input_features, generate_names=self.passthrough
)
class_name = self.__class__.__name__.lower()
non_dropped_estimators = (
name for name, est in self.estimators if est != "drop"
)
meta_names = []
for est, n_features_out in zip(non_dropped_estimators, self._n_feature_outs):
if n_features_out == 1:
meta_names.append(f"{class_name}_{est}")
else:
meta_names.extend(
f"{class_name}_{est}{i}" for i in range(n_features_out)
)
if self.passthrough:
return np.concatenate((meta_names, input_features))
return np.asarray(meta_names, dtype=object)
|
Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Input features. The input feature names are only used when `passthrough` is
`True`.
- If `input_features` is `None`, then `feature_names_in_` is
used as feature names in. If `feature_names_in_` is not defined,
then names are generated: `[x0, x1, ..., x(n_features_in_ - 1)]`.
- If `input_features` is an array-like, then `input_features` must
match `feature_names_in_` if `feature_names_in_` is defined.
If `passthrough` is `False`, then only the names of `estimators` are used
to generate the output feature names.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
|
get_feature_names_out
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_stacking.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_stacking.py
|
BSD-3-Clause
|
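A usage sketch (binary problem, so each base estimator contributes a single probability column; names are illustrative) of the naming scheme implemented in `get_feature_names_out` above.
from sklearn.datasets import make_classification
from sklearn.ensemble import StackingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier

X, y = make_classification(n_samples=100, random_state=0)
stack = StackingClassifier(
    estimators=[("lr", LogisticRegression()), ("tree", DecisionTreeClassifier())],
).fit(X, y)

print(stack.get_feature_names_out())
# e.g. ['stackingclassifier_lr' 'stackingclassifier_tree'] in the binary case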
def predict(self, X, **predict_params):
"""Predict target for X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
**predict_params : dict of str -> obj
Parameters to the `predict` called by the `final_estimator`. Note
that this may be used to return uncertainties from some estimators
with `return_std` or `return_cov`. Be aware that it will only
account for uncertainty in the final estimator.
Returns
-------
y_pred : ndarray of shape (n_samples,) or (n_samples, n_output)
Predicted targets.
"""
check_is_fitted(self)
return self.final_estimator_.predict(self.transform(X), **predict_params)
|
Predict target for X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
**predict_params : dict of str -> obj
Parameters to the `predict` called by the `final_estimator`. Note
that this may be used to return uncertainties from some estimators
with `return_std` or `return_cov`. Be aware that it will only
account for uncertainty in the final estimator.
Returns
-------
y_pred : ndarray of shape (n_samples,) or (n_samples, n_output)
Predicted targets.
|
predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_stacking.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_stacking.py
|
BSD-3-Clause
|
def get_metadata_routing(self):
"""Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.6
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
"""
router = MetadataRouter(owner=self.__class__.__name__)
# `self.estimators` is a list of (name, est) tuples
for name, estimator in self.estimators:
router.add(
**{name: estimator},
method_mapping=MethodMapping().add(callee="fit", caller="fit"),
)
try:
final_estimator_ = self.final_estimator_
except AttributeError:
final_estimator_ = self.final_estimator
router.add(
final_estimator_=final_estimator_,
method_mapping=MethodMapping().add(caller="predict", callee="predict"),
)
return router
|
Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.6
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
|
get_metadata_routing
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_stacking.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_stacking.py
|
BSD-3-Clause
|
def _validate_estimators(self):
"""Overload the method of `_BaseHeterogeneousEnsemble` to be more
lenient towards the type of `estimators`.
Regressors can be accepted for some cases such as ordinal regression.
"""
if len(self.estimators) == 0:
raise ValueError(
"Invalid 'estimators' attribute, 'estimators' should be a "
"non-empty list of (string, estimator) tuples."
)
names, estimators = zip(*self.estimators)
self._validate_names(names)
has_estimator = any(est != "drop" for est in estimators)
if not has_estimator:
raise ValueError(
"All estimators are dropped. At least one is required "
"to be an estimator."
)
return names, estimators
|
Overload the method of `_BaseHeterogeneousEnsemble` to be more
lenient towards the type of `estimators`.
Regressors can be accepted for some cases such as ordinal regression.
|
_validate_estimators
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_stacking.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_stacking.py
|
BSD-3-Clause
|
def fit(self, X, y, **fit_params):
"""Fit the estimators.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values. Note that `y` will be internally encoded in
numerically increasing order or lexicographic order. If the order
matters (e.g. for ordinal regression), one should numerically encode
the target `y` before calling :term:`fit`.
**fit_params : dict
Parameters to pass to the underlying estimators.
.. versionadded:: 1.6
Only available if `enable_metadata_routing=True`, which can be
set by using ``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
self : object
Returns a fitted instance of estimator.
"""
_raise_for_params(fit_params, self, "fit", allow=["sample_weight"])
check_classification_targets(y)
if type_of_target(y) == "multilabel-indicator":
self._label_encoder = [LabelEncoder().fit(yk) for yk in y.T]
self.classes_ = [le.classes_ for le in self._label_encoder]
y_encoded = np.array(
[
self._label_encoder[target_idx].transform(target)
for target_idx, target in enumerate(y.T)
]
).T
else:
self._label_encoder = LabelEncoder().fit(y)
self.classes_ = self._label_encoder.classes_
y_encoded = self._label_encoder.transform(y)
return super().fit(X, y_encoded, **fit_params)
|
Fit the estimators.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values. Note that `y` will be internally encoded in
numerically increasing order or lexicographic order. If the order
matters (e.g. for ordinal regression), one should numerically encode
the target `y` before calling :term:`fit`.
**fit_params : dict
Parameters to pass to the underlying estimators.
.. versionadded:: 1.6
Only available if `enable_metadata_routing=True`, which can be
set by using ``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
self : object
Returns a fitted instance of estimator.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_stacking.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_stacking.py
|
BSD-3-Clause
|
def predict(self, X, **predict_params):
"""Predict target for X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
**predict_params : dict of str -> obj
Parameters to the `predict` called by the `final_estimator`. Note
that this may be used to return uncertainties from some estimators
with `return_std` or `return_cov`. Be aware that it will only
account for uncertainty in the final estimator.
- If `enable_metadata_routing=False` (default):
Parameters directly passed to the `predict` method of the
`final_estimator`.
- If `enable_metadata_routing=True`: Parameters safely routed to
the `predict` method of the `final_estimator`. See :ref:`Metadata
Routing User Guide <metadata_routing>` for more details.
.. versionchanged:: 1.6
`**predict_params` can be routed via metadata routing API.
Returns
-------
y_pred : ndarray of shape (n_samples,) or (n_samples, n_output)
Predicted targets.
"""
if _routing_enabled():
routed_params = process_routing(self, "predict", **predict_params)
else:
# TODO(SLEP6): remove when metadata routing cannot be disabled.
routed_params = Bunch()
routed_params.final_estimator_ = Bunch(predict={})
routed_params.final_estimator_.predict = predict_params
y_pred = super().predict(X, **routed_params.final_estimator_["predict"])
if isinstance(self._label_encoder, list):
# Handle the multilabel-indicator case
y_pred = np.array(
[
self._label_encoder[target_idx].inverse_transform(target)
for target_idx, target in enumerate(y_pred.T)
]
).T
else:
y_pred = self._label_encoder.inverse_transform(y_pred)
return y_pred
|
Predict target for X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
**predict_params : dict of str -> obj
Parameters to the `predict` called by the `final_estimator`. Note
that this may be used to return uncertainties from some estimators
with `return_std` or `return_cov`. Be aware that it will only
account for uncertainty in the final estimator.
- If `enable_metadata_routing=False` (default):
Parameters directly passed to the `predict` method of the
`final_estimator`.
- If `enable_metadata_routing=True`: Parameters safely routed to
the `predict` method of the `final_estimator`. See :ref:`Metadata
Routing User Guide <metadata_routing>` for more details.
.. versionchanged:: 1.6
`**predict_params` can be routed via metadata routing API.
Returns
-------
y_pred : ndarray of shape (n_samples,) or (n_samples, n_output)
Predicted targets.
|
predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_stacking.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_stacking.py
|
BSD-3-Clause
|
def predict_proba(self, X):
"""Predict class probabilities for `X` using the final estimator.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
Returns
-------
probabilities : ndarray of shape (n_samples, n_classes) or \
list of ndarray of shape (n_output,)
The class probabilities of the input samples.
"""
check_is_fitted(self)
y_pred = self.final_estimator_.predict_proba(self.transform(X))
if isinstance(self._label_encoder, list):
# Handle the multilabel-indicator cases
y_pred = np.array([preds[:, 0] for preds in y_pred]).T
return y_pred
|
Predict class probabilities for `X` using the final estimator.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
Returns
-------
probabilities : ndarray of shape (n_samples, n_classes) or list of ndarray of shape (n_output,)
The class probabilities of the input samples.
|
predict_proba
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_stacking.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_stacking.py
|
BSD-3-Clause
|
def fit(self, X, y, **fit_params):
"""Fit the estimators.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values.
**fit_params : dict
Parameters to pass to the underlying estimators.
.. versionadded:: 1.6
Only available if `enable_metadata_routing=True`, which can be
set by using ``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
self : object
Returns a fitted instance.
"""
_raise_for_params(fit_params, self, "fit", allow=["sample_weight"])
y = column_or_1d(y, warn=True)
return super().fit(X, y, **fit_params)
|
Fit the estimators.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values.
**fit_params : dict
Parameters to pass to the underlying estimators.
.. versionadded:: 1.6
Only available if `enable_metadata_routing=True`, which can be
set by using ``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
self : object
Returns a fitted instance.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_stacking.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_stacking.py
|
BSD-3-Clause
|
def fit_transform(self, X, y, **fit_params):
"""Fit the estimators and return the predictions for X for each estimator.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values.
**fit_params : dict
Parameters to pass to the underlying estimators.
.. versionadded:: 1.6
Only available if `enable_metadata_routing=True`, which can be
set by using ``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
y_preds : ndarray of shape (n_samples, n_estimators)
Prediction outputs for each estimator.
"""
_raise_for_params(fit_params, self, "fit", allow=["sample_weight"])
return super().fit_transform(X, y, **fit_params)
|
Fit the estimators and return the predictions for X for each estimator.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values.
**fit_params : dict
Parameters to pass to the underlying estimators.
.. versionadded:: 1.6
Only available if `enable_metadata_routing=True`, which can be
set by using ``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
y_preds : ndarray of shape (n_samples, n_estimators)
Prediction outputs for each estimator.
|
fit_transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_stacking.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_stacking.py
|
BSD-3-Clause
|
def predict(self, X, **predict_params):
"""Predict target for X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
**predict_params : dict of str -> obj
Parameters to the `predict` called by the `final_estimator`. Note
that this may be used to return uncertainties from some estimators
with `return_std` or `return_cov`. Be aware that it will only
account for uncertainty in the final estimator.
- If `enable_metadata_routing=False` (default):
Parameters directly passed to the `predict` method of the
`final_estimator`.
- If `enable_metadata_routing=True`: Parameters safely routed to
the `predict` method of the `final_estimator`. See :ref:`Metadata
Routing User Guide <metadata_routing>` for more details.
.. versionchanged:: 1.6
`**predict_params` can be routed via metadata routing API.
Returns
-------
y_pred : ndarray of shape (n_samples,) or (n_samples, n_output)
Predicted targets.
"""
if _routing_enabled():
routed_params = process_routing(self, "predict", **predict_params)
else:
# TODO(SLEP6): remove when metadata routing cannot be disabled.
routed_params = Bunch()
routed_params.final_estimator_ = Bunch(predict={})
routed_params.final_estimator_.predict = predict_params
y_pred = super().predict(X, **routed_params.final_estimator_["predict"])
return y_pred
|
Predict target for X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
**predict_params : dict of str -> obj
Parameters to the `predict` called by the `final_estimator`. Note
that this may be used to return uncertainties from some estimators
with `return_std` or `return_cov`. Be aware that it will only
account for uncertainty in the final estimator.
- If `enable_metadata_routing=False` (default):
Parameters directly passed to the `predict` method of the
`final_estimator`.
- If `enable_metadata_routing=True`: Parameters safely routed to
the `predict` method of the `final_estimator`. See :ref:`Metadata
Routing User Guide <metadata_routing>` for more details.
.. versionchanged:: 1.6
`**predict_params` can be routed via metadata routing API.
Returns
-------
y_pred : ndarray of shape (n_samples,) or (n_samples, n_output)
Predicted targets.
|
predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_stacking.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_stacking.py
|
BSD-3-Clause
|
def _weights_not_none(self):
"""Get the weights of not `None` estimators."""
if self.weights is None:
return None
return [w for est, w in zip(self.estimators, self.weights) if est[1] != "drop"]
|
Get the weights of not `None` estimators.
|
_weights_not_none
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_voting.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_voting.py
|
BSD-3-Clause
|
def n_features_in_(self):
"""Number of features seen during :term:`fit`."""
# For consistency with other estimators we raise an AttributeError so
# that hasattr() fails if the estimator isn't fitted.
try:
check_is_fitted(self)
except NotFittedError as nfe:
raise AttributeError(
"{} object has no n_features_in_ attribute.".format(
self.__class__.__name__
)
) from nfe
return self.estimators_[0].n_features_in_
|
Number of features seen during :term:`fit`.
|
n_features_in_
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_voting.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_voting.py
|
BSD-3-Clause
|
def get_metadata_routing(self):
"""Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.5
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
"""
router = MetadataRouter(owner=self.__class__.__name__)
# `self.estimators` is a list of (name, est) tuples
for name, estimator in self.estimators:
router.add(
**{name: estimator},
method_mapping=MethodMapping().add(callee="fit", caller="fit"),
)
return router
|
Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.5
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
|
get_metadata_routing
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_voting.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_voting.py
|
BSD-3-Clause
|
def fit(self, X, y, **fit_params):
"""Fit the estimators.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values.
**fit_params : dict
Parameters to pass to the underlying estimators.
.. versionadded:: 1.5
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
self : object
Returns the instance itself.
"""
_raise_for_params(fit_params, self, "fit", allow=["sample_weight"])
y_type = type_of_target(y, input_name="y")
if y_type in ("unknown", "continuous"):
# raise a specific ValueError for non-classification tasks
raise ValueError(
f"Unknown label type: {y_type}. Maybe you are trying to fit a "
"classifier, which expects discrete classes on a "
"regression target with continuous values."
)
elif y_type not in ("binary", "multiclass"):
# raise a NotImplementedError for backward compatibility for non-supported
# classification tasks
raise NotImplementedError(
f"{self.__class__.__name__} only supports binary or multiclass "
"classification. Multilabel and multi-output classification are not "
"supported."
)
self.le_ = LabelEncoder().fit(y)
self.classes_ = self.le_.classes_
transformed_y = self.le_.transform(y)
return super().fit(X, transformed_y, **fit_params)
|
Fit the estimators.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values.
**fit_params : dict
Parameters to pass to the underlying estimators.
.. versionadded:: 1.5
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
self : object
Returns the instance itself.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_voting.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_voting.py
|
BSD-3-Clause
|
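A hedged sketch of the target validation in the VotingClassifier.fit record above: a continuous target triggers the specific ValueError, while discrete labels are encoded and fitting proceeds. Data and estimator choices are illustrative.

import numpy as np
from sklearn.ensemble import VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier

rng = np.random.RandomState(0)
X = rng.randn(50, 3)
y_continuous = rng.randn(50)

clf = VotingClassifier(
    estimators=[("lr", LogisticRegression()), ("dt", DecisionTreeClassifier())]
)
try:
    clf.fit(X, y_continuous)
except ValueError as exc:
    print(exc)  # Unknown label type: continuous. Maybe you are trying to fit ...

# Binary labels pass type_of_target and are encoded with LabelEncoder.
y_binary = (y_continuous > 0).astype(int)
clf.fit(X, y_binary)
print(clf.classes_)  # [0 1]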
def predict(self, X):
"""Predict class labels for X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples.
Returns
-------
maj : array-like of shape (n_samples,)
Predicted class labels.
"""
check_is_fitted(self)
if self.voting == "soft":
maj = np.argmax(self.predict_proba(X), axis=1)
else: # 'hard' voting
predictions = self._predict(X)
maj = np.apply_along_axis(
lambda x: np.argmax(np.bincount(x, weights=self._weights_not_none)),
axis=1,
arr=predictions,
)
maj = self.le_.inverse_transform(maj)
return maj
|
Predict class labels for X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples.
Returns
-------
maj : array-like of shape (n_samples,)
Predicted class labels.
|
predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_voting.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_voting.py
|
BSD-3-Clause
|
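A standalone illustration of the 'hard' voting branch in the predict record above, with made-up per-estimator votes and weights; ties go to the lowest class index because np.argmax returns the first maximum.

import numpy as np

predictions = np.array([[0, 1, 1],
                        [2, 2, 0]])  # shape (n_samples, n_estimators)
weights = [1.0, 1.0, 2.0]
# Weighted majority vote per sample, exactly as in the 'hard' branch above.
maj = np.apply_along_axis(
    lambda x: np.argmax(np.bincount(x, weights=weights)),
    axis=1,
    arr=predictions,
)
print(maj)  # [1 0]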