code | docstring | func_name | language | repo | path | url | license
---|---|---|---|---|---|---|---|
def fit(
self,
X,
y,
sample_weight=None,
*,
X_val=None,
y_val=None,
sample_weight_val=None,
):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Weights of training data.
.. versionadded:: 0.23
X_val : array-like of shape (n_val, n_features)
Additional sample of features for validation used in early stopping.
In a `Pipeline`, `X_val` can be transformed the same way as `X` with
`Pipeline(..., transform_input=["X_val"])`.
.. versionadded:: 1.7
y_val : array-like of shape (n_val,)
Additional sample of target values for validation used in early stopping.
.. versionadded:: 1.7
sample_weight_val : array-like of shape (n_val,), default=None
Additional weights for validation used in early stopping.
.. versionadded:: 1.7
Returns
-------
self : object
Fitted estimator.
"""
fit_start_time = time()
acc_find_split_time = 0.0 # time spent finding the best splits
acc_apply_split_time = 0.0 # time spent splitting nodes
acc_compute_hist_time = 0.0 # time spent computing histograms
# time spent predicting X for gradient and hessians update
acc_prediction_time = 0.0
X, known_categories = self._preprocess_X(X, reset=True)
y = _check_y(y, estimator=self)
y = self._encode_y(y)
check_consistent_length(X, y)
# Do not create unit sample weights by default to later skip some
# computation
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=np.float64)
# TODO: remove when PDP supports sample weights
self._fitted_with_sw = True
sample_weight = self._finalize_sample_weight(sample_weight, y)
validation_data_provided = X_val is not None or y_val is not None
if validation_data_provided:
if y_val is None:
raise ValueError("X_val is provided, but y_val was not provided.")
if X_val is None:
raise ValueError("y_val is provided, but X_val was not provided.")
X_val = self._preprocess_X(X_val, reset=False)
y_val = _check_y(y_val, estimator=self)
y_val = self._encode_y_val(y_val)
check_consistent_length(X_val, y_val)
if sample_weight_val is not None:
sample_weight_val = _check_sample_weight(
sample_weight_val, X_val, dtype=np.float64
)
if self.early_stopping is False:
raise ValueError(
"X_val and y_val are passed to fit while at the same time "
"early_stopping is False. When passing X_val and y_val to fit, "
"early_stopping should be set to either 'auto' or True."
)
# Note: At this point, we could delete self._label_encoder if it exists.
# But we don't, to keep the code even simpler.
rng = check_random_state(self.random_state)
# When warm starting, we want to reuse the same seed that was used
# the first time fit was called (e.g. train/val split).
# For feature subsampling, we want to continue with the rng we started with.
if not self.warm_start or not self._is_fitted():
self._random_seed = rng.randint(np.iinfo(np.uint32).max, dtype="u8")
feature_subsample_seed = rng.randint(np.iinfo(np.uint32).max, dtype="u8")
self._feature_subsample_rng = np.random.default_rng(feature_subsample_seed)
self._validate_parameters()
monotonic_cst = _check_monotonic_cst(self, self.monotonic_cst)
# _preprocess_X places the categorical features at the beginning,
# change the order of monotonic_cst accordingly
if self.is_categorical_ is not None:
monotonic_cst_remapped = np.concatenate(
(
monotonic_cst[self.is_categorical_],
monotonic_cst[~self.is_categorical_],
)
)
else:
monotonic_cst_remapped = monotonic_cst
# used for validation in predict
n_samples, self._n_features = X.shape
# Encode constraints into a list of sets of features indices (integers).
interaction_cst = self._check_interaction_cst(self._n_features)
# we need this stateful variable to tell raw_predict() that it was
# called from fit() (this current method), and that the data it has
# received is pre-binned.
# predicting is faster on pre-binned data, so we want early stopping
# predictions to be made on pre-binned data. Unfortunately the _scorer
# can only call predict() or predict_proba(), not raw_predict(), and
# there's no way to tell the scorer that it needs to predict binned
# data.
self._in_fit = True
# `_openmp_effective_n_threads` is used to take cgroups CPU quotas
# into account when determining the maximum number of threads to use.
n_threads = _openmp_effective_n_threads()
if isinstance(self.loss, str):
self._loss = self._get_loss(sample_weight=sample_weight)
elif isinstance(self.loss, BaseLoss):
self._loss = self.loss
if self.early_stopping == "auto":
self.do_early_stopping_ = n_samples > 10_000
else:
self.do_early_stopping_ = self.early_stopping
# create validation data if needed
self._use_validation_data = (
self.validation_fraction is not None or validation_data_provided
)
if (
self.do_early_stopping_
and self._use_validation_data
and not validation_data_provided
):
# stratify for classification
# instead of checking predict_proba, loss.n_classes >= 2 would also work
stratify = y if hasattr(self._loss, "predict_proba") else None
# Save the state of the RNG for the training and validation split.
# This is needed in order to have the same split when using
# warm starting.
if sample_weight is None:
X_train, X_val, y_train, y_val = train_test_split(
X,
y,
test_size=self.validation_fraction,
stratify=stratify,
random_state=self._random_seed,
)
sample_weight_train = sample_weight_val = None
else:
# TODO: incorporate sample_weight in sampling here, as well as
# stratify
(
X_train,
X_val,
y_train,
y_val,
sample_weight_train,
sample_weight_val,
) = train_test_split(
X,
y,
sample_weight,
test_size=self.validation_fraction,
stratify=stratify,
random_state=self._random_seed,
)
else:
X_train, y_train, sample_weight_train = X, y, sample_weight
if not validation_data_provided:
X_val = y_val = sample_weight_val = None
# Bin the data
# For ease of use of the API, the user-facing GBDT classes accept the
# parameter max_bins, which doesn't take into account the bin for
# missing values (which is always allocated). However, since max_bins
# isn't the true maximal number of bins, all other private classes
# (binmapper, histbuilder...) accept n_bins instead, which is the
# actual total number of bins. Everywhere in the code, the
# convention is that n_bins == max_bins + 1
n_bins = self.max_bins + 1 # + 1 for missing values
self._bin_mapper = _BinMapper(
n_bins=n_bins,
is_categorical=self._is_categorical_remapped,
known_categories=known_categories,
random_state=self._random_seed,
n_threads=n_threads,
)
X_binned_train = self._bin_data(X_train, is_training_data=True)
if X_val is not None:
X_binned_val = self._bin_data(X_val, is_training_data=False)
else:
X_binned_val = None
# Uses binned data to check for missing values
has_missing_values = (
(X_binned_train == self._bin_mapper.missing_values_bin_idx_)
.any(axis=0)
.astype(np.uint8)
)
if self.verbose:
print("Fitting gradient boosted rounds:")
n_samples = X_binned_train.shape[0]
scoring_is_predefined_string = self.scoring in _SCORERS
need_raw_predictions_val = X_binned_val is not None and (
scoring_is_predefined_string or self.scoring == "loss"
)
# First time calling fit, or no warm start
if not (self._is_fitted() and self.warm_start):
# Clear random state and score attributes
self._clear_state()
# initialize raw_predictions: those are the accumulated values
# predicted by the trees for the training data. raw_predictions has
# shape (n_samples, n_trees_per_iteration) where
# n_trees_per_iterations is n_classes in multiclass classification,
# else 1.
# self._baseline_prediction has shape (1, n_trees_per_iteration)
self._baseline_prediction = self._loss.fit_intercept_only(
y_true=y_train, sample_weight=sample_weight_train
).reshape((1, -1))
raw_predictions = np.zeros(
shape=(n_samples, self.n_trees_per_iteration_),
dtype=self._baseline_prediction.dtype,
order="F",
)
raw_predictions += self._baseline_prediction
# predictors is a matrix (list of lists) of TreePredictor objects
# with shape (n_iter_, n_trees_per_iteration)
self._predictors = predictors = []
# Initialize structures and attributes related to early stopping
self._scorer = None # set if scoring != loss
raw_predictions_val = None # set if use val and scoring is a string
self.train_score_ = []
self.validation_score_ = []
if self.do_early_stopping_:
# populate train_score and validation_score with the
# predictions of the initial model (before the first tree)
# Create raw_predictions_val for storing the raw predictions of
# the validation data.
if need_raw_predictions_val:
raw_predictions_val = np.zeros(
shape=(X_binned_val.shape[0], self.n_trees_per_iteration_),
dtype=self._baseline_prediction.dtype,
order="F",
)
raw_predictions_val += self._baseline_prediction
if self.scoring == "loss":
# we're going to compute scoring w.r.t the loss. As losses
# take raw predictions as input (unlike the scorers), we
# can optimize a bit and avoid repeating computing the
# predictions of the previous trees. We'll reuse
# raw_predictions (as it's needed for training anyway) for
# evaluating the training loss.
self._check_early_stopping_loss(
raw_predictions=raw_predictions,
y_train=y_train,
sample_weight_train=sample_weight_train,
raw_predictions_val=raw_predictions_val,
y_val=y_val,
sample_weight_val=sample_weight_val,
n_threads=n_threads,
)
else:
self._scorer = check_scoring(self, self.scoring)
# _scorer is a callable with signature (est, X, y) and
# calls est.predict() or est.predict_proba() depending on
# its nature.
# Unfortunately, each call to _scorer() will compute
# the predictions of all the trees. So we use a subset of
# the training set to compute train scores.
# Compute the subsample set
(
X_binned_small_train,
y_small_train,
sample_weight_small_train,
indices_small_train,
) = self._get_small_trainset(
X_binned_train,
y_train,
sample_weight_train,
self._random_seed,
)
# If the scorer is a predefined string, then we optimize
# the evaluation by reusing the incrementally updated raw
# predictions.
if scoring_is_predefined_string:
raw_predictions_small_train = raw_predictions[
indices_small_train
]
else:
raw_predictions_small_train = None
self._check_early_stopping_scorer(
X_binned_small_train,
y_small_train,
sample_weight_small_train,
X_binned_val,
y_val,
sample_weight_val,
raw_predictions_small_train=raw_predictions_small_train,
raw_predictions_val=raw_predictions_val,
)
begin_at_stage = 0
# warm start: this is not the first time fit was called
else:
# Check that the maximum number of iterations is not smaller
# than the number of iterations from the previous fit
if self.max_iter < self.n_iter_:
raise ValueError(
"max_iter=%d must be larger than or equal to "
"n_iter_=%d when warm_start==True" % (self.max_iter, self.n_iter_)
)
# Convert array attributes to lists
self.train_score_ = self.train_score_.tolist()
self.validation_score_ = self.validation_score_.tolist()
# Compute raw predictions
raw_predictions = self._raw_predict(X_binned_train, n_threads=n_threads)
if self.do_early_stopping_ and need_raw_predictions_val:
raw_predictions_val = self._raw_predict(
X_binned_val, n_threads=n_threads
)
else:
raw_predictions_val = None
if self.do_early_stopping_ and self.scoring != "loss":
# Compute the subsample set
(
X_binned_small_train,
y_small_train,
sample_weight_small_train,
indices_small_train,
) = self._get_small_trainset(
X_binned_train, y_train, sample_weight_train, self._random_seed
)
# Get the predictors from the previous fit
predictors = self._predictors
begin_at_stage = self.n_iter_
# initialize gradients and hessians (empty arrays).
# shape = (n_samples, n_trees_per_iteration).
gradient, hessian = self._loss.init_gradient_and_hessian(
n_samples=n_samples, dtype=G_H_DTYPE, order="F"
)
for iteration in range(begin_at_stage, self.max_iter):
if self.verbose >= 2:
iteration_start_time = time()
print(
"[{}/{}] ".format(iteration + 1, self.max_iter), end="", flush=True
)
# Update gradients and hessians, inplace
# Note that self._loss expects shape (n_samples,) for
# n_trees_per_iteration = 1 else shape (n_samples, n_trees_per_iteration).
if self._loss.constant_hessian:
self._loss.gradient(
y_true=y_train,
raw_prediction=raw_predictions,
sample_weight=sample_weight_train,
gradient_out=gradient,
n_threads=n_threads,
)
else:
self._loss.gradient_hessian(
y_true=y_train,
raw_prediction=raw_predictions,
sample_weight=sample_weight_train,
gradient_out=gradient,
hessian_out=hessian,
n_threads=n_threads,
)
# Append a list since there may be more than 1 predictor per iter
predictors.append([])
# 2-d views of shape (n_samples, n_trees_per_iteration_) or (n_samples, 1)
# on gradient and hessian to simplify the loop over n_trees_per_iteration_.
if gradient.ndim == 1:
g_view = gradient.reshape((-1, 1))
h_view = hessian.reshape((-1, 1))
else:
g_view = gradient
h_view = hessian
# Build `n_trees_per_iteration` trees.
for k in range(self.n_trees_per_iteration_):
grower = TreeGrower(
X_binned=X_binned_train,
gradients=g_view[:, k],
hessians=h_view[:, k],
n_bins=n_bins,
n_bins_non_missing=self._bin_mapper.n_bins_non_missing_,
has_missing_values=has_missing_values,
is_categorical=self._is_categorical_remapped,
monotonic_cst=monotonic_cst_remapped,
interaction_cst=interaction_cst,
max_leaf_nodes=self.max_leaf_nodes,
max_depth=self.max_depth,
min_samples_leaf=self.min_samples_leaf,
l2_regularization=self.l2_regularization,
feature_fraction_per_split=self.max_features,
rng=self._feature_subsample_rng,
shrinkage=self.learning_rate,
n_threads=n_threads,
)
grower.grow()
acc_apply_split_time += grower.total_apply_split_time
acc_find_split_time += grower.total_find_split_time
acc_compute_hist_time += grower.total_compute_hist_time
if not self._loss.differentiable:
_update_leaves_values(
loss=self._loss,
grower=grower,
y_true=y_train,
raw_prediction=raw_predictions[:, k],
sample_weight=sample_weight_train,
)
predictor = grower.make_predictor(
binning_thresholds=self._bin_mapper.bin_thresholds_
)
predictors[-1].append(predictor)
# Update raw_predictions with the predictions of the newly
# created tree.
tic_pred = time()
_update_raw_predictions(raw_predictions[:, k], grower, n_threads)
toc_pred = time()
acc_prediction_time += toc_pred - tic_pred
should_early_stop = False
if self.do_early_stopping_:
# Update raw_predictions_val with the newest tree(s)
if need_raw_predictions_val:
for k, pred in enumerate(self._predictors[-1]):
raw_predictions_val[:, k] += pred.predict_binned(
X_binned_val,
self._bin_mapper.missing_values_bin_idx_,
n_threads,
)
if self.scoring == "loss":
should_early_stop = self._check_early_stopping_loss(
raw_predictions=raw_predictions,
y_train=y_train,
sample_weight_train=sample_weight_train,
raw_predictions_val=raw_predictions_val,
y_val=y_val,
sample_weight_val=sample_weight_val,
n_threads=n_threads,
)
else:
# If the scorer is a predefined string, then we optimize the
# evaluation by reusing the incrementally computed raw predictions.
if scoring_is_predefined_string:
raw_predictions_small_train = raw_predictions[
indices_small_train
]
else:
raw_predictions_small_train = None
should_early_stop = self._check_early_stopping_scorer(
X_binned_small_train,
y_small_train,
sample_weight_small_train,
X_binned_val,
y_val,
sample_weight_val,
raw_predictions_small_train=raw_predictions_small_train,
raw_predictions_val=raw_predictions_val,
)
if self.verbose >= 2:
self._print_iteration_stats(iteration_start_time)
# maybe we could also early stop if all the trees are stumps?
if should_early_stop:
break
if self.verbose:
duration = time() - fit_start_time
n_total_leaves = sum(
predictor.get_n_leaf_nodes()
for predictors_at_ith_iteration in self._predictors
for predictor in predictors_at_ith_iteration
)
n_predictors = sum(
len(predictors_at_ith_iteration)
for predictors_at_ith_iteration in self._predictors
)
print(
"Fit {} trees in {:.3f} s, ({} total leaves)".format(
n_predictors, duration, n_total_leaves
)
)
print(
"{:<32} {:.3f}s".format(
"Time spent computing histograms:", acc_compute_hist_time
)
)
print(
"{:<32} {:.3f}s".format(
"Time spent finding best splits:", acc_find_split_time
)
)
print(
"{:<32} {:.3f}s".format(
"Time spent applying splits:", acc_apply_split_time
)
)
print(
"{:<32} {:.3f}s".format("Time spent predicting:", acc_prediction_time)
)
self.train_score_ = np.asarray(self.train_score_)
self.validation_score_ = np.asarray(self.validation_score_)
del self._in_fit # hard delete so we're sure it can't be used anymore
return self
|
Fit the gradient boosting model.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Weights of training data.
.. versionadded:: 0.23
X_val : array-like of shape (n_val, n_features)
Additional sample of features for validation used in early stopping.
In a `Pipeline`, `X_val` can be transformed the same way as `X` with
`Pipeline(..., transform_input=["X_val"])`.
.. versionadded:: 1.7
y_val : array-like of shape (n_val,)
Additional sample of target values for validation used in early stopping.
.. versionadded:: 1.7
sample_weight_val : array-like of shape (n_val,), default=None
Additional weights for validation used in early stopping.
.. versionadded:: 1.7
Returns
-------
self : object
Fitted estimator.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py
|
BSD-3-Clause
|
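The `fit` method above accepts an explicit validation set (`X_val`, `y_val`, added in 1.7) for early stopping. A minimal usage sketch through the public `HistGradientBoostingClassifier` (assuming scikit-learn >= 1.7); the attributes printed at the end are the ones populated by the code above:

```python
# Minimal sketch: use a user-provided validation set for early stopping.
from sklearn.datasets import make_classification
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=2000, random_state=0)
X_train, X_val, y_train, y_val = train_test_split(X, y, random_state=0)

# early_stopping must be True or "auto" when X_val/y_val are given,
# otherwise fit() raises the ValueError shown above.
clf = HistGradientBoostingClassifier(early_stopping=True, random_state=0)
clf.fit(X_train, y_train, X_val=X_val, y_val=y_val)
print(clf.n_iter_, clf.validation_score_[-1])
```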
def _clear_state(self):
"""Clear the state of the gradient boosting model."""
for var in ("train_score_", "validation_score_"):
if hasattr(self, var):
delattr(self, var)
|
Clear the state of the gradient boosting model.
|
_clear_state
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py
|
BSD-3-Clause
|
def _get_small_trainset(self, X_binned_train, y_train, sample_weight_train, seed):
"""Compute the indices of the subsample set and return this set.
For efficiency, we need to subsample the training set to compute scores
with scorers.
"""
# TODO: incorporate sample_weights here in `resample`
subsample_size = 10000
if X_binned_train.shape[0] > subsample_size:
indices = np.arange(X_binned_train.shape[0])
stratify = y_train if is_classifier(self) else None
indices = resample(
indices,
n_samples=subsample_size,
replace=False,
random_state=seed,
stratify=stratify,
)
X_binned_small_train = X_binned_train[indices]
y_small_train = y_train[indices]
if sample_weight_train is not None:
sample_weight_small_train = sample_weight_train[indices]
else:
sample_weight_small_train = None
X_binned_small_train = np.ascontiguousarray(X_binned_small_train)
return (
X_binned_small_train,
y_small_train,
sample_weight_small_train,
indices,
)
else:
return X_binned_train, y_train, sample_weight_train, slice(None)
|
Compute the indices of the subsample set and return this set.
For efficiency, we need to subsample the training set to compute scores
with scorers.
|
_get_small_trainset
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py
|
BSD-3-Clause
|
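`_get_small_trainset` above caps the scoring set at 10000 samples using a stratified `sklearn.utils.resample` over an index array. An illustrative sketch of the same pattern on made-up labels (not the estimator method itself):

```python
import numpy as np
from sklearn.utils import resample

rng = np.random.default_rng(0)
y = rng.integers(0, 2, size=50_000)   # made-up class labels
subsample_size = 10_000               # same cap as _get_small_trainset

indices = np.arange(y.shape[0])
indices = resample(
    indices,
    n_samples=subsample_size,
    replace=False,
    random_state=0,
    stratify=y,   # keep class proportions, as done for classifiers above
)
print(indices.shape)   # (10000,)
```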
def _check_early_stopping_scorer(
self,
X_binned_small_train,
y_small_train,
sample_weight_small_train,
X_binned_val,
y_val,
sample_weight_val,
raw_predictions_small_train=None,
raw_predictions_val=None,
):
"""Check if fitting should be early-stopped based on scorer.
Scores are computed on validation data or on training data.
"""
if is_classifier(self):
y_small_train = self.classes_[y_small_train.astype(int)]
self.train_score_.append(
self._score_with_raw_predictions(
X_binned_small_train,
y_small_train,
sample_weight_small_train,
raw_predictions_small_train,
)
)
if self._use_validation_data:
if is_classifier(self):
y_val = self.classes_[y_val.astype(int)]
self.validation_score_.append(
self._score_with_raw_predictions(
X_binned_val, y_val, sample_weight_val, raw_predictions_val
)
)
return self._should_stop(self.validation_score_)
else:
return self._should_stop(self.train_score_)
|
Check if fitting should be early-stopped based on scorer.
Scores are computed on validation data or on training data.
|
_check_early_stopping_scorer
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py
|
BSD-3-Clause
|
def _check_early_stopping_loss(
self,
raw_predictions,
y_train,
sample_weight_train,
raw_predictions_val,
y_val,
sample_weight_val,
n_threads=1,
):
"""Check if fitting should be early-stopped based on loss.
Scores are computed on validation data or on training data.
"""
self.train_score_.append(
-self._loss(
y_true=y_train,
raw_prediction=raw_predictions,
sample_weight=sample_weight_train,
n_threads=n_threads,
)
)
if self._use_validation_data:
self.validation_score_.append(
-self._loss(
y_true=y_val,
raw_prediction=raw_predictions_val,
sample_weight=sample_weight_val,
n_threads=n_threads,
)
)
return self._should_stop(self.validation_score_)
else:
return self._should_stop(self.train_score_)
|
Check if fitting should be early-stopped based on loss.
Scores are computed on validation data or on training data.
|
_check_early_stopping_loss
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py
|
BSD-3-Clause
|
def _should_stop(self, scores):
"""
Return True (do early stopping) if the last n scores aren't better
than the (n-1)th-to-last score, up to some tolerance.
"""
reference_position = self.n_iter_no_change + 1
if len(scores) < reference_position:
return False
# A higher score is always better. Higher tol means that it will be
# harder for subsequent iteration to be considered an improvement upon
# the reference score, and therefore it is more likely to early stop
# because of the lack of significant improvement.
reference_score = scores[-reference_position] + self.tol
recent_scores = scores[-reference_position + 1 :]
recent_improvements = [score > reference_score for score in recent_scores]
return not any(recent_improvements)
|
Return True (do early stopping) if the last n scores aren't better
than the (n-1)th-to-last score, up to some tolerance.
|
_should_stop
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py
|
BSD-3-Clause
|
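`_should_stop` above compares the most recent `n_iter_no_change` scores against the score from `n_iter_no_change + 1` iterations back, shifted by `tol`. A standalone sketch of the same rule with made-up scores (scores are negative losses, so higher is better):

```python
def should_stop(scores, n_iter_no_change=3, tol=1e-7):
    """Return True if none of the last n_iter_no_change scores improves on
    the reference score (higher is better) by more than tol."""
    reference_position = n_iter_no_change + 1
    if len(scores) < reference_position:
        return False
    reference_score = scores[-reference_position] + tol
    recent_scores = scores[-reference_position + 1:]
    return not any(score > reference_score for score in recent_scores)

print(should_stop([-0.5, -0.4, -0.401, -0.402, -0.400]))  # True: stalled around -0.4
print(should_stop([-0.5, -0.4, -0.3, -0.2, -0.1]))        # False: still improving
```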
def _bin_data(self, X, is_training_data):
"""Bin data X.
If is_training_data, then fit the _bin_mapper attribute.
Else, the binned data is converted to a C-contiguous array.
"""
description = "training" if is_training_data else "validation"
if self.verbose:
print(
"Binning {:.3f} GB of {} data: ".format(X.nbytes / 1e9, description),
end="",
flush=True,
)
tic = time()
if is_training_data:
X_binned = self._bin_mapper.fit_transform(X) # F-aligned array
else:
X_binned = self._bin_mapper.transform(X) # F-aligned array
# We convert the array to C-contiguous since predicting is faster
# with this layout (training is faster on F-arrays though)
X_binned = np.ascontiguousarray(X_binned)
toc = time()
if self.verbose:
duration = toc - tic
print("{:.3f} s".format(duration))
return X_binned
|
Bin data X.
If is_training_data, then fit the _bin_mapper attribute.
Else, the binned data is converted to a C-contiguous array.
|
_bin_data
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py
|
BSD-3-Clause
|
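As the comments in `fit` explain, the user-facing `max_bins` excludes the bin reserved for missing values, so internally `n_bins == max_bins + 1` and the last bin index plays the role of `missing_values_bin_idx_`. A rough NumPy illustration of that convention (toy equally spaced edges, not the quantile-based `_BinMapper`):

```python
import numpy as np

max_bins = 255                        # user-facing parameter
n_bins = max_bins + 1                 # internal count, last bin is for missing values
missing_values_bin_idx = n_bins - 1   # == max_bins == 255

x = np.array([0.1, 3.5, np.nan, 7.2])
edges = np.linspace(0.0, 10.0, max_bins - 1)   # toy bin edges

binned = np.searchsorted(edges, x).astype(np.uint8)
binned[np.isnan(x)] = missing_values_bin_idx   # NaNs go to the dedicated bin
print(binned)
```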
def _print_iteration_stats(self, iteration_start_time):
"""Print info about the current fitting iteration."""
log_msg = ""
predictors_of_ith_iteration = [
predictors_list
for predictors_list in self._predictors[-1]
if predictors_list
]
n_trees = len(predictors_of_ith_iteration)
max_depth = max(
predictor.get_max_depth() for predictor in predictors_of_ith_iteration
)
n_leaves = sum(
predictor.get_n_leaf_nodes() for predictor in predictors_of_ith_iteration
)
if n_trees == 1:
log_msg += "{} tree, {} leaves, ".format(n_trees, n_leaves)
else:
log_msg += "{} trees, {} leaves ".format(n_trees, n_leaves)
log_msg += "({} on avg), ".format(int(n_leaves / n_trees))
log_msg += "max depth = {}, ".format(max_depth)
if self.do_early_stopping_:
if self.scoring == "loss":
factor = -1 # score_ arrays contain the negative loss
name = "loss"
else:
factor = 1
name = "score"
log_msg += "train {}: {:.5f}, ".format(name, factor * self.train_score_[-1])
if self._use_validation_data:
log_msg += "val {}: {:.5f}, ".format(
name, factor * self.validation_score_[-1]
)
iteration_time = time() - iteration_start_time
log_msg += "in {:0.3f}s".format(iteration_time)
print(log_msg)
|
Print info about the current fitting iteration.
|
_print_iteration_stats
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py
|
BSD-3-Clause
|
def _raw_predict(self, X, n_threads=None):
"""Return the sum of the leaves values over all predictors.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples.
n_threads : int, default=None
Number of OpenMP threads to use. `_openmp_effective_n_threads` is called
to determine the effective number of threads to use, which takes cgroups CPU
quotas into account. See the docstring of `_openmp_effective_n_threads`
for details.
Returns
-------
raw_predictions : array, shape (n_samples, n_trees_per_iteration)
The raw predicted values.
"""
check_is_fitted(self)
is_binned = getattr(self, "_in_fit", False)
if not is_binned:
X = self._preprocess_X(X, reset=False)
n_samples = X.shape[0]
raw_predictions = np.zeros(
shape=(n_samples, self.n_trees_per_iteration_),
dtype=self._baseline_prediction.dtype,
order="F",
)
raw_predictions += self._baseline_prediction
# We intentionally decouple the number of threads used at prediction
# time from the number of threads used at fit time because the model
# can be deployed on a different machine for prediction purposes.
n_threads = _openmp_effective_n_threads(n_threads)
self._predict_iterations(
X, self._predictors, raw_predictions, is_binned, n_threads
)
return raw_predictions
|
Return the sum of the leaves values over all predictors.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples.
n_threads : int, default=None
Number of OpenMP threads to use. `_openmp_effective_n_threads` is called
to determine the effective number of threads to use, which takes cgroups CPU
quotas into account. See the docstring of `_openmp_effective_n_threads`
for details.
Returns
-------
raw_predictions : array, shape (n_samples, n_trees_per_iteration)
The raw predicted values.
|
_raw_predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py
|
BSD-3-Clause
|
def _predict_iterations(self, X, predictors, raw_predictions, is_binned, n_threads):
"""Add the predictions of the predictors to raw_predictions."""
if not is_binned:
(
known_cat_bitsets,
f_idx_map,
) = self._bin_mapper.make_known_categories_bitsets()
for predictors_of_ith_iteration in predictors:
for k, predictor in enumerate(predictors_of_ith_iteration):
if is_binned:
predict = partial(
predictor.predict_binned,
missing_values_bin_idx=self._bin_mapper.missing_values_bin_idx_,
n_threads=n_threads,
)
else:
predict = partial(
predictor.predict,
known_cat_bitsets=known_cat_bitsets,
f_idx_map=f_idx_map,
n_threads=n_threads,
)
raw_predictions[:, k] += predict(X)
|
Add the predictions of the predictors to raw_predictions.
|
_predict_iterations
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py
|
BSD-3-Clause
|
def _staged_raw_predict(self, X):
"""Compute raw predictions of ``X`` for each iteration.
This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples.
Yields
------
raw_predictions : generator of ndarray of shape \
(n_samples, n_trees_per_iteration)
The raw predictions of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
"""
check_is_fitted(self)
X = self._preprocess_X(X, reset=False)
if X.shape[1] != self._n_features:
raise ValueError(
"X has {} features but this estimator was trained with "
"{} features.".format(X.shape[1], self._n_features)
)
n_samples = X.shape[0]
raw_predictions = np.zeros(
shape=(n_samples, self.n_trees_per_iteration_),
dtype=self._baseline_prediction.dtype,
order="F",
)
raw_predictions += self._baseline_prediction
# We intentionally decouple the number of threads used at prediction
# time from the number of threads used at fit time because the model
# can be deployed on a different machine for prediction purposes.
n_threads = _openmp_effective_n_threads()
for iteration in range(len(self._predictors)):
self._predict_iterations(
X,
self._predictors[iteration : iteration + 1],
raw_predictions,
is_binned=False,
n_threads=n_threads,
)
yield raw_predictions.copy()
|
Compute raw predictions of ``X`` for each iteration.
This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples.
Yields
------
raw_predictions : generator of ndarray of shape (n_samples, n_trees_per_iteration)
The raw predictions of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
|
_staged_raw_predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py
|
BSD-3-Clause
|
def _compute_partial_dependence_recursion(self, grid, target_features):
"""Fast partial dependence computation.
Parameters
----------
grid : ndarray, shape (n_samples, n_target_features), dtype=np.float32
The grid points on which the partial dependence should be
evaluated.
target_features : ndarray, shape (n_target_features), dtype=np.intp
The set of target features for which the partial dependence
should be evaluated.
Returns
-------
averaged_predictions : ndarray, shape \
(n_trees_per_iteration, n_samples)
The value of the partial dependence function on each grid point.
"""
if getattr(self, "_fitted_with_sw", False):
raise NotImplementedError(
"{} does not support partial dependence "
"plots with the 'recursion' method when "
"sample weights were given during fit "
"time.".format(self.__class__.__name__)
)
grid = np.asarray(grid, dtype=X_DTYPE, order="C")
averaged_predictions = np.zeros(
(self.n_trees_per_iteration_, grid.shape[0]), dtype=Y_DTYPE
)
target_features = np.asarray(target_features, dtype=np.intp, order="C")
for predictors_of_ith_iteration in self._predictors:
for k, predictor in enumerate(predictors_of_ith_iteration):
predictor.compute_partial_dependence(
grid, target_features, averaged_predictions[k]
)
# Note that the learning rate is already accounted for in the leaves
# values.
return averaged_predictions
|
Fast partial dependence computation.
Parameters
----------
grid : ndarray, shape (n_samples, n_target_features), dtype=np.float32
The grid points on which the partial dependence should be
evaluated.
target_features : ndarray, shape (n_target_features), dtype=np.intp
The set of target features for which the partial dependence
should be evaluated.
Returns
-------
averaged_predictions : ndarray, shape (n_trees_per_iteration, n_samples)
The value of the partial dependence function on each grid point.
|
_compute_partial_dependence_recursion
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py
|
BSD-3-Clause
|
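`_compute_partial_dependence_recursion` above is the hook behind `sklearn.inspection.partial_dependence(..., method="recursion")`; as the guard shows, it refuses to run when the model was fitted with sample weights. A minimal sketch through the public API:

```python
from sklearn.datasets import make_regression
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.inspection import partial_dependence

X, y = make_regression(n_samples=500, n_features=4, random_state=0)
est = HistGradientBoostingRegressor(random_state=0).fit(X, y)

# "recursion" walks the fitted trees directly instead of predicting on a
# modified copy of X for every grid point.
pd_result = partial_dependence(est, X, features=[0], method="recursion")
print(pd_result["average"].shape)   # (1, n_grid_points)
```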
def predict(self, X):
"""Predict values for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The input samples.
Returns
-------
y : ndarray, shape (n_samples,)
The predicted values.
"""
check_is_fitted(self)
# Return inverse link of raw predictions after converting
# shape (n_samples, 1) to (n_samples,)
return self._loss.link.inverse(self._raw_predict(X).ravel())
|
Predict values for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The input samples.
Returns
-------
y : ndarray, shape (n_samples,)
The predicted values.
|
predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py
|
BSD-3-Clause
|
def predict(self, X):
"""Predict classes for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The input samples.
Returns
-------
y : ndarray, shape (n_samples,)
The predicted classes.
"""
# TODO: This could be done in parallel
raw_predictions = self._raw_predict(X)
if raw_predictions.shape[1] == 1:
# np.argmax([0.5, 0.5]) is 0, not 1. Therefore "> 0" not ">= 0" to be
# consistent with the multiclass case.
encoded_classes = (raw_predictions.ravel() > 0).astype(int)
else:
encoded_classes = np.argmax(raw_predictions, axis=1)
return self.classes_[encoded_classes]
|
Predict classes for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The input samples.
Returns
-------
y : ndarray, shape (n_samples,)
The predicted classes.
|
predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py
|
BSD-3-Clause
|
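The comment in the classifier's `predict` above explains why the binary case thresholds the single raw column with `> 0` rather than `>= 0`. A tiny self-contained sketch of that tie-breaking convention (made-up raw values; the zero column is padded only for illustration):

```python
import numpy as np

raw = np.array([[-1.3], [0.0], [2.1]])   # shape (n_samples, 1), made-up values

# Binary path: strict threshold at 0, so a tie (raw == 0) maps to class 0.
encoded_binary = (raw.ravel() > 0).astype(int)           # [0, 0, 1]

# Multiclass-style view: argmax over [0, raw] also sends ties to class 0.
two_col = np.hstack([np.zeros_like(raw), raw])
encoded_argmax = np.argmax(two_col, axis=1)              # [0, 0, 1]
print(np.array_equal(encoded_binary, encoded_argmax))    # True
```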
def staged_predict(self, X):
"""Predict classes at each iteration.
This method allows monitoring (i.e. determining the error on a test set)
after each stage.
.. versionadded:: 0.24
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples.
Yields
------
y : generator of ndarray of shape (n_samples,)
The predicted classes of the input samples, for each iteration.
"""
for raw_predictions in self._staged_raw_predict(X):
if raw_predictions.shape[1] == 1:
# np.argmax([0, 0]) is 0, not 1, therefore "> 0" not ">= 0"
encoded_classes = (raw_predictions.ravel() > 0).astype(int)
else:
encoded_classes = np.argmax(raw_predictions, axis=1)
yield self.classes_.take(encoded_classes, axis=0)
|
Predict classes at each iteration.
This method allows monitoring (i.e. determining the error on a test set)
after each stage.
.. versionadded:: 0.24
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples.
Yields
------
y : generator of ndarray of shape (n_samples,)
The predicted classes of the input samples, for each iteration.
|
staged_predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py
|
BSD-3-Clause
|
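`staged_predict` above is meant for monitoring a held-out metric after each boosting iteration. A minimal sketch:

```python
from sklearn.datasets import make_classification
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=1000, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

clf = HistGradientBoostingClassifier(max_iter=20, random_state=0)
clf.fit(X_train, y_train)

# One prediction array per boosting iteration, in fitting order.
for i, y_pred in enumerate(clf.staged_predict(X_test), start=1):
    print(f"iteration {i}: accuracy = {accuracy_score(y_test, y_pred):.3f}")
```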
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The input samples.
Returns
-------
decision : ndarray, shape (n_samples,) or \
(n_samples, n_trees_per_iteration)
The raw predicted values (i.e. the sum of the trees leaves) for
each sample. n_trees_per_iteration is equal to the number of
classes in multiclass classification.
"""
decision = self._raw_predict(X)
if decision.shape[1] == 1:
decision = decision.ravel()
return decision
|
Compute the decision function of ``X``.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The input samples.
Returns
-------
decision : ndarray, shape (n_samples,) or (n_samples, n_trees_per_iteration)
The raw predicted values (i.e. the sum of the trees leaves) for
each sample. n_trees_per_iteration is equal to the number of
classes in multiclass classification.
|
decision_function
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py
|
BSD-3-Clause
|
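For a binary classifier, `decision_function` above returns the single raw (log-odds) column, and `predict_proba` applies the loss's inverse link to it. A sketch of that relation, assuming the default log loss so the inverse link is the logistic sigmoid:

```python
import numpy as np
from scipy.special import expit
from sklearn.datasets import make_classification
from sklearn.ensemble import HistGradientBoostingClassifier

X, y = make_classification(n_samples=500, random_state=0)
clf = HistGradientBoostingClassifier(max_iter=10, random_state=0).fit(X, y)

raw = clf.decision_function(X)           # shape (n_samples,) in the binary case
proba_pos = clf.predict_proba(X)[:, 1]   # probability of the positive class

# Inverse link of the raw values should match predict_proba for log loss.
print(np.allclose(expit(raw), proba_pos))   # True
```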
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples.
Yields
------
decision : generator of ndarray of shape (n_samples,) or \
(n_samples, n_trees_per_iteration)
The decision function of the input samples, which corresponds to
the raw values predicted from the trees of the ensemble. The
classes correspond to those in the attribute :term:`classes_`.
"""
for staged_decision in self._staged_raw_predict(X):
if staged_decision.shape[1] == 1:
staged_decision = staged_decision.ravel()
yield staged_decision
|
Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples.
Yields
------
decision : generator of ndarray of shape (n_samples,) or (n_samples, n_trees_per_iteration)
The decision function of the input samples, which corresponds to
the raw values predicted from the trees of the ensemble. The
classes correspond to those in the attribute :term:`classes_`.
|
staged_decision_function
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py
|
BSD-3-Clause
|
def set_children_bounds(self, lower, upper):
"""Set children values bounds to respect monotonic constraints."""
# These are bounds for the node's *children* values, not the node's
# value. The bounds are used in the splitter when considering potential
# left and right child.
self.children_lower_bound = lower
self.children_upper_bound = upper
|
Set children values bounds to respect monotonic constraints.
|
set_children_bounds
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/grower.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/grower.py
|
BSD-3-Clause
|
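The children bounds set in `set_children_bounds` above are how the user-level `monotonic_cst` parameter is enforced while growing (the mid-point logic appears in `split_next` further down). A minimal sketch of requesting a monotonic constraint through the public estimator:

```python
import numpy as np
from sklearn.ensemble import HistGradientBoostingRegressor

rng = np.random.default_rng(0)
X = rng.normal(size=(1000, 2))
y = 3 * X[:, 0] + rng.normal(scale=0.1, size=1000)

# +1: non-decreasing in feature 0; 0: no constraint on feature 1.
est = HistGradientBoostingRegressor(monotonic_cst=[1, 0], random_state=0).fit(X, y)

grid = np.c_[np.linspace(-2, 2, 50), np.zeros(50)]
pred = est.predict(grid)
print(np.all(np.diff(pred) >= 0))   # True: predictions are monotone in feature 0
```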
def _validate_parameters(
self,
X_binned,
min_gain_to_split,
min_hessian_to_split,
):
"""Validate parameters passed to __init__.
Also validate parameters passed to splitter.
"""
if X_binned.dtype != np.uint8:
raise NotImplementedError("X_binned must be of type uint8.")
if not X_binned.flags.f_contiguous:
raise ValueError(
"X_binned should be passed as Fortran contiguous "
"array for maximum efficiency."
)
if min_gain_to_split < 0:
raise ValueError(
"min_gain_to_split={} must be positive.".format(min_gain_to_split)
)
if min_hessian_to_split < 0:
raise ValueError(
"min_hessian_to_split={} must be positive.".format(min_hessian_to_split)
)
|
Validate parameters passed to __init__.
Also validate parameters passed to splitter.
|
_validate_parameters
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/grower.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/grower.py
|
BSD-3-Clause
|
def _apply_shrinkage(self):
"""Multiply leaves values by shrinkage parameter.
This must be done at the very end of the growing process. If this were
done during the growing process e.g. in finalize_leaf(), then a leaf
would be shrunk but its sibling would potentially not be (if it's a
non-leaf), which would lead to a wrong computation of the 'middle'
value needed to enforce the monotonic constraints.
"""
for leaf in self.finalized_leaves:
leaf.value *= self.shrinkage
|
Multiply leaves values by shrinkage parameter.
This must be done at the very end of the growing process. If this were
done during the growing process e.g. in finalize_leaf(), then a leaf
would be shrunk but its sibling would potentially not be (if it's a
non-leaf), which would lead to a wrong computation of the 'middle'
value needed to enforce the monotonic constraints.
|
_apply_shrinkage
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/grower.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/grower.py
|
BSD-3-Clause
|
def _initialize_root(self):
"""Initialize root node and finalize it if needed."""
tic = time()
if self.interaction_cst is not None:
allowed_features = set().union(*self.interaction_cst)
allowed_features = np.fromiter(
allowed_features, dtype=np.uint32, count=len(allowed_features)
)
arbitrary_feature = allowed_features[0]
else:
allowed_features = None
arbitrary_feature = 0
# TreeNode init needs the total sum of gradients and hessians. Therefore, we
# first compute the histograms and then compute the total grad/hess on an
# arbitrary feature histogram. This way we replace a loop over n_samples by a
# loop over n_bins.
histograms = self.histogram_builder.compute_histograms_brute(
self.splitter.partition, # =self.root.sample_indices
allowed_features,
)
self.total_compute_hist_time += time() - tic
tic = time()
n_samples = self.X_binned.shape[0]
depth = 0
histogram_array = np.asarray(histograms[arbitrary_feature])
sum_gradients = histogram_array["sum_gradients"].sum()
if self.histogram_builder.hessians_are_constant:
sum_hessians = self.histogram_builder.hessians[0] * n_samples
else:
sum_hessians = histogram_array["sum_hessians"].sum()
self.root = TreeNode(
depth=depth,
sample_indices=self.splitter.partition,
partition_start=0,
partition_stop=n_samples,
sum_gradients=sum_gradients,
sum_hessians=sum_hessians,
value=0,
)
if self.root.n_samples < 2 * self.min_samples_leaf:
# Do not even bother computing any splitting statistics.
self._finalize_leaf(self.root)
return
if sum_hessians < self.splitter.min_hessian_to_split:
self._finalize_leaf(self.root)
return
if self.interaction_cst is not None:
self.root.interaction_cst_indices = range(len(self.interaction_cst))
self.root.allowed_features = allowed_features
self.root.histograms = histograms
self._compute_best_split_and_push(self.root)
self.total_find_split_time += time() - tic
|
Initialize root node and finalize it if needed.
|
_initialize_root
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/grower.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/grower.py
|
BSD-3-Clause
|
def _compute_best_split_and_push(self, node):
"""Compute the best possible split (SplitInfo) of a given node.
Also push it in the heap of splittable nodes if gain isn't zero.
The gain of a node is 0 if either all the leaves are pure
(best gain = 0), or if no split would satisfy the constraints
(min_hessian_to_split, min_gain_to_split, min_samples_leaf).
"""
node.split_info = self.splitter.find_node_split(
n_samples=node.n_samples,
histograms=node.histograms,
sum_gradients=node.sum_gradients,
sum_hessians=node.sum_hessians,
value=node.value,
lower_bound=node.children_lower_bound,
upper_bound=node.children_upper_bound,
allowed_features=node.allowed_features,
)
if node.split_info.gain <= 0: # no valid split
self._finalize_leaf(node)
else:
heappush(self.splittable_nodes, node)
|
Compute the best possible split (SplitInfo) of a given node.
Also push it in the heap of splittable nodes if gain isn't zero.
The gain of a node is 0 if either all the leaves are pure
(best gain = 0), or if no split would satisfy the constraints
(min_hessian_to_split, min_gain_to_split, min_samples_leaf).
|
_compute_best_split_and_push
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/grower.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/grower.py
|
BSD-3-Clause
|
def split_next(self):
"""Split the node with highest potential gain.
Returns
-------
left : TreeNode
The resulting left child.
right : TreeNode
The resulting right child.
"""
# Consider the node with the highest loss reduction (a.k.a. gain)
node = heappop(self.splittable_nodes)
tic = time()
(
sample_indices_left,
sample_indices_right,
right_child_pos,
) = self.splitter.split_indices(node.split_info, node.sample_indices)
self.total_apply_split_time += time() - tic
depth = node.depth + 1
n_leaf_nodes = len(self.finalized_leaves) + len(self.splittable_nodes)
n_leaf_nodes += 2
left_child_node = TreeNode(
depth=depth,
sample_indices=sample_indices_left,
partition_start=node.partition_start,
partition_stop=node.partition_start + right_child_pos,
sum_gradients=node.split_info.sum_gradient_left,
sum_hessians=node.split_info.sum_hessian_left,
value=node.split_info.value_left,
)
right_child_node = TreeNode(
depth=depth,
sample_indices=sample_indices_right,
partition_start=left_child_node.partition_stop,
partition_stop=node.partition_stop,
sum_gradients=node.split_info.sum_gradient_right,
sum_hessians=node.split_info.sum_hessian_right,
value=node.split_info.value_right,
)
node.right_child = right_child_node
node.left_child = left_child_node
# set interaction constraints (the indices of the constraints sets)
if self.interaction_cst is not None:
# Calculate allowed_features and interaction_cst_indices only once. Child
# nodes inherit them before they get split.
(
left_child_node.allowed_features,
left_child_node.interaction_cst_indices,
) = self._compute_interactions(node)
right_child_node.interaction_cst_indices = (
left_child_node.interaction_cst_indices
)
right_child_node.allowed_features = left_child_node.allowed_features
if not self.has_missing_values[node.split_info.feature_idx]:
# If no missing values are encountered at fit time, then samples
# with missing values during predict() will go to whichever child
# has the most samples.
node.split_info.missing_go_to_left = (
left_child_node.n_samples > right_child_node.n_samples
)
self.n_nodes += 2
self.n_categorical_splits += node.split_info.is_categorical
if self.max_leaf_nodes is not None and n_leaf_nodes == self.max_leaf_nodes:
self._finalize_leaf(left_child_node)
self._finalize_leaf(right_child_node)
self._finalize_splittable_nodes()
return left_child_node, right_child_node
if self.max_depth is not None and depth == self.max_depth:
self._finalize_leaf(left_child_node)
self._finalize_leaf(right_child_node)
return left_child_node, right_child_node
if left_child_node.n_samples < self.min_samples_leaf * 2:
self._finalize_leaf(left_child_node)
if right_child_node.n_samples < self.min_samples_leaf * 2:
self._finalize_leaf(right_child_node)
if self.with_monotonic_cst:
# Set value bounds for respecting monotonic constraints
# See test_nodes_values() for details
if (
self.monotonic_cst[node.split_info.feature_idx]
== MonotonicConstraint.NO_CST
):
lower_left = lower_right = node.children_lower_bound
upper_left = upper_right = node.children_upper_bound
else:
mid = (left_child_node.value + right_child_node.value) / 2
if (
self.monotonic_cst[node.split_info.feature_idx]
== MonotonicConstraint.POS
):
lower_left, upper_left = node.children_lower_bound, mid
lower_right, upper_right = mid, node.children_upper_bound
else: # NEG
lower_left, upper_left = mid, node.children_upper_bound
lower_right, upper_right = node.children_lower_bound, mid
left_child_node.set_children_bounds(lower_left, upper_left)
right_child_node.set_children_bounds(lower_right, upper_right)
# Compute histograms of children, and compute their best possible split
# (if needed)
should_split_left = not left_child_node.is_leaf
should_split_right = not right_child_node.is_leaf
if should_split_left or should_split_right:
# We will compute the histograms of both nodes even if one of them
# is a leaf, since computing the second histogram is very cheap
# (using histogram subtraction).
n_samples_left = left_child_node.sample_indices.shape[0]
n_samples_right = right_child_node.sample_indices.shape[0]
if n_samples_left < n_samples_right:
smallest_child = left_child_node
largest_child = right_child_node
else:
smallest_child = right_child_node
largest_child = left_child_node
# We use the brute O(n_samples) method on the child that has the
# smallest number of samples, and the subtraction trick O(n_bins)
# on the other one.
# Note that both left and right child have the same allowed_features.
tic = time()
smallest_child.histograms = self.histogram_builder.compute_histograms_brute(
smallest_child.sample_indices, smallest_child.allowed_features
)
largest_child.histograms = (
self.histogram_builder.compute_histograms_subtraction(
node.histograms,
smallest_child.histograms,
smallest_child.allowed_features,
)
)
# node.histograms is reused in largest_child.histograms. To break cyclic
# memory references and help garbage collection, we set it to None.
node.histograms = None
self.total_compute_hist_time += time() - tic
tic = time()
if should_split_left:
self._compute_best_split_and_push(left_child_node)
if should_split_right:
self._compute_best_split_and_push(right_child_node)
self.total_find_split_time += time() - tic
# Release memory used by histograms as they are no longer needed
# for leaf nodes since they won't be split.
for child in (left_child_node, right_child_node):
if child.is_leaf:
del child.histograms
# Release memory used by histograms as they are no longer needed for
# internal nodes once children histograms have been computed.
del node.histograms
return left_child_node, right_child_node
|
Split the node with highest potential gain.
Returns
-------
left : TreeNode
The resulting left child.
right : TreeNode
The resulting right child.
|
split_next
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/grower.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/grower.py
|
BSD-3-Clause
|
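The comments in `split_next` above describe the histogram subtraction trick: the larger child's histogram is the parent's histogram minus the smaller child's, an O(n_bins) operation instead of O(n_samples). A toy NumPy illustration of that identity for the per-bin gradient sums (made-up data, single feature):

```python
import numpy as np

rng = np.random.default_rng(0)
n_samples, n_bins = 1000, 16
binned = rng.integers(0, n_bins, size=n_samples)   # one binned feature
gradients = rng.normal(size=n_samples)

def grad_hist(sample_indices):
    # Per-bin sum of gradients, the core quantity stored in the histograms.
    return np.bincount(
        binned[sample_indices], weights=gradients[sample_indices], minlength=n_bins
    )

indices = np.arange(n_samples)
small_child = indices[: n_samples // 3]
large_child = indices[n_samples // 3:]

parent_hist = grad_hist(indices)
small_hist = grad_hist(small_child)     # brute, O(n_samples of the small child)
large_hist = parent_hist - small_hist   # subtraction trick, O(n_bins)
print(np.allclose(large_hist, grad_hist(large_child)))   # True
```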
def _compute_interactions(self, node):
r"""Compute features allowed by interactions to be inherited by child nodes.
Example: Assume constraints [{0, 1}, {1, 2}].
1 <- Both constraint groups could be applied from now on
/ \
1 2 <- Left split still fulfills both constraint groups.
/ \ / \ Right split at feature 2 has only group {1, 2} from now on.
LightGBM uses the same logic for overlapping groups. See
https://github.com/microsoft/LightGBM/issues/4481 for details.
Parameters
----------
node : TreeNode
A node that might have children. Based on its feature_idx, the interaction
constraints for possible child nodes are computed.
Returns
-------
allowed_features : ndarray, dtype=uint32
Indices of features allowed to split for children.
interaction_cst_indices : list of ints
Indices of the interaction sets that have to be applied on splits of
child nodes. The fewer sets the stronger the constraint as fewer sets
contain fewer features.
"""
# Note:
# - Case of no interactions is already captured before function call.
# - This is for nodes that are already split and have a
# node.split_info.feature_idx.
allowed_features = set()
interaction_cst_indices = []
for i in node.interaction_cst_indices:
if node.split_info.feature_idx in self.interaction_cst[i]:
interaction_cst_indices.append(i)
allowed_features.update(self.interaction_cst[i])
return (
np.fromiter(allowed_features, dtype=np.uint32, count=len(allowed_features)),
interaction_cst_indices,
)
|
Compute features allowed by interactions to be inherited by child nodes.
Example: Assume constraints [{0, 1}, {1, 2}].
1 <- Both constraint groups could be applied from now on
/ \
1 2 <- Left split still fulfills both constraint groups.
/ \ / \ Right split at feature 2 has only group {1, 2} from now on.
LightGBM uses the same logic for overlapping groups. See
https://github.com/microsoft/LightGBM/issues/4481 for details.
Parameters
----------
node : TreeNode
A node that might have children. Based on its feature_idx, the interaction
constraints for possible child nodes are computed.
Returns
-------
allowed_features : ndarray, dtype=uint32
Indices of features allowed to split for children.
interaction_cst_indices : list of ints
Indices of the interaction sets that have to be applied on splits of
child nodes. The fewer sets the stronger the constraint as fewer sets
contain fewer features.
|
_compute_interactions
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/grower.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/grower.py
|
BSD-3-Clause
|
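Interaction constraints like the `[{0, 1}, {1, 2}]` example in the `_compute_interactions` docstring above are exposed on the public estimators through the `interaction_cst` parameter. A minimal usage sketch:

```python
from sklearn.datasets import make_regression
from sklearn.ensemble import HistGradientBoostingRegressor

X, y = make_regression(n_samples=500, n_features=3, random_state=0)

# Features 0 and 1 may interact, and features 1 and 2 may interact, but
# features 0 and 2 may never be used along the same branch of a tree.
est = HistGradientBoostingRegressor(
    interaction_cst=[{0, 1}, {1, 2}],
    random_state=0,
).fit(X, y)
print(est.n_iter_)
```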
def _finalize_splittable_nodes(self):
"""Transform all splittable nodes into leaves.
Used when some constraint is met e.g. maximum number of leaves or
maximum depth."""
while len(self.splittable_nodes) > 0:
node = self.splittable_nodes.pop()
self._finalize_leaf(node)
|
Transform all splittable nodes into leaves.
Used when some constraint is met e.g. maximum number of leaves or
maximum depth.
|
_finalize_splittable_nodes
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/grower.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/grower.py
|
BSD-3-Clause
|
def make_predictor(self, binning_thresholds):
"""Make a TreePredictor object out of the current tree.
Parameters
----------
binning_thresholds : array-like of floats
Corresponds to the bin_thresholds_ attribute of the BinMapper.
For each feature, this stores:
- the bin frontiers for continuous features
- the unique raw category values for categorical features
Returns
-------
A TreePredictor object.
"""
predictor_nodes = np.zeros(self.n_nodes, dtype=PREDICTOR_RECORD_DTYPE)
binned_left_cat_bitsets = np.zeros(
(self.n_categorical_splits, 8), dtype=X_BITSET_INNER_DTYPE
)
raw_left_cat_bitsets = np.zeros(
(self.n_categorical_splits, 8), dtype=X_BITSET_INNER_DTYPE
)
_fill_predictor_arrays(
predictor_nodes,
binned_left_cat_bitsets,
raw_left_cat_bitsets,
self.root,
binning_thresholds,
self.n_bins_non_missing,
)
return TreePredictor(
predictor_nodes, binned_left_cat_bitsets, raw_left_cat_bitsets
)
|
Make a TreePredictor object out of the current tree.
Parameters
----------
binning_thresholds : array-like of floats
Corresponds to the bin_thresholds_ attribute of the BinMapper.
For each feature, this stores:
- the bin frontiers for continuous features
- the unique raw category values for categorical features
Returns
-------
A TreePredictor object.
|
make_predictor
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/grower.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/grower.py
|
BSD-3-Clause
|
def _fill_predictor_arrays(
predictor_nodes,
binned_left_cat_bitsets,
raw_left_cat_bitsets,
grower_node,
binning_thresholds,
n_bins_non_missing,
next_free_node_idx=0,
next_free_bitset_idx=0,
):
"""Helper used in make_predictor to set the TreePredictor fields."""
node = predictor_nodes[next_free_node_idx]
node["count"] = grower_node.n_samples
node["depth"] = grower_node.depth
if grower_node.split_info is not None:
node["gain"] = grower_node.split_info.gain
else:
node["gain"] = -1
node["value"] = grower_node.value
if grower_node.is_leaf:
# Leaf node
node["is_leaf"] = True
return next_free_node_idx + 1, next_free_bitset_idx
split_info = grower_node.split_info
feature_idx, bin_idx = split_info.feature_idx, split_info.bin_idx
node["feature_idx"] = feature_idx
node["bin_threshold"] = bin_idx
node["missing_go_to_left"] = split_info.missing_go_to_left
node["is_categorical"] = split_info.is_categorical
if split_info.bin_idx == n_bins_non_missing[feature_idx] - 1:
# Split is on the last non-missing bin: it's a "split on nans".
# All nans go to the right, the rest go to the left.
# Note: for categorical splits, bin_idx is 0 and we rely on the bitset
node["num_threshold"] = np.inf
elif split_info.is_categorical:
categories = binning_thresholds[feature_idx]
node["bitset_idx"] = next_free_bitset_idx
binned_left_cat_bitsets[next_free_bitset_idx] = split_info.left_cat_bitset
set_raw_bitset_from_binned_bitset(
raw_left_cat_bitsets[next_free_bitset_idx],
split_info.left_cat_bitset,
categories,
)
next_free_bitset_idx += 1
else:
node["num_threshold"] = binning_thresholds[feature_idx][bin_idx]
next_free_node_idx += 1
node["left"] = next_free_node_idx
next_free_node_idx, next_free_bitset_idx = _fill_predictor_arrays(
predictor_nodes,
binned_left_cat_bitsets,
raw_left_cat_bitsets,
grower_node.left_child,
binning_thresholds=binning_thresholds,
n_bins_non_missing=n_bins_non_missing,
next_free_node_idx=next_free_node_idx,
next_free_bitset_idx=next_free_bitset_idx,
)
node["right"] = next_free_node_idx
return _fill_predictor_arrays(
predictor_nodes,
binned_left_cat_bitsets,
raw_left_cat_bitsets,
grower_node.right_child,
binning_thresholds=binning_thresholds,
n_bins_non_missing=n_bins_non_missing,
next_free_node_idx=next_free_node_idx,
next_free_bitset_idx=next_free_bitset_idx,
)
|
Helper used in make_predictor to set the TreePredictor fields.
|
_fill_predictor_arrays
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/grower.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/grower.py
|
BSD-3-Clause
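As a side note, the recursion above writes nodes in depth-first pre-order: a node at index i always stores its left child at i + 1, and its right child at whatever index becomes free once the whole left subtree has been written. The self-contained toy sketch below uses a hypothetical simplified Node class (not the real grower structures or record dtypes) purely to illustrate that numbering convention.

# Toy illustration of the pre-order node numbering used by the helper above.
class Node:
    def __init__(self, left=None, right=None):
        self.left, self.right = left, right
        self.is_leaf = left is None and right is None

def fill(out, node, next_idx=0):
    my_idx = next_idx
    out.append({"idx": my_idx, "left": None, "right": None})
    next_idx += 1
    if node.is_leaf:
        return next_idx
    out[my_idx]["left"] = next_idx            # left child is written next
    next_idx = fill(out, node.left, next_idx)  # whole left subtree
    out[my_idx]["right"] = next_idx            # right child comes after it
    return fill(out, node.right, next_idx)

tree = Node(Node(Node(), Node()), Node())
out = []
fill(out, tree)
print([(n["idx"], n["left"], n["right"]) for n in out])
# [(0, 1, 4), (1, 2, 3), (2, None, None), (3, None, None), (4, None, None)]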
|
def predict(self, X, known_cat_bitsets, f_idx_map, n_threads):
"""Predict raw values for non-binned data.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
The input samples.
known_cat_bitsets : ndarray of shape (n_categorical_features, 8)
Array of bitsets of known categories, for each categorical feature.
f_idx_map : ndarray of shape (n_features,)
Map from original feature index to the corresponding index in the
known_cat_bitsets array.
n_threads : int
Number of OpenMP threads to use.
Returns
-------
y : ndarray, shape (n_samples,)
The raw predicted values.
"""
out = np.empty(X.shape[0], dtype=Y_DTYPE)
_predict_from_raw_data(
self.nodes,
X,
self.raw_left_cat_bitsets,
known_cat_bitsets,
f_idx_map,
n_threads,
out,
)
return out
|
Predict raw values for non-binned data.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
The input samples.
known_cat_bitsets : ndarray of shape (n_categorical_features, 8)
Array of bitsets of known categories, for each categorical feature.
f_idx_map : ndarray of shape (n_features,)
Map from original feature index to the corresponding index in the
known_cat_bitsets array.
n_threads : int
Number of OpenMP threads to use.
Returns
-------
y : ndarray, shape (n_samples,)
The raw predicted values.
|
predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/predictor.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/predictor.py
|
BSD-3-Clause
|
def predict_binned(self, X, missing_values_bin_idx, n_threads):
"""Predict raw values for binned data.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
The input samples.
missing_values_bin_idx : uint8
Index of the bin that is used for missing values. This is the
index of the last bin and is always equal to max_bins (as passed
to the GBDT classes), or equivalently to n_bins - 1.
n_threads : int
Number of OpenMP threads to use.
Returns
-------
y : ndarray, shape (n_samples,)
The raw predicted values.
"""
out = np.empty(X.shape[0], dtype=Y_DTYPE)
_predict_from_binned_data(
self.nodes,
X,
self.binned_left_cat_bitsets,
missing_values_bin_idx,
n_threads,
out,
)
return out
|
Predict raw values for binned data.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
The input samples.
missing_values_bin_idx : uint8
Index of the bin that is used for missing values. This is the
index of the last bin and is always equal to max_bins (as passed
to the GBDT classes), or equivalently to n_bins - 1.
n_threads : int
Number of OpenMP threads to use.
Returns
-------
y : ndarray, shape (n_samples,)
The raw predicted values.
|
predict_binned
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/predictor.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/predictor.py
|
BSD-3-Clause
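The two entry points serve different stages: predict_binned is used during training on the already-binned matrix, while predict handles raw data at inference time. A hedged sketch, reusing the predictor, bin_mapper, X and X_binned names from the make_predictor sketch above; the make_known_categories_bitsets call and the OpenMP helper import are assumptions about private internals.

from sklearn.utils._openmp_helpers import _openmp_effective_n_threads

n_threads = _openmp_effective_n_threads()

# Training-time scores reuse the binned matrix and the missing-values bin index.
raw_train_scores = predictor.predict_binned(
    X_binned, bin_mapper.missing_values_bin_idx_, n_threads
)

# Inference on raw data additionally needs the known-category bitsets computed
# by the bin mapper (empty here since no feature is categorical).
known_cat_bitsets, f_idx_map = bin_mapper.make_known_categories_bitsets()
raw_scores = predictor.predict(X, known_cat_bitsets, f_idx_map, n_threads)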
|
def get_equivalent_estimator(estimator, lib="lightgbm", n_classes=None):
"""Return an unfitted estimator from another lib with matching hyperparams.
This utility function takes care of renaming the sklearn parameters into
their LightGBM, XGBoost or CatBoost equivalent parameters.
# unmapped XGB parameters:
# - min_samples_leaf
# - min_data_in_bin
# - min_split_gain (there is min_split_loss though?)
# unmapped Catboost parameters:
# max_leaves
# min_*
"""
if lib not in ("lightgbm", "xgboost", "catboost"):
raise ValueError(
"accepted libs are lightgbm, xgboost, and catboost. got {}".format(lib)
)
sklearn_params = estimator.get_params()
if sklearn_params["loss"] == "auto":
raise ValueError(
"auto loss is not accepted. We need to know if "
"the problem is binary or multiclass classification."
)
if sklearn_params["early_stopping"]:
raise NotImplementedError("Early stopping should be deactivated.")
lightgbm_loss_mapping = {
"squared_error": "regression_l2",
"absolute_error": "regression_l1",
"log_loss": "binary" if n_classes == 2 else "multiclass",
"gamma": "gamma",
"poisson": "poisson",
}
lightgbm_params = {
"objective": lightgbm_loss_mapping[sklearn_params["loss"]],
"learning_rate": sklearn_params["learning_rate"],
"n_estimators": sklearn_params["max_iter"],
"num_leaves": sklearn_params["max_leaf_nodes"],
"max_depth": sklearn_params["max_depth"],
"min_data_in_leaf": sklearn_params["min_samples_leaf"],
"reg_lambda": sklearn_params["l2_regularization"],
"max_bin": sklearn_params["max_bins"],
"min_data_in_bin": 1,
"min_sum_hessian_in_leaf": 1e-3,
"min_split_gain": 0,
"verbosity": 10 if sklearn_params["verbose"] else -10,
"boost_from_average": True,
"enable_bundle": False, # also makes feature order consistent
"subsample_for_bin": _BinMapper().subsample,
"poisson_max_delta_step": 1e-12,
"feature_fraction_bynode": sklearn_params["max_features"],
}
if sklearn_params["loss"] == "log_loss" and n_classes > 2:
# LightGBM multiplies hessians by 2 in multiclass loss.
lightgbm_params["min_sum_hessian_in_leaf"] *= 2
# LightGBM 3.0 introduced a different scaling of the hessian for the multiclass
# case.
# It is equivalent of scaling the learning rate.
# See https://github.com/microsoft/LightGBM/pull/3256.
if n_classes is not None:
lightgbm_params["learning_rate"] *= n_classes / (n_classes - 1)
# XGB
xgboost_loss_mapping = {
"squared_error": "reg:linear",
"absolute_error": "LEAST_ABSOLUTE_DEV_NOT_SUPPORTED",
"log_loss": "reg:logistic" if n_classes == 2 else "multi:softmax",
"gamma": "reg:gamma",
"poisson": "count:poisson",
}
xgboost_params = {
"tree_method": "hist",
"grow_policy": "lossguide", # so that we can set max_leaves
"objective": xgboost_loss_mapping[sklearn_params["loss"]],
"learning_rate": sklearn_params["learning_rate"],
"n_estimators": sklearn_params["max_iter"],
"max_leaves": sklearn_params["max_leaf_nodes"],
"max_depth": sklearn_params["max_depth"] or 0,
"lambda": sklearn_params["l2_regularization"],
"max_bin": sklearn_params["max_bins"],
"min_child_weight": 1e-3,
"verbosity": 2 if sklearn_params["verbose"] else 0,
"silent": sklearn_params["verbose"] == 0,
"n_jobs": -1,
"colsample_bynode": sklearn_params["max_features"],
}
# Catboost
catboost_loss_mapping = {
"squared_error": "RMSE",
        # catboost does not support MAE when leaf_estimation_method is Newton
        "absolute_error": "LEAST_ABSOLUTE_DEV_NOT_SUPPORTED",
"log_loss": "Logloss" if n_classes == 2 else "MultiClass",
"gamma": None,
"poisson": "Poisson",
}
catboost_params = {
"loss_function": catboost_loss_mapping[sklearn_params["loss"]],
"learning_rate": sklearn_params["learning_rate"],
"iterations": sklearn_params["max_iter"],
"depth": sklearn_params["max_depth"],
"reg_lambda": sklearn_params["l2_regularization"],
"max_bin": sklearn_params["max_bins"],
"feature_border_type": "Median",
"leaf_estimation_method": "Newton",
"verbose": bool(sklearn_params["verbose"]),
}
if lib == "lightgbm":
from lightgbm import LGBMClassifier, LGBMRegressor
if is_classifier(estimator):
return LGBMClassifier(**lightgbm_params)
else:
return LGBMRegressor(**lightgbm_params)
elif lib == "xgboost":
from xgboost import XGBClassifier, XGBRegressor
if is_classifier(estimator):
return XGBClassifier(**xgboost_params)
else:
return XGBRegressor(**xgboost_params)
else:
from catboost import CatBoostClassifier, CatBoostRegressor
if is_classifier(estimator):
return CatBoostClassifier(**catboost_params)
else:
return CatBoostRegressor(**catboost_params)
|
Return an unfitted estimator from another lib with matching hyperparams.
This utility function takes care of renaming the sklearn parameters into
their LightGBM, XGBoost or CatBoost equivalent parameters.
# unmapped XGB parameters:
# - min_samples_leaf
# - min_data_in_bin
# - min_split_gain (there is min_split_loss though?)
# unmapped Catboost parameters:
# max_leaves
# min_*
|
get_equivalent_estimator
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/utils.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/utils.py
|
BSD-3-Clause
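A hedged usage sketch: this helper is a private benchmarking/test utility, so the import path below is not a stable API, and lightgbm must be installed for the returned estimator class to be importable.

from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.ensemble._hist_gradient_boosting.utils import get_equivalent_estimator

sk_est = HistGradientBoostingRegressor(
    loss="squared_error", max_iter=50, learning_rate=0.1, early_stopping=False
)
# Returns an unfitted LGBMRegressor with hyperparameters mapped from sk_est.
lgbm_est = get_equivalent_estimator(sk_est, lib="lightgbm")
print(type(lgbm_est).__name__)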
|
def test_categorical_feature_negative_missing():
"""Make sure bin mapper treats negative categories as missing values."""
X = np.array(
[[4] * 500 + [1] * 3 + [5] * 10 + [-1] * 3 + [np.nan] * 4], dtype=X_DTYPE
).T
bin_mapper = _BinMapper(
n_bins=4,
is_categorical=np.array([True]),
known_categories=[np.array([1, 4, 5], dtype=X_DTYPE)],
).fit(X)
assert bin_mapper.n_bins_non_missing_ == [3]
X = np.array([[-1, 1, 3, 5, np.nan]], dtype=X_DTYPE).T
# Negative values for categorical features are considered as missing values.
# They are mapped to the bin of index `bin_mapper.missing_values_bin_idx_`,
# which is 3 here.
assert bin_mapper.missing_values_bin_idx_ == 3
expected_trans = np.array([[3, 0, 1, 2, 3]]).T
assert_array_equal(bin_mapper.transform(X), expected_trans)
|
Make sure bin mapper treats negative categories as missing values.
|
test_categorical_feature_negative_missing
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/tests/test_binning.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/tests/test_binning.py
|
BSD-3-Clause
|
def _make_dumb_dataset(n_samples):
"""Make a dumb dataset to test early stopping."""
rng = np.random.RandomState(42)
X_dumb = rng.randn(n_samples, 1)
y_dumb = (X_dumb[:, 0] > 0).astype("int64")
return X_dumb, y_dumb
|
Make a dumb dataset to test early stopping.
|
_make_dumb_dataset
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py
|
BSD-3-Clause
|
def test_quantile_asymmetric_error(quantile):
"""Test quantile regression for asymmetric distributed targets."""
n_samples = 10_000
rng = np.random.RandomState(42)
# take care that X @ coef + intercept > 0
X = np.concatenate(
(
np.abs(rng.randn(n_samples)[:, None]),
-rng.randint(2, size=(n_samples, 1)),
),
axis=1,
)
intercept = 1.23
coef = np.array([0.5, -2])
# For an exponential distribution with rate lambda, e.g. exp(-lambda * x),
# the quantile at level q is:
# quantile(q) = - log(1 - q) / lambda
# scale = 1/lambda = -quantile(q) / log(1-q)
y = rng.exponential(
scale=-(X @ coef + intercept) / np.log(1 - quantile), size=n_samples
)
model = HistGradientBoostingRegressor(
loss="quantile",
quantile=quantile,
max_iter=25,
random_state=0,
max_leaf_nodes=10,
).fit(X, y)
assert_allclose(np.mean(model.predict(X) > y), quantile, rtol=1e-2)
pinball_loss = PinballLoss(quantile=quantile)
loss_true_quantile = pinball_loss(y, X @ coef + intercept)
loss_pred_quantile = pinball_loss(y, model.predict(X))
# we are overfitting
assert loss_pred_quantile <= loss_true_quantile
|
Test quantile regression for asymmetric distributed targets.
|
test_quantile_asymmetric_error
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py
|
BSD-3-Clause
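The construction of y above relies on the exponential quantile identity quantile(q) = -scale * log(1 - q). A quick standalone numeric check of that identity (illustrative only, not part of the test suite):

import numpy as np

rng = np.random.RandomState(0)
q, scale = 0.8, 2.5
samples = rng.exponential(scale=scale, size=1_000_000)
empirical = np.quantile(samples, q)
theoretical = -scale * np.log(1 - q)
print(empirical, theoretical)  # both close to ~4.02
assert np.isclose(empirical, theoretical, rtol=1e-2)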
|
def test_early_stopping_with_sample_weights(monkeypatch):
    """Check that sample weights are passed to the scorer and that _raw_predict is
    not called."""
mock_scorer = Mock(side_effect=get_scorer("neg_median_absolute_error"))
def mock_check_scoring(estimator, scoring):
assert scoring == "neg_median_absolute_error"
return mock_scorer
monkeypatch.setattr(
sklearn.ensemble._hist_gradient_boosting.gradient_boosting,
"check_scoring",
mock_check_scoring,
)
X, y = make_regression(random_state=0)
sample_weight = np.ones_like(y)
hist = HistGradientBoostingRegressor(
max_iter=2,
early_stopping=True,
random_state=0,
scoring="neg_median_absolute_error",
)
mock_raw_predict = Mock(side_effect=hist._raw_predict)
hist._raw_predict = mock_raw_predict
hist.fit(X, y, sample_weight=sample_weight)
# _raw_predict should never be called with scoring as a string
assert mock_raw_predict.call_count == 0
    # The scorer is called twice (train and val) for the baseline score, and twice
    # per iteration (train and val) after that. So 6 times in total for `max_iter=2`.
assert mock_scorer.call_count == 6
for arg_list in mock_scorer.call_args_list:
assert "sample_weight" in arg_list[1]
|
Check that sample weights are passed to the scorer and that _raw_predict is
not called.
|
test_early_stopping_with_sample_weights
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py
|
BSD-3-Clause
|
def test_check_interaction_cst(interaction_cst, n_features, result):
"""Check that _check_interaction_cst returns the expected list of sets"""
est = HistGradientBoostingRegressor()
est.set_params(interaction_cst=interaction_cst)
assert est._check_interaction_cst(n_features) == result
|
Check that _check_interaction_cst returns the expected list of sets
|
test_check_interaction_cst
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py
|
BSD-3-Clause
|
def test_interaction_cst_numerically():
"""Check that interaction constraints have no forbidden interactions."""
rng = np.random.RandomState(42)
n_samples = 1000
X = rng.uniform(size=(n_samples, 2))
# Construct y with a strong interaction term
# y = x0 + x1 + 5 * x0 * x1
y = np.hstack((X, 5 * X[:, [0]] * X[:, [1]])).sum(axis=1)
est = HistGradientBoostingRegressor(random_state=42)
est.fit(X, y)
est_no_interactions = HistGradientBoostingRegressor(
interaction_cst=[{0}, {1}], random_state=42
)
est_no_interactions.fit(X, y)
delta = 0.25
# Make sure we do not extrapolate out of the training set as tree-based estimators
    # are very bad at doing so.
X_test = X[(X[:, 0] < 1 - delta) & (X[:, 1] < 1 - delta)]
X_delta_d_0 = X_test + [delta, 0]
X_delta_0_d = X_test + [0, delta]
X_delta_d_d = X_test + [delta, delta]
# Note: For the y from above as a function of x0 and x1, we have
# y(x0+d, x1+d) = y(x0, x1) + 5 * d * (2/5 + x0 + x1) + 5 * d**2
# y(x0+d, x1) = y(x0, x1) + 5 * d * (1/5 + x1)
# y(x0, x1+d) = y(x0, x1) + 5 * d * (1/5 + x0)
# Without interaction constraints, we would expect a result of 5 * d**2 for the
# following expression, but zero with constraints in place.
assert_allclose(
est_no_interactions.predict(X_delta_d_d)
+ est_no_interactions.predict(X_test)
- est_no_interactions.predict(X_delta_d_0)
- est_no_interactions.predict(X_delta_0_d),
0,
atol=1e-12,
)
# Correct result of the expressions is 5 * delta**2. But this is hard to achieve by
# a fitted tree-based model. However, with 100 iterations the expression should
# at least be positive!
assert np.all(
est.predict(X_delta_d_d)
+ est.predict(X_test)
- est.predict(X_delta_d_0)
- est.predict(X_delta_0_d)
> 0.01
)
|
Check that interaction constraints have no forbidden interactions.
|
test_interaction_cst_numerically
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py
|
BSD-3-Clause
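The quantity asserted above is a mixed second difference, which isolates the interaction term. Evaluated on the true target y = x0 + x1 + 5 * x0 * x1 (rather than on a fitted model) it equals 5 * d**2 exactly, as this small illustrative check shows:

import numpy as np

def f(x0, x1):
    return x0 + x1 + 5 * x0 * x1

d = 0.25
x0, x1 = np.meshgrid(np.linspace(0, 0.5, 5), np.linspace(0, 0.5, 5))
# f(x0+d, x1+d) + f(x0, x1) - f(x0+d, x1) - f(x0, x1+d) keeps only the
# interaction term, so it is constant and equal to 5 * d**2.
mixed_diff = f(x0 + d, x1 + d) + f(x0, x1) - f(x0 + d, x1) - f(x0, x1 + d)
print(np.allclose(mixed_diff, 5 * d**2))  # True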
|
def test_no_user_warning_with_scoring():
"""Check that no UserWarning is raised when scoring is set.
Non-regression test for #22907.
"""
pd = pytest.importorskip("pandas")
X, y = make_regression(n_samples=50, random_state=0)
X_df = pd.DataFrame(X, columns=[f"col{i}" for i in range(X.shape[1])])
est = HistGradientBoostingRegressor(
random_state=0, scoring="neg_mean_absolute_error", early_stopping=True
)
with warnings.catch_warnings():
warnings.simplefilter("error", UserWarning)
est.fit(X_df, y)
|
Check that no UserWarning is raised when scoring is set.
Non-regression test for #22907.
|
test_no_user_warning_with_scoring
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py
|
BSD-3-Clause
|
def test_unknown_category_that_are_negative():
    """Check that unknown categories that are negative do not error.
Non-regression test for #24274.
"""
rng = np.random.RandomState(42)
n_samples = 1000
X = np.c_[rng.rand(n_samples), rng.randint(4, size=n_samples)]
y = np.zeros(shape=n_samples)
y[X[:, 1] % 2 == 0] = 1
hist = HistGradientBoostingRegressor(
random_state=0,
categorical_features=[False, True],
max_iter=10,
).fit(X, y)
# Check that negative values from the second column are treated like a
# missing category
X_test_neg = np.asarray([[1, -2], [3, -4]])
X_test_nan = np.asarray([[1, np.nan], [3, np.nan]])
assert_allclose(hist.predict(X_test_neg), hist.predict(X_test_nan))
|
Check that unknown categories that are negative do not error.
Non-regression test for #24274.
|
test_unknown_category_that_are_negative
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py
|
BSD-3-Clause
|
def test_X_val_in_fit(GradientBoosting, make_X_y, sample_weight, global_random_seed):
    """Test that passing X_val, y_val in fit is the same as using validation_fraction."""
rng = np.random.RandomState(42)
n_samples = 100
X, y = make_X_y(n_samples=n_samples, random_state=rng)
if sample_weight:
sample_weight = np.abs(rng.normal(size=n_samples))
data = (X, y, sample_weight)
else:
sample_weight = None
data = (X, y)
rng_seed = global_random_seed
# Fit with validation fraction and early stopping.
m1 = GradientBoosting(
early_stopping=True,
validation_fraction=0.5,
random_state=rng_seed,
)
m1.fit(X, y, sample_weight)
# Do train-test split ourselves.
rng = check_random_state(rng_seed)
# We do the same as in the fit method.
stratify = y if isinstance(m1, HistGradientBoostingClassifier) else None
random_seed = rng.randint(np.iinfo(np.uint32).max, dtype="u8")
X_train, X_val, y_train, y_val, *sw = train_test_split(
*data,
test_size=0.5,
stratify=stratify,
random_state=random_seed,
)
if sample_weight is not None:
sample_weight_train = sw[0]
sample_weight_val = sw[1]
else:
sample_weight_train = None
sample_weight_val = None
m2 = GradientBoosting(
early_stopping=True,
random_state=rng_seed,
)
m2.fit(
X_train,
y_train,
sample_weight=sample_weight_train,
X_val=X_val,
y_val=y_val,
sample_weight_val=sample_weight_val,
)
assert_allclose(m2.n_iter_, m1.n_iter_)
assert_allclose(m2.predict(X), m1.predict(X))
|
Test that passing X_val, y_val in fit is the same as using validation_fraction.
|
test_X_val_in_fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py
|
BSD-3-Clause
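For reference, a minimal sketch of the user-facing pattern exercised by this test. It assumes a scikit-learn version whose fit method accepts X_val / y_val (per the versionadded note earlier in this document) and requires early stopping to be enabled.

from sklearn.datasets import make_regression
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.model_selection import train_test_split

X, y = make_regression(n_samples=500, random_state=0)
X_train, X_val, y_train, y_val = train_test_split(X, y, random_state=0)

model = HistGradientBoostingRegressor(early_stopping=True, random_state=0)
# The pre-split validation set is used for early stopping instead of an
# internal validation_fraction split.
model.fit(X_train, y_train, X_val=X_val, y_val=y_val)
print(model.n_iter_)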
|
def test_X_val_raises_missing_y_val():
    """Test that an error is raised if X_val is given but y_val is None."""
X, y = make_classification(n_samples=4)
X, X_val = X[:2], X[2:]
y, y_val = y[:2], y[2:]
with pytest.raises(
ValueError,
match="X_val is provided, but y_val was not provided",
):
HistGradientBoostingClassifier().fit(X, y, X_val=X_val)
with pytest.raises(
ValueError,
match="y_val is provided, but X_val was not provided",
):
HistGradientBoostingClassifier().fit(X, y, y_val=y_val)
|
Test that an error is raised if X_val is given but y_val is None.
|
test_X_val_raises_missing_y_val
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py
|
BSD-3-Clause
|
def test_X_val_raises_with_early_stopping_false():
    """Test that an error is raised if X_val is given but early_stopping is False."""
X, y = make_regression(n_samples=4)
X, X_val = X[:2], X[2:]
y, y_val = y[:2], y[2:]
with pytest.raises(
ValueError,
match="X_val and y_val are passed to fit while at the same time",
):
HistGradientBoostingRegressor(early_stopping=False).fit(
X, y, X_val=X_val, y_val=y_val
)
|
Test that an error is raised if X_val is given but early_stopping is False.
|
test_X_val_raises_with_early_stopping_false
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py
|
BSD-3-Clause
|
def test_dataframe_categorical_results_same_as_ndarray(
dataframe_lib, HistGradientBoosting
):
    """Check that pandas categoricals give the same results as an ndarray."""
pytest.importorskip(dataframe_lib)
rng = np.random.RandomState(42)
n_samples = 5_000
n_cardinality = 50
max_bins = 100
f_num = rng.rand(n_samples)
f_cat = rng.randint(n_cardinality, size=n_samples)
# Make f_cat an informative feature
y = (f_cat % 3 == 0) & (f_num > 0.2)
X = np.c_[f_num, f_cat]
f_cat = [f"cat{c:0>3}" for c in f_cat]
X_df = _convert_container(
np.asarray([f_num, f_cat]).T,
dataframe_lib,
["f_num", "f_cat"],
categorical_feature_names=["f_cat"],
)
X_train, X_test, X_train_df, X_test_df, y_train, y_test = train_test_split(
X, X_df, y, random_state=0
)
hist_kwargs = dict(max_iter=10, max_bins=max_bins, random_state=0)
hist_np = HistGradientBoosting(categorical_features=[False, True], **hist_kwargs)
hist_np.fit(X_train, y_train)
hist_pd = HistGradientBoosting(categorical_features="from_dtype", **hist_kwargs)
hist_pd.fit(X_train_df, y_train)
# Check categories are correct and sorted
categories = hist_pd._preprocessor.named_transformers_["encoder"].categories_[0]
assert_array_equal(categories, np.unique(f_cat))
assert len(hist_np._predictors) == len(hist_pd._predictors)
for predictor_1, predictor_2 in zip(hist_np._predictors, hist_pd._predictors):
assert len(predictor_1[0].nodes) == len(predictor_2[0].nodes)
score_np = hist_np.score(X_test, y_test)
score_pd = hist_pd.score(X_test_df, y_test)
assert score_np == pytest.approx(score_pd)
assert_allclose(hist_np.predict(X_test), hist_pd.predict(X_test_df))
|
Check that pandas categoricals give the same results as an ndarray.
|
test_dataframe_categorical_results_same_as_ndarray
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py
|
BSD-3-Clause
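A minimal sketch of the user-facing feature exercised here: categorical_features="from_dtype" with a pandas categorical column. It assumes pandas is installed and a scikit-learn version that supports the "from_dtype" option; the column names and the toy target are made up for illustration.

import numpy as np
import pandas as pd
from sklearn.ensemble import HistGradientBoostingClassifier

rng = np.random.RandomState(0)
n = 1_000
f_cat = rng.randint(5, size=n)
X = pd.DataFrame(
    {
        "f_num": rng.rand(n),
        "f_cat": pd.Categorical([f"cat{c}" for c in f_cat]),
    }
)
y = (f_cat % 2 == 0).astype(int)

# Columns with a categorical dtype are treated as categorical features.
clf = HistGradientBoostingClassifier(categorical_features="from_dtype", random_state=0)
clf.fit(X, y)
print(clf.score(X, y))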
|
def test_dataframe_categorical_errors(dataframe_lib, HistGradientBoosting):
"""Check error cases for pandas categorical feature."""
pytest.importorskip(dataframe_lib)
msg = "Categorical feature 'f_cat' is expected to have a cardinality <= 16"
hist = HistGradientBoosting(categorical_features="from_dtype", max_bins=16)
rng = np.random.RandomState(42)
f_cat = rng.randint(0, high=100, size=100).astype(str)
X_df = _convert_container(
f_cat[:, None], dataframe_lib, ["f_cat"], categorical_feature_names=["f_cat"]
)
y = rng.randint(0, high=2, size=100)
with pytest.raises(ValueError, match=msg):
hist.fit(X_df, y)
|
Check error cases for pandas categorical feature.
|
test_dataframe_categorical_errors
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py
|
BSD-3-Clause
|
def test_categorical_different_order_same_model(dataframe_lib):
    """Check that the order of the categories gives the same model."""
pytest.importorskip(dataframe_lib)
rng = np.random.RandomState(42)
n_samples = 1_000
f_ints = rng.randint(low=0, high=2, size=n_samples)
# Construct a target with some noise
y = f_ints.copy()
flipped = rng.choice([True, False], size=n_samples, p=[0.1, 0.9])
y[flipped] = 1 - y[flipped]
    # Construct two categorical encodings: one where 0 -> A, 1 -> B and one where 0 -> B, 1 -> A
f_cat_a_b = np.asarray(["A", "B"])[f_ints]
f_cat_b_a = np.asarray(["B", "A"])[f_ints]
df_a_b = _convert_container(
f_cat_a_b[:, None],
dataframe_lib,
["f_cat"],
categorical_feature_names=["f_cat"],
)
df_b_a = _convert_container(
f_cat_b_a[:, None],
dataframe_lib,
["f_cat"],
categorical_feature_names=["f_cat"],
)
hist_a_b = HistGradientBoostingClassifier(
categorical_features="from_dtype", random_state=0
)
hist_b_a = HistGradientBoostingClassifier(
categorical_features="from_dtype", random_state=0
)
hist_a_b.fit(df_a_b, y)
hist_b_a.fit(df_b_a, y)
assert len(hist_a_b._predictors) == len(hist_b_a._predictors)
for predictor_1, predictor_2 in zip(hist_a_b._predictors, hist_b_a._predictors):
assert len(predictor_1[0].nodes) == len(predictor_2[0].nodes)
|
Check that the order of the categories gives the same model.
|
test_categorical_different_order_same_model
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py
|
BSD-3-Clause
|
def true_decision_function(input_features):
"""Ground truth decision function
This is a very simple yet asymmetric decision tree. Therefore the
grower code should have no trouble recovering the decision function
from 10000 training samples.
"""
if input_features[0] <= n_bins // 2:
return -1
else:
return -1 if input_features[1] <= n_bins // 3 else 1
|
Ground truth decision function
This is a very simple yet asymmetric decision tree. Therefore the
grower code should have no trouble recovering the decision function
from 10000 training samples.
|
true_decision_function
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/tests/test_grower.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/tests/test_grower.py
|
BSD-3-Clause
|
def test_grower_interaction_constraints():
"""Check that grower respects interaction constraints."""
n_features = 6
interaction_cst = [{0, 1}, {1, 2}, {3, 4, 5}]
n_samples = 10
n_bins = 6
root_feature_splits = []
def get_all_children(node):
res = []
if node.is_leaf:
return res
for n in [node.left_child, node.right_child]:
res.append(n)
res.extend(get_all_children(n))
return res
for seed in range(20):
rng = np.random.RandomState(seed)
X_binned = rng.randint(
0, n_bins - 1, size=(n_samples, n_features), dtype=X_BINNED_DTYPE
)
X_binned = np.asfortranarray(X_binned)
gradients = rng.normal(size=n_samples).astype(G_H_DTYPE)
hessians = np.ones(shape=1, dtype=G_H_DTYPE)
grower = TreeGrower(
X_binned,
gradients,
hessians,
n_bins=n_bins,
min_samples_leaf=1,
interaction_cst=interaction_cst,
n_threads=n_threads,
)
grower.grow()
root_feature_idx = grower.root.split_info.feature_idx
root_feature_splits.append(root_feature_idx)
feature_idx_to_constraint_set = {
0: {0, 1},
1: {0, 1, 2},
2: {1, 2},
3: {3, 4, 5},
4: {3, 4, 5},
5: {3, 4, 5},
}
root_constraint_set = feature_idx_to_constraint_set[root_feature_idx]
for node in (grower.root.left_child, grower.root.right_child):
# Root's children's allowed_features must be the root's constraints set.
assert_array_equal(node.allowed_features, list(root_constraint_set))
for node in get_all_children(grower.root):
if node.is_leaf:
continue
# Ensure that each node uses a subset of features of its parent node.
parent_interaction_cst_indices = set(node.interaction_cst_indices)
right_interactions_cst_indices = set(
node.right_child.interaction_cst_indices
)
left_interactions_cst_indices = set(node.left_child.interaction_cst_indices)
assert right_interactions_cst_indices.issubset(
parent_interaction_cst_indices
)
assert left_interactions_cst_indices.issubset(
parent_interaction_cst_indices
)
# The features used for split must have been present in the root's
# constraint set.
assert node.split_info.feature_idx in root_constraint_set
# Make sure that every feature is used at least once as split for the root node.
assert (
len(set(root_feature_splits))
== len(set().union(*interaction_cst))
== n_features
)
|
Check that grower respects interaction constraints.
|
test_grower_interaction_constraints
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/tests/test_grower.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/tests/test_grower.py
|
BSD-3-Clause
|
def get_leaves_values():
        """get leaf values from left to right"""
values = []
def depth_first_collect_leaf_values(node_idx):
node = nodes[node_idx]
if node["is_leaf"]:
values.append(node["value"])
return
depth_first_collect_leaf_values(node["left"])
depth_first_collect_leaf_values(node["right"])
depth_first_collect_leaf_values(0) # start at root (0)
return values
|
get leaf values from left to right
|
get_leaves_values
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/tests/test_monotonic_constraints.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/tests/test_monotonic_constraints.py
|
BSD-3-Clause
|
def test_split_feature_fraction_per_split(forbidden_features):
"""Check that feature_fraction_per_split is respected.
Because we set `n_features = 4` and `feature_fraction_per_split = 0.25`, it means
that calling `splitter.find_node_split` will be allowed to select a split for a
single completely random feature at each call. So if we iterate enough, we should
cover all the allowed features, irrespective of the values of the gradients and
Hessians of the objective.
"""
n_features = 4
allowed_features = np.array(
list(set(range(n_features)) - forbidden_features), dtype=np.uint32
)
n_bins = 5
n_samples = 40
l2_regularization = 0.0
min_hessian_to_split = 1e-3
min_samples_leaf = 1
min_gain_to_split = 0.0
rng = np.random.default_rng(42)
sample_indices = np.arange(n_samples, dtype=np.uint32)
all_gradients = rng.uniform(low=0.5, high=1, size=n_samples).astype(G_H_DTYPE)
sum_gradients = all_gradients.sum()
all_hessians = np.ones(1, dtype=G_H_DTYPE)
sum_hessians = n_samples
hessians_are_constant = True
X_binned = np.asfortranarray(
rng.integers(low=0, high=n_bins - 1, size=(n_samples, n_features)),
dtype=X_BINNED_DTYPE,
)
X_binned = np.asfortranarray(X_binned, dtype=X_BINNED_DTYPE)
builder = HistogramBuilder(
X_binned,
n_bins,
all_gradients,
all_hessians,
hessians_are_constant,
n_threads,
)
histograms = builder.compute_histograms_brute(sample_indices)
value = compute_node_value(
sum_gradients, sum_hessians, -np.inf, np.inf, l2_regularization
)
n_bins_non_missing = np.array([n_bins] * X_binned.shape[1], dtype=np.uint32)
has_missing_values = np.array([False] * X_binned.shape[1], dtype=np.uint8)
monotonic_cst = np.array(
[MonotonicConstraint.NO_CST] * X_binned.shape[1], dtype=np.int8
)
is_categorical = np.zeros_like(monotonic_cst, dtype=np.uint8)
missing_values_bin_idx = n_bins - 1
params = dict(
X_binned=X_binned,
n_bins_non_missing=n_bins_non_missing,
missing_values_bin_idx=missing_values_bin_idx,
has_missing_values=has_missing_values,
is_categorical=is_categorical,
monotonic_cst=monotonic_cst,
l2_regularization=l2_regularization,
min_hessian_to_split=min_hessian_to_split,
min_samples_leaf=min_samples_leaf,
min_gain_to_split=min_gain_to_split,
hessians_are_constant=hessians_are_constant,
rng=rng,
)
splitter_subsample = Splitter(
feature_fraction_per_split=0.25, # THIS is the important setting here.
**params,
)
splitter_all_features = Splitter(feature_fraction_per_split=1.0, **params)
assert np.all(sample_indices == splitter_subsample.partition)
split_features_subsample = []
split_features_all = []
# The loop is to ensure that we split at least once on each feature.
# This is tracked by split_features and checked at the end.
for i in range(20):
si_root = splitter_subsample.find_node_split(
n_samples,
histograms,
sum_gradients,
sum_hessians,
value,
allowed_features=allowed_features,
)
split_features_subsample.append(si_root.feature_idx)
# This second splitter is our "counterfactual".
si_root = splitter_all_features.find_node_split(
n_samples,
histograms,
sum_gradients,
sum_hessians,
value,
allowed_features=allowed_features,
)
split_features_all.append(si_root.feature_idx)
# Make sure all features are split on.
assert set(split_features_subsample) == set(allowed_features)
    # Make sure our counterfactual always splits on the same feature.
assert len(set(split_features_all)) == 1
|
Check that feature_fraction_per_split is respected.
Because we set `n_features = 4` and `feature_fraction_per_split = 0.25`, it means
that calling `splitter.find_node_split` will be allowed to select a split for a
single completely random feature at each call. So if we iterate enough, we should
cover all the allowed features, irrespective of the values of the gradients and
Hessians of the objective.
|
test_split_feature_fraction_per_split
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/tests/test_splitting.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/tests/test_splitting.py
|
BSD-3-Clause
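At the public API level, the per-split feature subsampling exercised by this splitter-level test corresponds to the estimators' max_features parameter (available in recent scikit-learn versions, where it is mapped to the internal feature_fraction_per_split). A hedged sketch of that user-facing knob:

from sklearn.datasets import make_regression
from sklearn.ensemble import HistGradientBoostingRegressor

X, y = make_regression(n_samples=500, n_features=8, random_state=0)
# max_features=0.5: each split considers a random half of the features.
est = HistGradientBoostingRegressor(max_features=0.5, random_state=0).fit(X, y)
print(est.score(X, y))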
|
def _assert_predictor_equal(gb_1, gb_2, X):
"""Assert that two HistGBM instances are identical."""
# Check identical nodes for each tree
for pred_ith_1, pred_ith_2 in zip(gb_1._predictors, gb_2._predictors):
for predictor_1, predictor_2 in zip(pred_ith_1, pred_ith_2):
assert_array_equal(predictor_1.nodes, predictor_2.nodes)
# Check identical predictions
assert_allclose(gb_1.predict(X), gb_2.predict(X))
|
Assert that two HistGBM instances are identical.
|
_assert_predictor_equal
|
python
|
scikit-learn/scikit-learn
|
sklearn/ensemble/_hist_gradient_boosting/tests/test_warm_start.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_hist_gradient_boosting/tests/test_warm_start.py
|
BSD-3-Clause
|
def _parse_values(s):
'''(INTERNAL) Split a line into a list of values'''
if not _RE_NONTRIVIAL_DATA.search(s):
# Fast path for trivial cases (unfortunately we have to handle missing
# values because of the empty string case :(.)
return [None if s in ('?', '') else s
for s in next(csv.reader([s]))]
# _RE_DENSE_VALUES tokenizes despite quoting, whitespace, etc.
values, errors = zip(*_RE_DENSE_VALUES.findall(',' + s))
if not any(errors):
return [_unquote(v) for v in values]
if _RE_SPARSE_LINE.match(s):
try:
return {int(k): _unquote(v)
for k, v in _RE_SPARSE_KEY_VALUES.findall(s)}
except ValueError:
# an ARFF syntax error in sparse data
for match in _RE_SPARSE_KEY_VALUES.finditer(s):
if not match.group(1):
raise BadLayout('Error parsing %r' % match.group())
raise BadLayout('Unknown parsing error')
else:
# an ARFF syntax error
for match in _RE_DENSE_VALUES.finditer(s):
if match.group(2):
raise BadLayout('Error parsing %r' % match.group())
raise BadLayout('Unknown parsing error')
|
(INTERNAL) Split a line into a list of values
|
_parse_values
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/_arff.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/_arff.py
|
BSD-3-Clause
|
def _decode_relation(self, s):
'''(INTERNAL) Decodes a relation line.
The relation declaration is a line with the format ``@RELATION
<relation-name>``, where ``relation-name`` is a string. The string must
start with alphabetic character and must be quoted if the name includes
spaces, otherwise this method will raise a `BadRelationFormat` exception.
This method must receive a normalized string, i.e., a string without
padding, including the "\r\n" characters.
:param s: a normalized string.
:return: a string with the decoded relation name.
'''
_, v = s.split(' ', 1)
v = v.strip()
if not _RE_RELATION.match(v):
raise BadRelationFormat()
res = str(v.strip('"\''))
return res
|
(INTERNAL) Decodes a relation line.
The relation declaration is a line with the format ``@RELATION
<relation-name>``, where ``relation-name`` is a string. The string must
start with alphabetic character and must be quoted if the name includes
spaces, otherwise this method will raise a `BadRelationFormat` exception.
This method must receive a normalized string, i.e., a string without
padding, including the "\r\n" characters.
:param s: a normalized string.
:return: a string with the decoded relation name.
|
_decode_relation
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/_arff.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/_arff.py
|
BSD-3-Clause
|
def _decode_attribute(self, s):
'''(INTERNAL) Decodes an attribute line.
The attribute is the most complex declaration in an arff file. All
attributes must follow the template::
@attribute <attribute-name> <datatype>
where ``attribute-name`` is a string, quoted if the name contains any
whitespace, and ``datatype`` can be:
- Numerical attributes as ``NUMERIC``, ``INTEGER`` or ``REAL``.
- Strings as ``STRING``.
- Dates (NOT IMPLEMENTED).
- Nominal attributes with format:
{<nominal-name1>, <nominal-name2>, <nominal-name3>, ...}
The nominal names follow the rules for the attribute names, i.e., they
must be quoted if the name contains whitespaces.
This method must receive a normalized string, i.e., a string without
padding, including the "\r\n" characters.
:param s: a normalized string.
:return: a tuple (ATTRIBUTE_NAME, TYPE_OR_VALUES).
'''
_, v = s.split(' ', 1)
v = v.strip()
# Verify the general structure of declaration
m = _RE_ATTRIBUTE.match(v)
if not m:
raise BadAttributeFormat()
# Extracts the raw name and type
name, type_ = m.groups()
# Extracts the final name
name = str(name.strip('"\''))
# Extracts the final type
if type_[:1] == "{" and type_[-1:] == "}":
try:
type_ = _parse_values(type_.strip('{} '))
except Exception:
raise BadAttributeType()
if isinstance(type_, dict):
raise BadAttributeType()
else:
# If not nominal, verify the type name
type_ = str(type_).upper()
if type_ not in ['NUMERIC', 'REAL', 'INTEGER', 'STRING']:
raise BadAttributeType()
return (name, type_)
|
(INTERNAL) Decodes an attribute line.
The attribute is the most complex declaration in an arff file. All
attributes must follow the template::
@attribute <attribute-name> <datatype>
where ``attribute-name`` is a string, quoted if the name contains any
whitespace, and ``datatype`` can be:
- Numerical attributes as ``NUMERIC``, ``INTEGER`` or ``REAL``.
- Strings as ``STRING``.
- Dates (NOT IMPLEMENTED).
- Nominal attributes with format:
{<nominal-name1>, <nominal-name2>, <nominal-name3>, ...}
The nominal names follow the rules for the attribute names, i.e., they
must be quoted if the name contains whitespaces.
This method must receive a normalized string, i.e., a string without
padding, including the "\r\n" characters.
:param s: a normalized string.
:return: a tuple (ATTRIBUTE_NAME, TYPE_OR_VALUES).
|
_decode_attribute
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/_arff.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/_arff.py
|
BSD-3-Clause
|
def decode(self, s, encode_nominal=False, return_type=DENSE):
'''Returns the Python representation of a given ARFF file.
When a file object is passed as an argument, this method reads lines
        iteratively, avoiding loading unnecessary information into memory.
:param s: a string or file object with the ARFF file.
:param encode_nominal: boolean, if True perform a label encoding
while reading the .arff file.
:param return_type: determines the data structure used to store the
dataset. Can be one of `arff.DENSE`, `arff.COO`, `arff.LOD`,
`arff.DENSE_GEN` or `arff.LOD_GEN`.
Consult the sections on `working with sparse data`_ and `loading
progressively`_.
'''
try:
return self._decode(s, encode_nominal=encode_nominal,
matrix_type=return_type)
except ArffException as e:
e.line = self._current_line
raise e
|
Returns the Python representation of a given ARFF file.
When a file object is passed as an argument, this method reads lines
iteratively, avoiding loading unnecessary information into memory.
:param s: a string or file object with the ARFF file.
:param encode_nominal: boolean, if True perform a label encoding
while reading the .arff file.
:param return_type: determines the data structure used to store the
dataset. Can be one of `arff.DENSE`, `arff.COO`, `arff.LOD`,
`arff.DENSE_GEN` or `arff.LOD_GEN`.
Consult the sections on `working with sparse data`_ and `loading
progressively`_.
|
decode
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/_arff.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/_arff.py
|
BSD-3-Clause
|
def _encode_comment(self, s=''):
'''(INTERNAL) Encodes a comment line.
Comments are single line strings starting, obligatorily, with the ``%``
character, and can have any symbol, including whitespaces or special
characters.
If ``s`` is None, this method will simply return an empty comment.
:param s: (OPTIONAL) string.
:return: a string with the encoded comment line.
'''
if s:
return '%s %s'%(_TK_COMMENT, s)
else:
return '%s' % _TK_COMMENT
|
(INTERNAL) Encodes a comment line.
Comments are single line strings starting, obligatorily, with the ``%``
character, and can have any symbol, including whitespaces or special
characters.
If ``s`` is None, this method will simply return an empty comment.
:param s: (OPTIONAL) string.
:return: a string with the encoded comment line.
|
_encode_comment
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/_arff.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/_arff.py
|
BSD-3-Clause
|
def _encode_relation(self, name):
'''(INTERNAL) Decodes a relation line.
The relation declaration is a line with the format ``@RELATION
<relation-name>``, where ``relation-name`` is a string.
:param name: a string.
:return: a string with the encoded relation declaration.
'''
for char in ' %{},':
if char in name:
name = '"%s"'%name
break
return '%s %s'%(_TK_RELATION, name)
|
(INTERNAL) Decodes a relation line.
The relation declaration is a line with the format ``@RELATION
<relation-name>``, where ``relation-name`` is a string.
:param name: a string.
:return: a string with the encoded relation declaration.
|
_encode_relation
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/_arff.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/_arff.py
|
BSD-3-Clause
|
def _encode_attribute(self, name, type_):
'''(INTERNAL) Encodes an attribute line.
The attribute follow the template::
@attribute <attribute-name> <datatype>
where ``attribute-name`` is a string, and ``datatype`` can be:
- Numerical attributes as ``NUMERIC``, ``INTEGER`` or ``REAL``.
- Strings as ``STRING``.
- Dates (NOT IMPLEMENTED).
- Nominal attributes with format:
{<nominal-name1>, <nominal-name2>, <nominal-name3>, ...}
        This method must receive the name of the attribute and its type; if
        the attribute type is nominal, ``type_`` must be a list of values.
:param name: a string.
:param type_: a string or a list of string.
:return: a string with the encoded attribute declaration.
'''
for char in ' %{},':
if char in name:
name = '"%s"'%name
break
if isinstance(type_, (tuple, list)):
type_tmp = ['%s' % encode_string(type_k) for type_k in type_]
type_ = '{%s}'%(', '.join(type_tmp))
return '%s %s %s'%(_TK_ATTRIBUTE, name, type_)
|
(INTERNAL) Encodes an attribute line.
The attribute follow the template::
@attribute <attribute-name> <datatype>
where ``attribute-name`` is a string, and ``datatype`` can be:
- Numerical attributes as ``NUMERIC``, ``INTEGER`` or ``REAL``.
- Strings as ``STRING``.
- Dates (NOT IMPLEMENTED).
- Nominal attributes with format:
{<nominal-name1>, <nominal-name2>, <nominal-name3>, ...}
This method must receive the name of the attribute and its type; if
the attribute type is nominal, ``type_`` must be a list of values.
:param name: a string.
:param type_: a string or a list of string.
:return: a string with the encoded attribute declaration.
|
_encode_attribute
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/_arff.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/_arff.py
|
BSD-3-Clause
|
def iter_encode(self, obj):
'''The iterative version of `arff.ArffEncoder.encode`.
        This iteratively encodes a given object and returns, one by one, the
lines of the ARFF file.
:param obj: the object containing the ARFF information.
:return: (yields) the ARFF file as strings.
'''
# DESCRIPTION
if obj.get('description', None):
for row in obj['description'].split('\n'):
yield self._encode_comment(row)
# RELATION
if not obj.get('relation'):
raise BadObject('Relation name not found or with invalid value.')
yield self._encode_relation(obj['relation'])
yield ''
# ATTRIBUTES
if not obj.get('attributes'):
raise BadObject('Attributes not found.')
attribute_names = set()
for attr in obj['attributes']:
# Verify for bad object format
if not isinstance(attr, (tuple, list)) or \
len(attr) != 2 or \
not isinstance(attr[0], str):
raise BadObject('Invalid attribute declaration "%s"'%str(attr))
if isinstance(attr[1], str):
# Verify for invalid types
if attr[1] not in _SIMPLE_TYPES:
raise BadObject('Invalid attribute type "%s"'%str(attr))
# Verify for bad object format
elif not isinstance(attr[1], (tuple, list)):
raise BadObject('Invalid attribute type "%s"'%str(attr))
# Verify attribute name is not used twice
if attr[0] in attribute_names:
raise BadObject('Trying to use attribute name "%s" for the '
'second time.' % str(attr[0]))
else:
attribute_names.add(attr[0])
yield self._encode_attribute(attr[0], attr[1])
yield ''
attributes = obj['attributes']
# DATA
yield _TK_DATA
if 'data' in obj:
data = _get_data_object_for_encoding(obj.get('data'))
yield from data.encode_data(obj.get('data'), attributes)
yield ''
|
The iterative version of `arff.ArffEncoder.encode`.
This iteratively encodes a given object and returns, one by one, the
lines of the ARFF file.
:param obj: the object containing the ARFF information.
:return: (yields) the ARFF file as strings.
|
iter_encode
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/_arff.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/_arff.py
|
BSD-3-Clause
|
def load(fp, encode_nominal=False, return_type=DENSE):
'''Load a file-like object containing the ARFF document and convert it into
a Python object.
:param fp: a file-like object.
:param encode_nominal: boolean, if True perform a label encoding
while reading the .arff file.
:param return_type: determines the data structure used to store the
dataset. Can be one of `arff.DENSE`, `arff.COO`, `arff.LOD`,
`arff.DENSE_GEN` or `arff.LOD_GEN`.
Consult the sections on `working with sparse data`_ and `loading
progressively`_.
:return: a dictionary.
'''
decoder = ArffDecoder()
return decoder.decode(fp, encode_nominal=encode_nominal,
return_type=return_type)
|
Load a file-like object containing the ARFF document and convert it into
a Python object.
:param fp: a file-like object.
:param encode_nominal: boolean, if True perform a label encoding
while reading the .arff file.
:param return_type: determines the data structure used to store the
dataset. Can be one of `arff.DENSE`, `arff.COO`, `arff.LOD`,
`arff.DENSE_GEN` or `arff.LOD_GEN`.
Consult the sections on `working with sparse data`_ and `loading
progressively`_.
:return: a dictionary.
|
load
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/_arff.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/_arff.py
|
BSD-3-Clause
|
def loads(s, encode_nominal=False, return_type=DENSE):
'''Convert a string instance containing the ARFF document into a Python
object.
:param s: a string object.
:param encode_nominal: boolean, if True perform a label encoding
while reading the .arff file.
:param return_type: determines the data structure used to store the
dataset. Can be one of `arff.DENSE`, `arff.COO`, `arff.LOD`,
`arff.DENSE_GEN` or `arff.LOD_GEN`.
Consult the sections on `working with sparse data`_ and `loading
progressively`_.
:return: a dictionary.
'''
decoder = ArffDecoder()
return decoder.decode(s, encode_nominal=encode_nominal,
return_type=return_type)
|
Convert a string instance containing the ARFF document into a Python
object.
:param s: a string object.
:param encode_nominal: boolean, if True perform a label encoding
while reading the .arff file.
:param return_type: determines the data structure used to store the
dataset. Can be one of `arff.DENSE`, `arff.COO`, `arff.LOD`,
`arff.DENSE_GEN` or `arff.LOD_GEN`.
Consult the sections on `working with sparse data`_ and `loading
progressively`_.
:return: a dictionary.
|
loads
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/_arff.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/_arff.py
|
BSD-3-Clause
|
def dump(obj, fp):
'''Serialize an object representing the ARFF document to a given file-like
object.
:param obj: a dictionary.
:param fp: a file-like object.
'''
encoder = ArffEncoder()
generator = encoder.iter_encode(obj)
last_row = next(generator)
for row in generator:
fp.write(last_row + '\n')
last_row = row
fp.write(last_row)
return fp
|
Serialize an object representing the ARFF document to a given file-like
object.
:param obj: a dictionary.
:param fp: a file-like object.
|
dump
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/_arff.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/_arff.py
|
BSD-3-Clause
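A round-trip sketch tying dump and loads together. The module is vendored under sklearn.externals and is not a public API, so the import shown is an assumption about that private path; the relation name, attributes and data rows are made up for illustration.

import io
from sklearn.externals import _arff as arff

obj = {
    "relation": "weather",
    "description": "toy ARFF document",
    "attributes": [
        ("outlook", ["sunny", "rainy"]),  # nominal attribute
        ("temperature", "REAL"),          # numeric attribute
    ],
    "data": [["sunny", 30.0], ["rainy", 18.5]],
}

buf = io.StringIO()
arff.dump(obj, buf)                    # serialize to the file-like object
decoded = arff.loads(buf.getvalue())   # parse it back from a string
print(decoded["relation"], decoded["data"])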
|
def get_xp(xp: ModuleType) -> Callable[[Callable[..., _T]], Callable[..., _T]]:
"""
Decorator to automatically replace xp with the corresponding array module.
Use like
import numpy as np
@get_xp(np)
def func(x, /, xp, kwarg=None):
return xp.func(x, kwarg=kwarg)
Note that xp must be a keyword argument and come after all non-keyword
arguments.
"""
def inner(f: Callable[..., _T], /) -> Callable[..., _T]:
@wraps(f)
def wrapped_f(*args: object, **kwargs: object) -> object:
return f(*args, xp=xp, **kwargs)
sig = signature(f)
new_sig = sig.replace(
parameters=[par for i, par in sig.parameters.items() if i != "xp"]
)
if wrapped_f.__doc__ is None:
wrapped_f.__doc__ = f"""\
Array API compatibility wrapper for {f.__name__}.
See the corresponding documentation in NumPy/CuPy and/or the array API
specification for more details.
"""
wrapped_f.__signature__ = new_sig # pyright: ignore[reportAttributeAccessIssue]
return wrapped_f # pyright: ignore[reportReturnType]
return inner
|
Decorator to automatically replace xp with the corresponding array module.
Use like
import numpy as np
@get_xp(np)
def func(x, /, xp, kwarg=None):
return xp.func(x, kwarg=kwarg)
Note that xp must be a keyword argument and come after all non-keyword
arguments.
|
get_xp
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_compat/_internal.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_compat/_internal.py
|
BSD-3-Clause
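A small sketch of the decorator pattern described in the docstring: xp is injected as a keyword argument and removed from the reported signature. The vendored import path below is an assumption; the same decorator is available from the upstream array_api_compat package.

import inspect
import numpy as np
from sklearn.externals.array_api_compat._internal import get_xp

@get_xp(np)
def scaled_sum(x, /, xp, scale=1.0):
    # `xp` is bound to numpy by the decorator, so callers never pass it.
    return scale * xp.sum(x)

print(scaled_sum(np.arange(4), scale=2.0))                 # 12.0
print("xp" in inspect.signature(scaled_sum).parameters)    # False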
|
def isdtype(
dtype: DType,
kind: DType | str | tuple[DType | str, ...],
xp: Namespace,
*,
_tuple: bool = True, # Disallow nested tuples
) -> bool:
"""
Returns a boolean indicating whether a provided dtype is of a specified data type ``kind``.
Note that outside of this function, this compat library does not yet fully
support complex numbers.
See
https://data-apis.org/array-api/latest/API_specification/generated/array_api.isdtype.html
for more details
"""
if isinstance(kind, tuple) and _tuple:
return any(
isdtype(dtype, k, xp, _tuple=False)
for k in cast("tuple[DType | str, ...]", kind)
)
elif isinstance(kind, str):
if kind == "bool":
return dtype == xp.bool_
elif kind == "signed integer":
return xp.issubdtype(dtype, xp.signedinteger)
elif kind == "unsigned integer":
return xp.issubdtype(dtype, xp.unsignedinteger)
elif kind == "integral":
return xp.issubdtype(dtype, xp.integer)
elif kind == "real floating":
return xp.issubdtype(dtype, xp.floating)
elif kind == "complex floating":
return xp.issubdtype(dtype, xp.complexfloating)
elif kind == "numeric":
return xp.issubdtype(dtype, xp.number)
else:
raise ValueError(f"Unrecognized data type kind: {kind!r}")
else:
# This will allow things that aren't required by the spec, like
# isdtype(np.float64, float) or isdtype(np.int64, 'l'). Should we be
# more strict here to match the type annotation? Note that the
# array_api_strict implementation will be very strict.
return dtype == kind
|
Returns a boolean indicating whether a provided dtype is of a specified data type ``kind``.
Note that outside of this function, this compat library does not yet fully
support complex numbers.
See
https://data-apis.org/array-api/latest/API_specification/generated/array_api.isdtype.html
for more details
|
isdtype
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_compat/common/_aliases.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_compat/common/_aliases.py
|
BSD-3-Clause
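A few illustrative calls, passing numpy as the array module and using isdtype exactly as defined above. The vendored import path is an assumption; in the upstream package the function lives in array_api_compat.common._aliases.

import numpy as np
from sklearn.externals.array_api_compat.common._aliases import isdtype

print(isdtype(np.dtype("float32"), "real floating", np))        # True
print(isdtype(np.dtype("int64"), ("integral", "bool"), np))     # True (matches "integral")
print(isdtype(np.dtype("complex128"), "real floating", np))     # False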
|
def _is_jax_zero_gradient_array(x: object) -> TypeGuard[_ZeroGradientArray]:
"""Return True if `x` is a zero-gradient array.
These arrays are a design quirk of Jax that may one day be removed.
See https://github.com/google/jax/issues/20620.
"""
# Fast exit
try:
dtype = x.dtype # type: ignore[attr-defined]
except AttributeError:
return False
cls = cast(Hashable, type(dtype))
if not _issubclass_fast(cls, "numpy.dtypes", "VoidDType"):
return False
if "jax" not in sys.modules:
return False
import jax
# jax.float0 is a np.dtype([('float0', 'V')])
return dtype == jax.float0
|
Return True if `x` is a zero-gradient array.
These arrays are a design quirk of Jax that may one day be removed.
See https://github.com/google/jax/issues/20620.
|
_is_jax_zero_gradient_array
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_compat/common/_helpers.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_compat/common/_helpers.py
|
BSD-3-Clause
|
def is_numpy_array(x: object) -> TypeGuard[npt.NDArray[Any]]:
"""
Return True if `x` is a NumPy array.
This function does not import NumPy if it has not already been imported
and is therefore cheap to use.
This also returns True for `ndarray` subclasses and NumPy scalar objects.
See Also
--------
array_namespace
is_array_api_obj
is_cupy_array
is_torch_array
is_ndonnx_array
is_dask_array
is_jax_array
is_pydata_sparse_array
"""
# TODO: Should we reject ndarray subclasses?
cls = cast(Hashable, type(x))
return (
_issubclass_fast(cls, "numpy", "ndarray")
or _issubclass_fast(cls, "numpy", "generic")
) and not _is_jax_zero_gradient_array(x)
|
Return True if `x` is a NumPy array.
This function does not import NumPy if it has not already been imported
and is therefore cheap to use.
This also returns True for `ndarray` subclasses and NumPy scalar objects.
See Also
--------
array_namespace
is_array_api_obj
is_cupy_array
is_torch_array
is_ndonnx_array
is_dask_array
is_jax_array
is_pydata_sparse_array
|
is_numpy_array
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_compat/common/_helpers.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_compat/common/_helpers.py
|
BSD-3-Clause
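A sketch of the lazy-import behaviour these is_*_array helpers share: the type check is done by module and class name, so asking about a numpy array never imports torch or cupy. The vendored import path is an assumption about scikit-learn's private copy of array_api_compat.

import sys
import numpy as np
from sklearn.externals.array_api_compat.common._helpers import (
    is_numpy_array,
    is_torch_array,
)

x = np.ones(3)
print(is_numpy_array(x), is_numpy_array(np.float64(1.0)))  # True True (scalars count too)
print(is_torch_array(x))                                   # False, without importing torch
print("torch" in sys.modules)                              # False, unless imported elsewhere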
|
def is_cupy_array(x: object) -> bool:
"""
Return True if `x` is a CuPy array.
This function does not import CuPy if it has not already been imported
and is therefore cheap to use.
This also returns True for `cupy.ndarray` subclasses and CuPy scalar objects.
See Also
--------
array_namespace
is_array_api_obj
is_numpy_array
is_torch_array
is_ndonnx_array
is_dask_array
is_jax_array
is_pydata_sparse_array
"""
cls = cast(Hashable, type(x))
return _issubclass_fast(cls, "cupy", "ndarray")
|
Return True if `x` is a CuPy array.
This function does not import CuPy if it has not already been imported
and is therefore cheap to use.
This also returns True for `cupy.ndarray` subclasses and CuPy scalar objects.
See Also
--------
array_namespace
is_array_api_obj
is_numpy_array
is_torch_array
is_ndonnx_array
is_dask_array
is_jax_array
is_pydata_sparse_array
|
is_cupy_array
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_compat/common/_helpers.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_compat/common/_helpers.py
|
BSD-3-Clause
|
def is_torch_array(x: object) -> TypeIs[torch.Tensor]:
"""
Return True if `x` is a PyTorch tensor.
This function does not import PyTorch if it has not already been imported
and is therefore cheap to use.
See Also
--------
array_namespace
is_array_api_obj
is_numpy_array
is_cupy_array
is_dask_array
is_jax_array
is_pydata_sparse_array
"""
cls = cast(Hashable, type(x))
return _issubclass_fast(cls, "torch", "Tensor")
|
Return True if `x` is a PyTorch tensor.
This function does not import PyTorch if it has not already been imported
and is therefore cheap to use.
See Also
--------
array_namespace
is_array_api_obj
is_numpy_array
is_cupy_array
is_dask_array
is_jax_array
is_pydata_sparse_array
|
is_torch_array
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_compat/common/_helpers.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_compat/common/_helpers.py
|
BSD-3-Clause
|
def is_ndonnx_array(x: object) -> TypeIs[ndx.Array]:
"""
Return True if `x` is a ndonnx Array.
This function does not import ndonnx if it has not already been imported
and is therefore cheap to use.
See Also
--------
array_namespace
is_array_api_obj
is_numpy_array
is_cupy_array
    is_torch_array
is_dask_array
is_jax_array
is_pydata_sparse_array
"""
cls = cast(Hashable, type(x))
return _issubclass_fast(cls, "ndonnx", "Array")
|
Return True if `x` is a ndonnx Array.
This function does not import ndonnx if it has not already been imported
and is therefore cheap to use.
See Also
--------
array_namespace
is_array_api_obj
is_numpy_array
is_cupy_array
is_torch_array
is_dask_array
is_jax_array
is_pydata_sparse_array
|
is_ndonnx_array
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_compat/common/_helpers.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_compat/common/_helpers.py
|
BSD-3-Clause
|
def is_dask_array(x: object) -> TypeIs[da.Array]:
"""
Return True if `x` is a dask.array Array.
This function does not import dask if it has not already been imported
and is therefore cheap to use.
See Also
--------
array_namespace
is_array_api_obj
is_numpy_array
is_cupy_array
is_torch_array
is_ndonnx_array
is_jax_array
is_pydata_sparse_array
"""
cls = cast(Hashable, type(x))
return _issubclass_fast(cls, "dask.array", "Array")
|
Return True if `x` is a dask.array Array.
This function does not import dask if it has not already been imported
and is therefore cheap to use.
See Also
--------
array_namespace
is_array_api_obj
is_numpy_array
is_cupy_array
is_torch_array
is_ndonnx_array
is_jax_array
is_pydata_sparse_array
|
is_dask_array
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_compat/common/_helpers.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_compat/common/_helpers.py
|
BSD-3-Clause
|
def is_jax_array(x: object) -> TypeIs[jax.Array]:
"""
Return True if `x` is a JAX array.
This function does not import JAX if it has not already been imported
and is therefore cheap to use.
See Also
--------
array_namespace
is_array_api_obj
is_numpy_array
is_cupy_array
is_torch_array
is_ndonnx_array
is_dask_array
is_pydata_sparse_array
"""
cls = cast(Hashable, type(x))
return _issubclass_fast(cls, "jax", "Array") or _is_jax_zero_gradient_array(x)
|
Return True if `x` is a JAX array.
This function does not import JAX if it has not already been imported
and is therefore cheap to use.
See Also
--------
array_namespace
is_array_api_obj
is_numpy_array
is_cupy_array
is_torch_array
is_ndonnx_array
is_dask_array
is_pydata_sparse_array
|
is_jax_array
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_compat/common/_helpers.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_compat/common/_helpers.py
|
BSD-3-Clause
|
def is_pydata_sparse_array(x: object) -> TypeIs[sparse.SparseArray]:
"""
Return True if `x` is an array from the `sparse` package.
This function does not import `sparse` if it has not already been imported
and is therefore cheap to use.
See Also
--------
array_namespace
is_array_api_obj
is_numpy_array
is_cupy_array
is_torch_array
is_ndonnx_array
is_dask_array
is_jax_array
"""
# TODO: Account for other backends.
cls = cast(Hashable, type(x))
return _issubclass_fast(cls, "sparse", "SparseArray")
|
Return True if `x` is an array from the `sparse` package.
This function does not import `sparse` if it has not already been imported
and is therefore cheap to use.
See Also
--------
array_namespace
is_array_api_obj
is_numpy_array
is_cupy_array
is_torch_array
is_ndonnx_array
is_dask_array
is_jax_array
|
is_pydata_sparse_array
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_compat/common/_helpers.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_compat/common/_helpers.py
|
BSD-3-Clause
|
def is_array_api_obj(x: object) -> TypeIs[_ArrayApiObj]: # pyright: ignore[reportUnknownParameterType]
"""
Return True if `x` is an array API compatible array object.
See Also
--------
array_namespace
is_numpy_array
is_cupy_array
is_torch_array
is_ndonnx_array
is_dask_array
is_jax_array
"""
return (
hasattr(x, '__array_namespace__')
or _is_array_api_cls(cast(Hashable, type(x)))
)
|
Return True if `x` is an array API compatible array object.
See Also
--------
array_namespace
is_numpy_array
is_cupy_array
is_torch_array
is_ndonnx_array
is_dask_array
is_jax_array
|
is_array_api_obj
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_compat/common/_helpers.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_compat/common/_helpers.py
|
BSD-3-Clause
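A hedged dispatch sketch combining is_array_api_obj with the per-library predicates above; none of the checks imports an optional backend eagerly (assumes the standalone array_api_compat package):

import numpy as np
from array_api_compat import is_array_api_obj, is_numpy_array, is_torch_array

def describe(x):
    # Order matters only for readability; each predicate is a cheap class check.
    if not is_array_api_obj(x):
        return "not an array"
    if is_numpy_array(x):
        return "numpy"
    if is_torch_array(x):
        return "torch"
    return "other array API object"

print(describe(np.ones(2)))  # numpy
print(describe("hello"))     # not an array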
|
def array_namespace(
*xs: Array | complex | None,
api_version: str | None = None,
use_compat: bool | None = None,
) -> Namespace:
"""
Get the array API compatible namespace for the arrays `xs`.
Parameters
----------
xs: arrays
one or more arrays. xs can also be Python scalars (bool, int, float,
complex, or None), which are ignored.
api_version: str
The newest version of the spec that you need support for (currently
the compat library wrapped APIs support v2024.12).
use_compat: bool or None
If None (the default), the native namespace will be returned if it is
already array API compatible, otherwise a compat wrapper is used. If
True, the compat library wrapped library will be returned. If False,
the native library namespace is returned.
Returns
-------
out: namespace
The array API compatible namespace corresponding to the arrays in `xs`.
Raises
------
TypeError
If `xs` contains arrays from different array libraries or contains a
non-array.
Typical usage is to pass the arguments of a function to
`array_namespace()` at the top of a function to get the corresponding
array API namespace:
.. code:: python
def your_function(x, y):
xp = array_api_compat.array_namespace(x, y)
# Now use xp as the array library namespace
return xp.mean(x, axis=0) + 2*xp.std(y, axis=0)
Wrapped array namespaces can also be imported directly. For example,
`array_namespace(np.array(...))` will return `array_api_compat.numpy`.
This function will also work for any array library not wrapped by
array-api-compat if it explicitly defines `__array_namespace__
<https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__array_namespace__.html>`__
(the wrapped namespace is always preferred if it exists).
See Also
--------
is_array_api_obj
is_numpy_array
is_cupy_array
is_torch_array
is_dask_array
is_jax_array
is_pydata_sparse_array
"""
if use_compat not in [None, True, False]:
raise ValueError("use_compat must be None, True, or False")
_use_compat = use_compat in [None, True]
namespaces: set[Namespace] = set()
for x in xs:
if is_numpy_array(x):
import numpy as np
from .. import numpy as numpy_namespace
if use_compat is True:
_check_api_version(api_version)
namespaces.add(numpy_namespace)
elif use_compat is False:
namespaces.add(np)
else:
                # numpy 2.0+ has __array_namespace__; however, it is not yet fully
                # array API compatible.
namespaces.add(numpy_namespace)
elif is_cupy_array(x):
if _use_compat:
_check_api_version(api_version)
from .. import cupy as cupy_namespace
namespaces.add(cupy_namespace)
else:
import cupy as cp # pyright: ignore[reportMissingTypeStubs]
namespaces.add(cp)
elif is_torch_array(x):
if _use_compat:
_check_api_version(api_version)
from .. import torch as torch_namespace
namespaces.add(torch_namespace)
else:
import torch
namespaces.add(torch)
elif is_dask_array(x):
if _use_compat:
_check_api_version(api_version)
from ..dask import array as dask_namespace
namespaces.add(dask_namespace)
else:
import dask.array as da
namespaces.add(da)
elif is_jax_array(x):
if use_compat is True:
_check_api_version(api_version)
raise ValueError("JAX does not have an array-api-compat wrapper")
elif use_compat is False:
import jax.numpy as jnp
else:
# JAX v0.4.32 and newer implements the array API directly in jax.numpy.
# For older JAX versions, it is available via jax.experimental.array_api.
import jax.numpy
if hasattr(jax.numpy, "__array_api_version__"):
jnp = jax.numpy
else:
import jax.experimental.array_api as jnp # pyright: ignore[reportMissingImports]
namespaces.add(jnp)
elif is_pydata_sparse_array(x):
if use_compat is True:
_check_api_version(api_version)
raise ValueError("`sparse` does not have an array-api-compat wrapper")
else:
import sparse # pyright: ignore[reportMissingTypeStubs]
# `sparse` is already an array namespace. We do not have a wrapper
# submodule for it.
namespaces.add(sparse)
elif hasattr(x, "__array_namespace__"):
if use_compat is True:
raise ValueError(
"The given array does not have an array-api-compat wrapper"
)
x = cast("SupportsArrayNamespace[Any]", x)
namespaces.add(x.__array_namespace__(api_version=api_version))
elif isinstance(x, (bool, int, float, complex, type(None))):
continue
else:
# TODO: Support Python scalars?
raise TypeError(f"{type(x).__name__} is not a supported array type")
if not namespaces:
raise TypeError("Unrecognized array input")
if len(namespaces) != 1:
raise TypeError(f"Multiple namespaces for array inputs: {namespaces}")
(xp,) = namespaces
return xp
|
Get the array API compatible namespace for the arrays `xs`.
Parameters
----------
xs: arrays
one or more arrays. xs can also be Python scalars (bool, int, float,
complex, or None), which are ignored.
api_version: str
The newest version of the spec that you need support for (currently
the compat library wrapped APIs support v2024.12).
use_compat: bool or None
If None (the default), the native namespace will be returned if it is
already array API compatible, otherwise a compat wrapper is used. If
True, the compat library wrapped library will be returned. If False,
the native library namespace is returned.
Returns
-------
out: namespace
The array API compatible namespace corresponding to the arrays in `xs`.
Raises
------
TypeError
If `xs` contains arrays from different array libraries or contains a
non-array.
Typical usage is to pass the arguments of a function to
`array_namespace()` at the top of a function to get the corresponding
array API namespace:
.. code:: python
def your_function(x, y):
xp = array_api_compat.array_namespace(x, y)
# Now use xp as the array library namespace
return xp.mean(x, axis=0) + 2*xp.std(y, axis=0)
Wrapped array namespaces can also be imported directly. For example,
`array_namespace(np.array(...))` will return `array_api_compat.numpy`.
This function will also work for any array library not wrapped by
array-api-compat if it explicitly defines `__array_namespace__
<https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__array_namespace__.html>`__
(the wrapped namespace is always preferred if it exists).
See Also
--------
is_array_api_obj
is_numpy_array
is_cupy_array
is_torch_array
is_dask_array
is_jax_array
is_pydata_sparse_array
|
array_namespace
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_compat/common/_helpers.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_compat/common/_helpers.py
|
BSD-3-Clause
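A short sketch of the documented usage pattern, assuming the standalone array_api_compat package; the same function then works unchanged for any supported backend:

import numpy as np
from array_api_compat import array_namespace

def standardize(x):
    # Resolve the namespace from the input array itself.
    xp = array_namespace(x)
    return (x - xp.mean(x, axis=0)) / xp.std(x, axis=0)

print(standardize(np.asarray([[1.0, 2.0], [3.0, 4.0]])))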
|
def _check_device(bare_xp: Namespace, device: Device) -> None: # pyright: ignore[reportUnusedFunction]
"""
Validate dummy device on device-less array backends.
Notes
-----
This function is also invoked by CuPy, which does have multiple devices
if there are multiple GPUs available.
However, CuPy multi-device support is currently impossible
without using the global device or a context manager:
https://github.com/data-apis/array-api-compat/pull/293
"""
if bare_xp is sys.modules.get("numpy"):
if device not in ("cpu", None):
raise ValueError(f"Unsupported device for NumPy: {device!r}")
elif bare_xp is sys.modules.get("dask.array"):
if device not in ("cpu", _DASK_DEVICE, None):
raise ValueError(f"Unsupported device for Dask: {device!r}")
|
Validate dummy device on device-less array backends.
Notes
-----
This function is also invoked by CuPy, which does have multiple devices
if there are multiple GPUs available.
However, CuPy multi-device support is currently impossible
without using the global device or a context manager:
https://github.com/data-apis/array-api-compat/pull/293
|
_check_device
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_compat/common/_helpers.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_compat/common/_helpers.py
|
BSD-3-Clause
|
def device(x: _ArrayApiObj, /) -> Device:
"""
Hardware device the array data resides on.
This is equivalent to `x.device` according to the `standard
<https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.device.html>`__.
This helper is included because some array libraries either do not have
the `device` attribute or include it with an incompatible API.
Parameters
----------
x: array
array instance from an array API compatible library.
Returns
-------
out: device
a ``device`` object (see the `Device Support <https://data-apis.org/array-api/latest/design_topics/device_support.html>`__
section of the array API specification).
Notes
-----
For NumPy the device is always `"cpu"`. For Dask, the device is always a
special `DASK_DEVICE` object.
See Also
--------
to_device : Move array data to a different device.
"""
if is_numpy_array(x):
return "cpu"
elif is_dask_array(x):
# Peek at the metadata of the Dask array to determine type
if is_numpy_array(x._meta): # pyright: ignore
# Must be on CPU since backed by numpy
return "cpu"
return _DASK_DEVICE
elif is_jax_array(x):
# FIXME Jitted JAX arrays do not have a device attribute
# https://github.com/jax-ml/jax/issues/26000
# Return None in this case. Note that this workaround breaks
# the standard and will result in new arrays being created on the
# default device instead of the same device as the input array(s).
x_device = getattr(x, "device", None)
# Older JAX releases had .device() as a method, which has been replaced
# with a property in accordance with the standard.
if inspect.ismethod(x_device):
return x_device()
else:
return x_device
elif is_pydata_sparse_array(x):
# `sparse` will gain `.device`, so check for this first.
x_device = getattr(x, "device", None)
if x_device is not None:
return x_device
# Everything but DOK has this attr.
try:
inner = x.data # pyright: ignore
except AttributeError:
return "cpu"
# Return the device of the constituent array
return device(inner) # pyright: ignore
return x.device # pyright: ignore
|
Hardware device the array data resides on.
This is equivalent to `x.device` according to the `standard
<https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.device.html>`__.
This helper is included because some array libraries either do not have
the `device` attribute or include it with an incompatible API.
Parameters
----------
x: array
array instance from an array API compatible library.
Returns
-------
out: device
a ``device`` object (see the `Device Support <https://data-apis.org/array-api/latest/design_topics/device_support.html>`__
section of the array API specification).
Notes
-----
For NumPy the device is always `"cpu"`. For Dask, the device is always a
special `DASK_DEVICE` object.
See Also
--------
to_device : Move array data to a different device.
|
device
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_compat/common/_helpers.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_compat/common/_helpers.py
|
BSD-3-Clause
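A minimal sketch of the device helper on NumPy input (assuming the standalone array_api_compat package):

import numpy as np
from array_api_compat import device

x = np.ones(3)
print(device(x))  # 'cpu': NumPy arrays always report the CPU device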
|
def to_device(x: Array, device: Device, /, *, stream: int | Any | None = None) -> Array:
"""
Copy the array from the device on which it currently resides to the specified ``device``.
This is equivalent to `x.to_device(device, stream=stream)` according to
the `standard
<https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.to_device.html>`__.
This helper is included because some array libraries do not have the
`to_device` method.
Parameters
----------
x: array
array instance from an array API compatible library.
device: device
a ``device`` object (see the `Device Support <https://data-apis.org/array-api/latest/design_topics/device_support.html>`__
section of the array API specification).
stream: int | Any | None
stream object to use during copy. In addition to the types supported
in ``array.__dlpack__``, implementations may choose to support any
library-specific stream object with the caveat that any code using
such an object would not be portable.
Returns
-------
out: array
an array with the same data and data type as ``x`` and located on the
specified ``device``.
Notes
-----
For NumPy, this function effectively does nothing since the only supported
device is the CPU. For CuPy, this method supports CuPy CUDA
:external+cupy:class:`Device <cupy.cuda.Device>` and
:external+cupy:class:`Stream <cupy.cuda.Stream>` objects. For PyTorch,
this is the same as :external+torch:meth:`x.to(device) <torch.Tensor.to>`
(the ``stream`` argument is not supported in PyTorch).
See Also
--------
device : Hardware device the array data resides on.
"""
if is_numpy_array(x):
if stream is not None:
raise ValueError("The stream argument to to_device() is not supported")
if device == "cpu":
return x
raise ValueError(f"Unsupported device {device!r}")
elif is_cupy_array(x):
# cupy does not yet have to_device
return _cupy_to_device(x, device, stream=stream)
elif is_torch_array(x):
return _torch_to_device(x, device, stream=stream) # pyright: ignore[reportArgumentType]
elif is_dask_array(x):
if stream is not None:
raise ValueError("The stream argument to to_device() is not supported")
# TODO: What if our array is on the GPU already?
if device == "cpu":
return x
raise ValueError(f"Unsupported device {device!r}")
elif is_jax_array(x):
if not hasattr(x, "__array_namespace__"):
# In JAX v0.4.31 and older, this import adds to_device method to x...
import jax.experimental.array_api # noqa: F401 # pyright: ignore
# ... but only on eager JAX. It won't work inside jax.jit.
if not hasattr(x, "to_device"):
return x
return x.to_device(device, stream=stream)
elif is_pydata_sparse_array(x) and device == _device(x):
# Perform trivial check to return the same array if
# device is same instead of err-ing.
return x
return x.to_device(device, stream=stream) # pyright: ignore
|
Copy the array from the device on which it currently resides to the specified ``device``.
This is equivalent to `x.to_device(device, stream=stream)` according to
the `standard
<https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.to_device.html>`__.
This helper is included because some array libraries do not have the
`to_device` method.
Parameters
----------
x: array
array instance from an array API compatible library.
device: device
a ``device`` object (see the `Device Support <https://data-apis.org/array-api/latest/design_topics/device_support.html>`__
section of the array API specification).
stream: int | Any | None
stream object to use during copy. In addition to the types supported
in ``array.__dlpack__``, implementations may choose to support any
library-specific stream object with the caveat that any code using
such an object would not be portable.
Returns
-------
out: array
an array with the same data and data type as ``x`` and located on the
specified ``device``.
Notes
-----
For NumPy, this function effectively does nothing since the only supported
device is the CPU. For CuPy, this method supports CuPy CUDA
:external+cupy:class:`Device <cupy.cuda.Device>` and
:external+cupy:class:`Stream <cupy.cuda.Stream>` objects. For PyTorch,
this is the same as :external+torch:meth:`x.to(device) <torch.Tensor.to>`
(the ``stream`` argument is not supported in PyTorch).
See Also
--------
device : Hardware device the array data resides on.
|
to_device
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_compat/common/_helpers.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_compat/common/_helpers.py
|
BSD-3-Clause
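A small sketch of a no-op round trip on NumPy, where 'cpu' is the only accepted device (assumes the standalone array_api_compat package):

import numpy as np
from array_api_compat import device, to_device

x = np.arange(5)
y = to_device(x, device(x))  # stays on 'cpu'; the array is returned unchanged
print(y is x)                # True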
|
def size(x: HasShape[Collection[SupportsIndex | None]]) -> int | None:
"""
Return the total number of elements of x.
This is equivalent to `x.size` according to the `standard
<https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.size.html>`__.
This helper is included because PyTorch defines `size` in an
:external+torch:meth:`incompatible way <torch.Tensor.size>`.
It also fixes dask.array's behaviour which returns nan for unknown sizes, whereas
the standard requires None.
"""
# Lazy API compliant arrays, such as ndonnx, can contain None in their shape
if None in x.shape:
return None
out = math.prod(cast("Collection[SupportsIndex]", x.shape))
# dask.array.Array.shape can contain NaN
return None if math.isnan(out) else out
|
Return the total number of elements of x.
This is equivalent to `x.size` according to the `standard
<https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.size.html>`__.
This helper is included because PyTorch defines `size` in an
:external+torch:meth:`incompatible way <torch.Tensor.size>`.
It also fixes dask.array's behaviour which returns nan for unknown sizes, whereas
the standard requires None.
|
size
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_compat/common/_helpers.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_compat/common/_helpers.py
|
BSD-3-Clause
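A minimal sketch of size() on a NumPy array (assuming the standalone array_api_compat package); for lazy backends with unknown dimensions it would return None instead:

import numpy as np
from array_api_compat import size

print(size(np.zeros((2, 3))))  # 6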
|
def is_writeable_array(x: object) -> bool:
"""
Return False if ``x.__setitem__`` is expected to raise; True otherwise.
Return False if `x` is not an array API compatible object.
Warning
-------
As there is no standard way to check if an array is writeable without actually
writing to it, this function blindly returns True for all unknown array types.
"""
cls = cast(Hashable, type(x))
if _issubclass_fast(cls, "numpy", "ndarray"):
return cast("npt.NDArray", x).flags.writeable
res = _is_writeable_cls(cls)
if res is not None:
return res
return hasattr(x, '__array_namespace__')
|
Return False if ``x.__setitem__`` is expected to raise; True otherwise.
Return False if `x` is not an array API compatible object.
Warning
-------
As there is no standard way to check if an array is writeable without actually
writing to it, this function blindly returns True for all unknown array types.
|
is_writeable_array
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_compat/common/_helpers.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_compat/common/_helpers.py
|
BSD-3-Clause
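A small sketch with NumPy, whose writeable flag is inspected directly (assumes the standalone array_api_compat package; is_writeable_array is only present in recent releases):

import numpy as np
from array_api_compat import is_writeable_array

x = np.zeros(4)
print(is_writeable_array(x))  # True
x.flags.writeable = False
print(is_writeable_array(x))  # False: read-only NumPy arrays are detected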
|
def is_lazy_array(x: object) -> bool:
"""Return True if x is potentially a future or it may be otherwise impossible or
expensive to eagerly read its contents, regardless of their size, e.g. by
calling ``bool(x)`` or ``float(x)``.
Return False otherwise; e.g. ``bool(x)`` etc. is guaranteed to succeed and to be
cheap as long as the array has the right dtype and size.
Note
----
This function errs on the side of caution for array types that may or may not be
lazy, e.g. JAX arrays, by always returning True for them.
"""
# **JAX note:** while it is possible to determine if you're inside or outside
# jax.jit by testing the subclass of a jax.Array object, as well as testing bool()
# as we do below for unknown arrays, this is not recommended by JAX best practices.
# **Dask note:** Dask eagerly computes the graph on __bool__, __float__, and so on.
# This behaviour, while impossible to change without breaking backwards
# compatibility, is highly detrimental to performance as the whole graph will end
# up being computed multiple times.
# Note: skipping reclassification of JAX zero gradient arrays, as one will
# exclusively get them once they leave a jax.grad JIT context.
cls = cast(Hashable, type(x))
res = _is_lazy_cls(cls)
if res is not None:
return res
if not hasattr(x, "__array_namespace__"):
return False
# Unknown Array API compatible object. Note that this test may have dire consequences
# in terms of performance, e.g. for a lazy object that eagerly computes the graph
# on __bool__ (dask is one such example, which however is special-cased above).
# Select a single point of the array
s = size(cast("HasShape[Collection[SupportsIndex | None]]", x))
if s is None:
return True
xp = array_namespace(x)
if s > 1:
x = xp.reshape(x, (-1,))[0]
# Cast to dtype=bool and deal with size 0 arrays
x = xp.any(x)
try:
bool(x)
return False
    # The Array API standard dictates that __bool__ should raise TypeError if the
# output cannot be defined.
# Here we allow for it to raise arbitrary exceptions, e.g. like Dask does.
except Exception:
return True
|
Return True if x is potentially a future or it may be otherwise impossible or
expensive to eagerly read its contents, regardless of their size, e.g. by
calling ``bool(x)`` or ``float(x)``.
Return False otherwise; e.g. ``bool(x)`` etc. is guaranteed to succeed and to be
cheap as long as the array has the right dtype and size.
Note
----
This function errs on the side of caution for array types that may or may not be
lazy, e.g. JAX arrays, by always returning True for them.
|
is_lazy_array
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_compat/common/_helpers.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_compat/common/_helpers.py
|
BSD-3-Clause
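A minimal sketch on eager inputs (assumes the standalone array_api_compat package; is_lazy_array is only present in recent releases); a dask or JAX array would return True here:

import numpy as np
from array_api_compat import is_lazy_array

print(is_lazy_array(np.ones(3)))  # False: NumPy evaluates eagerly
print(is_lazy_array([1, 2, 3]))   # False: not an array at all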
|
def asarray(
obj: (
Array
| bool | int | float | complex
| NestedSequence[bool | int | float | complex]
| SupportsBufferProtocol
),
/,
*,
dtype: Optional[DType] = None,
device: Optional[Device] = None,
copy: Optional[bool] = None,
**kwargs,
) -> Array:
"""
Array API compatibility wrapper for asarray().
See the corresponding documentation in the array library and/or the array API
specification for more details.
"""
with cp.cuda.Device(device):
if copy is None:
return cp.asarray(obj, dtype=dtype, **kwargs)
else:
res = cp.array(obj, dtype=dtype, copy=copy, **kwargs)
if not copy and res is not obj:
raise ValueError("Unable to avoid copy while creating an array as requested")
return res
|
Array API compatibility wrapper for asarray().
See the corresponding documentation in the array library and/or the array API
specification for more details.
|
asarray
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_compat/cupy/_aliases.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_compat/cupy/_aliases.py
|
BSD-3-Clause
|
def capabilities(self):
"""
Return a dictionary of array API library capabilities.
The resulting dictionary has the following keys:
- **"boolean indexing"**: boolean indicating whether an array library
supports boolean indexing. Always ``True`` for CuPy.
- **"data-dependent shapes"**: boolean indicating whether an array
library supports data-dependent output shapes. Always ``True`` for
CuPy.
See
https://data-apis.org/array-api/latest/API_specification/generated/array_api.info.capabilities.html
for more details.
See Also
--------
__array_namespace_info__.default_device,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.dtypes,
__array_namespace_info__.devices
Returns
-------
capabilities : dict
A dictionary of array API library capabilities.
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.capabilities()
{'boolean indexing': True,
'data-dependent shapes': True,
'max dimensions': 64}
"""
return {
"boolean indexing": True,
"data-dependent shapes": True,
"max dimensions": 64,
}
|
Return a dictionary of array API library capabilities.
The resulting dictionary has the following keys:
- **"boolean indexing"**: boolean indicating whether an array library
supports boolean indexing. Always ``True`` for CuPy.
- **"data-dependent shapes"**: boolean indicating whether an array
library supports data-dependent output shapes. Always ``True`` for
CuPy.
See
https://data-apis.org/array-api/latest/API_specification/generated/array_api.info.capabilities.html
for more details.
See Also
--------
__array_namespace_info__.default_device,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.dtypes,
__array_namespace_info__.devices
Returns
-------
capabilities : dict
A dictionary of array API library capabilities.
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.capabilities()
{'boolean indexing': True,
'data-dependent shapes': True,
'max dimensions': 64}
|
capabilities
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_compat/cupy/_info.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_compat/cupy/_info.py
|
BSD-3-Clause
|
def default_dtypes(self, *, device=None):
"""
The default data types used for new CuPy arrays.
For CuPy, this always returns the following dictionary:
- **"real floating"**: ``cupy.float64``
- **"complex floating"**: ``cupy.complex128``
- **"integral"**: ``cupy.intp``
- **"indexing"**: ``cupy.intp``
Parameters
----------
device : str, optional
The device to get the default data types for.
Returns
-------
dtypes : dict
A dictionary describing the default data types used for new CuPy
arrays.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.dtypes,
__array_namespace_info__.devices
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.default_dtypes()
{'real floating': cupy.float64,
'complex floating': cupy.complex128,
'integral': cupy.int64,
'indexing': cupy.int64}
"""
# TODO: Does this depend on device?
return {
"real floating": dtype(float64),
"complex floating": dtype(complex128),
"integral": dtype(intp),
"indexing": dtype(intp),
}
|
The default data types used for new CuPy arrays.
For CuPy, this always returns the following dictionary:
- **"real floating"**: ``cupy.float64``
- **"complex floating"**: ``cupy.complex128``
- **"integral"**: ``cupy.intp``
- **"indexing"**: ``cupy.intp``
Parameters
----------
device : str, optional
The device to get the default data types for.
Returns
-------
dtypes : dict
A dictionary describing the default data types used for new CuPy
arrays.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.dtypes,
__array_namespace_info__.devices
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.default_dtypes()
{'real floating': cupy.float64,
'complex floating': cupy.complex128,
'integral': cupy.int64,
'indexing': cupy.int64}
|
default_dtypes
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_compat/cupy/_info.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_compat/cupy/_info.py
|
BSD-3-Clause
|
def dtypes(self, *, device=None, kind=None):
"""
The array API data types supported by CuPy.
Note that this function only returns data types that are defined by
the array API.
Parameters
----------
device : str, optional
The device to get the data types for.
kind : str or tuple of str, optional
The kind of data types to return. If ``None``, all data types are
returned. If a string, only data types of that kind are returned.
If a tuple, a dictionary containing the union of the given kinds
is returned. The following kinds are supported:
- ``'bool'``: boolean data types (i.e., ``bool``).
- ``'signed integer'``: signed integer data types (i.e., ``int8``,
``int16``, ``int32``, ``int64``).
- ``'unsigned integer'``: unsigned integer data types (i.e.,
``uint8``, ``uint16``, ``uint32``, ``uint64``).
- ``'integral'``: integer data types. Shorthand for ``('signed
integer', 'unsigned integer')``.
- ``'real floating'``: real-valued floating-point data types
(i.e., ``float32``, ``float64``).
- ``'complex floating'``: complex floating-point data types (i.e.,
``complex64``, ``complex128``).
- ``'numeric'``: numeric data types. Shorthand for ``('integral',
'real floating', 'complex floating')``.
Returns
-------
dtypes : dict
A dictionary mapping the names of data types to the corresponding
CuPy data types.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.devices
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.dtypes(kind='signed integer')
{'int8': cupy.int8,
'int16': cupy.int16,
'int32': cupy.int32,
'int64': cupy.int64}
"""
# TODO: Does this depend on device?
if kind is None:
return {
"bool": dtype(bool),
"int8": dtype(int8),
"int16": dtype(int16),
"int32": dtype(int32),
"int64": dtype(int64),
"uint8": dtype(uint8),
"uint16": dtype(uint16),
"uint32": dtype(uint32),
"uint64": dtype(uint64),
"float32": dtype(float32),
"float64": dtype(float64),
"complex64": dtype(complex64),
"complex128": dtype(complex128),
}
if kind == "bool":
return {"bool": bool}
if kind == "signed integer":
return {
"int8": dtype(int8),
"int16": dtype(int16),
"int32": dtype(int32),
"int64": dtype(int64),
}
if kind == "unsigned integer":
return {
"uint8": dtype(uint8),
"uint16": dtype(uint16),
"uint32": dtype(uint32),
"uint64": dtype(uint64),
}
if kind == "integral":
return {
"int8": dtype(int8),
"int16": dtype(int16),
"int32": dtype(int32),
"int64": dtype(int64),
"uint8": dtype(uint8),
"uint16": dtype(uint16),
"uint32": dtype(uint32),
"uint64": dtype(uint64),
}
if kind == "real floating":
return {
"float32": dtype(float32),
"float64": dtype(float64),
}
if kind == "complex floating":
return {
"complex64": dtype(complex64),
"complex128": dtype(complex128),
}
if kind == "numeric":
return {
"int8": dtype(int8),
"int16": dtype(int16),
"int32": dtype(int32),
"int64": dtype(int64),
"uint8": dtype(uint8),
"uint16": dtype(uint16),
"uint32": dtype(uint32),
"uint64": dtype(uint64),
"float32": dtype(float32),
"float64": dtype(float64),
"complex64": dtype(complex64),
"complex128": dtype(complex128),
}
if isinstance(kind, tuple):
res = {}
for k in kind:
res.update(self.dtypes(kind=k))
return res
raise ValueError(f"unsupported kind: {kind!r}")
|
The array API data types supported by CuPy.
Note that this function only returns data types that are defined by
the array API.
Parameters
----------
device : str, optional
The device to get the data types for.
kind : str or tuple of str, optional
The kind of data types to return. If ``None``, all data types are
returned. If a string, only data types of that kind are returned.
If a tuple, a dictionary containing the union of the given kinds
is returned. The following kinds are supported:
- ``'bool'``: boolean data types (i.e., ``bool``).
- ``'signed integer'``: signed integer data types (i.e., ``int8``,
``int16``, ``int32``, ``int64``).
- ``'unsigned integer'``: unsigned integer data types (i.e.,
``uint8``, ``uint16``, ``uint32``, ``uint64``).
- ``'integral'``: integer data types. Shorthand for ``('signed
integer', 'unsigned integer')``.
- ``'real floating'``: real-valued floating-point data types
(i.e., ``float32``, ``float64``).
- ``'complex floating'``: complex floating-point data types (i.e.,
``complex64``, ``complex128``).
- ``'numeric'``: numeric data types. Shorthand for ``('integral',
'real floating', 'complex floating')``.
Returns
-------
dtypes : dict
A dictionary mapping the names of data types to the corresponding
CuPy data types.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.devices
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.dtypes(kind='signed integer')
{'int8': cupy.int8,
'int16': cupy.int16,
'int32': cupy.int32,
'int64': cupy.int64}
|
dtypes
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_compat/cupy/_info.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_compat/cupy/_info.py
|
BSD-3-Clause
|
def astype(
x: Array,
dtype: DType,
/,
*,
copy: py_bool = True,
device: Device | None = None,
) -> Array:
"""
Array API compatibility wrapper for astype().
See the corresponding documentation in the array library and/or the array API
specification for more details.
"""
# TODO: respect device keyword?
_helpers._check_device(da, device)
if not copy and dtype == x.dtype:
return x
x = x.astype(dtype)
return x.copy() if copy else x
|
Array API compatibility wrapper for astype().
See the corresponding documentation in the array library and/or the array API
specification for more details.
|
astype
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_compat/dask/array/_aliases.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_compat/dask/array/_aliases.py
|
BSD-3-Clause
|
def arange(
start: float,
/,
stop: float | None = None,
step: float = 1,
*,
dtype: DType | None = None,
device: Device | None = None,
**kwargs: object,
) -> Array:
"""
Array API compatibility wrapper for arange().
See the corresponding documentation in the array library and/or the array API
specification for more details.
"""
# TODO: respect device keyword?
_helpers._check_device(da, device)
args: list[Any] = [start]
if stop is not None:
args.append(stop)
else:
# stop is None, so start is actually stop
# prepend the default value for start which is 0
args.insert(0, 0)
args.append(step)
return da.arange(*args, dtype=dtype, **kwargs)
|
Array API compatibility wrapper for arange().
See the corresponding documentation in the array library and/or the array API
specification for more details.
|
arange
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_compat/dask/array/_aliases.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_compat/dask/array/_aliases.py
|
BSD-3-Clause
|
def asarray(
obj: complex | NestedSequence[complex] | Array | SupportsBufferProtocol,
/,
*,
dtype: DType | None = None,
device: Device | None = None,
copy: py_bool | None = None,
**kwargs: object,
) -> Array:
"""
Array API compatibility wrapper for asarray().
See the corresponding documentation in the array library and/or the array API
specification for more details.
"""
# TODO: respect device keyword?
_helpers._check_device(da, device)
if isinstance(obj, da.Array):
if dtype is not None and dtype != obj.dtype:
if copy is False:
raise ValueError("Unable to avoid copy when changing dtype")
obj = obj.astype(dtype)
return obj.copy() if copy else obj # pyright: ignore[reportAttributeAccessIssue]
if copy is False:
raise ValueError(
"Unable to avoid copy when converting a non-dask object to dask"
)
# copy=None to be uniform across dask < 2024.12 and >= 2024.12
# see https://github.com/dask/dask/pull/11524/
obj = np.array(obj, dtype=dtype, copy=True)
return da.from_array(obj)
|
Array API compatibility wrapper for asarray().
See the corresponding documentation in the array library and/or the array API
specification for more details.
|
asarray
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_compat/dask/array/_aliases.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_compat/dask/array/_aliases.py
|
BSD-3-Clause
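A hedged sketch of the copy semantics described above; it assumes dask is installed and uses the standalone array_api_compat.dask.array namespace:

import numpy as np
import array_api_compat.dask.array as xp

x = xp.asarray([1.0, 2.0, 3.0])  # non-dask input is copied into a dask graph
print(x.compute())               # [1. 2. 3.]
try:
    xp.asarray(np.ones(3), copy=False)
except ValueError as exc:
    print(exc)  # a copy cannot be avoided when converting non-dask input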
|
def clip(
x: Array,
/,
min: float | Array | None = None,
max: float | Array | None = None,
) -> Array:
"""
Array API compatibility wrapper for clip().
See the corresponding documentation in the array library and/or the array API
specification for more details.
"""
def _isscalar(a: float | Array | None, /) -> TypeIs[float | None]:
return a is None or isinstance(a, (int, float))
min_shape = () if _isscalar(min) else min.shape
max_shape = () if _isscalar(max) else max.shape
# TODO: This won't handle dask unknown shapes
result_shape = np.broadcast_shapes(x.shape, min_shape, max_shape)
if min is not None:
min = da.broadcast_to(da.asarray(min), result_shape)
if max is not None:
max = da.broadcast_to(da.asarray(max), result_shape)
if min is None and max is None:
return da.positive(x)
if min is None:
return astype(da.minimum(x, max), x.dtype)
if max is None:
return astype(da.maximum(x, min), x.dtype)
return astype(da.minimum(da.maximum(x, min), max), x.dtype)
|
Array API compatibility wrapper for clip().
See the corresponding documentation in the array library and/or the array API
specification for more details.
|
clip
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_compat/dask/array/_aliases.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_compat/dask/array/_aliases.py
|
BSD-3-Clause
|
def _ensure_single_chunk(x: Array, axis: int) -> tuple[Array, Callable[[Array], Array]]:
"""
Make sure that Array is not broken into multiple chunks along axis.
Returns
-------
x : Array
The input Array with a single chunk along axis.
restore : Callable[Array, Array]
function to apply to the output to rechunk it back into reasonable chunks
"""
if axis < 0:
axis += x.ndim
if x.numblocks[axis] < 2:
return x, lambda x: x
# Break chunks on other axes in an attempt to keep chunk size low
x = x.rechunk({i: -1 if i == axis else "auto" for i in range(x.ndim)})
# Rather than reconstructing the original chunks, which can be a
# very expensive affair, just break down oversized chunks without
# incurring in any transfers over the network.
# This has the downside of a risk of overchunking if the array is
# then used in operations against other arrays that match the
# original chunking pattern.
return x, lambda x: x.rechunk()
|
Make sure that Array is not broken into multiple chunks along axis.
Returns
-------
x : Array
The input Array with a single chunk along axis.
restore : Callable[Array, Array]
function to apply to the output to rechunk it back into reasonable chunks
|
_ensure_single_chunk
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_compat/dask/array/_aliases.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_compat/dask/array/_aliases.py
|
BSD-3-Clause
|
def sort(
x: Array,
/,
*,
axis: int = -1,
descending: py_bool = False,
stable: py_bool = True,
) -> Array:
"""
Array API compatibility layer around the lack of sort() in Dask.
Warnings
--------
This function temporarily rechunks the array along `axis` to a single chunk.
This can be extremely inefficient and can lead to out-of-memory errors.
See the corresponding documentation in the array library and/or the array API
specification for more details.
"""
x, restore = _ensure_single_chunk(x, axis)
meta_xp = array_namespace(x._meta)
x = da.map_blocks(
meta_xp.sort,
x,
axis=axis,
meta=x._meta,
dtype=x.dtype,
descending=descending,
stable=stable,
)
return restore(x)
|
Array API compatibility layer around the lack of sort() in Dask.
Warnings
--------
This function temporarily rechunks the array along `axis` to a single chunk.
This can be extremely inefficient and can lead to out-of-memory errors.
See the corresponding documentation in the array library and/or the array API
specification for more details.
|
sort
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_compat/dask/array/_aliases.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_compat/dask/array/_aliases.py
|
BSD-3-Clause
|
def argsort(
x: Array,
/,
*,
axis: int = -1,
descending: py_bool = False,
stable: py_bool = True,
) -> Array:
"""
Array API compatibility layer around the lack of argsort() in Dask.
See the corresponding documentation in the array library and/or the array API
specification for more details.
Warnings
--------
This function temporarily rechunks the array along `axis` into a single chunk.
This can be extremely inefficient and can lead to out-of-memory errors.
"""
x, restore = _ensure_single_chunk(x, axis)
meta_xp = array_namespace(x._meta)
dtype = meta_xp.argsort(x._meta).dtype
meta = meta_xp.astype(x._meta, dtype)
x = da.map_blocks(
meta_xp.argsort,
x,
axis=axis,
meta=meta,
dtype=dtype,
descending=descending,
stable=stable,
)
return restore(x)
|
Array API compatibility layer around the lack of argsort() in Dask.
See the corresponding documentation in the array library and/or the array API
specification for more details.
Warnings
--------
This function temporarily rechunks the array along `axis` into a single chunk.
This can be extremely inefficient and can lead to out-of-memory errors.
|
argsort
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_compat/dask/array/_aliases.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_compat/dask/array/_aliases.py
|
BSD-3-Clause
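A hedged sketch of sort() and argsort() through the compat namespace, assuming dask is installed; note the rechunking caveat in the two records above:

import numpy as np
import dask.array as da
import array_api_compat.dask.array as xp

x = da.from_array(np.array([3, 1, 2]), chunks=2)
print(xp.sort(x).compute())     # [1 2 3]
print(xp.argsort(x).compute())  # [1 2 0]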
|
def capabilities(self) -> Capabilities:
"""
Return a dictionary of array API library capabilities.
The resulting dictionary has the following keys:
- **"boolean indexing"**: boolean indicating whether an array library
supports boolean indexing.
          Dask supports boolean indexing as long as both the index
and the indexed arrays have known shapes.
Note however that the output .shape and .size properties
will contain a non-compliant math.nan instead of None.
- **"data-dependent shapes"**: boolean indicating whether an array
library supports data-dependent output shapes.
          Dask implements unique_values et al.
Note however that the output .shape and .size properties
will contain a non-compliant math.nan instead of None.
- **"max dimensions"**: integer indicating the maximum number of
dimensions supported by the array library.
See
https://data-apis.org/array-api/latest/API_specification/generated/array_api.info.capabilities.html
for more details.
See Also
--------
__array_namespace_info__.default_device,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.dtypes,
__array_namespace_info__.devices
Returns
-------
capabilities : dict
A dictionary of array API library capabilities.
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.capabilities()
{'boolean indexing': True,
'data-dependent shapes': True,
'max dimensions': 64}
"""
return {
"boolean indexing": True,
"data-dependent shapes": True,
"max dimensions": 64,
}
|
Return a dictionary of array API library capabilities.
The resulting dictionary has the following keys:
- **"boolean indexing"**: boolean indicating whether an array library
supports boolean indexing.
  Dask supports boolean indexing as long as both the index
and the indexed arrays have known shapes.
Note however that the output .shape and .size properties
will contain a non-compliant math.nan instead of None.
- **"data-dependent shapes"**: boolean indicating whether an array
library supports data-dependent output shapes.
  Dask implements unique_values et al.
Note however that the output .shape and .size properties
will contain a non-compliant math.nan instead of None.
- **"max dimensions"**: integer indicating the maximum number of
dimensions supported by the array library.
See
https://data-apis.org/array-api/latest/API_specification/generated/array_api.info.capabilities.html
for more details.
See Also
--------
__array_namespace_info__.default_device,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.dtypes,
__array_namespace_info__.devices
Returns
-------
capabilities : dict
A dictionary of array API library capabilities.
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.capabilities()
{'boolean indexing': True,
'data-dependent shapes': True,
'max dimensions': 64}
|
capabilities
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_compat/dask/array/_info.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_compat/dask/array/_info.py
|
BSD-3-Clause
|
def default_dtypes(self, /, *, device: _Device | None = None) -> DefaultDTypes:
"""
The default data types used for new Dask arrays.
For Dask, this always returns the following dictionary:
- **"real floating"**: ``numpy.float64``
- **"complex floating"**: ``numpy.complex128``
- **"integral"**: ``numpy.intp``
- **"indexing"**: ``numpy.intp``
Parameters
----------
device : str, optional
The device to get the default data types for.
Returns
-------
dtypes : dict
A dictionary describing the default data types used for new Dask
arrays.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.dtypes,
__array_namespace_info__.devices
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.default_dtypes()
{'real floating': dask.float64,
'complex floating': dask.complex128,
'integral': dask.int64,
'indexing': dask.int64}
"""
if device not in ["cpu", _DASK_DEVICE, None]:
raise ValueError(
f'Device not understood. Only "cpu" or _DASK_DEVICE is allowed, '
f"but received: {device!r}"
)
return {
"real floating": dtype(float64),
"complex floating": dtype(complex128),
"integral": dtype(intp),
"indexing": dtype(intp),
}
|
The default data types used for new Dask arrays.
For Dask, this always returns the following dictionary:
- **"real floating"**: ``numpy.float64``
- **"complex floating"**: ``numpy.complex128``
- **"integral"**: ``numpy.intp``
- **"indexing"**: ``numpy.intp``
Parameters
----------
device : str, optional
The device to get the default data types for.
Returns
-------
dtypes : dict
A dictionary describing the default data types used for new Dask
arrays.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.dtypes,
__array_namespace_info__.devices
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.default_dtypes()
{'real floating': dask.float64,
'complex floating': dask.complex128,
'integral': dask.int64,
'indexing': dask.int64}
|
default_dtypes
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_compat/dask/array/_info.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_compat/dask/array/_info.py
|
BSD-3-Clause
|
def dtypes(
self, /, *, device: _Device | None = None, kind: DTypeKind | None = None
) -> DTypesAny:
"""
The array API data types supported by Dask.
Note that this function only returns data types that are defined by
the array API.
Parameters
----------
device : str, optional
The device to get the data types for.
kind : str or tuple of str, optional
The kind of data types to return. If ``None``, all data types are
returned. If a string, only data types of that kind are returned.
If a tuple, a dictionary containing the union of the given kinds
is returned. The following kinds are supported:
- ``'bool'``: boolean data types (i.e., ``bool``).
- ``'signed integer'``: signed integer data types (i.e., ``int8``,
``int16``, ``int32``, ``int64``).
- ``'unsigned integer'``: unsigned integer data types (i.e.,
``uint8``, ``uint16``, ``uint32``, ``uint64``).
- ``'integral'``: integer data types. Shorthand for ``('signed
integer', 'unsigned integer')``.
- ``'real floating'``: real-valued floating-point data types
(i.e., ``float32``, ``float64``).
- ``'complex floating'``: complex floating-point data types (i.e.,
``complex64``, ``complex128``).
- ``'numeric'``: numeric data types. Shorthand for ``('integral',
'real floating', 'complex floating')``.
Returns
-------
dtypes : dict
A dictionary mapping the names of data types to the corresponding
Dask data types.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.devices
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.dtypes(kind='signed integer')
{'int8': dask.int8,
'int16': dask.int16,
'int32': dask.int32,
'int64': dask.int64}
"""
if device not in ["cpu", _DASK_DEVICE, None]:
raise ValueError(
'Device not understood. Only "cpu" or _DASK_DEVICE is allowed, but received:'
f" {device}"
)
if kind is None:
return {
"bool": dtype(bool),
"int8": dtype(int8),
"int16": dtype(int16),
"int32": dtype(int32),
"int64": dtype(int64),
"uint8": dtype(uint8),
"uint16": dtype(uint16),
"uint32": dtype(uint32),
"uint64": dtype(uint64),
"float32": dtype(float32),
"float64": dtype(float64),
"complex64": dtype(complex64),
"complex128": dtype(complex128),
}
if kind == "bool":
return {"bool": bool}
if kind == "signed integer":
return {
"int8": dtype(int8),
"int16": dtype(int16),
"int32": dtype(int32),
"int64": dtype(int64),
}
if kind == "unsigned integer":
return {
"uint8": dtype(uint8),
"uint16": dtype(uint16),
"uint32": dtype(uint32),
"uint64": dtype(uint64),
}
if kind == "integral":
return {
"int8": dtype(int8),
"int16": dtype(int16),
"int32": dtype(int32),
"int64": dtype(int64),
"uint8": dtype(uint8),
"uint16": dtype(uint16),
"uint32": dtype(uint32),
"uint64": dtype(uint64),
}
if kind == "real floating":
return {
"float32": dtype(float32),
"float64": dtype(float64),
}
if kind == "complex floating":
return {
"complex64": dtype(complex64),
"complex128": dtype(complex128),
}
if kind == "numeric":
return {
"int8": dtype(int8),
"int16": dtype(int16),
"int32": dtype(int32),
"int64": dtype(int64),
"uint8": dtype(uint8),
"uint16": dtype(uint16),
"uint32": dtype(uint32),
"uint64": dtype(uint64),
"float32": dtype(float32),
"float64": dtype(float64),
"complex64": dtype(complex64),
"complex128": dtype(complex128),
}
if isinstance(kind, tuple): # type: ignore[reportUnnecessaryIsinstanceCall]
res: dict[str, DType] = {}
for k in kind:
res.update(self.dtypes(kind=k))
return res
raise ValueError(f"unsupported kind: {kind!r}")
|
The array API data types supported by Dask.
Note that this function only returns data types that are defined by
the array API.
Parameters
----------
device : str, optional
The device to get the data types for.
kind : str or tuple of str, optional
The kind of data types to return. If ``None``, all data types are
returned. If a string, only data types of that kind are returned.
If a tuple, a dictionary containing the union of the given kinds
is returned. The following kinds are supported:
- ``'bool'``: boolean data types (i.e., ``bool``).
- ``'signed integer'``: signed integer data types (i.e., ``int8``,
``int16``, ``int32``, ``int64``).
- ``'unsigned integer'``: unsigned integer data types (i.e.,
``uint8``, ``uint16``, ``uint32``, ``uint64``).
- ``'integral'``: integer data types. Shorthand for ``('signed
integer', 'unsigned integer')``.
- ``'real floating'``: real-valued floating-point data types
(i.e., ``float32``, ``float64``).
- ``'complex floating'``: complex floating-point data types (i.e.,
``complex64``, ``complex128``).
- ``'numeric'``: numeric data types. Shorthand for ``('integral',
'real floating', 'complex floating')``.
Returns
-------
dtypes : dict
A dictionary mapping the names of data types to the corresponding
Dask data types.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.devices
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.dtypes(kind='signed integer')
{'int8': dask.int8,
'int16': dask.int16,
'int32': dask.int32,
'int64': dask.int64}
|
dtypes
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_compat/dask/array/_info.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_compat/dask/array/_info.py
|
BSD-3-Clause
|
def asarray(
obj: Array | complex | NestedSequence[complex] | SupportsBufferProtocol,
/,
*,
dtype: DType | None = None,
device: Device | None = None,
copy: _Copy | None = None,
**kwargs: Any,
) -> Array:
"""
Array API compatibility wrapper for asarray().
See the corresponding documentation in the array library and/or the array API
specification for more details.
"""
_helpers._check_device(np, device)
if copy is None:
copy = np._CopyMode.IF_NEEDED
elif copy is False:
copy = np._CopyMode.NEVER
elif copy is True:
copy = np._CopyMode.ALWAYS
return np.array(obj, copy=copy, dtype=dtype, **kwargs) # pyright: ignore
|
Array API compatibility wrapper for asarray().
See the corresponding documentation in the array library and/or the array API
specification for more details.
|
asarray
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_compat/numpy/_aliases.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_compat/numpy/_aliases.py
|
BSD-3-Clause
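A short, hedged sketch of the copy semantics the wrapper above maps onto NumPy's _CopyMode. It assumes the standalone array-api-compat package is installed alongside NumPy; the import is an assumption, not part of the dataset row.
# Sketch only: assumes `array_api_compat` is installed alongside NumPy.
import numpy as np
from array_api_compat import numpy as xp

a = np.arange(3)
b = xp.asarray(a, copy=None)  # IF_NEEDED: may reuse a's buffer when no conversion is required
c = xp.asarray(a, copy=True)  # ALWAYS: guaranteed independent copy
c[0] = 99
print(a.tolist(), c.tolist())  # [0, 1, 2] [99, 1, 2]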
|
def capabilities(self):
"""
Return a dictionary of array API library capabilities.
The resulting dictionary has the following keys:
- **"boolean indexing"**: boolean indicating whether an array library
supports boolean indexing. Always ``True`` for NumPy.
- **"data-dependent shapes"**: boolean indicating whether an array
library supports data-dependent output shapes. Always ``True`` for
NumPy.
See
https://data-apis.org/array-api/latest/API_specification/generated/array_api.info.capabilities.html
for more details.
See Also
--------
__array_namespace_info__.default_device,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.dtypes,
__array_namespace_info__.devices
Returns
-------
capabilities : dict
A dictionary of array API library capabilities.
Examples
--------
>>> info = np.__array_namespace_info__()
>>> info.capabilities()
{'boolean indexing': True,
'data-dependent shapes': True,
'max dimensions': 64}
"""
return {
"boolean indexing": True,
"data-dependent shapes": True,
"max dimensions": 64,
}
|
Return a dictionary of array API library capabilities.
The resulting dictionary has the following keys:
- **"boolean indexing"**: boolean indicating whether an array library
supports boolean indexing. Always ``True`` for NumPy.
- **"data-dependent shapes"**: boolean indicating whether an array
library supports data-dependent output shapes. Always ``True`` for
NumPy.
See
https://data-apis.org/array-api/latest/API_specification/generated/array_api.info.capabilities.html
for more details.
See Also
--------
__array_namespace_info__.default_device,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.dtypes,
__array_namespace_info__.devices
Returns
-------
capabilities : dict
A dictionary of array API library capabilities.
Examples
--------
>>> info = np.__array_namespace_info__()
>>> info.capabilities()
{'boolean indexing': True,
'data-dependent shapes': True,
'max dimensions': 64}
|
capabilities
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_compat/numpy/_info.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_compat/numpy/_info.py
|
BSD-3-Clause
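A small sketch showing how the capabilities dictionary above can be queried defensively; it assumes NumPy >= 2.0, where numpy itself exposes __array_namespace_info__().
# Sketch only: requires NumPy >= 2.0 for np.__array_namespace_info__().
import numpy as np

caps = np.__array_namespace_info__().capabilities()
if caps.get("boolean indexing", False):
    x = np.asarray([1, 2, 3, 4])
    print(x[x > 2])  # [3 4]
print(caps.get("max dimensions"))  # 64 on recent NumPy versions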
|
def default_dtypes(
self,
*,
device: Device | None = None,
) -> dict[str, dtype[intp | float64 | complex128]]:
"""
The default data types used for new NumPy arrays.
For NumPy, this always returns the following dictionary:
- **"real floating"**: ``numpy.float64``
- **"complex floating"**: ``numpy.complex128``
- **"integral"**: ``numpy.intp``
- **"indexing"**: ``numpy.intp``
Parameters
----------
device : str, optional
The device to get the default data types for. For NumPy, only
``'cpu'`` is allowed.
Returns
-------
dtypes : dict
A dictionary describing the default data types used for new NumPy
arrays.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.dtypes,
__array_namespace_info__.devices
Examples
--------
>>> info = np.__array_namespace_info__()
>>> info.default_dtypes()
{'real floating': numpy.float64,
'complex floating': numpy.complex128,
'integral': numpy.int64,
'indexing': numpy.int64}
"""
if device not in ["cpu", None]:
raise ValueError(
'Device not understood. Only "cpu" is allowed, but received:'
f' {device}'
)
return {
"real floating": dtype(float64),
"complex floating": dtype(complex128),
"integral": dtype(intp),
"indexing": dtype(intp),
}
|
The default data types used for new NumPy arrays.
For NumPy, this always returns the following dictionary:
- **"real floating"**: ``numpy.float64``
- **"complex floating"**: ``numpy.complex128``
- **"integral"**: ``numpy.intp``
- **"indexing"**: ``numpy.intp``
Parameters
----------
device : str, optional
The device to get the default data types for. For NumPy, only
``'cpu'`` is allowed.
Returns
-------
dtypes : dict
A dictionary describing the default data types used for new NumPy
arrays.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.dtypes,
__array_namespace_info__.devices
Examples
--------
>>> info = np.__array_namespace_info__()
>>> info.default_dtypes()
{'real floating': numpy.float64,
'complex floating': numpy.complex128,
'integral': numpy.int64,
'indexing': numpy.int64}
|
default_dtypes
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_compat/numpy/_info.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_compat/numpy/_info.py
|
BSD-3-Clause
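A sketch of using the reported defaults instead of hard-coding dtypes; it assumes NumPy >= 2.0. Note that the docstring example prints int64 because intp is 64-bit on most platforms, even though the code returns dtype(intp).
# Sketch only: requires NumPy >= 2.0.
import numpy as np

defaults = np.__array_namespace_info__().default_dtypes()
x = np.zeros(5, dtype=defaults["real floating"])  # float64
idx = np.arange(5, dtype=defaults["indexing"])    # intp (int64 on most platforms)
print(x.dtype, idx.dtype)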
|
def dtypes(
self,
*,
device: Device | None = None,
kind: str | tuple[str, ...] | None = None,
) -> dict[str, DType]:
"""
The array API data types supported by NumPy.
Note that this function only returns data types that are defined by
the array API.
Parameters
----------
device : str, optional
The device to get the data types for. For NumPy, only ``'cpu'`` is
allowed.
kind : str or tuple of str, optional
The kind of data types to return. If ``None``, all data types are
returned. If a string, only data types of that kind are returned.
If a tuple, a dictionary containing the union of the given kinds
is returned. The following kinds are supported:
- ``'bool'``: boolean data types (i.e., ``bool``).
- ``'signed integer'``: signed integer data types (i.e., ``int8``,
``int16``, ``int32``, ``int64``).
- ``'unsigned integer'``: unsigned integer data types (i.e.,
``uint8``, ``uint16``, ``uint32``, ``uint64``).
- ``'integral'``: integer data types. Shorthand for ``('signed
integer', 'unsigned integer')``.
- ``'real floating'``: real-valued floating-point data types
(i.e., ``float32``, ``float64``).
- ``'complex floating'``: complex floating-point data types (i.e.,
``complex64``, ``complex128``).
- ``'numeric'``: numeric data types. Shorthand for ``('integral',
'real floating', 'complex floating')``.
Returns
-------
dtypes : dict
A dictionary mapping the names of data types to the corresponding
NumPy data types.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.devices
Examples
--------
>>> info = np.__array_namespace_info__()
>>> info.dtypes(kind='signed integer')
{'int8': numpy.int8,
'int16': numpy.int16,
'int32': numpy.int32,
'int64': numpy.int64}
"""
if device not in ["cpu", None]:
raise ValueError(
'Device not understood. Only "cpu" is allowed, but received:'
f' {device}'
)
if kind is None:
return {
"bool": dtype(bool),
"int8": dtype(int8),
"int16": dtype(int16),
"int32": dtype(int32),
"int64": dtype(int64),
"uint8": dtype(uint8),
"uint16": dtype(uint16),
"uint32": dtype(uint32),
"uint64": dtype(uint64),
"float32": dtype(float32),
"float64": dtype(float64),
"complex64": dtype(complex64),
"complex128": dtype(complex128),
}
if kind == "bool":
return {"bool": dtype(bool)}
if kind == "signed integer":
return {
"int8": dtype(int8),
"int16": dtype(int16),
"int32": dtype(int32),
"int64": dtype(int64),
}
if kind == "unsigned integer":
return {
"uint8": dtype(uint8),
"uint16": dtype(uint16),
"uint32": dtype(uint32),
"uint64": dtype(uint64),
}
if kind == "integral":
return {
"int8": dtype(int8),
"int16": dtype(int16),
"int32": dtype(int32),
"int64": dtype(int64),
"uint8": dtype(uint8),
"uint16": dtype(uint16),
"uint32": dtype(uint32),
"uint64": dtype(uint64),
}
if kind == "real floating":
return {
"float32": dtype(float32),
"float64": dtype(float64),
}
if kind == "complex floating":
return {
"complex64": dtype(complex64),
"complex128": dtype(complex128),
}
if kind == "numeric":
return {
"int8": dtype(int8),
"int16": dtype(int16),
"int32": dtype(int32),
"int64": dtype(int64),
"uint8": dtype(uint8),
"uint16": dtype(uint16),
"uint32": dtype(uint32),
"uint64": dtype(uint64),
"float32": dtype(float32),
"float64": dtype(float64),
"complex64": dtype(complex64),
"complex128": dtype(complex128),
}
if isinstance(kind, tuple):
res: dict[str, DType] = {}
for k in kind:
res.update(self.dtypes(kind=k))
return res
raise ValueError(f"unsupported kind: {kind!r}")
|
The array API data types supported by NumPy.
Note that this function only returns data types that are defined by
the array API.
Parameters
----------
device : str, optional
The device to get the data types for. For NumPy, only ``'cpu'`` is
allowed.
kind : str or tuple of str, optional
The kind of data types to return. If ``None``, all data types are
returned. If a string, only data types of that kind are returned.
If a tuple, a dictionary containing the union of the given kinds
is returned. The following kinds are supported:
- ``'bool'``: boolean data types (i.e., ``bool``).
- ``'signed integer'``: signed integer data types (i.e., ``int8``,
``int16``, ``int32``, ``int64``).
- ``'unsigned integer'``: unsigned integer data types (i.e.,
``uint8``, ``uint16``, ``uint32``, ``uint64``).
- ``'integral'``: integer data types. Shorthand for ``('signed
integer', 'unsigned integer')``.
- ``'real floating'``: real-valued floating-point data types
(i.e., ``float32``, ``float64``).
- ``'complex floating'``: complex floating-point data types (i.e.,
``complex64``, ``complex128``).
- ``'numeric'``: numeric data types. Shorthand for ``('integral',
'real floating', 'complex floating')``.
Returns
-------
dtypes : dict
A dictionary mapping the names of data types to the corresponding
NumPy data types.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.devices
Examples
--------
>>> info = np.__array_namespace_info__()
>>> info.dtypes(kind='signed integer')
{'int8': numpy.int8,
'int16': numpy.int16,
'int32': numpy.int32,
'int64': numpy.int64}
|
dtypes
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_compat/numpy/_info.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_compat/numpy/_info.py
|
BSD-3-Clause
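A sketch of the kind filtering above on the NumPy side, including the ValueError raised for an unrecognized device; it assumes NumPy >= 2.0 and that numpy's own info object mirrors the validation shown in the compat wrapper.
# Sketch only: requires NumPy >= 2.0.
import numpy as np

info = np.__array_namespace_info__()
print(sorted(info.dtypes(kind="integral")))                 # 8 integer dtype names
print(sorted(info.dtypes(kind=("bool", "real floating"))))  # ['bool', 'float32', 'float64']

try:
    info.dtypes(device="gpu")
except ValueError as exc:
    print(exc)  # only 'cpu' (or None) is accepted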
|
def _sum_prod_no_axis(x: Array, dtype: DType | None) -> Array:
"""
Implements `sum(..., axis=())` and `prod(..., axis=())`.
Works around https://github.com/pytorch/pytorch/issues/29137
"""
if dtype is not None:
return x.clone() if dtype == x.dtype else x.to(dtype)
# We can't upcast uint8 according to the spec because there is no
# torch.uint64, so at least upcast to int64 which is what prod does
# when axis=None.
if x.dtype in (torch.uint8, torch.int8, torch.int16, torch.int32):
return x.to(torch.int64)
return x.clone()
|
Implements `sum(..., axis=())` and `prod(..., axis=())`.
Works around https://github.com/pytorch/pytorch/issues/29137
|
_sum_prod_no_axis
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_compat/torch/_aliases.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_compat/torch/_aliases.py
|
BSD-3-Clause
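A sketch of the behaviour this helper gives sum(..., axis=()) through the compat namespace: no reduction is performed, but small integer dtypes are upcast to int64. It assumes torch and the standalone array-api-compat package are installed.
# Sketch only: assumes `torch` and `array_api_compat` are installed.
import torch
from array_api_compat import torch as xp

x = torch.tensor([1, 2, 3], dtype=torch.int16)
out = xp.sum(x, axis=())        # empty axis tuple: reduce over no axes
print(out.tolist(), out.dtype)  # [1, 2, 3] torch.int64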
|
def isdtype(
dtype: DType, kind: Union[DType, str, Tuple[Union[DType, str], ...]],
*, _tuple=True, # Disallow nested tuples
) -> bool:
"""
Returns a boolean indicating whether a provided dtype is of a specified data type ``kind``.
Note that outside of this function, this compat library does not yet fully
support complex numbers.
See
https://data-apis.org/array-api/latest/API_specification/generated/array_api.isdtype.html
for more details
"""
if isinstance(kind, tuple) and _tuple:
return _builtin_any(isdtype(dtype, k, _tuple=False) for k in kind)
elif isinstance(kind, str):
if kind == 'bool':
return dtype == torch.bool
elif kind == 'signed integer':
return dtype in _int_dtypes and dtype.is_signed
elif kind == 'unsigned integer':
return dtype in _int_dtypes and not dtype.is_signed
elif kind == 'integral':
return dtype in _int_dtypes
elif kind == 'real floating':
return dtype.is_floating_point
elif kind == 'complex floating':
return dtype.is_complex
elif kind == 'numeric':
return isdtype(dtype, ('integral', 'real floating', 'complex floating'))
else:
raise ValueError(f"Unrecognized data type kind: {kind!r}")
else:
return dtype == kind
|
Returns a boolean indicating whether a provided dtype is of a specified data type ``kind``.
Note that outside of this function, this compat library does not yet fully
support complex numbers.
See
https://data-apis.org/array-api/latest/API_specification/generated/array_api.isdtype.html
for more details
|
isdtype
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_compat/torch/_aliases.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_compat/torch/_aliases.py
|
BSD-3-Clause
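A few hedged calls illustrating the kind matching implemented above; they assume torch and the standalone array-api-compat package are installed.
# Sketch only: assumes `torch` and `array_api_compat` are installed.
import torch
from array_api_compat import torch as xp

print(xp.isdtype(torch.float32, "real floating"))            # True
print(xp.isdtype(torch.int8, ("bool", "unsigned integer")))  # False: int8 is a signed integer
print(xp.isdtype(torch.complex64, "numeric"))                # True
print(xp.isdtype(torch.int32, torch.int32))                  # True: dtype-to-dtype comparison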
|
def capabilities(self):
"""
Return a dictionary of array API library capabilities.
The resulting dictionary has the following keys:
- **"boolean indexing"**: boolean indicating whether an array library
supports boolean indexing. Always ``True`` for PyTorch.
- **"data-dependent shapes"**: boolean indicating whether an array
library supports data-dependent output shapes. Always ``True`` for
PyTorch.
See
https://data-apis.org/array-api/latest/API_specification/generated/array_api.info.capabilities.html
for more details.
See Also
--------
__array_namespace_info__.default_device,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.dtypes,
__array_namespace_info__.devices
Returns
-------
capabilities : dict
A dictionary of array API library capabilities.
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.capabilities()
{'boolean indexing': True,
'data-dependent shapes': True,
'max dimensions': 64}
"""
return {
"boolean indexing": True,
"data-dependent shapes": True,
"max dimensions": 64,
}
|
Return a dictionary of array API library capabilities.
The resulting dictionary has the following keys:
- **"boolean indexing"**: boolean indicating whether an array library
supports boolean indexing. Always ``True`` for PyTorch.
- **"data-dependent shapes"**: boolean indicating whether an array
library supports data-dependent output shapes. Always ``True`` for
PyTorch.
See
https://data-apis.org/array-api/latest/API_specification/generated/array_api.info.capabilities.html
for more details.
See Also
--------
__array_namespace_info__.default_device,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.dtypes,
__array_namespace_info__.devices
Returns
-------
capabilities : dict
A dictionary of array API library capabilities.
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.capabilities()
{'boolean indexing': True,
'data-dependent shapes': True,
'max dimensions': 64}
|
capabilities
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_compat/torch/_info.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_compat/torch/_info.py
|
BSD-3-Clause
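A sketch that guards a high-rank allocation with the reported "max dimensions" value; it assumes torch and the standalone array-api-compat package are installed.
# Sketch only: assumes `torch` and `array_api_compat` are installed.
from array_api_compat import torch as xp

caps = xp.__array_namespace_info__().capabilities()
ndim = 8
if ndim <= caps.get("max dimensions", 0):
    x = xp.zeros((1,) * ndim)
    print(x.ndim)  # 8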
|
def default_dtypes(self, *, device=None):
"""
The default data types used for new PyTorch arrays.
Parameters
----------
device : Device, optional
The device to get the default data types for.
Unused for PyTorch, as all devices use the same default dtypes.
Returns
-------
dtypes : dict
A dictionary describing the default data types used for new PyTorch
arrays.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.dtypes,
__array_namespace_info__.devices
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.default_dtypes()
{'real floating': torch.float32,
'complex floating': torch.complex64,
'integral': torch.int64,
'indexing': torch.int64}
"""
# Note: if the default is set to float64, the devices like MPS that
# don't support float64 will error. We still return the default_dtype
# value here because this error doesn't represent a different default
# per-device.
default_floating = torch.get_default_dtype()
default_complex = torch.complex64 if default_floating == torch.float32 else torch.complex128
default_integral = torch.int64
return {
"real floating": default_floating,
"complex floating": default_complex,
"integral": default_integral,
"indexing": default_integral,
}
|
The default data types used for new PyTorch arrays.
Parameters
----------
device : Device, optional
The device to get the default data types for.
Unused for PyTorch, as all devices use the same default dtypes.
Returns
-------
dtypes : dict
A dictionary describing the default data types used for new PyTorch
arrays.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.dtypes,
__array_namespace_info__.devices
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.default_dtypes()
{'real floating': torch.float32,
'complex floating': torch.complex64,
'integral': torch.int64,
'indexing': torch.int64}
|
default_dtypes
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_compat/torch/_info.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_compat/torch/_info.py
|
BSD-3-Clause
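A sketch showing that the defaults reported above track torch.get_default_dtype(), including the paired complex dtype; it assumes torch and the standalone array-api-compat package are installed, and it restores the global default afterwards.
# Sketch only: assumes `torch` and `array_api_compat` are installed.
import torch
from array_api_compat import torch as xp

info = xp.__array_namespace_info__()
print(info.default_dtypes()["real floating"])  # torch.float32 with the stock default

previous = torch.get_default_dtype()
torch.set_default_dtype(torch.float64)
try:
    print(info.default_dtypes()["complex floating"])  # torch.complex128 once the default is float64
finally:
    torch.set_default_dtype(previous)  # restore the global default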
|