code | docstring | func_name | language | repo | path | url | license
---|---|---|---|---|---|---|---|
def score_estimator(estimator, df_test):
"""Score an estimator on the test set."""
y_pred = estimator.predict(df_test)
print(
"MSE: %.3f"
% mean_squared_error(
df_test["Frequency"], y_pred, sample_weight=df_test["Exposure"]
)
)
print(
"MAE: %.3f"
% mean_absolute_error(
df_test["Frequency"], y_pred, sample_weight=df_test["Exposure"]
)
)
# Ignore non-positive predictions, as they are invalid for
# the Poisson deviance.
mask = y_pred > 0
if (~mask).any():
n_masked, n_samples = (~mask).sum(), mask.shape[0]
print(
"WARNING: Estimator yields invalid, non-positive predictions "
f" for {n_masked} samples out of {n_samples}. These predictions "
"are ignored when computing the Poisson deviance."
)
print(
"mean Poisson deviance: %.3f"
% mean_poisson_deviance(
df_test["Frequency"][mask],
y_pred[mask],
sample_weight=df_test["Exposure"][mask],
)
)
|
Score an estimator on the test set.
|
score_estimator
|
python
|
scikit-learn/scikit-learn
|
examples/linear_model/plot_poisson_regression_non_normal_loss.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/examples/linear_model/plot_poisson_regression_non_normal_loss.py
|
BSD-3-Clause
|
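A minimal usage sketch for `score_estimator` (not part of the dataset row): it assumes the metric imports from the example (`mean_squared_error`, `mean_absolute_error`, `mean_poisson_deviance`) are in scope, and the synthetic DataFrame and pipeline below are illustrative only.

import numpy as np
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.linear_model import PoissonRegressor
from sklearn.pipeline import make_pipeline

rng = np.random.RandomState(0)
df_test = pd.DataFrame({
    "DrivAge": rng.uniform(18, 80, size=500),
    "Exposure": rng.uniform(0.1, 1.0, size=500),
})
df_test["Frequency"] = rng.poisson(0.2, size=500) / df_test["Exposure"]

# Wrap the regressor so that `predict` accepts the full DataFrame,
# as `score_estimator` expects.
model = make_pipeline(
    ColumnTransformer([("age", "passthrough", ["DrivAge"])]),
    PoissonRegressor(),
)
model.fit(
    df_test, df_test["Frequency"],
    poissonregressor__sample_weight=df_test["Exposure"],
)
score_estimator(model, df_test)  # prints MSE, MAE, mean Poisson deviance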
def _mean_frequency_by_risk_group(y_true, y_pred, sample_weight=None, n_bins=100):
"""Compare predictions and observations for bins ordered by y_pred.
    We order the samples by ``y_pred`` and split them into bins.
In each bin the observed mean is compared with the predicted mean.
Parameters
----------
y_true: array-like of shape (n_samples,)
Ground truth (correct) target values.
y_pred: array-like of shape (n_samples,)
Estimated target values.
sample_weight : array-like of shape (n_samples,)
Sample weights.
n_bins: int
Number of bins to use.
Returns
-------
bin_centers: ndarray of shape (n_bins,)
bin centers
y_true_bin: ndarray of shape (n_bins,)
        average y_true for each bin
y_pred_bin: ndarray of shape (n_bins,)
average y_pred for each bin
"""
idx_sort = np.argsort(y_pred)
bin_centers = np.arange(0, 1, 1 / n_bins) + 0.5 / n_bins
y_pred_bin = np.zeros(n_bins)
y_true_bin = np.zeros(n_bins)
for n, sl in enumerate(gen_even_slices(len(y_true), n_bins)):
weights = sample_weight[idx_sort][sl]
y_pred_bin[n] = np.average(y_pred[idx_sort][sl], weights=weights)
y_true_bin[n] = np.average(y_true[idx_sort][sl], weights=weights)
return bin_centers, y_true_bin, y_pred_bin
|
Compare predictions and observations for bins ordered by y_pred.
We order the samples by ``y_pred`` and split them into bins.
In each bin the observed mean is compared with the predicted mean.
Parameters
----------
y_true: array-like of shape (n_samples,)
Ground truth (correct) target values.
y_pred: array-like of shape (n_samples,)
Estimated target values.
sample_weight : array-like of shape (n_samples,)
Sample weights.
n_bins: int
Number of bins to use.
Returns
-------
bin_centers: ndarray of shape (n_bins,)
bin centers
y_true_bin: ndarray of shape (n_bins,)
average y_true for each bin
y_pred_bin: ndarray of shape (n_bins,)
average y_pred for each bin
|
_mean_frequency_by_risk_group
|
python
|
scikit-learn/scikit-learn
|
examples/linear_model/plot_poisson_regression_non_normal_loss.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/examples/linear_model/plot_poisson_regression_non_normal_loss.py
|
BSD-3-Clause
|
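A quick calling sketch (illustrative data, not from the example). Two assumptions are baked into the helper: `gen_even_slices` comes from `sklearn.utils`, and `sample_weight` is indexed inside the loop, so despite its `None` default it must be an array.

import numpy as np
from sklearn.utils import gen_even_slices  # required by the helper

rng = np.random.RandomState(0)
y_pred = rng.gamma(shape=2.0, scale=0.1, size=1000)  # hypothetical predictions
y_true = rng.poisson(y_pred).astype(float)           # matching observations
weights = rng.uniform(0.5, 1.0, size=1000)

centers, y_true_bin, y_pred_bin = _mean_frequency_by_risk_group(
    y_true, y_pred, sample_weight=weights, n_bins=10
)
# A well-calibrated model keeps y_true_bin close to y_pred_bin in every bin.
print(np.c_[centers, y_true_bin, y_pred_bin])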
def load_mnist(n_samples=None, class_0="0", class_1="8"):
"""Load MNIST, select two classes, shuffle and return only n_samples."""
# Load data from http://openml.org/d/554
mnist = fetch_openml("mnist_784", version=1, as_frame=False)
# take only two classes for binary classification
mask = np.logical_or(mnist.target == class_0, mnist.target == class_1)
X, y = shuffle(mnist.data[mask], mnist.target[mask], random_state=42)
if n_samples is not None:
X, y = X[:n_samples], y[:n_samples]
return X, y
|
Load MNIST, select two classes, shuffle and return only n_samples.
|
load_mnist
|
python
|
scikit-learn/scikit-learn
|
examples/linear_model/plot_sgd_early_stopping.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/examples/linear_model/plot_sgd_early_stopping.py
|
BSD-3-Clause
|
def fit_and_score(estimator, max_iter, X_train, X_test, y_train, y_test):
"""Fit the estimator on the train set and score it on both sets"""
estimator.set_params(max_iter=max_iter)
estimator.set_params(random_state=0)
start = time.time()
estimator.fit(X_train, y_train)
fit_time = time.time() - start
n_iter = estimator.n_iter_
train_score = estimator.score(X_train, y_train)
test_score = estimator.score(X_test, y_test)
return fit_time, n_iter, train_score, test_score
|
Fit the estimator on the train set and score it on both sets
|
fit_and_score
|
python
|
scikit-learn/scikit-learn
|
examples/linear_model/plot_sgd_early_stopping.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/examples/linear_model/plot_sgd_early_stopping.py
|
BSD-3-Clause
|
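A hedged usage sketch (not part of the row): the helper relies on the example's module-level `time` import and works with any estimator exposing `max_iter`, `random_state`, and `n_iter_`, such as `SGDClassifier`.

import time  # assumed module-level import, as in the example

from sklearn.datasets import load_digits
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import train_test_split

X, y = load_digits(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

fit_time, n_iter, train_score, test_score = fit_and_score(
    SGDClassifier(), 20, X_train, X_test, y_train, y_test
)
print(f"{n_iter} epochs in {fit_time:.2f}s, test accuracy {test_score:.3f}")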
def load_mtpl2(n_samples=None):
"""Fetch the French Motor Third-Party Liability Claims dataset.
Parameters
----------
n_samples: int, default=None
number of samples to select (for faster run time). Full dataset has
678013 samples.
"""
# freMTPL2freq dataset from https://www.openml.org/d/41214
df_freq = fetch_openml(data_id=41214, as_frame=True).data
df_freq["IDpol"] = df_freq["IDpol"].astype(int)
df_freq.set_index("IDpol", inplace=True)
# freMTPL2sev dataset from https://www.openml.org/d/41215
df_sev = fetch_openml(data_id=41215, as_frame=True).data
# sum ClaimAmount over identical IDs
df_sev = df_sev.groupby("IDpol").sum()
df = df_freq.join(df_sev, how="left")
df["ClaimAmount"] = df["ClaimAmount"].fillna(0)
# unquote string fields
for column_name in df.columns[[t is object for t in df.dtypes.values]]:
df[column_name] = df[column_name].str.strip("'")
return df.iloc[:n_samples]
|
Fetch the French Motor Third-Party Liability Claims dataset.
Parameters
----------
n_samples: int, default=None
number of samples to select (for faster run time). Full dataset has
678013 samples.
|
load_mtpl2
|
python
|
scikit-learn/scikit-learn
|
examples/linear_model/plot_tweedie_regression_insurance_claims.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/examples/linear_model/plot_tweedie_regression_insurance_claims.py
|
BSD-3-Clause
|
def plot_obs_pred(
df,
feature,
weight,
observed,
predicted,
y_label=None,
title=None,
ax=None,
fill_legend=False,
):
"""Plot observed and predicted - aggregated per feature level.
Parameters
----------
df : DataFrame
input data
feature: str
a column name of df for the feature to be plotted
weight : str
column name of df with the values of weights or exposure
observed : str
a column name of df with the observed target
predicted : DataFrame
a dataframe, with the same index as df, with the predicted target
fill_legend : bool, default=False
whether to show fill_between legend
"""
# aggregate observed and predicted variables by feature level
df_ = df.loc[:, [feature, weight]].copy()
df_["observed"] = df[observed] * df[weight]
df_["predicted"] = predicted * df[weight]
df_ = (
df_.groupby([feature])[[weight, "observed", "predicted"]]
.sum()
.assign(observed=lambda x: x["observed"] / x[weight])
.assign(predicted=lambda x: x["predicted"] / x[weight])
)
ax = df_.loc[:, ["observed", "predicted"]].plot(style=".", ax=ax)
y_max = df_.loc[:, ["observed", "predicted"]].values.max() * 0.8
p2 = ax.fill_between(
df_.index,
0,
y_max * df_[weight] / df_[weight].values.max(),
color="g",
alpha=0.1,
)
if fill_legend:
ax.legend([p2], ["{} distribution".format(feature)])
ax.set(
ylabel=y_label if y_label is not None else None,
title=title if title is not None else "Train: Observed vs Predicted",
)
|
Plot observed and predicted - aggregated per feature level.
Parameters
----------
df : DataFrame
input data
feature: str
a column name of df for the feature to be plotted
weight : str
column name of df with the values of weights or exposure
observed : str
a column name of df with the observed target
predicted : DataFrame
a dataframe, with the same index as df, with the predicted target
fill_legend : bool, default=False
whether to show fill_between legend
|
plot_obs_pred
|
python
|
scikit-learn/scikit-learn
|
examples/linear_model/plot_tweedie_regression_insurance_claims.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/examples/linear_model/plot_tweedie_regression_insurance_claims.py
|
BSD-3-Clause
|
def make_estimator(name, categorical_columns=None, iforest_kw=None, lof_kw=None):
"""Create an outlier detection estimator based on its name."""
if name == "LOF":
outlier_detector = LocalOutlierFactor(**(lof_kw or {}))
if categorical_columns is None:
preprocessor = RobustScaler()
else:
preprocessor = ColumnTransformer(
transformers=[("categorical", OneHotEncoder(), categorical_columns)],
remainder=RobustScaler(),
)
else: # name == "IForest"
outlier_detector = IsolationForest(**(iforest_kw or {}))
if categorical_columns is None:
preprocessor = None
else:
ordinal_encoder = OrdinalEncoder(
handle_unknown="use_encoded_value", unknown_value=-1
)
preprocessor = ColumnTransformer(
transformers=[
("categorical", ordinal_encoder, categorical_columns),
],
remainder="passthrough",
)
return make_pipeline(preprocessor, outlier_detector)
|
Create an outlier detection estimator based on its name.
|
make_estimator
|
python
|
scikit-learn/scikit-learn
|
examples/miscellaneous/plot_outlier_detection_bench.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/examples/miscellaneous/plot_outlier_detection_bench.py
|
BSD-3-Clause
|
def plot_cv_indices(cv, X, y, group, ax, n_splits, lw=10):
"""Create a sample plot for indices of a cross-validation object."""
use_groups = "Group" in type(cv).__name__
groups = group if use_groups else None
# Generate the training/testing visualizations for each CV split
for ii, (tr, tt) in enumerate(cv.split(X=X, y=y, groups=groups)):
# Fill in indices with the training/test groups
indices = np.array([np.nan] * len(X))
indices[tt] = 1
indices[tr] = 0
# Visualize the results
ax.scatter(
range(len(indices)),
[ii + 0.5] * len(indices),
c=indices,
marker="_",
lw=lw,
cmap=cmap_cv,
vmin=-0.2,
vmax=1.2,
)
# Plot the data classes and groups at the end
ax.scatter(
range(len(X)), [ii + 1.5] * len(X), c=y, marker="_", lw=lw, cmap=cmap_data
)
ax.scatter(
range(len(X)), [ii + 2.5] * len(X), c=group, marker="_", lw=lw, cmap=cmap_data
)
# Formatting
yticklabels = list(range(n_splits)) + ["class", "group"]
ax.set(
yticks=np.arange(n_splits + 2) + 0.5,
yticklabels=yticklabels,
xlabel="Sample index",
ylabel="CV iteration",
ylim=[n_splits + 2.2, -0.2],
xlim=[0, 100],
)
ax.set_title("{}".format(type(cv).__name__), fontsize=15)
return ax
|
Create a sample plot for indices of a cross-validation object.
|
plot_cv_indices
|
python
|
scikit-learn/scikit-learn
|
examples/model_selection/plot_cv_indices.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/examples/model_selection/plot_cv_indices.py
|
BSD-3-Clause
|
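A usage sketch for `plot_cv_indices`; the colormaps `cmap_cv` and `cmap_data` are module-level globals in the example, so they are (re)defined here as an assumption, and the data is synthetic.

import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import KFold

cmap_data = plt.cm.Paired   # assumed definitions of the example's globals
cmap_cv = plt.cm.coolwarm

rng = np.random.RandomState(0)
n_points = 100
X = rng.randn(n_points, 10)
y = np.repeat([0, 1], n_points // 2)
group = np.sort(rng.randint(0, 10, n_points))

fig, ax = plt.subplots(figsize=(6, 3))
plot_cv_indices(KFold(n_splits=4), X, y, group, ax, n_splits=4)
plt.show()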
def refit_strategy(cv_results):
"""Define the strategy to select the best estimator.
    The strategy defined here is to filter out all results below a precision
    threshold of 0.98, rank the remaining by recall, and keep all models within
    one standard deviation of the best recall. From these models, we select the
    one that is fastest to predict.
Parameters
----------
cv_results : dict of numpy (masked) ndarrays
CV results as returned by the `GridSearchCV`.
Returns
-------
best_index : int
The index of the best estimator as it appears in `cv_results`.
"""
# print the info about the grid-search for the different scores
precision_threshold = 0.98
cv_results_ = pd.DataFrame(cv_results)
print("All grid-search results:")
print_dataframe(cv_results_)
    # Filter out all results below the threshold
high_precision_cv_results = cv_results_[
cv_results_["mean_test_precision"] > precision_threshold
]
print(f"Models with a precision higher than {precision_threshold}:")
print_dataframe(high_precision_cv_results)
high_precision_cv_results = high_precision_cv_results[
[
"mean_score_time",
"mean_test_recall",
"std_test_recall",
"mean_test_precision",
"std_test_precision",
"rank_test_recall",
"rank_test_precision",
"params",
]
]
# Select the most performant models in terms of recall
# (within 1 sigma from the best)
best_recall_std = high_precision_cv_results["mean_test_recall"].std()
best_recall = high_precision_cv_results["mean_test_recall"].max()
best_recall_threshold = best_recall - best_recall_std
high_recall_cv_results = high_precision_cv_results[
high_precision_cv_results["mean_test_recall"] > best_recall_threshold
]
print(
"Out of the previously selected high precision models, we keep all the\n"
"the models within one standard deviation of the highest recall model:"
)
print_dataframe(high_recall_cv_results)
# From the best candidates, select the fastest model to predict
fastest_top_recall_high_precision_index = high_recall_cv_results[
"mean_score_time"
].idxmin()
print(
"\nThe selected final model is the fastest to predict out of the previously\n"
"selected subset of best models based on precision and recall.\n"
"Its scoring time is:\n\n"
f"{high_recall_cv_results.loc[fastest_top_recall_high_precision_index]}"
)
return fastest_top_recall_high_precision_index
|
Define the strategy to select the best estimator.
The strategy defined here is to filter out all results below a precision
threshold of 0.98, rank the remaining by recall, and keep all models within
one standard deviation of the best recall. From these models, we select the
one that is fastest to predict.
Parameters
----------
cv_results : dict of numpy (masked) ndarrays
CV results as returned by the `GridSearchCV`.
Returns
-------
best_index : int
The index of the best estimator as it appears in `cv_results`.
|
refit_strategy
|
python
|
scikit-learn/scikit-learn
|
examples/model_selection/plot_grid_search_digits.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/examples/model_selection/plot_grid_search_digits.py
|
BSD-3-Clause
|
def lower_bound(cv_results):
"""
Calculate the lower bound within 1 standard deviation
    of the best `mean_test_score`.
Parameters
----------
    cv_results : dict of numpy (masked) ndarrays
See attribute cv_results_ of `GridSearchCV`
Returns
-------
float
Lower bound within 1 standard deviation of the
best `mean_test_score`.
"""
best_score_idx = np.argmax(cv_results["mean_test_score"])
return (
cv_results["mean_test_score"][best_score_idx]
- cv_results["std_test_score"][best_score_idx]
)
|
Calculate the lower bound within 1 standard deviation
of the best `mean_test_score`.
Parameters
----------
cv_results : dict of numpy (masked) ndarrays
See attribute cv_results_ of `GridSearchCV`
Returns
-------
float
Lower bound within 1 standard deviation of the
best `mean_test_score`.
|
lower_bound
|
python
|
scikit-learn/scikit-learn
|
examples/model_selection/plot_grid_search_refit_callable.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/examples/model_selection/plot_grid_search_refit_callable.py
|
BSD-3-Clause
|
def best_low_complexity(cv_results):
"""
Balance model complexity with cross-validated score.
Parameters
----------
    cv_results : dict of numpy (masked) ndarrays
        See attribute cv_results_ of `GridSearchCV`.
    Returns
    -------
    int
        Index of the model that has the fewest PCA components
        while its test score is within 1 standard deviation of the best
        `mean_test_score`.
"""
threshold = lower_bound(cv_results)
candidate_idx = np.flatnonzero(cv_results["mean_test_score"] >= threshold)
best_idx = candidate_idx[
cv_results["param_reduce_dim__n_components"][candidate_idx].argmin()
]
return best_idx
|
Balance model complexity with cross-validated score.
Parameters
----------
cv_results : dict of numpy (masked) ndarrays
See attribute cv_results_ of `GridSearchCV`.
Returns
-------
int
Index of the model that has the fewest PCA components
while its test score is within 1 standard deviation of the best
`mean_test_score`.
|
best_low_complexity
|
python
|
scikit-learn/scikit-learn
|
examples/model_selection/plot_grid_search_refit_callable.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/examples/model_selection/plot_grid_search_refit_callable.py
|
BSD-3-Clause
|
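A worked toy check of the two refit callables above; the `cv_results` dict is fabricated for illustration. With scores 0.90/0.92/0.93 and a 0.02 standard deviation on the best model, the bound is 0.91, so the 8-component model is the simplest one within one standard deviation of the best.

import numpy as np

cv_results = {
    "mean_test_score": np.array([0.90, 0.92, 0.93]),
    "std_test_score": np.array([0.02, 0.02, 0.02]),
    "param_reduce_dim__n_components": np.array([4, 8, 16]),
}
print(lower_bound(cv_results))          # 0.91
print(best_low_complexity(cv_results))  # 1 -> the 8-component model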
def corrected_std(differences, n_train, n_test):
"""Corrects standard deviation using Nadeau and Bengio's approach.
Parameters
----------
differences : ndarray of shape (n_samples,)
Vector containing the differences in the score metrics of two models.
n_train : int
Number of samples in the training set.
n_test : int
Number of samples in the testing set.
Returns
-------
corrected_std : float
Variance-corrected standard deviation of the set of differences.
"""
    # kr = k times r, for r-times repeated k-fold cross-validation,
# kr equals the number of times the model was evaluated
kr = len(differences)
corrected_var = np.var(differences, ddof=1) * (1 / kr + n_test / n_train)
corrected_std = np.sqrt(corrected_var)
return corrected_std
|
Corrects standard deviation using Nadeau and Bengio's approach.
Parameters
----------
differences : ndarray of shape (n_samples,)
Vector containing the differences in the score metrics of two models.
n_train : int
Number of samples in the training set.
n_test : int
Number of samples in the testing set.
Returns
-------
corrected_std : float
Variance-corrected standard deviation of the set of differences.
|
corrected_std
|
python
|
scikit-learn/scikit-learn
|
examples/model_selection/plot_grid_search_stats.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/examples/model_selection/plot_grid_search_stats.py
|
BSD-3-Clause
|
def compute_corrected_ttest(differences, df, n_train, n_test):
"""Computes right-tailed paired t-test with corrected variance.
Parameters
----------
differences : array-like of shape (n_samples,)
Vector containing the differences in the score metrics of two models.
df : int
Degrees of freedom.
n_train : int
Number of samples in the training set.
n_test : int
Number of samples in the testing set.
Returns
-------
t_stat : float
Variance-corrected t-statistic.
p_val : float
Variance-corrected p-value.
"""
mean = np.mean(differences)
std = corrected_std(differences, n_train, n_test)
t_stat = mean / std
p_val = t.sf(np.abs(t_stat), df) # right-tailed t-test
return t_stat, p_val
|
Computes a right-tailed paired t-test with corrected variance.
Parameters
----------
differences : array-like of shape (n_samples,)
Vector containing the differences in the score metrics of two models.
df : int
Degrees of freedom.
n_train : int
Number of samples in the training set.
n_test : int
Number of samples in the testing set.
Returns
-------
t_stat : float
Variance-corrected t-statistic.
p_val : float
Variance-corrected p-value.
|
compute_corrected_ttest
|
python
|
scikit-learn/scikit-learn
|
examples/model_selection/plot_grid_search_stats.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/examples/model_selection/plot_grid_search_stats.py
|
BSD-3-Clause
|
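A short sketch tying the two statistics together (fabricated score differences; `t` is assumed imported from `scipy.stats`, as in the example). For 10x-repeated 10-fold CV on 1000 samples there are 100 evaluations, and the `n_test / n_train = 1/9` term inflates the naive variance to account for overlapping training sets.

import numpy as np
from scipy.stats import t  # assumed by compute_corrected_ttest

rng = np.random.RandomState(0)
differences = rng.normal(loc=0.01, scale=0.02, size=100)  # fabricated
n_train, n_test = 900, 100  # per-split sizes, assumed for illustration

dof = len(differences) - 1
print(f"corrected std: {corrected_std(differences, n_train, n_test):.4f}")
t_stat, p_val = compute_corrected_ttest(differences, dof, n_train, n_test)
print(f"t = {t_stat:.3f}, p = {p_val:.3f}")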
def load_mnist(n_samples):
"""Load MNIST, shuffle the data, and return only n_samples."""
mnist = fetch_openml("mnist_784", as_frame=False)
X, y = shuffle(mnist.data, mnist.target, random_state=2)
return X[:n_samples] / 255, y[:n_samples]
|
Load MNIST, shuffle the data, and return only n_samples.
|
load_mnist
|
python
|
scikit-learn/scikit-learn
|
examples/neighbors/approximate_nearest_neighbors.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/examples/neighbors/approximate_nearest_neighbors.py
|
BSD-3-Clause
|
def construct_grids(batch):
"""Construct the map grid from the batch object
Parameters
----------
batch : Batch object
The object returned by :func:`fetch_species_distributions`
Returns
-------
(xgrid, ygrid) : 1-D arrays
The grid corresponding to the values in batch.coverages
"""
# x,y coordinates for corner cells
xmin = batch.x_left_lower_corner + batch.grid_size
xmax = xmin + (batch.Nx * batch.grid_size)
ymin = batch.y_left_lower_corner + batch.grid_size
ymax = ymin + (batch.Ny * batch.grid_size)
# x coordinates of the grid cells
xgrid = np.arange(xmin, xmax, batch.grid_size)
# y coordinates of the grid cells
ygrid = np.arange(ymin, ymax, batch.grid_size)
return (xgrid, ygrid)
|
Construct the map grid from the batch object
Parameters
----------
batch : Batch object
The object returned by :func:`fetch_species_distributions`
Returns
-------
(xgrid, ygrid) : 1-D arrays
The grid corresponding to the values in batch.coverages
|
construct_grids
|
python
|
scikit-learn/scikit-learn
|
examples/neighbors/plot_species_kde.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/examples/neighbors/plot_species_kde.py
|
BSD-3-Clause
|
def nudge_dataset(X, Y):
"""
    This produces a dataset 5 times bigger than the original one,
    by moving the 8x8 images in X around by 1px to the left, right, down, and up
"""
direction_vectors = [
[[0, 1, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [1, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 1], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 1, 0]],
]
def shift(x, w):
return convolve(x.reshape((8, 8)), mode="constant", weights=w).ravel()
X = np.concatenate(
[X] + [np.apply_along_axis(shift, 1, X, vector) for vector in direction_vectors]
)
Y = np.concatenate([Y for _ in range(5)], axis=0)
return X, Y
|
This produces a dataset 5 times bigger than the original one,
by moving the 8x8 images in X around by 1px to the left, right, down, and up
|
nudge_dataset
|
python
|
scikit-learn/scikit-learn
|
examples/neural_networks/plot_rbm_logistic_classification.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/examples/neural_networks/plot_rbm_logistic_classification.py
|
BSD-3-Clause
|
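A usage sketch with the digits dataset (assuming `np` and scipy's `convolve` are in scope, as in the example): four shifted copies plus the original give five times the samples.

from sklearn.datasets import load_digits

X, y = load_digits(return_X_y=True)
X_big, y_big = nudge_dataset(X, y)
print(X.shape, "->", X_big.shape)  # (1797, 64) -> (8985, 64)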
def levenshtein_distance(x, y):
"""Return the Levenshtein distance between two strings."""
if x == "" or y == "":
return max(len(x), len(y))
if x[0] == y[0]:
return levenshtein_distance(x[1:], y[1:])
return 1 + min(
levenshtein_distance(x[1:], y),
levenshtein_distance(x, y[1:]),
levenshtein_distance(x[1:], y[1:]),
)
|
Return the Levenshtein distance between two strings.
|
levenshtein_distance
|
python
|
scikit-learn/scikit-learn
|
examples/release_highlights/plot_release_highlights_1_5_0.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/examples/release_highlights/plot_release_highlights_1_5_0.py
|
BSD-3-Clause
|
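The plain recursion above re-solves the same subproblems and is exponential in the string lengths. A memoized variant (an addition for illustration, not part of the release-highlights example) runs in O(len(x) * len(y)):

from functools import lru_cache

@lru_cache(maxsize=None)
def levenshtein_distance_cached(x, y):
    # Same recurrence as above; lru_cache memoizes each (x, y) pair.
    if x == "" or y == "":
        return max(len(x), len(y))
    if x[0] == y[0]:
        return levenshtein_distance_cached(x[1:], y[1:])
    return 1 + min(
        levenshtein_distance_cached(x[1:], y),
        levenshtein_distance_cached(x, y[1:]),
        levenshtein_distance_cached(x[1:], y[1:]),
    )

assert levenshtein_distance_cached("kitten", "sitting") == 3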
def plot_decision_function(classifier, sample_weight, axis, title):
"""Plot the synthetic data and the classifier decision function. Points with
larger sample_weight are mapped to larger circles in the scatter plot."""
axis.scatter(
X_plot[:, 0],
X_plot[:, 1],
c=y_plot,
s=100 * sample_weight[plot_indices],
alpha=0.9,
cmap=plt.cm.bone,
edgecolors="black",
)
DecisionBoundaryDisplay.from_estimator(
classifier,
X_plot,
response_method="decision_function",
alpha=0.75,
ax=axis,
cmap=plt.cm.bone,
)
axis.axis("off")
axis.set_title(title)
|
Plot the synthetic data and the classifier decision function. Points with
larger sample_weight are mapped to larger circles in the scatter plot.
|
plot_decision_function
|
python
|
scikit-learn/scikit-learn
|
examples/svm/plot_weighted_samples.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/examples/svm/plot_weighted_samples.py
|
BSD-3-Clause
|
def load_dataset(verbose=False, remove=()):
"""Load and vectorize the 20 newsgroups dataset."""
data_train = fetch_20newsgroups(
subset="train",
categories=categories,
shuffle=True,
random_state=42,
remove=remove,
)
data_test = fetch_20newsgroups(
subset="test",
categories=categories,
shuffle=True,
random_state=42,
remove=remove,
)
# order of labels in `target_names` can be different from `categories`
target_names = data_train.target_names
# split target in a training set and a test set
y_train, y_test = data_train.target, data_test.target
# Extracting features from the training data using a sparse vectorizer
t0 = time()
vectorizer = TfidfVectorizer(
sublinear_tf=True, max_df=0.5, min_df=5, stop_words="english"
)
X_train = vectorizer.fit_transform(data_train.data)
duration_train = time() - t0
# Extracting features from the test data using the same vectorizer
t0 = time()
X_test = vectorizer.transform(data_test.data)
duration_test = time() - t0
feature_names = vectorizer.get_feature_names_out()
if verbose:
# compute size of loaded data
data_train_size_mb = size_mb(data_train.data)
data_test_size_mb = size_mb(data_test.data)
print(
f"{len(data_train.data)} documents - "
f"{data_train_size_mb:.2f}MB (training set)"
)
print(f"{len(data_test.data)} documents - {data_test_size_mb:.2f}MB (test set)")
print(f"{len(target_names)} categories")
print(
f"vectorize training done in {duration_train:.3f}s "
f"at {data_train_size_mb / duration_train:.3f}MB/s"
)
print(f"n_samples: {X_train.shape[0]}, n_features: {X_train.shape[1]}")
print(
f"vectorize testing done in {duration_test:.3f}s "
f"at {data_test_size_mb / duration_test:.3f}MB/s"
)
print(f"n_samples: {X_test.shape[0]}, n_features: {X_test.shape[1]}")
return X_train, X_test, y_train, y_test, feature_names, target_names
|
Load and vectorize the 20 newsgroups dataset.
|
load_dataset
|
python
|
scikit-learn/scikit-learn
|
examples/text/plot_document_classification_20newsgroups.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/examples/text/plot_document_classification_20newsgroups.py
|
BSD-3-Clause
|
def token_freqs(doc):
"""Extract a dict mapping tokens from doc to their occurrences."""
freq = defaultdict(int)
for tok in tokenize(doc):
freq[tok] += 1
return freq
|
Extract a dict mapping tokens from doc to their occurrences.
|
token_freqs
|
python
|
scikit-learn/scikit-learn
|
examples/text/plot_hashing_vs_dict_vectorizer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/examples/text/plot_hashing_vs_dict_vectorizer.py
|
BSD-3-Clause
|
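For reference, `collections.Counter` expresses the same loop in one line; this sketch assumes the example's `tokenize` helper is in scope.

from collections import Counter

def token_freqs_counter(doc):
    # Equivalent to token_freqs: Counter counts the token stream directly.
    return Counter(tokenize(doc))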
def clone(estimator, *, safe=True):
"""Construct a new unfitted estimator with the same parameters.
Clone does a deep copy of the model in an estimator
without actually copying attached data. It returns a new estimator
with the same parameters that has not been fitted on any data.
.. versionchanged:: 1.3
Delegates to `estimator.__sklearn_clone__` if the method exists.
Parameters
----------
estimator : {list, tuple, set} of estimator instance or a single \
estimator instance
The estimator or group of estimators to be cloned.
safe : bool, default=True
If safe is False, clone will fall back to a deep copy on objects
that are not estimators. Ignored if `estimator.__sklearn_clone__`
exists.
Returns
-------
estimator : object
The deep copy of the input, an estimator if input is an estimator.
Notes
-----
If the estimator's `random_state` parameter is an integer (or if the
estimator doesn't have a `random_state` parameter), an *exact clone* is
returned: the clone and the original estimator will give the exact same
results. Otherwise, *statistical clone* is returned: the clone might
return different results from the original estimator. More details can be
found in :ref:`randomness`.
Examples
--------
>>> from sklearn.base import clone
>>> from sklearn.linear_model import LogisticRegression
>>> X = [[-1, 0], [0, 1], [0, -1], [1, 0]]
>>> y = [0, 0, 1, 1]
>>> classifier = LogisticRegression().fit(X, y)
>>> cloned_classifier = clone(classifier)
>>> hasattr(classifier, "classes_")
True
>>> hasattr(cloned_classifier, "classes_")
False
>>> classifier is cloned_classifier
False
"""
if hasattr(estimator, "__sklearn_clone__") and not inspect.isclass(estimator):
return estimator.__sklearn_clone__()
return _clone_parametrized(estimator, safe=safe)
|
Construct a new unfitted estimator with the same parameters.
Clone does a deep copy of the model in an estimator
without actually copying attached data. It returns a new estimator
with the same parameters that has not been fitted on any data.
.. versionchanged:: 1.3
Delegates to `estimator.__sklearn_clone__` if the method exists.
Parameters
----------
estimator : {list, tuple, set} of estimator instance or a single estimator instance
The estimator or group of estimators to be cloned.
safe : bool, default=True
If safe is False, clone will fall back to a deep copy on objects
that are not estimators. Ignored if `estimator.__sklearn_clone__`
exists.
Returns
-------
estimator : object
The deep copy of the input, an estimator if input is an estimator.
Notes
-----
If the estimator's `random_state` parameter is an integer (or if the
estimator doesn't have a `random_state` parameter), an *exact clone* is
returned: the clone and the original estimator will give the exact same
results. Otherwise, *statistical clone* is returned: the clone might
return different results from the original estimator. More details can be
found in :ref:`randomness`.
Examples
--------
>>> from sklearn.base import clone
>>> from sklearn.linear_model import LogisticRegression
>>> X = [[-1, 0], [0, 1], [0, -1], [1, 0]]
>>> y = [0, 0, 1, 1]
>>> classifier = LogisticRegression().fit(X, y)
>>> cloned_classifier = clone(classifier)
>>> hasattr(classifier, "classes_")
True
>>> hasattr(cloned_classifier, "classes_")
False
>>> classifier is cloned_classifier
False
|
clone
|
python
|
scikit-learn/scikit-learn
|
sklearn/base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py
|
BSD-3-Clause
|
def _clone_parametrized(estimator, *, safe=True):
"""Default implementation of clone. See :func:`sklearn.base.clone` for details."""
estimator_type = type(estimator)
if estimator_type is dict:
return {k: clone(v, safe=safe) for k, v in estimator.items()}
elif estimator_type in (list, tuple, set, frozenset):
return estimator_type([clone(e, safe=safe) for e in estimator])
elif not hasattr(estimator, "get_params") or isinstance(estimator, type):
if not safe:
return copy.deepcopy(estimator)
else:
if isinstance(estimator, type):
raise TypeError(
"Cannot clone object. "
"You should provide an instance of "
"scikit-learn estimator instead of a class."
)
else:
raise TypeError(
"Cannot clone object '%s' (type %s): "
"it does not seem to be a scikit-learn "
"estimator as it does not implement a "
"'get_params' method." % (repr(estimator), type(estimator))
)
klass = estimator.__class__
new_object_params = estimator.get_params(deep=False)
for name, param in new_object_params.items():
new_object_params[name] = clone(param, safe=False)
new_object = klass(**new_object_params)
try:
new_object._metadata_request = copy.deepcopy(estimator._metadata_request)
except AttributeError:
pass
params_set = new_object.get_params(deep=False)
# quick sanity check of the parameters of the clone
for name in new_object_params:
param1 = new_object_params[name]
param2 = params_set[name]
if param1 is not param2:
raise RuntimeError(
"Cannot clone object %s, as the constructor "
"either does not set or modifies parameter %s" % (estimator, name)
)
# _sklearn_output_config is used by `set_output` to configure the output
# container of an estimator.
if hasattr(estimator, "_sklearn_output_config"):
new_object._sklearn_output_config = copy.deepcopy(
estimator._sklearn_output_config
)
return new_object
|
Default implementation of clone. See :func:`sklearn.base.clone` for details.
|
_clone_parametrized
|
python
|
scikit-learn/scikit-learn
|
sklearn/base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py
|
BSD-3-Clause
|
def _get_param_names(cls):
"""Get parameter names for the estimator"""
# fetch the constructor or the original constructor before
# deprecation wrapping if any
init = getattr(cls.__init__, "deprecated_original", cls.__init__)
if init is object.__init__:
# No explicit constructor to introspect
return []
# introspect the constructor arguments to find the model parameters
# to represent
init_signature = inspect.signature(init)
# Consider the constructor parameters excluding 'self'
parameters = [
p
for p in init_signature.parameters.values()
if p.name != "self" and p.kind != p.VAR_KEYWORD
]
for p in parameters:
if p.kind == p.VAR_POSITIONAL:
raise RuntimeError(
"scikit-learn estimators should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s with constructor %s doesn't "
" follow this convention." % (cls, init_signature)
)
# Extract and sort argument names excluding 'self'
return sorted([p.name for p in parameters])
|
Get parameter names for the estimator
|
_get_param_names
|
python
|
scikit-learn/scikit-learn
|
sklearn/base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py
|
BSD-3-Clause
|
def get_params(self, deep=True):
"""
Get parameters for this estimator.
Parameters
----------
deep : bool, default=True
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : dict
Parameter names mapped to their values.
"""
out = dict()
for key in self._get_param_names():
value = getattr(self, key)
if deep and hasattr(value, "get_params") and not isinstance(value, type):
deep_items = value.get_params().items()
out.update((key + "__" + k, val) for k, val in deep_items)
out[key] = value
return out
|
Get parameters for this estimator.
Parameters
----------
deep : bool, default=True
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : dict
Parameter names mapped to their values.
|
get_params
|
python
|
scikit-learn/scikit-learn
|
sklearn/base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py
|
BSD-3-Clause
|
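A short illustration of the `deep` flag: nested estimators contribute their parameters under the `<component>__<parameter>` convention used throughout scikit-learn.

from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

pipe = Pipeline([("scale", StandardScaler()), ("clf", SVC(C=10))])
print(pipe.get_params(deep=True)["clf__C"])     # 10, from the SVC step
print("clf__C" in pipe.get_params(deep=False))  # False, shallow view only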
def _get_params_html(self, deep=True):
"""
Get parameters for this estimator with a specific HTML representation.
Parameters
----------
deep : bool, default=True
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : ParamsDict
Parameter names mapped to their values. We return a `ParamsDict`
dictionary, which renders a specific HTML representation in table
form.
"""
out = self.get_params(deep=deep)
init_func = getattr(self.__init__, "deprecated_original", self.__init__)
init_default_params = inspect.signature(init_func).parameters
init_default_params = {
name: param.default for name, param in init_default_params.items()
}
def is_non_default(param_name, param_value):
"""Finds the parameters that have been set by the user."""
if param_name not in init_default_params:
                # happens if param_name is part of **kwargs
                return True
            if init_default_params[param_name] == inspect._empty:
                # param_name has no default value
return True
# avoid calling repr on nested estimators
if isinstance(param_value, BaseEstimator) and type(param_value) is not type(
init_default_params[param_name]
):
return True
if param_value != init_default_params[param_name] and not (
is_scalar_nan(init_default_params[param_name])
and is_scalar_nan(param_value)
):
return True
return False
# reorder the parameters from `self.get_params` using the `__init__`
# signature
remaining_params = [name for name in out if name not in init_default_params]
ordered_out = {name: out[name] for name in init_default_params if name in out}
ordered_out.update({name: out[name] for name in remaining_params})
non_default_ls = tuple(
[name for name, value in ordered_out.items() if is_non_default(name, value)]
)
return ParamsDict(ordered_out, non_default=non_default_ls)
|
Get parameters for this estimator with a specific HTML representation.
Parameters
----------
deep : bool, default=True
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : ParamsDict
Parameter names mapped to their values. We return a `ParamsDict`
dictionary, which renders a specific HTML representation in table
form.
|
_get_params_html
|
python
|
scikit-learn/scikit-learn
|
sklearn/base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py
|
BSD-3-Clause
|
def is_non_default(param_name, param_value):
"""Finds the parameters that have been set by the user."""
if param_name not in init_default_params:
        # happens if param_name is part of **kwargs
        return True
    if init_default_params[param_name] == inspect._empty:
        # param_name has no default value
return True
# avoid calling repr on nested estimators
if isinstance(param_value, BaseEstimator) and type(param_value) is not type(
init_default_params[param_name]
):
return True
if param_value != init_default_params[param_name] and not (
is_scalar_nan(init_default_params[param_name])
and is_scalar_nan(param_value)
):
return True
return False
|
Finds the parameters that have been set by the user.
|
is_non_default
|
python
|
scikit-learn/scikit-learn
|
sklearn/base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py
|
BSD-3-Clause
|
def set_params(self, **params):
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as :class:`~sklearn.pipeline.Pipeline`). The latter have
parameters of the form ``<component>__<parameter>`` so that it's
possible to update each component of a nested object.
Parameters
----------
**params : dict
Estimator parameters.
Returns
-------
self : estimator instance
Estimator instance.
"""
if not params:
# Simple optimization to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
nested_params = defaultdict(dict) # grouped by prefix
for key, value in params.items():
key, delim, sub_key = key.partition("__")
if key not in valid_params:
local_valid_params = self._get_param_names()
raise ValueError(
f"Invalid parameter {key!r} for estimator {self}. "
f"Valid parameters are: {local_valid_params!r}."
)
if delim:
nested_params[key][sub_key] = value
else:
setattr(self, key, value)
valid_params[key] = value
for key, sub_params in nested_params.items():
valid_params[key].set_params(**sub_params)
return self
|
Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as :class:`~sklearn.pipeline.Pipeline`). The latter have
parameters of the form ``<component>__<parameter>`` so that it's
possible to update each component of a nested object.
Parameters
----------
**params : dict
Estimator parameters.
Returns
-------
self : estimator instance
Estimator instance.
|
set_params
|
python
|
scikit-learn/scikit-learn
|
sklearn/base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py
|
BSD-3-Clause
|
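A sketch of the nested-update convention described in the docstring:

from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

pipe = Pipeline([("scale", StandardScaler()), ("clf", SVC())])
pipe.set_params(clf__C=0.5, clf__kernel="linear")  # routed to the SVC step
print(pipe.named_steps["clf"].C, pipe.named_steps["clf"].kernel)  # 0.5 linear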
def _validate_params(self):
"""Validate types and values of constructor parameters
The expected type and values must be defined in the `_parameter_constraints`
class attribute, which is a dictionary `param_name: list of constraints`. See
the docstring of `validate_parameter_constraints` for a description of the
accepted constraints.
"""
validate_parameter_constraints(
self._parameter_constraints,
self.get_params(deep=False),
caller_name=self.__class__.__name__,
)
|
Validate types and values of constructor parameters
The expected type and values must be defined in the `_parameter_constraints`
class attribute, which is a dictionary `param_name: list of constraints`. See
the docstring of `validate_parameter_constraints` for a description of the
accepted constraints.
|
_validate_params
|
python
|
scikit-learn/scikit-learn
|
sklearn/base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py
|
BSD-3-Clause
|
def score(self, X, y, sample_weight=None):
"""
Return :ref:`accuracy <accuracy_score>` on provided data and labels.
In multi-label classification, this is the subset accuracy
which is a harsh metric since you require for each sample that
each label set be correctly predicted.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test samples.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
True labels for `X`.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
Mean accuracy of ``self.predict(X)`` w.r.t. `y`.
"""
from .metrics import accuracy_score
return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
|
Return :ref:`accuracy <accuracy_score>` on provided data and labels.
In multi-label classification, this is the subset accuracy
which is a harsh metric since you require for each sample that
each label set be correctly predicted.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test samples.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
True labels for `X`.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
Mean accuracy of ``self.predict(X)`` w.r.t. `y`.
|
score
|
python
|
scikit-learn/scikit-learn
|
sklearn/base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py
|
BSD-3-Clause
|
def score(self, X, y, sample_weight=None):
"""Return :ref:`coefficient of determination <r2_score>` on test data.
The coefficient of determination, :math:`R^2`, is defined as
:math:`(1 - \\frac{u}{v})`, where :math:`u` is the residual
        sum of squares ``((y_true - y_pred) ** 2).sum()`` and :math:`v`
is the total sum of squares ``((y_true - y_true.mean()) ** 2).sum()``.
The best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always predicts
the expected value of `y`, disregarding the input features, would get
a :math:`R^2` score of 0.0.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test samples. For some estimators this may be a precomputed
kernel matrix or a list of generic objects instead with shape
``(n_samples, n_samples_fitted)``, where ``n_samples_fitted``
is the number of samples used in the fitting for the estimator.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
True values for `X`.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
:math:`R^2` of ``self.predict(X)`` w.r.t. `y`.
Notes
-----
The :math:`R^2` score used when calling ``score`` on a regressor uses
``multioutput='uniform_average'`` from version 0.23 to keep consistent
with default value of :func:`~sklearn.metrics.r2_score`.
This influences the ``score`` method of all the multioutput
regressors (except for
:class:`~sklearn.multioutput.MultiOutputRegressor`).
"""
from .metrics import r2_score
y_pred = self.predict(X)
return r2_score(y, y_pred, sample_weight=sample_weight)
|
Return :ref:`coefficient of determination <r2_score>` on test data.
The coefficient of determination, :math:`R^2`, is defined as
:math:`(1 - \frac{u}{v})`, where :math:`u` is the residual
sum of squares ``((y_true - y_pred) ** 2).sum()`` and :math:`v`
is the total sum of squares ``((y_true - y_true.mean()) ** 2).sum()``.
The best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always predicts
the expected value of `y`, disregarding the input features, would get
a :math:`R^2` score of 0.0.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test samples. For some estimators this may be a precomputed
kernel matrix or a list of generic objects instead with shape
``(n_samples, n_samples_fitted)``, where ``n_samples_fitted``
is the number of samples used in the fitting for the estimator.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
True values for `X`.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
:math:`R^2` of ``self.predict(X)`` w.r.t. `y`.
Notes
-----
The :math:`R^2` score used when calling ``score`` on a regressor uses
``multioutput='uniform_average'`` from version 0.23 to keep consistent
with default value of :func:`~sklearn.metrics.r2_score`.
This influences the ``score`` method of all the multioutput
regressors (except for
:class:`~sklearn.multioutput.MultiOutputRegressor`).
|
score
|
python
|
scikit-learn/scikit-learn
|
sklearn/base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py
|
BSD-3-Clause
|
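A worked check of the formula in the docstring (toy data, for illustration): `score` returns exactly `1 - u/v`.

import numpy as np
from sklearn.linear_model import LinearRegression

X = np.array([[1.0], [2.0], [3.0], [4.0]])
y = np.array([2.0, 4.1, 5.9, 8.0])
reg = LinearRegression().fit(X, y)

y_pred = reg.predict(X)
u = ((y - y_pred) ** 2).sum()    # residual sum of squares
v = ((y - y.mean()) ** 2).sum()  # total sum of squares
assert np.isclose(reg.score(X, y), 1 - u / v)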
def fit_predict(self, X, y=None, **kwargs):
"""
        Perform clustering on `X` and return cluster labels.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
y : Ignored
Not used, present for API consistency by convention.
**kwargs : dict
Arguments to be passed to ``fit``.
.. versionadded:: 1.4
Returns
-------
labels : ndarray of shape (n_samples,), dtype=np.int64
Cluster labels.
"""
# non-optimized default implementation; override when a better
# method is possible for a given clustering algorithm
self.fit(X, **kwargs)
return self.labels_
|
Perform clustering on `X` and return cluster labels.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
y : Ignored
Not used, present for API consistency by convention.
**kwargs : dict
Arguments to be passed to ``fit``.
.. versionadded:: 1.4
Returns
-------
labels : ndarray of shape (n_samples,), dtype=np.int64
Cluster labels.
|
fit_predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py
|
BSD-3-Clause
|
def get_indices(self, i):
"""Row and column indices of the `i`'th bicluster.
Only works if ``rows_`` and ``columns_`` attributes exist.
Parameters
----------
i : int
The index of the cluster.
Returns
-------
row_ind : ndarray, dtype=np.intp
Indices of rows in the dataset that belong to the bicluster.
col_ind : ndarray, dtype=np.intp
Indices of columns in the dataset that belong to the bicluster.
"""
rows = self.rows_[i]
columns = self.columns_[i]
return np.nonzero(rows)[0], np.nonzero(columns)[0]
|
Row and column indices of the `i`'th bicluster.
Only works if ``rows_`` and ``columns_`` attributes exist.
Parameters
----------
i : int
The index of the cluster.
Returns
-------
row_ind : ndarray, dtype=np.intp
Indices of rows in the dataset that belong to the bicluster.
col_ind : ndarray, dtype=np.intp
Indices of columns in the dataset that belong to the bicluster.
|
get_indices
|
python
|
scikit-learn/scikit-learn
|
sklearn/base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py
|
BSD-3-Clause
|
def get_shape(self, i):
"""Shape of the `i`'th bicluster.
Parameters
----------
i : int
The index of the cluster.
Returns
-------
n_rows : int
Number of rows in the bicluster.
n_cols : int
Number of columns in the bicluster.
"""
indices = self.get_indices(i)
return tuple(len(i) for i in indices)
|
Shape of the `i`'th bicluster.
Parameters
----------
i : int
The index of the cluster.
Returns
-------
n_rows : int
Number of rows in the bicluster.
n_cols : int
Number of columns in the bicluster.
|
get_shape
|
python
|
scikit-learn/scikit-learn
|
sklearn/base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py
|
BSD-3-Clause
|
def get_submatrix(self, i, data):
"""Return the submatrix corresponding to bicluster `i`.
Parameters
----------
i : int
The index of the cluster.
data : array-like of shape (n_samples, n_features)
The data.
Returns
-------
submatrix : ndarray of shape (n_rows, n_cols)
The submatrix corresponding to bicluster `i`.
Notes
-----
Works with sparse matrices. Only works if ``rows_`` and
``columns_`` attributes exist.
"""
data = check_array(data, accept_sparse="csr")
row_ind, col_ind = self.get_indices(i)
return data[row_ind[:, np.newaxis], col_ind]
|
Return the submatrix corresponding to bicluster `i`.
Parameters
----------
i : int
The index of the cluster.
data : array-like of shape (n_samples, n_features)
The data.
Returns
-------
submatrix : ndarray of shape (n_rows, n_cols)
The submatrix corresponding to bicluster `i`.
Notes
-----
Works with sparse matrices. Only works if ``rows_`` and
``columns_`` attributes exist.
|
get_submatrix
|
python
|
scikit-learn/scikit-learn
|
sklearn/base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py
|
BSD-3-Clause
|
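The three `BiclusterMixin` helpers above (`get_indices`, `get_shape`, `get_submatrix`) become available on fitted biclustering estimators; a minimal sketch with `SpectralCoclustering` on synthetic data:

import numpy as np
from sklearn.cluster import SpectralCoclustering
from sklearn.datasets import make_biclusters

data, _, _ = make_biclusters((30, 30), n_clusters=3, random_state=0)
model = SpectralCoclustering(n_clusters=3, random_state=0).fit(data)

row_ind, col_ind = model.get_indices(0)
print(model.get_shape(0))                  # (n_rows, n_cols) of bicluster 0
print(model.get_submatrix(0, data).shape)  # matches get_shape(0)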
def fit_transform(self, X, y=None, **fit_params):
"""
Fit to data, then transform it.
Fits transformer to `X` and `y` with optional parameters `fit_params`
and returns a transformed version of `X`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input samples.
y : array-like of shape (n_samples,) or (n_samples, n_outputs), \
default=None
Target values (None for unsupervised transformations).
**fit_params : dict
Additional fit parameters.
Returns
-------
        X_new : ndarray of shape (n_samples, n_features_new)
Transformed array.
"""
# non-optimized default implementation; override when a better
# method is possible for a given clustering algorithm
# we do not route parameters here, since consumers don't route. But
# since it's possible for a `transform` method to also consume
# metadata, we check if that's the case, and we raise a warning telling
# users that they should implement a custom `fit_transform` method
# to forward metadata to `transform` as well.
#
# For that, we calculate routing and check if anything would be routed
# to `transform` if we were to route them.
if _routing_enabled():
transform_params = self.get_metadata_routing().consumes(
method="transform", params=fit_params.keys()
)
if transform_params:
warnings.warn(
(
f"This object ({self.__class__.__name__}) has a `transform`"
" method which consumes metadata, but `fit_transform` does not"
" forward metadata to `transform`. Please implement a custom"
" `fit_transform` method to forward metadata to `transform` as"
" well. Alternatively, you can explicitly do"
" `set_transform_request`and set all values to `False` to"
" disable metadata routed to `transform`, if that's an option."
),
UserWarning,
)
if y is None:
# fit method of arity 1 (unsupervised transformation)
return self.fit(X, **fit_params).transform(X)
else:
# fit method of arity 2 (supervised transformation)
return self.fit(X, y, **fit_params).transform(X)
|
Fit to data, then transform it.
Fits transformer to `X` and `y` with optional parameters `fit_params`
and returns a transformed version of `X`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input samples.
y : array-like of shape (n_samples,) or (n_samples, n_outputs), default=None
Target values (None for unsupervised transformations).
**fit_params : dict
Additional fit parameters.
Returns
-------
X_new : ndarray of shape (n_samples, n_features_new)
Transformed array.
|
fit_transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py
|
BSD-3-Clause
|
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Input features.
- If `input_features` is `None`, then `feature_names_in_` is
used as feature names in. If `feature_names_in_` is not defined,
then the following input feature names are generated:
`["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
- If `input_features` is an array-like, then `input_features` must
match `feature_names_in_` if `feature_names_in_` is defined.
Returns
-------
feature_names_out : ndarray of str objects
Same as input features.
"""
# Note that passing attributes="n_features_in_" forces check_is_fitted
# to check if the attribute is present. Otherwise it will pass on
# stateless estimators (requires_fit=False)
check_is_fitted(self, attributes="n_features_in_")
return _check_feature_names_in(self, input_features)
|
Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Input features.
- If `input_features` is `None`, then `feature_names_in_` is
used as feature names in. If `feature_names_in_` is not defined,
then the following input feature names are generated:
`["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
- If `input_features` is an array-like, then `input_features` must
match `feature_names_in_` if `feature_names_in_` is defined.
Returns
-------
feature_names_out : ndarray of str objects
Same as input features.
|
get_feature_names_out
|
python
|
scikit-learn/scikit-learn
|
sklearn/base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py
|
BSD-3-Clause
|
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
        The feature names out will be prefixed by the lowercased class name. For
example, if the transformer outputs 3 features, then the feature names
out are: `["class_name0", "class_name1", "class_name2"]`.
Parameters
----------
input_features : array-like of str or None, default=None
Only used to validate feature names with the names seen in `fit`.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
"""
check_is_fitted(self, "_n_features_out")
return _generate_get_feature_names_out(
self, self._n_features_out, input_features=input_features
)
|
Get output feature names for transformation.
The feature names out will be prefixed by the lowercased class name. For
example, if the transformer outputs 3 features, then the feature names
out are: `["class_name0", "class_name1", "class_name2"]`.
Parameters
----------
input_features : array-like of str or None, default=None
Only used to validate feature names with the names seen in `fit`.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
|
get_feature_names_out
|
python
|
scikit-learn/scikit-learn
|
sklearn/base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py
|
BSD-3-Clause
|
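The two `get_feature_names_out` variants above correspond to the pass-through and class-name-prefixed mixins; a quick illustration:

import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler

X = np.random.RandomState(0).rand(10, 3)
# Pass-through behaviour: generated input names are returned unchanged.
print(StandardScaler().fit(X).get_feature_names_out())     # ['x0' 'x1' 'x2']
# Prefixed behaviour: lowercased class name plus an index.
print(PCA(n_components=2).fit(X).get_feature_names_out())  # ['pca0' 'pca1']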
def fit_predict(self, X, y=None, **kwargs):
"""Perform fit on X and returns labels for X.
Returns -1 for outliers and 1 for inliers.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples.
y : Ignored
Not used, present for API consistency by convention.
**kwargs : dict
Arguments to be passed to ``fit``.
.. versionadded:: 1.4
Returns
-------
y : ndarray of shape (n_samples,)
1 for inliers, -1 for outliers.
"""
# we do not route parameters here, since consumers don't route. But
# since it's possible for a `predict` method to also consume
# metadata, we check if that's the case, and we raise a warning telling
# users that they should implement a custom `fit_predict` method
# to forward metadata to `predict` as well.
#
# For that, we calculate routing and check if anything would be routed
# to `predict` if we were to route them.
if _routing_enabled():
transform_params = self.get_metadata_routing().consumes(
method="predict", params=kwargs.keys()
)
if transform_params:
warnings.warn(
(
f"This object ({self.__class__.__name__}) has a `predict` "
"method which consumes metadata, but `fit_predict` does not "
"forward metadata to `predict`. Please implement a custom "
"`fit_predict` method to forward metadata to `predict` as well."
"Alternatively, you can explicitly do `set_predict_request`"
"and set all values to `False` to disable metadata routed to "
"`predict`, if that's an option."
),
UserWarning,
)
        # override for transductive outlier detectors like LocalOutlierFactor
return self.fit(X, **kwargs).predict(X)
|
Perform fit on X and return labels for X.
Returns -1 for outliers and 1 for inliers.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples.
y : Ignored
Not used, present for API consistency by convention.
**kwargs : dict
Arguments to be passed to ``fit``.
.. versionadded:: 1.4
Returns
-------
y : ndarray of shape (n_samples,)
1 for inliers, -1 for outliers.
|
fit_predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py
|
BSD-3-Clause
|
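A short sketch of the inlier/outlier labelling contract with `IsolationForest` on synthetic data:

import numpy as np
from sklearn.ensemble import IsolationForest

rng = np.random.RandomState(0)
X = np.r_[rng.normal(size=(100, 2)), rng.uniform(5, 6, size=(5, 2))]
labels = IsolationForest(random_state=0).fit_predict(X)
print((labels == -1).sum(), "points flagged as outliers")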
def is_classifier(estimator):
"""Return True if the given estimator is (probably) a classifier.
Parameters
----------
estimator : object
Estimator object to test.
Returns
-------
out : bool
True if estimator is a classifier and False otherwise.
Examples
--------
>>> from sklearn.base import is_classifier
>>> from sklearn.cluster import KMeans
>>> from sklearn.svm import SVC, SVR
>>> classifier = SVC()
>>> regressor = SVR()
>>> kmeans = KMeans()
>>> is_classifier(classifier)
True
>>> is_classifier(regressor)
False
>>> is_classifier(kmeans)
False
"""
# TODO(1.8): Remove this check
if isinstance(estimator, type):
warnings.warn(
f"passing a class to {print(inspect.stack()[0][3])} is deprecated and "
"will be removed in 1.8. Use an instance of the class instead.",
FutureWarning,
)
return getattr(estimator, "_estimator_type", None) == "classifier"
return get_tags(estimator).estimator_type == "classifier"
|
Return True if the given estimator is (probably) a classifier.
Parameters
----------
estimator : object
Estimator object to test.
Returns
-------
out : bool
True if estimator is a classifier and False otherwise.
Examples
--------
>>> from sklearn.base import is_classifier
>>> from sklearn.cluster import KMeans
>>> from sklearn.svm import SVC, SVR
>>> classifier = SVC()
>>> regressor = SVR()
>>> kmeans = KMeans()
>>> is_classifier(classifier)
True
>>> is_classifier(regressor)
False
>>> is_classifier(kmeans)
False
|
is_classifier
|
python
|
scikit-learn/scikit-learn
|
sklearn/base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py
|
BSD-3-Clause
|
def is_regressor(estimator):
"""Return True if the given estimator is (probably) a regressor.
Parameters
----------
estimator : estimator instance
Estimator object to test.
Returns
-------
out : bool
True if estimator is a regressor and False otherwise.
Examples
--------
>>> from sklearn.base import is_regressor
>>> from sklearn.cluster import KMeans
>>> from sklearn.svm import SVC, SVR
>>> classifier = SVC()
>>> regressor = SVR()
>>> kmeans = KMeans()
>>> is_regressor(classifier)
False
>>> is_regressor(regressor)
True
>>> is_regressor(kmeans)
False
"""
# TODO(1.8): Remove this check
if isinstance(estimator, type):
warnings.warn(
f"passing a class to {print(inspect.stack()[0][3])} is deprecated and "
"will be removed in 1.8. Use an instance of the class instead.",
FutureWarning,
)
return getattr(estimator, "_estimator_type", None) == "regressor"
return get_tags(estimator).estimator_type == "regressor"
|
Return True if the given estimator is (probably) a regressor.
Parameters
----------
estimator : estimator instance
Estimator object to test.
Returns
-------
out : bool
True if estimator is a regressor and False otherwise.
Examples
--------
>>> from sklearn.base import is_regressor
>>> from sklearn.cluster import KMeans
>>> from sklearn.svm import SVC, SVR
>>> classifier = SVC()
>>> regressor = SVR()
>>> kmeans = KMeans()
>>> is_regressor(classifier)
False
>>> is_regressor(regressor)
True
>>> is_regressor(kmeans)
False
|
is_regressor
|
python
|
scikit-learn/scikit-learn
|
sklearn/base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py
|
BSD-3-Clause
|
def is_clusterer(estimator):
"""Return True if the given estimator is (probably) a clusterer.
.. versionadded:: 1.6
Parameters
----------
estimator : object
Estimator object to test.
Returns
-------
out : bool
True if estimator is a clusterer and False otherwise.
Examples
--------
>>> from sklearn.base import is_clusterer
>>> from sklearn.cluster import KMeans
>>> from sklearn.svm import SVC, SVR
>>> classifier = SVC()
>>> regressor = SVR()
>>> kmeans = KMeans()
>>> is_clusterer(classifier)
False
>>> is_clusterer(regressor)
False
>>> is_clusterer(kmeans)
True
"""
# TODO(1.8): Remove this check
if isinstance(estimator, type):
warnings.warn(
f"passing a class to {print(inspect.stack()[0][3])} is deprecated and "
"will be removed in 1.8. Use an instance of the class instead.",
FutureWarning,
)
return getattr(estimator, "_estimator_type", None) == "clusterer"
return get_tags(estimator).estimator_type == "clusterer"
|
Return True if the given estimator is (probably) a clusterer.
.. versionadded:: 1.6
Parameters
----------
estimator : object
Estimator object to test.
Returns
-------
out : bool
True if estimator is a clusterer and False otherwise.
Examples
--------
>>> from sklearn.base import is_clusterer
>>> from sklearn.cluster import KMeans
>>> from sklearn.svm import SVC, SVR
>>> classifier = SVC()
>>> regressor = SVR()
>>> kmeans = KMeans()
>>> is_clusterer(classifier)
False
>>> is_clusterer(regressor)
False
>>> is_clusterer(kmeans)
True
|
is_clusterer
|
python
|
scikit-learn/scikit-learn
|
sklearn/base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py
|
BSD-3-Clause
|
def is_outlier_detector(estimator):
"""Return True if the given estimator is (probably) an outlier detector.
Parameters
----------
estimator : estimator instance
Estimator object to test.
Returns
-------
out : bool
True if estimator is an outlier detector and False otherwise.
"""
# TODO(1.8): Remove this check
if isinstance(estimator, type):
warnings.warn(
f"passing a class to {print(inspect.stack()[0][3])} is deprecated and "
"will be removed in 1.8. Use an instance of the class instead.",
FutureWarning,
)
return getattr(estimator, "_estimator_type", None) == "outlier_detector"
return get_tags(estimator).estimator_type == "outlier_detector"
|
Return True if the given estimator is (probably) an outlier detector.
Parameters
----------
estimator : estimator instance
Estimator object to test.
Returns
-------
out : bool
True if estimator is an outlier detector and False otherwise.
|
is_outlier_detector
|
python
|
scikit-learn/scikit-learn
|
sklearn/base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py
|
BSD-3-Clause
|
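Unlike its siblings above, `is_outlier_detector` ships without a doctest; a short sketch mirroring those examples:
from sklearn.base import is_outlier_detector
from sklearn.ensemble import IsolationForest
from sklearn.svm import SVC

print(is_outlier_detector(IsolationForest()))  # True
print(is_outlier_detector(SVC()))              # False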
def _fit_context(*, prefer_skip_nested_validation):
"""Decorator to run the fit methods of estimators within context managers.
Parameters
----------
prefer_skip_nested_validation : bool
If True, the validation of parameters of inner estimators or functions
called during fit will be skipped.
This is useful to avoid validating many times the parameters passed by the
user from the public facing API. It's also useful to avoid validating
parameters that we pass internally to inner functions that are guaranteed to
be valid by the test suite.
It should be set to True for most estimators, except for those that receive
non-validated objects as parameters, such as meta-estimators that are given
estimator objects.
Returns
-------
decorated_fit : method
The decorated fit method.
"""
def decorator(fit_method):
@functools.wraps(fit_method)
def wrapper(estimator, *args, **kwargs):
global_skip_validation = get_config()["skip_parameter_validation"]
# we don't want to validate again for each call to partial_fit
partial_fit_and_fitted = (
fit_method.__name__ == "partial_fit" and _is_fitted(estimator)
)
if not global_skip_validation and not partial_fit_and_fitted:
estimator._validate_params()
with config_context(
skip_parameter_validation=(
prefer_skip_nested_validation or global_skip_validation
)
):
return fit_method(estimator, *args, **kwargs)
return wrapper
return decorator
|
Decorator to run the fit methods of estimators within context managers.
Parameters
----------
prefer_skip_nested_validation : bool
If True, the validation of parameters of inner estimators or functions
called during fit will be skipped.
This is useful to avoid validating many times the parameters passed by the
user from the public facing API. It's also useful to avoid validating
parameters that we pass internally to inner functions that are guaranteed to
be valid by the test suite.
It should be set to True for most estimators, except for those that receive
non-validated objects as parameters, such as meta-estimators that are given
estimator objects.
Returns
-------
decorated_fit : method
The decorated fit method.
|
_fit_context
|
python
|
scikit-learn/scikit-learn
|
sklearn/base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py
|
BSD-3-Clause
|
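A minimal sketch of how the decorator above is applied, using a hypothetical `ToyEstimator`; `_fit_context` is private API, so this only illustrates the intended pattern: parameters listed in `_parameter_constraints` are validated before the `fit` body runs.
from numbers import Real

from sklearn.base import BaseEstimator, _fit_context  # private API, illustration only


class ToyEstimator(BaseEstimator):
    _parameter_constraints: dict = {"alpha": [Real]}

    def __init__(self, alpha=1.0):
        self.alpha = alpha

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None):
        # by this point `alpha` has been validated against the constraints
        self.fitted_ = True
        return self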
def _get_estimator(self):
"""Resolve which estimator to return (default is LinearSVC)"""
if self.estimator is None:
# we want all classifiers that don't expose a random_state
# to be deterministic (and we don't want to expose this one).
estimator = LinearSVC(random_state=0)
if _routing_enabled():
estimator.set_fit_request(sample_weight=True)
else:
estimator = self.estimator
return estimator
|
Resolve which estimator to return (default is LinearSVC)
|
_get_estimator
|
python
|
scikit-learn/scikit-learn
|
sklearn/calibration.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/calibration.py
|
BSD-3-Clause
|
def fit(self, X, y, sample_weight=None, **fit_params):
"""Fit the calibrated model.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
**fit_params : dict
Parameters to pass to the `fit` method of the underlying
classifier.
Returns
-------
self : object
Returns an instance of self.
"""
check_classification_targets(y)
X, y = indexable(X, y)
estimator = self._get_estimator()
_ensemble = self.ensemble
if _ensemble == "auto":
_ensemble = not isinstance(estimator, FrozenEstimator)
self.calibrated_classifiers_ = []
if self.cv == "prefit":
# TODO(1.8): Remove this code branch and cv='prefit'
warnings.warn(
"The `cv='prefit'` option is deprecated in 1.6 and will be removed in"
" 1.8. You can use CalibratedClassifierCV(FrozenEstimator(estimator))"
" instead.",
category=FutureWarning,
)
# `classes_` should be consistent with that of estimator
check_is_fitted(self.estimator, attributes=["classes_"])
self.classes_ = self.estimator.classes_
predictions, _ = _get_response_values(
estimator,
X,
response_method=["decision_function", "predict_proba"],
)
if predictions.ndim == 1:
# Reshape binary output from `(n_samples,)` to `(n_samples, 1)`
predictions = predictions.reshape(-1, 1)
if sample_weight is not None:
# Check that the sample_weight dtype is consistent with the predictions
# to avoid unintentional upcasts.
sample_weight = _check_sample_weight(
sample_weight, predictions, dtype=predictions.dtype
)
calibrated_classifier = _fit_calibrator(
estimator,
predictions,
y,
self.classes_,
self.method,
sample_weight,
)
self.calibrated_classifiers_.append(calibrated_classifier)
else:
# Set `classes_` using all `y`
label_encoder_ = LabelEncoder().fit(y)
self.classes_ = label_encoder_.classes_
if _routing_enabled():
routed_params = process_routing(
self,
"fit",
sample_weight=sample_weight,
**fit_params,
)
else:
# sample_weight checks
fit_parameters = signature(estimator.fit).parameters
supports_sw = "sample_weight" in fit_parameters
if sample_weight is not None and not supports_sw:
estimator_name = type(estimator).__name__
warnings.warn(
f"Since {estimator_name} does not appear to accept"
" sample_weight, sample weights will only be used for the"
" calibration itself. This can be caused by a limitation of"
" the current scikit-learn API. See the following issue for"
" more details:"
" https://github.com/scikit-learn/scikit-learn/issues/21134."
" Be warned that the result of the calibration is likely to be"
" incorrect."
)
routed_params = Bunch()
routed_params.splitter = Bunch(split={}) # no routing for splitter
routed_params.estimator = Bunch(fit=fit_params)
if sample_weight is not None and supports_sw:
routed_params.estimator.fit["sample_weight"] = sample_weight
# Check that each cross-validation fold can have at least one
# example per class
if isinstance(self.cv, int):
n_folds = self.cv
elif hasattr(self.cv, "n_splits"):
n_folds = self.cv.n_splits
else:
n_folds = None
if n_folds and np.any(np.unique(y, return_counts=True)[1] < n_folds):
raise ValueError(
f"Requesting {n_folds}-fold "
"cross-validation but provided less than "
f"{n_folds} examples for at least one class."
)
if isinstance(self.cv, LeaveOneOut):
raise ValueError(
"LeaveOneOut cross-validation does not allow"
"all classes to be present in test splits. "
"Please use a cross-validation generator that allows "
"all classes to appear in every test and train split."
)
cv = check_cv(self.cv, y, classifier=True)
if _ensemble:
parallel = Parallel(n_jobs=self.n_jobs)
self.calibrated_classifiers_ = parallel(
delayed(_fit_classifier_calibrator_pair)(
clone(estimator),
X,
y,
train=train,
test=test,
method=self.method,
classes=self.classes_,
sample_weight=sample_weight,
fit_params=routed_params.estimator.fit,
)
for train, test in cv.split(X, y, **routed_params.splitter.split)
)
else:
this_estimator = clone(estimator)
method_name = _check_response_method(
this_estimator,
["decision_function", "predict_proba"],
).__name__
predictions = cross_val_predict(
estimator=this_estimator,
X=X,
y=y,
cv=cv,
method=method_name,
n_jobs=self.n_jobs,
params=routed_params.estimator.fit,
)
if len(self.classes_) == 2:
# Ensure shape (n_samples, 1) in the binary case
if method_name == "predict_proba":
# Select the probability column of the positive class
predictions = _process_predict_proba(
y_pred=predictions,
target_type="binary",
classes=self.classes_,
pos_label=self.classes_[1],
)
predictions = predictions.reshape(-1, 1)
if sample_weight is not None:
# Check that the sample_weight dtype is consistent with the
# predictions to avoid unintentional upcasts.
sample_weight = _check_sample_weight(
sample_weight, predictions, dtype=predictions.dtype
)
this_estimator.fit(X, y, **routed_params.estimator.fit)
# Note: Here we don't pass on fit_params because the supported
# calibrators don't support fit_params anyway
calibrated_classifier = _fit_calibrator(
this_estimator,
predictions,
y,
self.classes_,
self.method,
sample_weight,
)
self.calibrated_classifiers_.append(calibrated_classifier)
first_clf = self.calibrated_classifiers_[0].estimator
if hasattr(first_clf, "n_features_in_"):
self.n_features_in_ = first_clf.n_features_in_
if hasattr(first_clf, "feature_names_in_"):
self.feature_names_in_ = first_clf.feature_names_in_
return self
|
Fit the calibrated model.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
**fit_params : dict
Parameters to pass to the `fit` method of the underlying
classifier.
Returns
-------
self : object
Returns an instance of self.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/calibration.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/calibration.py
|
BSD-3-Clause
|
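An end-to-end sketch of the `fit` method above using public API only: cross-fitted sigmoid calibration of a `LinearSVC`, then calibrated probabilities on held-out data.
from sklearn.calibration import CalibratedClassifierCV
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC

X, y = make_classification(random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
calibrated = CalibratedClassifierCV(LinearSVC(random_state=0), method="sigmoid", cv=5)
calibrated.fit(X_train, y_train)
proba = calibrated.predict_proba(X_test)  # shape (n_test, 2), rows sum to 1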
def predict_proba(self, X):
"""Calibrated probabilities of classification.
This function returns calibrated probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The samples, as accepted by `estimator.predict_proba`.
Returns
-------
C : ndarray of shape (n_samples, n_classes)
The predicted probas.
"""
check_is_fitted(self)
# Compute the arithmetic mean of the predictions of the calibrated
# classifiers
mean_proba = np.zeros((_num_samples(X), len(self.classes_)))
for calibrated_classifier in self.calibrated_classifiers_:
proba = calibrated_classifier.predict_proba(X)
mean_proba += proba
mean_proba /= len(self.calibrated_classifiers_)
return mean_proba
|
Calibrated probabilities of classification.
This function returns calibrated probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The samples, as accepted by `estimator.predict_proba`.
Returns
-------
C : ndarray of shape (n_samples, n_classes)
The predicted probas.
|
predict_proba
|
python
|
scikit-learn/scikit-learn
|
sklearn/calibration.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/calibration.py
|
BSD-3-Clause
|
def predict(self, X):
"""Predict the target of new samples.
The predicted class is the class that has the highest probability,
and can thus be different from the prediction of the uncalibrated classifier.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The samples, as accepted by `estimator.predict`.
Returns
-------
C : ndarray of shape (n_samples,)
The predicted class.
"""
check_is_fitted(self)
return self.classes_[np.argmax(self.predict_proba(X), axis=1)]
|
Predict the target of new samples.
The predicted class is the class that has the highest probability,
and can thus be different from the prediction of the uncalibrated classifier.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The samples, as accepted by `estimator.predict`.
Returns
-------
C : ndarray of shape (n_samples,)
The predicted class.
|
predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/calibration.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/calibration.py
|
BSD-3-Clause
|
def get_metadata_routing(self):
"""Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
"""
router = (
MetadataRouter(owner=self.__class__.__name__)
.add_self_request(self)
.add(
estimator=self._get_estimator(),
method_mapping=MethodMapping().add(caller="fit", callee="fit"),
)
.add(
splitter=self.cv,
method_mapping=MethodMapping().add(caller="fit", callee="split"),
)
)
return router
|
Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
|
get_metadata_routing
|
python
|
scikit-learn/scikit-learn
|
sklearn/calibration.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/calibration.py
|
BSD-3-Clause
|
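A hedged sketch of the routing declared above: once metadata routing is enabled, `sample_weight` requested by the inner estimator is forwarded through `CalibratedClassifierCV.fit` to the estimator's own `fit`.
import numpy as np
import sklearn
from sklearn.calibration import CalibratedClassifierCV
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression

X, y = make_classification(random_state=0)
weights = np.ones(len(y))
with sklearn.config_context(enable_metadata_routing=True):
    inner = LogisticRegression().set_fit_request(sample_weight=True)
    CalibratedClassifierCV(inner).fit(X, y, sample_weight=weights)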
def _fit_classifier_calibrator_pair(
estimator,
X,
y,
train,
test,
method,
classes,
sample_weight=None,
fit_params=None,
):
"""Fit a classifier/calibration pair on a given train/test split.
Fit the classifier on the train set, compute its predictions on the test
set and use the predictions as input to fit the calibrator along with the
test labels.
Parameters
----------
estimator : estimator instance
Cloned base estimator.
X : array-like, shape (n_samples, n_features)
Sample data.
y : array-like, shape (n_samples,)
Targets.
train : ndarray, shape (n_train_indices,)
Indices of the training subset.
test : ndarray, shape (n_test_indices,)
Indices of the testing subset.
method : {'sigmoid', 'isotonic'}
Method to use for calibration.
classes : ndarray, shape (n_classes,)
The target classes.
sample_weight : array-like, default=None
Sample weights for `X`.
fit_params : dict, default=None
Parameters to pass to the `fit` method of the underlying
classifier.
Returns
-------
calibrated_classifier : _CalibratedClassifier instance
"""
fit_params_train = _check_method_params(X, params=fit_params, indices=train)
X_train, y_train = _safe_indexing(X, train), _safe_indexing(y, train)
X_test, y_test = _safe_indexing(X, test), _safe_indexing(y, test)
estimator.fit(X_train, y_train, **fit_params_train)
predictions, _ = _get_response_values(
estimator,
X_test,
response_method=["decision_function", "predict_proba"],
)
if predictions.ndim == 1:
# Reshape binary output from `(n_samples,)` to `(n_samples, 1)`
predictions = predictions.reshape(-1, 1)
if sample_weight is not None:
# Check that the sample_weight dtype is consistent with the predictions
# to avoid unintentional upcasts.
sample_weight = _check_sample_weight(sample_weight, X, dtype=predictions.dtype)
sw_test = _safe_indexing(sample_weight, test)
else:
sw_test = None
calibrated_classifier = _fit_calibrator(
estimator, predictions, y_test, classes, method, sample_weight=sw_test
)
return calibrated_classifier
|
Fit a classifier/calibration pair on a given train/test split.
Fit the classifier on the train set, compute its predictions on the test
set and use the predictions as input to fit the calibrator along with the
test labels.
Parameters
----------
estimator : estimator instance
Cloned base estimator.
X : array-like, shape (n_samples, n_features)
Sample data.
y : array-like, shape (n_samples,)
Targets.
train : ndarray, shape (n_train_indices,)
Indices of the training subset.
test : ndarray, shape (n_test_indices,)
Indices of the testing subset.
method : {'sigmoid', 'isotonic'}
Method to use for calibration.
classes : ndarray, shape (n_classes,)
The target classes.
sample_weight : array-like, default=None
Sample weights for `X`.
fit_params : dict, default=None
Parameters to pass to the `fit` method of the underlying
classifier.
Returns
-------
calibrated_classifier : _CalibratedClassifier instance
|
_fit_classifier_calibrator_pair
|
python
|
scikit-learn/scikit-learn
|
sklearn/calibration.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/calibration.py
|
BSD-3-Clause
|
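A conceptual sketch of the train/test pairing above, restated with public API only: fit the classifier on the train fold, then fit the calibrator on the held-out fold's predictions.
from sklearn.datasets import make_classification
from sklearn.isotonic import IsotonicRegression
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold

X, y = make_classification(random_state=0)
train, test = next(KFold(n_splits=5).split(X))
clf = LogisticRegression().fit(X[train], y[train])
calibrator = IsotonicRegression(out_of_bounds="clip").fit(
    clf.decision_function(X[test]), y[test]
)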
def _fit_calibrator(clf, predictions, y, classes, method, sample_weight=None):
"""Fit calibrator(s) and return a `_CalibratedClassifier`
instance.
`n_classes` (i.e. `len(clf.classes_)`) calibrators are fitted.
However, if `n_classes` equals 2, one calibrator is fitted.
Parameters
----------
clf : estimator instance
Fitted classifier.
predictions : array-like, shape (n_samples, n_classes) or (n_samples, 1) \
when binary.
Raw predictions returned by the un-calibrated base classifier.
y : array-like, shape (n_samples,)
The targets.
classes : ndarray, shape (n_classes,)
All the prediction classes.
method : {'sigmoid', 'isotonic'}
The method to use for calibration.
sample_weight : ndarray, shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Returns
-------
pipeline : _CalibratedClassifier instance
"""
Y = label_binarize(y, classes=classes)
label_encoder = LabelEncoder().fit(classes)
pos_class_indices = label_encoder.transform(clf.classes_)
calibrators = []
for class_idx, this_pred in zip(pos_class_indices, predictions.T):
if method == "isotonic":
calibrator = IsotonicRegression(out_of_bounds="clip")
else: # "sigmoid"
calibrator = _SigmoidCalibration()
calibrator.fit(this_pred, Y[:, class_idx], sample_weight)
calibrators.append(calibrator)
pipeline = _CalibratedClassifier(clf, calibrators, method=method, classes=classes)
return pipeline
|
Fit calibrator(s) and return a `_CalibratedClassifier`
instance.
`n_classes` (i.e. `len(clf.classes_)`) calibrators are fitted.
However, if `n_classes` equals 2, one calibrator is fitted.
Parameters
----------
clf : estimator instance
Fitted classifier.
predictions : array-like, shape (n_samples, n_classes) or (n_samples, 1) when binary.
Raw predictions returned by the un-calibrated base classifier.
y : array-like, shape (n_samples,)
The targets.
classes : ndarray, shape (n_classes,)
All the prediction classes.
method : {'sigmoid', 'isotonic'}
The method to use for calibration.
sample_weight : ndarray, shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Returns
-------
pipeline : _CalibratedClassifier instance
|
_fit_calibrator
|
python
|
scikit-learn/scikit-learn
|
sklearn/calibration.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/calibration.py
|
BSD-3-Clause
|
def predict_proba(self, X):
"""Calculate calibrated probabilities.
Calculates classification calibrated probabilities
for each class, in a one-vs-all manner, for `X`.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
The sample data.
Returns
-------
proba : array, shape (n_samples, n_classes)
The predicted probabilities. Can be exact zeros.
"""
predictions, _ = _get_response_values(
self.estimator,
X,
response_method=["decision_function", "predict_proba"],
)
if predictions.ndim == 1:
# Reshape binary output from `(n_samples,)` to `(n_samples, 1)`
predictions = predictions.reshape(-1, 1)
n_classes = len(self.classes)
label_encoder = LabelEncoder().fit(self.classes)
pos_class_indices = label_encoder.transform(self.estimator.classes_)
proba = np.zeros((_num_samples(X), n_classes))
for class_idx, this_pred, calibrator in zip(
pos_class_indices, predictions.T, self.calibrators
):
if n_classes == 2:
# When binary, `predictions` consists only of predictions for
# clf.classes_[1] but `pos_class_indices` = 0
class_idx += 1
proba[:, class_idx] = calibrator.predict(this_pred)
# Normalize the probabilities
if n_classes == 2:
proba[:, 0] = 1.0 - proba[:, 1]
else:
denominator = np.sum(proba, axis=1)[:, np.newaxis]
# In the edge case where for each class calibrator returns a null
# probability for a given sample, use the uniform distribution
# instead.
uniform_proba = np.full_like(proba, 1 / n_classes)
proba = np.divide(
proba, denominator, out=uniform_proba, where=denominator != 0
)
# Deal with cases where the predicted probability minimally exceeds 1.0
proba[(1.0 < proba) & (proba <= 1.0 + 1e-5)] = 1.0
return proba
|
Calculate calibrated probabilities.
Calculates classification calibrated probabilities
for each class, in a one-vs-all manner, for `X`.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
The sample data.
Returns
-------
proba : array, shape (n_samples, n_classes)
The predicted probabilities. Can be exact zeros.
|
predict_proba
|
python
|
scikit-learn/scikit-learn
|
sklearn/calibration.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/calibration.py
|
BSD-3-Clause
|
def _sigmoid_calibration(
predictions, y, sample_weight=None, max_abs_prediction_threshold=30
):
"""Probability Calibration with sigmoid method (Platt 2000)
Parameters
----------
predictions : ndarray of shape (n_samples,)
The decision function or predict proba for the samples.
y : ndarray of shape (n_samples,)
The targets.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Returns
-------
a : float
The slope.
b : float
The intercept.
References
----------
Platt, "Probabilistic Outputs for Support Vector Machines"
"""
predictions = column_or_1d(predictions)
y = column_or_1d(y)
F = predictions # F follows Platt's notations
scale_constant = 1.0
max_prediction = np.max(np.abs(F))
# If the predictions have large values we scale them in order to bring
# them within a suitable range. This has no effect on the final
    # (prediction) result because linear models like Logistic Regression
# without a penalty are invariant to multiplying the features by a
# constant.
if max_prediction >= max_abs_prediction_threshold:
scale_constant = max_prediction
# We rescale the features in a copy: inplace rescaling could confuse
# the caller and make the code harder to reason about.
F = F / scale_constant
# Bayesian priors (see Platt end of section 2.2):
# It corresponds to the number of samples, taking into account the
# `sample_weight`.
mask_negative_samples = y <= 0
if sample_weight is not None:
prior0 = (sample_weight[mask_negative_samples]).sum()
prior1 = (sample_weight[~mask_negative_samples]).sum()
else:
prior0 = float(np.sum(mask_negative_samples))
prior1 = y.shape[0] - prior0
T = np.zeros_like(y, dtype=predictions.dtype)
T[y > 0] = (prior1 + 1.0) / (prior1 + 2.0)
T[y <= 0] = 1.0 / (prior0 + 2.0)
bin_loss = HalfBinomialLoss()
def loss_grad(AB):
# .astype below is needed to ensure y_true and raw_prediction have the
# same dtype. With result = np.float64(0) * np.array([1, 2], dtype=np.float32)
# - in Numpy 2, result.dtype is float64
# - in Numpy<2, result.dtype is float32
raw_prediction = -(AB[0] * F + AB[1]).astype(dtype=predictions.dtype)
l, g = bin_loss.loss_gradient(
y_true=T,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
)
loss = l.sum()
# TODO: Remove casting to np.float64 when minimum supported SciPy is 1.11.2
# With SciPy >= 1.11.2, the LBFGS implementation will cast to float64
# https://github.com/scipy/scipy/pull/18825.
# Here we cast to float64 to support SciPy < 1.11.2
grad = np.asarray([-g @ F, -g.sum()], dtype=np.float64)
return loss, grad
AB0 = np.array([0.0, log((prior0 + 1.0) / (prior1 + 1.0))])
opt_result = minimize(
loss_grad,
AB0,
method="L-BFGS-B",
jac=True,
options={
"gtol": 1e-6,
"ftol": 64 * np.finfo(float).eps,
},
)
AB_ = opt_result.x
# The tuned multiplicative parameter is converted back to the original
# input feature scale. The offset parameter does not need rescaling since
# we did not rescale the outcome variable.
return AB_[0] / scale_constant, AB_[1]
|
Probability Calibration with sigmoid method (Platt 2000)
Parameters
----------
predictions : ndarray of shape (n_samples,)
The decision function or predict proba for the samples.
y : ndarray of shape (n_samples,)
The targets.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Returns
-------
a : float
The slope.
b : float
The intercept.
References
----------
Platt, "Probabilistic Outputs for Support Vector Machines"
|
_sigmoid_calibration
|
python
|
scikit-learn/scikit-learn
|
sklearn/calibration.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/calibration.py
|
BSD-3-Clause
|
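A numeric sketch of the mapping fitted above: given the slope `a` and intercept `b` returned by `_sigmoid_calibration`, calibrated probabilities are `expit(-(a * f + b))`. The values below are illustrative, not fitted.
import numpy as np
from scipy.special import expit

a, b = 1.5, -0.2  # illustrative parameters, not fitted ones
decision_values = np.array([-2.0, 0.0, 2.0])
proba = expit(-(a * decision_values + b))  # monotone in the decision values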
def fit(self, X, y, sample_weight=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like of shape (n_samples,)
Training data.
y : array-like of shape (n_samples,)
Training target.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X = column_or_1d(X)
y = column_or_1d(y)
X, y = indexable(X, y)
self.a_, self.b_ = _sigmoid_calibration(X, y, sample_weight)
return self
|
Fit the model using X, y as training data.
Parameters
----------
X : array-like of shape (n_samples,)
Training data.
y : array-like of shape (n_samples,)
Training target.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/calibration.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/calibration.py
|
BSD-3-Clause
|
def predict(self, T):
"""Predict new data by linear interpolation.
Parameters
----------
T : array-like of shape (n_samples,)
Data to predict from.
Returns
-------
T_ : ndarray of shape (n_samples,)
The predicted data.
"""
T = column_or_1d(T)
return expit(-(self.a_ * T + self.b_))
|
Predict new data using the fitted sigmoid model.
Parameters
----------
T : array-like of shape (n_samples,)
Data to predict from.
Returns
-------
T_ : ndarray of shape (n_samples,)
The predicted data.
|
predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/calibration.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/calibration.py
|
BSD-3-Clause
|
def calibration_curve(
y_true,
y_prob,
*,
pos_label=None,
n_bins=5,
strategy="uniform",
):
"""Compute true and predicted probabilities for a calibration curve.
    The method assumes the inputs come from a binary classifier, and
    discretizes the [0, 1] interval into bins.
Calibration curves may also be referred to as reliability diagrams.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array-like of shape (n_samples,)
True targets.
y_prob : array-like of shape (n_samples,)
Probabilities of the positive class.
pos_label : int, float, bool or str, default=None
The label of the positive class.
.. versionadded:: 1.1
n_bins : int, default=5
Number of bins to discretize the [0, 1] interval. A bigger number
requires more data. Bins with no samples (i.e. without
corresponding values in `y_prob`) will not be returned, thus the
        returned arrays may have fewer than `n_bins` values.
strategy : {'uniform', 'quantile'}, default='uniform'
Strategy used to define the widths of the bins.
uniform
The bins have identical widths.
quantile
The bins have the same number of samples and depend on `y_prob`.
Returns
-------
prob_true : ndarray of shape (n_bins,) or smaller
The proportion of samples whose class is the positive class, in each
bin (fraction of positives).
prob_pred : ndarray of shape (n_bins,) or smaller
The mean predicted probability in each bin.
See Also
--------
CalibrationDisplay.from_predictions : Plot calibration curve using true
and predicted labels.
CalibrationDisplay.from_estimator : Plot calibration curve using an
estimator and data.
References
----------
Alexandru Niculescu-Mizil and Rich Caruana (2005) Predicting Good
Probabilities With Supervised Learning, in Proceedings of the 22nd
International Conference on Machine Learning (ICML).
See section 4 (Qualitative Analysis of Predictions).
Examples
--------
>>> import numpy as np
>>> from sklearn.calibration import calibration_curve
>>> y_true = np.array([0, 0, 0, 0, 1, 1, 1, 1, 1])
>>> y_pred = np.array([0.1, 0.2, 0.3, 0.4, 0.65, 0.7, 0.8, 0.9, 1.])
>>> prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=3)
>>> prob_true
array([0. , 0.5, 1. ])
>>> prob_pred
array([0.2 , 0.525, 0.85 ])
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
check_consistent_length(y_true, y_prob)
pos_label = _check_pos_label_consistency(pos_label, y_true)
if y_prob.min() < 0 or y_prob.max() > 1:
raise ValueError("y_prob has values outside [0, 1].")
labels = np.unique(y_true)
if len(labels) > 2:
raise ValueError(
f"Only binary classification is supported. Provided labels {labels}."
)
y_true = y_true == pos_label
if strategy == "quantile": # Determine bin edges by distribution of data
quantiles = np.linspace(0, 1, n_bins + 1)
bins = np.percentile(y_prob, quantiles * 100)
elif strategy == "uniform":
bins = np.linspace(0.0, 1.0, n_bins + 1)
else:
raise ValueError(
"Invalid entry to 'strategy' input. Strategy "
"must be either 'quantile' or 'uniform'."
)
binids = np.searchsorted(bins[1:-1], y_prob)
bin_sums = np.bincount(binids, weights=y_prob, minlength=len(bins))
bin_true = np.bincount(binids, weights=y_true, minlength=len(bins))
bin_total = np.bincount(binids, minlength=len(bins))
nonzero = bin_total != 0
prob_true = bin_true[nonzero] / bin_total[nonzero]
prob_pred = bin_sums[nonzero] / bin_total[nonzero]
return prob_true, prob_pred
|
Compute true and predicted probabilities for a calibration curve.
The method assumes the inputs come from a binary classifier, and
discretizes the [0, 1] interval into bins.
Calibration curves may also be referred to as reliability diagrams.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array-like of shape (n_samples,)
True targets.
y_prob : array-like of shape (n_samples,)
Probabilities of the positive class.
pos_label : int, float, bool or str, default=None
The label of the positive class.
.. versionadded:: 1.1
n_bins : int, default=5
Number of bins to discretize the [0, 1] interval. A bigger number
requires more data. Bins with no samples (i.e. without
corresponding values in `y_prob`) will not be returned, thus the
    returned arrays may have fewer than `n_bins` values.
strategy : {'uniform', 'quantile'}, default='uniform'
Strategy used to define the widths of the bins.
uniform
The bins have identical widths.
quantile
The bins have the same number of samples and depend on `y_prob`.
Returns
-------
prob_true : ndarray of shape (n_bins,) or smaller
The proportion of samples whose class is the positive class, in each
bin (fraction of positives).
prob_pred : ndarray of shape (n_bins,) or smaller
The mean predicted probability in each bin.
See Also
--------
CalibrationDisplay.from_predictions : Plot calibration curve using true
and predicted labels.
CalibrationDisplay.from_estimator : Plot calibration curve using an
estimator and data.
References
----------
Alexandru Niculescu-Mizil and Rich Caruana (2005) Predicting Good
Probabilities With Supervised Learning, in Proceedings of the 22nd
International Conference on Machine Learning (ICML).
See section 4 (Qualitative Analysis of Predictions).
Examples
--------
>>> import numpy as np
>>> from sklearn.calibration import calibration_curve
>>> y_true = np.array([0, 0, 0, 0, 1, 1, 1, 1, 1])
>>> y_pred = np.array([0.1, 0.2, 0.3, 0.4, 0.65, 0.7, 0.8, 0.9, 1.])
>>> prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=3)
>>> prob_true
array([0. , 0.5, 1. ])
>>> prob_pred
array([0.2 , 0.525, 0.85 ])
|
calibration_curve
|
python
|
scikit-learn/scikit-learn
|
sklearn/calibration.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/calibration.py
|
BSD-3-Clause
|
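A variant of the doctest above using `strategy="quantile"`, where bin edges follow the distribution of `y_prob` instead of being equally spaced.
import numpy as np
from sklearn.calibration import calibration_curve

y_true = np.array([0, 0, 0, 0, 1, 1, 1, 1, 1])
y_prob = np.array([0.1, 0.2, 0.3, 0.4, 0.65, 0.7, 0.8, 0.9, 1.0])
prob_true, prob_pred = calibration_curve(y_true, y_prob, n_bins=3, strategy="quantile")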
def plot(self, *, ax=None, name=None, ref_line=True, **kwargs):
"""Plot visualization.
Extra keyword arguments will be passed to
:func:`matplotlib.pyplot.plot`.
Parameters
----------
ax : Matplotlib Axes, default=None
Axes object to plot on. If `None`, a new figure and axes is
created.
name : str, default=None
Name for labeling curve. If `None`, use `estimator_name` if
not `None`, otherwise no labeling is shown.
ref_line : bool, default=True
If `True`, plots a reference line representing a perfectly
calibrated classifier.
**kwargs : dict
Keyword arguments to be passed to :func:`matplotlib.pyplot.plot`.
Returns
-------
display : :class:`~sklearn.calibration.CalibrationDisplay`
Object that stores computed values.
"""
self.ax_, self.figure_, name = self._validate_plot_params(ax=ax, name=name)
info_pos_label = (
f"(Positive class: {self.pos_label})" if self.pos_label is not None else ""
)
default_line_kwargs = {"marker": "s", "linestyle": "-"}
if name is not None:
default_line_kwargs["label"] = name
line_kwargs = _validate_style_kwargs(default_line_kwargs, kwargs)
ref_line_label = "Perfectly calibrated"
existing_ref_line = ref_line_label in self.ax_.get_legend_handles_labels()[1]
if ref_line and not existing_ref_line:
self.ax_.plot([0, 1], [0, 1], "k:", label=ref_line_label)
self.line_ = self.ax_.plot(self.prob_pred, self.prob_true, **line_kwargs)[0]
# We always have to show the legend for at least the reference line
self.ax_.legend(loc="lower right")
xlabel = f"Mean predicted probability {info_pos_label}"
ylabel = f"Fraction of positives {info_pos_label}"
self.ax_.set(xlabel=xlabel, ylabel=ylabel)
return self
|
Plot visualization.
Extra keyword arguments will be passed to
:func:`matplotlib.pyplot.plot`.
Parameters
----------
ax : Matplotlib Axes, default=None
Axes object to plot on. If `None`, a new figure and axes is
created.
name : str, default=None
Name for labeling curve. If `None`, use `estimator_name` if
not `None`, otherwise no labeling is shown.
ref_line : bool, default=True
If `True`, plots a reference line representing a perfectly
calibrated classifier.
**kwargs : dict
Keyword arguments to be passed to :func:`matplotlib.pyplot.plot`.
Returns
-------
display : :class:`~sklearn.calibration.CalibrationDisplay`
Object that stores computed values.
|
plot
|
python
|
scikit-learn/scikit-learn
|
sklearn/calibration.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/calibration.py
|
BSD-3-Clause
|
def from_estimator(
cls,
estimator,
X,
y,
*,
n_bins=5,
strategy="uniform",
pos_label=None,
name=None,
ax=None,
ref_line=True,
**kwargs,
):
"""Plot calibration curve using a binary classifier and data.
        A calibration curve, also known as a reliability diagram, uses inputs
        from a binary classifier and plots, for each bin, the average predicted
        probability (x-axis) against the fraction of positive samples (y-axis).
Extra keyword arguments will be passed to
:func:`matplotlib.pyplot.plot`.
Read more about calibration in the :ref:`User Guide <calibration>` and
more about the scikit-learn visualization API in :ref:`visualizations`.
.. versionadded:: 1.0
Parameters
----------
estimator : estimator instance
Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline`
in which the last estimator is a classifier. The classifier must
have a :term:`predict_proba` method.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input values.
y : array-like of shape (n_samples,)
Binary target values.
n_bins : int, default=5
Number of bins to discretize the [0, 1] interval into when
calculating the calibration curve. A bigger number requires more
data.
strategy : {'uniform', 'quantile'}, default='uniform'
Strategy used to define the widths of the bins.
- `'uniform'`: The bins have identical widths.
- `'quantile'`: The bins have the same number of samples and depend
on predicted probabilities.
pos_label : int, float, bool or str, default=None
The positive class when computing the calibration curve.
            By default, `estimator.classes_[1]` is considered as the
positive class.
.. versionadded:: 1.1
name : str, default=None
Name for labeling curve. If `None`, the name of the estimator is
used.
ax : matplotlib axes, default=None
Axes object to plot on. If `None`, a new figure and axes is
created.
ref_line : bool, default=True
If `True`, plots a reference line representing a perfectly
calibrated classifier.
**kwargs : dict
Keyword arguments to be passed to :func:`matplotlib.pyplot.plot`.
Returns
-------
display : :class:`~sklearn.calibration.CalibrationDisplay`.
Object that stores computed values.
See Also
--------
CalibrationDisplay.from_predictions : Plot calibration curve using true
and predicted labels.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sklearn.datasets import make_classification
>>> from sklearn.model_selection import train_test_split
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.calibration import CalibrationDisplay
>>> X, y = make_classification(random_state=0)
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, random_state=0)
>>> clf = LogisticRegression(random_state=0)
>>> clf.fit(X_train, y_train)
LogisticRegression(random_state=0)
>>> disp = CalibrationDisplay.from_estimator(clf, X_test, y_test)
>>> plt.show()
"""
y_prob, pos_label, name = cls._validate_and_get_response_values(
estimator,
X,
y,
response_method="predict_proba",
pos_label=pos_label,
name=name,
)
return cls.from_predictions(
y,
y_prob,
n_bins=n_bins,
strategy=strategy,
pos_label=pos_label,
name=name,
ref_line=ref_line,
ax=ax,
**kwargs,
)
|
Plot calibration curve using a binary classifier and data.
A calibration curve, also known as a reliability diagram, uses inputs
from a binary classifier and plots, for each bin, the average predicted
probability (x-axis) against the fraction of positive samples (y-axis).
Extra keyword arguments will be passed to
:func:`matplotlib.pyplot.plot`.
Read more about calibration in the :ref:`User Guide <calibration>` and
more about the scikit-learn visualization API in :ref:`visualizations`.
.. versionadded:: 1.0
Parameters
----------
estimator : estimator instance
Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline`
in which the last estimator is a classifier. The classifier must
have a :term:`predict_proba` method.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input values.
y : array-like of shape (n_samples,)
Binary target values.
n_bins : int, default=5
Number of bins to discretize the [0, 1] interval into when
calculating the calibration curve. A bigger number requires more
data.
strategy : {'uniform', 'quantile'}, default='uniform'
Strategy used to define the widths of the bins.
- `'uniform'`: The bins have identical widths.
- `'quantile'`: The bins have the same number of samples and depend
on predicted probabilities.
pos_label : int, float, bool or str, default=None
The positive class when computing the calibration curve.
    By default, `estimator.classes_[1]` is considered as the
positive class.
.. versionadded:: 1.1
name : str, default=None
Name for labeling curve. If `None`, the name of the estimator is
used.
ax : matplotlib axes, default=None
Axes object to plot on. If `None`, a new figure and axes is
created.
ref_line : bool, default=True
If `True`, plots a reference line representing a perfectly
calibrated classifier.
**kwargs : dict
Keyword arguments to be passed to :func:`matplotlib.pyplot.plot`.
Returns
-------
display : :class:`~sklearn.calibration.CalibrationDisplay`.
Object that stores computed values.
See Also
--------
CalibrationDisplay.from_predictions : Plot calibration curve using true
and predicted labels.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sklearn.datasets import make_classification
>>> from sklearn.model_selection import train_test_split
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.calibration import CalibrationDisplay
>>> X, y = make_classification(random_state=0)
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, random_state=0)
>>> clf = LogisticRegression(random_state=0)
>>> clf.fit(X_train, y_train)
LogisticRegression(random_state=0)
>>> disp = CalibrationDisplay.from_estimator(clf, X_test, y_test)
>>> plt.show()
|
from_estimator
|
python
|
scikit-learn/scikit-learn
|
sklearn/calibration.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/calibration.py
|
BSD-3-Clause
|
def from_predictions(
cls,
y_true,
y_prob,
*,
n_bins=5,
strategy="uniform",
pos_label=None,
name=None,
ax=None,
ref_line=True,
**kwargs,
):
"""Plot calibration curve using true labels and predicted probabilities.
        A calibration curve, also known as a reliability diagram, uses inputs
        from a binary classifier and plots, for each bin, the average predicted
        probability (x-axis) against the fraction of positive samples (y-axis).
Extra keyword arguments will be passed to
:func:`matplotlib.pyplot.plot`.
Read more about calibration in the :ref:`User Guide <calibration>` and
more about the scikit-learn visualization API in :ref:`visualizations`.
.. versionadded:: 1.0
Parameters
----------
y_true : array-like of shape (n_samples,)
True labels.
y_prob : array-like of shape (n_samples,)
The predicted probabilities of the positive class.
n_bins : int, default=5
Number of bins to discretize the [0, 1] interval into when
calculating the calibration curve. A bigger number requires more
data.
strategy : {'uniform', 'quantile'}, default='uniform'
Strategy used to define the widths of the bins.
- `'uniform'`: The bins have identical widths.
- `'quantile'`: The bins have the same number of samples and depend
on predicted probabilities.
pos_label : int, float, bool or str, default=None
The positive class when computing the calibration curve.
By default `pos_label` is set to 1.
.. versionadded:: 1.1
name : str, default=None
Name for labeling curve.
ax : matplotlib axes, default=None
Axes object to plot on. If `None`, a new figure and axes is
created.
ref_line : bool, default=True
If `True`, plots a reference line representing a perfectly
calibrated classifier.
**kwargs : dict
Keyword arguments to be passed to :func:`matplotlib.pyplot.plot`.
Returns
-------
display : :class:`~sklearn.calibration.CalibrationDisplay`.
Object that stores computed values.
See Also
--------
CalibrationDisplay.from_estimator : Plot calibration curve using an
estimator and data.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sklearn.datasets import make_classification
>>> from sklearn.model_selection import train_test_split
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.calibration import CalibrationDisplay
>>> X, y = make_classification(random_state=0)
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, random_state=0)
>>> clf = LogisticRegression(random_state=0)
>>> clf.fit(X_train, y_train)
LogisticRegression(random_state=0)
>>> y_prob = clf.predict_proba(X_test)[:, 1]
>>> disp = CalibrationDisplay.from_predictions(y_test, y_prob)
>>> plt.show()
"""
pos_label_validated, name = cls._validate_from_predictions_params(
y_true, y_prob, sample_weight=None, pos_label=pos_label, name=name
)
prob_true, prob_pred = calibration_curve(
y_true, y_prob, n_bins=n_bins, strategy=strategy, pos_label=pos_label
)
disp = cls(
prob_true=prob_true,
prob_pred=prob_pred,
y_prob=y_prob,
estimator_name=name,
pos_label=pos_label_validated,
)
return disp.plot(ax=ax, ref_line=ref_line, **kwargs)
|
Plot calibration curve using true labels and predicted probabilities.
A calibration curve, also known as a reliability diagram, uses inputs
from a binary classifier and plots, for each bin, the average predicted
probability (x-axis) against the fraction of positive samples (y-axis).
Extra keyword arguments will be passed to
:func:`matplotlib.pyplot.plot`.
Read more about calibration in the :ref:`User Guide <calibration>` and
more about the scikit-learn visualization API in :ref:`visualizations`.
.. versionadded:: 1.0
Parameters
----------
y_true : array-like of shape (n_samples,)
True labels.
y_prob : array-like of shape (n_samples,)
The predicted probabilities of the positive class.
n_bins : int, default=5
Number of bins to discretize the [0, 1] interval into when
calculating the calibration curve. A bigger number requires more
data.
strategy : {'uniform', 'quantile'}, default='uniform'
Strategy used to define the widths of the bins.
- `'uniform'`: The bins have identical widths.
- `'quantile'`: The bins have the same number of samples and depend
on predicted probabilities.
pos_label : int, float, bool or str, default=None
The positive class when computing the calibration curve.
By default `pos_label` is set to 1.
.. versionadded:: 1.1
name : str, default=None
Name for labeling curve.
ax : matplotlib axes, default=None
Axes object to plot on. If `None`, a new figure and axes is
created.
ref_line : bool, default=True
If `True`, plots a reference line representing a perfectly
calibrated classifier.
**kwargs : dict
Keyword arguments to be passed to :func:`matplotlib.pyplot.plot`.
Returns
-------
display : :class:`~sklearn.calibration.CalibrationDisplay`.
Object that stores computed values.
See Also
--------
CalibrationDisplay.from_estimator : Plot calibration curve using an
estimator and data.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sklearn.datasets import make_classification
>>> from sklearn.model_selection import train_test_split
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.calibration import CalibrationDisplay
>>> X, y = make_classification(random_state=0)
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, random_state=0)
>>> clf = LogisticRegression(random_state=0)
>>> clf.fit(X_train, y_train)
LogisticRegression(random_state=0)
>>> y_prob = clf.predict_proba(X_test)[:, 1]
>>> disp = CalibrationDisplay.from_predictions(y_test, y_prob)
>>> plt.show()
|
from_predictions
|
python
|
scikit-learn/scikit-learn
|
sklearn/calibration.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/calibration.py
|
BSD-3-Clause
|
def _fetch_fixture(f):
"""Fetch dataset (download if missing and requested by environment)."""
download_if_missing = environ.get("SKLEARN_SKIP_NETWORK_TESTS", "1") == "0"
@wraps(f)
def wrapped(*args, **kwargs):
kwargs["download_if_missing"] = download_if_missing
try:
return f(*args, **kwargs)
except OSError as e:
if str(e) != "Data not found and `download_if_missing` is False":
raise
pytest.skip("test is enabled when SKLEARN_SKIP_NETWORK_TESTS=0")
return pytest.fixture(lambda: wrapped)
|
Fetch dataset (download if missing and requested by environment).
|
_fetch_fixture
|
python
|
scikit-learn/scikit-learn
|
sklearn/conftest.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/conftest.py
|
BSD-3-Clause
|
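A sketch of consuming such a fixture in a test module; the name `fetch_20newsgroups_fxt` is hypothetical, following the `<fetcher>_fxt` convention referenced in `pytest_collection_modifyitems` below. The fixture yields the wrapped fetcher, which skips the test when network tests are disabled.
def test_20newsgroups_nonempty(fetch_20newsgroups_fxt):  # hypothetical fixture name
    data = fetch_20newsgroups_fxt()
    assert len(data.data) > 0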
def pytest_collection_modifyitems(config, items):
"""Called after collect is completed.
Parameters
----------
config : pytest config
items : list of collected items
"""
run_network_tests = environ.get("SKLEARN_SKIP_NETWORK_TESTS", "1") == "0"
skip_network = pytest.mark.skip(
reason="test is enabled when SKLEARN_SKIP_NETWORK_TESTS=0"
)
# download datasets during collection to avoid thread unsafe behavior
# when running pytest in parallel with pytest-xdist
dataset_features_set = set(dataset_fetchers)
datasets_to_download = set()
for item in items:
if isinstance(item, DoctestItem) and "fetch_" in item.name:
fetcher_function_name = item.name.split(".")[-1]
dataset_fetchers_key = f"{fetcher_function_name}_fxt"
dataset_to_fetch = set([dataset_fetchers_key]) & dataset_features_set
elif not hasattr(item, "fixturenames"):
continue
else:
item_fixtures = set(item.fixturenames)
dataset_to_fetch = item_fixtures & dataset_features_set
if not dataset_to_fetch:
continue
if run_network_tests:
datasets_to_download |= dataset_to_fetch
else:
# network tests are skipped
item.add_marker(skip_network)
# Only download datasets on the first worker spawned by pytest-xdist
# to avoid thread unsafe behavior. If pytest-xdist is not used, we still
# download before tests run.
worker_id = environ.get("PYTEST_XDIST_WORKER", "gw0")
if worker_id == "gw0" and run_network_tests:
for name in datasets_to_download:
with suppress(SkipTest):
dataset_fetchers[name]()
for item in items:
        # Known failure with GradientBoostingClassifier on ARM64
if (
item.name.endswith("GradientBoostingClassifier")
and platform.machine() == "aarch64"
):
marker = pytest.mark.xfail(
reason=(
"know failure. See "
"https://github.com/scikit-learn/scikit-learn/issues/17797"
)
)
item.add_marker(marker)
skip_doctests = False
try:
import matplotlib # noqa: F401
except ImportError:
skip_doctests = True
reason = "matplotlib is required to run the doctests"
if _IS_32BIT:
reason = "doctest are only run when the default numpy int is 64 bits."
skip_doctests = True
elif sys.platform.startswith("win32"):
reason = (
"doctests are not run for Windows because numpy arrays "
"repr is inconsistent across platforms."
)
skip_doctests = True
if np_base_version < parse_version("2"):
# TODO: configure numpy to output scalar arrays as regular Python scalars
# once possible to improve readability of the tests docstrings.
# https://numpy.org/neps/nep-0051-scalar-representation.html#implementation
reason = "Due to NEP 51 numpy scalar repr has changed in numpy 2"
skip_doctests = True
if sp_version < parse_version("1.14"):
reason = "Scipy sparse matrix repr has changed in scipy 1.14"
skip_doctests = True
# Normally doctest has the entire module's scope. Here we set globs to an empty dict
# to remove the module's scope:
# https://docs.python.org/3/library/doctest.html#what-s-the-execution-context
for item in items:
if isinstance(item, DoctestItem):
item.dtest.globs = {}
if skip_doctests:
skip_marker = pytest.mark.skip(reason=reason)
for item in items:
if isinstance(item, DoctestItem):
# work-around an internal error with pytest if adding a skip
# mark to a doctest in a contextmanager, see
# https://github.com/pytest-dev/pytest/issues/8796 for more
# details.
if item.name != "sklearn._config.config_context":
item.add_marker(skip_marker)
try:
import PIL # noqa: F401
pillow_installed = True
except ImportError:
pillow_installed = False
if not pillow_installed:
skip_marker = pytest.mark.skip(reason="pillow (or PIL) not installed!")
for item in items:
if item.name in [
"sklearn.feature_extraction.image.PatchExtractor",
"sklearn.feature_extraction.image.extract_patches_2d",
]:
item.add_marker(skip_marker)
|
Called after collect is completed.
Parameters
----------
config : pytest config
items : list of collected items
|
pytest_collection_modifyitems
|
python
|
scikit-learn/scikit-learn
|
sklearn/conftest.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/conftest.py
|
BSD-3-Clause
|
def pyplot():
"""Setup and teardown fixture for matplotlib.
This fixture checks if we can import matplotlib. If not, the tests will be
skipped. Otherwise, we close the figures before and after running the
functions.
Returns
-------
pyplot : module
The ``matplotlib.pyplot`` module.
"""
pyplot = pytest.importorskip("matplotlib.pyplot")
pyplot.close("all")
yield pyplot
pyplot.close("all")
|
Setup and teardown fixture for matplotlib.
This fixture checks if we can import matplotlib. If not, the tests will be
skipped. Otherwise, we close the figures before and after running the
functions.
Returns
-------
pyplot : module
The ``matplotlib.pyplot`` module.
|
pyplot
|
python
|
scikit-learn/scikit-learn
|
sklearn/conftest.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/conftest.py
|
BSD-3-Clause
|
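A minimal sketch of a test consuming the `pyplot` fixture above: the test is skipped when matplotlib is missing, and figures are closed before and after it runs.
def test_tiny_plot(pyplot):
    fig, ax = pyplot.subplots()
    ax.plot([0, 1], [0, 1])
    assert len(ax.lines) == 1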
def pytest_generate_tests(metafunc):
"""Parametrization of global_random_seed fixture
based on the SKLEARN_TESTS_GLOBAL_RANDOM_SEED environment variable.
    The goal of this fixture is to prevent tests that use it from being sensitive
to a specific seed value while still being deterministic by default.
See the documentation for the SKLEARN_TESTS_GLOBAL_RANDOM_SEED
variable for instructions on how to use this fixture.
https://scikit-learn.org/dev/computing/parallelism.html#sklearn-tests-global-random-seed
"""
# When using pytest-xdist this function is called in the xdist workers.
# We rely on SKLEARN_TESTS_GLOBAL_RANDOM_SEED environment variable which is
    # set before running pytest and is available in xdist workers since they
# are subprocesses.
RANDOM_SEED_RANGE = list(range(100)) # All seeds in [0, 99] should be valid.
random_seed_var = environ.get("SKLEARN_TESTS_GLOBAL_RANDOM_SEED")
default_random_seeds = [42]
if random_seed_var is None:
random_seeds = default_random_seeds
elif random_seed_var == "all":
random_seeds = RANDOM_SEED_RANGE
else:
if "-" in random_seed_var:
start, stop = random_seed_var.split("-")
random_seeds = list(range(int(start), int(stop) + 1))
else:
random_seeds = [int(random_seed_var)]
if min(random_seeds) < 0 or max(random_seeds) > 99:
raise ValueError(
"The value(s) of the environment variable "
"SKLEARN_TESTS_GLOBAL_RANDOM_SEED must be in the range [0, 99] "
f"(or 'all'), got: {random_seed_var}"
)
if "global_random_seed" in metafunc.fixturenames:
metafunc.parametrize("global_random_seed", random_seeds)
|
Parametrization of global_random_seed fixture
based on the SKLEARN_TESTS_GLOBAL_RANDOM_SEED environment variable.
The goal of this fixture is to prevent tests that use it from being sensitive
to a specific seed value while still being deterministic by default.
See the documentation for the SKLEARN_TESTS_GLOBAL_RANDOM_SEED
variable for instructions on how to use this fixture.
https://scikit-learn.org/dev/computing/parallelism.html#sklearn-tests-global-random-seed
|
pytest_generate_tests
|
python
|
scikit-learn/scikit-learn
|
sklearn/conftest.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/conftest.py
|
BSD-3-Clause
|
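A hedged sketch of how a test would consume the parametrized `global_random_seed` fixture above; the test body and threshold are illustrative. Running with SKLEARN_TESTS_GLOBAL_RANDOM_SEED="40-42" would parametrize it over seeds 40, 41 and 42, per the range parsing above.
import numpy as np

def test_sample_mean_close_to_zero(global_random_seed):
    # One test instance is generated per seed selected via
    # SKLEARN_TESTS_GLOBAL_RANDOM_SEED (default: just seed 42).
    rng = np.random.RandomState(global_random_seed)
    sample = rng.normal(size=10_000)
    # The assertion must hold for every admissible seed in [0, 99].
    assert abs(sample.mean()) < 0.05
|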
def print_changed_only_false():
"""Set `print_changed_only` to False for the duration of the test."""
set_config(print_changed_only=False)
yield
set_config(print_changed_only=True) # reset to default
|
Set `print_changed_only` to False for the duration of the test.
|
print_changed_only_false
|
python
|
scikit-learn/scikit-learn
|
sklearn/conftest.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/conftest.py
|
BSD-3-Clause
|
def _cov(X, shrinkage=None, covariance_estimator=None):
"""Estimate covariance matrix (using optional covariance_estimator).
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
shrinkage : {'empirical', 'auto'} or float, default=None
Shrinkage parameter, possible values:
- None or 'empirical': no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Shrinkage parameter is ignored if `covariance_estimator`
is not None.
covariance_estimator : estimator, default=None
If not None, `covariance_estimator` is used to estimate
the covariance matrices instead of relying on the empirical
covariance estimator (with potential shrinkage).
The object should have a fit method and a ``covariance_`` attribute
        like the estimators in :mod:`sklearn.covariance`.
        If None, the shrinkage parameter drives the estimate.
.. versionadded:: 0.24
Returns
-------
s : ndarray of shape (n_features, n_features)
Estimated covariance matrix.
"""
if covariance_estimator is None:
shrinkage = "empirical" if shrinkage is None else shrinkage
if isinstance(shrinkage, str):
if shrinkage == "auto":
sc = StandardScaler() # standardize features
X = sc.fit_transform(X)
s = ledoit_wolf(X)[0]
# rescale
s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :]
elif shrinkage == "empirical":
s = empirical_covariance(X)
elif isinstance(shrinkage, Real):
s = shrunk_covariance(empirical_covariance(X), shrinkage)
else:
if shrinkage is not None and shrinkage != 0:
raise ValueError(
"covariance_estimator and shrinkage parameters "
"are not None. Only one of the two can be set."
)
covariance_estimator.fit(X)
if not hasattr(covariance_estimator, "covariance_"):
raise ValueError(
"%s does not have a covariance_ attribute"
% covariance_estimator.__class__.__name__
)
s = covariance_estimator.covariance_
return s
|
Estimate covariance matrix (using optional covariance_estimator).
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
shrinkage : {'empirical', 'auto'} or float, default=None
Shrinkage parameter, possible values:
- None or 'empirical': no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Shrinkage parameter is ignored if `covariance_estimator`
is not None.
covariance_estimator : estimator, default=None
If not None, `covariance_estimator` is used to estimate
the covariance matrices instead of relying on the empirical
covariance estimator (with potential shrinkage).
The object should have a fit method and a ``covariance_`` attribute
        like the estimators in :mod:`sklearn.covariance`.
        If None, the shrinkage parameter drives the estimate.
.. versionadded:: 0.24
Returns
-------
s : ndarray of shape (n_features, n_features)
Estimated covariance matrix.
|
_cov
|
python
|
scikit-learn/scikit-learn
|
sklearn/discriminant_analysis.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/discriminant_analysis.py
|
BSD-3-Clause
|
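The three shrinkage branches of `_cov` can be reproduced with public `sklearn.covariance` helpers; a small sketch on arbitrary random data (not a definitive re-implementation):
import numpy as np
from sklearn.covariance import empirical_covariance, ledoit_wolf, shrunk_covariance
from sklearn.preprocessing import StandardScaler

rng = np.random.RandomState(0)
X = rng.randn(200, 5)

# shrinkage=None / 'empirical': plain maximum-likelihood covariance.
s_emp = empirical_covariance(X)

# shrinkage='auto': Ledoit-Wolf on standardized features, rescaled back,
# mirroring the two-step 'auto' branch above.
sc = StandardScaler().fit(X)
s_lw = ledoit_wolf(sc.transform(X))[0]
s_lw = sc.scale_[:, np.newaxis] * s_lw * sc.scale_[np.newaxis, :]

# A float in (0, 1): convex combination with a scaled identity matrix.
s_shrunk = shrunk_covariance(empirical_covariance(X), shrinkage=0.1)
|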
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
means : array-like of shape (n_classes, n_features)
Class means.
"""
xp, is_array_api_compliant = get_namespace(X)
classes, y = xp.unique_inverse(y)
means = xp.zeros((classes.shape[0], X.shape[1]), device=device(X), dtype=X.dtype)
if is_array_api_compliant:
for i in range(classes.shape[0]):
means[i, :] = xp.mean(X[y == i], axis=0)
else:
        # TODO: Explore the choice of using bincount + add.at as it seems
        # suboptimal performance-wise
cnt = np.bincount(y)
np.add.at(means, y, X)
means /= cnt[:, None]
return means
|
Compute class means.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
means : array-like of shape (n_classes, n_features)
Class means.
|
_class_means
|
python
|
scikit-learn/scikit-learn
|
sklearn/discriminant_analysis.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/discriminant_analysis.py
|
BSD-3-Clause
|
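The NumPy branch of `_class_means` relies on unbuffered accumulation; a self-contained sketch of the same bincount + `np.add.at` trick on toy data (the data is made up):
import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(6, 2)
y = np.array([0, 1, 0, 2, 1, 0])

classes, y_idx = np.unique(y, return_inverse=True)
means = np.zeros((classes.shape[0], X.shape[1]))
np.add.at(means, y_idx, X)             # unbuffered per-class accumulation
means /= np.bincount(y_idx)[:, None]   # divide each row by its class count

# Same result as an explicit loop over classes.
expected = np.vstack([X[y == c].mean(axis=0) for c in classes])
assert np.allclose(means, expected)
|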
def _class_cov(X, y, priors, shrinkage=None, covariance_estimator=None):
"""Compute weighted within-class covariance matrix.
    The per-class covariances are weighted by the class priors.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values.
priors : array-like of shape (n_classes,)
Class priors.
shrinkage : 'auto' or float, default=None
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Shrinkage parameter is ignored if `covariance_estimator` is not None.
covariance_estimator : estimator, default=None
If not None, `covariance_estimator` is used to estimate
        the covariance matrices instead of relying on the empirical
covariance estimator (with potential shrinkage).
The object should have a fit method and a ``covariance_`` attribute
like the estimators in sklearn.covariance.
If None, the shrinkage parameter drives the estimate.
.. versionadded:: 0.24
Returns
-------
cov : array-like of shape (n_features, n_features)
        Weighted within-class covariance matrix.
"""
classes = np.unique(y)
cov = np.zeros(shape=(X.shape[1], X.shape[1]))
for idx, group in enumerate(classes):
Xg = X[y == group, :]
cov += priors[idx] * np.atleast_2d(_cov(Xg, shrinkage, covariance_estimator))
return cov
|
Compute weighted within-class covariance matrix.
The per-class covariances are weighted by the class priors.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values.
priors : array-like of shape (n_classes,)
Class priors.
shrinkage : 'auto' or float, default=None
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Shrinkage parameter is ignored if `covariance_estimator` is not None.
covariance_estimator : estimator, default=None
If not None, `covariance_estimator` is used to estimate
    the covariance matrices instead of relying on the empirical
covariance estimator (with potential shrinkage).
The object should have a fit method and a ``covariance_`` attribute
like the estimators in sklearn.covariance.
If None, the shrinkage parameter drives the estimate.
.. versionadded:: 0.24
Returns
-------
cov : array-like of shape (n_features, n_features)
    Weighted within-class covariance matrix.
|
_class_cov
|
python
|
scikit-learn/scikit-learn
|
sklearn/discriminant_analysis.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/discriminant_analysis.py
|
BSD-3-Clause
|
def decision_function(self, X):
"""Apply decision function to an array of samples.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Array of samples (test vectors).
Returns
-------
y_scores : ndarray of shape (n_samples,) or (n_samples, n_classes)
Decision function values related to each class, per sample.
In the two-class case, the shape is `(n_samples,)`, giving the
log likelihood ratio of the positive class.
"""
y_scores = self._decision_function(X)
if len(self.classes_) == 2:
return y_scores[:, 1] - y_scores[:, 0]
return y_scores
|
Apply decision function to an array of samples.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Array of samples (test vectors).
Returns
-------
y_scores : ndarray of shape (n_samples,) or (n_samples, n_classes)
Decision function values related to each class, per sample.
In the two-class case, the shape is `(n_samples,)`, giving the
log likelihood ratio of the positive class.
|
decision_function
|
python
|
scikit-learn/scikit-learn
|
sklearn/discriminant_analysis.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/discriminant_analysis.py
|
BSD-3-Clause
|
def predict_log_proba(self, X):
"""Estimate log class probabilities.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data.
Returns
-------
y_log_proba : ndarray of shape (n_samples, n_classes)
Estimated log probabilities.
"""
scores = self._decision_function(X)
log_likelihood = scores - scores.max(axis=1)[:, np.newaxis]
return log_likelihood - np.log(
np.exp(log_likelihood).sum(axis=1)[:, np.newaxis]
)
|
Estimate log class probabilities.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data.
Returns
-------
y_log_proba : ndarray of shape (n_samples, n_classes)
Estimated log probabilities.
|
predict_log_proba
|
python
|
scikit-learn/scikit-learn
|
sklearn/discriminant_analysis.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/discriminant_analysis.py
|
BSD-3-Clause
|
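The max-subtraction in `predict_log_proba` above is the standard log-sum-exp stabilization; a short illustration with scores large enough to overflow a naive softmax (the scores are made up):
import numpy as np

scores = np.array([[1000.0, 1001.0], [3.0, 1.0]])

# Subtracting the row-wise max keeps np.exp in a safe range; without it,
# np.exp(1000.0) overflows to inf and the log-probabilities become nan.
shifted = scores - scores.max(axis=1)[:, np.newaxis]
log_proba = shifted - np.log(np.exp(shifted).sum(axis=1)[:, np.newaxis])

assert np.allclose(np.exp(log_proba).sum(axis=1), 1.0)
|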
def _solve_lstsq(self, X, y, shrinkage, covariance_estimator):
"""Least squares solver.
The least squares solver computes a straightforward solution of the
optimal decision rule based directly on the discriminant functions. It
        can only be used for classification (with any covariance estimator),
        because estimation of eigenvectors is not performed. Therefore,
        dimensionality reduction with the transform is not supported.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_classes)
Target values.
shrinkage : 'auto', float or None
Shrinkage parameter, possible values:
- None: no shrinkage.
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
            Shrinkage parameter is ignored if `covariance_estimator` is
            not None.
        covariance_estimator : estimator, default=None
            If not None, `covariance_estimator` is used to estimate
            the covariance matrices instead of relying on the empirical
            covariance estimator (with potential shrinkage).
            The object should have a fit method and a ``covariance_`` attribute
            like the estimators in sklearn.covariance.
            If None, the shrinkage parameter drives the estimate.
.. versionadded:: 0.24
Notes
-----
This solver is based on [1]_, section 2.6.2, pp. 39-41.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(
X, y, self.priors_, shrinkage, covariance_estimator
)
self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
self.intercept_ = -0.5 * np.diag(np.dot(self.means_, self.coef_.T)) + np.log(
self.priors_
)
|
Least squares solver.
The least squares solver computes a straightforward solution of the
optimal decision rule based directly on the discriminant functions. It
can only be used for classification (with any covariance estimator),
because estimation of eigenvectors is not performed. Therefore,
dimensionality reduction with the transform is not supported.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_classes)
Target values.
shrinkage : 'auto', float or None
Shrinkage parameter, possible values:
- None: no shrinkage.
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
        Shrinkage parameter is ignored if `covariance_estimator` is
        not None.
    covariance_estimator : estimator, default=None
        If not None, `covariance_estimator` is used to estimate
        the covariance matrices instead of relying on the empirical
        covariance estimator (with potential shrinkage).
        The object should have a fit method and a ``covariance_`` attribute
        like the estimators in sklearn.covariance.
        If None, the shrinkage parameter drives the estimate.
.. versionadded:: 0.24
Notes
-----
This solver is based on [1]_, section 2.6.2, pp. 39-41.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
|
_solve_lstsq
|
python
|
scikit-learn/scikit-learn
|
sklearn/discriminant_analysis.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/discriminant_analysis.py
|
BSD-3-Clause
|
def _solve_eigen(self, X, y, shrinkage, covariance_estimator):
"""Eigenvalue solver.
The eigenvalue solver computes the optimal solution of the Rayleigh
coefficient (basically the ratio of between class scatter to within
class scatter). This solver supports both classification and
dimensionality reduction (with any covariance estimator).
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values.
shrinkage : 'auto', float or None
Shrinkage parameter, possible values:
- None: no shrinkage.
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage constant.
            Shrinkage parameter is ignored if `covariance_estimator` is
            not None.
        covariance_estimator : estimator, default=None
            If not None, `covariance_estimator` is used to estimate
            the covariance matrices instead of relying on the empirical
            covariance estimator (with potential shrinkage).
            The object should have a fit method and a ``covariance_`` attribute
            like the estimators in sklearn.covariance.
            If None, the shrinkage parameter drives the estimate.
.. versionadded:: 0.24
Notes
-----
This solver is based on [1]_, section 3.8.3, pp. 121-124.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(
X, y, self.priors_, shrinkage, covariance_estimator
)
Sw = self.covariance_ # within scatter
St = _cov(X, shrinkage, covariance_estimator) # total scatter
Sb = St - Sw # between scatter
evals, evecs = linalg.eigh(Sb, Sw)
self.explained_variance_ratio_ = np.sort(evals / np.sum(evals))[::-1][
: self._max_components
]
evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
self.scalings_ = evecs
self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
self.intercept_ = -0.5 * np.diag(np.dot(self.means_, self.coef_.T)) + np.log(
self.priors_
)
|
Eigenvalue solver.
The eigenvalue solver computes the optimal solution of the Rayleigh
coefficient (basically the ratio of between class scatter to within
class scatter). This solver supports both classification and
dimensionality reduction (with any covariance estimator).
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values.
shrinkage : 'auto', float or None
Shrinkage parameter, possible values:
- None: no shrinkage.
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage constant.
        Shrinkage parameter is ignored if `covariance_estimator` is
        not None.
    covariance_estimator : estimator, default=None
        If not None, `covariance_estimator` is used to estimate
        the covariance matrices instead of relying on the empirical
        covariance estimator (with potential shrinkage).
        The object should have a fit method and a ``covariance_`` attribute
        like the estimators in sklearn.covariance.
        If None, the shrinkage parameter drives the estimate.
.. versionadded:: 0.24
Notes
-----
This solver is based on [1]_, section 3.8.3, pp. 121-124.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
|
_solve_eigen
|
python
|
scikit-learn/scikit-learn
|
sklearn/discriminant_analysis.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/discriminant_analysis.py
|
BSD-3-Clause
|
def _solve_svd(self, X, y):
"""SVD solver.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values.
"""
xp, is_array_api_compliant = get_namespace(X)
if is_array_api_compliant:
svd = xp.linalg.svd
else:
svd = scipy.linalg.svd
n_samples, n_features = X.shape
n_classes = self.classes_.shape[0]
self.means_ = _class_means(X, y)
if self.store_covariance:
self.covariance_ = _class_cov(X, y, self.priors_)
Xc = []
for idx, group in enumerate(self.classes_):
Xg = X[y == group]
Xc.append(Xg - self.means_[idx, :])
self.xbar_ = self.priors_ @ self.means_
Xc = xp.concat(Xc, axis=0)
        # 1) within (univariate) scaling by within-class std-dev
std = xp.std(Xc, axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.0
fac = xp.asarray(1.0 / (n_samples - n_classes), dtype=X.dtype, device=device(X))
# 2) Within variance scaling
X = xp.sqrt(fac) * (Xc / std)
        # SVD of centered, (within-)scaled data
U, S, Vt = svd(X, full_matrices=False)
rank = xp.sum(xp.astype(S > self.tol, xp.int32))
# Scaling of within covariance is: V' 1/S
scalings = (Vt[:rank, :] / std).T / S[:rank]
fac = 1.0 if n_classes == 1 else 1.0 / (n_classes - 1)
# 3) Between variance scaling
# Scale weighted centers
X = (
(xp.sqrt((n_samples * self.priors_) * fac)) * (self.means_ - self.xbar_).T
).T @ scalings
# Centers are living in a space with n_classes-1 dim (maximum)
# Use SVD to find projection in the space spanned by the
# (n_classes) centers
_, S, Vt = svd(X, full_matrices=False)
if self._max_components == 0:
self.explained_variance_ratio_ = xp.empty((0,), dtype=S.dtype)
else:
self.explained_variance_ratio_ = (S**2 / xp.sum(S**2))[
: self._max_components
]
rank = xp.sum(xp.astype(S > self.tol * S[0], xp.int32))
self.scalings_ = scalings @ Vt.T[:, :rank]
coef = (self.means_ - self.xbar_) @ self.scalings_
self.intercept_ = -0.5 * xp.sum(coef**2, axis=1) + xp.log(self.priors_)
self.coef_ = coef @ self.scalings_.T
self.intercept_ -= self.xbar_ @ self.coef_.T
|
SVD solver.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values.
|
_solve_svd
|
python
|
scikit-learn/scikit-learn
|
sklearn/discriminant_analysis.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/discriminant_analysis.py
|
BSD-3-Clause
|
def fit(self, X, y):
"""Fit the Linear Discriminant Analysis model.
.. versionchanged:: 0.19
            `store_covariance` and `tol` have been moved to the main constructor.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : object
Fitted estimator.
"""
xp, _ = get_namespace(X)
X, y = validate_data(
self, X, y, ensure_min_samples=2, dtype=[xp.float64, xp.float32]
)
self.classes_ = unique_labels(y)
n_samples, _ = X.shape
n_classes = self.classes_.shape[0]
if n_samples == n_classes:
raise ValueError(
"The number of samples must be more than the number of classes."
)
if self.priors is None: # estimate priors from sample
_, cnts = xp.unique_counts(y) # non-negative ints
self.priors_ = xp.astype(cnts, X.dtype) / float(y.shape[0])
else:
self.priors_ = xp.asarray(self.priors, dtype=X.dtype)
if xp.any(self.priors_ < 0):
raise ValueError("priors must be non-negative")
if xp.abs(xp.sum(self.priors_) - 1.0) > 1e-5:
warnings.warn("The priors do not sum to 1. Renormalizing", UserWarning)
self.priors_ = self.priors_ / self.priors_.sum()
# Maximum number of components no matter what n_components is
# specified:
max_components = min(n_classes - 1, X.shape[1])
if self.n_components is None:
self._max_components = max_components
else:
if self.n_components > max_components:
raise ValueError(
"n_components cannot be larger than min(n_features, n_classes - 1)."
)
self._max_components = self.n_components
if self.solver == "svd":
if self.shrinkage is not None:
raise NotImplementedError("shrinkage not supported with 'svd' solver.")
if self.covariance_estimator is not None:
raise ValueError(
"covariance estimator "
"is not supported "
"with svd solver. Try another solver"
)
self._solve_svd(X, y)
elif self.solver == "lsqr":
self._solve_lstsq(
X,
y,
shrinkage=self.shrinkage,
covariance_estimator=self.covariance_estimator,
)
elif self.solver == "eigen":
self._solve_eigen(
X,
y,
shrinkage=self.shrinkage,
covariance_estimator=self.covariance_estimator,
)
if size(self.classes_) == 2: # treat binary case as a special case
coef_ = xp.asarray(self.coef_[1, :] - self.coef_[0, :], dtype=X.dtype)
self.coef_ = xp.reshape(coef_, (1, -1))
intercept_ = xp.asarray(
self.intercept_[1] - self.intercept_[0], dtype=X.dtype
)
self.intercept_ = xp.reshape(intercept_, (1,))
self._n_features_out = self._max_components
return self
|
Fit the Linear Discriminant Analysis model.
.. versionchanged:: 0.19
    `store_covariance` and `tol` have been moved to the main constructor.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : object
Fitted estimator.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/discriminant_analysis.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/discriminant_analysis.py
|
BSD-3-Clause
|
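A hedged end-to-end usage sketch of the `fit` method above on synthetic blobs (class means, sizes, and the printed attributes chosen for illustration):
import numpy as np
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

rng = np.random.RandomState(0)
means = np.array([[0.0, 0.0, 0.0], [3.0, 3.0, 0.0], [0.0, 3.0, 3.0]])
X = np.vstack([rng.randn(50, 3) + m for m in means])
y = np.repeat([0, 1, 2], 50)

lda = LinearDiscriminantAnalysis(solver="svd", n_components=2).fit(X, y)
print(lda.priors_)                    # estimated from class frequencies
print(lda.explained_variance_ratio_)  # at most min(n_features, n_classes - 1)
print(lda.transform(X).shape)         # (150, 2)
|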
def transform(self, X):
"""Project data to maximize class separation.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
Returns
-------
X_new : ndarray of shape (n_samples, n_components) or \
(n_samples, min(rank, n_components))
Transformed data. In the case of the 'svd' solver, the shape
is (n_samples, min(rank, n_components)).
"""
if self.solver == "lsqr":
raise NotImplementedError(
"transform not implemented for 'lsqr' solver (use 'svd' or 'eigen')."
)
check_is_fitted(self)
xp, _ = get_namespace(X)
X = validate_data(self, X, reset=False)
if self.solver == "svd":
X_new = (X - self.xbar_) @ self.scalings_
elif self.solver == "eigen":
X_new = X @ self.scalings_
return X_new[:, : self._max_components]
|
Project data to maximize class separation.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
Returns
-------
X_new : ndarray of shape (n_samples, n_components) or (n_samples, min(rank, n_components))
Transformed data. In the case of the 'svd' solver, the shape
is (n_samples, min(rank, n_components)).
|
transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/discriminant_analysis.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/discriminant_analysis.py
|
BSD-3-Clause
|
def predict_proba(self, X):
"""Estimate probability.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
Returns
-------
C : ndarray of shape (n_samples, n_classes)
Estimated probabilities.
"""
check_is_fitted(self)
xp, is_array_api_compliant = get_namespace(X)
decision = self.decision_function(X)
if size(self.classes_) == 2:
proba = _expit(decision, xp)
return xp.stack([1 - proba, proba], axis=1)
else:
return softmax(decision)
|
Estimate probability.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
Returns
-------
C : ndarray of shape (n_samples, n_classes)
Estimated probabilities.
|
predict_proba
|
python
|
scikit-learn/scikit-learn
|
sklearn/discriminant_analysis.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/discriminant_analysis.py
|
BSD-3-Clause
|
def predict_log_proba(self, X):
"""Estimate log probability.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
Returns
-------
C : ndarray of shape (n_samples, n_classes)
Estimated log probabilities.
"""
xp, _ = get_namespace(X)
prediction = self.predict_proba(X)
info = xp.finfo(prediction.dtype)
if hasattr(info, "smallest_normal"):
smallest_normal = info.smallest_normal
else:
# smallest_normal was introduced in NumPy 1.22
smallest_normal = info.tiny
prediction[prediction == 0.0] += smallest_normal
return xp.log(prediction)
|
Estimate log probability.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
Returns
-------
C : ndarray of shape (n_samples, n_classes)
Estimated log probabilities.
|
predict_log_proba
|
python
|
scikit-learn/scikit-learn
|
sklearn/discriminant_analysis.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/discriminant_analysis.py
|
BSD-3-Clause
|
def fit(self, X, y):
"""Fit the model according to the given training data and parameters.
.. versionchanged:: 0.19
``store_covariances`` has been moved to main constructor as
``store_covariance``.
.. versionchanged:: 0.19
``tol`` has been moved to main constructor.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values (integers).
Returns
-------
self : object
Fitted estimator.
"""
X, y = validate_data(self, X, y)
check_classification_targets(y)
self.classes_, y = np.unique(y, return_inverse=True)
n_samples, n_features = X.shape
n_classes = len(self.classes_)
if n_classes < 2:
raise ValueError(
"The number of classes has to be greater than one; got %d class"
% (n_classes)
)
if self.priors is None:
self.priors_ = np.bincount(y) / float(n_samples)
else:
self.priors_ = np.array(self.priors)
cov = None
store_covariance = self.store_covariance
if store_covariance:
cov = []
means = []
scalings = []
rotations = []
for ind in range(n_classes):
Xg = X[y == ind, :]
meang = Xg.mean(0)
means.append(meang)
if len(Xg) == 1:
raise ValueError(
"y has only 1 sample in class %s, covariance is ill defined."
% str(self.classes_[ind])
)
Xgc = Xg - meang
# Xgc = U * S * V.T
_, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
S2 = (S**2) / (len(Xg) - 1)
S2 = ((1 - self.reg_param) * S2) + self.reg_param
rank = np.sum(S2 > self.tol)
if rank < n_features:
warnings.warn(
f"The covariance matrix of class {ind} is not full rank. "
"Increasing the value of parameter `reg_param` might help"
" reducing the collinearity.",
linalg.LinAlgWarning,
)
if self.store_covariance or store_covariance:
# cov = V * (S^2 / (n-1)) * V.T
cov.append(np.dot(S2 * Vt.T, Vt))
scalings.append(S2)
rotations.append(Vt.T)
if self.store_covariance or store_covariance:
self.covariance_ = cov
self.means_ = np.asarray(means)
self.scalings_ = scalings
self.rotations_ = rotations
return self
|
Fit the model according to the given training data and parameters.
.. versionchanged:: 0.19
``store_covariances`` has been moved to main constructor as
``store_covariance``.
.. versionchanged:: 0.19
``tol`` has been moved to main constructor.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values (integers).
Returns
-------
self : object
Fitted estimator.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/discriminant_analysis.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/discriminant_analysis.py
|
BSD-3-Clause
|
def predict_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Array of samples/test vectors.
Returns
-------
C : ndarray of shape (n_samples, n_classes)
Posterior probabilities of classification per class.
"""
# compute the likelihood of the underlying gaussian models
# up to a multiplicative constant.
return super().predict_proba(X)
|
Return posterior probabilities of classification.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Array of samples/test vectors.
Returns
-------
C : ndarray of shape (n_samples, n_classes)
Posterior probabilities of classification per class.
|
predict_proba
|
python
|
scikit-learn/scikit-learn
|
sklearn/discriminant_analysis.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/discriminant_analysis.py
|
BSD-3-Clause
|
def fit(self, X, y, sample_weight=None):
"""Fit the baseline classifier.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
self : object
Returns the instance itself.
"""
validate_data(self, X, skip_check_array=True)
self._strategy = self.strategy
if self._strategy == "uniform" and sp.issparse(y):
y = y.toarray()
warnings.warn(
(
"A local copy of the target data has been converted "
"to a numpy array. Predicting on sparse target data "
"with the uniform strategy would not save memory "
"and would be slower."
),
UserWarning,
)
self.sparse_output_ = sp.issparse(y)
if not self.sparse_output_:
y = np.asarray(y)
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
check_consistent_length(X, y)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
if self._strategy == "constant":
if self.constant is None:
raise ValueError(
"Constant target value has to be specified "
"when the constant strategy is used."
)
else:
constant = np.reshape(np.atleast_1d(self.constant), (-1, 1))
if constant.shape[0] != self.n_outputs_:
raise ValueError(
"Constant target value should have shape (%d, 1)."
% self.n_outputs_
)
(self.classes_, self.n_classes_, self.class_prior_) = class_distribution(
y, sample_weight
)
if self._strategy == "constant":
for k in range(self.n_outputs_):
if not any(constant[k][0] == c for c in self.classes_[k]):
# Checking in case of constant strategy if the constant
# provided by the user is in y.
err_msg = (
"The constant target value must be present in "
"the training data. You provided constant={}. "
"Possible values are: {}.".format(
self.constant, self.classes_[k].tolist()
)
)
raise ValueError(err_msg)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
self.class_prior_ = self.class_prior_[0]
return self
|
Fit the baseline classifier.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
self : object
Returns the instance itself.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/dummy.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/dummy.py
|
BSD-3-Clause
|
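A small sketch exercising the strategies validated by the `fit` method above; features are dummies since DummyClassifier ignores X, and the toy labels are arbitrary:
import numpy as np
from sklearn.dummy import DummyClassifier

X = np.zeros((6, 1))              # features are ignored by every strategy
y = np.array([0, 0, 0, 1, 1, 2])

for strategy in ("most_frequent", "prior", "stratified", "uniform"):
    clf = DummyClassifier(strategy=strategy, random_state=0).fit(X, y)
    print(strategy, clf.predict(X))

# 'constant' must name a label present in y, otherwise fit() raises the
# "must be present in the training data" ValueError above.
DummyClassifier(strategy="constant", constant=1).fit(X, y)
|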
def predict(self, X):
"""Perform classification on test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test data.
Returns
-------
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
Predicted target values for X.
"""
check_is_fitted(self)
# numpy random_state expects Python int and not long as size argument
# under Windows
n_samples = _num_samples(X)
rs = check_random_state(self.random_state)
n_classes_ = self.n_classes_
classes_ = self.classes_
class_prior_ = self.class_prior_
constant = self.constant
if self.n_outputs_ == 1:
# Get same type even for self.n_outputs_ == 1
n_classes_ = [n_classes_]
classes_ = [classes_]
class_prior_ = [class_prior_]
constant = [constant]
# Compute probability only once
if self._strategy == "stratified":
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
proba = [proba]
if self.sparse_output_:
class_prob = None
if self._strategy in ("most_frequent", "prior"):
classes_ = [np.array([cp.argmax()]) for cp in class_prior_]
elif self._strategy == "stratified":
class_prob = class_prior_
elif self._strategy == "uniform":
raise ValueError(
"Sparse target prediction is not "
"supported with the uniform strategy"
)
elif self._strategy == "constant":
classes_ = [np.array([c]) for c in constant]
y = _random_choice_csc(n_samples, classes_, class_prob, self.random_state)
else:
if self._strategy in ("most_frequent", "prior"):
y = np.tile(
[
classes_[k][class_prior_[k].argmax()]
for k in range(self.n_outputs_)
],
[n_samples, 1],
)
elif self._strategy == "stratified":
y = np.vstack(
[
classes_[k][proba[k].argmax(axis=1)]
for k in range(self.n_outputs_)
]
).T
elif self._strategy == "uniform":
ret = [
classes_[k][rs.randint(n_classes_[k], size=n_samples)]
for k in range(self.n_outputs_)
]
y = np.vstack(ret).T
elif self._strategy == "constant":
y = np.tile(self.constant, (n_samples, 1))
if self.n_outputs_ == 1:
y = np.ravel(y)
return y
|
Perform classification on test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test data.
Returns
-------
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
Predicted target values for X.
|
predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/dummy.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/dummy.py
|
BSD-3-Clause
|
def predict_proba(self, X):
"""
Return probability estimates for the test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test data.
Returns
-------
P : ndarray of shape (n_samples, n_classes) or list of such arrays
Returns the probability of the sample for each class in
the model, where classes are ordered arithmetically, for each
output.
"""
check_is_fitted(self)
# numpy random_state expects Python int and not long as size argument
# under Windows
n_samples = _num_samples(X)
rs = check_random_state(self.random_state)
n_classes_ = self.n_classes_
classes_ = self.classes_
class_prior_ = self.class_prior_
constant = self.constant
if self.n_outputs_ == 1:
# Get same type even for self.n_outputs_ == 1
n_classes_ = [n_classes_]
classes_ = [classes_]
class_prior_ = [class_prior_]
constant = [constant]
P = []
for k in range(self.n_outputs_):
if self._strategy == "most_frequent":
ind = class_prior_[k].argmax()
out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64)
out[:, ind] = 1.0
elif self._strategy == "prior":
out = np.ones((n_samples, 1)) * class_prior_[k]
elif self._strategy == "stratified":
out = rs.multinomial(1, class_prior_[k], size=n_samples)
out = out.astype(np.float64)
elif self._strategy == "uniform":
out = np.ones((n_samples, n_classes_[k]), dtype=np.float64)
out /= n_classes_[k]
elif self._strategy == "constant":
ind = np.where(classes_[k] == constant[k])
out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64)
out[:, ind] = 1.0
P.append(out)
if self.n_outputs_ == 1:
P = P[0]
return P
|
Return probability estimates for the test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test data.
Returns
-------
P : ndarray of shape (n_samples, n_classes) or list of such arrays
Returns the probability of the sample for each class in
the model, where classes are ordered arithmetically, for each
output.
|
predict_proba
|
python
|
scikit-learn/scikit-learn
|
sklearn/dummy.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/dummy.py
|
BSD-3-Clause
|
def predict_log_proba(self, X):
"""
Return log probability estimates for the test vectors X.
Parameters
----------
X : {array-like, object with finite length or shape}
            Test data.
Returns
-------
P : ndarray of shape (n_samples, n_classes) or list of such arrays
Returns the log probability of the sample for each class in
the model, where classes are ordered arithmetically for each
output.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
return [np.log(p) for p in proba]
|
Return log probability estimates for the test vectors X.
Parameters
----------
X : {array-like, object with finite length or shape}
    Test data.
Returns
-------
P : ndarray of shape (n_samples, n_classes) or list of such arrays
Returns the log probability of the sample for each class in
the model, where classes are ordered arithmetically for each
output.
|
predict_log_proba
|
python
|
scikit-learn/scikit-learn
|
sklearn/dummy.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/dummy.py
|
BSD-3-Clause
|
def score(self, X, y, sample_weight=None):
"""Return the mean accuracy on the given test data and labels.
In multi-label classification, this is the subset accuracy
which is a harsh metric since you require for each sample that
each label set be correctly predicted.
Parameters
----------
X : None or array-like of shape (n_samples, n_features)
Test samples. Passing None as test samples gives the same result
as passing real test samples, since DummyClassifier
operates independently of the sampled observations.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
True labels for X.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
Mean accuracy of self.predict(X) w.r.t. y.
"""
if X is None:
X = np.zeros(shape=(len(y), 1))
return super().score(X, y, sample_weight)
|
Return the mean accuracy on the given test data and labels.
In multi-label classification, this is the subset accuracy
which is a harsh metric since you require for each sample that
each label set be correctly predicted.
Parameters
----------
X : None or array-like of shape (n_samples, n_features)
Test samples. Passing None as test samples gives the same result
as passing real test samples, since DummyClassifier
operates independently of the sampled observations.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
True labels for X.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
Mean accuracy of self.predict(X) w.r.t. y.
|
score
|
python
|
scikit-learn/scikit-learn
|
sklearn/dummy.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/dummy.py
|
BSD-3-Clause
|
def fit(self, X, y, sample_weight=None):
"""Fit the baseline regressor.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
self : object
Fitted estimator.
"""
validate_data(self, X, skip_check_array=True)
y = check_array(y, ensure_2d=False, input_name="y")
if len(y) == 0:
raise ValueError("y must not be empty.")
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
check_consistent_length(X, y, sample_weight)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
if self.strategy == "mean":
self.constant_ = np.average(y, axis=0, weights=sample_weight)
elif self.strategy == "median":
if sample_weight is None:
self.constant_ = np.median(y, axis=0)
else:
self.constant_ = [
_weighted_percentile(y[:, k], sample_weight, percentile_rank=50.0)
for k in range(self.n_outputs_)
]
elif self.strategy == "quantile":
if self.quantile is None:
raise ValueError(
"When using `strategy='quantile', you have to specify the desired "
"quantile in the range [0, 1]."
)
percentile_rank = self.quantile * 100.0
if sample_weight is None:
self.constant_ = np.percentile(y, axis=0, q=percentile_rank)
else:
self.constant_ = [
_weighted_percentile(
y[:, k], sample_weight, percentile_rank=percentile_rank
)
for k in range(self.n_outputs_)
]
elif self.strategy == "constant":
if self.constant is None:
raise TypeError(
"Constant target value has to be specified "
"when the constant strategy is used."
)
self.constant_ = check_array(
self.constant,
accept_sparse=["csr", "csc", "coo"],
ensure_2d=False,
ensure_min_samples=0,
)
if self.n_outputs_ != 1 and self.constant_.shape[0] != y.shape[1]:
raise ValueError(
"Constant target value should have shape (%d, 1)." % y.shape[1]
)
self.constant_ = np.reshape(self.constant_, (1, -1))
return self
|
Fit the baseline regressor.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
self : object
Fitted estimator.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/dummy.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/dummy.py
|
BSD-3-Clause
|
def predict(self, X, return_std=False):
"""Perform classification on test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test data.
return_std : bool, default=False
Whether to return the standard deviation of posterior prediction.
All zeros in this case.
.. versionadded:: 0.20
Returns
-------
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
Predicted target values for X.
y_std : array-like of shape (n_samples,) or (n_samples, n_outputs)
Standard deviation of predictive distribution of query points.
"""
check_is_fitted(self)
n_samples = _num_samples(X)
y = np.full(
(n_samples, self.n_outputs_),
self.constant_,
dtype=np.array(self.constant_).dtype,
)
y_std = np.zeros((n_samples, self.n_outputs_))
if self.n_outputs_ == 1:
y = np.ravel(y)
y_std = np.ravel(y_std)
return (y, y_std) if return_std else y
|
Perform prediction on test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test data.
return_std : bool, default=False
Whether to return the standard deviation of posterior prediction.
All zeros in this case.
.. versionadded:: 0.20
Returns
-------
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
Predicted target values for X.
y_std : array-like of shape (n_samples,) or (n_samples, n_outputs)
Standard deviation of predictive distribution of query points.
|
predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/dummy.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/dummy.py
|
BSD-3-Clause
|
def score(self, X, y, sample_weight=None):
"""Return the coefficient of determination R^2 of the prediction.
The coefficient R^2 is defined as `(1 - u/v)`, where `u` is the
residual sum of squares `((y_true - y_pred) ** 2).sum()` and `v` is the
total sum of squares `((y_true - y_true.mean()) ** 2).sum()`. The best
possible score is 1.0 and it can be negative (because the model can be
arbitrarily worse). A constant model that always predicts the expected
value of y, disregarding the input features, would get a R^2 score of
0.0.
Parameters
----------
X : None or array-like of shape (n_samples, n_features)
Test samples. Passing None as test samples gives the same result
as passing real test samples, since `DummyRegressor`
operates independently of the sampled observations.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
True values for X.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
R^2 of `self.predict(X)` w.r.t. y.
"""
if X is None:
X = np.zeros(shape=(len(y), 1))
return super().score(X, y, sample_weight)
|
Return the coefficient of determination R^2 of the prediction.
The coefficient R^2 is defined as `(1 - u/v)`, where `u` is the
residual sum of squares `((y_true - y_pred) ** 2).sum()` and `v` is the
total sum of squares `((y_true - y_true.mean()) ** 2).sum()`. The best
possible score is 1.0 and it can be negative (because the model can be
arbitrarily worse). A constant model that always predicts the expected
value of y, disregarding the input features, would get a R^2 score of
0.0.
Parameters
----------
X : None or array-like of shape (n_samples, n_features)
Test samples. Passing None as test samples gives the same result
as passing real test samples, since `DummyRegressor`
operates independently of the sampled observations.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
True values for X.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
R^2 of `self.predict(X)` w.r.t. y.
|
score
|
python
|
scikit-learn/scikit-learn
|
sklearn/dummy.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/dummy.py
|
BSD-3-Clause
|
def check_increasing(x, y):
"""Determine whether y is monotonically correlated with x.
y is found increasing or decreasing with respect to x based on a Spearman
correlation test.
Parameters
----------
x : array-like of shape (n_samples,)
Training data.
y : array-like of shape (n_samples,)
Training target.
Returns
-------
increasing_bool : boolean
Whether the relationship is increasing or decreasing.
Notes
-----
The Spearman correlation coefficient is estimated from the data, and the
sign of the resulting estimate is used as the result.
In the event that the 95% confidence interval based on Fisher transform
spans zero, a warning is raised.
References
----------
Fisher transformation. Wikipedia.
https://en.wikipedia.org/wiki/Fisher_transformation
Examples
--------
>>> from sklearn.isotonic import check_increasing
>>> x, y = [1, 2, 3, 4, 5], [2, 4, 6, 8, 10]
>>> check_increasing(x, y)
np.True_
>>> y = [10, 8, 6, 4, 2]
>>> check_increasing(x, y)
np.False_
"""
# Calculate Spearman rho estimate and set return accordingly.
rho, _ = spearmanr(x, y)
increasing_bool = rho >= 0
# Run Fisher transform to get the rho CI, but handle rho=+/-1
if rho not in [-1.0, 1.0] and len(x) > 3:
F = 0.5 * math.log((1.0 + rho) / (1.0 - rho))
F_se = 1 / math.sqrt(len(x) - 3)
# Use a 95% CI, i.e., +/-1.96 S.E.
# https://en.wikipedia.org/wiki/Fisher_transformation
rho_0 = math.tanh(F - 1.96 * F_se)
rho_1 = math.tanh(F + 1.96 * F_se)
# Warn if the CI spans zero.
if np.sign(rho_0) != np.sign(rho_1):
warnings.warn(
"Confidence interval of the Spearman "
"correlation coefficient spans zero. "
"Determination of ``increasing`` may be "
"suspect."
)
return increasing_bool
|
Determine whether y is monotonically correlated with x.
y is found increasing or decreasing with respect to x based on a Spearman
correlation test.
Parameters
----------
x : array-like of shape (n_samples,)
Training data.
y : array-like of shape (n_samples,)
Training target.
Returns
-------
increasing_bool : boolean
Whether the relationship is increasing or decreasing.
Notes
-----
The Spearman correlation coefficient is estimated from the data, and the
sign of the resulting estimate is used as the result.
In the event that the 95% confidence interval based on Fisher transform
spans zero, a warning is raised.
References
----------
Fisher transformation. Wikipedia.
https://en.wikipedia.org/wiki/Fisher_transformation
Examples
--------
>>> from sklearn.isotonic import check_increasing
>>> x, y = [1, 2, 3, 4, 5], [2, 4, 6, 8, 10]
>>> check_increasing(x, y)
np.True_
>>> y = [10, 8, 6, 4, 2]
>>> check_increasing(x, y)
np.False_
|
check_increasing
|
python
|
scikit-learn/scikit-learn
|
sklearn/isotonic.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/isotonic.py
|
BSD-3-Clause
|
def isotonic_regression(
y, *, sample_weight=None, y_min=None, y_max=None, increasing=True
):
"""Solve the isotonic regression model.
Read more in the :ref:`User Guide <isotonic>`.
Parameters
----------
y : array-like of shape (n_samples,)
The data.
sample_weight : array-like of shape (n_samples,), default=None
Weights on each point of the regression.
If None, weight is set to 1 (equal weights).
y_min : float, default=None
Lower bound on the lowest predicted value (the minimum value may
still be higher). If not set, defaults to -inf.
y_max : float, default=None
Upper bound on the highest predicted value (the maximum may still be
lower). If not set, defaults to +inf.
increasing : bool, default=True
        Whether the computed ``y_`` should be increasing (if set to True)
        or decreasing (if set to False).
Returns
-------
y_ : ndarray of shape (n_samples,)
Isotonic fit of y.
References
----------
"Active set algorithms for isotonic regression; A unifying framework"
by Michael J. Best and Nilotpal Chakravarti, section 3.
Examples
--------
>>> from sklearn.isotonic import isotonic_regression
>>> isotonic_regression([5, 3, 1, 2, 8, 10, 7, 9, 6, 4])
array([2.75 , 2.75 , 2.75 , 2.75 , 7.33,
7.33, 7.33, 7.33, 7.33, 7.33])
"""
y = check_array(y, ensure_2d=False, input_name="y", dtype=[np.float64, np.float32])
if sp_base_version >= parse_version("1.12.0"):
res = optimize.isotonic_regression(
y=y, weights=sample_weight, increasing=increasing
)
y = np.asarray(res.x, dtype=y.dtype)
else:
# TODO: remove this branch when Scipy 1.12 is the minimum supported version
# Also remove _inplace_contiguous_isotonic_regression.
order = np.s_[:] if increasing else np.s_[::-1]
y = np.array(y[order], dtype=y.dtype)
sample_weight = _check_sample_weight(sample_weight, y, dtype=y.dtype, copy=True)
sample_weight = np.ascontiguousarray(sample_weight[order])
_inplace_contiguous_isotonic_regression(y, sample_weight)
y = y[order]
if y_min is not None or y_max is not None:
# Older versions of np.clip don't accept None as a bound, so use np.inf
if y_min is None:
y_min = -np.inf
if y_max is None:
y_max = np.inf
np.clip(y, y_min, y_max, y)
return y
|
Solve the isotonic regression model.
Read more in the :ref:`User Guide <isotonic>`.
Parameters
----------
y : array-like of shape (n_samples,)
The data.
sample_weight : array-like of shape (n_samples,), default=None
Weights on each point of the regression.
If None, weight is set to 1 (equal weights).
y_min : float, default=None
Lower bound on the lowest predicted value (the minimum value may
still be higher). If not set, defaults to -inf.
y_max : float, default=None
Upper bound on the highest predicted value (the maximum may still be
lower). If not set, defaults to +inf.
increasing : bool, default=True
    Whether the computed ``y_`` should be increasing (if set to True)
    or decreasing (if set to False).
Returns
-------
y_ : ndarray of shape (n_samples,)
Isotonic fit of y.
References
----------
"Active set algorithms for isotonic regression; A unifying framework"
by Michael J. Best and Nilotpal Chakravarti, section 3.
Examples
--------
>>> from sklearn.isotonic import isotonic_regression
>>> isotonic_regression([5, 3, 1, 2, 8, 10, 7, 9, 6, 4])
array([2.75 , 2.75 , 2.75 , 2.75 , 7.33,
7.33, 7.33, 7.33, 7.33, 7.33])
|
isotonic_regression
|
python
|
scikit-learn/scikit-learn
|
sklearn/isotonic.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/isotonic.py
|
BSD-3-Clause
|
def fit(self, X, y, sample_weight=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like of shape (n_samples,) or (n_samples, 1)
Training data.
.. versionchanged:: 0.24
Also accepts 2d array with 1 feature.
y : array-like of shape (n_samples,)
Training target.
sample_weight : array-like of shape (n_samples,), default=None
Weights. If set to None, all weights will be set to 1 (equal
weights).
Returns
-------
self : object
Returns an instance of self.
Notes
-----
X is stored for future use, as :meth:`transform` needs X to interpolate
new input data.
"""
check_params = dict(accept_sparse=False, ensure_2d=False)
X = check_array(
X, input_name="X", dtype=[np.float64, np.float32], **check_params
)
y = check_array(y, input_name="y", dtype=X.dtype, **check_params)
check_consistent_length(X, y, sample_weight)
# Transform y by running the isotonic regression algorithm and
# transform X accordingly.
X, y = self._build_y(X, y, sample_weight)
# It is necessary to store the non-redundant part of the training set
# on the model to make it possible to support model persistence via
# the pickle module as the object built by scipy.interp1d is not
# picklable directly.
self.X_thresholds_, self.y_thresholds_ = X, y
# Build the interpolation function
self._build_f(X, y)
return self
|
Fit the model using X, y as training data.
Parameters
----------
X : array-like of shape (n_samples,) or (n_samples, 1)
Training data.
.. versionchanged:: 0.24
Also accepts 2d array with 1 feature.
y : array-like of shape (n_samples,)
Training target.
sample_weight : array-like of shape (n_samples,), default=None
Weights. If set to None, all weights will be set to 1 (equal
weights).
Returns
-------
self : object
Returns an instance of self.
Notes
-----
X is stored for future use, as :meth:`transform` needs X to interpolate
new input data.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/isotonic.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/isotonic.py
|
BSD-3-Clause
|
def _transform(self, T):
"""`_transform` is called by both `transform` and `predict` methods.
Since `transform` is wrapped to output arrays of specific types (e.g.
NumPy arrays, pandas DataFrame), we cannot make `predict` call `transform`
directly.
The above behaviour could be changed in the future, if we decide to output
other type of arrays when calling `predict`.
"""
if hasattr(self, "X_thresholds_"):
dtype = self.X_thresholds_.dtype
else:
dtype = np.float64
T = check_array(T, dtype=dtype, ensure_2d=False)
self._check_input_data_shape(T)
T = T.reshape(-1) # use 1d view
if self.out_of_bounds == "clip":
T = np.clip(T, self.X_min_, self.X_max_)
res = self.f_(T)
# on scipy 0.17, interp1d up-casts to float64, so we cast back
res = res.astype(T.dtype)
return res
|
`_transform` is called by both `transform` and `predict` methods.
Since `transform` is wrapped to output arrays of specific types (e.g.
NumPy arrays, pandas DataFrame), we cannot make `predict` call `transform`
directly.
The above behaviour could be changed in the future, if we decide to output
other type of arrays when calling `predict`.
|
_transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/isotonic.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/isotonic.py
|
BSD-3-Clause
|
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Ignored.
Returns
-------
feature_names_out : ndarray of str objects
An ndarray with one string i.e. ["isotonicregression0"].
"""
check_is_fitted(self, "f_")
class_name = self.__class__.__name__.lower()
return np.asarray([f"{class_name}0"], dtype=object)
|
Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Ignored.
Returns
-------
feature_names_out : ndarray of str objects
An ndarray with one string i.e. ["isotonicregression0"].
|
get_feature_names_out
|
python
|
scikit-learn/scikit-learn
|
sklearn/isotonic.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/isotonic.py
|
BSD-3-Clause
|
def __getstate__(self):
"""Pickle-protocol - return state of the estimator."""
state = super().__getstate__()
# remove interpolation method
state.pop("f_", None)
return state
|
Pickle-protocol - return state of the estimator.
|
__getstate__
|
python
|
scikit-learn/scikit-learn
|
sklearn/isotonic.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/isotonic.py
|
BSD-3-Clause
|
def __setstate__(self, state):
"""Pickle-protocol - set state of the estimator.
We need to rebuild the interpolation function.
"""
super().__setstate__(state)
if hasattr(self, "X_thresholds_") and hasattr(self, "y_thresholds_"):
self._build_f(self.X_thresholds_, self.y_thresholds_)
|
Pickle-protocol - set state of the estimator.
We need to rebuild the interpolation function.
|
__setstate__
|
python
|
scikit-learn/scikit-learn
|
sklearn/isotonic.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/isotonic.py
|
BSD-3-Clause
|
def fit(self, X, y=None):
"""Fit the model with X.
Initializes the internal variables. The method needs no information
about the distribution of data, so we only care about n_features in X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like of shape (n_samples,) or (n_samples, n_outputs), \
default=None
Target values (None for unsupervised transformations).
Returns
-------
self : object
Returns the instance itself.
"""
X = validate_data(self, X, accept_sparse="csc")
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
if self.coef0 != 0:
n_features += 1
self.indexHash_ = random_state.randint(
0, high=self.n_components, size=(self.degree, n_features)
)
self.bitHash_ = random_state.choice(a=[-1, 1], size=(self.degree, n_features))
self._n_features_out = self.n_components
return self
|
Fit the model with X.
Initializes the internal variables. The method needs no information
about the distribution of data, so we only care about n_features in X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like of shape (n_samples,) or (n_samples, n_outputs), default=None
Target values (None for unsupervised transformations).
Returns
-------
self : object
Returns the instance itself.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/kernel_approximation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/kernel_approximation.py
|
BSD-3-Clause
|
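A small sketch of the fitted hash tables (illustrative; the parameter values are arbitrary): with `coef0 != 0` a constant feature is appended, so both tables span `n_features + 1` columns.

import numpy as np
from sklearn.kernel_approximation import PolynomialCountSketch

X = np.random.RandomState(0).rand(10, 4)
pcs = PolynomialCountSketch(degree=3, coef0=1, n_components=16, random_state=0).fit(X)
assert pcs.indexHash_.shape == (3, 5)  # (degree, n_features + 1)
assert pcs.bitHash_.shape == (3, 5)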
def transform(self, X):
"""Generate the feature map approximation for X.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
New data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
        The approximate polynomial kernel feature map of X.
"""
check_is_fitted(self)
X = validate_data(self, X, accept_sparse="csc", reset=False)
X_gamma = np.sqrt(self.gamma) * X
if sp.issparse(X_gamma) and self.coef0 != 0:
X_gamma = sp.hstack(
[X_gamma, np.sqrt(self.coef0) * np.ones((X_gamma.shape[0], 1))],
format="csc",
)
elif not sp.issparse(X_gamma) and self.coef0 != 0:
X_gamma = np.hstack(
[X_gamma, np.sqrt(self.coef0) * np.ones((X_gamma.shape[0], 1))]
)
if X_gamma.shape[1] != self.indexHash_.shape[1]:
raise ValueError(
"Number of features of test samples does not"
" match that of training samples."
)
count_sketches = np.zeros((X_gamma.shape[0], self.degree, self.n_components))
if sp.issparse(X_gamma):
for j in range(X_gamma.shape[1]):
for d in range(self.degree):
iHashIndex = self.indexHash_[d, j]
iHashBit = self.bitHash_[d, j]
count_sketches[:, d, iHashIndex] += (
(iHashBit * X_gamma[:, [j]]).toarray().ravel()
)
else:
for j in range(X_gamma.shape[1]):
for d in range(self.degree):
iHashIndex = self.indexHash_[d, j]
iHashBit = self.bitHash_[d, j]
count_sketches[:, d, iHashIndex] += iHashBit * X_gamma[:, j]
    # For each sample, compute a count sketch of phi(x) using the polynomial
# multiplication (via FFT) of p count sketches of x.
count_sketches_fft = fft(count_sketches, axis=2, overwrite_x=True)
count_sketches_fft_prod = np.prod(count_sketches_fft, axis=1)
data_sketch = np.real(ifft(count_sketches_fft_prod, overwrite_x=True))
return data_sketch
|
Generate the feature map approximation for X.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
New data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
    The approximate polynomial kernel feature map of X.
|
transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/kernel_approximation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/kernel_approximation.py
|
BSD-3-Clause
|
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like, shape (n_samples,) or (n_samples, n_outputs), \
default=None
Target values (None for unsupervised transformations).
Returns
-------
self : object
Returns the instance itself.
"""
X = validate_data(self, X, accept_sparse="csr")
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
sparse = sp.issparse(X)
if self.gamma == "scale":
# var = E[X^2] - E[X]^2 if sparse
X_var = (X.multiply(X)).mean() - (X.mean()) ** 2 if sparse else X.var()
self._gamma = 1.0 / (n_features * X_var) if X_var != 0 else 1.0
else:
self._gamma = self.gamma
self.random_weights_ = (2.0 * self._gamma) ** 0.5 * random_state.normal(
size=(n_features, self.n_components)
)
self.random_offset_ = random_state.uniform(0, 2 * np.pi, size=self.n_components)
if X.dtype == np.float32:
# Setting the data type of the fitted attribute will ensure the
# output data type during `transform`.
self.random_weights_ = self.random_weights_.astype(X.dtype, copy=False)
self.random_offset_ = self.random_offset_.astype(X.dtype, copy=False)
self._n_features_out = self.n_components
return self
|
Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like, shape (n_samples,) or (n_samples, n_outputs), default=None
Target values (None for unsupervised transformations).
Returns
-------
self : object
Returns the instance itself.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/kernel_approximation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/kernel_approximation.py
|
BSD-3-Clause
|
def transform(self, X):
"""Apply the approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
        The approximate RBF kernel feature map of X.
"""
check_is_fitted(self)
X = validate_data(self, X, accept_sparse="csr", reset=False)
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= (2.0 / self.n_components) ** 0.5
return projection
|
Apply the approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
    The approximate RBF kernel feature map of X.
|
transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/kernel_approximation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/kernel_approximation.py
|
BSD-3-Clause
|
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like, shape (n_samples,) or (n_samples, n_outputs), \
default=None
Target values (None for unsupervised transformations).
Returns
-------
self : object
Returns the instance itself.
"""
X = validate_data(self, X)
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
uniform = random_state.uniform(size=(n_features, self.n_components))
# transform by inverse CDF of sech
self.random_weights_ = 1.0 / np.pi * np.log(np.tan(np.pi / 2.0 * uniform))
self.random_offset_ = random_state.uniform(0, 2 * np.pi, size=self.n_components)
if X.dtype == np.float32:
# Setting the data type of the fitted attribute will ensure the
# output data type during `transform`.
self.random_weights_ = self.random_weights_.astype(X.dtype, copy=False)
self.random_offset_ = self.random_offset_.astype(X.dtype, copy=False)
self._n_features_out = self.n_components
return self
|
Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like, shape (n_samples,) or (n_samples, n_outputs), default=None
Target values (None for unsupervised transformations).
Returns
-------
self : object
Returns the instance itself.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/kernel_approximation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/kernel_approximation.py
|
BSD-3-Clause
|
def transform(self, X):
"""Apply the approximate feature map to X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where `n_samples` is the number of samples
and `n_features` is the number of features. All values of X must be
strictly greater than "-skewedness".
Returns
-------
X_new : array-like, shape (n_samples, n_components)
        The approximate skewed chi squared kernel feature map of X.
"""
check_is_fitted(self)
X = validate_data(
self, X, copy=True, dtype=[np.float64, np.float32], reset=False
)
if (X <= -self.skewedness).any():
raise ValueError("X may not contain entries smaller than -skewedness.")
X += self.skewedness
np.log(X, X)
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.0) / np.sqrt(self.n_components)
return projection
|
Apply the approximate feature map to X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where `n_samples` is the number of samples
and `n_features` is the number of features. All values of X must be
strictly greater than "-skewedness".
Returns
-------
X_new : array-like, shape (n_samples, n_components)
Returns the instance itself.
|
transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/kernel_approximation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/kernel_approximation.py
|
BSD-3-Clause
|
def fit(self, X, y=None):
"""Only validates estimator's parameters.
    This method allows one to (i) validate the estimator's parameters and
    (ii) be consistent with the scikit-learn transformer API.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like, shape (n_samples,) or (n_samples, n_outputs), \
default=None
Target values (None for unsupervised transformations).
Returns
-------
self : object
Returns the transformer.
"""
X = validate_data(self, X, accept_sparse="csr", ensure_non_negative=True)
if self.sample_interval is None and self.sample_steps not in (1, 2, 3):
raise ValueError(
"If sample_steps is not in [1, 2, 3],"
" you need to provide sample_interval"
)
return self
|
Only validates estimator's parameters.
This method allows one to (i) validate the estimator's parameters and
(ii) be consistent with the scikit-learn transformer API.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like, shape (n_samples,) or (n_samples, n_outputs), default=None
Target values (None for unsupervised transformations).
Returns
-------
self : object
Returns the transformer.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/kernel_approximation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/kernel_approximation.py
|
BSD-3-Clause
|
def transform(self, X):
"""Apply approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Returns
-------
X_new : {ndarray, sparse matrix}, \
shape = (n_samples, n_features * (2*sample_steps - 1))
Whether the return value is an array or sparse matrix depends on
the type of the input X.
"""
X = validate_data(
self, X, accept_sparse="csr", reset=False, ensure_non_negative=True
)
sparse = sp.issparse(X)
if self.sample_interval is None:
# See figure 2 c) of "Efficient additive kernels via explicit feature maps"
# <http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>
# A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence,
# 2011
if self.sample_steps == 1:
sample_interval = 0.8
elif self.sample_steps == 2:
sample_interval = 0.5
elif self.sample_steps == 3:
sample_interval = 0.4
else:
raise ValueError(
"If sample_steps is not in [1, 2, 3],"
" you need to provide sample_interval"
)
else:
sample_interval = self.sample_interval
    # zeroth component: sech = 1/cosh and cosh(0) = 1.0, so no extra factor
    # is needed for it.
transf = self._transform_sparse if sparse else self._transform_dense
return transf(X, self.sample_steps, sample_interval)
|
Apply approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Returns
-------
X_new : {ndarray, sparse matrix}, shape = (n_samples, n_features * (2*sample_steps - 1))
Whether the return value is an array or sparse matrix depends on
the type of the input X.
|
transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/kernel_approximation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/kernel_approximation.py
|
BSD-3-Clause
|
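A final shape sketch for the additive chi squared map above (illustrative): the output has n_features * (2 * sample_steps - 1) columns.

import numpy as np
from sklearn.kernel_approximation import AdditiveChi2Sampler

X = np.random.RandomState(0).rand(10, 3)  # non-negative input is required
Z = AdditiveChi2Sampler(sample_steps=2).fit_transform(X)
print(Z.shape)  # (10, 9) == (10, 3 * (2 * 2 - 1))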