code | docstring | func_name | language | repo | path | url | license
---|---|---|---|---|---|---|---
def test_dbscan_input_not_modified_precomputed_sparse_nodiag(csr_container):
"""Check that we don't modify in-place the pre-computed sparse matrix.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/27508
"""
X = np.random.RandomState(0).rand(10, 10)
# Add zeros on the diagonal that will be implicit when creating
# the sparse matrix. If `X` is modified in-place, the zeros from
# the diagonal will be made explicit.
np.fill_diagonal(X, 0)
X = csr_container(X)
assert all(row != col for row, col in zip(*X.nonzero()))
X_copy = X.copy()
dbscan(X, metric="precomputed")
# Make sure that we did not modify `X` in-place even by creating
# explicit 0s values.
assert X.nnz == X_copy.nnz
assert_array_equal(X.toarray(), X_copy.toarray())
|
Check that we don't modify in-place the pre-computed sparse matrix.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/27508
|
test_dbscan_input_not_modified_precomputed_sparse_nodiag
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_dbscan.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_dbscan.py
|
BSD-3-Clause
|
def test_outlier_data(outlier_type):
"""
Tests if np.inf and np.nan data are each treated as special outliers.
"""
outlier = {
"infinite": np.inf,
"missing": np.nan,
}[outlier_type]
prob_check = {
"infinite": lambda x, y: x == y,
"missing": lambda x, y: np.isnan(x),
}[outlier_type]
label = _OUTLIER_ENCODING[outlier_type]["label"]
prob = _OUTLIER_ENCODING[outlier_type]["prob"]
X_outlier = X.copy()
X_outlier[0] = [outlier, 1]
X_outlier[5] = [outlier, outlier]
model = HDBSCAN().fit(X_outlier)
(missing_labels_idx,) = (model.labels_ == label).nonzero()
assert_array_equal(missing_labels_idx, [0, 5])
(missing_probs_idx,) = (prob_check(model.probabilities_, prob)).nonzero()
assert_array_equal(missing_probs_idx, [0, 5])
clean_indices = list(range(1, 5)) + list(range(6, 200))
clean_model = HDBSCAN().fit(X_outlier[clean_indices])
assert_array_equal(clean_model.labels_, model.labels_[clean_indices])
|
Tests if np.inf and np.nan data are each treated as special outliers.
|
test_outlier_data
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_hdbscan.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_hdbscan.py
|
BSD-3-Clause
|
def test_hdbscan_distance_matrix():
"""
Tests that HDBSCAN works with precomputed distance matrices, and throws the
appropriate errors when needed.
"""
D = euclidean_distances(X)
D_original = D.copy()
labels = HDBSCAN(metric="precomputed", copy=True).fit_predict(D)
assert_allclose(D, D_original)
check_label_quality(labels)
msg = r"The precomputed distance matrix.*has shape"
with pytest.raises(ValueError, match=msg):
HDBSCAN(metric="precomputed", copy=True).fit_predict(X)
msg = r"The precomputed distance matrix.*values"
# Ensure the matrix is not symmetric
D[0, 1] = 10
D[1, 0] = 1
with pytest.raises(ValueError, match=msg):
HDBSCAN(metric="precomputed").fit_predict(D)
|
Tests that HDBSCAN works with precomputed distance matrices, and throws the
appropriate errors when needed.
|
test_hdbscan_distance_matrix
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_hdbscan.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_hdbscan.py
|
BSD-3-Clause
|
def test_hdbscan_sparse_distance_matrix(sparse_constructor):
"""
Tests that HDBSCAN works with sparse distance matrices.
"""
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
threshold = stats.scoreatpercentile(D.flatten(), 50)
D[D >= threshold] = 0.0
D = sparse_constructor(D)
D.eliminate_zeros()
labels = HDBSCAN(metric="precomputed").fit_predict(D)
check_label_quality(labels)
|
Tests that HDBSCAN works with sparse distance matrices.
|
test_hdbscan_sparse_distance_matrix
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_hdbscan.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_hdbscan.py
|
BSD-3-Clause
|
def test_hdbscan_feature_array():
"""
Tests that HDBSCAN works with feature array, including an arbitrary
goodness of fit check. Note that the check is a simple heuristic.
"""
labels = HDBSCAN().fit_predict(X)
# Check that clustering is arbitrarily good
# This is a heuristic to guard against regression
check_label_quality(labels)
|
Tests that HDBSCAN works with feature array, including an arbitrary
goodness of fit check. Note that the check is a simple heuristic.
|
test_hdbscan_feature_array
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_hdbscan.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_hdbscan.py
|
BSD-3-Clause
|
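The test rows above refer to a module-level dataset `X`, an `_OUTLIER_ENCODING` mapping and a `check_label_quality` helper that are not included in this dump. A minimal sketch of what such fixtures could look like; the blob parameters, metric and threshold here are assumptions for illustration, not the actual values from `sklearn/cluster/tests/test_hdbscan.py`:

```python
from sklearn.datasets import make_blobs
from sklearn.metrics import fowlkes_mallows_score

# Hypothetical stand-ins for the fixtures used by the HDBSCAN test rows above.
n_samples = 200
X, y = make_blobs(n_samples=n_samples, random_state=10)


def check_label_quality(labels, threshold=0.99):
    # Compare the clustering against the generating blob labels `y`; the real
    # helper in the test module may use a different metric or threshold.
    assert fowlkes_mallows_score(y, labels) > threshold
```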
def test_hdbscan_algorithms(algo, metric):
"""
Tests that HDBSCAN works with the expected combinations of algorithms and
metrics, or raises the expected errors.
"""
labels = HDBSCAN(algorithm=algo).fit_predict(X)
check_label_quality(labels)
# Validation for brute is handled by `pairwise_distances`
if algo in ("brute", "auto"):
return
ALGOS_TREES = {
"kd_tree": KDTree,
"ball_tree": BallTree,
}
metric_params = {
"mahalanobis": {"V": np.eye(X.shape[1])},
"seuclidean": {"V": np.ones(X.shape[1])},
"minkowski": {"p": 2},
"wminkowski": {"p": 2, "w": np.ones(X.shape[1])},
}.get(metric, None)
hdb = HDBSCAN(
algorithm=algo,
metric=metric,
metric_params=metric_params,
)
if metric not in ALGOS_TREES[algo].valid_metrics:
with pytest.raises(ValueError):
hdb.fit(X)
elif metric == "wminkowski":
with pytest.warns(FutureWarning):
hdb.fit(X)
else:
hdb.fit(X)
|
Tests that HDBSCAN works with the expected combinations of algorithms and
metrics, or raises the expected errors.
|
test_hdbscan_algorithms
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_hdbscan.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_hdbscan.py
|
BSD-3-Clause
|
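For reference, the metric validation exercised by `test_hdbscan_algorithms` (and by `test_hdbscan_tree_invalid_metric` further down) relies on the `valid_metrics` attribute of the tree classes; a small sketch inspecting it, with output that will vary across scikit-learn versions:

```python
from sklearn.neighbors import BallTree, KDTree

# Tree-based algorithms only accept metrics supported by the chosen tree;
# BallTree supports a superset of the KDTree metrics.
print(sorted(KDTree.valid_metrics))
print(sorted(set(BallTree.valid_metrics) - set(KDTree.valid_metrics)))
```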
def test_dbscan_clustering():
"""
Tests that HDBSCAN can generate a sufficiently accurate dbscan clustering.
This test is more of a sanity check than a rigorous evaluation.
"""
clusterer = HDBSCAN().fit(X)
labels = clusterer.dbscan_clustering(0.3)
# We use a looser threshold due to dbscan producing a more constrained
# clustering representation
check_label_quality(labels, threshold=0.92)
|
Tests that HDBSCAN can generate a sufficiently accurate dbscan clustering.
This test is more of a sanity check than a rigorous evaluation.
|
test_dbscan_clustering
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_hdbscan.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_hdbscan.py
|
BSD-3-Clause
|
def test_dbscan_clustering_outlier_data(cut_distance):
"""
Tests if np.inf and np.nan data are each treated as special outliers.
"""
missing_label = _OUTLIER_ENCODING["missing"]["label"]
infinite_label = _OUTLIER_ENCODING["infinite"]["label"]
X_outlier = X.copy()
X_outlier[0] = [np.inf, 1]
X_outlier[2] = [1, np.nan]
X_outlier[5] = [np.inf, np.nan]
model = HDBSCAN().fit(X_outlier)
labels = model.dbscan_clustering(cut_distance=cut_distance)
missing_labels_idx = np.flatnonzero(labels == missing_label)
assert_array_equal(missing_labels_idx, [2, 5])
infinite_labels_idx = np.flatnonzero(labels == infinite_label)
assert_array_equal(infinite_labels_idx, [0])
clean_idx = list(set(range(200)) - set(missing_labels_idx) - set(infinite_labels_idx))
clean_model = HDBSCAN().fit(X_outlier[clean_idx])
clean_labels = clean_model.dbscan_clustering(cut_distance=cut_distance)
assert_array_equal(clean_labels, labels[clean_idx])
|
Tests if np.inf and np.nan data are each treated as special outliers.
|
test_dbscan_clustering_outlier_data
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_hdbscan.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_hdbscan.py
|
BSD-3-Clause
|
def test_hdbscan_best_balltree_metric():
"""
Tests that HDBSCAN using `BallTree` works.
"""
labels = HDBSCAN(
metric="seuclidean", metric_params={"V": np.ones(X.shape[1])}
).fit_predict(X)
check_label_quality(labels)
|
Tests that HDBSCAN using `BallTree` works.
|
test_hdbscan_best_balltree_metric
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_hdbscan.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_hdbscan.py
|
BSD-3-Clause
|
def test_hdbscan_min_cluster_size():
"""
Test that the smallest non-noise cluster has at least `min_cluster_size`
many points
"""
for min_cluster_size in range(2, len(X), 1):
labels = HDBSCAN(min_cluster_size=min_cluster_size).fit_predict(X)
true_labels = [label for label in labels if label != -1]
if len(true_labels) != 0:
assert np.min(np.bincount(true_labels)) >= min_cluster_size
|
Test that the smallest non-noise cluster has at least `min_cluster_size`
many points
|
test_hdbscan_min_cluster_size
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_hdbscan.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_hdbscan.py
|
BSD-3-Clause
|
def test_hdbscan_callable_metric():
"""
Tests that HDBSCAN works when passed a callable metric.
"""
metric = distance.euclidean
labels = HDBSCAN(metric=metric).fit_predict(X)
check_label_quality(labels)
|
Tests that HDBSCAN works when passed a callable metric.
|
test_hdbscan_callable_metric
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_hdbscan.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_hdbscan.py
|
BSD-3-Clause
|
def test_hdbscan_precomputed_non_brute(tree):
"""
Tests that HDBSCAN correctly raises an error when passing precomputed data
while requesting a tree-based algorithm.
"""
hdb = HDBSCAN(metric="precomputed", algorithm=tree)
msg = "precomputed is not a valid metric for"
with pytest.raises(ValueError, match=msg):
hdb.fit(X)
|
Tests that HDBSCAN correctly raises an error when passing precomputed data
while requesting a tree-based algorithm.
|
test_hdbscan_precomputed_non_brute
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_hdbscan.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_hdbscan.py
|
BSD-3-Clause
|
def test_hdbscan_sparse(csr_container):
"""
Tests that HDBSCAN works correctly when passing sparse feature data.
Evaluates correctness by comparing against the same data passed as a dense
array.
"""
dense_labels = HDBSCAN().fit(X).labels_
check_label_quality(dense_labels)
_X_sparse = csr_container(X)
X_sparse = _X_sparse.copy()
sparse_labels = HDBSCAN().fit(X_sparse).labels_
assert_array_equal(dense_labels, sparse_labels)
# Compare that the sparse and dense non-precomputed routines return the same labels
# where the 0th observation contains the outlier.
for outlier_val, outlier_type in ((np.inf, "infinite"), (np.nan, "missing")):
X_dense = X.copy()
X_dense[0, 0] = outlier_val
dense_labels = HDBSCAN().fit(X_dense).labels_
check_label_quality(dense_labels)
assert dense_labels[0] == _OUTLIER_ENCODING[outlier_type]["label"]
X_sparse = _X_sparse.copy()
X_sparse[0, 0] = outlier_val
sparse_labels = HDBSCAN().fit(X_sparse).labels_
assert_array_equal(dense_labels, sparse_labels)
msg = "Sparse data matrices only support algorithm `brute`."
with pytest.raises(ValueError, match=msg):
HDBSCAN(metric="euclidean", algorithm="ball_tree").fit(X_sparse)
|
Tests that HDBSCAN works correctly when passing sparse feature data.
Evaluates correctness by comparing against the same data passed as a dense
array.
|
test_hdbscan_sparse
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_hdbscan.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_hdbscan.py
|
BSD-3-Clause
|
def test_hdbscan_centers(algorithm):
"""
Tests that HDBSCAN centers are calculated and stored properly, and are
accurate to the data.
"""
centers = [(0.0, 0.0), (3.0, 3.0)]
H, _ = make_blobs(n_samples=2000, random_state=0, centers=centers, cluster_std=0.5)
hdb = HDBSCAN(store_centers="both").fit(H)
for center, centroid, medoid in zip(centers, hdb.centroids_, hdb.medoids_):
assert_allclose(center, centroid, rtol=1, atol=0.05)
assert_allclose(center, medoid, rtol=1, atol=0.05)
# Ensure that nothing is done for noise
hdb = HDBSCAN(
algorithm=algorithm, store_centers="both", min_cluster_size=X.shape[0]
).fit(X)
assert hdb.centroids_.shape[0] == 0
assert hdb.medoids_.shape[0] == 0
|
Tests that HDBSCAN centers are calculated and stored properly, and are
accurate to the data.
|
test_hdbscan_centers
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_hdbscan.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_hdbscan.py
|
BSD-3-Clause
|
def test_hdbscan_allow_single_cluster_with_epsilon():
"""
Tests that HDBSCAN single-cluster selection with epsilon works correctly.
"""
rng = np.random.RandomState(0)
no_structure = rng.rand(150, 2)
# without epsilon we should see many noise points as children of root.
labels = HDBSCAN(
min_cluster_size=5,
cluster_selection_epsilon=0.0,
cluster_selection_method="eom",
allow_single_cluster=True,
).fit_predict(no_structure)
unique_labels, counts = np.unique(labels, return_counts=True)
assert len(unique_labels) == 2
# Arbitrary heuristic. Would prefer something more precise.
assert counts[unique_labels == -1] > 30
# for this random seed an epsilon of 0.18 will produce exactly 2 noise
# points at that cut in single linkage.
labels = HDBSCAN(
min_cluster_size=5,
cluster_selection_epsilon=0.18,
cluster_selection_method="eom",
allow_single_cluster=True,
algorithm="kd_tree",
).fit_predict(no_structure)
unique_labels, counts = np.unique(labels, return_counts=True)
assert len(unique_labels) == 2
assert counts[unique_labels == -1] == 2
|
Tests that HDBSCAN single-cluster selection with epsilon works correctly.
|
test_hdbscan_allow_single_cluster_with_epsilon
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_hdbscan.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_hdbscan.py
|
BSD-3-Clause
|
def test_hdbscan_better_than_dbscan():
"""
Validate that HDBSCAN can properly cluster this difficult synthetic
dataset. Note that DBSCAN fails on this (see HDBSCAN plotting
example)
"""
centers = [[-0.85, -0.85], [-0.85, 0.85], [3, 3], [3, -3]]
X, y = make_blobs(
n_samples=750,
centers=centers,
cluster_std=[0.2, 0.35, 1.35, 1.35],
random_state=0,
)
labels = HDBSCAN().fit(X).labels_
n_clusters = len(set(labels)) - int(-1 in labels)
assert n_clusters == 4
assert fowlkes_mallows_score(labels, y) > 0.99
|
Validate that HDBSCAN can properly cluster this difficult synthetic
dataset. Note that DBSCAN fails on this (see HDBSCAN plotting
example)
|
test_hdbscan_better_than_dbscan
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_hdbscan.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_hdbscan.py
|
BSD-3-Clause
|
def test_hdbscan_sparse_distances_too_few_nonzero(csr_container):
"""
Tests that HDBSCAN raises the correct error when there are too few
non-zero distances.
"""
X = csr_container(np.zeros((10, 10)))
msg = "There exists points with fewer than"
with pytest.raises(ValueError, match=msg):
HDBSCAN(metric="precomputed").fit(X)
|
Tests that HDBSCAN raises the correct error when there are too few
non-zero distances.
|
test_hdbscan_sparse_distances_too_few_nonzero
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_hdbscan.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_hdbscan.py
|
BSD-3-Clause
|
def test_hdbscan_sparse_distances_disconnected_graph(csr_container):
"""
Tests that HDBSCAN raises the correct error when the distance matrix
has multiple connected components.
"""
# Create symmetric sparse matrix with 2 connected components
X = np.zeros((20, 20))
X[:5, :5] = 1
X[5:, 15:] = 1
X = X + X.T
X = csr_container(X)
msg = "HDBSCAN cannot be performed on a disconnected graph"
with pytest.raises(ValueError, match=msg):
HDBSCAN(metric="precomputed").fit(X)
|
Tests that HDBSCAN raises the correct error when the distance matrix
has multiple connected components.
|
test_hdbscan_sparse_distances_disconnected_graph
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_hdbscan.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_hdbscan.py
|
BSD-3-Clause
|
def test_hdbscan_tree_invalid_metric():
"""
Tests that HDBSCAN correctly raises an error for invalid metric choices.
"""
metric_callable = lambda x: x
msg = (
".* is not a valid metric for a .*-based algorithm\\. Please select a different"
" metric\\."
)
# Callables are not supported for either
with pytest.raises(ValueError, match=msg):
HDBSCAN(algorithm="kd_tree", metric=metric_callable).fit(X)
with pytest.raises(ValueError, match=msg):
HDBSCAN(algorithm="ball_tree", metric=metric_callable).fit(X)
# The set of valid metrics for KDTree at the time of writing this test is a
# strict subset of those supported in BallTree
metrics_not_kd = list(set(BallTree.valid_metrics) - set(KDTree.valid_metrics))
if len(metrics_not_kd) > 0:
with pytest.raises(ValueError, match=msg):
HDBSCAN(algorithm="kd_tree", metric=metrics_not_kd[0]).fit(X)
|
Tests that HDBSCAN correctly raises an error for invalid metric choices.
|
test_hdbscan_tree_invalid_metric
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_hdbscan.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_hdbscan.py
|
BSD-3-Clause
|
def test_hdbscan_too_many_min_samples():
"""
Tests that HDBSCAN correctly raises an error when setting `min_samples`
larger than the number of samples.
"""
hdb = HDBSCAN(min_samples=len(X) + 1)
msg = r"min_samples (.*) must be at most"
with pytest.raises(ValueError, match=msg):
hdb.fit(X)
|
Tests that HDBSCAN correctly raises an error when setting `min_samples`
larger than the number of samples.
|
test_hdbscan_too_many_min_samples
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_hdbscan.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_hdbscan.py
|
BSD-3-Clause
|
def test_hdbscan_precomputed_dense_nan():
"""
Tests that HDBSCAN correctly raises an error when providing precomputed
distances with `np.nan` values.
"""
X_nan = X.copy()
X_nan[0, 0] = np.nan
msg = "np.nan values found in precomputed-dense"
hdb = HDBSCAN(metric="precomputed")
with pytest.raises(ValueError, match=msg):
hdb.fit(X_nan)
|
Tests that HDBSCAN correctly raises an error when providing precomputed
distances with `np.nan` values.
|
test_hdbscan_precomputed_dense_nan
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_hdbscan.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_hdbscan.py
|
BSD-3-Clause
|
def test_labelling_distinct(global_random_seed, allow_single_cluster, epsilon):
"""
Tests that the `_do_labelling` helper function correctly assigns labels.
"""
n_samples = 48
X, y = make_blobs(
n_samples,
random_state=global_random_seed,
# Ensure the clusters are distinct with no overlap
centers=[
[0, 0],
[10, 0],
[0, 10],
],
)
est = HDBSCAN().fit(X)
condensed_tree = _condense_tree(
est._single_linkage_tree_, min_cluster_size=est.min_cluster_size
)
clusters = {n_samples + 2, n_samples + 3, n_samples + 4}
cluster_label_map = {n_samples + 2: 0, n_samples + 3: 1, n_samples + 4: 2}
labels = _do_labelling(
condensed_tree=condensed_tree,
clusters=clusters,
cluster_label_map=cluster_label_map,
allow_single_cluster=allow_single_cluster,
cluster_selection_epsilon=epsilon,
)
first_with_label = {_y: np.where(y == _y)[0][0] for _y in list(set(y))}
y_to_labels = {_y: labels[first_with_label[_y]] for _y in list(set(y))}
aligned_target = np.vectorize(y_to_labels.get)(y)
assert_array_equal(labels, aligned_target)
|
Tests that the `_do_labelling` helper function correctly assigns labels.
|
test_labelling_distinct
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_hdbscan.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_hdbscan.py
|
BSD-3-Clause
|
def test_labelling_thresholding():
"""
Tests that the `_do_labelling` helper function correctly thresholds the
incoming lambda values given various `cluster_selection_epsilon` values.
"""
n_samples = 5
MAX_LAMBDA = 1.5
condensed_tree = np.array(
[
(5, 2, MAX_LAMBDA, 1),
(5, 1, 0.1, 1),
(5, 0, MAX_LAMBDA, 1),
(5, 3, 0.2, 1),
(5, 4, 0.3, 1),
],
dtype=CONDENSED_dtype,
)
labels = _do_labelling(
condensed_tree=condensed_tree,
clusters={n_samples},
cluster_label_map={n_samples: 0, n_samples + 1: 1},
allow_single_cluster=True,
cluster_selection_epsilon=1,
)
num_noise = condensed_tree["value"] < 1
assert sum(num_noise) == sum(labels == -1)
labels = _do_labelling(
condensed_tree=condensed_tree,
clusters={n_samples},
cluster_label_map={n_samples: 0, n_samples + 1: 1},
allow_single_cluster=True,
cluster_selection_epsilon=0,
)
# The threshold should be calculated per-sample based on the largest
# lambda of any sibling node. In this case, all points are siblings
# and the largest value is exactly MAX_LAMBDA.
num_noise = condensed_tree["value"] < MAX_LAMBDA
assert sum(num_noise) == sum(labels == -1)
|
Tests that the `_do_labelling` helper function correctly thresholds the
incoming lambda values given various `cluster_selection_epsilon` values.
|
test_labelling_thresholding
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_hdbscan.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_hdbscan.py
|
BSD-3-Clause
|
def test_hdbscan_error_precomputed_and_store_centers(store_centers):
"""Check that we raise an error if the centers are requested together with
a precomputed input matrix.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/27893
"""
rng = np.random.RandomState(0)
X = rng.random((100, 2))
X_dist = euclidean_distances(X)
err_msg = "Cannot store centers when using a precomputed distance matrix."
with pytest.raises(ValueError, match=err_msg):
HDBSCAN(metric="precomputed", store_centers=store_centers).fit(X_dist)
|
Check that we raise an error if the centers are requested together with
a precomputed input matrix.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/27893
|
test_hdbscan_error_precomputed_and_store_centers
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_hdbscan.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_hdbscan.py
|
BSD-3-Clause
|
def test_hdbscan_cosine_metric_invalid_algorithm(invalid_algo):
"""Test that HDBSCAN raises an informative error is raised when an unsupported
algorithm is used with the "cosine" metric.
"""
hdbscan = HDBSCAN(metric="cosine", algorithm=invalid_algo)
with pytest.raises(ValueError, match="cosine is not a valid metric"):
hdbscan.fit_predict(X)
|
Test that HDBSCAN raises an informative error when an unsupported
algorithm is used with the "cosine" metric.
|
test_hdbscan_cosine_metric_invalid_algorithm
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_hdbscan.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_hdbscan.py
|
BSD-3-Clause
|
def test_agglomerative_clustering_memory_mapped():
"""AgglomerativeClustering must work on mem-mapped dataset.
Non-regression test for issue #19875.
"""
rng = np.random.RandomState(0)
Xmm = create_memmap_backed_data(rng.randn(50, 100))
AgglomerativeClustering(metric="euclidean", linkage="single").fit(Xmm)
|
AgglomerativeClustering must work on mem-mapped dataset.
Non-regression test for issue #19875.
|
test_agglomerative_clustering_memory_mapped
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_hierarchical.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_hierarchical.py
|
BSD-3-Clause
|
def test_mst_linkage_core_memory_mapped(metric_param_grid):
"""The MST-LINKAGE-CORE algorithm must work on mem-mapped dataset.
Non-regression test for issue #19875.
"""
rng = np.random.RandomState(seed=1)
X = rng.normal(size=(20, 4))
Xmm = create_memmap_backed_data(X)
metric, param_grid = metric_param_grid
keys = param_grid.keys()
for vals in itertools.product(*param_grid.values()):
kwargs = dict(zip(keys, vals))
distance_metric = DistanceMetric.get_metric(metric, **kwargs)
mst = mst_linkage_core(X, distance_metric)
mst_mm = mst_linkage_core(Xmm, distance_metric)
np.testing.assert_equal(mst, mst_mm)
|
The MST-LINKAGE-CORE algorithm must work on mem-mapped dataset.
Non-regression test for issue #19875.
|
test_mst_linkage_core_memory_mapped
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_hierarchical.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_hierarchical.py
|
BSD-3-Clause
|
def test_precomputed_connectivity_metric_with_2_connected_components():
"""Check that connecting components works when connectivity and
affinity are both precomputed and the number of connected components is
greater than 1. Non-regression test for #16151.
"""
connectivity_matrix = np.array(
[
[0, 1, 1, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 0],
]
)
# ensure that connectivity_matrix has two connected components
assert connected_components(connectivity_matrix)[0] == 2
rng = np.random.RandomState(0)
X = rng.randn(5, 10)
X_dist = pairwise_distances(X)
clusterer_precomputed = AgglomerativeClustering(
metric="precomputed", connectivity=connectivity_matrix, linkage="complete"
)
msg = "Completing it to avoid stopping the tree early"
with pytest.warns(UserWarning, match=msg):
clusterer_precomputed.fit(X_dist)
clusterer = AgglomerativeClustering(
connectivity=connectivity_matrix, linkage="complete"
)
with pytest.warns(UserWarning, match=msg):
clusterer.fit(X)
assert_array_equal(clusterer.labels_, clusterer_precomputed.labels_)
assert_array_equal(clusterer.children_, clusterer_precomputed.children_)
|
Check that connecting components works when connectivity and
affinity are both precomputed and the number of connected components is
greater than 1. Non-regression test for #16151.
|
test_precomputed_connectivity_metric_with_2_connected_components
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_hierarchical.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_hierarchical.py
|
BSD-3-Clause
|
def test_kmeans_init_auto_with_initial_centroids(Estimator, init, expected_n_init):
"""Check that `n_init="auto"` chooses the right number of initializations.
Non-regression test for #26657:
https://github.com/scikit-learn/scikit-learn/pull/26657
"""
n_sample, n_features, n_clusters = 100, 10, 5
X = np.random.randn(n_sample, n_features)
if init == "array-like":
init = np.random.randn(n_clusters, n_features)
if expected_n_init == "default":
expected_n_init = 3 if Estimator is MiniBatchKMeans else 10
kmeans = Estimator(n_clusters=n_clusters, init=init, n_init="auto").fit(X)
assert kmeans._n_init == expected_n_init
|
Check that `n_init="auto"` chooses the right number of initializations.
Non-regression test for #26657:
https://github.com/scikit-learn/scikit-learn/pull/26657
|
test_kmeans_init_auto_with_initial_centroids
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_k_means.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_k_means.py
|
BSD-3-Clause
|
def test_kmeans_with_array_like_or_np_scalar_init(kwargs):
"""Check that init works with numpy scalar strings.
Non-regression test for #21964.
"""
X = np.asarray([[0, 0], [0.5, 0], [0.5, 1], [1, 1]], dtype=np.float64)
clustering = KMeans(n_clusters=2, **kwargs)
# Does not raise
clustering.fit(X)
|
Check that init works with numpy scalar strings.
Non-regression test for #21964.
|
test_kmeans_with_array_like_or_np_scalar_init
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_k_means.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_k_means.py
|
BSD-3-Clause
|
def test_predict_does_not_change_cluster_centers(csr_container):
"""Check that predict does not change cluster centers.
Non-regression test for gh-24253.
"""
X, _ = make_blobs(n_samples=200, n_features=10, centers=10, random_state=0)
if csr_container is not None:
X = csr_container(X)
kmeans = KMeans()
y_pred1 = kmeans.fit_predict(X)
# Make cluster_centers readonly
kmeans.cluster_centers_ = create_memmap_backed_data(kmeans.cluster_centers_)
kmeans.labels_ = create_memmap_backed_data(kmeans.labels_)
y_pred2 = kmeans.predict(X)
assert_array_equal(y_pred1, y_pred2)
|
Check that predict does not change cluster centers.
Non-regression test for gh-24253.
|
test_predict_does_not_change_cluster_centers
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_k_means.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_k_means.py
|
BSD-3-Clause
|
def test_sample_weight_init(init, global_random_seed):
"""Check that sample weight is used during init.
`_init_centroids` is shared across all classes inheriting from _BaseKMeans so
it's enough to check for KMeans.
"""
rng = np.random.RandomState(global_random_seed)
X, _ = make_blobs(
n_samples=200, n_features=10, centers=10, random_state=global_random_seed
)
x_squared_norms = row_norms(X, squared=True)
kmeans = KMeans()
clusters_weighted = kmeans._init_centroids(
X=X,
x_squared_norms=x_squared_norms,
init=init,
sample_weight=rng.uniform(size=X.shape[0]),
n_centroids=5,
random_state=np.random.RandomState(global_random_seed),
)
clusters = kmeans._init_centroids(
X=X,
x_squared_norms=x_squared_norms,
init=init,
sample_weight=np.ones(X.shape[0]),
n_centroids=5,
random_state=np.random.RandomState(global_random_seed),
)
with pytest.raises(AssertionError):
assert_allclose(clusters_weighted, clusters)
|
Check that sample weight is used during init.
`_init_centroids` is shared across all classes inheriting from _BaseKMeans so
it's enough to check for KMeans.
|
test_sample_weight_init
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_k_means.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_k_means.py
|
BSD-3-Clause
|
def test_sample_weight_zero(init, global_random_seed):
"""Check that if sample weight is 0, this sample won't be chosen.
`_init_centroids` is shared across all classes inheriting from _BaseKMeans so
it's enough to check for KMeans.
"""
rng = np.random.RandomState(global_random_seed)
X, _ = make_blobs(
n_samples=100, n_features=5, centers=5, random_state=global_random_seed
)
sample_weight = rng.uniform(size=X.shape[0])
sample_weight[::2] = 0
x_squared_norms = row_norms(X, squared=True)
kmeans = KMeans()
clusters_weighted = kmeans._init_centroids(
X=X,
x_squared_norms=x_squared_norms,
init=init,
sample_weight=sample_weight,
n_centroids=10,
random_state=np.random.RandomState(global_random_seed),
)
# No center should be one of the 0 sample weight point
# (i.e. be at a distance=0 from it)
d = euclidean_distances(X[::2], clusters_weighted)
assert not np.any(np.isclose(d, 0))
|
Check that if sample weight is 0, this sample won't be chosen.
`_init_centroids` is shared across all classes inheriting from _BaseKMeans so
it's enough to check for KMeans.
|
test_sample_weight_zero
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_k_means.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_k_means.py
|
BSD-3-Clause
|
def test_relocating_with_duplicates(algorithm, array_constr):
"""Check that kmeans stops when there are more centers than non-duplicate samples
Non-regression test for issue:
https://github.com/scikit-learn/scikit-learn/issues/28055
"""
X = np.array([[0, 0], [1, 1], [1, 1], [1, 0], [0, 1]])
km = KMeans(n_clusters=5, init=X, algorithm=algorithm)
msg = r"Number of distinct clusters \(4\) found smaller than n_clusters \(5\)"
with pytest.warns(ConvergenceWarning, match=msg):
km.fit(array_constr(X))
assert km.n_iter_ == 1
|
Check that kmeans stops when there are more centers than non-duplicate samples
Non-regression test for issue:
https://github.com/scikit-learn/scikit-learn/issues/28055
|
test_relocating_with_duplicates
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_k_means.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_k_means.py
|
BSD-3-Clause
|
def test_optics_input_not_modified_precomputed_sparse_nodiag(
csr_container, global_random_seed
):
"""Check that we don't modify in-place the pre-computed sparse matrix.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/27508
"""
X = np.random.RandomState(global_random_seed).rand(6, 6)
# Add zeros on the diagonal that will be implicit when creating
# the sparse matrix. If `X` is modified in-place, the zeros from
# the diagonal will be made explicit.
np.fill_diagonal(X, 0)
X = csr_container(X)
assert all(row != col for row, col in zip(*X.nonzero()))
X_copy = X.copy()
OPTICS(metric="precomputed").fit(X)
# Make sure that we did not modify `X` in-place even by creating
# explicit 0s values.
assert X.nnz == X_copy.nnz
assert_array_equal(X.toarray(), X_copy.toarray())
|
Check that we don't modify in-place the pre-computed sparse matrix.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/27508
|
test_optics_input_not_modified_precomputed_sparse_nodiag
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_optics.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_optics.py
|
BSD-3-Clause
|
def test_optics_predecessor_correction_ordering():
"""Check that cluster correction using predecessor is working as expected.
In the following example, the predecessor correction was not working properly
since it was not using the right indices.
This non-regression test checks that reordering the data does not change the results.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/26324
"""
X_1 = np.array([1, 2, 3, 1, 8, 8, 7, 100]).reshape(-1, 1)
reorder = [0, 1, 2, 4, 5, 6, 7, 3]
X_2 = X_1[reorder]
optics_1 = OPTICS(min_samples=3, metric="euclidean").fit(X_1)
optics_2 = OPTICS(min_samples=3, metric="euclidean").fit(X_2)
assert_array_equal(optics_1.labels_[reorder], optics_2.labels_)
|
Check that cluster correction using predecessor is working as expected.
In the following example, the predecessor correction was not working properly
since it was not using the right indices.
This non-regression test checks that reordering the data does not change the results.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/26324
|
test_optics_predecessor_correction_ordering
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_optics.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_optics.py
|
BSD-3-Clause
|
def test_spectral_clustering_np_matrix_raises():
"""Check that spectral_clustering raises an informative error when passed
a np.matrix. See #10993"""
X = np.matrix([[0.0, 2.0], [2.0, 0.0]])
msg = r"np\.matrix is not supported. Please convert to a numpy array"
with pytest.raises(TypeError, match=msg):
spectral_clustering(X)
|
Check that spectral_clustering raises an informative error when passed
a np.matrix. See #10993
|
test_spectral_clustering_np_matrix_raises
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_spectral.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_spectral.py
|
BSD-3-Clause
|
def test_spectral_clustering_not_infinite_loop(capsys, monkeypatch):
"""Check that discretize raises LinAlgError when svd never converges.
Non-regression test for #21380
"""
def new_svd(*args, **kwargs):
raise LinAlgError()
monkeypatch.setattr(np.linalg, "svd", new_svd)
vectors = np.ones((10, 4))
with pytest.raises(LinAlgError, match="SVD did not converge"):
discretize(vectors)
|
Check that discretize raises LinAlgError when svd never converges.
Non-regression test for #21380
|
test_spectral_clustering_not_infinite_loop
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_spectral.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_spectral.py
|
BSD-3-Clause
|
def _brute_mst(mutual_reachability, min_samples):
"""
Builds a minimum spanning tree (MST) from the provided mutual-reachability
values. This function dispatches to a custom Cython implementation for
dense arrays, and `scipy.sparse.csgraph.minimum_spanning_tree` for sparse
arrays/matrices.
Parameters
----------
mutual_reachability : {ndarray, sparse matrix} of shape \
(n_samples, n_samples)
Weighted adjacency matrix of the mutual reachability graph.
min_samples : int, default=None
The number of samples in a neighborhood for a point
to be considered as a core point. This includes the point itself.
Returns
-------
mst : ndarray of shape (n_samples - 1,), dtype=MST_edge_dtype
The MST representation of the mutual-reachability graph. The MST is
represented as a collection of edges.
"""
if not issparse(mutual_reachability):
return mst_from_mutual_reachability(mutual_reachability)
# Check if the mutual reachability matrix has any rows which have
# less than `min_samples` non-zero elements.
indptr = mutual_reachability.indptr
num_points = mutual_reachability.shape[0]
if any((indptr[i + 1] - indptr[i]) < min_samples for i in range(num_points)):
raise ValueError(
f"There exists points with fewer than {min_samples} neighbors. Ensure"
" your distance matrix has non-zero values for at least"
f" `min_sample`={min_samples} neighbors for each points (i.e. K-nn"
" graph), or specify a `max_distance` in `metric_params` to use when"
" distances are missing."
)
# Check connected component on mutual reachability.
# If more than one connected component is present,
# it means that the graph is disconnected.
n_components = csgraph.connected_components(
mutual_reachability, directed=False, return_labels=False
)
if n_components > 1:
raise ValueError(
f"Sparse mutual reachability matrix has {n_components} connected"
" components. HDBSCAN cannot be performed on a disconnected graph. Ensure"
" that the sparse distance matrix has only one connected component."
)
# Compute the minimum spanning tree for the sparse graph
sparse_min_spanning_tree = csgraph.minimum_spanning_tree(mutual_reachability)
rows, cols = sparse_min_spanning_tree.nonzero()
mst = np.rec.fromarrays(
[rows, cols, sparse_min_spanning_tree.data],
dtype=MST_edge_dtype,
)
return mst
|
Builds a minimum spanning tree (MST) from the provided mutual-reachability
values. This function dispatches to a custom Cython implementation for
dense arrays, and `scipy.sparse.csgraph.minimum_spanning_tree` for sparse
arrays/matrices.
Parameters
----------
mutual_reachability : {ndarray, sparse matrix} of shape (n_samples, n_samples)
Weighted adjacency matrix of the mutual reachability graph.
min_samples : int, default=None
The number of samples in a neighborhood for a point
to be considered as a core point. This includes the point itself.
Returns
-------
mst : ndarray of shape (n_samples - 1,), dtype=MST_edge_dtype
The MST representation of the mutual-reachability graph. The MST is
represented as a collection of edges.
|
_brute_mst
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_hdbscan/hdbscan.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_hdbscan/hdbscan.py
|
BSD-3-Clause
|
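A minimal sketch of the sparse branch that `_brute_mst` dispatches to, using only the `scipy.sparse.csgraph` calls shown above on a toy symmetric graph; the graph here is an illustration rather than a real mutual-reachability matrix, and the per-row `min_samples` check is omitted:

```python
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse import csgraph

# Toy symmetric weighted graph standing in for a mutual-reachability matrix.
graph = csr_matrix(np.array([
    [0.0, 1.0, 4.0, 0.0],
    [1.0, 0.0, 2.0, 0.0],
    [4.0, 2.0, 0.0, 3.0],
    [0.0, 0.0, 3.0, 0.0],
]))

# The sparse code path first rejects disconnected graphs ...
n_components = csgraph.connected_components(
    graph, directed=False, return_labels=False
)
assert n_components == 1

# ... then reads the MST edges off scipy's minimum_spanning_tree result.
mst = csgraph.minimum_spanning_tree(graph)
rows, cols = mst.nonzero()
print(list(zip(rows.tolist(), cols.tolist(), mst.data.tolist())))
# e.g. [(0, 1, 1.0), (1, 2, 2.0), (2, 3, 3.0)]
```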
def _process_mst(min_spanning_tree):
"""
Builds a single-linkage tree (SLT) from the provided minimum spanning tree
(MST). The MST is first sorted then processed by a custom Cython routine.
Parameters
----------
min_spanning_tree : ndarray of shape (n_samples - 1,), dtype=MST_edge_dtype
The MST representation of the mutual-reachability graph. The MST is
represented as a collection of edges.
Returns
-------
single_linkage : ndarray of shape (n_samples - 1,), dtype=HIERARCHY_dtype
The single-linkage tree (dendrogram) built from the MST.
"""
# Sort edges of the min_spanning_tree by weight
row_order = np.argsort(min_spanning_tree["distance"])
min_spanning_tree = min_spanning_tree[row_order]
# Convert edge list into standard hierarchical clustering format
return make_single_linkage(min_spanning_tree)
|
Builds a single-linkage tree (SLT) from the provided minimum spanning tree
(MST). The MST is first sorted then processed by a custom Cython routine.
Parameters
----------
min_spanning_tree : ndarray of shape (n_samples - 1,), dtype=MST_edge_dtype
The MST representation of the mutual-reachability graph. The MST is
represented as a collection of edges.
Returns
-------
single_linkage : ndarray of shape (n_samples - 1,), dtype=HIERARCHY_dtype
The single-linkage tree (dendrogram) built from the MST.
|
_process_mst
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_hdbscan/hdbscan.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_hdbscan/hdbscan.py
|
BSD-3-Clause
|
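The sort-by-weight step in `_process_mst`, shown in isolation; the edge dtype below is a hypothetical stand-in, since `MST_edge_dtype` is defined in the Cython extension and is not part of these rows:

```python
import numpy as np

# Hypothetical stand-in for MST_edge_dtype.
edge_dtype = np.dtype([
    ("current_node", np.int64),
    ("next_node", np.int64),
    ("distance", np.float64),
])

mst = np.array([(0, 1, 0.7), (1, 2, 0.2), (2, 3, 0.5)], dtype=edge_dtype)

# Edges are sorted by weight before the dendrogram is built.
mst_sorted = mst[np.argsort(mst["distance"])]
print(mst_sorted["distance"])  # [0.2 0.5 0.7]
```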
def _hdbscan_brute(
X,
min_samples=5,
alpha=None,
metric="euclidean",
n_jobs=None,
copy=False,
**metric_params,
):
"""
Builds a single-linkage tree (SLT) from the input data `X`. If
`metric="precomputed"` then `X` must be a symmetric array of distances.
Otherwise, the pairwise distances are calculated directly and passed to
`mutual_reachability_graph`.
Parameters
----------
X : ndarray of shape (n_samples, n_features) or (n_samples, n_samples)
Either the raw data from which to compute the pairwise distances,
or the precomputed distances.
min_samples : int, default=None
The number of samples in a neighborhood for a point
to be considered as a core point. This includes the point itself.
alpha : float, default=1.0
A distance scaling parameter as used in robust single linkage.
metric : str or callable, default='euclidean'
The metric to use when calculating distance between instances in a
feature array.
- If metric is a string or callable, it must be one of
the options allowed by :func:`~sklearn.metrics.pairwise_distances`
for its metric parameter.
- If metric is "precomputed", X is assumed to be a distance matrix and
must be square.
n_jobs : int, default=None
The number of jobs to use for computing the pairwise distances. This
works by breaking down the pairwise matrix into n_jobs even slices and
computing them in parallel. This parameter is passed directly to
:func:`~sklearn.metrics.pairwise_distances`.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
copy : bool, default=False
If `copy=True` then any time an in-place modifications would be made
that would overwrite `X`, a copy will first be made, guaranteeing that
the original data will be unchanged. Currently, it only applies when
`metric="precomputed"`, when passing a dense array or a CSR sparse
array/matrix.
metric_params : dict, default=None
Arguments passed to the distance metric.
Returns
-------
single_linkage : ndarray of shape (n_samples - 1,), dtype=HIERARCHY_dtype
The single-linkage tree (dendrogram) built from the MST.
"""
if metric == "precomputed":
if X.shape[0] != X.shape[1]:
raise ValueError(
"The precomputed distance matrix is expected to be symmetric, however"
f" it has shape {X.shape}. Please verify that the"
" distance matrix was constructed correctly."
)
if not _allclose_dense_sparse(X, X.T):
raise ValueError(
"The precomputed distance matrix is expected to be symmetric, however"
" its values appear to be asymmetric. Please verify that the distance"
" matrix was constructed correctly."
)
distance_matrix = X.copy() if copy else X
else:
distance_matrix = pairwise_distances(
X, metric=metric, n_jobs=n_jobs, **metric_params
)
distance_matrix /= alpha
max_distance = metric_params.get("max_distance", 0.0)
if issparse(distance_matrix) and distance_matrix.format != "csr":
# we need CSR format to avoid a conversion in `_brute_mst` when calling
# `csgraph.connected_components`
distance_matrix = distance_matrix.tocsr()
# Note that `distance_matrix` is manipulated in-place, however we do not
# need it for anything else past this point, hence the operation is safe.
mutual_reachability_ = mutual_reachability_graph(
distance_matrix, min_samples=min_samples, max_distance=max_distance
)
min_spanning_tree = _brute_mst(mutual_reachability_, min_samples=min_samples)
# Warn if the MST couldn't be constructed around the missing distances
if np.isinf(min_spanning_tree["distance"]).any():
warn(
(
"The minimum spanning tree contains edge weights with value "
"infinity. Potentially, you are missing too many distances "
"in the initial distance matrix for the given neighborhood "
"size."
),
UserWarning,
)
return _process_mst(min_spanning_tree)
|
Builds a single-linkage tree (SLT) from the input data `X`. If
`metric="precomputed"` then `X` must be a symmetric array of distances.
Otherwise, the pairwise distances are calculated directly and passed to
`mutual_reachability_graph`.
Parameters
----------
X : ndarray of shape (n_samples, n_features) or (n_samples, n_samples)
Either the raw data from which to compute the pairwise distances,
or the precomputed distances.
min_samples : int, default=None
The number of samples in a neighborhood for a point
to be considered as a core point. This includes the point itself.
alpha : float, default=1.0
A distance scaling parameter as used in robust single linkage.
metric : str or callable, default='euclidean'
The metric to use when calculating distance between instances in a
feature array.
- If metric is a string or callable, it must be one of
the options allowed by :func:`~sklearn.metrics.pairwise_distances`
for its metric parameter.
- If metric is "precomputed", X is assumed to be a distance matrix and
must be square.
n_jobs : int, default=None
The number of jobs to use for computing the pairwise distances. This
works by breaking down the pairwise matrix into n_jobs even slices and
computing them in parallel. This parameter is passed directly to
:func:`~sklearn.metrics.pairwise_distances`.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
copy : bool, default=False
If `copy=True` then any time an in-place modifications would be made
that would overwrite `X`, a copy will first be made, guaranteeing that
the original data will be unchanged. Currently, it only applies when
`metric="precomputed"`, when passing a dense array or a CSR sparse
array/matrix.
metric_params : dict, default=None
Arguments passed to the distance metric.
Returns
-------
single_linkage : ndarray of shape (n_samples - 1,), dtype=HIERARCHY_dtype
The single-linkage tree (dendrogram) built from the MST.
|
_hdbscan_brute
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_hdbscan/hdbscan.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_hdbscan/hdbscan.py
|
BSD-3-Clause
|
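A standalone sketch of the symmetry validation that `_hdbscan_brute` applies to a dense precomputed matrix (square shape and `X == X.T`); `np.allclose` is used here in place of the internal `_allclose_dense_sparse` helper referenced above:

```python
import numpy as np
from sklearn.metrics import euclidean_distances

rng = np.random.RandomState(0)
D = euclidean_distances(rng.rand(5, 2))

# A dense precomputed matrix must be square and (numerically) symmetric.
assert D.shape[0] == D.shape[1]
assert np.allclose(D, D.T)

# An asymmetric edit, as in test_hdbscan_distance_matrix, fails the check.
D_bad = D.copy()
D_bad[0, 1] = 10.0
assert not np.allclose(D_bad, D_bad.T)
```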
def _hdbscan_prims(
X,
algo,
min_samples=5,
alpha=1.0,
metric="euclidean",
leaf_size=40,
n_jobs=None,
**metric_params,
):
"""
Builds a single-linkage tree (SLT) from raw feature data `X` using a
KDTree- or BallTree-based nearest-neighbors search. Core distances are taken
from the `min_samples`-th nearest neighbor, and the mutual-reachability
distances are handled implicitly during MST construction.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
The raw data.
min_samples : int, default=None
The number of samples in a neighborhood for a point
to be considered as a core point. This includes the point itself.
alpha : float, default=1.0
A distance scaling parameter as used in robust single linkage.
metric : str or callable, default='euclidean'
The metric to use when calculating distance between instances in a
feature array. `metric` must be one of the options allowed by
:func:`~sklearn.metrics.pairwise_distances` for its metric
parameter.
n_jobs : int, default=None
The number of jobs to use for computing the pairwise distances. This
works by breaking down the pairwise matrix into n_jobs even slices and
computing them in parallel. This parameter is passed directly to
:func:`~sklearn.metrics.pairwise_distances`.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
copy : bool, default=False
If `copy=True` then any time an in-place modifications would be made
that would overwrite `X`, a copy will first be made, guaranteeing that
the original data will be unchanged. Currently, it only applies when
`metric="precomputed"`, when passing a dense array or a CSR sparse
array/matrix.
metric_params : dict, default=None
Arguments passed to the distance metric.
Returns
-------
single_linkage : ndarray of shape (n_samples - 1,), dtype=HIERARCHY_dtype
The single-linkage tree (dendrogram) built from the MST.
"""
# The Cython routines used require contiguous arrays
X = np.asarray(X, order="C")
# Get distance to kth nearest neighbour
nbrs = NearestNeighbors(
n_neighbors=min_samples,
algorithm=algo,
leaf_size=leaf_size,
metric=metric,
metric_params=metric_params,
n_jobs=n_jobs,
p=None,
).fit(X)
neighbors_distances, _ = nbrs.kneighbors(X, min_samples, return_distance=True)
core_distances = np.ascontiguousarray(neighbors_distances[:, -1])
dist_metric = DistanceMetric.get_metric(metric, **metric_params)
# Mutual reachability distance is implicit in mst_from_data_matrix
min_spanning_tree = mst_from_data_matrix(X, core_distances, dist_metric, alpha)
return _process_mst(min_spanning_tree)
|
Builds a single-linkage tree (SLT) from raw feature data `X` using a
KDTree- or BallTree-based nearest-neighbors search. Core distances are taken
from the `min_samples`-th nearest neighbor, and the mutual-reachability
distances are handled implicitly during MST construction.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
The raw data.
min_samples : int, default=None
The number of samples in a neighborhood for a point
to be considered as a core point. This includes the point itself.
alpha : float, default=1.0
A distance scaling parameter as used in robust single linkage.
metric : str or callable, default='euclidean'
The metric to use when calculating distance between instances in a
feature array. `metric` must be one of the options allowed by
:func:`~sklearn.metrics.pairwise_distances` for its metric
parameter.
n_jobs : int, default=None
The number of jobs to use for computing the pairwise distances. This
works by breaking down the pairwise matrix into n_jobs even slices and
computing them in parallel. This parameter is passed directly to
:func:`~sklearn.metrics.pairwise_distances`.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
copy : bool, default=False
If `copy=True` then any time an in-place modifications would be made
that would overwrite `X`, a copy will first be made, guaranteeing that
the original data will be unchanged. Currently, it only applies when
`metric="precomputed"`, when passing a dense array or a CSR sparse
array/matrix.
metric_params : dict, default=None
Arguments passed to the distance metric.
Returns
-------
single_linkage : ndarray of shape (n_samples - 1,), dtype=HIERARCHY_dtype
The single-linkage tree (dendrogram) built from the MST.
|
_hdbscan_prims
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_hdbscan/hdbscan.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_hdbscan/hdbscan.py
|
BSD-3-Clause
|
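The core-distance computation at the start of `_hdbscan_prims`, shown on its own: the core distance of a point is its distance to the `min_samples`-th nearest neighbour, where the point itself counts as the first neighbour.

```python
from sklearn.datasets import make_blobs
from sklearn.neighbors import NearestNeighbors

X, _ = make_blobs(n_samples=50, centers=3, random_state=0)
min_samples = 5

# kneighbors includes each point as its own nearest neighbour (distance 0),
# so the last column holds the distance to the min_samples-th neighbour.
nbrs = NearestNeighbors(n_neighbors=min_samples).fit(X)
neighbor_distances, _ = nbrs.kneighbors(X)
core_distances = neighbor_distances[:, -1]
print(core_distances.shape)  # (50,)
```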
def remap_single_linkage_tree(tree, internal_to_raw, non_finite):
"""
Takes an internal single_linkage_tree structure and adds back in a set of points
that were initially detected as non-finite and returns that new tree.
These points will all be merged into the final node at np.inf distance and
considered noise points.
Parameters
----------
tree : ndarray of shape (n_samples - 1,), dtype=HIERARCHY_dtype
The single-linkage tree (dendrogram) built from the MST.
internal_to_raw: dict
A mapping from internal integer index to the raw integer index
non_finite : ndarray
Boolean array of which entries in the raw data are non-finite
"""
finite_count = len(internal_to_raw)
outlier_count = len(non_finite)
for i, _ in enumerate(tree):
left = tree[i]["left_node"]
right = tree[i]["right_node"]
if left < finite_count:
tree[i]["left_node"] = internal_to_raw[left]
else:
tree[i]["left_node"] = left + outlier_count
if right < finite_count:
tree[i]["right_node"] = internal_to_raw[right]
else:
tree[i]["right_node"] = right + outlier_count
outlier_tree = np.zeros(len(non_finite), dtype=HIERARCHY_dtype)
last_cluster_id = max(
tree[tree.shape[0] - 1]["left_node"], tree[tree.shape[0] - 1]["right_node"]
)
last_cluster_size = tree[tree.shape[0] - 1]["cluster_size"]
for i, outlier in enumerate(non_finite):
outlier_tree[i] = (outlier, last_cluster_id + 1, np.inf, last_cluster_size + 1)
last_cluster_id += 1
last_cluster_size += 1
tree = np.concatenate([tree, outlier_tree])
return tree
|
Takes an internal single_linkage_tree structure and adds back in a set of points
that were initially detected as non-finite and returns that new tree.
These points will all be merged into the final node at np.inf distance and
considered noise points.
Parameters
----------
tree : ndarray of shape (n_samples - 1,), dtype=HIERARCHY_dtype
The single-linkage tree (dendrogram) built from the MST.
internal_to_raw: dict
A mapping from internal integer index to the raw integer index
non_finite : ndarray
Boolean array of which entries in the raw data are non-finite
|
remap_single_linkage_tree
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_hdbscan/hdbscan.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_hdbscan/hdbscan.py
|
BSD-3-Clause
|
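The index bookkeeping in `remap_single_linkage_tree`, illustrated with a plain dict instead of the `HIERARCHY_dtype` tree: sample node ids are mapped back to raw indices, while cluster node ids are shifted by the number of non-finite outliers.

```python
import numpy as np

# Suppose raw rows 1 and 3 were non-finite and dropped before clustering,
# so the remaining rows were re-indexed 0..2 internally.
finite_index = np.array([0, 2, 4])
internal_to_raw = dict(enumerate(finite_index))
outlier_count = 2

# Node ids below len(internal_to_raw) are samples; larger ids are clusters.
node_ids = [0, 1, 2, 3, 4]
remapped = [
    int(internal_to_raw[n]) if n < len(internal_to_raw) else n + outlier_count
    for n in node_ids
]
print(remapped)  # [0, 2, 4, 5, 6]
```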
def _get_finite_row_indices(matrix):
"""
Returns the indices of the purely finite rows of a
sparse matrix or dense ndarray
"""
if issparse(matrix):
row_indices = np.array(
[i for i, row in enumerate(matrix.tolil().data) if np.all(np.isfinite(row))]
)
else:
(row_indices,) = np.isfinite(matrix.sum(axis=1)).nonzero()
return row_indices
|
Returns the indices of the purely finite rows of a
sparse matrix or dense ndarray
|
_get_finite_row_indices
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_hdbscan/hdbscan.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_hdbscan/hdbscan.py
|
BSD-3-Clause
|
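The dense branch of `_get_finite_row_indices` on a small example: a row is kept only if the sum of its entries is finite, since any `np.nan` or `np.inf` makes the sum non-finite.

```python
import numpy as np

X = np.array([
    [0.0, 1.0],
    [np.nan, 1.0],
    [2.0, 3.0],
    [np.inf, 0.5],
])

# Rows 1 and 3 contain non-finite values and are filtered out.
(row_indices,) = np.isfinite(X.sum(axis=1)).nonzero()
print(row_indices)  # [0 2]
```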
def fit(self, X, y=None):
"""Find clusters based on hierarchical density-based clustering.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features), or \
ndarray of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
`metric='precomputed'`.
y : None
Ignored.
Returns
-------
self : object
Returns self.
"""
if self.metric == "precomputed" and self.store_centers is not None:
raise ValueError(
"Cannot store centers when using a precomputed distance matrix."
)
self._metric_params = self.metric_params or {}
if self.metric != "precomputed":
# Non-precomputed matrices may contain non-finite values.
X = validate_data(
self,
X,
accept_sparse=["csr", "lil"],
ensure_all_finite=False,
dtype=np.float64,
)
self._raw_data = X
all_finite = True
try:
_assert_all_finite(X.data if issparse(X) else X)
except ValueError:
all_finite = False
if not all_finite:
# Pass only the purely finite indices into hdbscan
# We will later assign all non-finite points their
# corresponding labels, as specified in `_OUTLIER_ENCODING`
# Reduce X to make the checks for missing/outlier samples more
# convenient.
reduced_X = X.sum(axis=1)
# Samples with missing data are denoted by the presence of
# `np.nan`
missing_index = np.isnan(reduced_X).nonzero()[0]
# Outlier samples are denoted by the presence of `np.inf`
infinite_index = np.isinf(reduced_X).nonzero()[0]
# Continue with only finite samples
finite_index = _get_finite_row_indices(X)
internal_to_raw = {x: y for x, y in enumerate(finite_index)}
X = X[finite_index]
elif issparse(X):
# Handle sparse precomputed distance matrices separately
X = validate_data(
self,
X,
accept_sparse=["csr", "lil"],
dtype=np.float64,
force_writeable=True,
)
else:
# Only non-sparse, precomputed distance matrices are handled here
# and thereby allowed to contain numpy.inf for missing distances
# Perform data validation after removing infinite values (numpy.inf)
# from the given distance matrix.
X = validate_data(
self, X, ensure_all_finite=False, dtype=np.float64, force_writeable=True
)
if np.isnan(X).any():
# TODO: Support np.nan in Cython implementation for precomputed
# dense HDBSCAN
raise ValueError("np.nan values found in precomputed-dense")
if X.shape[0] == 1:
raise ValueError("n_samples=1 while HDBSCAN requires more than one sample")
self._min_samples = (
self.min_cluster_size if self.min_samples is None else self.min_samples
)
if self._min_samples > X.shape[0]:
raise ValueError(
f"min_samples ({self._min_samples}) must be at most the number of"
f" samples in X ({X.shape[0]})"
)
mst_func = None
kwargs = dict(
X=X,
min_samples=self._min_samples,
alpha=self.alpha,
metric=self.metric,
n_jobs=self.n_jobs,
**self._metric_params,
)
if self.algorithm == "kd_tree" and self.metric not in KDTree.valid_metrics:
raise ValueError(
f"{self.metric} is not a valid metric for a KDTree-based algorithm."
" Please select a different metric."
)
elif (
self.algorithm == "ball_tree" and self.metric not in BallTree.valid_metrics
):
raise ValueError(
f"{self.metric} is not a valid metric for a BallTree-based algorithm."
" Please select a different metric."
)
if self.algorithm != "auto":
if (
self.metric != "precomputed"
and issparse(X)
and self.algorithm != "brute"
):
raise ValueError("Sparse data matrices only support algorithm `brute`.")
if self.algorithm == "brute":
mst_func = _hdbscan_brute
kwargs["copy"] = self.copy
elif self.algorithm == "kd_tree":
mst_func = _hdbscan_prims
kwargs["algo"] = "kd_tree"
kwargs["leaf_size"] = self.leaf_size
else:
mst_func = _hdbscan_prims
kwargs["algo"] = "ball_tree"
kwargs["leaf_size"] = self.leaf_size
else:
if issparse(X) or self.metric not in FAST_METRICS:
# We can't do much with sparse matrices ...
mst_func = _hdbscan_brute
kwargs["copy"] = self.copy
elif self.metric in KDTree.valid_metrics:
# TODO: Benchmark KD vs Ball Tree efficiency
mst_func = _hdbscan_prims
kwargs["algo"] = "kd_tree"
kwargs["leaf_size"] = self.leaf_size
else:
# Metric is a valid BallTree metric
mst_func = _hdbscan_prims
kwargs["algo"] = "ball_tree"
kwargs["leaf_size"] = self.leaf_size
self._single_linkage_tree_ = mst_func(**kwargs)
self.labels_, self.probabilities_ = tree_to_labels(
self._single_linkage_tree_,
self.min_cluster_size,
self.cluster_selection_method,
self.allow_single_cluster,
self.cluster_selection_epsilon,
self.max_cluster_size,
)
if self.metric != "precomputed" and not all_finite:
# Remap indices to align with original data in the case of
# non-finite entries. Samples with np.inf are mapped to -1 and
# those with np.nan are mapped to -2.
self._single_linkage_tree_ = remap_single_linkage_tree(
self._single_linkage_tree_,
internal_to_raw,
# There may be overlap for points w/ both `np.inf` and `np.nan`
non_finite=set(np.hstack([infinite_index, missing_index])),
)
new_labels = np.empty(self._raw_data.shape[0], dtype=np.int32)
new_labels[finite_index] = self.labels_
new_labels[infinite_index] = _OUTLIER_ENCODING["infinite"]["label"]
new_labels[missing_index] = _OUTLIER_ENCODING["missing"]["label"]
self.labels_ = new_labels
new_probabilities = np.zeros(self._raw_data.shape[0], dtype=np.float64)
new_probabilities[finite_index] = self.probabilities_
# Infinite outliers have probability 0 by convention, though this
# is arbitrary.
new_probabilities[infinite_index] = _OUTLIER_ENCODING["infinite"]["prob"]
new_probabilities[missing_index] = _OUTLIER_ENCODING["missing"]["prob"]
self.probabilities_ = new_probabilities
if self.store_centers:
self._weighted_cluster_center(X)
return self
|
Find clusters based on hierarchical density-based clustering.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features), or ndarray of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
`metric='precomputed'`.
y : None
Ignored.
Returns
-------
self : object
Returns self.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_hdbscan/hdbscan.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_hdbscan/hdbscan.py
|
BSD-3-Clause
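A hedged usage sketch of the fit path above: rows containing np.nan are treated as missing samples and rows containing np.inf as infinite outliers, receiving the labels documented in `dbscan_clustering` (-3 and -2 respectively). The blob data and the expected labels shown in the comment are illustrative assumptions.

import numpy as np
from sklearn.cluster import HDBSCAN
from sklearn.datasets import make_blobs

X, _ = make_blobs(n_samples=200, centers=3, random_state=0)
X[0] = [np.nan, 1.0]   # missing sample
X[1] = [np.inf, 1.0]   # infinite outlier sample

hdb = HDBSCAN(min_cluster_size=10).fit(X)
print(hdb.labels_[0], hdb.labels_[1])  # expected: -3 -2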
|
def _weighted_cluster_center(self, X):
"""Calculate and store the centroids/medoids of each cluster.
This requires `X` to be a raw feature array, not precomputed
distances. Rather than return outputs directly, this helper method
instead stores them in the `self.{centroids, medoids}_` attributes.
The choice for which attributes are calculated and stored is mediated
by the value of `self.store_centers`.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
The feature array that the estimator was fit with.
"""
# Number of non-noise clusters
n_clusters = len(set(self.labels_) - {-1, -2})
mask = np.empty((X.shape[0],), dtype=np.bool_)
make_centroids = self.store_centers in ("centroid", "both")
make_medoids = self.store_centers in ("medoid", "both")
if make_centroids:
self.centroids_ = np.empty((n_clusters, X.shape[1]), dtype=np.float64)
if make_medoids:
self.medoids_ = np.empty((n_clusters, X.shape[1]), dtype=np.float64)
        # Need to handle clusters iteratively since each may have a different
        # number of samples, hence we can't create a homogeneous 3D array.
for idx in range(n_clusters):
mask = self.labels_ == idx
data = X[mask]
strength = self.probabilities_[mask]
if make_centroids:
self.centroids_[idx] = np.average(data, weights=strength, axis=0)
if make_medoids:
# TODO: Implement weighted argmin PWD backend
dist_mat = pairwise_distances(
data, metric=self.metric, **self._metric_params
)
dist_mat = dist_mat * strength
medoid_index = np.argmin(dist_mat.sum(axis=1))
self.medoids_[idx] = data[medoid_index]
return
|
Calculate and store the centroids/medoids of each cluster.
This requires `X` to be a raw feature array, not precomputed
distances. Rather than return outputs directly, this helper method
instead stores them in the `self.{centroids, medoids}_` attributes.
The choice for which attributes are calculated and stored is mediated
by the value of `self.store_centers`.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
The feature array that the estimator was fit with.
|
_weighted_cluster_center
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_hdbscan/hdbscan.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_hdbscan/hdbscan.py
|
BSD-3-Clause
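An illustrative sketch of `store_centers` (assuming blob-like data on which HDBSCAN recovers the clusters): centroids are probability-weighted means, while medoids are actual samples of X.

from sklearn.cluster import HDBSCAN
from sklearn.datasets import make_blobs

X, _ = make_blobs(n_samples=300, centers=3, random_state=42)
hdb = HDBSCAN(min_cluster_size=15, store_centers="both").fit(X)

# One row per non-noise cluster; shapes depend on how many clusters were found.
print(hdb.centroids_.shape)  # probability-weighted means
print(hdb.medoids_.shape)    # samples of X minimizing the weighted distances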
|
def dbscan_clustering(self, cut_distance, min_cluster_size=5):
"""Return clustering given by DBSCAN without border points.
Return clustering that would be equivalent to running DBSCAN* for a
        particular cut_distance (or epsilon). DBSCAN* can be thought of as
        DBSCAN without the border points. As such, these results may differ
        slightly from `cluster.DBSCAN` due to the difference in implementation
        over the non-core points.
        This can also be thought of as a flat clustering derived from a constant
        height cut through the single linkage tree.
        This represents the result of selecting a cut value for robust single linkage
        clustering. The `min_cluster_size` allows the flat clustering to declare noise
        points (and clusters smaller than `min_cluster_size`).
Parameters
----------
cut_distance : float
The mutual reachability distance cut value to use to generate a
flat clustering.
min_cluster_size : int, default=5
            Clusters smaller than this value will be called 'noise' and remain
unclustered in the resulting flat clustering.
Returns
-------
labels : ndarray of shape (n_samples,)
An array of cluster labels, one per datapoint.
Outliers are labeled as follows:
- Noisy samples are given the label -1.
- Samples with infinite elements (+/- np.inf) are given the label -2.
- Samples with missing data are given the label -3, even if they
also have infinite elements.
"""
labels = labelling_at_cut(
self._single_linkage_tree_, cut_distance, min_cluster_size
)
# Infer indices from labels generated during `fit`
infinite_index = self.labels_ == _OUTLIER_ENCODING["infinite"]["label"]
missing_index = self.labels_ == _OUTLIER_ENCODING["missing"]["label"]
# Overwrite infinite/missing outlier samples (otherwise simple noise)
labels[infinite_index] = _OUTLIER_ENCODING["infinite"]["label"]
labels[missing_index] = _OUTLIER_ENCODING["missing"]["label"]
return labels
|
Return clustering given by DBSCAN without border points.
Return clustering that would be equivalent to running DBSCAN* for a
particular cut_distance (or epsilon). DBSCAN* can be thought of as
DBSCAN without the border points. As such, these results may differ
slightly from `cluster.DBSCAN` due to the difference in implementation
over the non-core points.
This can also be thought of as a flat clustering derived from a constant
height cut through the single linkage tree.
This represents the result of selecting a cut value for robust single linkage
clustering. The `min_cluster_size` allows the flat clustering to declare noise
points (and clusters smaller than `min_cluster_size`).
Parameters
----------
cut_distance : float
The mutual reachability distance cut value to use to generate a
flat clustering.
min_cluster_size : int, default=5
    Clusters smaller than this value will be called 'noise' and remain
unclustered in the resulting flat clustering.
Returns
-------
labels : ndarray of shape (n_samples,)
An array of cluster labels, one per datapoint.
Outliers are labeled as follows:
- Noisy samples are given the label -1.
- Samples with infinite elements (+/- np.inf) are given the label -2.
- Samples with missing data are given the label -3, even if they
also have infinite elements.
|
dbscan_clustering
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_hdbscan/hdbscan.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_hdbscan/hdbscan.py
|
BSD-3-Clause
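A short usage sketch: after `fit`, flat DBSCAN*-style labels can be extracted at any mutual-reachability cut without refitting. The cut value of 0.5 below is an arbitrary illustrative choice.

from sklearn.cluster import HDBSCAN
from sklearn.datasets import make_blobs

X, _ = make_blobs(n_samples=200, centers=3, random_state=0)
hdb = HDBSCAN(min_cluster_size=10).fit(X)

# Reuse the stored single-linkage tree for a flat clustering at a fixed cut.
labels = hdb.dbscan_clustering(cut_distance=0.5, min_cluster_size=10)
print(labels[:10])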
|
def test_mutual_reachability_graph_error_sparse_format():
"""Check that we raise an error if the sparse format is not CSR."""
rng = np.random.RandomState(0)
X = rng.randn(10, 10)
X = X.T @ X
np.fill_diagonal(X, 0.0)
X = _convert_container(X, "sparse_csc")
err_msg = "Only sparse CSR matrices are supported"
with pytest.raises(ValueError, match=err_msg):
mutual_reachability_graph(X)
|
Check that we raise an error if the sparse format is not CSR.
|
test_mutual_reachability_graph_error_sparse_format
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_hdbscan/tests/test_reachibility.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_hdbscan/tests/test_reachibility.py
|
BSD-3-Clause
|
def test_mutual_reachability_graph_inplace(array_type):
"""Check that the operation is happening inplace."""
rng = np.random.RandomState(0)
X = rng.randn(10, 10)
X = X.T @ X
np.fill_diagonal(X, 0.0)
X = _convert_container(X, array_type)
mr_graph = mutual_reachability_graph(X)
assert id(mr_graph) == id(X)
|
Check that the operation is happening inplace.
|
test_mutual_reachability_graph_inplace
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_hdbscan/tests/test_reachibility.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_hdbscan/tests/test_reachibility.py
|
BSD-3-Clause
|
def test_mutual_reachability_graph_equivalence_dense_sparse():
"""Check that we get the same results for dense and sparse implementation."""
rng = np.random.RandomState(0)
X = rng.randn(5, 5)
X_dense = X.T @ X
X_sparse = _convert_container(X_dense, "sparse_csr")
mr_graph_dense = mutual_reachability_graph(X_dense, min_samples=3)
mr_graph_sparse = mutual_reachability_graph(X_sparse, min_samples=3)
assert_allclose(mr_graph_dense, mr_graph_sparse.toarray())
|
Check that we get the same results for dense and sparse implementation.
|
test_mutual_reachability_graph_equivalence_dense_sparse
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_hdbscan/tests/test_reachibility.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_hdbscan/tests/test_reachibility.py
|
BSD-3-Clause
|
def test_mutual_reachability_graph_preserves_dtype(array_type, dtype):
"""Check that the computation preserve dtype thanks to fused types."""
rng = np.random.RandomState(0)
X = rng.randn(10, 10)
X = (X.T @ X).astype(dtype)
np.fill_diagonal(X, 0.0)
X = _convert_container(X, array_type)
assert X.dtype == dtype
mr_graph = mutual_reachability_graph(X)
assert mr_graph.dtype == dtype
|
Check that the computation preserves dtype thanks to fused types.
|
test_mutual_reachability_graph_preserves_dtype
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_hdbscan/tests/test_reachibility.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_hdbscan/tests/test_reachibility.py
|
BSD-3-Clause
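For context, the quantity exercised by these tests is the mutual reachability distance, conventionally max(core_k(a), core_k(b), d(a, b)). Below is a minimal NumPy sketch of that definition, independent of the private helper under test; treating `min_samples` as the k-th neighbour with the point itself included is an assumption of this sketch.

import numpy as np
from sklearn.metrics import pairwise_distances

rng = np.random.RandomState(0)
X = rng.randn(6, 2)
min_samples = 3

D = pairwise_distances(X)
# Core distance: distance to the min_samples-th nearest neighbour (self included).
core = np.sort(D, axis=1)[:, min_samples - 1]
# Mutual reachability: elementwise max of the two core distances and the distance.
mutual_reachability = np.maximum(np.maximum(core[:, None], core[None, :]), D)
print(mutual_reachability.shape)  # (6, 6)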
|
def _transformers(self):
"""
        Internal list of transformers, containing only the names and the
        transformers while dropping the columns.
DO NOT USE: This is for the implementation of get_params via
BaseComposition._get_params which expects lists of tuples of len 2.
To iterate through the transformers, use ``self._iter`` instead.
"""
try:
return [(name, trans) for name, trans, _ in self.transformers]
except (TypeError, ValueError):
return self.transformers
|
Internal list of transformers, containing only the names and the
transformers while dropping the columns.
DO NOT USE: This is for the implementation of get_params via
BaseComposition._get_params which expects lists of tuples of len 2.
To iterate through the transformers, use ``self._iter`` instead.
|
_transformers
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/_column_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/_column_transformer.py
|
BSD-3-Clause
|
def _transformers(self, value):
"""DO NOT USE: This is for the implementation of set_params via
BaseComposition._get_params which gives lists of tuples of len 2.
"""
try:
self.transformers = [
(name, trans, col)
for ((name, trans), (_, _, col)) in zip(value, self.transformers)
]
except (TypeError, ValueError):
self.transformers = value
|
DO NOT USE: This is for the implementation of set_params via
BaseComposition._get_params which gives lists of tuples of len 2.
|
_transformers
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/_column_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/_column_transformer.py
|
BSD-3-Clause
|
def set_output(self, *, transform=None):
"""Set the output container when `"transform"` and `"fit_transform"` are called.
Calling `set_output` will set the output of all estimators in `transformers`
and `transformers_`.
Parameters
----------
transform : {"default", "pandas", "polars"}, default=None
Configure output of `transform` and `fit_transform`.
- `"default"`: Default output format of a transformer
- `"pandas"`: DataFrame output
- `"polars"`: Polars output
- `None`: Transform configuration is unchanged
.. versionadded:: 1.4
`"polars"` option was added.
Returns
-------
self : estimator instance
Estimator instance.
"""
super().set_output(transform=transform)
transformers = (
trans
for _, trans, _ in chain(
self.transformers, getattr(self, "transformers_", [])
)
if trans not in {"passthrough", "drop"}
)
for trans in transformers:
_safe_set_output(trans, transform=transform)
if self.remainder not in {"passthrough", "drop"}:
_safe_set_output(self.remainder, transform=transform)
return self
|
Set the output container when `"transform"` and `"fit_transform"` are called.
Calling `set_output` will set the output of all estimators in `transformers`
and `transformers_`.
Parameters
----------
transform : {"default", "pandas", "polars"}, default=None
Configure output of `transform` and `fit_transform`.
- `"default"`: Default output format of a transformer
- `"pandas"`: DataFrame output
- `"polars"`: Polars output
- `None`: Transform configuration is unchanged
.. versionadded:: 1.4
`"polars"` option was added.
Returns
-------
self : estimator instance
Estimator instance.
|
set_output
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/_column_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/_column_transformer.py
|
BSD-3-Clause
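A hedged usage sketch of `set_output` on a ColumnTransformer; the column names are illustrative and `sparse_output=False` is chosen because DataFrame output requires dense data.

import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder, StandardScaler

X = pd.DataFrame({"age": [20, 30, 40], "city": ["a", "b", "a"]})
ct = ColumnTransformer(
    [("num", StandardScaler(), ["age"]),
     ("cat", OneHotEncoder(sparse_output=False), ["city"])]
)
ct.set_output(transform="pandas")  # propagated to all inner transformers
out = ct.fit_transform(X)
print(type(out))  # pandas DataFrame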
|
def _iter(self, fitted, column_as_labels, skip_drop, skip_empty_columns):
"""
Generate (name, trans, columns, weight) tuples.
Parameters
----------
fitted : bool
If True, use the fitted transformers (``self.transformers_``) to
iterate through transformers, else use the transformers passed by
the user (``self.transformers``).
column_as_labels : bool
If True, columns are returned as string labels. If False, columns
are returned as they were given by the user. This can only be True
if the ``ColumnTransformer`` is already fitted.
skip_drop : bool
If True, 'drop' transformers are filtered out.
skip_empty_columns : bool
If True, transformers with empty selected columns are filtered out.
Yields
------
A generator of tuples containing:
- name : the name of the transformer
- transformer : the transformer object
- columns : the columns for that transformer
- weight : the weight of the transformer
"""
if fitted:
transformers = self.transformers_
else:
# interleave the validated column specifiers
transformers = [
(name, trans, column)
for (name, trans, _), column in zip(self.transformers, self._columns)
]
# add transformer tuple for remainder
if self._remainder[2]:
transformers = chain(transformers, [self._remainder])
get_weight = (self.transformer_weights or {}).get
for name, trans, columns in transformers:
if skip_drop and trans == "drop":
continue
if skip_empty_columns and _is_empty_column_selection(columns):
continue
if column_as_labels:
# Convert all columns to using their string labels
columns_is_scalar = np.isscalar(columns)
indices = self._transformer_to_input_indices[name]
columns = self.feature_names_in_[indices]
if columns_is_scalar:
# selection is done with one dimension
columns = columns[0]
yield (name, trans, columns, get_weight(name))
|
Generate (name, trans, columns, weight) tuples.
Parameters
----------
fitted : bool
If True, use the fitted transformers (``self.transformers_``) to
iterate through transformers, else use the transformers passed by
the user (``self.transformers``).
column_as_labels : bool
If True, columns are returned as string labels. If False, columns
are returned as they were given by the user. This can only be True
if the ``ColumnTransformer`` is already fitted.
skip_drop : bool
If True, 'drop' transformers are filtered out.
skip_empty_columns : bool
If True, transformers with empty selected columns are filtered out.
Yields
------
A generator of tuples containing:
- name : the name of the transformer
- transformer : the transformer object
- columns : the columns for that transformer
- weight : the weight of the transformer
|
_iter
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/_column_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/_column_transformer.py
|
BSD-3-Clause
|
def _validate_transformers(self):
"""Validate names of transformers and the transformers themselves.
This checks whether given transformers have the required methods, i.e.
`fit` or `fit_transform` and `transform` implemented.
"""
if not self.transformers:
return
names, transformers, _ = zip(*self.transformers)
# validate names
self._validate_names(names)
# validate estimators
for t in transformers:
if t in ("drop", "passthrough"):
continue
if not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not hasattr(
t, "transform"
):
# Used to validate the transformers in the `transformers` list
raise TypeError(
"All estimators should implement fit and "
"transform, or can be 'drop' or 'passthrough' "
"specifiers. '%s' (type %s) doesn't." % (t, type(t))
)
|
Validate names of transformers and the transformers themselves.
This checks whether given transformers have the required methods, i.e.
`fit` or `fit_transform` and `transform` implemented.
|
_validate_transformers
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/_column_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/_column_transformer.py
|
BSD-3-Clause
|
def _validate_column_callables(self, X):
"""
Converts callable column specifications.
        For each transformer, if `columns` is a callable it is called on `X`
        to obtain the column specification.
        The resulting column indices are stored as a dictionary of the form
        `{step_name: column_indices}` in `self._transformer_to_input_indices`.
"""
all_columns = []
transformer_to_input_indices = {}
for name, _, columns in self.transformers:
if callable(columns):
columns = columns(X)
all_columns.append(columns)
transformer_to_input_indices[name] = _get_column_indices(X, columns)
self._columns = all_columns
self._transformer_to_input_indices = transformer_to_input_indices
|
Converts callable column specifications.
For each transformer, if `columns` is a callable it is called on `X`
to obtain the column specification.
The resulting column indices are stored as a dictionary of the form
`{step_name: column_indices}` in `self._transformer_to_input_indices`.
|
_validate_column_callables
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/_column_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/_column_transformer.py
|
BSD-3-Clause
|
def _validate_remainder(self, X):
"""
Validates ``remainder`` and defines ``_remainder`` targeting
the remaining columns.
"""
cols = set(chain(*self._transformer_to_input_indices.values()))
remaining = sorted(set(range(self.n_features_in_)) - cols)
self._transformer_to_input_indices["remainder"] = remaining
remainder_cols = self._get_remainder_cols(remaining)
self._remainder = ("remainder", self.remainder, remainder_cols)
|
Validates ``remainder`` and defines ``_remainder`` targeting
the remaining columns.
|
_validate_remainder
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/_column_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/_column_transformer.py
|
BSD-3-Clause
|
def named_transformers_(self):
"""Access the fitted transformer by name.
Read-only attribute to access any transformer by given name.
Keys are transformer names and values are the fitted transformer
objects.
"""
# Use Bunch object to improve autocomplete
return Bunch(**{name: trans for name, trans, _ in self.transformers_})
|
Access the fitted transformer by name.
Read-only attribute to access any transformer by given name.
Keys are transformer names and values are the fitted transformer
objects.
|
named_transformers_
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/_column_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/_column_transformer.py
|
BSD-3-Clause
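A small illustrative sketch of the `named_transformers_` accessor, assuming a fitted ColumnTransformer with one named step.

import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler

X = pd.DataFrame({"age": [20.0, 30.0, 40.0], "height": [1.6, 1.7, 1.8]})
ct = ColumnTransformer(
    [("scale", StandardScaler(), ["age"])], remainder="passthrough"
).fit(X)

# Access the fitted StandardScaler for the "age" column by name.
print(ct.named_transformers_["scale"].mean_)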
|
def _get_feature_name_out_for_transformer(self, name, trans, feature_names_in):
"""Gets feature names of transformer.
Used in conjunction with self._iter(fitted=True) in get_feature_names_out.
"""
column_indices = self._transformer_to_input_indices[name]
names = feature_names_in[column_indices]
# An actual transformer
if not hasattr(trans, "get_feature_names_out"):
raise AttributeError(
f"Transformer {name} (type {type(trans).__name__}) does "
"not provide get_feature_names_out."
)
return trans.get_feature_names_out(names)
|
Gets feature names of transformer.
Used in conjunction with self._iter(fitted=True) in get_feature_names_out.
|
_get_feature_name_out_for_transformer
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/_column_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/_column_transformer.py
|
BSD-3-Clause
|
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Input features.
- If `input_features` is `None`, then `feature_names_in_` is
used as feature names in. If `feature_names_in_` is not defined,
then the following input feature names are generated:
`["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
- If `input_features` is an array-like, then `input_features` must
match `feature_names_in_` if `feature_names_in_` is defined.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
"""
check_is_fitted(self)
input_features = _check_feature_names_in(self, input_features)
# List of tuples (name, feature_names_out)
transformer_with_feature_names_out = []
for name, trans, *_ in self._iter(
fitted=True,
column_as_labels=False,
skip_empty_columns=True,
skip_drop=True,
):
feature_names_out = self._get_feature_name_out_for_transformer(
name, trans, input_features
)
if feature_names_out is None:
continue
transformer_with_feature_names_out.append((name, feature_names_out))
if not transformer_with_feature_names_out:
# No feature names
return np.array([], dtype=object)
return self._add_prefix_for_feature_names_out(
transformer_with_feature_names_out
)
|
Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Input features.
- If `input_features` is `None`, then `feature_names_in_` is
used as feature names in. If `feature_names_in_` is not defined,
then the following input feature names are generated:
`["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
- If `input_features` is an array-like, then `input_features` must
match `feature_names_in_` if `feature_names_in_` is defined.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
|
get_feature_names_out
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/_column_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/_column_transformer.py
|
BSD-3-Clause
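An illustrative call to `get_feature_names_out`; the names shown in the comment are what one would typically expect with the default prefixing, and the exact category names depend on the data.

import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder, StandardScaler

X = pd.DataFrame({"age": [20, 30, 40], "city": ["a", "b", "a"]})
ct = ColumnTransformer(
    [("num", StandardScaler(), ["age"]),
     ("cat", OneHotEncoder(sparse_output=False), ["city"])]
).fit(X)
print(ct.get_feature_names_out())
# typically: ['num__age' 'cat__city_a' 'cat__city_b']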
|
def _add_prefix_for_feature_names_out(self, transformer_with_feature_names_out):
"""Add prefix for feature names out that includes the transformer names.
Parameters
----------
transformer_with_feature_names_out : list of tuples of (str, array-like of str)
            Each tuple consists of the transformer's name and its feature names out.
Returns
-------
feature_names_out : ndarray of shape (n_features,), dtype=str
Transformed feature names.
"""
feature_names_out_callable = None
if callable(self.verbose_feature_names_out):
feature_names_out_callable = self.verbose_feature_names_out
elif isinstance(self.verbose_feature_names_out, str):
feature_names_out_callable = partial(
_feature_names_out_with_str_format,
str_format=self.verbose_feature_names_out,
)
elif self.verbose_feature_names_out is True:
feature_names_out_callable = partial(
_feature_names_out_with_str_format,
str_format="{transformer_name}__{feature_name}",
)
if feature_names_out_callable is not None:
# Prefix the feature names out with the transformers name
names = list(
chain.from_iterable(
(feature_names_out_callable(name, i) for i in feature_names_out)
for name, feature_names_out in transformer_with_feature_names_out
)
)
return np.asarray(names, dtype=object)
# verbose_feature_names_out is False
# Check that names are all unique without a prefix
feature_names_count = Counter(
chain.from_iterable(s for _, s in transformer_with_feature_names_out)
)
top_6_overlap = [
name for name, count in feature_names_count.most_common(6) if count > 1
]
top_6_overlap.sort()
if top_6_overlap:
if len(top_6_overlap) == 6:
                # There are more than 5 overlapping names; only show the first 5
                # of the feature names
names_repr = str(top_6_overlap[:5])[:-1] + ", ...]"
else:
names_repr = str(top_6_overlap)
raise ValueError(
f"Output feature names: {names_repr} are not unique. Please set "
"verbose_feature_names_out=True to add prefixes to feature names"
)
return np.concatenate(
[name for _, name in transformer_with_feature_names_out],
)
|
Add prefix for feature names out that includes the transformer names.
Parameters
----------
transformer_with_feature_names_out : list of tuples of (str, array-like of str)
    Each tuple consists of the transformer's name and its feature names out.
Returns
-------
feature_names_out : ndarray of shape (n_features,), dtype=str
Transformed feature names.
|
_add_prefix_for_feature_names_out
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/_column_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/_column_transformer.py
|
BSD-3-Clause
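A companion sketch for the non-prefixed case: with `verbose_feature_names_out=False` the transformer names are not prepended, and duplicated output names across transformers would raise the error described above. The single-transformer setup below avoids duplicates by construction.

import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler

X = pd.DataFrame({"a": [1.0, 2.0], "b": [3.0, 4.0]})
ct = ColumnTransformer(
    [("scale", StandardScaler(), ["a", "b"])],
    verbose_feature_names_out=False,
).fit(X)
print(ct.get_feature_names_out())  # ['a' 'b'] with no "scale__" prefix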
|
def _update_fitted_transformers(self, transformers):
"""Set self.transformers_ from given transformers.
Parameters
----------
transformers : list of estimators
The fitted estimators as the output of
`self._call_func_on_transformers(func=_fit_transform_one, ...)`.
That function doesn't include 'drop' or transformers for which no
column is selected. 'drop' is kept as is, and for the no-column
transformers the unfitted transformer is put in
`self.transformers_`.
"""
# transformers are fitted; excludes 'drop' cases
fitted_transformers = iter(transformers)
transformers_ = []
for name, old, column, _ in self._iter(
fitted=False,
column_as_labels=False,
skip_drop=False,
skip_empty_columns=False,
):
if old == "drop":
trans = "drop"
elif _is_empty_column_selection(column):
trans = old
else:
trans = next(fitted_transformers)
transformers_.append((name, trans, column))
# sanity check that transformers is exhausted
assert not list(fitted_transformers)
self.transformers_ = transformers_
|
Set self.transformers_ from given transformers.
Parameters
----------
transformers : list of estimators
The fitted estimators as the output of
`self._call_func_on_transformers(func=_fit_transform_one, ...)`.
That function doesn't include 'drop' or transformers for which no
column is selected. 'drop' is kept as is, and for the no-column
transformers the unfitted transformer is put in
`self.transformers_`.
|
_update_fitted_transformers
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/_column_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/_column_transformer.py
|
BSD-3-Clause
|
def _validate_output(self, result):
"""
Ensure that the output of each transformer is 2D. Otherwise
hstack can raise an error or produce incorrect results.
"""
names = [
name
for name, _, _, _ in self._iter(
fitted=True,
column_as_labels=False,
skip_drop=True,
skip_empty_columns=True,
)
]
for Xs, name in zip(result, names):
if not getattr(Xs, "ndim", 0) == 2 and not hasattr(Xs, "__dataframe__"):
raise ValueError(
"The output of the '{0}' transformer should be 2D (numpy array, "
"scipy sparse array, dataframe).".format(name)
)
if _get_output_config("transform", self)["dense"] == "pandas":
return
try:
import pandas as pd
except ImportError:
return
for Xs, name in zip(result, names):
if not _is_pandas_df(Xs):
continue
for col_name, dtype in Xs.dtypes.to_dict().items():
if getattr(dtype, "na_value", None) is not pd.NA:
continue
if pd.NA not in Xs[col_name].values:
continue
class_name = self.__class__.__name__
raise ValueError(
f"The output of the '{name}' transformer for column"
f" '{col_name}' has dtype {dtype} and uses pandas.NA to"
" represent null values. Storing this output in a numpy array"
" can cause errors in downstream scikit-learn estimators, and"
" inefficiencies. To avoid this problem you can (i)"
" store the output in a pandas DataFrame by using"
f" {class_name}.set_output(transform='pandas') or (ii) modify"
f" the input data or the '{name}' transformer to avoid the"
" presence of pandas.NA (for example by using"
" pandas.DataFrame.astype)."
)
|
Ensure that the output of each transformer is 2D. Otherwise
hstack can raise an error or produce incorrect results.
|
_validate_output
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/_column_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/_column_transformer.py
|
BSD-3-Clause
|
def _record_output_indices(self, Xs):
"""
Record which transformer produced which column.
"""
idx = 0
self.output_indices_ = {}
for transformer_idx, (name, _, _, _) in enumerate(
self._iter(
fitted=True,
column_as_labels=False,
skip_drop=True,
skip_empty_columns=True,
)
):
n_columns = Xs[transformer_idx].shape[1]
self.output_indices_[name] = slice(idx, idx + n_columns)
idx += n_columns
# `_iter` only generates transformers that have a non empty
# selection. Here we set empty slices for transformers that
# generate no output, which are safe for indexing
all_names = [t[0] for t in self.transformers] + ["remainder"]
for name in all_names:
if name not in self.output_indices_:
self.output_indices_[name] = slice(0, 0)
|
Record which transformer produced which column.
|
_record_output_indices
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/_column_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/_column_transformer.py
|
BSD-3-Clause
|
def _call_func_on_transformers(self, X, y, func, column_as_labels, routed_params):
"""
Private function to fit and/or transform on demand.
Parameters
----------
X : {array-like, dataframe} of shape (n_samples, n_features)
The data to be used in fit and/or transform.
y : array-like of shape (n_samples,)
Targets.
func : callable
Function to call, which can be _fit_transform_one or
_transform_one.
column_as_labels : bool
Used to iterate through transformers. If True, columns are returned
as strings. If False, columns are returned as they were given by
the user. Can be True only if the ``ColumnTransformer`` is already
fitted.
routed_params : dict
The routed parameters as the output from ``process_routing``.
Returns
-------
Return value (transformers and/or transformed X data) depends
on the passed function.
"""
if func is _fit_transform_one:
fitted = False
else: # func is _transform_one
fitted = True
transformers = list(
self._iter(
fitted=fitted,
column_as_labels=column_as_labels,
skip_drop=True,
skip_empty_columns=True,
)
)
try:
jobs = []
for idx, (name, trans, columns, weight) in enumerate(transformers, start=1):
if func is _fit_transform_one:
if trans == "passthrough":
output_config = _get_output_config("transform", self)
trans = FunctionTransformer(
accept_sparse=True,
check_inverse=False,
feature_names_out="one-to-one",
).set_output(transform=output_config["dense"])
extra_args = dict(
message_clsname="ColumnTransformer",
message=self._log_message(name, idx, len(transformers)),
)
else: # func is _transform_one
extra_args = {}
jobs.append(
delayed(func)(
transformer=clone(trans) if not fitted else trans,
X=_safe_indexing(X, columns, axis=1),
y=y,
weight=weight,
**extra_args,
params=routed_params[name],
)
)
return Parallel(n_jobs=self.n_jobs)(jobs)
except ValueError as e:
if "Expected 2D array, got 1D array instead" in str(e):
raise ValueError(_ERR_MSG_1DCOLUMN) from e
else:
raise
|
Private function to fit and/or transform on demand.
Parameters
----------
X : {array-like, dataframe} of shape (n_samples, n_features)
The data to be used in fit and/or transform.
y : array-like of shape (n_samples,)
Targets.
func : callable
Function to call, which can be _fit_transform_one or
_transform_one.
column_as_labels : bool
Used to iterate through transformers. If True, columns are returned
as strings. If False, columns are returned as they were given by
the user. Can be True only if the ``ColumnTransformer`` is already
fitted.
routed_params : dict
The routed parameters as the output from ``process_routing``.
Returns
-------
Return value (transformers and/or transformed X data) depends
on the passed function.
|
_call_func_on_transformers
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/_column_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/_column_transformer.py
|
BSD-3-Clause
|
def fit(self, X, y=None, **params):
"""Fit all transformers using X.
Parameters
----------
X : {array-like, dataframe} of shape (n_samples, n_features)
Input data, of which specified subsets are used to fit the
transformers.
y : array-like of shape (n_samples,...), default=None
Targets for supervised learning.
**params : dict, default=None
Parameters to be passed to the underlying transformers' ``fit`` and
``transform`` methods.
You can only pass this if metadata routing is enabled, which you
can enable using ``sklearn.set_config(enable_metadata_routing=True)``.
.. versionadded:: 1.4
Returns
-------
self : ColumnTransformer
This estimator.
"""
_raise_for_params(params, self, "fit")
# we use fit_transform to make sure to set sparse_output_ (for which we
# need the transformed data) to have consistent output type in predict
self.fit_transform(X, y=y, **params)
return self
|
Fit all transformers using X.
Parameters
----------
X : {array-like, dataframe} of shape (n_samples, n_features)
Input data, of which specified subsets are used to fit the
transformers.
y : array-like of shape (n_samples,...), default=None
Targets for supervised learning.
**params : dict, default=None
Parameters to be passed to the underlying transformers' ``fit`` and
``transform`` methods.
You can only pass this if metadata routing is enabled, which you
can enable using ``sklearn.set_config(enable_metadata_routing=True)``.
.. versionadded:: 1.4
Returns
-------
self : ColumnTransformer
This estimator.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/_column_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/_column_transformer.py
|
BSD-3-Clause
|
def fit_transform(self, X, y=None, **params):
"""Fit all transformers, transform the data and concatenate results.
Parameters
----------
X : {array-like, dataframe} of shape (n_samples, n_features)
Input data, of which specified subsets are used to fit the
transformers.
y : array-like of shape (n_samples,), default=None
Targets for supervised learning.
**params : dict, default=None
Parameters to be passed to the underlying transformers' ``fit`` and
``transform`` methods.
You can only pass this if metadata routing is enabled, which you
can enable using ``sklearn.set_config(enable_metadata_routing=True)``.
.. versionadded:: 1.4
Returns
-------
X_t : {array-like, sparse matrix} of \
shape (n_samples, sum_n_components)
Horizontally stacked results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers. If
any result is a sparse matrix, everything will be converted to
sparse matrices.
"""
_raise_for_params(params, self, "fit_transform")
_check_feature_names(self, X, reset=True)
if self.force_int_remainder_cols != "deprecated":
warnings.warn(
"The parameter `force_int_remainder_cols` is deprecated and will be "
"removed in 1.9. It has no effect. Leave it to its default value to "
"avoid this warning.",
FutureWarning,
)
X = _check_X(X)
# set n_features_in_ attribute
_check_n_features(self, X, reset=True)
self._validate_transformers()
n_samples = _num_samples(X)
self._validate_column_callables(X)
self._validate_remainder(X)
if _routing_enabled():
routed_params = process_routing(self, "fit_transform", **params)
else:
routed_params = self._get_empty_routing()
result = self._call_func_on_transformers(
X,
y,
_fit_transform_one,
column_as_labels=False,
routed_params=routed_params,
)
if not result:
self._update_fitted_transformers([])
# All transformers are None
return np.zeros((n_samples, 0))
Xs, transformers = zip(*result)
# determine if concatenated output will be sparse or not
if any(sparse.issparse(X) for X in Xs):
nnz = sum(X.nnz if sparse.issparse(X) else X.size for X in Xs)
total = sum(
X.shape[0] * X.shape[1] if sparse.issparse(X) else X.size for X in Xs
)
density = nnz / total
self.sparse_output_ = density < self.sparse_threshold
else:
self.sparse_output_ = False
self._update_fitted_transformers(transformers)
self._validate_output(Xs)
self._record_output_indices(Xs)
return self._hstack(list(Xs), n_samples=n_samples)
|
Fit all transformers, transform the data and concatenate results.
Parameters
----------
X : {array-like, dataframe} of shape (n_samples, n_features)
Input data, of which specified subsets are used to fit the
transformers.
y : array-like of shape (n_samples,), default=None
Targets for supervised learning.
**params : dict, default=None
Parameters to be passed to the underlying transformers' ``fit`` and
``transform`` methods.
You can only pass this if metadata routing is enabled, which you
can enable using ``sklearn.set_config(enable_metadata_routing=True)``.
.. versionadded:: 1.4
Returns
-------
X_t : {array-like, sparse matrix} of shape (n_samples, sum_n_components)
Horizontally stacked results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers. If
any result is a sparse matrix, everything will be converted to
sparse matrices.
|
fit_transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/_column_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/_column_transformer.py
|
BSD-3-Clause
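A hedged end-to-end sketch of `fit_transform`: the horizontally stacked output has one scaled column plus the one-hot columns, and `output_indices_` records which slice of the output came from which transformer. Column names are illustrative.

import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder, StandardScaler

X = pd.DataFrame({"age": [20, 30, 40], "city": ["a", "b", "a"]})
ct = ColumnTransformer(
    [("num", StandardScaler(), ["age"]),
     ("cat", OneHotEncoder(sparse_output=False), ["city"])]
)
Xt = ct.fit_transform(X)
print(Xt.shape)            # (3, 3): 1 scaled column + 2 one-hot columns
print(ct.output_indices_)  # maps each transformer name to its output column slice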
|
def transform(self, X, **params):
"""Transform X separately by each transformer, concatenate results.
Parameters
----------
X : {array-like, dataframe} of shape (n_samples, n_features)
The data to be transformed by subset.
**params : dict, default=None
Parameters to be passed to the underlying transformers' ``transform``
method.
You can only pass this if metadata routing is enabled, which you
can enable using ``sklearn.set_config(enable_metadata_routing=True)``.
.. versionadded:: 1.4
Returns
-------
X_t : {array-like, sparse matrix} of \
shape (n_samples, sum_n_components)
Horizontally stacked results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers. If
any result is a sparse matrix, everything will be converted to
sparse matrices.
"""
_raise_for_params(params, self, "transform")
check_is_fitted(self)
X = _check_X(X)
# If ColumnTransformer is fit using a dataframe, and now a dataframe is
# passed to be transformed, we select columns by name instead. This
# enables the user to pass X at transform time with extra columns which
        # were not present at fit time, and the order of the columns doesn't
# matter.
fit_dataframe_and_transform_dataframe = hasattr(self, "feature_names_in_") and (
_is_pandas_df(X) or hasattr(X, "__dataframe__")
)
n_samples = _num_samples(X)
column_names = _get_feature_names(X)
if fit_dataframe_and_transform_dataframe:
named_transformers = self.named_transformers_
# check that all names seen in fit are in transform, unless
# they were dropped
non_dropped_indices = [
ind
for name, ind in self._transformer_to_input_indices.items()
if name in named_transformers and named_transformers[name] != "drop"
]
all_indices = set(chain(*non_dropped_indices))
all_names = set(self.feature_names_in_[ind] for ind in all_indices)
diff = all_names - set(column_names)
if diff:
raise ValueError(f"columns are missing: {diff}")
else:
# ndarray was used for fitting or transforming, thus we only
# check that n_features_in_ is consistent
_check_n_features(self, X, reset=False)
if _routing_enabled():
routed_params = process_routing(self, "transform", **params)
else:
routed_params = self._get_empty_routing()
Xs = self._call_func_on_transformers(
X,
None,
_transform_one,
column_as_labels=fit_dataframe_and_transform_dataframe,
routed_params=routed_params,
)
self._validate_output(Xs)
if not Xs:
# All transformers are None
return np.zeros((n_samples, 0))
return self._hstack(list(Xs), n_samples=n_samples)
|
Transform X separately by each transformer, concatenate results.
Parameters
----------
X : {array-like, dataframe} of shape (n_samples, n_features)
The data to be transformed by subset.
**params : dict, default=None
Parameters to be passed to the underlying transformers' ``transform``
method.
You can only pass this if metadata routing is enabled, which you
can enable using ``sklearn.set_config(enable_metadata_routing=True)``.
.. versionadded:: 1.4
Returns
-------
X_t : {array-like, sparse matrix} of shape (n_samples, sum_n_components)
Horizontally stacked results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers. If
any result is a sparse matrix, everything will be converted to
sparse matrices.
|
transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/_column_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/_column_transformer.py
|
BSD-3-Clause
|
def _hstack(self, Xs, *, n_samples):
"""Stacks Xs horizontally.
This allows subclasses to control the stacking behavior, while reusing
everything else from ColumnTransformer.
Parameters
----------
Xs : list of {array-like, sparse matrix, dataframe}
The container to concatenate.
n_samples : int
            The number of samples in the input data, used to check the
            consistency of the transformation.
"""
if self.sparse_output_:
try:
# since all columns should be numeric before stacking them
# in a sparse matrix, `check_array` is used for the
# dtype conversion if necessary.
converted_Xs = [
check_array(X, accept_sparse=True, ensure_all_finite=False)
for X in Xs
]
except ValueError as e:
raise ValueError(
"For a sparse output, all columns should "
"be a numeric or convertible to a numeric."
) from e
return sparse.hstack(converted_Xs).tocsr()
else:
Xs = [f.toarray() if sparse.issparse(f) else f for f in Xs]
adapter = _get_container_adapter("transform", self)
if adapter and all(adapter.is_supported_container(X) for X in Xs):
# rename before stacking as it avoids to error on temporary duplicated
# columns
transformer_names = [
t[0]
for t in self._iter(
fitted=True,
column_as_labels=False,
skip_drop=True,
skip_empty_columns=True,
)
]
feature_names_outs = [X.columns for X in Xs if X.shape[1] != 0]
if self.verbose_feature_names_out:
# `_add_prefix_for_feature_names_out` takes care about raising
# an error if there are duplicated columns.
feature_names_outs = self._add_prefix_for_feature_names_out(
list(zip(transformer_names, feature_names_outs))
)
else:
# check for duplicated columns and raise if any
feature_names_outs = list(chain.from_iterable(feature_names_outs))
feature_names_count = Counter(feature_names_outs)
if any(count > 1 for count in feature_names_count.values()):
duplicated_feature_names = sorted(
name
for name, count in feature_names_count.items()
if count > 1
)
err_msg = (
"Duplicated feature names found before concatenating the"
" outputs of the transformers:"
f" {duplicated_feature_names}.\n"
)
for transformer_name, X in zip(transformer_names, Xs):
if X.shape[1] == 0:
continue
dup_cols_in_transformer = sorted(
set(X.columns).intersection(duplicated_feature_names)
)
if len(dup_cols_in_transformer):
err_msg += (
f"Transformer {transformer_name} has conflicting "
f"columns names: {dup_cols_in_transformer}.\n"
)
raise ValueError(
err_msg
+ "Either make sure that the transformers named above "
"do not generate columns with conflicting names or set "
"verbose_feature_names_out=True to automatically "
"prefix to the output feature names with the name "
"of the transformer to prevent any conflicting "
"names."
)
names_idx = 0
for X in Xs:
if X.shape[1] == 0:
continue
names_out = feature_names_outs[names_idx : names_idx + X.shape[1]]
adapter.rename_columns(X, names_out)
names_idx += X.shape[1]
output = adapter.hstack(Xs)
output_samples = output.shape[0]
if output_samples != n_samples:
raise ValueError(
"Concatenating DataFrames from the transformer's output lead to"
" an inconsistent number of samples. The output may have Pandas"
" Indexes that do not match, or that transformers are returning"
" number of samples which are not the same as the number input"
" samples."
)
return output
return np.hstack(Xs)
|
Stacks Xs horizontally.
This allows subclasses to control the stacking behavior, while reusing
everything else from ColumnTransformer.
Parameters
----------
Xs : list of {array-like, sparse matrix, dataframe}
The container to concatenate.
n_samples : int
    The number of samples in the input data, used to check the
    consistency of the transformation.
|
_hstack
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/_column_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/_column_transformer.py
|
BSD-3-Clause
|
def _get_empty_routing(self):
"""Return empty routing.
Used while routing can be disabled.
TODO: Remove when ``set_config(enable_metadata_routing=False)`` is no
more an option.
"""
return Bunch(
**{
name: Bunch(**{method: {} for method in METHODS})
for name, step, _, _ in self._iter(
fitted=False,
column_as_labels=False,
skip_drop=True,
skip_empty_columns=True,
)
}
)
|
Return empty routing.
Used while routing can be disabled.
TODO: Remove when ``set_config(enable_metadata_routing=False)`` is no
more an option.
|
_get_empty_routing
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/_column_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/_column_transformer.py
|
BSD-3-Clause
|
def get_metadata_routing(self):
"""Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.4
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
"""
router = MetadataRouter(owner=self.__class__.__name__)
# Here we don't care about which columns are used for which
# transformers, and whether or not a transformer is used at all, which
# might happen if no columns are selected for that transformer. We
# request all metadata requested by all transformers.
transformers = chain(self.transformers, [("remainder", self.remainder, None)])
for name, step, _ in transformers:
method_mapping = MethodMapping()
if hasattr(step, "fit_transform"):
(
method_mapping.add(caller="fit", callee="fit_transform").add(
caller="fit_transform", callee="fit_transform"
)
)
else:
(
method_mapping.add(caller="fit", callee="fit")
.add(caller="fit", callee="transform")
.add(caller="fit_transform", callee="fit")
.add(caller="fit_transform", callee="transform")
)
method_mapping.add(caller="transform", callee="transform")
router.add(method_mapping=method_mapping, **{name: step})
return router
|
Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.4
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
|
get_metadata_routing
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/_column_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/_column_transformer.py
|
BSD-3-Clause
|
def _check_X(X):
"""Use check_array only when necessary, e.g. on lists and other non-array-likes."""
if (
(hasattr(X, "__array__") and hasattr(X, "shape"))
or hasattr(X, "__dataframe__")
or sparse.issparse(X)
):
return X
return check_array(X, ensure_all_finite="allow-nan", dtype=object)
|
Use check_array only when necessary, e.g. on lists and other non-array-likes.
|
_check_X
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/_column_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/_column_transformer.py
|
BSD-3-Clause
|
def _is_empty_column_selection(column):
"""
Return True if the column selection is empty (empty list or all-False
boolean array).
"""
if hasattr(column, "dtype") and np.issubdtype(column.dtype, np.bool_):
return not column.any()
elif hasattr(column, "__len__"):
return len(column) == 0 or (
all(isinstance(col, bool) for col in column) and not any(column)
)
else:
return False
|
Return True if the column selection is empty (empty list or all-False
boolean array).
|
_is_empty_column_selection
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/_column_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/_column_transformer.py
|
BSD-3-Clause
|
def _get_transformer_list(estimators):
"""
    Construct (name, trans, column) tuples from a list of
    (transformer, columns) tuples.
"""
transformers, columns = zip(*estimators)
names, _ = zip(*_name_estimators(transformers))
transformer_list = list(zip(names, transformers, columns))
return transformer_list
|
Construct (name, trans, column) tuples from a list of
(transformer, columns) tuples.
|
_get_transformer_list
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/_column_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/_column_transformer.py
|
BSD-3-Clause
|
def __call__(self, df):
"""Callable for column selection to be used by a
:class:`ColumnTransformer`.
Parameters
----------
        df : dataframe of shape (n_samples, n_features)
DataFrame to select columns from.
"""
if not hasattr(df, "iloc"):
raise ValueError(
"make_column_selector can only be applied to pandas dataframes"
)
df_row = df.iloc[:1]
if self.dtype_include is not None or self.dtype_exclude is not None:
df_row = df_row.select_dtypes(
include=self.dtype_include, exclude=self.dtype_exclude
)
cols = df_row.columns
if self.pattern is not None:
cols = cols[cols.str.contains(self.pattern, regex=True)]
return cols.tolist()
|
Callable for column selection to be used by a
:class:`ColumnTransformer`.
Parameters
----------
df : dataframe of shape (n_samples, n_features)
DataFrame to select columns from.
|
__call__
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/_column_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/_column_transformer.py
|
BSD-3-Clause
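A typical pairing of `make_column_selector` with ColumnTransformer, selecting columns by dtype at fit time; the dtypes and column names used here are illustrative.

import numpy as np
import pandas as pd
from sklearn.compose import ColumnTransformer, make_column_selector
from sklearn.preprocessing import OneHotEncoder, StandardScaler

X = pd.DataFrame({"age": [20, 30, 40],
                  "income": [1.0, 2.0, 3.0],
                  "city": ["a", "b", "a"]})
ct = ColumnTransformer([
    ("num", StandardScaler(), make_column_selector(dtype_include=np.number)),
    ("cat", OneHotEncoder(sparse_output=False),
     make_column_selector(dtype_include=object)),
])
Xt = ct.fit_transform(X)
print(Xt.shape)  # 2 scaled columns + 2 one-hot columns -> (3, 4)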
|
def _fit_transformer(self, y):
"""Check transformer and fit transformer.
Create the default transformer, fit it and make additional inverse
check on a subset (optional).
"""
if self.transformer is not None and (
self.func is not None or self.inverse_func is not None
):
raise ValueError(
"'transformer' and functions 'func'/'inverse_func' cannot both be set."
)
elif self.transformer is not None:
self.transformer_ = clone(self.transformer)
else:
if (self.func is not None and self.inverse_func is None) or (
self.func is None and self.inverse_func is not None
):
lacking_param, existing_param = (
("func", "inverse_func")
if self.func is None
else ("inverse_func", "func")
)
raise ValueError(
f"When '{existing_param}' is provided, '{lacking_param}' must also"
f" be provided. If {lacking_param} is supposed to be the default,"
" you need to explicitly pass it the identity function."
)
self.transformer_ = FunctionTransformer(
func=self.func,
inverse_func=self.inverse_func,
validate=True,
check_inverse=self.check_inverse,
)
# We are transforming the target here and not the features, so we set the
# output of FunctionTransformer() to be a numpy array (default) and to not
# depend on the global configuration:
self.transformer_.set_output(transform="default")
# XXX: sample_weight is not currently passed to the
# transformer. However, if transformer starts using sample_weight, the
# code should be modified accordingly. At the time to consider the
# sample_prop feature, it is also a good use case to be considered.
self.transformer_.fit(y)
if self.check_inverse:
idx_selected = slice(None, None, max(1, y.shape[0] // 10))
y_sel = _safe_indexing(y, idx_selected)
y_sel_t = self.transformer_.transform(y_sel)
if not np.allclose(y_sel, self.transformer_.inverse_transform(y_sel_t)):
warnings.warn(
(
"The provided functions or transformer are"
" not strictly inverse of each other. If"
" you are sure you want to proceed regardless"
", set 'check_inverse=False'"
),
UserWarning,
)
|
Check transformer and fit transformer.
Create the default transformer, fit it and make additional inverse
check on a subset (optional).
|
_fit_transformer
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/_target.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/_target.py
|
BSD-3-Clause
|
def fit(self, X, y, **fit_params):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values.
**fit_params : dict
- If `enable_metadata_routing=False` (default): Parameters directly passed
to the `fit` method of the underlying regressor.
- If `enable_metadata_routing=True`: Parameters safely routed to the `fit`
method of the underlying regressor.
.. versionchanged:: 1.6
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
self : object
Fitted estimator.
"""
if y is None:
raise ValueError(
f"This {self.__class__.__name__} estimator "
"requires y to be passed, but the target y is None."
)
y = check_array(
y,
input_name="y",
accept_sparse=False,
ensure_all_finite=True,
ensure_2d=False,
dtype="numeric",
allow_nd=True,
)
# store the number of dimension of the target to predict an array of
# similar shape at predict
self._training_dim = y.ndim
        # transformers are designed to modify X, which is 2-dimensional, so we
        # need to modify y accordingly.
if y.ndim == 1:
y_2d = y.reshape(-1, 1)
else:
y_2d = y
self._fit_transformer(y_2d)
# transform y and convert back to 1d array if needed
y_trans = self.transformer_.transform(y_2d)
# FIXME: a FunctionTransformer can return a 1D array even when validate
# is set to True. Therefore, we need to check the number of dimension
# first.
if y_trans.ndim == 2 and y_trans.shape[1] == 1:
y_trans = y_trans.squeeze(axis=1)
self.regressor_ = self._get_regressor(get_clone=True)
if _routing_enabled():
routed_params = process_routing(self, "fit", **fit_params)
else:
routed_params = Bunch(regressor=Bunch(fit=fit_params))
self.regressor_.fit(X, y_trans, **routed_params.regressor.fit)
if hasattr(self.regressor_, "feature_names_in_"):
self.feature_names_in_ = self.regressor_.feature_names_in_
return self
|
Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values.
**fit_params : dict
- If `enable_metadata_routing=False` (default): Parameters directly passed
to the `fit` method of the underlying regressor.
- If `enable_metadata_routing=True`: Parameters safely routed to the `fit`
method of the underlying regressor.
.. versionchanged:: 1.6
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
self : object
Fitted estimator.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/_target.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/_target.py
|
BSD-3-Clause
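A minimal usage sketch (the synthetic data below is assumed for illustration): fit trains the wrapped regressor on the transformed target, so with func=np.log the linear model sees log(y) and recovers the slope used to generate the data.

import numpy as np
from sklearn.compose import TransformedTargetRegressor
from sklearn.linear_model import LinearRegression

rng = np.random.RandomState(0)
X = rng.uniform(1.0, 10.0, size=(200, 1))
y = np.exp(0.3 * X.ravel() + rng.normal(scale=0.05, size=200))

ttr = TransformedTargetRegressor(
    regressor=LinearRegression(), func=np.log, inverse_func=np.exp
).fit(X, y)

print(ttr.transformer_)       # the FunctionTransformer built by _fit_transformer
print(ttr.regressor_.coef_)   # close to [0.3]: the regressor was fitted on log(y)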
|
def predict(self, X, **predict_params):
"""Predict using the base regressor, applying inverse.
The regressor is used to predict and the `inverse_func` or
`inverse_transform` is applied before returning the prediction.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Samples.
**predict_params : dict of str -> object
- If `enable_metadata_routing=False` (default): Parameters directly passed
to the `predict` method of the underlying regressor.
- If `enable_metadata_routing=True`: Parameters safely routed to the
`predict` method of the underlying regressor.
.. versionchanged:: 1.6
See :ref:`Metadata Routing User Guide <metadata_routing>`
for more details.
Returns
-------
y_hat : ndarray of shape (n_samples,)
Predicted values.
"""
check_is_fitted(self)
if _routing_enabled():
routed_params = process_routing(self, "predict", **predict_params)
else:
routed_params = Bunch(regressor=Bunch(predict=predict_params))
pred = self.regressor_.predict(X, **routed_params.regressor.predict)
if pred.ndim == 1:
pred_trans = self.transformer_.inverse_transform(pred.reshape(-1, 1))
else:
pred_trans = self.transformer_.inverse_transform(pred)
if (
self._training_dim == 1
and pred_trans.ndim == 2
and pred_trans.shape[1] == 1
):
pred_trans = pred_trans.squeeze(axis=1)
return pred_trans
|
Predict using the base regressor, applying inverse.
The regressor is used to predict and the `inverse_func` or
`inverse_transform` is applied before returning the prediction.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Samples.
**predict_params : dict of str -> object
- If `enable_metadata_routing=False` (default): Parameters directly passed
to the `predict` method of the underlying regressor.
- If `enable_metadata_routing=True`: Parameters safely routed to the
`predict` method of the underlying regressor.
.. versionchanged:: 1.6
See :ref:`Metadata Routing User Guide <metadata_routing>`
for more details.
Returns
-------
y_hat : ndarray of shape (n_samples,)
Predicted values.
|
predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/_target.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/_target.py
|
BSD-3-Clause
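A short sketch of the round trip (again with assumed synthetic data): the wrapped regressor predicts in the transformed space and predict maps the result back with inverse_func, returning a 1d array because the training target was 1d.

import numpy as np
from sklearn.compose import TransformedTargetRegressor
from sklearn.linear_model import LinearRegression

X = np.linspace(1.0, 10.0, 50).reshape(-1, 1)
y = np.exp(0.3 * X.ravel())

ttr = TransformedTargetRegressor(
    regressor=LinearRegression(), func=np.log, inverse_func=np.exp
).fit(X, y)

y_pred = ttr.predict(X[:5])                 # back on the original scale
y_log = ttr.regressor_.predict(X[:5])       # raw predictions in log space
print(np.allclose(y_pred, np.exp(y_log)))   # True: exp undoes the log transform
print(y_pred.shape)                         # (5,) -- squeezed back to 1d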
|
def n_features_in_(self):
"""Number of features seen during :term:`fit`."""
        # For consistency with other estimators we raise an AttributeError so
        # that hasattr() returns False when the estimator isn't fitted.
try:
check_is_fitted(self)
except NotFittedError as nfe:
raise AttributeError(
"{} object has no n_features_in_ attribute.".format(
self.__class__.__name__
)
) from nfe
return self.regressor_.n_features_in_
|
Number of features seen during :term:`fit`.
|
n_features_in_
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/_target.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/_target.py
|
BSD-3-Clause
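A quick sketch of the behaviour this property is written for (toy data assumed): before fit, accessing n_features_in_ raises AttributeError so hasattr returns False; after fit it simply delegates to the fitted regressor.

import numpy as np
from sklearn.compose import TransformedTargetRegressor
from sklearn.linear_model import LinearRegression

ttr = TransformedTargetRegressor(regressor=LinearRegression())
print(hasattr(ttr, "n_features_in_"))        # False: not fitted yet

ttr.fit(np.random.RandomState(0).rand(5, 3), np.arange(5.0))
print(ttr.n_features_in_)                    # 3, taken from regressor_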
|
def get_metadata_routing(self):
"""Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.6
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
"""
router = MetadataRouter(owner=self.__class__.__name__).add(
regressor=self._get_regressor(),
method_mapping=MethodMapping()
.add(caller="fit", callee="fit")
.add(caller="predict", callee="predict"),
)
return router
|
Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.6
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
|
get_metadata_routing
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/_target.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/_target.py
|
BSD-3-Clause
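A hedged sketch of how this routing is used (it assumes metadata routing is enabled and a scikit-learn version where TransformedTargetRegressor supports routing, 1.6 or later): the router maps fit to the regressor's fit and predict to its predict, so a sample_weight requested by the regressor is forwarded.

import numpy as np
import sklearn
from sklearn.compose import TransformedTargetRegressor
from sklearn.linear_model import LinearRegression

sklearn.set_config(enable_metadata_routing=True)
try:
    reg = LinearRegression().set_fit_request(sample_weight=True)
    ttr = TransformedTargetRegressor(regressor=reg)
    X, y = np.arange(10.0).reshape(-1, 1), np.arange(10.0)
    # sample_weight is routed to LinearRegression.fit via the fit->fit mapping
    ttr.fit(X, y, sample_weight=np.linspace(0.1, 1.0, 10))
    router = ttr.get_metadata_routing()  # MetadataRouter with fit->fit, predict->predict
finally:
    sklearn.set_config(enable_metadata_routing=False)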
|
def test_column_transformer_remainder_dtypes(cols1, cols2, expected_remainder_cols):
"""Check that the remainder columns format matches the format of the other
columns when they're all strings or masks.
"""
X = np.ones((1, 3))
if isinstance(cols1, list) and isinstance(cols1[0], str):
pd = pytest.importorskip("pandas")
X = pd.DataFrame(X, columns=["A", "B", "C"])
    # if inputs are column names, store remainder columns as column names
ct = make_column_transformer(
(Trans(), cols1),
(Trans(), cols2),
remainder="passthrough",
)
ct.fit_transform(X)
assert ct.transformers_[-1][-1] == expected_remainder_cols
|
Check that the remainder columns format matches the format of the other
columns when they're all strings or masks.
|
test_column_transformer_remainder_dtypes
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/tests/test_column_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/tests/test_column_transformer.py
|
BSD-3-Clause
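An illustrative sketch of the behaviour this test exercises (the frame and column names are assumptions): when the selections are column names, recent scikit-learn versions also record the remainder columns by name in transformers_.

import numpy as np
import pandas as pd
from sklearn.compose import make_column_transformer
from sklearn.preprocessing import StandardScaler

X = pd.DataFrame(np.random.RandomState(0).rand(3, 3), columns=["A", "B", "C"])
ct = make_column_transformer(
    (StandardScaler(), ["A"]),
    remainder="passthrough",
).fit(X)
print(ct.transformers_[-1][-1])   # ['B', 'C']: remainder columns stored as names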
|
def test_force_int_remainder_cols_deprecation(force_int_remainder_cols):
"""Check that ColumnTransformer raises a FutureWarning when
force_int_remainder_cols is set.
"""
X = np.ones((1, 3))
ct = ColumnTransformer(
[("T1", Trans(), [0]), ("T2", Trans(), [1])],
remainder="passthrough",
force_int_remainder_cols=force_int_remainder_cols,
)
with pytest.warns(FutureWarning, match="`force_int_remainder_cols` is deprecated"):
ct.fit(X)
|
Check that ColumnTransformer raises a FutureWarning when
force_int_remainder_cols is set.
|
test_force_int_remainder_cols_deprecation
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/tests/test_column_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/tests/test_column_transformer.py
|
BSD-3-Clause
|
def test_feature_names_out_pandas(selector):
"""Checks name when selecting only the second column"""
pd = pytest.importorskip("pandas")
df = pd.DataFrame({"col1": ["a", "a", "b"], "col2": ["z", "z", "z"]})
ct = ColumnTransformer([("ohe", OneHotEncoder(), selector)])
ct.fit(df)
assert_array_equal(ct.get_feature_names_out(), ["ohe__col2_z"])
|
Checks name when selecting only the second column
|
test_feature_names_out_pandas
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/tests/test_column_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/tests/test_column_transformer.py
|
BSD-3-Clause
|
def test_feature_names_out_non_pandas(selector):
"""Checks name when selecting the second column with numpy array"""
X = [["a", "z"], ["a", "z"], ["b", "z"]]
ct = ColumnTransformer([("ohe", OneHotEncoder(), selector)])
ct.fit(X)
assert_array_equal(ct.get_feature_names_out(), ["ohe__x1_z"])
|
Checks name when selecting the second column with numpy array
|
test_feature_names_out_non_pandas
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/tests/test_column_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/tests/test_column_transformer.py
|
BSD-3-Clause
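A small sketch tying the two tests above together (data assumed): with array input the generated input names are positional (x0, x1, ...), and in both cases the output names are prefixed with the transformer name.

from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder

X = [["a", "z"], ["a", "z"], ["b", "z"]]
ct = ColumnTransformer([("ohe", OneHotEncoder(), [0, 1])]).fit(X)
print(ct.get_feature_names_out())   # ohe__x0_a, ohe__x0_b, ohe__x1_z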
|
def test_column_transformer_reordered_column_names_remainder(
explicit_colname, remainder
):
"""Test the interaction between remainder and column transformer"""
pd = pytest.importorskip("pandas")
X_fit_array = np.array([[0, 1, 2], [2, 4, 6]]).T
X_fit_df = pd.DataFrame(X_fit_array, columns=["first", "second"])
X_trans_array = np.array([[2, 4, 6], [0, 1, 2]]).T
X_trans_df = pd.DataFrame(X_trans_array, columns=["second", "first"])
tf = ColumnTransformer([("bycol", Trans(), explicit_colname)], remainder=remainder)
tf.fit(X_fit_df)
X_fit_trans = tf.transform(X_fit_df)
# Changing the order still works
X_trans = tf.transform(X_trans_df)
assert_allclose(X_trans, X_fit_trans)
# extra columns are ignored
X_extended_df = X_fit_df.copy()
X_extended_df["third"] = [3, 6, 9]
X_trans = tf.transform(X_extended_df)
assert_allclose(X_trans, X_fit_trans)
if isinstance(explicit_colname, str):
# Raise error if columns are specified by names but input only allows
# to specify by position, e.g. numpy array instead of a pandas df.
X_array = X_fit_array.copy()
err_msg = "Specifying the columns"
with pytest.raises(ValueError, match=err_msg):
tf.transform(X_array)
|
Test the interaction between remainder and column transformer
|
test_column_transformer_reordered_column_names_remainder
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/tests/test_column_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/tests/test_column_transformer.py
|
BSD-3-Clause
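A sketch of the reordering behaviour (hypothetical frame): because columns are matched by name after fitting on a DataFrame, transforming a reordered frame yields the same result.

import numpy as np
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler

df = pd.DataFrame({"first": [0.0, 1.0, 2.0], "second": [2.0, 4.0, 6.0]})
ct = ColumnTransformer([("scale", StandardScaler(), ["second"])]).fit(df)
reordered = df[["second", "first"]]
print(np.allclose(ct.transform(df), ct.transform(reordered)))   # True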
|
def test_feature_name_validation_missing_columns_drop_passthough():
"""Test the interaction between {'drop', 'passthrough'} and
missing column names."""
pd = pytest.importorskip("pandas")
X = np.ones(shape=(3, 4))
df = pd.DataFrame(X, columns=["a", "b", "c", "d"])
df_dropped = df.drop("c", axis=1)
# with remainder='passthrough', all columns seen during `fit` must be
# present
tf = ColumnTransformer([("bycol", Trans(), [1])], remainder="passthrough")
tf.fit(df)
msg = r"columns are missing: {'c'}"
with pytest.raises(ValueError, match=msg):
tf.transform(df_dropped)
# with remainder='drop', it is allowed to have column 'c' missing
tf = ColumnTransformer([("bycol", Trans(), [1])], remainder="drop")
tf.fit(df)
df_dropped_trans = tf.transform(df_dropped)
df_fit_trans = tf.transform(df)
assert_allclose(df_dropped_trans, df_fit_trans)
# bycol drops 'c', thus it is allowed for 'c' to be missing
tf = ColumnTransformer([("bycol", "drop", ["c"])], remainder="passthrough")
tf.fit(df)
df_dropped_trans = tf.transform(df_dropped)
df_fit_trans = tf.transform(df)
assert_allclose(df_dropped_trans, df_fit_trans)
|
Test the interaction between {'drop', 'passthrough'} and
missing column names.
|
test_feature_name_validation_missing_columns_drop_passthough
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/tests/test_column_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/tests/test_column_transformer.py
|
BSD-3-Clause
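A compact sketch of the rule being tested (column names assumed): a column may be missing at transform time only if no fitted transformer, including the remainder, needs it.

import numpy as np
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler

df = pd.DataFrame(np.random.RandomState(0).rand(3, 3), columns=["a", "b", "c"])

ct_drop = ColumnTransformer([("scale", StandardScaler(), ["a"])], remainder="drop")
ct_drop.fit(df)
print(ct_drop.transform(df.drop(columns="c")).shape)   # (3, 1): 'c' was never used

ct_pass = ColumnTransformer([("scale", StandardScaler(), ["a"])], remainder="passthrough")
ct_pass.fit(df)
try:
    ct_pass.transform(df.drop(columns="c"))             # 'c' is needed by the remainder
except ValueError as exc:
    print(exc)                                          # columns are missing: {'c'}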
|
def test_feature_names_in_():
"""Feature names are stored in column transformer.
Column transformer deliberately does not check for column name consistency.
It only checks that the non-dropped names seen in `fit` are seen
in `transform`. This behavior is already tested in
`test_feature_name_validation_missing_columns_drop_passthough`"""
pd = pytest.importorskip("pandas")
feature_names = ["a", "c", "d"]
df = pd.DataFrame([[1, 2, 3]], columns=feature_names)
ct = ColumnTransformer([("bycol", Trans(), ["a", "d"])], remainder="passthrough")
ct.fit(df)
assert_array_equal(ct.feature_names_in_, feature_names)
assert isinstance(ct.feature_names_in_, np.ndarray)
assert ct.feature_names_in_.dtype == object
|
Feature names are stored in column transformer.
Column transformer deliberately does not check for column name consistency.
It only checks that the non-dropped names seen in `fit` are seen
in `transform`. This behavior is already tested in
`test_feature_name_validation_missing_columns_drop_passthough`
|
test_feature_names_in_
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/tests/test_column_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/tests/test_column_transformer.py
|
BSD-3-Clause
|
def test_column_transform_set_output_mixed(remainder, fit_transform):
"""Check ColumnTransformer outputs mixed types correctly."""
pd = pytest.importorskip("pandas")
df = pd.DataFrame(
{
"pet": pd.Series(["dog", "cat", "snake"], dtype="category"),
"color": pd.Series(["green", "blue", "red"], dtype="object"),
"age": [1.4, 2.1, 4.4],
"height": [20, 40, 10],
"distance": pd.Series([20, pd.NA, 100], dtype="Int32"),
}
)
ct = ColumnTransformer(
[
(
"color_encode",
OneHotEncoder(sparse_output=False, dtype="int8"),
["color"],
),
("age", StandardScaler(), ["age"]),
],
remainder=remainder,
verbose_feature_names_out=False,
).set_output(transform="pandas")
if fit_transform:
X_trans = ct.fit_transform(df)
else:
X_trans = ct.fit(df).transform(df)
assert isinstance(X_trans, pd.DataFrame)
assert_array_equal(X_trans.columns, ct.get_feature_names_out())
expected_dtypes = {
"color_blue": "int8",
"color_green": "int8",
"color_red": "int8",
"age": "float64",
"pet": "category",
"height": "int64",
"distance": "Int32",
}
for col, dtype in X_trans.dtypes.items():
assert dtype == expected_dtypes[col]
|
Check ColumnTransformer outputs mixed types correctly.
|
test_column_transform_set_output_mixed
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/tests/test_column_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/tests/test_column_transformer.py
|
BSD-3-Clause
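An illustrative sketch of the dtype behaviour checked above (columns assumed): with pandas output, transformed columns get the transformer's numpy dtype while passthrough columns keep their original (possibly categorical or extension) dtypes.

import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler

df = pd.DataFrame(
    {
        "age": [1.4, 2.1, 4.4],
        "pet": pd.Series(["dog", "cat", "snake"], dtype="category"),
    }
)
ct = ColumnTransformer(
    [("age", StandardScaler(), ["age"])],
    remainder="passthrough",
    verbose_feature_names_out=False,
).set_output(transform="pandas")
print(ct.fit_transform(df).dtypes)   # age: float64, pet: category (passed through)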
|
def test_transformers_with_pandas_out_but_not_feature_names_out(
trans_1, expected_verbose_names, expected_non_verbose_names
):
"""Check that set_config(transform="pandas") is compatible with more transformers.
Specifically, if transformers returns a DataFrame, but does not define
`get_feature_names_out`.
"""
pd = pytest.importorskip("pandas")
X_df = pd.DataFrame({"feat0": [1.0, 2.0, 3.0], "feat1": [2.0, 3.0, 4.0]})
ct = ColumnTransformer(
[
("trans_0", PandasOutTransformer(offset=3.0), ["feat1"]),
("trans_1", trans_1, ["feat0"]),
]
)
X_trans_np = ct.fit_transform(X_df)
assert isinstance(X_trans_np, np.ndarray)
# `ct` does not have `get_feature_names_out` because `PandasOutTransformer` does
# not define the method.
with pytest.raises(AttributeError, match="not provide get_feature_names_out"):
ct.get_feature_names_out()
    # The feature names are prefixed because verbose_feature_names_out=True is the default
ct.set_output(transform="pandas")
X_trans_df0 = ct.fit_transform(X_df)
assert_array_equal(X_trans_df0.columns, expected_verbose_names)
ct.set_params(verbose_feature_names_out=False)
X_trans_df1 = ct.fit_transform(X_df)
assert_array_equal(X_trans_df1.columns, expected_non_verbose_names)
|
Check that set_config(transform="pandas") is compatible with more transformers.
Specifically, if transformers returns a DataFrame, but does not define
`get_feature_names_out`.
|
test_transformers_with_pandas_out_but_not_feature_names_out
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/tests/test_column_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/tests/test_column_transformer.py
|
BSD-3-Clause
|
def test_empty_selection_pandas_output(empty_selection):
"""Check that pandas output works when there is an empty selection.
Non-regression test for gh-25487
"""
pd = pytest.importorskip("pandas")
X = pd.DataFrame([[1.0, 2.2], [3.0, 1.0]], columns=["a", "b"])
ct = ColumnTransformer(
[
("categorical", "passthrough", empty_selection),
("numerical", StandardScaler(), ["a", "b"]),
],
verbose_feature_names_out=True,
)
ct.set_output(transform="pandas")
X_out = ct.fit_transform(X)
assert_array_equal(X_out.columns, ["numerical__a", "numerical__b"])
ct.set_params(verbose_feature_names_out=False)
X_out = ct.fit_transform(X)
assert_array_equal(X_out.columns, ["a", "b"])
|
Check that pandas output works when there is an empty selection.
Non-regression test for gh-25487
|
test_empty_selection_pandas_output
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/tests/test_column_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/tests/test_column_transformer.py
|
BSD-3-Clause
|
def test_raise_error_if_index_not_aligned():
"""Check column transformer raises error if indices are not aligned.
Non-regression test for gh-26210.
"""
pd = pytest.importorskip("pandas")
X = pd.DataFrame([[1.0, 2.2], [3.0, 1.0]], columns=["a", "b"], index=[8, 3])
reset_index_transformer = FunctionTransformer(
lambda x: x.reset_index(drop=True), feature_names_out="one-to-one"
)
ct = ColumnTransformer(
[
("num1", "passthrough", ["a"]),
("num2", reset_index_transformer, ["b"]),
],
)
ct.set_output(transform="pandas")
msg = (
"Concatenating DataFrames from the transformer's output lead to"
" an inconsistent number of samples. The output may have Pandas"
" Indexes that do not match."
)
with pytest.raises(ValueError, match=msg):
ct.fit_transform(X)
|
Check column transformer raises error if indices are not aligned.
Non-regression test for gh-26210.
|
test_raise_error_if_index_not_aligned
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/tests/test_column_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/tests/test_column_transformer.py
|
BSD-3-Clause
|
def test_remainder_set_output():
"""Check that the output is set for the remainder.
Non-regression test for #26306.
"""
pd = pytest.importorskip("pandas")
df = pd.DataFrame({"a": [True, False, True], "b": [1, 2, 3]})
ct = make_column_transformer(
(VarianceThreshold(), make_column_selector(dtype_include=bool)),
remainder=VarianceThreshold(),
verbose_feature_names_out=False,
)
ct.set_output(transform="pandas")
out = ct.fit_transform(df)
pd.testing.assert_frame_equal(out, df)
ct.set_output(transform="default")
out = ct.fit_transform(df)
assert isinstance(out, np.ndarray)
|
Check that the output is set for the remainder.
Non-regression test for #26306.
|
test_remainder_set_output
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/tests/test_column_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/tests/test_column_transformer.py
|
BSD-3-Clause
|
def test_transform_pd_na():
"""Check behavior when a tranformer's output contains pandas.NA
It should raise an error unless the output config is set to 'pandas'.
"""
pd = pytest.importorskip("pandas")
if not hasattr(pd, "Float64Dtype"):
pytest.skip(
"The issue with pd.NA tested here does not happen in old versions that do"
" not have the extension dtypes"
)
df = pd.DataFrame({"a": [1.5, None]})
ct = make_column_transformer(("passthrough", ["a"]))
# No warning with non-extension dtypes and np.nan
with warnings.catch_warnings():
warnings.simplefilter("error")
ct.fit_transform(df)
df = df.convert_dtypes()
# Error with extension dtype and pd.NA
with pytest.raises(ValueError, match=r"set_output\(transform='pandas'\)"):
ct.fit_transform(df)
# No error when output is set to pandas
ct.set_output(transform="pandas")
ct.fit_transform(df)
ct.set_output(transform="default")
# No error when there are no pd.NA
ct.fit_transform(df.fillna(-1.0))
|
Check behavior when a transformer's output contains pandas.NA
It should raise an error unless the output config is set to 'pandas'.
|
test_transform_pd_na
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/tests/test_column_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/tests/test_column_transformer.py
|
BSD-3-Clause
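A short sketch of the workaround the error message points to (toy frame assumed): keeping the output as pandas preserves pd.NA values instead of failing during the conversion to a numpy array.

import pandas as pd
from sklearn.compose import make_column_transformer

df = pd.DataFrame({"a": [1.5, None]}).convert_dtypes()   # Float64 column holding pd.NA
ct = make_column_transformer(("passthrough", ["a"]))
ct.set_output(transform="pandas")
print(ct.fit_transform(df))   # pd.NA preserved; the default ndarray output would raise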
|
def test_dataframe_different_dataframe_libraries():
"""Check fitting and transforming on pandas and polars dataframes."""
pd = pytest.importorskip("pandas")
pl = pytest.importorskip("polars")
X_train_np = np.array([[0, 1], [2, 4], [4, 5]])
X_test_np = np.array([[1, 2], [1, 3], [2, 3]])
# Fit on pandas and transform on polars
X_train_pd = pd.DataFrame(X_train_np, columns=["a", "b"])
X_test_pl = pl.DataFrame(X_test_np, schema=["a", "b"])
ct = make_column_transformer((Trans(), [0, 1]))
ct.fit(X_train_pd)
out_pl_in = ct.transform(X_test_pl)
assert_array_equal(out_pl_in, X_test_np)
# Fit on polars and transform on pandas
X_train_pl = pl.DataFrame(X_train_np, schema=["a", "b"])
X_test_pd = pd.DataFrame(X_test_np, columns=["a", "b"])
ct.fit(X_train_pl)
out_pd_in = ct.transform(X_test_pd)
assert_array_equal(out_pd_in, X_test_np)
|
Check fitting and transforming on pandas and polars dataframes.
|
test_dataframe_different_dataframe_libraries
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/tests/test_column_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/tests/test_column_transformer.py
|
BSD-3-Clause
|
def test_column_transformer_remainder_passthrough_naming_consistency(transform_output):
"""Check that when `remainder="passthrough"`, inconsistent naming is handled
correctly by the underlying `FunctionTransformer`.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/28232
"""
pd = pytest.importorskip("pandas")
X = pd.DataFrame(np.random.randn(10, 4))
preprocessor = ColumnTransformer(
transformers=[("scaler", StandardScaler(), [0, 1])],
remainder="passthrough",
).set_output(transform=transform_output)
X_trans = preprocessor.fit_transform(X)
assert X_trans.shape == X.shape
expected_column_names = [
"scaler__x0",
"scaler__x1",
"remainder__x2",
"remainder__x3",
]
if hasattr(X_trans, "columns"):
assert X_trans.columns.tolist() == expected_column_names
assert preprocessor.get_feature_names_out().tolist() == expected_column_names
|
Check that when `remainder="passthrough"`, inconsistent naming is handled
correctly by the underlying `FunctionTransformer`.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/28232
|
test_column_transformer_remainder_passthrough_naming_consistency
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/tests/test_column_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/tests/test_column_transformer.py
|
BSD-3-Clause
|
def test_column_transformer_column_renaming(dataframe_lib):
"""Check that we properly rename columns when using `ColumnTransformer` and
selected columns are redundant between transformers.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/28260
"""
lib = pytest.importorskip(dataframe_lib)
df = lib.DataFrame({"x1": [1, 2, 3], "x2": [10, 20, 30], "x3": [100, 200, 300]})
transformer = ColumnTransformer(
transformers=[
("A", "passthrough", ["x1", "x2", "x3"]),
("B", FunctionTransformer(), ["x1", "x2"]),
("C", StandardScaler(), ["x1", "x3"]),
            # special case of a transformer returning 0 columns, e.g. a feature selector
(
"D",
FunctionTransformer(lambda x: _safe_indexing(x, [], axis=1)),
["x1", "x2", "x3"],
),
],
verbose_feature_names_out=True,
).set_output(transform=dataframe_lib)
df_trans = transformer.fit_transform(df)
assert list(df_trans.columns) == [
"A__x1",
"A__x2",
"A__x3",
"B__x1",
"B__x2",
"C__x1",
"C__x3",
]
|
Check that we properly rename columns when using `ColumnTransformer` and
selected columns are redundant between transformers.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/28260
|
test_column_transformer_column_renaming
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/tests/test_column_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/tests/test_column_transformer.py
|
BSD-3-Clause
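A reduced sketch of the renaming rule (frame assumed): with verbose_feature_names_out=True, each output column is prefixed with its transformer name, so the same input column selected by several transformers does not collide.

import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler

df = pd.DataFrame({"x1": [1.0, 2.0, 3.0], "x2": [10.0, 20.0, 30.0]})
ct = ColumnTransformer(
    [("A", "passthrough", ["x1", "x2"]), ("B", StandardScaler(), ["x1"])],
    verbose_feature_names_out=True,
).fit(df)
print(ct.get_feature_names_out())   # A__x1, A__x2, B__x1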
|
def test_column_transformer_error_with_duplicated_columns(dataframe_lib):
"""Check that we raise an error when using `ColumnTransformer` and
the columns names are duplicated between transformers."""
lib = pytest.importorskip(dataframe_lib)
df = lib.DataFrame({"x1": [1, 2, 3], "x2": [10, 20, 30], "x3": [100, 200, 300]})
transformer = ColumnTransformer(
transformers=[
("A", "passthrough", ["x1", "x2", "x3"]),
("B", FunctionTransformer(), ["x1", "x2"]),
("C", StandardScaler(), ["x1", "x3"]),
            # special case of a transformer returning 0 columns, e.g. a feature selector
(
"D",
FunctionTransformer(lambda x: _safe_indexing(x, [], axis=1)),
["x1", "x2", "x3"],
),
],
verbose_feature_names_out=False,
).set_output(transform=dataframe_lib)
err_msg = re.escape(
"Duplicated feature names found before concatenating the outputs of the "
"transformers: ['x1', 'x2', 'x3'].\n"
"Transformer A has conflicting columns names: ['x1', 'x2', 'x3'].\n"
"Transformer B has conflicting columns names: ['x1', 'x2'].\n"
"Transformer C has conflicting columns names: ['x1', 'x3'].\n"
)
with pytest.raises(ValueError, match=err_msg):
transformer.fit_transform(df)
|
Check that we raise an error when using `ColumnTransformer` and
the columns names are duplicated between transformers.
|
test_column_transformer_error_with_duplicated_columns
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/tests/test_column_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/tests/test_column_transformer.py
|
BSD-3-Clause
|
def test_column_transformer_auto_memmap():
"""Check that ColumnTransformer works in parallel with joblib's auto-memmapping.
non-regression test for issue #28781
"""
X = np.random.RandomState(0).uniform(size=(3, 4))
scaler = StandardScaler(copy=False)
transformer = ColumnTransformer(
transformers=[("scaler", scaler, [0])],
n_jobs=2,
)
with joblib.parallel_backend("loky", max_nbytes=1):
Xt = transformer.fit_transform(X)
assert_allclose(Xt, StandardScaler().fit_transform(X[:, [0]]))
|
Check that ColumnTransformer works in parallel with joblib's auto-memmapping.
non-regression test for issue #28781
|
test_column_transformer_auto_memmap
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/tests/test_column_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/tests/test_column_transformer.py
|
BSD-3-Clause
|
def test_routing_passed_metadata_not_supported(method):
"""Test that the right error message is raised when metadata is passed while
not supported when `enable_metadata_routing=False`."""
X = np.array([[0, 1, 2], [2, 4, 6]]).T
y = [1, 2, 3]
trs = ColumnTransformer([("trans", Trans(), [0])]).fit(X, y)
with pytest.raises(
ValueError, match="is only supported if enable_metadata_routing=True"
):
getattr(trs, method)([[1]], sample_weight=[1], prop="a")
|
Test that the right error message is raised when metadata is passed while
not supported when `enable_metadata_routing=False`.
|
test_routing_passed_metadata_not_supported
|
python
|
scikit-learn/scikit-learn
|
sklearn/compose/tests/test_column_transformer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/compose/tests/test_column_transformer.py
|
BSD-3-Clause
|