code | docstring | func_name | language | repo | path | url | license
---|---|---|---|---|---|---|---
def _get_threadlocal_config():
"""Get a threadlocal **mutable** configuration. If the configuration
does not exist, copy the default global configuration."""
if not hasattr(_threadlocal, "global_config"):
_threadlocal.global_config = _global_config.copy()
return _threadlocal.global_config
|
Get a threadlocal **mutable** configuration. If the configuration
does not exist, copy the default global configuration.
|
_get_threadlocal_config
|
python
|
scikit-learn/scikit-learn
|
sklearn/_config.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/_config.py
|
BSD-3-Clause
|
def get_config():
"""Retrieve current values for configuration set by :func:`set_config`.
Returns
-------
config : dict
Keys are parameter names that can be passed to :func:`set_config`.
See Also
--------
config_context : Context manager for global scikit-learn configuration.
set_config : Set global scikit-learn configuration.
Examples
--------
>>> import sklearn
>>> config = sklearn.get_config()
>>> config.keys()
dict_keys([...])
"""
# Return a copy of the threadlocal configuration so that users will
# not be able to modify the configuration with the returned dict.
return _get_threadlocal_config().copy()
|
Retrieve current values for configuration set by :func:`set_config`.
Returns
-------
config : dict
Keys are parameter names that can be passed to :func:`set_config`.
See Also
--------
config_context : Context manager for global scikit-learn configuration.
set_config : Set global scikit-learn configuration.
Examples
--------
>>> import sklearn
>>> config = sklearn.get_config()
>>> config.keys()
dict_keys([...])
|
get_config
|
python
|
scikit-learn/scikit-learn
|
sklearn/_config.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/_config.py
|
BSD-3-Clause
|
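A minimal usage sketch of the public configuration API that `get_config` belongs to, using only documented calls (`set_config` and `config_context` are the counterparts cross-referenced above):

import sklearn

# Read the current (thread-local) configuration; the returned dict is a copy,
# so mutating it does not change the active settings.
config = sklearn.get_config()
print(sorted(config)[:3])

# Change a setting for this thread...
sklearn.set_config(assume_finite=True)
print(sklearn.get_config()["assume_finite"])   # True

# ...or only inside a context, after which the previous value is restored.
with sklearn.config_context(assume_finite=False):
    print(sklearn.get_config()["assume_finite"])   # False
print(sklearn.get_config()["assume_finite"])   # True again

sklearn.set_config(assume_finite=False)   # restore the default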
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs"""
import numpy as np
# Check if a random seed exists in the environment, if not create one.
_random_seed = os.environ.get("SKLEARN_SEED", None)
if _random_seed is None:
_random_seed = np.random.uniform() * np.iinfo(np.int32).max
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
|
Fixture for the tests to ensure globally controllable seeding of RNGs
|
setup_module
|
python
|
scikit-learn/scikit-learn
|
sklearn/__init__.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/__init__.py
|
BSD-3-Clause
|
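For context, `setup_module` reads the `SKLEARN_SEED` environment variable; a hedged sketch of pinning that seed before a test run (the value 42 is an arbitrary choice):

import os
import random
import numpy as np

# Pin the seed the fixture would otherwise draw at random.
os.environ["SKLEARN_SEED"] = "42"

# What setup_module then does, in essence:
seed = int(os.environ["SKLEARN_SEED"])
np.random.seed(seed)
random.seed(seed)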
def affinity_propagation(
S,
*,
preference=None,
convergence_iter=15,
max_iter=200,
damping=0.5,
copy=True,
verbose=False,
return_n_iter=False,
random_state=None,
):
"""Perform Affinity Propagation Clustering of data.
Read more in the :ref:`User Guide <affinity_propagation>`.
Parameters
----------
S : array-like of shape (n_samples, n_samples)
Matrix of similarities between points.
preference : array-like of shape (n_samples,) or float, default=None
Preferences for each point - points with larger values of
preferences are more likely to be chosen as exemplars. The number of
exemplars, i.e. of clusters, is influenced by the input preferences
value. If the preferences are not passed as arguments, they will be
set to the median of the input similarities (resulting in a moderate
number of clusters). For a smaller number of clusters, this can be set
to the minimum value of the similarities.
convergence_iter : int, default=15
Number of iterations with no change in the number
of estimated clusters that stops the convergence.
max_iter : int, default=200
Maximum number of iterations.
damping : float, default=0.5
Damping factor between 0.5 and 1.
copy : bool, default=True
If copy is False, the affinity matrix is modified inplace by the
algorithm, for memory efficiency.
verbose : bool, default=False
The verbosity level.
return_n_iter : bool, default=False
Whether or not to return the number of iterations.
random_state : int, RandomState instance or None, default=None
Pseudo-random number generator to control the starting state.
Use an int for reproducible results across function calls.
See the :term:`Glossary <random_state>`.
.. versionadded:: 0.23
this parameter was previously hardcoded as 0.
Returns
-------
cluster_centers_indices : ndarray of shape (n_clusters,)
Index of clusters centers.
labels : ndarray of shape (n_samples,)
Cluster labels for each point.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to True.
Notes
-----
For an example usage,
see :ref:`sphx_glr_auto_examples_cluster_plot_affinity_propagation.py`.
You may also check out
:ref:`sphx_glr_auto_examples_applications_plot_stock_market.py`.
When the algorithm does not converge, it will still return arrays of
``cluster_centers_indices`` and labels if there are any exemplars/clusters;
however, they may be degenerate and should be used with caution.
When all training samples have equal similarities and equal preferences,
the assignment of cluster centers and labels depends on the preference.
If the preference is smaller than the similarities, a single cluster center
and label ``0`` for every sample will be returned. Otherwise, every
training sample becomes its own cluster center and is assigned a unique
label.
References
----------
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
Examples
--------
>>> import numpy as np
>>> from sklearn.cluster import affinity_propagation
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = np.array([[1, 2], [1, 4], [1, 0],
... [4, 2], [4, 4], [4, 0]])
>>> S = -euclidean_distances(X, squared=True)
>>> cluster_centers_indices, labels = affinity_propagation(S, random_state=0)
>>> cluster_centers_indices
array([0, 3])
>>> labels
array([0, 0, 0, 1, 1, 1])
"""
estimator = AffinityPropagation(
damping=damping,
max_iter=max_iter,
convergence_iter=convergence_iter,
copy=copy,
preference=preference,
affinity="precomputed",
verbose=verbose,
random_state=random_state,
).fit(S)
if return_n_iter:
return estimator.cluster_centers_indices_, estimator.labels_, estimator.n_iter_
return estimator.cluster_centers_indices_, estimator.labels_
|
Perform Affinity Propagation Clustering of data.
Read more in the :ref:`User Guide <affinity_propagation>`.
Parameters
----------
S : array-like of shape (n_samples, n_samples)
Matrix of similarities between points.
preference : array-like of shape (n_samples,) or float, default=None
Preferences for each point - points with larger values of
preferences are more likely to be chosen as exemplars. The number of
exemplars, i.e. of clusters, is influenced by the input preferences
value. If the preferences are not passed as arguments, they will be
set to the median of the input similarities (resulting in a moderate
number of clusters). For a smaller number of clusters, this can be set
to the minimum value of the similarities.
convergence_iter : int, default=15
Number of iterations with no change in the number
of estimated clusters that stops the convergence.
max_iter : int, default=200
Maximum number of iterations.
damping : float, default=0.5
Damping factor between 0.5 and 1.
copy : bool, default=True
If copy is False, the affinity matrix is modified inplace by the
algorithm, for memory efficiency.
verbose : bool, default=False
The verbosity level.
return_n_iter : bool, default=False
Whether or not to return the number of iterations.
random_state : int, RandomState instance or None, default=None
Pseudo-random number generator to control the starting state.
Use an int for reproducible results across function calls.
See the :term:`Glossary <random_state>`.
.. versionadded:: 0.23
this parameter was previously hardcoded as 0.
Returns
-------
cluster_centers_indices : ndarray of shape (n_clusters,)
Index of clusters centers.
labels : ndarray of shape (n_samples,)
Cluster labels for each point.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to True.
Notes
-----
For an example usage,
see :ref:`sphx_glr_auto_examples_cluster_plot_affinity_propagation.py`.
You may also check out
:ref:`sphx_glr_auto_examples_applications_plot_stock_market.py`.
When the algorithm does not converge, it will still return arrays of
``cluster_centers_indices`` and labels if there are any exemplars/clusters;
however, they may be degenerate and should be used with caution.
When all training samples have equal similarities and equal preferences,
the assignment of cluster centers and labels depends on the preference.
If the preference is smaller than the similarities, a single cluster center
and label ``0`` for every sample will be returned. Otherwise, every
training sample becomes its own cluster center and is assigned a unique
label.
References
----------
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
Examples
--------
>>> import numpy as np
>>> from sklearn.cluster import affinity_propagation
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = np.array([[1, 2], [1, 4], [1, 0],
... [4, 2], [4, 4], [4, 0]])
>>> S = -euclidean_distances(X, squared=True)
>>> cluster_centers_indices, labels = affinity_propagation(S, random_state=0)
>>> cluster_centers_indices
array([0, 3])
>>> labels
array([0, 0, 0, 1, 1, 1])
|
affinity_propagation
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_affinity_propagation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_affinity_propagation.py
|
BSD-3-Clause
|
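As the body above shows, the function is a thin wrapper around the `AffinityPropagation` estimator with `affinity='precomputed'`; a short sketch of the equivalent estimator call on the same toy data as the docstring example:

import numpy as np
from sklearn.cluster import AffinityPropagation
from sklearn.metrics.pairwise import euclidean_distances

X = np.array([[1, 2], [1, 4], [1, 0],
              [4, 2], [4, 4], [4, 0]])
S = -euclidean_distances(X, squared=True)

ap = AffinityPropagation(affinity="precomputed", random_state=0).fit(S)
print(ap.cluster_centers_indices_)   # array([0, 3]), as in the docstring example
print(ap.labels_)                    # array([0, 0, 0, 1, 1, 1])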
def fit(self, X, y=None):
"""Fit the clustering from features, or affinity matrix.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features), or \
array-like of shape (n_samples, n_samples)
Training instances to cluster, or similarities / affinities between
instances if ``affinity='precomputed'``. If a sparse feature matrix
is provided, it will be converted into a sparse ``csr_matrix``.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self
Returns the instance itself.
"""
if self.affinity == "precomputed":
X = validate_data(self, X, copy=self.copy, force_writeable=True)
self.affinity_matrix_ = X
else: # self.affinity == "euclidean"
X = validate_data(self, X, accept_sparse="csr")
self.affinity_matrix_ = -euclidean_distances(X, squared=True)
if self.affinity_matrix_.shape[0] != self.affinity_matrix_.shape[1]:
raise ValueError(
"The matrix of similarities must be a square array. "
f"Got {self.affinity_matrix_.shape} instead."
)
if self.preference is None:
preference = np.median(self.affinity_matrix_)
else:
preference = self.preference
preference = np.asarray(preference)
random_state = check_random_state(self.random_state)
(
self.cluster_centers_indices_,
self.labels_,
self.n_iter_,
) = _affinity_propagation(
self.affinity_matrix_,
max_iter=self.max_iter,
convergence_iter=self.convergence_iter,
preference=preference,
damping=self.damping,
verbose=self.verbose,
return_n_iter=True,
random_state=random_state,
)
if self.affinity != "precomputed":
self.cluster_centers_ = X[self.cluster_centers_indices_].copy()
return self
|
Fit the clustering from features, or affinity matrix.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features), or array-like of shape (n_samples, n_samples)
Training instances to cluster, or similarities / affinities between
instances if ``affinity='precomputed'``. If a sparse feature matrix
is provided, it will be converted into a sparse ``csr_matrix``.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self
Returns the instance itself.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_affinity_propagation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_affinity_propagation.py
|
BSD-3-Clause
|
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
New data to predict. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
labels : ndarray of shape (n_samples,)
Cluster labels.
"""
check_is_fitted(self)
X = validate_data(self, X, reset=False, accept_sparse="csr")
if not hasattr(self, "cluster_centers_"):
raise ValueError(
"Predict method is not supported when affinity='precomputed'."
)
if self.cluster_centers_.shape[0] > 0:
with config_context(assume_finite=True):
return pairwise_distances_argmin(X, self.cluster_centers_)
else:
warnings.warn(
(
"This model does not have any cluster centers "
"because affinity propagation did not converge. "
"Labeling every sample as '-1'."
),
ConvergenceWarning,
)
return np.array([-1] * X.shape[0])
|
Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
New data to predict. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
labels : ndarray of shape (n_samples,)
Cluster labels.
|
predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_affinity_propagation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_affinity_propagation.py
|
BSD-3-Clause
|
def _fix_connectivity(X, connectivity, affinity):
"""
Fixes the connectivity matrix.
The different steps are:
- copies it
- makes it symmetric
- converts it to LIL if necessary
- completes it if necessary.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Feature matrix representing `n_samples` samples to be clustered.
connectivity : sparse matrix, default=None
Connectivity matrix. Defines for each sample the neighboring samples
following a given structure of the data. The matrix is assumed to
be symmetric and only the upper triangular half is used.
Default is `None`, i.e., the Ward algorithm is unstructured.
affinity : {"euclidean", "precomputed"}, default="euclidean"
Which affinity to use. At the moment `precomputed` and
``euclidean`` are supported. `euclidean` uses the
negative squared Euclidean distance between points.
Returns
-------
connectivity : sparse matrix
The fixed connectivity matrix.
n_connected_components : int
The number of connected components in the graph.
"""
n_samples = X.shape[0]
if connectivity.shape[0] != n_samples or connectivity.shape[1] != n_samples:
raise ValueError(
"Wrong shape for connectivity matrix: %s when X is %s"
% (connectivity.shape, X.shape)
)
# Make the connectivity matrix symmetric:
connectivity = connectivity + connectivity.T
# Convert connectivity matrix to LIL
if not sparse.issparse(connectivity):
connectivity = sparse.lil_matrix(connectivity)
# `connectivity` is a sparse matrix at this point
if connectivity.format != "lil":
connectivity = connectivity.tolil()
# Compute the number of nodes
n_connected_components, labels = connected_components(connectivity)
if n_connected_components > 1:
warnings.warn(
"the number of connected components of the "
"connectivity matrix is %d > 1. Completing it to avoid "
"stopping the tree early." % n_connected_components,
stacklevel=2,
)
# XXX: Can we do without completing the matrix?
connectivity = _fix_connected_components(
X=X,
graph=connectivity,
n_connected_components=n_connected_components,
component_labels=labels,
metric=affinity,
mode="connectivity",
)
return connectivity, n_connected_components
|
Fixes the connectivity matrix.
The different steps are:
- copies it
- makes it symmetric
- converts it to LIL if necessary
- completes it if necessary.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Feature matrix representing `n_samples` samples to be clustered.
connectivity : sparse matrix, default=None
Connectivity matrix. Defines for each sample the neighboring samples
following a given structure of the data. The matrix is assumed to
be symmetric and only the upper triangular half is used.
Default is `None`, i.e., the Ward algorithm is unstructured.
affinity : {"euclidean", "precomputed"}, default="euclidean"
Which affinity to use. At the moment `precomputed` and
``euclidean`` are supported. `euclidean` uses the
negative squared Euclidean distance between points.
Returns
-------
connectivity : sparse matrix
The fixed connectivity matrix.
n_connected_components : int
The number of connected components in the graph.
|
_fix_connectivity
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_agglomerative.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_agglomerative.py
|
BSD-3-Clause
|
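`_fix_connectivity` is private, but the connectivity matrices it repairs typically come from `sklearn.neighbors.kneighbors_graph`; a hedged sketch of the usual public-API path (the data here is arbitrary):

import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph

rng = np.random.RandomState(0)
X = rng.rand(20, 2)

# k-nearest-neighbours connectivity; structured Ward symmetrizes it and, if the
# graph has several connected components, completes it (emitting the warning
# raised in _fix_connectivity above).
connectivity = kneighbors_graph(X, n_neighbors=3, include_self=False)

ward = AgglomerativeClustering(
    n_clusters=3, linkage="ward", connectivity=connectivity
).fit(X)
print(ward.labels_)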
def _single_linkage_tree(
connectivity,
n_samples,
n_nodes,
n_clusters,
n_connected_components,
return_distance,
):
"""
Perform single linkage clustering on sparse data via the minimum
spanning tree from scipy.sparse.csgraph, then using union-find to label.
The parent array is then generated by walking through the tree.
"""
from scipy.sparse.csgraph import minimum_spanning_tree
# explicitly cast connectivity to ensure safety
connectivity = connectivity.astype(np.float64, copy=False)
# Ensure zero distances aren't ignored by setting them to "epsilon"
epsilon_value = np.finfo(dtype=connectivity.data.dtype).eps
connectivity.data[connectivity.data == 0] = epsilon_value
# Use scipy.sparse.csgraph to generate a minimum spanning tree
mst = minimum_spanning_tree(connectivity.tocsr())
# Convert the graph to scipy.cluster.hierarchy array format
mst = mst.tocoo()
# Undo the epsilon values
mst.data[mst.data == epsilon_value] = 0
mst_array = np.vstack([mst.row, mst.col, mst.data]).T
# Sort edges of the min_spanning_tree by weight
mst_array = mst_array[np.argsort(mst_array.T[2], kind="mergesort"), :]
# Convert edge list into standard hierarchical clustering format
single_linkage_tree = _hierarchical._single_linkage_label(mst_array)
children_ = single_linkage_tree[:, :2].astype(int)
# Compute parents
parent = np.arange(n_nodes, dtype=np.intp)
for i, (left, right) in enumerate(children_, n_samples):
if n_clusters is not None and i >= n_nodes:
break
if left < n_nodes:
parent[left] = i
if right < n_nodes:
parent[right] = i
if return_distance:
distances = single_linkage_tree[:, 2]
return children_, n_connected_components, n_samples, parent, distances
return children_, n_connected_components, n_samples, parent
|
Perform single linkage clustering on sparse data via the minimum
spanning tree from scipy.sparse.csgraph, then using union-find to label.
The parent array is then generated by walking through the tree.
|
_single_linkage_tree
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_agglomerative.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_agglomerative.py
|
BSD-3-Clause
|
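A self-contained sketch of the `scipy.sparse.csgraph.minimum_spanning_tree` step used above, on a hypothetical 4-node graph:

import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import minimum_spanning_tree

# Weighted connectivity between 4 samples (zero entries mean "no edge").
graph = csr_matrix(np.array([[0, 8, 0, 3],
                             [0, 0, 2, 5],
                             [0, 0, 0, 6],
                             [0, 0, 0, 0]], dtype=float))

mst = minimum_spanning_tree(graph).tocoo()
# Edge list (row, col, weight) sorted by weight, the same layout that
# _single_linkage_tree hands to the hierarchical labeling step.
edges = np.vstack([mst.row, mst.col, mst.data]).T
print(edges[np.argsort(edges[:, 2])])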
def _hc_cut(n_clusters, children, n_leaves):
"""Function cutting the ward tree for a given number of clusters.
Parameters
----------
n_clusters : int or ndarray
The number of clusters to form.
children : ndarray of shape (n_nodes-1, 2)
The children of each non-leaf node. Values less than `n_samples`
correspond to leaves of the tree which are the original samples.
A node `i` greater than or equal to `n_samples` is a non-leaf
node and has children `children_[i - n_samples]`. Alternatively
at the i-th iteration, children[i][0] and children[i][1]
are merged to form node `n_samples + i`.
n_leaves : int
Number of leaves of the tree.
Returns
-------
labels : array [n_samples]
Cluster labels for each point.
"""
if n_clusters > n_leaves:
raise ValueError(
"Cannot extract more clusters than samples: "
f"{n_clusters} clusters were given for a tree with {n_leaves} leaves."
)
# In this function, we store nodes as a heap to avoid recomputing
# the max of the nodes: the first element is always the smallest
# We use negated indices as heaps work on smallest elements, and we
# are interested in largest elements
# children[-1] is the root of the tree
nodes = [-(max(children[-1]) + 1)]
for _ in range(n_clusters - 1):
# As we have a heap, nodes[0] is the smallest element
these_children = children[-nodes[0] - n_leaves]
# Insert the 2 children and remove the largest node
heappush(nodes, -these_children[0])
heappushpop(nodes, -these_children[1])
label = np.zeros(n_leaves, dtype=np.intp)
for i, node in enumerate(nodes):
label[_hierarchical._hc_get_descendent(-node, children, n_leaves)] = i
return label
|
Function cutting the ward tree for a given number of clusters.
Parameters
----------
n_clusters : int or ndarray
The number of clusters to form.
children : ndarray of shape (n_nodes-1, 2)
The children of each non-leaf node. Values less than `n_samples`
correspond to leaves of the tree which are the original samples.
A node `i` greater than or equal to `n_samples` is a non-leaf
node and has children `children_[i - n_samples]`. Alternatively
at the i-th iteration, children[i][0] and children[i][1]
are merged to form node `n_samples + i`.
n_leaves : int
Number of leaves of the tree.
Returns
-------
labels : array [n_samples]
Cluster labels for each point.
|
_hc_cut
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_agglomerative.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_agglomerative.py
|
BSD-3-Clause
|
def _fit(self, X):
"""Fit without validation
Parameters
----------
X : ndarray of shape (n_samples, n_features) or (n_samples, n_samples)
Training instances to cluster, or distances between instances if
``metric='precomputed'``.
Returns
-------
self : object
Returns the fitted instance.
"""
memory = check_memory(self.memory)
if not ((self.n_clusters is None) ^ (self.distance_threshold is None)):
raise ValueError(
"Exactly one of n_clusters and "
"distance_threshold has to be set, and the other "
"needs to be None."
)
if self.distance_threshold is not None and not self.compute_full_tree:
raise ValueError(
"compute_full_tree must be True if distance_threshold is set."
)
if self.linkage == "ward" and self.metric != "euclidean":
raise ValueError(
f"{self.metric} was provided as metric. Ward can only "
"work with euclidean distances."
)
tree_builder = _TREE_BUILDERS[self.linkage]
connectivity = self.connectivity
if self.connectivity is not None:
if callable(self.connectivity):
connectivity = self.connectivity(X)
connectivity = check_array(
connectivity, accept_sparse=["csr", "coo", "lil"]
)
n_samples = len(X)
compute_full_tree = self.compute_full_tree
if self.connectivity is None:
compute_full_tree = True
if compute_full_tree == "auto":
if self.distance_threshold is not None:
compute_full_tree = True
else:
# Early stopping is likely to give a speed up only for
# a large number of clusters. The actual threshold
# implemented here is heuristic
compute_full_tree = self.n_clusters < max(100, 0.02 * n_samples)
n_clusters = self.n_clusters
if compute_full_tree:
n_clusters = None
# Construct the tree
kwargs = {}
if self.linkage != "ward":
kwargs["linkage"] = self.linkage
kwargs["affinity"] = self.metric
distance_threshold = self.distance_threshold
return_distance = (distance_threshold is not None) or self.compute_distances
out = memory.cache(tree_builder)(
X,
connectivity=connectivity,
n_clusters=n_clusters,
return_distance=return_distance,
**kwargs,
)
(self.children_, self.n_connected_components_, self.n_leaves_, parents) = out[
:4
]
if return_distance:
self.distances_ = out[-1]
if self.distance_threshold is not None: # distance_threshold is used
self.n_clusters_ = (
np.count_nonzero(self.distances_ >= distance_threshold) + 1
)
else: # n_clusters is used
self.n_clusters_ = self.n_clusters
# Cut the tree
if compute_full_tree:
self.labels_ = _hc_cut(self.n_clusters_, self.children_, self.n_leaves_)
else:
labels = _hierarchical.hc_get_heads(parents, copy=False)
# copy to avoid holding a reference on the original array
labels = np.copy(labels[:n_samples])
# Reassign cluster numbers
self.labels_ = np.searchsorted(np.unique(labels), labels)
return self
|
Fit without validation
Parameters
----------
X : ndarray of shape (n_samples, n_features) or (n_samples, n_samples)
Training instances to cluster, or distances between instances if
``metric='precomputed'``.
Returns
-------
self : object
Returns the fitted instance.
|
_fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_agglomerative.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_agglomerative.py
|
BSD-3-Clause
|
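A sketch of the public behaviour `_fit` implements: exactly one of `n_clusters` and `distance_threshold` is set, and merge distances are exposed when distances are computed (toy data reused from the affinity propagation example):

import numpy as np
from sklearn.cluster import AgglomerativeClustering

X = np.array([[1, 2], [1, 4], [1, 0],
              [4, 2], [4, 4], [4, 0]])

# distance_threshold requires n_clusters=None and forces the full tree.
model = AgglomerativeClustering(n_clusters=None, distance_threshold=2.5).fit(X)
print(model.n_clusters_)   # clusters found at this threshold
print(model.labels_)
print(model.distances_)    # merge distances of the full tree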
def fit(self, X, y=None):
"""Fit the hierarchical clustering on the data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self : object
Returns the transformer.
"""
X = validate_data(self, X, ensure_min_features=2)
super()._fit(X.T)
self._n_features_out = self.n_clusters_
return self
|
Fit the hierarchical clustering on the data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self : object
Returns the transformer.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_agglomerative.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_agglomerative.py
|
BSD-3-Clause
|
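The `fit` above belongs to `FeatureAgglomeration`, which clusters columns rather than samples (hence the `X.T`); a short usage sketch on random data:

import numpy as np
from sklearn.cluster import FeatureAgglomeration

rng = np.random.RandomState(0)
X = rng.rand(10, 6)   # 10 samples, 6 features

agglo = FeatureAgglomeration(n_clusters=2).fit(X)
print(agglo.labels_)              # one cluster label per feature
print(agglo.transform(X).shape)   # (10, 2): features pooled per cluster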
def _scale_normalize(X):
"""Normalize ``X`` by scaling rows and columns independently.
Returns the normalized matrix and the row and column scaling
factors.
"""
X = make_nonnegative(X)
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
row_diag = np.where(np.isnan(row_diag), 0, row_diag)
col_diag = np.where(np.isnan(col_diag), 0, col_diag)
if issparse(X):
n_rows, n_cols = X.shape
r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
an = r @ X @ c
else:
an = row_diag[:, np.newaxis] * X * col_diag
return an, row_diag, col_diag
|
Normalize ``X`` by scaling rows and columns independently.
Returns the normalized matrix and the row and column scaling
factors.
|
_scale_normalize
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_bicluster.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_bicluster.py
|
BSD-3-Clause
|
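The normalization in `_scale_normalize` is the diagonal scaling R^(-1/2) X C^(-1/2) used by spectral co-clustering; a dense NumPy sketch of the same computation (NaN handling for empty rows/columns omitted):

import numpy as np

X = np.array([[1.0, 2.0, 0.0],
              [0.0, 1.0, 3.0]])

row_diag = 1.0 / np.sqrt(X.sum(axis=1))       # entries of R^(-1/2)
col_diag = 1.0 / np.sqrt(X.sum(axis=0))       # entries of C^(-1/2)
an = row_diag[:, np.newaxis] * X * col_diag   # mirrors the dense branch above
print(an)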
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
"""Normalize rows and columns of ``X`` simultaneously so that all
rows sum to one constant and all columns sum to a different
constant.
"""
# According to paper, this can also be done more efficiently with
# deviation reduction and balancing algorithms.
X = make_nonnegative(X)
X_scaled = X
for _ in range(max_iter):
X_new, _, _ = _scale_normalize(X_scaled)
if issparse(X):
dist = norm(X_scaled.data - X_new.data)
else:
dist = norm(X_scaled - X_new)
X_scaled = X_new
if dist is not None and dist < tol:
break
return X_scaled
|
Normalize rows and columns of ``X`` simultaneously so that all
rows sum to one constant and all columns sum to a different
constant.
|
_bistochastic_normalize
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_bicluster.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_bicluster.py
|
BSD-3-Clause
|
def _log_normalize(X):
"""Normalize ``X`` according to Kluger's log-interactions scheme."""
X = make_nonnegative(X, min_value=1)
if issparse(X):
raise ValueError(
"Cannot compute log of a sparse matrix,"
" because log(x) diverges to -infinity as x"
" goes to 0."
)
L = np.log(X)
row_avg = L.mean(axis=1)[:, np.newaxis]
col_avg = L.mean(axis=0)
avg = L.mean()
return L - row_avg - col_avg + avg
|
Normalize ``X`` according to Kluger's log-interactions scheme.
|
_log_normalize
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_bicluster.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_bicluster.py
|
BSD-3-Clause
|
def fit(self, X, y=None):
"""Create a biclustering for X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
SpectralBiclustering instance.
"""
X = validate_data(self, X, accept_sparse="csr", dtype=np.float64)
self._check_parameters(X.shape[0])
self._fit(X)
return self
|
Create a biclustering for X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
SpectralBiclustering instance.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_bicluster.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_bicluster.py
|
BSD-3-Clause
|
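A brief usage sketch of `SpectralBiclustering.fit` on the small array used in the estimator's documentation:

import numpy as np
from sklearn.cluster import SpectralBiclustering

X = np.array([[1, 1], [2, 1], [1, 0],
              [4, 7], [3, 5], [3, 6]])

clustering = SpectralBiclustering(n_clusters=2, random_state=0).fit(X)
print(clustering.row_labels_)      # cluster assignment for each row
print(clustering.column_labels_)   # cluster assignment for each column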
def _svd(self, array, n_components, n_discard):
"""Returns first `n_components` left and right singular
vectors u and v, discarding the first `n_discard`.
"""
if self.svd_method == "randomized":
kwargs = {}
if self.n_svd_vecs is not None:
kwargs["n_oversamples"] = self.n_svd_vecs
u, _, vt = _randomized_svd(
array, n_components, random_state=self.random_state, **kwargs
)
elif self.svd_method == "arpack":
u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
if np.any(np.isnan(vt)):
# some eigenvalues of A * A.T are negative, causing
# sqrt() to be np.nan. This causes some vectors in vt
# to be np.nan.
A = safe_sparse_dot(array.T, array)
random_state = check_random_state(self.random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, A.shape[0])
_, v = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
vt = v.T
if np.any(np.isnan(u)):
A = safe_sparse_dot(array, array.T)
random_state = check_random_state(self.random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, A.shape[0])
_, u = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
assert_all_finite(u)
assert_all_finite(vt)
u = u[:, n_discard:]
vt = vt[n_discard:]
return u, vt.T
|
Returns first `n_components` left and right singular
vectors u and v, discarding the first `n_discard`.
|
_svd
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_bicluster.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_bicluster.py
|
BSD-3-Clause
|
def _fit_best_piecewise(self, vectors, n_best, n_clusters):
"""Find the ``n_best`` vectors that are best approximated by piecewise
constant vectors.
The piecewise vectors are found by k-means; the best is chosen
according to Euclidean distance.
"""
def make_piecewise(v):
centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
return centroid[labels].ravel()
piecewise_vectors = np.apply_along_axis(make_piecewise, axis=1, arr=vectors)
dists = np.apply_along_axis(norm, axis=1, arr=(vectors - piecewise_vectors))
result = vectors[np.argsort(dists)[:n_best]]
return result
|
Find the ``n_best`` vectors that are best approximated by piecewise
constant vectors.
The piecewise vectors are found by k-means; the best is chosen
according to Euclidean distance.
|
_fit_best_piecewise
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_bicluster.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_bicluster.py
|
BSD-3-Clause
|
def _project_and_cluster(self, data, vectors, n_clusters):
"""Project ``data`` to ``vectors`` and cluster the result."""
projected = safe_sparse_dot(data, vectors)
_, labels = self._k_means(projected, n_clusters)
return labels
|
Project ``data`` to ``vectors`` and cluster the result.
|
_project_and_cluster
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_bicluster.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_bicluster.py
|
BSD-3-Clause
|
def _iterate_sparse_X(X):
"""This little hack returns a densified row when iterating over a sparse
matrix, instead of constructing a sparse matrix for every row, which is
expensive.
"""
n_samples = X.shape[0]
X_indices = X.indices
X_data = X.data
X_indptr = X.indptr
for i in range(n_samples):
row = np.zeros(X.shape[1])
startptr, endptr = X_indptr[i], X_indptr[i + 1]
nonzero_indices = X_indices[startptr:endptr]
row[nonzero_indices] = X_data[startptr:endptr]
yield row
|
This little hack returns a densified row when iterating over a sparse
matrix, instead of constructing a sparse matrix for every row, which is
expensive.
|
_iterate_sparse_X
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_birch.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_birch.py
|
BSD-3-Clause
|
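A standalone sketch of the densified-row iteration `_iterate_sparse_X` performs, using only SciPy and NumPy:

import numpy as np
from scipy import sparse

X = sparse.csr_matrix(np.array([[0, 2, 0],
                                [1, 0, 3]]))

for i in range(X.shape[0]):
    row = np.zeros(X.shape[1])
    start, end = X.indptr[i], X.indptr[i + 1]
    row[X.indices[start:end]] = X.data[start:end]
    print(row)   # dense view of row i, no per-row sparse matrix constructed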
def _split_node(node, threshold, branching_factor):
"""The node has to be split if there is no place for a new subcluster
in the node.
1. Two empty nodes and two empty subclusters are initialized.
2. The pair of distant subclusters are found.
3. The properties of the empty subclusters and nodes are updated
according to the nearest distance between the subclusters to the
pair of distant subclusters.
4. The two nodes are set as children to the two subclusters.
"""
new_subcluster1 = _CFSubcluster()
new_subcluster2 = _CFSubcluster()
new_node1 = _CFNode(
threshold=threshold,
branching_factor=branching_factor,
is_leaf=node.is_leaf,
n_features=node.n_features,
dtype=node.init_centroids_.dtype,
)
new_node2 = _CFNode(
threshold=threshold,
branching_factor=branching_factor,
is_leaf=node.is_leaf,
n_features=node.n_features,
dtype=node.init_centroids_.dtype,
)
new_subcluster1.child_ = new_node1
new_subcluster2.child_ = new_node2
if node.is_leaf:
if node.prev_leaf_ is not None:
node.prev_leaf_.next_leaf_ = new_node1
new_node1.prev_leaf_ = node.prev_leaf_
new_node1.next_leaf_ = new_node2
new_node2.prev_leaf_ = new_node1
new_node2.next_leaf_ = node.next_leaf_
if node.next_leaf_ is not None:
node.next_leaf_.prev_leaf_ = new_node2
dist = euclidean_distances(
node.centroids_, Y_norm_squared=node.squared_norm_, squared=True
)
n_clusters = dist.shape[0]
farthest_idx = np.unravel_index(dist.argmax(), (n_clusters, n_clusters))
node1_dist, node2_dist = dist[(farthest_idx,)]
node1_closer = node1_dist < node2_dist
# make sure node1 is closest to itself even if all distances are equal.
# This can only happen when all node.centroids_ are duplicates leading to all
# distances between centroids being zero.
node1_closer[farthest_idx[0]] = True
for idx, subcluster in enumerate(node.subclusters_):
if node1_closer[idx]:
new_node1.append_subcluster(subcluster)
new_subcluster1.update(subcluster)
else:
new_node2.append_subcluster(subcluster)
new_subcluster2.update(subcluster)
return new_subcluster1, new_subcluster2
|
The node has to be split if there is no place for a new subcluster
in the node.
1. Two empty nodes and two empty subclusters are initialized.
2. The pair of distant subclusters are found.
3. The properties of the empty subclusters and nodes are updated
according to the nearest distance between the subclusters to the
pair of distant subclusters.
4. The two nodes are set as children to the two subclusters.
|
_split_node
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_birch.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_birch.py
|
BSD-3-Clause
|
def update_split_subclusters(self, subcluster, new_subcluster1, new_subcluster2):
"""Remove a subcluster from a node and update it with the
split subclusters.
"""
ind = self.subclusters_.index(subcluster)
self.subclusters_[ind] = new_subcluster1
self.init_centroids_[ind] = new_subcluster1.centroid_
self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
self.append_subcluster(new_subcluster2)
|
Remove a subcluster from a node and update it with the
split subclusters.
|
update_split_subclusters
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_birch.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_birch.py
|
BSD-3-Clause
|
def insert_cf_subcluster(self, subcluster):
"""Insert a new subcluster into the node."""
if not self.subclusters_:
self.append_subcluster(subcluster)
return False
threshold = self.threshold
branching_factor = self.branching_factor
# We need to find the closest subcluster among all the
# subclusters so that we can insert our new subcluster.
dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
dist_matrix *= -2.0
dist_matrix += self.squared_norm_
closest_index = np.argmin(dist_matrix)
closest_subcluster = self.subclusters_[closest_index]
# If the subcluster has a child, we need a recursive strategy.
if closest_subcluster.child_ is not None:
split_child = closest_subcluster.child_.insert_cf_subcluster(subcluster)
if not split_child:
# If it is determined that the child need not be split, we
# can just update the closest_subcluster
closest_subcluster.update(subcluster)
self.init_centroids_[closest_index] = self.subclusters_[
closest_index
].centroid_
self.init_sq_norm_[closest_index] = self.subclusters_[
closest_index
].sq_norm_
return False
# things not too good. we need to redistribute the subclusters in
# our child node, and add a new subcluster in the parent
# subcluster to accommodate the new child.
else:
new_subcluster1, new_subcluster2 = _split_node(
closest_subcluster.child_,
threshold,
branching_factor,
)
self.update_split_subclusters(
closest_subcluster, new_subcluster1, new_subcluster2
)
if len(self.subclusters_) > self.branching_factor:
return True
return False
# good to go!
else:
merged = closest_subcluster.merge_subcluster(subcluster, self.threshold)
if merged:
self.init_centroids_[closest_index] = closest_subcluster.centroid_
self.init_sq_norm_[closest_index] = closest_subcluster.sq_norm_
return False
# not close to any other subclusters, and we still
# have space, so add.
elif len(self.subclusters_) < self.branching_factor:
self.append_subcluster(subcluster)
return False
# We do not have enough space nor is it closer to an
# other subcluster. We need to split.
else:
self.append_subcluster(subcluster)
return True
|
Insert a new subcluster into the node.
|
insert_cf_subcluster
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_birch.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_birch.py
|
BSD-3-Clause
|
def merge_subcluster(self, nominee_cluster, threshold):
"""Check if a cluster is worthy enough to be merged. If
yes then merge.
"""
new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
new_n = self.n_samples_ + nominee_cluster.n_samples_
new_centroid = (1 / new_n) * new_ls
new_sq_norm = np.dot(new_centroid, new_centroid)
# The squared radius of the cluster is defined:
# r^2 = sum_i ||x_i - c||^2 / n
# with x_i the n points assigned to the cluster and c its centroid:
# c = sum_i x_i / n
# This can be expanded to:
# r^2 = sum_i ||x_i||^2 / n - 2 < sum_i x_i / n, c> + n ||c||^2 / n
# and therefore simplifies to:
# r^2 = sum_i ||x_i||^2 / n - ||c||^2
sq_radius = new_ss / new_n - new_sq_norm
if sq_radius <= threshold**2:
(
self.n_samples_,
self.linear_sum_,
self.squared_sum_,
self.centroid_,
self.sq_norm_,
) = (new_n, new_ls, new_ss, new_centroid, new_sq_norm)
return True
return False
|
Check if a cluster is worthy enough to be merged. If so, merge.
|
merge_subcluster
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_birch.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_birch.py
|
BSD-3-Clause
|
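The comment block in `merge_subcluster` derives r^2 = sum_i ||x_i||^2 / n - ||c||^2; a quick NumPy check of that identity on arbitrary points:

import numpy as np

rng = np.random.RandomState(0)
x = rng.rand(5, 3)          # 5 points in 3 dimensions
c = x.mean(axis=0)          # centroid

lhs = np.mean(np.sum((x - c) ** 2, axis=1))      # definition of r^2
rhs = np.mean(np.sum(x ** 2, axis=1)) - c @ c    # simplified form used above
print(np.allclose(lhs, rhs))   # True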
def _get_leaves(self):
"""
Retrieve the leaves of the CF Node.
Returns
-------
leaves : list of shape (n_leaves,)
List of the leaf nodes.
"""
leaf_ptr = self.dummy_leaf_.next_leaf_
leaves = []
while leaf_ptr is not None:
leaves.append(leaf_ptr)
leaf_ptr = leaf_ptr.next_leaf_
return leaves
|
Retrieve the leaves of the CF Node.
Returns
-------
leaves : list of shape (n_leaves,)
List of the leaf nodes.
|
_get_leaves
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_birch.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_birch.py
|
BSD-3-Clause
|
def partial_fit(self, X=None, y=None):
"""
Online learning. Prevents rebuilding of CFTree from scratch.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features), \
default=None
Input data. If X is not provided, only the global clustering
step is done.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self
Fitted estimator.
"""
if X is None:
# Perform just the final global clustering step.
self._global_clustering()
return self
else:
return self._fit(X, partial=True)
|
Online learning. Prevents rebuilding of CFTree from scratch.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features), default=None
Input data. If X is not provided, only the global clustering
step is done.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self
Fitted estimator.
|
partial_fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_birch.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_birch.py
|
BSD-3-Clause
|
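A short sketch of incremental use of `Birch.partial_fit` over chunks, followed by the documented no-argument call that runs only the global clustering step (threshold and data are arbitrary choices):

import numpy as np
from sklearn.cluster import Birch

rng = np.random.RandomState(0)
chunks = [rng.rand(50, 2), rng.rand(50, 2)]

brc = Birch(threshold=0.2, n_clusters=3)
for chunk in chunks:
    brc.partial_fit(chunk)   # grows the CF tree incrementally

brc.partial_fit()            # X=None: only the global clustering step
print(brc.subcluster_centers_.shape)
print(brc.predict(rng.rand(5, 2)))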
def predict(self, X):
"""
Predict data using the ``centroids_`` of subclusters.
Avoid computation of the row norms of X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data.
Returns
-------
labels : ndarray of shape (n_samples,)
Labelled data.
"""
check_is_fitted(self)
X = validate_data(self, X, accept_sparse="csr", reset=False)
return self._predict(X)
|
Predict data using the ``centroids_`` of subclusters.
Avoid computation of the row norms of X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data.
Returns
-------
labels : ndarray of shape (n_samples,)
Labelled data.
|
predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_birch.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_birch.py
|
BSD-3-Clause
|
def _predict(self, X):
"""Predict data using the ``centroids_`` of subclusters."""
kwargs = {"Y_norm_squared": self._subcluster_norms}
with config_context(assume_finite=True):
argmin = pairwise_distances_argmin(
X, self.subcluster_centers_, metric_kwargs=kwargs
)
return self.subcluster_labels_[argmin]
|
Predict data using the ``centroids_`` of subclusters.
|
_predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_birch.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_birch.py
|
BSD-3-Clause
|
def transform(self, X):
"""
Transform X into subcluster centroids dimension.
Each dimension represents the distance from the sample point to each
cluster centroid.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data.
Returns
-------
X_trans : {array-like, sparse matrix} of shape (n_samples, n_clusters)
Transformed data.
"""
check_is_fitted(self)
X = validate_data(self, X, accept_sparse="csr", reset=False)
with config_context(assume_finite=True):
return euclidean_distances(X, self.subcluster_centers_)
|
Transform X into subcluster centroids dimension.
Each dimension represents the distance from the sample point to each
cluster centroid.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data.
Returns
-------
X_trans : {array-like, sparse matrix} of shape (n_samples, n_clusters)
Transformed data.
|
transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_birch.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_birch.py
|
BSD-3-Clause
|
def _global_clustering(self, X=None):
"""
Global clustering for the subclusters obtained after fitting
"""
clusterer = self.n_clusters
centroids = self.subcluster_centers_
compute_labels = (X is not None) and self.compute_labels
# Preprocessing for the global clustering.
not_enough_centroids = False
if isinstance(clusterer, Integral):
clusterer = AgglomerativeClustering(n_clusters=self.n_clusters)
# There is no need to perform the global clustering step.
if len(centroids) < self.n_clusters:
not_enough_centroids = True
# To use in predict to avoid recalculation.
self._subcluster_norms = row_norms(self.subcluster_centers_, squared=True)
if clusterer is None or not_enough_centroids:
self.subcluster_labels_ = np.arange(len(centroids))
if not_enough_centroids:
warnings.warn(
"Number of subclusters found (%d) by BIRCH is less "
"than (%d). Decrease the threshold."
% (len(centroids), self.n_clusters),
ConvergenceWarning,
)
else:
# The global clustering step that clusters the subclusters of
# the leaves. It assumes the centroids of the subclusters as
# samples and finds the final centroids.
self.subcluster_labels_ = clusterer.fit_predict(self.subcluster_centers_)
if compute_labels:
self.labels_ = self._predict(X)
|
Global clustering for the subclusters obtained after fitting
|
_global_clustering
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_birch.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_birch.py
|
BSD-3-Clause
|
def __init__(self, center, indices, score):
"""Create a new cluster node in the tree.
The node holds the center of this cluster and the indices of the data points
that belong to it.
"""
self.center = center
self.indices = indices
self.score = score
self.left = None
self.right = None
|
Create a new cluster node in the tree.
The node holds the center of this cluster and the indices of the data points
that belong to it.
|
__init__
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_bisect_k_means.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_bisect_k_means.py
|
BSD-3-Clause
|
def split(self, labels, centers, scores):
"""Split the cluster node into two subclusters."""
self.left = _BisectingTree(
indices=self.indices[labels == 0], center=centers[0], score=scores[0]
)
self.right = _BisectingTree(
indices=self.indices[labels == 1], center=centers[1], score=scores[1]
)
# reset the indices attribute to save memory
self.indices = None
|
Split the cluster node into two subclusters.
|
split
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_bisect_k_means.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_bisect_k_means.py
|
BSD-3-Clause
|
def get_cluster_to_bisect(self):
"""Return the cluster node to bisect next.
It's based on the score of the cluster, which can be either the number of
data points assigned to that cluster or the inertia of that cluster
(see `bisecting_strategy` for details).
"""
max_score = None
for cluster_leaf in self.iter_leaves():
if max_score is None or cluster_leaf.score > max_score:
max_score = cluster_leaf.score
best_cluster_leaf = cluster_leaf
return best_cluster_leaf
|
Return the cluster node to bisect next.
It's based on the score of the cluster, which can be either the number of
data points assigned to that cluster or the inertia of that cluster
(see `bisecting_strategy` for details).
|
get_cluster_to_bisect
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_bisect_k_means.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_bisect_k_means.py
|
BSD-3-Clause
|
def iter_leaves(self):
"""Iterate over all the cluster leaves in the tree."""
if self.left is None:
yield self
else:
yield from self.left.iter_leaves()
yield from self.right.iter_leaves()
|
Iterate over all the cluster leaves in the tree.
|
iter_leaves
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_bisect_k_means.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_bisect_k_means.py
|
BSD-3-Clause
|
def _warn_mkl_vcomp(self, n_active_threads):
"""Warn when vcomp and mkl are both present"""
warnings.warn(
"BisectingKMeans is known to have a memory leak on Windows "
"with MKL, when there are less chunks than available "
"threads. You can avoid it by setting the environment"
f" variable OMP_NUM_THREADS={n_active_threads}."
)
|
Warn when vcomp and mkl are both present
|
_warn_mkl_vcomp
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_bisect_k_means.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_bisect_k_means.py
|
BSD-3-Clause
|
def _inertia_per_cluster(self, X, centers, labels, sample_weight):
"""Calculate the sum of squared errors (inertia) per cluster.
Parameters
----------
X : {ndarray, csr_matrix} of shape (n_samples, n_features)
The input samples.
centers : ndarray of shape (n_clusters=2, n_features)
The cluster centers.
labels : ndarray of shape (n_samples,)
Index of the cluster each sample belongs to.
sample_weight : ndarray of shape (n_samples,)
The weights for each observation in X.
Returns
-------
inertia_per_cluster : ndarray of shape (n_clusters=2,)
Sum of squared errors (inertia) for each cluster.
"""
n_clusters = centers.shape[0] # = 2 since centers comes from a bisection
_inertia = _inertia_sparse if sp.issparse(X) else _inertia_dense
inertia_per_cluster = np.empty(n_clusters)
for label in range(n_clusters):
inertia_per_cluster[label] = _inertia(
X, sample_weight, centers, labels, self._n_threads, single_label=label
)
return inertia_per_cluster
|
Calculate the sum of squared errors (inertia) per cluster.
Parameters
----------
X : {ndarray, csr_matrix} of shape (n_samples, n_features)
The input samples.
centers : ndarray of shape (n_clusters=2, n_features)
The cluster centers.
labels : ndarray of shape (n_samples,)
Index of the cluster each sample belongs to.
sample_weight : ndarray of shape (n_samples,)
The weights for each observation in X.
Returns
-------
inertia_per_cluster : ndarray of shape (n_clusters=2,)
Sum of squared errors (inertia) for each cluster.
|
_inertia_per_cluster
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_bisect_k_means.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_bisect_k_means.py
|
BSD-3-Clause
|
def _bisect(self, X, x_squared_norms, sample_weight, cluster_to_bisect):
"""Split a cluster into 2 subsclusters.
Parameters
----------
X : {ndarray, csr_matrix} of shape (n_samples, n_features)
Training instances to cluster.
x_squared_norms : ndarray of shape (n_samples,)
Squared euclidean norm of each data point.
sample_weight : ndarray of shape (n_samples,)
The weights for each observation in X.
cluster_to_bisect : _BisectingTree node object
The cluster node to split.
"""
X = X[cluster_to_bisect.indices]
x_squared_norms = x_squared_norms[cluster_to_bisect.indices]
sample_weight = sample_weight[cluster_to_bisect.indices]
best_inertia = None
# Split samples in X into 2 clusters.
# Repeating `n_init` times to obtain best clusters
for _ in range(self.n_init):
centers_init = self._init_centroids(
X,
x_squared_norms=x_squared_norms,
init=self.init,
random_state=self._random_state,
n_centroids=2,
sample_weight=sample_weight,
)
labels, inertia, centers, _ = self._kmeans_single(
X,
sample_weight,
centers_init,
max_iter=self.max_iter,
verbose=self.verbose,
tol=self.tol,
n_threads=self._n_threads,
)
# allow small tolerance on the inertia to accommodate for
# non-deterministic rounding errors due to parallel computation
if best_inertia is None or inertia < best_inertia * (1 - 1e-6):
best_labels = labels
best_centers = centers
best_inertia = inertia
if self.verbose:
print(f"New centroids from bisection: {best_centers}")
if self.bisecting_strategy == "biggest_inertia":
scores = self._inertia_per_cluster(
X, best_centers, best_labels, sample_weight
)
else: # bisecting_strategy == "largest_cluster"
# Using minlength to make sure that we have the counts for both labels even
# if all samples are labelled 0.
scores = np.bincount(best_labels, minlength=2)
cluster_to_bisect.split(best_labels, best_centers, scores)
|
Split a cluster into 2 subclusters.
Parameters
----------
X : {ndarray, csr_matrix} of shape (n_samples, n_features)
Training instances to cluster.
x_squared_norms : ndarray of shape (n_samples,)
Squared euclidean norm of each data point.
sample_weight : ndarray of shape (n_samples,)
The weights for each observation in X.
cluster_to_bisect : _BisectingTree node object
The cluster node to split.
|
_bisect
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_bisect_k_means.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_bisect_k_means.py
|
BSD-3-Clause
|
def fit(self, X, y=None, sample_weight=None):
"""Compute bisecting k-means clustering.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training instances to cluster.
.. note:: The data will be converted to C ordering,
which will cause a memory copy
if the given data is not C-contiguous.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight. `sample_weight` is not used during
initialization if `init` is a callable.
Returns
-------
self
Fitted estimator.
"""
X = validate_data(
self,
X,
accept_sparse="csr",
dtype=[np.float64, np.float32],
order="C",
copy=self.copy_x,
accept_large_sparse=False,
)
self._check_params_vs_input(X)
self._random_state = check_random_state(self.random_state)
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
self._n_threads = _openmp_effective_n_threads()
if self.algorithm == "lloyd" or self.n_clusters == 1:
self._kmeans_single = _kmeans_single_lloyd
self._check_mkl_vcomp(X, X.shape[0])
else:
self._kmeans_single = _kmeans_single_elkan
# Subtract the mean of X for more accurate distance computations
if not sp.issparse(X):
self._X_mean = X.mean(axis=0)
X -= self._X_mean
# Initialize the hierarchical clusters tree
self._bisecting_tree = _BisectingTree(
indices=np.arange(X.shape[0]),
center=X.mean(axis=0),
score=0,
)
x_squared_norms = row_norms(X, squared=True)
for _ in range(self.n_clusters - 1):
# Choose the cluster to bisect
cluster_to_bisect = self._bisecting_tree.get_cluster_to_bisect()
# Split this cluster into 2 subclusters
self._bisect(X, x_squared_norms, sample_weight, cluster_to_bisect)
# Aggregate final labels and centers from the bisecting tree
self.labels_ = np.full(X.shape[0], -1, dtype=np.int32)
self.cluster_centers_ = np.empty((self.n_clusters, X.shape[1]), dtype=X.dtype)
for i, cluster_node in enumerate(self._bisecting_tree.iter_leaves()):
self.labels_[cluster_node.indices] = i
self.cluster_centers_[i] = cluster_node.center
cluster_node.label = i # label final clusters for future prediction
cluster_node.indices = None # release memory
# Restore original data
if not sp.issparse(X):
X += self._X_mean
self.cluster_centers_ += self._X_mean
_inertia = _inertia_sparse if sp.issparse(X) else _inertia_dense
self.inertia_ = _inertia(
X, sample_weight, self.cluster_centers_, self.labels_, self._n_threads
)
self._n_features_out = self.cluster_centers_.shape[0]
return self
|
Compute bisecting k-means clustering.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training instances to cluster.
.. note:: The data will be converted to C ordering,
which will cause a memory copy
if the given data is not C-contiguous.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight. `sample_weight` is not used during
initialization if `init` is a callable.
Returns
-------
self
Fitted estimator.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_bisect_k_means.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_bisect_k_means.py
|
BSD-3-Clause
|
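A usage sketch of `BisectingKMeans.fit`, mirroring the estimator's own documentation example:

import numpy as np
from sklearn.cluster import BisectingKMeans

X = np.array([[1, 1], [10, 1], [3, 1],
              [10, 0], [2, 1], [10, 2],
              [10, 8], [10, 9], [10, 10]])

bisect_means = BisectingKMeans(n_clusters=3, random_state=0).fit(X)
print(bisect_means.labels_)
print(bisect_means.predict([[0, 0], [12, 3]]))
print(bisect_means.cluster_centers_)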
def predict(self, X):
"""Predict which cluster each sample in X belongs to.
Prediction is made by going down the hierarchical tree
in search of the closest leaf cluster.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
New data to predict.
Returns
-------
labels : ndarray of shape (n_samples,)
Index of the cluster each sample belongs to.
"""
check_is_fitted(self)
X = self._check_test_data(X)
x_squared_norms = row_norms(X, squared=True)
# sample weights are unused but necessary in cython helpers
sample_weight = np.ones_like(x_squared_norms)
labels = self._predict_recursive(X, sample_weight, self._bisecting_tree)
return labels
|
Predict which cluster each sample in X belongs to.
Prediction is made by going down the hierarchical tree
in search of the closest leaf cluster.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
New data to predict.
Returns
-------
labels : ndarray of shape (n_samples,)
Index of the cluster each sample belongs to.
|
predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_bisect_k_means.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_bisect_k_means.py
|
BSD-3-Clause
|
def _predict_recursive(self, X, sample_weight, cluster_node):
"""Predict recursively by going down the hierarchical tree.
Parameters
----------
X : {ndarray, csr_matrix} of shape (n_samples, n_features)
The data points, currently assigned to `cluster_node`, to predict between
the subclusters of this node.
sample_weight : ndarray of shape (n_samples,)
The weights for each observation in X.
cluster_node : _BisectingTree node object
The cluster node of the hierarchical tree.
Returns
-------
labels : ndarray of shape (n_samples,)
Index of the cluster each sample belongs to.
"""
if cluster_node.left is None:
# This cluster has no subcluster. Labels are just the label of the cluster.
return np.full(X.shape[0], cluster_node.label, dtype=np.int32)
# Determine if data points belong to the left or right subcluster
centers = np.vstack((cluster_node.left.center, cluster_node.right.center))
if hasattr(self, "_X_mean"):
centers += self._X_mean
cluster_labels = _labels_inertia_threadpool_limit(
X,
sample_weight,
centers,
self._n_threads,
return_inertia=False,
)
mask = cluster_labels == 0
# Compute the labels for each subset of the data points.
labels = np.full(X.shape[0], -1, dtype=np.int32)
labels[mask] = self._predict_recursive(
X[mask], sample_weight[mask], cluster_node.left
)
labels[~mask] = self._predict_recursive(
X[~mask], sample_weight[~mask], cluster_node.right
)
return labels
|
Predict recursively by going down the hierarchical tree.
Parameters
----------
X : {ndarray, csr_matrix} of shape (n_samples, n_features)
The data points, currently assigned to `cluster_node`, to predict between
the subclusters of this node.
sample_weight : ndarray of shape (n_samples,)
The weights for each observation in X.
cluster_node : _BisectingTree node object
The cluster node of the hierarchical tree.
Returns
-------
labels : ndarray of shape (n_samples,)
Index of the cluster each sample belongs to.
|
_predict_recursive
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_bisect_k_means.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_bisect_k_means.py
|
BSD-3-Clause
|
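To make the descent logic of _predict_recursive above concrete, here is a conceptual, self-contained sketch that uses plain dicts instead of the internal _BisectingTree nodes (the node layout is a simplification, not the library's actual data structure):

import numpy as np

def predict_tree(X, node):
    # Leaf: every sample currently routed here gets the leaf's label.
    if 'label' in node:
        return np.full(X.shape[0], node['label'], dtype=np.int32)
    # Internal node: route each sample to the nearer of the two subcluster centers.
    centers = np.vstack([node['left']['center'], node['right']['center']])
    d2 = ((X[:, None, :] - centers[None, :, :]) ** 2).sum(axis=2)
    go_left = d2.argmin(axis=1) == 0
    labels = np.empty(X.shape[0], dtype=np.int32)
    labels[go_left] = predict_tree(X[go_left], node['left'])
    labels[~go_left] = predict_tree(X[~go_left], node['right'])
    return labels

leaf0 = {'label': 0, 'center': np.array([0.0, 0.0])}
leaf1 = {'label': 1, 'center': np.array([10.0, 0.0])}
root = {'center': np.array([5.0, 0.0]), 'left': leaf0, 'right': leaf1}
print(predict_tree(np.array([[1.0, 0.0], [9.0, 1.0]]), root))  # [0 1]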
def dbscan(
X,
eps=0.5,
*,
min_samples=5,
metric="minkowski",
metric_params=None,
algorithm="auto",
leaf_size=30,
p=2,
sample_weight=None,
n_jobs=None,
):
"""Perform DBSCAN clustering from vector array or distance matrix.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
X : {array-like, sparse (CSR) matrix} of shape (n_samples, n_features) or \
(n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
eps : float, default=0.5
The maximum distance between two samples for one to be considered
as in the neighborhood of the other. This is not a maximum bound
on the distances of points within a cluster. This is the most
important DBSCAN parameter to choose appropriately for your data set
and distance function.
min_samples : int, default=5
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : str or callable, default='minkowski'
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by :func:`sklearn.metrics.pairwise_distances` for
its metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square during fit.
X may be a :term:`sparse graph <sparse graph>`,
in which case only "nonzero" elements may be considered neighbors.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
.. versionadded:: 0.19
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, default=30
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
p : float, default=2
The power of the Minkowski metric to be used to calculate distance
between points.
sample_weight : array-like of shape (n_samples,), default=None
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search. ``None`` means
1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means
using all processors. See :term:`Glossary <n_jobs>` for more details.
If precomputed distances are used, parallel execution is not available
and thus n_jobs will have no effect.
Returns
-------
core_samples : ndarray of shape (n_core_samples,)
Indices of core samples.
labels : ndarray of shape (n_samples,)
Cluster labels for each point. Noisy samples are given the label -1.
See Also
--------
DBSCAN : An estimator interface for this clustering algorithm.
OPTICS : A similar estimator interface clustering at multiple values of
eps. Our implementation is optimized for memory usage.
Notes
-----
For an example, see :ref:`sphx_glr_auto_examples_cluster_plot_dbscan.py`.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n). It may attract a higher
memory complexity when querying these nearest neighborhoods, depending
on the ``algorithm``.
One way to avoid the query complexity is to pre-compute sparse
neighborhoods in chunks using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>` with
``mode='distance'``, then using ``metric='precomputed'`` here.
Another way to reduce memory and computation time is to remove
(near-)duplicate points and use ``sample_weight`` instead.
:class:`~sklearn.cluster.OPTICS` provides a similar clustering with lower
memory usage.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, `"A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise"
<https://www.dbs.ifi.lmu.de/Publikationen/Papers/KDD-96.final.frame.pdf>`_.
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
Schubert, E., Sander, J., Ester, M., Kriegel, H. P., & Xu, X. (2017).
:doi:`"DBSCAN revisited, revisited: why and how you should (still) use DBSCAN."
<10.1145/3068335>`
ACM Transactions on Database Systems (TODS), 42(3), 19.
Examples
--------
>>> from sklearn.cluster import dbscan
>>> X = [[1, 2], [2, 2], [2, 3], [8, 7], [8, 8], [25, 80]]
>>> core_samples, labels = dbscan(X, eps=3, min_samples=2)
>>> core_samples
array([0, 1, 2, 3, 4])
>>> labels
array([ 0, 0, 0, 1, 1, -1])
"""
est = DBSCAN(
eps=eps,
min_samples=min_samples,
metric=metric,
metric_params=metric_params,
algorithm=algorithm,
leaf_size=leaf_size,
p=p,
n_jobs=n_jobs,
)
est.fit(X, sample_weight=sample_weight)
return est.core_sample_indices_, est.labels_
|
Perform DBSCAN clustering from vector array or distance matrix.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
X : {array-like, sparse (CSR) matrix} of shape (n_samples, n_features) or (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
eps : float, default=0.5
The maximum distance between two samples for one to be considered
as in the neighborhood of the other. This is not a maximum bound
on the distances of points within a cluster. This is the most
important DBSCAN parameter to choose appropriately for your data set
and distance function.
min_samples : int, default=5
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : str or callable, default='minkowski'
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by :func:`sklearn.metrics.pairwise_distances` for
its metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square during fit.
X may be a :term:`sparse graph <sparse graph>`,
in which case only "nonzero" elements may be considered neighbors.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
.. versionadded:: 0.19
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, default=30
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
p : float, default=2
The power of the Minkowski metric to be used to calculate distance
between points.
sample_weight : array-like of shape (n_samples,), default=None
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search. ``None`` means
1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means
using all processors. See :term:`Glossary <n_jobs>` for more details.
If precomputed distances are used, parallel execution is not available
and thus n_jobs will have no effect.
Returns
-------
core_samples : ndarray of shape (n_core_samples,)
Indices of core samples.
labels : ndarray of shape (n_samples,)
Cluster labels for each point. Noisy samples are given the label -1.
See Also
--------
DBSCAN : An estimator interface for this clustering algorithm.
OPTICS : A similar estimator interface clustering at multiple values of
eps. Our implementation is optimized for memory usage.
Notes
-----
For an example, see :ref:`sphx_glr_auto_examples_cluster_plot_dbscan.py`.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n). It may attract a higher
memory complexity when querying these nearest neighborhoods, depending
on the ``algorithm``.
One way to avoid the query complexity is to pre-compute sparse
neighborhoods in chunks using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>` with
``mode='distance'``, then using ``metric='precomputed'`` here.
Another way to reduce memory and computation time is to remove
(near-)duplicate points and use ``sample_weight`` instead.
:class:`~sklearn.cluster.OPTICS` provides a similar clustering with lower
memory usage.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, `"A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise"
<https://www.dbs.ifi.lmu.de/Publikationen/Papers/KDD-96.final.frame.pdf>`_.
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
Schubert, E., Sander, J., Ester, M., Kriegel, H. P., & Xu, X. (2017).
:doi:`"DBSCAN revisited, revisited: why and how you should (still) use DBSCAN."
<10.1145/3068335>`
ACM Transactions on Database Systems (TODS), 42(3), 19.
Examples
--------
>>> from sklearn.cluster import dbscan
>>> X = [[1, 2], [2, 2], [2, 3], [8, 7], [8, 8], [25, 80]]
>>> core_samples, labels = dbscan(X, eps=3, min_samples=2)
>>> core_samples
array([0, 1, 2, 3, 4])
>>> labels
array([ 0, 0, 0, 1, 1, -1])
|
dbscan
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_dbscan.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_dbscan.py
|
BSD-3-Clause
|
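The Notes above recommend pre-computing sparse neighborhoods to bound memory; a short sketch of that workflow (eps/radius values are illustrative and taken from the docstring example):

import numpy as np
from sklearn.cluster import DBSCAN
from sklearn.neighbors import NearestNeighbors

X = np.array([[1, 2], [2, 2], [2, 3], [8, 7], [8, 8], [25, 80]], dtype=float)

# Build a sparse radius-neighborhood graph holding distances, then cluster
# with metric='precomputed' so DBSCAN never issues dense neighbor queries.
D = NearestNeighbors(radius=3).fit(X).radius_neighbors_graph(X, mode='distance')
labels = DBSCAN(eps=3, min_samples=2, metric='precomputed').fit_predict(D)
print(labels)  # should match the dense computation: [ 0  0  0  1  1 -1]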
def fit(self, X, y=None, sample_weight=None):
"""Perform DBSCAN clustering from features, or distance matrix.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features), or \
(n_samples, n_samples)
Training instances to cluster, or distances between instances if
``metric='precomputed'``. If a sparse matrix is provided, it will
be converted into a sparse ``csr_matrix``.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with a
negative weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
Returns
-------
self : object
Returns a fitted instance of self.
"""
X = validate_data(self, X, accept_sparse="csr")
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
# Calculate neighborhood for all samples. This leaves the original
# point in, which needs to be considered later (i.e. point i is in the
# neighborhood of point i. While true, it's useless information.)
if self.metric == "precomputed" and sparse.issparse(X):
# set the diagonal to explicit values, as a point is its own
# neighbor
X = X.copy() # copy to avoid in-place modification
with warnings.catch_warnings():
warnings.simplefilter("ignore", sparse.SparseEfficiencyWarning)
X.setdiag(X.diagonal())
neighbors_model = NearestNeighbors(
radius=self.eps,
algorithm=self.algorithm,
leaf_size=self.leaf_size,
metric=self.metric,
metric_params=self.metric_params,
p=self.p,
n_jobs=self.n_jobs,
)
neighbors_model.fit(X)
# This has worst case O(n^2) memory complexity
neighborhoods = neighbors_model.radius_neighbors(X, return_distance=False)
if sample_weight is None:
n_neighbors = np.array([len(neighbors) for neighbors in neighborhoods])
else:
n_neighbors = np.array(
[np.sum(sample_weight[neighbors]) for neighbors in neighborhoods]
)
# Initially, all samples are noise.
labels = np.full(X.shape[0], -1, dtype=np.intp)
# A list of all core samples found.
core_samples = np.asarray(n_neighbors >= self.min_samples, dtype=np.uint8)
dbscan_inner(core_samples, neighborhoods, labels)
self.core_sample_indices_ = np.where(core_samples)[0]
self.labels_ = labels
if len(self.core_sample_indices_):
# fix for scipy sparse indexing issue
self.components_ = X[self.core_sample_indices_].copy()
else:
# no core samples
self.components_ = np.empty((0, X.shape[1]))
return self
|
Perform DBSCAN clustering from features, or distance matrix.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features), or (n_samples, n_samples)
Training instances to cluster, or distances between instances if
``metric='precomputed'``. If a sparse matrix is provided, it will
be converted into a sparse ``csr_matrix``.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with a
negative weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
Returns
-------
self : object
Returns a fitted instance of self.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_dbscan.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_dbscan.py
|
BSD-3-Clause
|
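As the sample_weight documentation above states, a point whose weight is at least min_samples is a core sample on its own; a small illustrative check (weights chosen arbitrarily):

import numpy as np
from sklearn.cluster import dbscan

X = np.array([[0.0, 0.0], [50.0, 50.0]])

# Unweighted: neither isolated point reaches min_samples=3, so both are noise.
_, labels = dbscan(X, eps=1, min_samples=3)
print(labels)                                                  # [-1 -1]

# Giving the first point weight 3 makes it a core sample by itself.
_, labels = dbscan(X, eps=1, min_samples=3, sample_weight=np.array([3, 1]))
print(labels)                                                  # [ 0 -1]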
def transform(self, X):
"""
Transform a new matrix using the built clustering.
Parameters
----------
X : array-like of shape (n_samples, n_features) or \
(n_samples, n_samples)
An M by N array of M observations in N dimensions or a length
M array of M one-dimensional observations.
Returns
-------
Y : ndarray of shape (n_samples, n_clusters) or (n_clusters,)
The pooled values for each feature cluster.
"""
check_is_fitted(self)
X = validate_data(self, X, reset=False)
if self.pooling_func == np.mean and not issparse(X):
size = np.bincount(self.labels_)
n_samples = X.shape[0]
# a fast way to compute the mean of grouped features
nX = np.array(
[np.bincount(self.labels_, X[i, :]) / size for i in range(n_samples)]
)
else:
nX = [
self.pooling_func(X[:, self.labels_ == l], axis=1)
for l in np.unique(self.labels_)
]
nX = np.array(nX).T
return nX
|
Transform a new matrix using the built clustering.
Parameters
----------
X : array-like of shape (n_samples, n_features) or (n_samples, n_samples)
An M by N array of M observations in N dimensions or a length
M array of M one-dimensional observations.
Returns
-------
Y : ndarray of shape (n_samples, n_clusters) or (n_clusters,)
The pooled values for each feature cluster.
|
transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_feature_agglomeration.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_feature_agglomeration.py
|
BSD-3-Clause
|
def inverse_transform(self, X):
"""
Inverse the transformation and return a vector of size `n_features`.
Parameters
----------
X : array-like of shape (n_samples, n_clusters) or (n_clusters,)
The values to be assigned to each cluster of samples.
Returns
-------
X_original : ndarray of shape (n_samples, n_features) or (n_features,)
A vector of size `n_samples` with the values of `X` assigned to
each of the clusters of samples.
"""
check_is_fitted(self)
unil, inverse = np.unique(self.labels_, return_inverse=True)
return X[..., inverse]
|
Inverse the transformation and return a vector of size `n_features`.
Parameters
----------
X : array-like of shape (n_samples, n_clusters) or (n_clusters,)
The values to be assigned to each cluster of samples.
Returns
-------
X_original : ndarray of shape (n_samples, n_features) or (n_features,)
A vector of size `n_samples` with the values of `X` assigned to
each of the clusters of samples.
|
inverse_transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_feature_agglomeration.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_feature_agglomeration.py
|
BSD-3-Clause
|
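A brief sketch tying the two FeatureAgglomeration methods above together, pooling near-duplicate columns and broadcasting the pooled values back (toy data; assumes the default pooling_func=np.mean and ward linkage):

import numpy as np
from sklearn.cluster import FeatureAgglomeration

# Four features where columns 0/1 and 2/3 are near-duplicates.
X = np.array([[0.0, 0.1, 5.0, 5.1],
              [1.0, 1.1, 6.0, 6.2],
              [2.0, 2.1, 7.0, 7.1]])

agglo = FeatureAgglomeration(n_clusters=2).fit(X)
print(agglo.labels_)          # cluster index of each original feature

reduced = agglo.transform(X)  # shape (3, 2): mean of each feature cluster
restored = agglo.inverse_transform(reduced)
print(restored.shape)         # (3, 4): every feature receives its cluster's pooled value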
def kmeans_plusplus(
X,
n_clusters,
*,
sample_weight=None,
x_squared_norms=None,
random_state=None,
n_local_trials=None,
):
"""Init n_clusters seeds according to k-means++.
.. versionadded:: 0.24
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data to pick seeds from.
n_clusters : int
The number of centroids to initialize.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in `X`. If `None`, all observations
are assigned equal weight. `sample_weight` is ignored if `init`
is a callable or a user provided array.
.. versionadded:: 1.3
x_squared_norms : array-like of shape (n_samples,), default=None
Squared Euclidean norm of each data point.
random_state : int or RandomState instance, default=None
Determines random number generation for centroid initialization. Pass
an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
n_local_trials : int, default=None
The number of seeding trials for each center (except the first),
of which the one reducing inertia the most is greedily chosen.
Set to None to make the number of trials depend logarithmically
on the number of seeds (2+log(k)) which is the recommended setting.
Setting to 1 disables the greedy cluster selection and recovers the
vanilla k-means++ algorithm which was empirically shown to work less
well than its greedy variant.
Returns
-------
centers : ndarray of shape (n_clusters, n_features)
The initial centers for k-means.
indices : ndarray of shape (n_clusters,)
The index location of the chosen centers in the data array X. For a
given index and center, X[index] = center.
Notes
-----
Selects initial cluster centers for k-means clustering in a smart way
to speed up convergence. See: Arthur, D. and Vassilvitskii, S.
"k-means++: the advantages of careful seeding". ACM-SIAM symposium
on Discrete algorithms. 2007
Examples
--------
>>> from sklearn.cluster import kmeans_plusplus
>>> import numpy as np
>>> X = np.array([[1, 2], [1, 4], [1, 0],
... [10, 2], [10, 4], [10, 0]])
>>> centers, indices = kmeans_plusplus(X, n_clusters=2, random_state=0)
>>> centers
array([[10, 2],
[ 1, 0]])
>>> indices
array([3, 2])
"""
# Check data
check_array(X, accept_sparse="csr", dtype=[np.float64, np.float32])
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
if X.shape[0] < n_clusters:
raise ValueError(
f"n_samples={X.shape[0]} should be >= n_clusters={n_clusters}."
)
# Check parameters
if x_squared_norms is None:
x_squared_norms = row_norms(X, squared=True)
else:
x_squared_norms = check_array(x_squared_norms, dtype=X.dtype, ensure_2d=False)
if x_squared_norms.shape[0] != X.shape[0]:
raise ValueError(
f"The length of x_squared_norms {x_squared_norms.shape[0]} should "
f"be equal to the length of n_samples {X.shape[0]}."
)
random_state = check_random_state(random_state)
# Call private k-means++
centers, indices = _kmeans_plusplus(
X, n_clusters, x_squared_norms, sample_weight, random_state, n_local_trials
)
return centers, indices
|
Init n_clusters seeds according to k-means++.
.. versionadded:: 0.24
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data to pick seeds from.
n_clusters : int
The number of centroids to initialize.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in `X`. If `None`, all observations
are assigned equal weight. `sample_weight` is ignored if `init`
is a callable or a user provided array.
.. versionadded:: 1.3
x_squared_norms : array-like of shape (n_samples,), default=None
Squared Euclidean norm of each data point.
random_state : int or RandomState instance, default=None
Determines random number generation for centroid initialization. Pass
an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
n_local_trials : int, default=None
The number of seeding trials for each center (except the first),
of which the one reducing inertia the most is greedily chosen.
Set to None to make the number of trials depend logarithmically
on the number of seeds (2+log(k)) which is the recommended setting.
Setting to 1 disables the greedy cluster selection and recovers the
vanilla k-means++ algorithm which was empirically shown to work less
well than its greedy variant.
Returns
-------
centers : ndarray of shape (n_clusters, n_features)
The initial centers for k-means.
indices : ndarray of shape (n_clusters,)
The index location of the chosen centers in the data array X. For a
given index and center, X[index] = center.
Notes
-----
Selects initial cluster centers for k-means clustering in a smart way
to speed up convergence. See: Arthur, D. and Vassilvitskii, S.
"k-means++: the advantages of careful seeding". ACM-SIAM symposium
on Discrete algorithms. 2007
Examples
--------
>>> from sklearn.cluster import kmeans_plusplus
>>> import numpy as np
>>> X = np.array([[1, 2], [1, 4], [1, 0],
... [10, 2], [10, 4], [10, 0]])
>>> centers, indices = kmeans_plusplus(X, n_clusters=2, random_state=0)
>>> centers
array([[10, 2],
[ 1, 0]])
>>> indices
array([3, 2])
|
kmeans_plusplus
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_kmeans.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_kmeans.py
|
BSD-3-Clause
|
def _kmeans_plusplus(
X, n_clusters, x_squared_norms, sample_weight, random_state, n_local_trials=None
):
"""Computational component for initialization of n_clusters by
k-means++. Prior validation of data is assumed.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The data to pick seeds for.
n_clusters : int
The number of seeds to choose.
sample_weight : ndarray of shape (n_samples,)
The weights for each observation in `X`.
x_squared_norms : ndarray of shape (n_samples,)
Squared Euclidean norm of each data point.
random_state : RandomState instance
The generator used to initialize the centers.
See :term:`Glossary <random_state>`.
n_local_trials : int, default=None
The number of seeding trials for each center (except the first),
of which the one reducing inertia the most is greedily chosen.
Set to None to make the number of trials depend logarithmically
on the number of seeds (2+log(k)); this is the default.
Returns
-------
centers : ndarray of shape (n_clusters, n_features)
The initial centers for k-means.
indices : ndarray of shape (n_clusters,)
The index location of the chosen centers in the data array X. For a
given index and center, X[index] = center.
"""
n_samples, n_features = X.shape
centers = np.empty((n_clusters, n_features), dtype=X.dtype)
# Set the number of local seeding trials if none is given
if n_local_trials is None:
# This is what Arthur/Vassilvitskii tried, but did not report
# specific results for other than mentioning in the conclusion
# that it helped.
n_local_trials = 2 + int(np.log(n_clusters))
# Pick first center randomly and track index of point
center_id = random_state.choice(n_samples, p=sample_weight / sample_weight.sum())
indices = np.full(n_clusters, -1, dtype=int)
if sp.issparse(X):
centers[0] = X[[center_id]].toarray()
else:
centers[0] = X[center_id]
indices[0] = center_id
# Initialize list of closest distances and calculate current potential
closest_dist_sq = _euclidean_distances(
centers[0, np.newaxis], X, Y_norm_squared=x_squared_norms, squared=True
)
current_pot = closest_dist_sq @ sample_weight
# Pick the remaining n_clusters-1 points
for c in range(1, n_clusters):
# Choose center candidates by sampling with probability proportional
# to the squared distance to the closest existing center
rand_vals = random_state.uniform(size=n_local_trials) * current_pot
candidate_ids = np.searchsorted(
stable_cumsum(sample_weight * closest_dist_sq), rand_vals
)
# XXX: numerical imprecision can result in a candidate_id out of range
np.clip(candidate_ids, None, closest_dist_sq.size - 1, out=candidate_ids)
# Compute distances to center candidates
distance_to_candidates = _euclidean_distances(
X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True
)
# update closest distances squared and potential for each candidate
np.minimum(closest_dist_sq, distance_to_candidates, out=distance_to_candidates)
candidates_pot = distance_to_candidates @ sample_weight.reshape(-1, 1)
# Decide which candidate is the best
best_candidate = np.argmin(candidates_pot)
current_pot = candidates_pot[best_candidate]
closest_dist_sq = distance_to_candidates[best_candidate]
best_candidate = candidate_ids[best_candidate]
# Permanently add best center candidate found in local tries
if sp.issparse(X):
centers[c] = X[[best_candidate]].toarray()
else:
centers[c] = X[best_candidate]
indices[c] = best_candidate
return centers, indices
|
Computational component for initialization of n_clusters by
k-means++. Prior validation of data is assumed.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The data to pick seeds for.
n_clusters : int
The number of seeds to choose.
sample_weight : ndarray of shape (n_samples,)
The weights for each observation in `X`.
x_squared_norms : ndarray of shape (n_samples,)
Squared Euclidean norm of each data point.
random_state : RandomState instance
The generator used to initialize the centers.
See :term:`Glossary <random_state>`.
n_local_trials : int, default=None
The number of seeding trials for each center (except the first),
of which the one reducing inertia the most is greedily chosen.
Set to None to make the number of trials depend logarithmically
on the number of seeds (2+log(k)); this is the default.
Returns
-------
centers : ndarray of shape (n_clusters, n_features)
The initial centers for k-means.
indices : ndarray of shape (n_clusters,)
The index location of the chosen centers in the data array X. For a
given index and center, X[index] = center.
|
_kmeans_plusplus
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_kmeans.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_kmeans.py
|
BSD-3-Clause
|
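A compact NumPy sketch of the D^2 sampling idea that _kmeans_plusplus above implements; it omits the greedy local trials and sample weights, so it is a simplification rather than the library's exact routine:

import numpy as np

def simple_kmeans_plusplus(X, n_clusters, rng):
    # Pick seeds with probability proportional to the squared distance
    # to the nearest already-chosen seed (plain D^2 sampling).
    n_samples = X.shape[0]
    centers = np.empty((n_clusters, X.shape[1]), dtype=X.dtype)
    centers[0] = X[rng.integers(n_samples)]
    closest_sq = ((X - centers[0]) ** 2).sum(axis=1)
    for c in range(1, n_clusters):
        idx = rng.choice(n_samples, p=closest_sq / closest_sq.sum())
        centers[c] = X[idx]
        closest_sq = np.minimum(closest_sq, ((X - centers[c]) ** 2).sum(axis=1))
    return centers

rng = np.random.default_rng(0)
X = np.vstack([rng.normal(0, 1, (50, 2)), rng.normal(8, 1, (50, 2))])
print(simple_kmeans_plusplus(X, 2, rng))  # usually one seed from each blob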
def _tolerance(X, tol):
"""Return a tolerance which is dependent on the dataset."""
if tol == 0:
return 0
if sp.issparse(X):
variances = mean_variance_axis(X, axis=0)[1]
else:
variances = np.var(X, axis=0)
return np.mean(variances) * tol
|
Return a tolerance which is dependent on the dataset.
|
_tolerance
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_kmeans.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_kmeans.py
|
BSD-3-Clause
|
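The _tolerance helper above scales tol by the mean per-feature variance; a quick check of that relationship on toy data (dense branch only):

import numpy as np

X = np.array([[1.0, 2.0], [1.0, 4.0], [1.0, 0.0],
              [10.0, 2.0], [10.0, 4.0], [10.0, 0.0]])
tol = 1e-4

# Same computation as the dense branch: mean of per-feature variances times tol.
effective_tol = np.mean(np.var(X, axis=0)) * tol
print(effective_tol)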
def k_means(
X,
n_clusters,
*,
sample_weight=None,
init="k-means++",
n_init="auto",
max_iter=300,
verbose=False,
tol=1e-4,
random_state=None,
copy_x=True,
algorithm="lloyd",
return_n_iter=False,
):
"""Perform K-means clustering algorithm.
Read more in the :ref:`User Guide <k_means>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The observations to cluster. It must be noted that the data
will be converted to C ordering, which will cause a memory copy
if the given data is not C-contiguous.
n_clusters : int
The number of clusters to form as well as the number of
centroids to generate.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in `X`. If `None`, all observations
are assigned equal weight. `sample_weight` is not used during
initialization if `init` is a callable or a user provided array.
init : {'k-means++', 'random'}, callable or array-like of shape \
(n_clusters, n_features), default='k-means++'
Method for initialization:
- `'k-means++'` : selects initial cluster centers for k-means
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
- `'random'`: choose `n_clusters` observations (rows) at random from data
for the initial centroids.
- If an array is passed, it should be of shape `(n_clusters, n_features)`
and gives the initial centers.
- If a callable is passed, it should take arguments `X`, `n_clusters` and a
random state and return an initialization.
n_init : 'auto' or int, default="auto"
Number of times the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
When `n_init='auto'`, the number of runs depends on the value of init:
10 if using `init='random'` or `init` is a callable;
1 if using `init='k-means++'` or `init` is an array-like.
.. versionadded:: 1.2
Added 'auto' option for `n_init`.
.. versionchanged:: 1.4
Default value for `n_init` changed to `'auto'`.
max_iter : int, default=300
Maximum number of iterations of the k-means algorithm to run.
verbose : bool, default=False
Verbosity mode.
tol : float, default=1e-4
Relative tolerance with regards to Frobenius norm of the difference
in the cluster centers of two consecutive iterations to declare
convergence.
random_state : int, RandomState instance or None, default=None
Determines random number generation for centroid initialization. Use
an int to make the randomness deterministic.
See :term:`Glossary <random_state>`.
copy_x : bool, default=True
When pre-computing distances it is more numerically accurate to center
the data first. If `copy_x` is True (default), then the original data is
not modified. If False, the original data is modified, and put back
before the function returns, but small numerical differences may be
introduced by subtracting and then adding the data mean. Note that if
the original data is not C-contiguous, a copy will be made even if
`copy_x` is False. If the original data is sparse, but not in CSR format,
a copy will be made even if `copy_x` is False.
algorithm : {"lloyd", "elkan"}, default="lloyd"
K-means algorithm to use. The classical EM-style algorithm is `"lloyd"`.
The `"elkan"` variation can be more efficient on some datasets with
well-defined clusters, by using the triangle inequality. However it's
more memory intensive due to the allocation of an extra array of shape
`(n_samples, n_clusters)`.
.. versionchanged:: 0.18
Added Elkan algorithm
.. versionchanged:: 1.1
Renamed "full" to "lloyd", and deprecated "auto" and "full".
Changed "auto" to use "lloyd" instead of "elkan".
return_n_iter : bool, default=False
Whether or not to return the number of iterations.
Returns
-------
centroid : ndarray of shape (n_clusters, n_features)
Centroids found at the last iteration of k-means.
label : ndarray of shape (n_samples,)
The `label[i]` is the code or index of the centroid the
i'th observation is closest to.
inertia : float
The final value of the inertia criterion (sum of squared distances to
the closest centroid for all observations in the training set).
best_n_iter : int
Number of iterations corresponding to the best results.
Returned only if `return_n_iter` is set to True.
Examples
--------
>>> import numpy as np
>>> from sklearn.cluster import k_means
>>> X = np.array([[1, 2], [1, 4], [1, 0],
... [10, 2], [10, 4], [10, 0]])
>>> centroid, label, inertia = k_means(
... X, n_clusters=2, n_init="auto", random_state=0
... )
>>> centroid
array([[10., 2.],
[ 1., 2.]])
>>> label
array([1, 1, 1, 0, 0, 0], dtype=int32)
>>> inertia
16.0
"""
est = KMeans(
n_clusters=n_clusters,
init=init,
n_init=n_init,
max_iter=max_iter,
verbose=verbose,
tol=tol,
random_state=random_state,
copy_x=copy_x,
algorithm=algorithm,
).fit(X, sample_weight=sample_weight)
if return_n_iter:
return est.cluster_centers_, est.labels_, est.inertia_, est.n_iter_
else:
return est.cluster_centers_, est.labels_, est.inertia_
|
Perform K-means clustering algorithm.
Read more in the :ref:`User Guide <k_means>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The observations to cluster. It must be noted that the data
will be converted to C ordering, which will cause a memory copy
if the given data is not C-contiguous.
n_clusters : int
The number of clusters to form as well as the number of
centroids to generate.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in `X`. If `None`, all observations
are assigned equal weight. `sample_weight` is not used during
initialization if `init` is a callable or a user provided array.
init : {'k-means++', 'random'}, callable or array-like of shape (n_clusters, n_features), default='k-means++'
Method for initialization:
- `'k-means++'` : selects initial cluster centers for k-means
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
- `'random'`: choose `n_clusters` observations (rows) at random from data
for the initial centroids.
- If an array is passed, it should be of shape `(n_clusters, n_features)`
and gives the initial centers.
- If a callable is passed, it should take arguments `X`, `n_clusters` and a
random state and return an initialization.
n_init : 'auto' or int, default="auto"
Number of times the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
When `n_init='auto'`, the number of runs depends on the value of init:
10 if using `init='random'` or `init` is a callable;
1 if using `init='k-means++'` or `init` is an array-like.
.. versionadded:: 1.2
Added 'auto' option for `n_init`.
.. versionchanged:: 1.4
Default value for `n_init` changed to `'auto'`.
max_iter : int, default=300
Maximum number of iterations of the k-means algorithm to run.
verbose : bool, default=False
Verbosity mode.
tol : float, default=1e-4
Relative tolerance with regards to Frobenius norm of the difference
in the cluster centers of two consecutive iterations to declare
convergence.
random_state : int, RandomState instance or None, default=None
Determines random number generation for centroid initialization. Use
an int to make the randomness deterministic.
See :term:`Glossary <random_state>`.
copy_x : bool, default=True
When pre-computing distances it is more numerically accurate to center
the data first. If `copy_x` is True (default), then the original data is
not modified. If False, the original data is modified, and put back
before the function returns, but small numerical differences may be
introduced by subtracting and then adding the data mean. Note that if
the original data is not C-contiguous, a copy will be made even if
`copy_x` is False. If the original data is sparse, but not in CSR format,
a copy will be made even if `copy_x` is False.
algorithm : {"lloyd", "elkan"}, default="lloyd"
K-means algorithm to use. The classical EM-style algorithm is `"lloyd"`.
The `"elkan"` variation can be more efficient on some datasets with
well-defined clusters, by using the triangle inequality. However it's
more memory intensive due to the allocation of an extra array of shape
`(n_samples, n_clusters)`.
.. versionchanged:: 0.18
Added Elkan algorithm
.. versionchanged:: 1.1
Renamed "full" to "lloyd", and deprecated "auto" and "full".
Changed "auto" to use "lloyd" instead of "elkan".
return_n_iter : bool, default=False
Whether or not to return the number of iterations.
Returns
-------
centroid : ndarray of shape (n_clusters, n_features)
Centroids found at the last iteration of k-means.
label : ndarray of shape (n_samples,)
The `label[i]` is the code or index of the centroid the
i'th observation is closest to.
inertia : float
The final value of the inertia criterion (sum of squared distances to
the closest centroid for all observations in the training set).
best_n_iter : int
Number of iterations corresponding to the best results.
Returned only if `return_n_iter` is set to True.
Examples
--------
>>> import numpy as np
>>> from sklearn.cluster import k_means
>>> X = np.array([[1, 2], [1, 4], [1, 0],
... [10, 2], [10, 4], [10, 0]])
>>> centroid, label, inertia = k_means(
... X, n_clusters=2, n_init="auto", random_state=0
... )
>>> centroid
array([[10., 2.],
[ 1., 2.]])
>>> label
array([1, 1, 1, 0, 0, 0], dtype=int32)
>>> inertia
16.0
|
k_means
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_kmeans.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_kmeans.py
|
BSD-3-Clause
|
def _kmeans_single_elkan(
X,
sample_weight,
centers_init,
max_iter=300,
verbose=False,
tol=1e-4,
n_threads=1,
):
"""A single run of k-means elkan, assumes preparation completed prior.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The observations to cluster. If sparse matrix, must be in CSR format.
sample_weight : array-like of shape (n_samples,)
The weights for each observation in X.
centers_init : ndarray of shape (n_clusters, n_features)
The initial centers.
max_iter : int, default=300
Maximum number of iterations of the k-means algorithm to run.
verbose : bool, default=False
Verbosity mode.
tol : float, default=1e-4
Relative tolerance with regards to Frobenius norm of the difference
in the cluster centers of two consecutive iterations to declare
convergence.
It's not advised to set `tol=0` since convergence might never be
declared due to rounding errors. Use a very small number instead.
n_threads : int, default=1
The number of OpenMP threads to use for the computation. Parallelism is
sample-wise on the main cython loop which assigns each sample to its
closest center.
Returns
-------
centroid : ndarray of shape (n_clusters, n_features)
Centroids found at the last iteration of k-means.
label : ndarray of shape (n_samples,)
label[i] is the code or index of the centroid the
i'th observation is closest to.
inertia : float
The final value of the inertia criterion (sum of squared distances to
the closest centroid for all observations in the training set).
n_iter : int
Number of iterations run.
"""
n_samples = X.shape[0]
n_clusters = centers_init.shape[0]
# Buffers to avoid new allocations at each iteration.
centers = centers_init
centers_new = np.zeros_like(centers)
weight_in_clusters = np.zeros(n_clusters, dtype=X.dtype)
labels = np.full(n_samples, -1, dtype=np.int32)
labels_old = labels.copy()
center_half_distances = euclidean_distances(centers) / 2
distance_next_center = np.partition(
np.asarray(center_half_distances), kth=1, axis=0
)[1]
upper_bounds = np.zeros(n_samples, dtype=X.dtype)
lower_bounds = np.zeros((n_samples, n_clusters), dtype=X.dtype)
center_shift = np.zeros(n_clusters, dtype=X.dtype)
if sp.issparse(X):
init_bounds = init_bounds_sparse
elkan_iter = elkan_iter_chunked_sparse
_inertia = _inertia_sparse
else:
init_bounds = init_bounds_dense
elkan_iter = elkan_iter_chunked_dense
_inertia = _inertia_dense
init_bounds(
X,
centers,
center_half_distances,
labels,
upper_bounds,
lower_bounds,
n_threads=n_threads,
)
strict_convergence = False
for i in range(max_iter):
elkan_iter(
X,
sample_weight,
centers,
centers_new,
weight_in_clusters,
center_half_distances,
distance_next_center,
upper_bounds,
lower_bounds,
labels,
center_shift,
n_threads,
)
# compute new pairwise distances between centers and closest other
# center of each center for next iterations
center_half_distances = euclidean_distances(centers_new) / 2
distance_next_center = np.partition(
np.asarray(center_half_distances), kth=1, axis=0
)[1]
if verbose:
inertia = _inertia(X, sample_weight, centers, labels, n_threads)
print(f"Iteration {i}, inertia {inertia}")
centers, centers_new = centers_new, centers
if np.array_equal(labels, labels_old):
# First check the labels for strict convergence.
if verbose:
print(f"Converged at iteration {i}: strict convergence.")
strict_convergence = True
break
else:
# No strict convergence, check for tol based convergence.
center_shift_tot = (center_shift**2).sum()
if center_shift_tot <= tol:
if verbose:
print(
f"Converged at iteration {i}: center shift "
f"{center_shift_tot} within tolerance {tol}."
)
break
labels_old[:] = labels
if not strict_convergence:
# rerun E-step so that predicted labels match cluster centers
elkan_iter(
X,
sample_weight,
centers,
centers,
weight_in_clusters,
center_half_distances,
distance_next_center,
upper_bounds,
lower_bounds,
labels,
center_shift,
n_threads,
update_centers=False,
)
inertia = _inertia(X, sample_weight, centers, labels, n_threads)
return labels, inertia, centers, i + 1
|
A single run of k-means elkan, assumes preparation completed prior.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The observations to cluster. If sparse matrix, must be in CSR format.
sample_weight : array-like of shape (n_samples,)
The weights for each observation in X.
centers_init : ndarray of shape (n_clusters, n_features)
The initial centers.
max_iter : int, default=300
Maximum number of iterations of the k-means algorithm to run.
verbose : bool, default=False
Verbosity mode.
tol : float, default=1e-4
Relative tolerance with regards to Frobenius norm of the difference
in the cluster centers of two consecutive iterations to declare
convergence.
It's not advised to set `tol=0` since convergence might never be
declared due to rounding errors. Use a very small number instead.
n_threads : int, default=1
The number of OpenMP threads to use for the computation. Parallelism is
sample-wise on the main cython loop which assigns each sample to its
closest center.
Returns
-------
centroid : ndarray of shape (n_clusters, n_features)
Centroids found at the last iteration of k-means.
label : ndarray of shape (n_samples,)
label[i] is the code or index of the centroid the
i'th observation is closest to.
inertia : float
The final value of the inertia criterion (sum of squared distances to
the closest centroid for all observations in the training set).
n_iter : int
Number of iterations run.
|
_kmeans_single_elkan
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_kmeans.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_kmeans.py
|
BSD-3-Clause
|
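The Elkan variant above prunes distance computations with the triangle inequality: when the distance from a sample to its assigned center is at most half the distance between that center and another center, the other center cannot be closer. A tiny numeric check of that bound (illustrative values only):

import numpy as np

x = np.array([0.0, 0.0])
c_assigned = np.array([1.0, 0.0])
c_other = np.array([9.0, 0.0])

d_x_assigned = np.linalg.norm(x - c_assigned)                # 1.0
half_center_gap = np.linalg.norm(c_assigned - c_other) / 2   # 4.0

# d(x, other) >= d(assigned, other) - d(x, assigned) >= d(x, assigned),
# so c_other can be skipped without ever computing d(x, other).
assert d_x_assigned <= half_center_gap
assert np.linalg.norm(x - c_other) >= d_x_assigned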
def _kmeans_single_lloyd(
X,
sample_weight,
centers_init,
max_iter=300,
verbose=False,
tol=1e-4,
n_threads=1,
):
"""A single run of k-means lloyd, assumes preparation completed prior.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The observations to cluster. If sparse matrix, must be in CSR format.
sample_weight : ndarray of shape (n_samples,)
The weights for each observation in X.
centers_init : ndarray of shape (n_clusters, n_features)
The initial centers.
max_iter : int, default=300
Maximum number of iterations of the k-means algorithm to run.
verbose : bool, default=False
Verbosity mode
tol : float, default=1e-4
Relative tolerance with regards to Frobenius norm of the difference
in the cluster centers of two consecutive iterations to declare
convergence.
It's not advised to set `tol=0` since convergence might never be
declared due to rounding errors. Use a very small number instead.
n_threads : int, default=1
The number of OpenMP threads to use for the computation. Parallelism is
sample-wise on the main cython loop which assigns each sample to its
closest center.
Returns
-------
centroid : ndarray of shape (n_clusters, n_features)
Centroids found at the last iteration of k-means.
label : ndarray of shape (n_samples,)
label[i] is the code or index of the centroid the
i'th observation is closest to.
inertia : float
The final value of the inertia criterion (sum of squared distances to
the closest centroid for all observations in the training set).
n_iter : int
Number of iterations run.
"""
n_clusters = centers_init.shape[0]
# Buffers to avoid new allocations at each iteration.
centers = centers_init
centers_new = np.zeros_like(centers)
labels = np.full(X.shape[0], -1, dtype=np.int32)
labels_old = labels.copy()
weight_in_clusters = np.zeros(n_clusters, dtype=X.dtype)
center_shift = np.zeros(n_clusters, dtype=X.dtype)
if sp.issparse(X):
lloyd_iter = lloyd_iter_chunked_sparse
_inertia = _inertia_sparse
else:
lloyd_iter = lloyd_iter_chunked_dense
_inertia = _inertia_dense
strict_convergence = False
for i in range(max_iter):
lloyd_iter(
X,
sample_weight,
centers,
centers_new,
weight_in_clusters,
labels,
center_shift,
n_threads,
)
if verbose:
inertia = _inertia(X, sample_weight, centers, labels, n_threads)
print(f"Iteration {i}, inertia {inertia}.")
centers, centers_new = centers_new, centers
if np.array_equal(labels, labels_old):
# First check the labels for strict convergence.
if verbose:
print(f"Converged at iteration {i}: strict convergence.")
strict_convergence = True
break
else:
# No strict convergence, check for tol based convergence.
center_shift_tot = (center_shift**2).sum()
if center_shift_tot <= tol:
if verbose:
print(
f"Converged at iteration {i}: center shift "
f"{center_shift_tot} within tolerance {tol}."
)
break
labels_old[:] = labels
if not strict_convergence:
# rerun E-step so that predicted labels match cluster centers
lloyd_iter(
X,
sample_weight,
centers,
centers,
weight_in_clusters,
labels,
center_shift,
n_threads,
update_centers=False,
)
inertia = _inertia(X, sample_weight, centers, labels, n_threads)
return labels, inertia, centers, i + 1
|
A single run of k-means lloyd, assumes preparation completed prior.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The observations to cluster. If sparse matrix, must be in CSR format.
sample_weight : ndarray of shape (n_samples,)
The weights for each observation in X.
centers_init : ndarray of shape (n_clusters, n_features)
The initial centers.
max_iter : int, default=300
Maximum number of iterations of the k-means algorithm to run.
verbose : bool, default=False
Verbosity mode
tol : float, default=1e-4
Relative tolerance with regards to Frobenius norm of the difference
in the cluster centers of two consecutive iterations to declare
convergence.
It's not advised to set `tol=0` since convergence might never be
declared due to rounding errors. Use a very small number instead.
n_threads : int, default=1
The number of OpenMP threads to use for the computation. Parallelism is
sample-wise on the main cython loop which assigns each sample to its
closest center.
Returns
-------
centroid : ndarray of shape (n_clusters, n_features)
Centroids found at the last iteration of k-means.
label : ndarray of shape (n_samples,)
label[i] is the code or index of the centroid the
i'th observation is closest to.
inertia : float
The final value of the inertia criterion (sum of squared distances to
the closest centroid for all observations in the training set).
n_iter : int
Number of iterations run.
|
_kmeans_single_lloyd
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_kmeans.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_kmeans.py
|
BSD-3-Clause
|
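For orientation, a minimal pure-NumPy version of one Lloyd iteration (assignment step followed by center update); it ignores sample weights, chunking and sparse input, so it only illustrates the structure of the chunked Cython routine used above:

import numpy as np

def lloyd_step(X, centers):
    # E-step: label each sample with its nearest center.
    d2 = ((X[:, None, :] - centers[None, :, :]) ** 2).sum(axis=2)
    labels = d2.argmin(axis=1)
    inertia = d2[np.arange(X.shape[0]), labels].sum()
    # M-step: move each center to the mean of its assigned samples
    # (empty clusters are simply left where they were).
    new_centers = centers.copy()
    for k in range(centers.shape[0]):
        mask = labels == k
        if mask.any():
            new_centers[k] = X[mask].mean(axis=0)
    return labels, new_centers, inertia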
def _labels_inertia(X, sample_weight, centers, n_threads=1, return_inertia=True):
"""E step of the K-means EM algorithm.
Compute the labels and the inertia of the given samples and centers.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The input samples to assign to the labels. If sparse matrix, must
be in CSR format.
sample_weight : ndarray of shape (n_samples,)
The weights for each observation in X.
centers : ndarray of shape (n_clusters, n_features)
The cluster centers.
n_threads : int, default=1
The number of OpenMP threads to use for the computation. Parallelism is
sample-wise on the main cython loop which assigns each sample to its
closest center.
return_inertia : bool, default=True
Whether to compute and return the inertia.
Returns
-------
labels : ndarray of shape (n_samples,)
The resulting assignment.
inertia : float
Sum of squared distances of samples to their closest cluster center.
Inertia is only returned if return_inertia is True.
"""
n_samples = X.shape[0]
n_clusters = centers.shape[0]
labels = np.full(n_samples, -1, dtype=np.int32)
center_shift = np.zeros(n_clusters, dtype=centers.dtype)
if sp.issparse(X):
_labels = lloyd_iter_chunked_sparse
_inertia = _inertia_sparse
else:
_labels = lloyd_iter_chunked_dense
_inertia = _inertia_dense
_labels(
X,
sample_weight,
centers,
centers_new=None,
weight_in_clusters=None,
labels=labels,
center_shift=center_shift,
n_threads=n_threads,
update_centers=False,
)
if return_inertia:
inertia = _inertia(X, sample_weight, centers, labels, n_threads)
return labels, inertia
return labels
|
E step of the K-means EM algorithm.
Compute the labels and the inertia of the given samples and centers.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The input samples to assign to the labels. If sparse matrix, must
be in CSR format.
sample_weight : ndarray of shape (n_samples,)
The weights for each observation in X.
centers : ndarray of shape (n_clusters, n_features)
The cluster centers.
n_threads : int, default=1
The number of OpenMP threads to use for the computation. Parallelism is
sample-wise on the main cython loop which assigns each sample to its
closest center.
return_inertia : bool, default=True
Whether to compute and return the inertia.
Returns
-------
labels : ndarray of shape (n_samples,)
The resulting assignment.
inertia : float
Sum of squared distances of samples to their closest cluster center.
Inertia is only returned if return_inertia is True.
|
_labels_inertia
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_kmeans.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_kmeans.py
|
BSD-3-Clause
|
def _warn_mkl_vcomp(self, n_active_threads):
"""Issue an estimator specific warning when vcomp and mkl are both present
This method is called by `_check_mkl_vcomp`.
"""
|
Issue an estimator specific warning when vcomp and mkl are both present
This method is called by `_check_mkl_vcomp`.
|
_warn_mkl_vcomp
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_kmeans.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_kmeans.py
|
BSD-3-Clause
|
def _check_mkl_vcomp(self, X, n_samples):
"""Check when vcomp and mkl are both present"""
# The BLAS call inside a prange in lloyd_iter_chunked_dense is known to
# cause a small memory leak when there are less chunks than the number
# of available threads. It only happens when the OpenMP library is
# vcomp (microsoft OpenMP) and the BLAS library is MKL. see #18653
if sp.issparse(X):
return
n_active_threads = int(np.ceil(n_samples / CHUNK_SIZE))
if n_active_threads < self._n_threads:
modules = _get_threadpool_controller().info()
has_vcomp = "vcomp" in [module["prefix"] for module in modules]
has_mkl = ("mkl", "intel") in [
(module["internal_api"], module.get("threading_layer", None))
for module in modules
]
if has_vcomp and has_mkl:
self._warn_mkl_vcomp(n_active_threads)
|
Check when vcomp and mkl are both present
|
_check_mkl_vcomp
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_kmeans.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_kmeans.py
|
BSD-3-Clause
|
def _validate_center_shape(self, X, centers):
"""Check if centers is compatible with X and n_clusters."""
if centers.shape[0] != self.n_clusters:
raise ValueError(
f"The shape of the initial centers {centers.shape} does not "
f"match the number of clusters {self.n_clusters}."
)
if centers.shape[1] != X.shape[1]:
raise ValueError(
f"The shape of the initial centers {centers.shape} does not "
f"match the number of features of the data {X.shape[1]}."
)
|
Check if centers is compatible with X and n_clusters.
|
_validate_center_shape
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_kmeans.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_kmeans.py
|
BSD-3-Clause
|
def _init_centroids(
self,
X,
x_squared_norms,
init,
random_state,
sample_weight,
init_size=None,
n_centroids=None,
):
"""Compute the initial centroids.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The input samples.
x_squared_norms : ndarray of shape (n_samples,)
Squared euclidean norm of each data point. Pass it if you have it
at hand already to avoid it being recomputed here.
init : {'k-means++', 'random'}, callable or ndarray of shape \
(n_clusters, n_features)
Method for initialization.
random_state : RandomState instance
Determines random number generation for centroid initialization.
See :term:`Glossary <random_state>`.
sample_weight : ndarray of shape (n_samples,)
The weights for each observation in X. `sample_weight` is not used
during initialization if `init` is a callable or a user provided
array.
init_size : int, default=None
Number of samples to randomly sample for speeding up the
initialization (sometimes at the expense of accuracy).
n_centroids : int, default=None
Number of centroids to initialize.
If left to 'None', the number of centroids will be equal to the
number of clusters to form (self.n_clusters).
Returns
-------
centers : ndarray of shape (n_clusters, n_features)
Initial centroids of clusters.
"""
n_samples = X.shape[0]
n_clusters = self.n_clusters if n_centroids is None else n_centroids
if init_size is not None and init_size < n_samples:
init_indices = random_state.randint(0, n_samples, init_size)
X = X[init_indices]
x_squared_norms = x_squared_norms[init_indices]
n_samples = X.shape[0]
sample_weight = sample_weight[init_indices]
if isinstance(init, str) and init == "k-means++":
centers, _ = _kmeans_plusplus(
X,
n_clusters,
random_state=random_state,
x_squared_norms=x_squared_norms,
sample_weight=sample_weight,
)
elif isinstance(init, str) and init == "random":
seeds = random_state.choice(
n_samples,
size=n_clusters,
replace=False,
p=sample_weight / sample_weight.sum(),
)
centers = X[seeds]
elif _is_arraylike_not_scalar(self.init):
centers = init
elif callable(init):
centers = init(X, n_clusters, random_state=random_state)
centers = check_array(centers, dtype=X.dtype, copy=False, order="C")
self._validate_center_shape(X, centers)
if sp.issparse(centers):
centers = centers.toarray()
return centers
|
Compute the initial centroids.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The input samples.
x_squared_norms : ndarray of shape (n_samples,)
Squared euclidean norm of each data point. Pass it if you have it
at hand already to avoid it being recomputed here.
init : {'k-means++', 'random'}, callable or ndarray of shape (n_clusters, n_features)
Method for initialization.
random_state : RandomState instance
Determines random number generation for centroid initialization.
See :term:`Glossary <random_state>`.
sample_weight : ndarray of shape (n_samples,)
The weights for each observation in X. `sample_weight` is not used
during initialization if `init` is a callable or a user provided
array.
init_size : int, default=None
Number of samples to randomly sample for speeding up the
initialization (sometimes at the expense of accuracy).
n_centroids : int, default=None
Number of centroids to initialize.
If left to 'None', the number of centroids will be equal to the
number of clusters to form (self.n_clusters).
Returns
-------
centers : ndarray of shape (n_clusters, n_features)
Initial centroids of clusters.
|
_init_centroids
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_kmeans.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_kmeans.py
|
BSD-3-Clause
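A minimal sketch, assuming toy data, of how the init strategies dispatched by `_init_centroids` are reached through the public `KMeans` constructor; the data and parameter values are illustrative only.

import numpy as np
from sklearn.cluster import KMeans

X = np.array([[1, 0], [1, 2], [1, 4], [10, 0], [10, 2], [10, 4]], dtype=float)

# 'k-means++' and 'random' both go through the sampling logic in _init_centroids.
km_pp = KMeans(n_clusters=2, init="k-means++", n_init=1, random_state=0).fit(X)

# A user-provided array skips the sampling and is only validated for shape.
init_centers = np.array([[1.0, 2.0], [10.0, 2.0]])
km_arr = KMeans(n_clusters=2, init=init_centers, n_init=1).fit(X)

print(km_pp.cluster_centers_)
print(km_arr.cluster_centers_)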
|
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
New data to predict.
Returns
-------
labels : ndarray of shape (n_samples,)
Index of the cluster each sample belongs to.
"""
check_is_fitted(self)
X = self._check_test_data(X)
# sample weights are not used by predict but cython helpers expect an array
sample_weight = np.ones(X.shape[0], dtype=X.dtype)
labels = _labels_inertia_threadpool_limit(
X,
sample_weight,
self.cluster_centers_,
n_threads=self._n_threads,
return_inertia=False,
)
return labels
|
Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
New data to predict.
Returns
-------
labels : ndarray of shape (n_samples,)
Index of the cluster each sample belongs to.
|
predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_kmeans.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_kmeans.py
|
BSD-3-Clause
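A minimal usage sketch for the `predict` method above, assuming a small toy dataset:

import numpy as np
from sklearn.cluster import KMeans

X = np.array([[0, 0], [0, 1], [10, 10], [10, 11]], dtype=float)
km = KMeans(n_clusters=2, n_init=1, random_state=0).fit(X)

# New points are assigned to the nearest entry of the learned "code book".
print(km.predict(np.array([[0.5, 0.5], [9.0, 10.5]])))  # two different labels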
|
def transform(self, X):
"""Transform X to a cluster-distance space.
In the new space, each dimension is the distance to the cluster
centers. Note that even if X is sparse, the array returned by
`transform` will typically be dense.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
New data to transform.
Returns
-------
X_new : ndarray of shape (n_samples, n_clusters)
X transformed in the new space.
"""
check_is_fitted(self)
X = self._check_test_data(X)
return self._transform(X)
|
Transform X to a cluster-distance space.
In the new space, each dimension is the distance to the cluster
centers. Note that even if X is sparse, the array returned by
`transform` will typically be dense.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
New data to transform.
Returns
-------
X_new : ndarray of shape (n_samples, n_clusters)
X transformed in the new space.
|
transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_kmeans.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_kmeans.py
|
BSD-3-Clause
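A minimal usage sketch for `transform`, assuming toy data; it illustrates that the output has one distance column per cluster center:

import numpy as np
from sklearn.cluster import KMeans

X = np.array([[0, 0], [0, 2], [8, 8], [8, 10]], dtype=float)
km = KMeans(n_clusters=2, n_init=1, random_state=0).fit(X)

# Each row holds the distances from one sample to every cluster center.
D = km.transform(X)
print(D.shape)  # (4, 2)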
|
def score(self, X, y=None, sample_weight=None):
"""Opposite of the value of X on the K-means objective.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
New data.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight.
Returns
-------
score : float
Opposite of the value of X on the K-means objective.
"""
check_is_fitted(self)
X = self._check_test_data(X)
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
_, scores = _labels_inertia_threadpool_limit(
X, sample_weight, self.cluster_centers_, self._n_threads
)
return -scores
|
Opposite of the value of X on the K-means objective.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
New data.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight.
Returns
-------
score : float
Opposite of the value of X on the K-means objective.
|
score
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_kmeans.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_kmeans.py
|
BSD-3-Clause
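A minimal usage sketch for `score`, assuming toy data; on the training set the result should match the negative inertia up to floating point error:

import numpy as np
from sklearn.cluster import KMeans

X = np.array([[0, 0], [0, 1], [5, 5], [5, 6]], dtype=float)
km = KMeans(n_clusters=2, n_init=1, random_state=0).fit(X)

print(km.score(X))   # opposite of the K-means objective on X
print(-km.inertia_)  # same value for the training data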
|
def _warn_mkl_vcomp(self, n_active_threads):
"""Warn when vcomp and mkl are both present"""
warnings.warn(
"KMeans is known to have a memory leak on Windows "
"with MKL, when there are less chunks than available "
"threads. You can avoid it by setting the environment"
f" variable OMP_NUM_THREADS={n_active_threads}."
)
|
Warn when vcomp and mkl are both present
|
_warn_mkl_vcomp
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_kmeans.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_kmeans.py
|
BSD-3-Clause
|
def fit(self, X, y=None, sample_weight=None):
"""Compute k-means clustering.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training instances to cluster. It must be noted that the data
will be converted to C ordering, which will cause a memory
copy if the given data is not C-contiguous.
If a sparse matrix is passed, a copy will be made if it's not in
CSR format.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight. `sample_weight` is not used during
initialization if `init` is a callable or a user provided array.
.. versionadded:: 0.20
Returns
-------
self : object
Fitted estimator.
"""
X = validate_data(
self,
X,
accept_sparse="csr",
dtype=[np.float64, np.float32],
order="C",
copy=self.copy_x,
accept_large_sparse=False,
)
self._check_params_vs_input(X)
random_state = check_random_state(self.random_state)
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
self._n_threads = _openmp_effective_n_threads()
# Validate init array
init = self.init
init_is_array_like = _is_arraylike_not_scalar(init)
if init_is_array_like:
init = check_array(init, dtype=X.dtype, copy=True, order="C")
self._validate_center_shape(X, init)
# subtract the mean of X for more accurate distance computations
if not sp.issparse(X):
X_mean = X.mean(axis=0)
# The copy was already done above
X -= X_mean
if init_is_array_like:
init -= X_mean
# precompute squared norms of data points
x_squared_norms = row_norms(X, squared=True)
if self._algorithm == "elkan":
kmeans_single = _kmeans_single_elkan
else:
kmeans_single = _kmeans_single_lloyd
self._check_mkl_vcomp(X, X.shape[0])
best_inertia, best_labels = None, None
for i in range(self._n_init):
# Initialize centers
centers_init = self._init_centroids(
X,
x_squared_norms=x_squared_norms,
init=init,
random_state=random_state,
sample_weight=sample_weight,
)
if self.verbose:
print("Initialization complete")
# run a k-means once
labels, inertia, centers, n_iter_ = kmeans_single(
X,
sample_weight,
centers_init,
max_iter=self.max_iter,
verbose=self.verbose,
tol=self._tol,
n_threads=self._n_threads,
)
# determine if these results are the best so far
# we choose a new run if it has a better inertia and the clustering is
# different from the best so far (it's possible that the inertia is
# slightly better even if the clustering is the same with potentially
# permuted labels, due to rounding errors)
if best_inertia is None or (
inertia < best_inertia
and not _is_same_clustering(labels, best_labels, self.n_clusters)
):
best_labels = labels
best_centers = centers
best_inertia = inertia
best_n_iter = n_iter_
if not sp.issparse(X):
if not self.copy_x:
X += X_mean
best_centers += X_mean
distinct_clusters = len(set(best_labels))
if distinct_clusters < self.n_clusters:
warnings.warn(
"Number of distinct clusters ({}) found smaller than "
"n_clusters ({}). Possibly due to duplicate points "
"in X.".format(distinct_clusters, self.n_clusters),
ConvergenceWarning,
stacklevel=2,
)
self.cluster_centers_ = best_centers
self._n_features_out = self.cluster_centers_.shape[0]
self.labels_ = best_labels
self.inertia_ = best_inertia
self.n_iter_ = best_n_iter
return self
|
Compute k-means clustering.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training instances to cluster. It must be noted that the data
will be converted to C ordering, which will cause a memory
copy if the given data is not C-contiguous.
If a sparse matrix is passed, a copy will be made if it's not in
CSR format.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight. `sample_weight` is not used during
initialization if `init` is a callable or a user provided array.
.. versionadded:: 0.20
Returns
-------
self : object
Fitted estimator.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_kmeans.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_kmeans.py
|
BSD-3-Clause
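A minimal usage sketch for `fit`, assuming synthetic two-blob data:

import numpy as np
from sklearn.cluster import KMeans

rng = np.random.RandomState(0)
X = np.vstack([rng.normal(0, 0.5, (50, 2)), rng.normal(5, 0.5, (50, 2))])

km = KMeans(n_clusters=2, n_init=10, random_state=0).fit(X)
print(km.cluster_centers_)      # one center near (0, 0), one near (5, 5)
print(km.inertia_, km.n_iter_)  # best inertia and iterations of the best run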
|
def _mini_batch_step(
X,
sample_weight,
centers,
centers_new,
weight_sums,
random_state,
random_reassign=False,
reassignment_ratio=0.01,
verbose=False,
n_threads=1,
):
"""Incremental update of the centers for the Minibatch K-Means algorithm.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The original data array. If sparse, must be in CSR format.
sample_weight : ndarray of shape (n_samples,)
The weights for each observation in `X`.
centers : ndarray of shape (n_clusters, n_features)
The cluster centers before the current iteration
centers_new : ndarray of shape (n_clusters, n_features)
The cluster centers after the current iteration. Modified in-place.
weight_sums : ndarray of shape (n_clusters,)
The vector in which we keep track of the numbers of points in a
cluster. This array is modified in place.
random_state : RandomState instance
Determines random number generation for low count centers reassignment.
See :term:`Glossary <random_state>`.
random_reassign : boolean, default=False
If True, centers with very low counts are randomly reassigned
to observations.
reassignment_ratio : float, default=0.01
Control the fraction of the maximum number of counts for a
center to be reassigned. A higher value means that low count
centers are more likely to be reassigned, which means that the
model will take longer to converge, but should converge to a
better clustering.
verbose : bool, default=False
Controls the verbosity.
n_threads : int, default=1
The number of OpenMP threads to use for the computation.
Returns
-------
inertia : float
Sum of squared distances of samples to their closest cluster center.
The inertia is computed after finding the labels and before updating
the centers.
"""
# Perform label assignment to nearest centers
# For efficiency, it's better to run _mini_batch_step in a
# threadpool_limit context than to use _labels_inertia_threadpool_limit here
labels, inertia = _labels_inertia(X, sample_weight, centers, n_threads=n_threads)
# Update centers according to the labels
if sp.issparse(X):
_minibatch_update_sparse(
X, sample_weight, centers, centers_new, weight_sums, labels, n_threads
)
else:
_minibatch_update_dense(
X,
sample_weight,
centers,
centers_new,
weight_sums,
labels,
n_threads,
)
# Reassign clusters that have very low weight
if random_reassign and reassignment_ratio > 0:
to_reassign = weight_sums < reassignment_ratio * weight_sums.max()
# pick at most .5 * batch_size samples as new centers
if to_reassign.sum() > 0.5 * X.shape[0]:
indices_dont_reassign = np.argsort(weight_sums)[int(0.5 * X.shape[0]) :]
to_reassign[indices_dont_reassign] = False
n_reassigns = to_reassign.sum()
if n_reassigns:
# Pick new clusters amongst observations with uniform probability
new_centers = random_state.choice(
X.shape[0], replace=False, size=n_reassigns
)
if verbose:
print(f"[MiniBatchKMeans] Reassigning {n_reassigns} cluster centers.")
if sp.issparse(X):
assign_rows_csr(
X,
new_centers.astype(np.intp, copy=False),
np.where(to_reassign)[0].astype(np.intp, copy=False),
centers_new,
)
else:
centers_new[to_reassign] = X[new_centers]
# reset counts of reassigned centers, but don't reset them too small
# to avoid instant reassignment. This is a pretty dirty hack as it
# also modifies the learning rates.
weight_sums[to_reassign] = np.min(weight_sums[~to_reassign])
return inertia
|
Incremental update of the centers for the Minibatch K-Means algorithm.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The original data array. If sparse, must be in CSR format.
sample_weight : ndarray of shape (n_samples,)
The weights for each observation in `X`.
centers : ndarray of shape (n_clusters, n_features)
The cluster centers before the current iteration
centers_new : ndarray of shape (n_clusters, n_features)
The cluster centers after the current iteration. Modified in-place.
weight_sums : ndarray of shape (n_clusters,)
The vector in which we keep track of the numbers of points in a
cluster. This array is modified in place.
random_state : RandomState instance
Determines random number generation for low count centers reassignment.
See :term:`Glossary <random_state>`.
random_reassign : boolean, default=False
If True, centers with very low counts are randomly reassigned
to observations.
reassignment_ratio : float, default=0.01
Control the fraction of the maximum number of counts for a
center to be reassigned. A higher value means that low count
centers are more likely to be reassigned, which means that the
model will take longer to converge, but should converge to a
better clustering.
verbose : bool, default=False
Controls the verbosity.
n_threads : int, default=1
The number of OpenMP threads to use for the computation.
Returns
-------
inertia : float
Sum of squared distances of samples to their closest cluster center.
The inertia is computed after finding the labels and before updating
the centers.
|
_mini_batch_step
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_kmeans.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_kmeans.py
|
BSD-3-Clause
|
def _warn_mkl_vcomp(self, n_active_threads):
"""Warn when vcomp and mkl are both present"""
warnings.warn(
"MiniBatchKMeans is known to have a memory leak on "
"Windows with MKL, when there are less chunks than "
"available threads. You can prevent it by setting "
f"batch_size >= {self._n_threads * CHUNK_SIZE} or by "
"setting the environment variable "
f"OMP_NUM_THREADS={n_active_threads}"
)
|
Warn when vcomp and mkl are both present
|
_warn_mkl_vcomp
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_kmeans.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_kmeans.py
|
BSD-3-Clause
|
def _mini_batch_convergence(
self, step, n_steps, n_samples, centers_squared_diff, batch_inertia
):
"""Helper function to encapsulate the early stopping logic"""
# Normalize inertia to be able to compare values when
# batch_size changes
batch_inertia /= self._batch_size
# count steps starting from 1 for user friendly verbose mode.
step = step + 1
# Ignore first iteration because it's inertia from initialization.
if step == 1:
if self.verbose:
print(
f"Minibatch step {step}/{n_steps}: mean batch "
f"inertia: {batch_inertia}"
)
return False
# Compute an Exponentially Weighted Average of the inertia to
# monitor the convergence while discarding minibatch-local stochastic
# variability: https://en.wikipedia.org/wiki/Moving_average
if self._ewa_inertia is None:
self._ewa_inertia = batch_inertia
else:
alpha = self._batch_size * 2.0 / (n_samples + 1)
alpha = min(alpha, 1)
self._ewa_inertia = self._ewa_inertia * (1 - alpha) + batch_inertia * alpha
# Log progress to be able to monitor convergence
if self.verbose:
print(
f"Minibatch step {step}/{n_steps}: mean batch inertia: "
f"{batch_inertia}, ewa inertia: {self._ewa_inertia}"
)
# Early stopping based on absolute tolerance on squared change of
# centers position
if self._tol > 0.0 and centers_squared_diff <= self._tol:
if self.verbose:
print(f"Converged (small centers change) at step {step}/{n_steps}")
return True
# Early stopping heuristic due to lack of improvement on smoothed
# inertia
if self._ewa_inertia_min is None or self._ewa_inertia < self._ewa_inertia_min:
self._no_improvement = 0
self._ewa_inertia_min = self._ewa_inertia
else:
self._no_improvement += 1
if (
self.max_no_improvement is not None
and self._no_improvement >= self.max_no_improvement
):
if self.verbose:
print(
"Converged (lack of improvement in inertia) at step "
f"{step}/{n_steps}"
)
return True
return False
|
Helper function to encapsulate the early stopping logic
|
_mini_batch_convergence
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_kmeans.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_kmeans.py
|
BSD-3-Clause
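A standalone sketch of the exponentially weighted average update used above to smooth the per-batch inertia; the batch size, sample count and inertia values are made up for illustration:

# Illustrative recreation of the EWA update in _mini_batch_convergence.
batch_size, n_samples = 100, 10_000
alpha = min(batch_size * 2.0 / (n_samples + 1), 1.0)

ewa = None
for batch_inertia in [120.0, 90.0, 85.0, 84.0, 84.5]:
    ewa = batch_inertia if ewa is None else ewa * (1 - alpha) + batch_inertia * alpha
    print(round(ewa, 3))  # the smoothed value drives the early-stopping checks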
|
def _random_reassign(self):
"""Check if a random reassignment needs to be done.
Do random reassignments each time 10 * n_clusters samples have been
processed.
If there are empty clusters we always want to reassign.
"""
self._n_since_last_reassign += self._batch_size
if (self._counts == 0).any() or self._n_since_last_reassign >= (
10 * self.n_clusters
):
self._n_since_last_reassign = 0
return True
return False
|
Check if a random reassignment needs to be done.
Do random reassignments each time 10 * n_clusters samples have been
processed.
If there are empty clusters we always want to reassign.
|
_random_reassign
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_kmeans.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_kmeans.py
|
BSD-3-Clause
|
def fit(self, X, y=None, sample_weight=None):
"""Compute the centroids on X by chunking it into mini-batches.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training instances to cluster. It must be noted that the data
will be converted to C ordering, which will cause a memory copy
if the given data is not C-contiguous.
If a sparse matrix is passed, a copy will be made if it's not in
CSR format.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight. `sample_weight` is not used during
initialization if `init` is a callable or a user provided array.
.. versionadded:: 0.20
Returns
-------
self : object
Fitted estimator.
"""
X = validate_data(
self,
X,
accept_sparse="csr",
dtype=[np.float64, np.float32],
order="C",
accept_large_sparse=False,
)
self._check_params_vs_input(X)
random_state = check_random_state(self.random_state)
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
self._n_threads = _openmp_effective_n_threads()
n_samples, n_features = X.shape
# Validate init array
init = self.init
if _is_arraylike_not_scalar(init):
init = check_array(init, dtype=X.dtype, copy=True, order="C")
self._validate_center_shape(X, init)
self._check_mkl_vcomp(X, self._batch_size)
# precompute squared norms of data points
x_squared_norms = row_norms(X, squared=True)
# Validation set for the init
validation_indices = random_state.randint(0, n_samples, self._init_size)
X_valid = X[validation_indices]
sample_weight_valid = sample_weight[validation_indices]
# perform several inits with random subsets
best_inertia = None
for init_idx in range(self._n_init):
if self.verbose:
print(f"Init {init_idx + 1}/{self._n_init} with method {init}")
# Initialize the centers using only a fraction of the data as we
# expect n_samples to be very large when using MiniBatchKMeans.
cluster_centers = self._init_centroids(
X,
x_squared_norms=x_squared_norms,
init=init,
random_state=random_state,
init_size=self._init_size,
sample_weight=sample_weight,
)
# Compute inertia on a validation set.
_, inertia = _labels_inertia_threadpool_limit(
X_valid,
sample_weight_valid,
cluster_centers,
n_threads=self._n_threads,
)
if self.verbose:
print(f"Inertia for init {init_idx + 1}/{self._n_init}: {inertia}")
if best_inertia is None or inertia < best_inertia:
init_centers = cluster_centers
best_inertia = inertia
centers = init_centers
centers_new = np.empty_like(centers)
# Initialize counts
self._counts = np.zeros(self.n_clusters, dtype=X.dtype)
# Attributes to monitor the convergence
self._ewa_inertia = None
self._ewa_inertia_min = None
self._no_improvement = 0
# Initialize number of samples seen since last reassignment
self._n_since_last_reassign = 0
n_steps = (self.max_iter * n_samples) // self._batch_size
with _get_threadpool_controller().limit(limits=1, user_api="blas"):
# Perform the iterative optimization until convergence
for i in range(n_steps):
# Sample a minibatch from the full dataset
minibatch_indices = random_state.randint(0, n_samples, self._batch_size)
# Perform the actual update step on the minibatch data
batch_inertia = _mini_batch_step(
X=X[minibatch_indices],
sample_weight=sample_weight[minibatch_indices],
centers=centers,
centers_new=centers_new,
weight_sums=self._counts,
random_state=random_state,
random_reassign=self._random_reassign(),
reassignment_ratio=self.reassignment_ratio,
verbose=self.verbose,
n_threads=self._n_threads,
)
if self._tol > 0.0:
centers_squared_diff = np.sum((centers_new - centers) ** 2)
else:
centers_squared_diff = 0
centers, centers_new = centers_new, centers
# Monitor convergence and do early stopping if necessary
if self._mini_batch_convergence(
i, n_steps, n_samples, centers_squared_diff, batch_inertia
):
break
self.cluster_centers_ = centers
self._n_features_out = self.cluster_centers_.shape[0]
self.n_steps_ = i + 1
self.n_iter_ = int(np.ceil(((i + 1) * self._batch_size) / n_samples))
if self.compute_labels:
self.labels_, self.inertia_ = _labels_inertia_threadpool_limit(
X,
sample_weight,
self.cluster_centers_,
n_threads=self._n_threads,
)
else:
self.inertia_ = self._ewa_inertia * n_samples
return self
|
Compute the centroids on X by chunking it into mini-batches.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training instances to cluster. It must be noted that the data
will be converted to C ordering, which will cause a memory copy
if the given data is not C-contiguous.
If a sparse matrix is passed, a copy will be made if it's not in
CSR format.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight. `sample_weight` is not used during
initialization if `init` is a callable or a user provided array.
.. versionadded:: 0.20
Returns
-------
self : object
Fitted estimator.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_kmeans.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_kmeans.py
|
BSD-3-Clause
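A minimal usage sketch for `MiniBatchKMeans.fit`, assuming synthetic data:

import numpy as np
from sklearn.cluster import MiniBatchKMeans

rng = np.random.RandomState(0)
X = np.vstack([rng.normal(0, 1, (500, 2)), rng.normal(8, 1, (500, 2))])

mbk = MiniBatchKMeans(n_clusters=2, batch_size=100, max_iter=100, n_init=3,
                      random_state=0).fit(X)
print(mbk.cluster_centers_)
print(mbk.n_steps_, mbk.n_iter_)  # minibatch steps taken and equivalent epochs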
|
def partial_fit(self, X, y=None, sample_weight=None):
"""Update k means estimate on a single mini-batch X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training instances to cluster. It must be noted that the data
will be converted to C ordering, which will cause a memory copy
if the given data is not C-contiguous.
If a sparse matrix is passed, a copy will be made if it's not in
CSR format.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight. `sample_weight` is not used during
initialization if `init` is a callable or a user provided array.
Returns
-------
self : object
Return updated estimator.
"""
has_centers = hasattr(self, "cluster_centers_")
X = validate_data(
self,
X,
accept_sparse="csr",
dtype=[np.float64, np.float32],
order="C",
accept_large_sparse=False,
reset=not has_centers,
)
self._random_state = getattr(
self, "_random_state", check_random_state(self.random_state)
)
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
self.n_steps_ = getattr(self, "n_steps_", 0)
# precompute squared norms of data points
x_squared_norms = row_norms(X, squared=True)
if not has_centers:
# this instance has not been fitted yet (fit or partial_fit)
self._check_params_vs_input(X)
self._n_threads = _openmp_effective_n_threads()
# Validate init array
init = self.init
if _is_arraylike_not_scalar(init):
init = check_array(init, dtype=X.dtype, copy=True, order="C")
self._validate_center_shape(X, init)
self._check_mkl_vcomp(X, X.shape[0])
# initialize the cluster centers
self.cluster_centers_ = self._init_centroids(
X,
x_squared_norms=x_squared_norms,
init=init,
random_state=self._random_state,
init_size=self._init_size,
sample_weight=sample_weight,
)
# Initialize counts
self._counts = np.zeros(self.n_clusters, dtype=X.dtype)
# Initialize number of samples seen since last reassignment
self._n_since_last_reassign = 0
with _get_threadpool_controller().limit(limits=1, user_api="blas"):
_mini_batch_step(
X,
sample_weight=sample_weight,
centers=self.cluster_centers_,
centers_new=self.cluster_centers_,
weight_sums=self._counts,
random_state=self._random_state,
random_reassign=self._random_reassign(),
reassignment_ratio=self.reassignment_ratio,
verbose=self.verbose,
n_threads=self._n_threads,
)
if self.compute_labels:
self.labels_, self.inertia_ = _labels_inertia_threadpool_limit(
X,
sample_weight,
self.cluster_centers_,
n_threads=self._n_threads,
)
self.n_steps_ += 1
self._n_features_out = self.cluster_centers_.shape[0]
return self
|
Update k means estimate on a single mini-batch X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training instances to cluster. It must be noted that the data
will be converted to C ordering, which will cause a memory copy
if the given data is not C-contiguous.
If a sparse matrix is passed, a copy will be made if it's not in
CSR format.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight. `sample_weight` is not used during
initialization if `init` is a callable or a user provided array.
Returns
-------
self : object
Return updated estimator.
|
partial_fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_kmeans.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_kmeans.py
|
BSD-3-Clause
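A minimal usage sketch for `partial_fit`, assuming data that arrives in chunks:

import numpy as np
from sklearn.cluster import MiniBatchKMeans

rng = np.random.RandomState(0)
mbk = MiniBatchKMeans(n_clusters=3, random_state=0)

# Feed the estimator one mini-batch at a time, e.g. from a stream.
for _ in range(20):
    chunk = rng.uniform(0, 10, size=(50, 2))
    mbk.partial_fit(chunk)

print(mbk.cluster_centers_.shape)  # (3, 2)
print(mbk.n_steps_)                # one step per partial_fit call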
|
def estimate_bandwidth(X, *, quantile=0.3, n_samples=None, random_state=0, n_jobs=None):
"""Estimate the bandwidth to use with the mean-shift algorithm.
This function takes time at least quadratic in `n_samples`. For large
datasets, it is wise to subsample by setting `n_samples`. Alternatively,
the parameter `bandwidth` can be set to a small value without estimating
it.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input points.
quantile : float, default=0.3
Should be in the interval [0, 1];
0.5 means that the median of all pairwise distances is used.
n_samples : int, default=None
The number of samples to use. If not given, all samples are used.
random_state : int, RandomState instance, default=0
The generator used to randomly select the samples from input points
for bandwidth estimation. Use an int to make the randomness
deterministic.
See :term:`Glossary <random_state>`.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Returns
-------
bandwidth : float
The bandwidth parameter.
Examples
--------
>>> import numpy as np
>>> from sklearn.cluster import estimate_bandwidth
>>> X = np.array([[1, 1], [2, 1], [1, 0],
... [4, 7], [3, 5], [3, 6]])
>>> estimate_bandwidth(X, quantile=0.5)
np.float64(1.61)
"""
X = check_array(X)
random_state = check_random_state(random_state)
if n_samples is not None:
idx = random_state.permutation(X.shape[0])[:n_samples]
X = X[idx]
n_neighbors = int(X.shape[0] * quantile)
if n_neighbors < 1: # cannot fit NearestNeighbors with n_neighbors = 0
n_neighbors = 1
nbrs = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=n_jobs)
nbrs.fit(X)
bandwidth = 0.0
for batch in gen_batches(len(X), 500):
d, _ = nbrs.kneighbors(X[batch, :], return_distance=True)
bandwidth += np.max(d, axis=1).sum()
return bandwidth / X.shape[0]
|
Estimate the bandwidth to use with the mean-shift algorithm.
This function takes time at least quadratic in `n_samples`. For large
datasets, it is wise to subsample by setting `n_samples`. Alternatively,
the parameter `bandwidth` can be set to a small value without estimating
it.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input points.
quantile : float, default=0.3
Should be in the interval [0, 1];
0.5 means that the median of all pairwise distances is used.
n_samples : int, default=None
The number of samples to use. If not given, all samples are used.
random_state : int, RandomState instance, default=0
The generator used to randomly select the samples from input points
for bandwidth estimation. Use an int to make the randomness
deterministic.
See :term:`Glossary <random_state>`.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Returns
-------
bandwidth : float
The bandwidth parameter.
Examples
--------
>>> import numpy as np
>>> from sklearn.cluster import estimate_bandwidth
>>> X = np.array([[1, 1], [2, 1], [1, 0],
... [4, 7], [3, 5], [3, 6]])
>>> estimate_bandwidth(X, quantile=0.5)
np.float64(1.61)
|
estimate_bandwidth
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_mean_shift.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_mean_shift.py
|
BSD-3-Clause
|
def mean_shift(
X,
*,
bandwidth=None,
seeds=None,
bin_seeding=False,
min_bin_freq=1,
cluster_all=True,
max_iter=300,
n_jobs=None,
):
"""Perform mean shift clustering of data using a flat kernel.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
bandwidth : float, default=None
Kernel bandwidth. If not None, must be in the range [0, +inf).
If None, the bandwidth is determined using a heuristic based on
the median of all pairwise distances. This will take quadratic time in
the number of samples. The sklearn.cluster.estimate_bandwidth function
can be used to do this more efficiently.
seeds : array-like of shape (n_seeds, n_features) or None
Points used as initial kernel locations. If None and bin_seeding=False,
each data point is used as a seed. If None and bin_seeding=True,
see bin_seeding.
bin_seeding : bool, default=False
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
Ignored if seeds argument is not None.
min_bin_freq : int, default=1
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds.
cluster_all : bool, default=True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
max_iter : int, default=300
Maximum number of iterations per seed point before the clustering
operation terminates (for that seed point), if it has not converged yet.
n_jobs : int, default=None
The number of jobs to use for the computation. The following tasks benefit
from the parallelization:
- The search of nearest neighbors for bandwidth estimation and label
assignments. See the details in the docstring of the
``NearestNeighbors`` class.
- Hill-climbing optimization for all seeds.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionadded:: 0.17
Parallel Execution using *n_jobs*.
Returns
-------
cluster_centers : ndarray of shape (n_clusters, n_features)
Coordinates of cluster centers.
labels : ndarray of shape (n_samples,)
Cluster labels for each point.
Notes
-----
For a usage example, see
:ref:`sphx_glr_auto_examples_cluster_plot_mean_shift.py`.
Examples
--------
>>> import numpy as np
>>> from sklearn.cluster import mean_shift
>>> X = np.array([[1, 1], [2, 1], [1, 0],
... [4, 7], [3, 5], [3, 6]])
>>> cluster_centers, labels = mean_shift(X, bandwidth=2)
>>> cluster_centers
array([[3.33, 6. ],
[1.33, 0.66]])
>>> labels
array([1, 1, 1, 0, 0, 0])
"""
model = MeanShift(
bandwidth=bandwidth,
seeds=seeds,
min_bin_freq=min_bin_freq,
bin_seeding=bin_seeding,
cluster_all=cluster_all,
n_jobs=n_jobs,
max_iter=max_iter,
).fit(X)
return model.cluster_centers_, model.labels_
|
Perform mean shift clustering of data using a flat kernel.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
bandwidth : float, default=None
Kernel bandwidth. If not None, must be in the range [0, +inf).
If None, the bandwidth is determined using a heuristic based on
the median of all pairwise distances. This will take quadratic time in
the number of samples. The sklearn.cluster.estimate_bandwidth function
can be used to do this more efficiently.
seeds : array-like of shape (n_seeds, n_features) or None
Points used as initial kernel locations. If None and bin_seeding=False,
each data point is used as a seed. If None and bin_seeding=True,
see bin_seeding.
bin_seeding : bool, default=False
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
Ignored if seeds argument is not None.
min_bin_freq : int, default=1
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds.
cluster_all : bool, default=True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
max_iter : int, default=300
Maximum number of iterations per seed point before the clustering
operation terminates (for that seed point), if it has not converged yet.
n_jobs : int, default=None
The number of jobs to use for the computation. The following tasks benefit
from the parallelization:
- The search of nearest neighbors for bandwidth estimation and label
assignments. See the details in the docstring of the
``NearestNeighbors`` class.
- Hill-climbing optimization for all seeds.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionadded:: 0.17
Parallel Execution using *n_jobs*.
Returns
-------
cluster_centers : ndarray of shape (n_clusters, n_features)
Coordinates of cluster centers.
labels : ndarray of shape (n_samples,)
Cluster labels for each point.
Notes
-----
For a usage example, see
:ref:`sphx_glr_auto_examples_cluster_plot_mean_shift.py`.
Examples
--------
>>> import numpy as np
>>> from sklearn.cluster import mean_shift
>>> X = np.array([[1, 1], [2, 1], [1, 0],
... [4, 7], [3, 5], [3, 6]])
>>> cluster_centers, labels = mean_shift(X, bandwidth=2)
>>> cluster_centers
array([[3.33, 6. ],
[1.33, 0.66]])
>>> labels
array([1, 1, 1, 0, 0, 0])
|
mean_shift
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_mean_shift.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_mean_shift.py
|
BSD-3-Clause
|
def get_bin_seeds(X, bin_size, min_bin_freq=1):
"""Find seeds for mean_shift.
Finds seeds by first binning data onto a grid whose lines are
spaced bin_size apart, and then choosing those bins with at least
min_bin_freq points.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input points, the same points that will be used in mean_shift.
bin_size : float
Controls the coarseness of the binning. Smaller values lead
to more seeding (which is computationally more expensive). If you're
not sure how to set this, set it to the value of the bandwidth used
in clustering.mean_shift.
min_bin_freq : int, default=1
Only bins with at least min_bin_freq will be selected as seeds.
Raising this value decreases the number of seeds found, which
makes mean_shift computationally cheaper.
Returns
-------
bin_seeds : array-like of shape (n_samples, n_features)
Points used as initial kernel positions in clustering.mean_shift.
"""
if bin_size == 0:
return X
# Bin points
bin_sizes = defaultdict(int)
for point in X:
binned_point = np.round(point / bin_size)
bin_sizes[tuple(binned_point)] += 1
# Select only those bins as seeds which have enough members
bin_seeds = np.array(
[point for point, freq in bin_sizes.items() if freq >= min_bin_freq],
dtype=np.float32,
)
if len(bin_seeds) == len(X):
warnings.warn(
"Binning data failed with provided bin_size=%f, using data points as seeds."
% bin_size
)
return X
bin_seeds = bin_seeds * bin_size
return bin_seeds
|
Find seeds for mean_shift.
Finds seeds by first binning data onto a grid whose lines are
spaced bin_size apart, and then choosing those bins with at least
min_bin_freq points.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input points, the same points that will be used in mean_shift.
bin_size : float
Controls the coarseness of the binning. Smaller values lead
to more seeding (which is computationally more expensive). If you're
not sure how to set this, set it to the value of the bandwidth used
in clustering.mean_shift.
min_bin_freq : int, default=1
Only bins with at least min_bin_freq will be selected as seeds.
Raising this value decreases the number of seeds found, which
makes mean_shift computationally cheaper.
Returns
-------
bin_seeds : array-like of shape (n_samples, n_features)
Points used as initial kernel positions in clustering.mean_shift.
|
get_bin_seeds
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_mean_shift.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_mean_shift.py
|
BSD-3-Clause
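A minimal usage sketch for `get_bin_seeds`, assuming a handful of 2-D points:

import numpy as np
from sklearn.cluster import get_bin_seeds

X = np.array([[1.0, 1.0], [1.4, 1.4], [1.8, 1.2],
              [2.0, 1.0], [2.1, 1.1], [0.0, 0.0]])

# Points are binned on a grid of width 1.0; only bins with at least
# two members are kept as seeds for mean shift.
seeds = get_bin_seeds(X, bin_size=1.0, min_bin_freq=2)
print(seeds)  # one seed per sufficiently populated bin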
|
def fit(self, X, y=None):
"""Perform clustering.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Samples to cluster.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Fitted instance.
"""
X = validate_data(self, X)
bandwidth = self.bandwidth
if bandwidth is None:
bandwidth = estimate_bandwidth(X, n_jobs=self.n_jobs)
seeds = self.seeds
if seeds is None:
if self.bin_seeding:
seeds = get_bin_seeds(X, bandwidth, self.min_bin_freq)
else:
seeds = X
n_samples, n_features = X.shape
center_intensity_dict = {}
# We use n_jobs=1 because this will be used in nested calls under
# parallel calls to _mean_shift_single_seed, so there is no need
# for further parallelism.
nbrs = NearestNeighbors(radius=bandwidth, n_jobs=1).fit(X)
# execute iterations on all seeds in parallel
all_res = Parallel(n_jobs=self.n_jobs)(
delayed(_mean_shift_single_seed)(seed, X, nbrs, self.max_iter)
for seed in seeds
)
# copy results in a dictionary
for i in range(len(seeds)):
if all_res[i][1]: # i.e. len(points_within) > 0
center_intensity_dict[all_res[i][0]] = all_res[i][1]
self.n_iter_ = max([x[2] for x in all_res])
if not center_intensity_dict:
# nothing near seeds
raise ValueError(
"No point was within bandwidth=%f of any seed. Try a different seeding"
" strategy or increase the bandwidth."
% bandwidth
)
# POST PROCESSING: remove near duplicate points
# If the distance between two kernels is less than the bandwidth,
# then we have to remove one because it is a duplicate. Remove the
# one with fewer points.
sorted_by_intensity = sorted(
center_intensity_dict.items(),
key=lambda tup: (tup[1], tup[0]),
reverse=True,
)
sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
unique = np.ones(len(sorted_centers), dtype=bool)
nbrs = NearestNeighbors(radius=bandwidth, n_jobs=self.n_jobs).fit(
sorted_centers
)
for i, center in enumerate(sorted_centers):
if unique[i]:
neighbor_idxs = nbrs.radius_neighbors([center], return_distance=False)[
0
]
unique[neighbor_idxs] = 0
unique[i] = 1 # leave the current point as unique
cluster_centers = sorted_centers[unique]
# ASSIGN LABELS: a point belongs to the cluster that it is closest to
nbrs = NearestNeighbors(n_neighbors=1, n_jobs=self.n_jobs).fit(cluster_centers)
labels = np.zeros(n_samples, dtype=int)
distances, idxs = nbrs.kneighbors(X)
if self.cluster_all:
labels = idxs.flatten()
else:
labels.fill(-1)
bool_selector = distances.flatten() <= bandwidth
labels[bool_selector] = idxs.flatten()[bool_selector]
self.cluster_centers_, self.labels_ = cluster_centers, labels
return self
|
Perform clustering.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Samples to cluster.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Fitted instance.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_mean_shift.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_mean_shift.py
|
BSD-3-Clause
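A minimal usage sketch for `MeanShift.fit`, reusing the toy data and bandwidth from the `mean_shift` example above:

import numpy as np
from sklearn.cluster import MeanShift

X = np.array([[1, 1], [2, 1], [1, 0],
              [4, 7], [3, 5], [3, 6]], dtype=float)

ms = MeanShift(bandwidth=2).fit(X)
print(ms.cluster_centers_)  # roughly [[3.33, 6.], [1.33, 0.66]]
print(ms.labels_)           # e.g. [1 1 1 0 0 0]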
|
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : array-like of shape (n_samples, n_features)
New data to predict.
Returns
-------
labels : ndarray of shape (n_samples,)
Index of the cluster each sample belongs to.
"""
check_is_fitted(self)
X = validate_data(self, X, reset=False)
with config_context(assume_finite=True):
return pairwise_distances_argmin(X, self.cluster_centers_)
|
Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : array-like of shape (n_samples, n_features)
New data to predict.
Returns
-------
labels : ndarray of shape (n_samples,)
Index of the cluster each sample belongs to.
|
predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_mean_shift.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_mean_shift.py
|
BSD-3-Clause
|
def fit(self, X, y=None):
"""Perform OPTICS clustering.
Extracts an ordered list of points and reachability distances, and
performs initial clustering using ``max_eps`` distance specified at
OPTICS object instantiation.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features), or \
(n_samples, n_samples) if metric='precomputed'
A feature array, or array of distances between samples if
metric='precomputed'. If a sparse matrix is provided, it will be
converted into CSR format.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns a fitted instance of self.
"""
dtype = bool if self.metric in PAIRWISE_BOOLEAN_FUNCTIONS else float
if dtype is bool and X.dtype != bool:
msg = (
"Data will be converted to boolean for"
f" metric {self.metric}, to avoid this warning,"
" you may convert the data prior to calling fit."
)
warnings.warn(msg, DataConversionWarning)
X = validate_data(self, X, dtype=dtype, accept_sparse="csr")
if self.metric == "precomputed" and issparse(X):
X = X.copy() # copy to avoid in-place modification
with warnings.catch_warnings():
warnings.simplefilter("ignore", SparseEfficiencyWarning)
# Set each diagonal to an explicit value so each point is its
# own neighbor
X.setdiag(X.diagonal())
memory = check_memory(self.memory)
(
self.ordering_,
self.core_distances_,
self.reachability_,
self.predecessor_,
) = memory.cache(compute_optics_graph)(
X=X,
min_samples=self.min_samples,
algorithm=self.algorithm,
leaf_size=self.leaf_size,
metric=self.metric,
metric_params=self.metric_params,
p=self.p,
n_jobs=self.n_jobs,
max_eps=self.max_eps,
)
# Extract clusters from the calculated orders and reachability
if self.cluster_method == "xi":
labels_, clusters_ = cluster_optics_xi(
reachability=self.reachability_,
predecessor=self.predecessor_,
ordering=self.ordering_,
min_samples=self.min_samples,
min_cluster_size=self.min_cluster_size,
xi=self.xi,
predecessor_correction=self.predecessor_correction,
)
self.cluster_hierarchy_ = clusters_
elif self.cluster_method == "dbscan":
if self.eps is None:
eps = self.max_eps
else:
eps = self.eps
if eps > self.max_eps:
raise ValueError(
"Specify an epsilon smaller than %s. Got %s." % (self.max_eps, eps)
)
labels_ = cluster_optics_dbscan(
reachability=self.reachability_,
core_distances=self.core_distances_,
ordering=self.ordering_,
eps=eps,
)
self.labels_ = labels_
return self
|
Perform OPTICS clustering.
Extracts an ordered list of points and reachability distances, and
performs initial clustering using ``max_eps`` distance specified at
OPTICS object instantiation.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features), or (n_samples, n_samples) if metric='precomputed'
A feature array, or array of distances between samples if
metric='precomputed'. If a sparse matrix is provided, it will be
converted into CSR format.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns a fitted instance of self.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_optics.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_optics.py
|
BSD-3-Clause
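A minimal usage sketch for `OPTICS.fit`, assuming the same toy data used in the OPTICS helper examples below:

import numpy as np
from sklearn.cluster import OPTICS

X = np.array([[1, 2], [2, 5], [3, 6],
              [8, 7], [8, 8], [7, 3]], dtype=float)

opt = OPTICS(min_samples=2).fit(X)
print(opt.labels_)        # e.g. [0 0 0 1 1 1]
print(opt.ordering_)      # the order in which points were processed
print(opt.reachability_)  # reachability distances (inf where undefined)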
|
def _compute_core_distances_(X, neighbors, min_samples, working_memory):
"""Compute the k-th nearest neighbor of each sample.
Equivalent to neighbors.kneighbors(X, self.min_samples)[0][:, -1]
but with more memory efficiency.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data.
neighbors : NearestNeighbors instance
The fitted nearest neighbors estimator.
min_samples : int
Number of neighbors used to define the core distance, i.e. the
distance to the ``min_samples``-th nearest neighbor.
working_memory : int, default=None
The sought maximum memory for temporary distance matrix chunks.
When None (default), the value of
``sklearn.get_config()['working_memory']`` is used.
Returns
-------
core_distances : ndarray of shape (n_samples,)
Distance at which each sample becomes a core point.
Points which will never be core have a distance of inf.
"""
n_samples = X.shape[0]
core_distances = np.empty(n_samples)
core_distances.fill(np.nan)
chunk_n_rows = get_chunk_n_rows(
row_bytes=16 * min_samples, max_n_rows=n_samples, working_memory=working_memory
)
slices = gen_batches(n_samples, chunk_n_rows)
for sl in slices:
core_distances[sl] = neighbors.kneighbors(X[sl], min_samples)[0][:, -1]
return core_distances
|
Compute the k-th nearest neighbor of each sample.
Equivalent to neighbors.kneighbors(X, self.min_samples)[0][:, -1]
but with more memory efficiency.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data.
neighbors : NearestNeighbors instance
The fitted nearest neighbors estimator.
working_memory : int, default=None
The sought maximum memory for temporary distance matrix chunks.
When None (default), the value of
``sklearn.get_config()['working_memory']`` is used.
Returns
-------
core_distances : ndarray of shape (n_samples,)
Distance at which each sample becomes a core point.
Points which will never be core have a distance of inf.
|
_compute_core_distances_
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_optics.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_optics.py
|
BSD-3-Clause
|
def cluster_optics_dbscan(*, reachability, core_distances, ordering, eps):
"""Perform DBSCAN extraction for an arbitrary epsilon.
Extracting the clusters runs in linear time. Note that this results in
``labels_`` which are close to a :class:`~sklearn.cluster.DBSCAN` with
similar settings and ``eps``, only if ``eps`` is close to ``max_eps``.
Parameters
----------
reachability : ndarray of shape (n_samples,)
Reachability distances calculated by OPTICS (``reachability_``).
core_distances : ndarray of shape (n_samples,)
Distances at which points become core (``core_distances_``).
ordering : ndarray of shape (n_samples,)
OPTICS ordered point indices (``ordering_``).
eps : float
DBSCAN ``eps`` parameter. Must be set to < ``max_eps``. Results
will be close to DBSCAN algorithm if ``eps`` and ``max_eps`` are close
to one another.
Returns
-------
labels_ : array of shape (n_samples,)
The estimated labels.
Examples
--------
>>> import numpy as np
>>> from sklearn.cluster import cluster_optics_dbscan, compute_optics_graph
>>> X = np.array([[1, 2], [2, 5], [3, 6],
... [8, 7], [8, 8], [7, 3]])
>>> ordering, core_distances, reachability, predecessor = compute_optics_graph(
... X,
... min_samples=2,
... max_eps=np.inf,
... metric="minkowski",
... p=2,
... metric_params=None,
... algorithm="auto",
... leaf_size=30,
... n_jobs=None,
... )
>>> eps = 4.5
>>> labels = cluster_optics_dbscan(
... reachability=reachability,
... core_distances=core_distances,
... ordering=ordering,
... eps=eps,
... )
>>> labels
array([0, 0, 0, 1, 1, 1])
"""
n_samples = len(core_distances)
labels = np.zeros(n_samples, dtype=int)
far_reach = reachability > eps
near_core = core_distances <= eps
labels[ordering] = np.cumsum(far_reach[ordering] & near_core[ordering]) - 1
labels[far_reach & ~near_core] = -1
return labels
|
Perform DBSCAN extraction for an arbitrary epsilon.
Extracting the clusters runs in linear time. Note that this results in
``labels_`` which are close to a :class:`~sklearn.cluster.DBSCAN` with
similar settings and ``eps``, only if ``eps`` is close to ``max_eps``.
Parameters
----------
reachability : ndarray of shape (n_samples,)
Reachability distances calculated by OPTICS (``reachability_``).
core_distances : ndarray of shape (n_samples,)
Distances at which points become core (``core_distances_``).
ordering : ndarray of shape (n_samples,)
OPTICS ordered point indices (``ordering_``).
eps : float
DBSCAN ``eps`` parameter. Must be set to < ``max_eps``. Results
will be close to DBSCAN algorithm if ``eps`` and ``max_eps`` are close
to one another.
Returns
-------
labels_ : array of shape (n_samples,)
The estimated labels.
Examples
--------
>>> import numpy as np
>>> from sklearn.cluster import cluster_optics_dbscan, compute_optics_graph
>>> X = np.array([[1, 2], [2, 5], [3, 6],
... [8, 7], [8, 8], [7, 3]])
>>> ordering, core_distances, reachability, predecessor = compute_optics_graph(
... X,
... min_samples=2,
... max_eps=np.inf,
... metric="minkowski",
... p=2,
... metric_params=None,
... algorithm="auto",
... leaf_size=30,
... n_jobs=None,
... )
>>> eps = 4.5
>>> labels = cluster_optics_dbscan(
... reachability=reachability,
... core_distances=core_distances,
... ordering=ordering,
... eps=eps,
... )
>>> labels
array([0, 0, 0, 1, 1, 1])
|
cluster_optics_dbscan
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_optics.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_optics.py
|
BSD-3-Clause
|
def cluster_optics_xi(
*,
reachability,
predecessor,
ordering,
min_samples,
min_cluster_size=None,
xi=0.05,
predecessor_correction=True,
):
"""Automatically extract clusters according to the Xi-steep method.
Parameters
----------
reachability : ndarray of shape (n_samples,)
Reachability distances calculated by OPTICS (`reachability_`).
predecessor : ndarray of shape (n_samples,)
Predecessors calculated by OPTICS.
ordering : ndarray of shape (n_samples,)
OPTICS ordered point indices (`ordering_`).
min_samples : int > 1 or float between 0 and 1
The same as the min_samples given to OPTICS. Up and down steep regions
can't have more than ``min_samples`` consecutive non-steep points.
Expressed as an absolute number or a fraction of the number of samples
(rounded to be at least 2).
min_cluster_size : int > 1 or float between 0 and 1, default=None
Minimum number of samples in an OPTICS cluster, expressed as an
absolute number or a fraction of the number of samples (rounded to be
at least 2). If ``None``, the value of ``min_samples`` is used instead.
xi : float between 0 and 1, default=0.05
Determines the minimum steepness on the reachability plot that
constitutes a cluster boundary. For example, an upwards point in the
reachability plot is defined by the ratio from one point to its
successor being at most 1-xi.
predecessor_correction : bool, default=True
Correct clusters based on the calculated predecessors.
Returns
-------
labels : ndarray of shape (n_samples,)
The labels assigned to samples. Points which are not included
in any cluster are labeled as -1.
clusters : ndarray of shape (n_clusters, 2)
The list of clusters in the form of ``[start, end]`` in each row, with
all indices inclusive. The clusters are ordered according to ``(end,
-start)`` (ascending) so that larger clusters encompassing smaller
clusters come after such nested smaller clusters. Since ``labels`` does
not reflect the hierarchy, usually ``len(clusters) >
len(np.unique(labels))``.
Examples
--------
>>> import numpy as np
>>> from sklearn.cluster import cluster_optics_xi, compute_optics_graph
>>> X = np.array([[1, 2], [2, 5], [3, 6],
... [8, 7], [8, 8], [7, 3]])
>>> ordering, core_distances, reachability, predecessor = compute_optics_graph(
... X,
... min_samples=2,
... max_eps=np.inf,
... metric="minkowski",
... p=2,
... metric_params=None,
... algorithm="auto",
... leaf_size=30,
... n_jobs=None
... )
>>> min_samples = 2
>>> labels, clusters = cluster_optics_xi(
... reachability=reachability,
... predecessor=predecessor,
... ordering=ordering,
... min_samples=min_samples,
... )
>>> labels
array([0, 0, 0, 1, 1, 1])
>>> clusters
array([[0, 2],
[3, 5],
[0, 5]])
"""
n_samples = len(reachability)
_validate_size(min_samples, n_samples, "min_samples")
if min_samples <= 1:
min_samples = max(2, int(min_samples * n_samples))
if min_cluster_size is None:
min_cluster_size = min_samples
_validate_size(min_cluster_size, n_samples, "min_cluster_size")
if min_cluster_size <= 1:
min_cluster_size = max(2, int(min_cluster_size * n_samples))
clusters = _xi_cluster(
reachability[ordering],
predecessor[ordering],
ordering,
xi,
min_samples,
min_cluster_size,
predecessor_correction,
)
labels = _extract_xi_labels(ordering, clusters)
return labels, clusters
|
Automatically extract clusters according to the Xi-steep method.
Parameters
----------
reachability : ndarray of shape (n_samples,)
Reachability distances calculated by OPTICS (`reachability_`).
predecessor : ndarray of shape (n_samples,)
Predecessors calculated by OPTICS.
ordering : ndarray of shape (n_samples,)
OPTICS ordered point indices (`ordering_`).
min_samples : int > 1 or float between 0 and 1
The same as the min_samples given to OPTICS. Up and down steep regions
can't have more than ``min_samples`` consecutive non-steep points.
Expressed as an absolute number or a fraction of the number of samples
(rounded to be at least 2).
min_cluster_size : int > 1 or float between 0 and 1, default=None
Minimum number of samples in an OPTICS cluster, expressed as an
absolute number or a fraction of the number of samples (rounded to be
at least 2). If ``None``, the value of ``min_samples`` is used instead.
xi : float between 0 and 1, default=0.05
Determines the minimum steepness on the reachability plot that
constitutes a cluster boundary. For example, an upwards point in the
reachability plot is defined by the ratio from one point to its
successor being at most 1-xi.
predecessor_correction : bool, default=True
Correct clusters based on the calculated predecessors.
Returns
-------
labels : ndarray of shape (n_samples,)
The labels assigned to samples. Points which are not included
in any cluster are labeled as -1.
clusters : ndarray of shape (n_clusters, 2)
The list of clusters in the form of ``[start, end]`` in each row, with
all indices inclusive. The clusters are ordered according to ``(end,
-start)`` (ascending) so that larger clusters encompassing smaller
clusters come after such nested smaller clusters. Since ``labels`` does
        not reflect the hierarchy, usually ``len(clusters) >
        len(np.unique(labels))``.
Examples
--------
>>> import numpy as np
>>> from sklearn.cluster import cluster_optics_xi, compute_optics_graph
>>> X = np.array([[1, 2], [2, 5], [3, 6],
... [8, 7], [8, 8], [7, 3]])
>>> ordering, core_distances, reachability, predecessor = compute_optics_graph(
... X,
... min_samples=2,
... max_eps=np.inf,
... metric="minkowski",
... p=2,
... metric_params=None,
... algorithm="auto",
... leaf_size=30,
... n_jobs=None
... )
>>> min_samples = 2
>>> labels, clusters = cluster_optics_xi(
... reachability=reachability,
... predecessor=predecessor,
... ordering=ordering,
... min_samples=min_samples,
... )
>>> labels
array([0, 0, 0, 1, 1, 1])
>>> clusters
array([[0, 2],
[3, 5],
[0, 5]])
|
cluster_optics_xi
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_optics.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_optics.py
|
BSD-3-Clause
|
def _extend_region(steep_point, xward_point, start, min_samples):
"""Extend the area until it's maximal.
    It's the same function for both upward and downward regions, depending on
the given input parameters. Assuming:
- steep_{upward/downward}: bool array indicating whether a point is a
steep {upward/downward};
- upward/downward: bool array indicating whether a point is
upward/downward;
    To extend an upward region, ``steep_point=steep_upward`` and
``xward_point=downward`` are expected, and to extend a downward region,
``steep_point=steep_downward`` and ``xward_point=upward``.
Parameters
----------
steep_point : ndarray of shape (n_samples,), dtype=bool
True if the point is steep downward (upward).
xward_point : ndarray of shape (n_samples,), dtype=bool
True if the point is an upward (respectively downward) point.
start : int
The start of the xward region.
min_samples : int
The same as the min_samples given to OPTICS. Up and down steep
        regions can't have more than ``min_samples`` consecutive non-steep
points.
Returns
-------
index : int
The current index iterating over all the samples, i.e. where we are up
to in our search.
end : int
The end of the region, which can be behind the index. The region
includes the ``end`` index.
"""
n_samples = len(steep_point)
non_xward_points = 0
index = start
end = start
# find a maximal area
while index < n_samples:
if steep_point[index]:
non_xward_points = 0
end = index
elif not xward_point[index]:
# it's not a steep point, but still goes up.
non_xward_points += 1
# region should include no more than min_samples consecutive
# non steep xward points.
if non_xward_points > min_samples:
break
else:
return end
index += 1
return end
|
Extend the area until it's maximal.
    It's the same function for both upward and downward regions, depending on
the given input parameters. Assuming:
- steep_{upward/downward}: bool array indicating whether a point is a
steep {upward/downward};
- upward/downward: bool array indicating whether a point is
upward/downward;
    To extend an upward region, ``steep_point=steep_upward`` and
``xward_point=downward`` are expected, and to extend a downward region,
``steep_point=steep_downward`` and ``xward_point=upward``.
Parameters
----------
steep_point : ndarray of shape (n_samples,), dtype=bool
True if the point is steep downward (upward).
xward_point : ndarray of shape (n_samples,), dtype=bool
True if the point is an upward (respectively downward) point.
start : int
The start of the xward region.
min_samples : int
The same as the min_samples given to OPTICS. Up and down steep
        regions can't have more than ``min_samples`` consecutive non-steep
points.
Returns
-------
index : int
The current index iterating over all the samples, i.e. where we are up
to in our search.
end : int
The end of the region, which can be behind the index. The region
includes the ``end`` index.
|
_extend_region
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_optics.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_optics.py
|
BSD-3-Clause
|
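A minimal sketch of how ``_extend_region`` is driven, assuming the module-private helper above is in scope: the steepness masks it consumes are built from consecutive ratios of the reachability plot, exactly as ``_xi_cluster`` does further down. The reachability values, ``xi``, and ``min_samples`` below are made-up illustration inputs, not values from the library.
import numpy as np

# hypothetical reachability plot; a trailing inf mimics _xi_cluster's padding
reachability_plot = np.array([3.0, 1.0, 0.9, 0.85, 0.8, 2.0, np.inf])
xi_complement = 1 - 0.05  # xi = 0.05

ratio = reachability_plot[:-1] / reachability_plot[1:]
steep_upward = ratio <= xi_complement
steep_downward = ratio >= 1 / xi_complement
upward = ratio < 1
downward = ratio > 1

# grow the steep *downward* area starting at index 0:
# steep_point=steep_downward, xward_point=upward, as the docstring prescribes
end = _extend_region(steep_downward, upward, 0, 2)
# `end` is the last index of the maximal steep-down area (3 for these values)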
def _update_filter_sdas(sdas, mib, xi_complement, reachability_plot):
"""Update steep down areas (SDAs) using the new maximum in between (mib)
value, and the given complement of xi, i.e. ``1 - xi``.
"""
if np.isinf(mib):
return []
res = [
sda for sda in sdas if mib <= reachability_plot[sda["start"]] * xi_complement
]
for sda in res:
sda["mib"] = max(sda["mib"], mib)
return res
|
Update steep down areas (SDAs) using the new maximum in between (mib)
value, and the given complement of xi, i.e. ``1 - xi``.
|
_update_filter_sdas
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_optics.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_optics.py
|
BSD-3-Clause
|
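A small worked illustration of the SDA filter above, assuming ``_update_filter_sdas`` is in scope and using made-up numbers: a steep down area is kept only while the current maximum-in-between stays below its start reachability scaled by ``1 - xi``, and the survivors have their ``mib`` raised.
import numpy as np

reachability_plot = np.array([5.0, 1.0, 4.0, 0.5])
xi_complement = 0.95  # xi = 0.05
sdas = [
    {"start": 0, "end": 1, "mib": 0.0},  # start reachability 5.0
    {"start": 2, "end": 3, "mib": 0.0},  # start reachability 4.0
]
mib = 4.5  # hypothetical maximum in between

kept = _update_filter_sdas(sdas, mib, xi_complement, reachability_plot)
# 4.5 <= 5.0 * 0.95 = 4.75 -> first SDA kept, its mib becomes 4.5
# 4.5 >  4.0 * 0.95 = 3.8  -> second SDA dropped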
def _correct_predecessor(reachability_plot, predecessor_plot, ordering, s, e):
"""Correct for predecessors.
Applies Algorithm 2 of [1]_.
    Input parameters are ordered by the computed OPTICS ordering.
.. [1] Schubert, Erich, Michael Gertz.
"Improving the Cluster Structure Extracted from OPTICS Plots." Proc. of
the Conference "Lernen, Wissen, Daten, Analysen" (LWDA) (2018): 318-329.
"""
while s < e:
if reachability_plot[s] > reachability_plot[e]:
return s, e
p_e = predecessor_plot[e]
for i in range(s, e):
if p_e == ordering[i]:
return s, e
e -= 1
return None, None
|
Correct for predecessors.
Applies Algorithm 2 of [1]_.
    Input parameters are ordered by the computed OPTICS ordering.
.. [1] Schubert, Erich, Michael Gertz.
"Improving the Cluster Structure Extracted from OPTICS Plots." Proc. of
the Conference "Lernen, Wissen, Daten, Analysen" (LWDA) (2018): 318-329.
|
_correct_predecessor
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_optics.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_optics.py
|
BSD-3-Clause
|
def _xi_cluster(
reachability_plot,
predecessor_plot,
ordering,
xi,
min_samples,
min_cluster_size,
predecessor_correction,
):
"""Automatically extract clusters according to the Xi-steep method.
    This is roughly an implementation of Figure 19 of the OPTICS paper.
Parameters
----------
reachability_plot : array-like of shape (n_samples,)
The reachability plot, i.e. reachability ordered according to
the calculated ordering, all computed by OPTICS.
predecessor_plot : array-like of shape (n_samples,)
        Predecessors ordered according to the calculated ordering.
    ordering : array-like of shape (n_samples,)
        OPTICS ordered point indices (`ordering_`).
xi : float, between 0 and 1
Determines the minimum steepness on the reachability plot that
constitutes a cluster boundary. For example, an upwards point in the
reachability plot is defined by the ratio from one point to its
successor being at most 1-xi.
min_samples : int > 1
The same as the min_samples given to OPTICS. Up and down steep regions
        can't have more than ``min_samples`` consecutive non-steep points.
min_cluster_size : int > 1
Minimum number of samples in an OPTICS cluster.
predecessor_correction : bool
Correct clusters based on the calculated predecessors.
Returns
-------
clusters : ndarray of shape (n_clusters, 2)
The list of clusters in the form of [start, end] in each row, with all
indices inclusive. The clusters are ordered in a way that larger
clusters encompassing smaller clusters come after those smaller
clusters.
"""
# Our implementation adds an inf to the end of reachability plot
# this helps to find potential clusters at the end of the
# reachability plot even if there's no upward region at the end of it.
reachability_plot = np.hstack((reachability_plot, np.inf))
xi_complement = 1 - xi
sdas = [] # steep down areas, introduced in section 4.3.2 of the paper
clusters = []
index = 0
mib = 0.0 # maximum in between, section 4.3.2
# Our implementation corrects a mistake in the original
# paper, i.e., in Definition 9 steep downward point,
    # r(p) * (1 - xi) <= r(p + 1) should be
    # r(p) * (1 - xi) >= r(p + 1)
with np.errstate(invalid="ignore"):
ratio = reachability_plot[:-1] / reachability_plot[1:]
steep_upward = ratio <= xi_complement
steep_downward = ratio >= 1 / xi_complement
downward = ratio > 1
upward = ratio < 1
# the following loop is almost exactly as Figure 19 of the paper.
# it jumps over the areas which are not either steep down or up areas
for steep_index in iter(np.flatnonzero(steep_upward | steep_downward)):
# just continue if steep_index has been a part of a discovered xward
# area.
if steep_index < index:
continue
mib = max(mib, np.max(reachability_plot[index : steep_index + 1]))
# steep downward areas
if steep_downward[steep_index]:
sdas = _update_filter_sdas(sdas, mib, xi_complement, reachability_plot)
D_start = steep_index
D_end = _extend_region(steep_downward, upward, D_start, min_samples)
D = {"start": D_start, "end": D_end, "mib": 0.0}
sdas.append(D)
index = D_end + 1
mib = reachability_plot[index]
# steep upward areas
else:
sdas = _update_filter_sdas(sdas, mib, xi_complement, reachability_plot)
U_start = steep_index
U_end = _extend_region(steep_upward, downward, U_start, min_samples)
index = U_end + 1
mib = reachability_plot[index]
U_clusters = []
for D in sdas:
c_start = D["start"]
c_end = U_end
# line (**), sc2*
if reachability_plot[c_end + 1] * xi_complement < D["mib"]:
continue
# Definition 11: criterion 4
D_max = reachability_plot[D["start"]]
if D_max * xi_complement >= reachability_plot[c_end + 1]:
# Find the first index from the left side which is almost
# at the same level as the end of the detected cluster.
while (
reachability_plot[c_start + 1] > reachability_plot[c_end + 1]
and c_start < D["end"]
):
c_start += 1
elif reachability_plot[c_end + 1] * xi_complement >= D_max:
# Find the first index from the right side which is almost
# at the same level as the beginning of the detected
# cluster.
# Our implementation corrects a mistake in the original
# paper, i.e., in Definition 11 4c, r(x) < r(sD) should be
# r(x) > r(sD).
while reachability_plot[c_end - 1] > D_max and c_end > U_start:
c_end -= 1
# predecessor correction
if predecessor_correction:
c_start, c_end = _correct_predecessor(
reachability_plot, predecessor_plot, ordering, c_start, c_end
)
if c_start is None:
continue
# Definition 11: criterion 3.a
if c_end - c_start + 1 < min_cluster_size:
continue
# Definition 11: criterion 1
if c_start > D["end"]:
continue
# Definition 11: criterion 2
if c_end < U_start:
continue
U_clusters.append((c_start, c_end))
# add smaller clusters first.
U_clusters.reverse()
clusters.extend(U_clusters)
return np.array(clusters)
|
Automatically extract clusters according to the Xi-steep method.
    This is roughly an implementation of Figure 19 of the OPTICS paper.
Parameters
----------
reachability_plot : array-like of shape (n_samples,)
The reachability plot, i.e. reachability ordered according to
the calculated ordering, all computed by OPTICS.
predecessor_plot : array-like of shape (n_samples,)
        Predecessors ordered according to the calculated ordering.
    ordering : array-like of shape (n_samples,)
        OPTICS ordered point indices (`ordering_`).
xi : float, between 0 and 1
Determines the minimum steepness on the reachability plot that
constitutes a cluster boundary. For example, an upwards point in the
reachability plot is defined by the ratio from one point to its
successor being at most 1-xi.
min_samples : int > 1
The same as the min_samples given to OPTICS. Up and down steep regions
        can't have more than ``min_samples`` consecutive non-steep points.
min_cluster_size : int > 1
Minimum number of samples in an OPTICS cluster.
predecessor_correction : bool
Correct clusters based on the calculated predecessors.
Returns
-------
clusters : ndarray of shape (n_clusters, 2)
The list of clusters in the form of [start, end] in each row, with all
indices inclusive. The clusters are ordered in a way that larger
clusters encompassing smaller clusters come after those smaller
clusters.
|
_xi_cluster
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_optics.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_optics.py
|
BSD-3-Clause
|
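These private helpers are exercised through the public OPTICS estimator when ``cluster_method='xi'``; a hedged end-to-end sketch on the same toy data used in the ``cluster_optics_xi`` docstring above:
import numpy as np
from sklearn.cluster import OPTICS

X = np.array([[1, 2], [2, 5], [3, 6],
              [8, 7], [8, 8], [7, 3]])
optics = OPTICS(min_samples=2, xi=0.05, cluster_method="xi").fit(X)
optics.labels_             # per-sample labels, -1 marks noise
optics.cluster_hierarchy_  # [start, end] rows, as returned by the xi extraction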
def _extract_xi_labels(ordering, clusters):
"""Extracts the labels from the clusters returned by `_xi_cluster`.
We rely on the fact that clusters are stored
with the smaller clusters coming before the larger ones.
Parameters
----------
ordering : array-like of shape (n_samples,)
The ordering of points calculated by OPTICS
clusters : array-like of shape (n_clusters, 2)
List of clusters i.e. (start, end) tuples,
as returned by `_xi_cluster`.
Returns
-------
labels : ndarray of shape (n_samples,)
"""
labels = np.full(len(ordering), -1, dtype=int)
label = 0
for c in clusters:
if not np.any(labels[c[0] : (c[1] + 1)] != -1):
labels[c[0] : (c[1] + 1)] = label
label += 1
labels[ordering] = labels.copy()
return labels
|
Extracts the labels from the clusters returned by `_xi_cluster`.
We rely on the fact that clusters are stored
with the smaller clusters coming before the larger ones.
Parameters
----------
ordering : array-like of shape (n_samples,)
The ordering of points calculated by OPTICS
clusters : array-like of shape (n_clusters, 2)
List of clusters i.e. (start, end) tuples,
as returned by `_xi_cluster`.
Returns
-------
labels : ndarray of shape (n_samples,)
|
_extract_xi_labels
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_optics.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_optics.py
|
BSD-3-Clause
|
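Because the smaller (nested) clusters come first, the loop above only labels points that are still unassigned, so an enclosing cluster never overwrites its children. A hedged re-run of the docstring example by hand, assuming ``_extract_xi_labels`` is in scope:
import numpy as np

ordering = np.array([0, 1, 2, 3, 4, 5])
clusters = np.array([[0, 2], [3, 5], [0, 5]])  # smaller clusters first

labels = _extract_xi_labels(ordering, clusters)
# [0, 2] -> label 0, [3, 5] -> label 1, [0, 5] skipped (already labelled)
# labels == array([0, 0, 0, 1, 1, 1])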
def cluster_qr(vectors):
"""Find the discrete partition closest to the eigenvector embedding.
This implementation was proposed in [1]_.
.. versionadded:: 1.1
Parameters
----------
vectors : array-like, shape: (n_samples, n_clusters)
The embedding space of the samples.
Returns
-------
labels : array of integers, shape: n_samples
The cluster labels of vectors.
References
----------
.. [1] :doi:`Simple, direct, and efficient multi-way spectral clustering, 2019
Anil Damle, Victor Minden, Lexing Ying
<10.1093/imaiai/iay008>`
"""
k = vectors.shape[1]
_, _, piv = qr(vectors.T, pivoting=True)
ut, _, v = svd(vectors[piv[:k], :].T)
vectors = abs(np.dot(vectors, np.dot(ut, v.conj())))
return vectors.argmax(axis=1)
|
Find the discrete partition closest to the eigenvector embedding.
This implementation was proposed in [1]_.
.. versionadded:: 1.1
Parameters
----------
vectors : array-like, shape: (n_samples, n_clusters)
The embedding space of the samples.
Returns
-------
labels : array of integers, shape: n_samples
The cluster labels of vectors.
References
----------
.. [1] :doi:`Simple, direct, and efficient multi-way spectral clustering, 2019
Anil Damle, Victor Minden, Lexing Ying
<10.1093/imaiai/iay008>`
|
cluster_qr
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_spectral.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_spectral.py
|
BSD-3-Clause
|
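A hedged usage sketch of ``cluster_qr`` on a made-up two-cluster embedding; it is imported from its defining module here because within scikit-learn it is normally reached via ``SpectralClustering(assign_labels='cluster_qr')`` rather than called directly:
import numpy as np
from sklearn.cluster._spectral import cluster_qr  # module-level helper

rng = np.random.RandomState(0)
# fake embedding: two well separated directions plus a little noise
vectors = np.vstack([
    np.tile([1.0, 0.0], (5, 1)),
    np.tile([0.0, 1.0], (5, 1)),
]) + 0.01 * rng.rand(10, 2)

labels = cluster_qr(vectors)  # one integer label per row, no tuning parameters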
def discretize(
vectors, *, copy=True, max_svd_restarts=30, n_iter_max=20, random_state=None
):
"""Search for a partition matrix which is closest to the eigenvector embedding.
This implementation was proposed in [1]_.
Parameters
----------
vectors : array-like of shape (n_samples, n_clusters)
The embedding space of the samples.
copy : bool, default=True
Whether to copy vectors, or perform in-place normalization.
max_svd_restarts : int, default=30
        Maximum number of attempts to restart SVD if convergence fails.
    n_iter_max : int, default=20
        Maximum number of iterations to attempt in rotation and partition
        matrix search if machine precision convergence is not reached.
random_state : int, RandomState instance, default=None
Determines random number generation for rotation matrix initialization.
Use an int to make the randomness deterministic.
See :term:`Glossary <random_state>`.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
.. [1] `Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
<https://people.eecs.berkeley.edu/~jordan/courses/281B-spring04/readings/yu-shi.pdf>`_
Notes
-----
The eigenvector embedding is used to iteratively search for the
closest discrete partition. First, the eigenvector embedding is
normalized to the space of partition matrices. An optimal discrete
partition matrix closest to this normalized embedding multiplied by
an initial rotation is calculated. Fixing this discrete partition
matrix, an optimal rotation matrix is calculated. These two
calculations are performed until convergence. The discrete partition
matrix is returned as the clustering solution. Used in spectral
clustering, this method tends to be faster and more robust to random
initialization than k-means.
"""
random_state = check_random_state(random_state)
vectors = as_float_array(vectors, copy=copy)
eps = np.finfo(float).eps
n_samples, n_components = vectors.shape
# Normalize the eigenvectors to an equal length of a vector of ones.
# Reorient the eigenvectors to point in the negative direction with respect
# to the first element. This may have to do with constraining the
# eigenvectors to lie in a specific quadrant to make the discretization
# search easier.
norm_ones = np.sqrt(n_samples)
for i in range(vectors.shape[1]):
vectors[:, i] = (vectors[:, i] / np.linalg.norm(vectors[:, i])) * norm_ones
if vectors[0, i] != 0:
vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])
# Normalize the rows of the eigenvectors. Samples should lie on the unit
# hypersphere centered at the origin. This transforms the samples in the
# embedding space to the space of partition matrices.
vectors = vectors / np.sqrt((vectors**2).sum(axis=1))[:, np.newaxis]
svd_restarts = 0
has_converged = False
# If there is an exception we try to randomize and rerun SVD again
# do this max_svd_restarts times.
while (svd_restarts < max_svd_restarts) and not has_converged:
# Initialize first column of rotation matrix with a row of the
# eigenvectors
rotation = np.zeros((n_components, n_components))
rotation[:, 0] = vectors[random_state.randint(n_samples), :].T
# To initialize the rest of the rotation matrix, find the rows
# of the eigenvectors that are as orthogonal to each other as
# possible
c = np.zeros(n_samples)
for j in range(1, n_components):
# Accumulate c to ensure row is as orthogonal as possible to
# previous picks as well as current one
c += np.abs(np.dot(vectors, rotation[:, j - 1]))
rotation[:, j] = vectors[c.argmin(), :].T
last_objective_value = 0.0
n_iter = 0
while not has_converged:
n_iter += 1
t_discrete = np.dot(vectors, rotation)
labels = t_discrete.argmax(axis=1)
vectors_discrete = csc_matrix(
(np.ones(len(labels)), (np.arange(0, n_samples), labels)),
shape=(n_samples, n_components),
)
t_svd = vectors_discrete.T @ vectors
try:
U, S, Vh = np.linalg.svd(t_svd)
except LinAlgError:
svd_restarts += 1
print("SVD did not converge, randomizing and trying again")
break
ncut_value = 2.0 * (n_samples - S.sum())
if (abs(ncut_value - last_objective_value) < eps) or (n_iter > n_iter_max):
has_converged = True
else:
# otherwise calculate rotation and continue
last_objective_value = ncut_value
rotation = np.dot(Vh.T, U.T)
if not has_converged:
raise LinAlgError("SVD did not converge")
return labels
|
Search for a partition matrix which is closest to the eigenvector embedding.
This implementation was proposed in [1]_.
Parameters
----------
vectors : array-like of shape (n_samples, n_clusters)
The embedding space of the samples.
copy : bool, default=True
Whether to copy vectors, or perform in-place normalization.
max_svd_restarts : int, default=30
        Maximum number of attempts to restart SVD if convergence fails.
    n_iter_max : int, default=20
        Maximum number of iterations to attempt in rotation and partition
        matrix search if machine precision convergence is not reached.
random_state : int, RandomState instance, default=None
Determines random number generation for rotation matrix initialization.
Use an int to make the randomness deterministic.
See :term:`Glossary <random_state>`.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
.. [1] `Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
<https://people.eecs.berkeley.edu/~jordan/courses/281B-spring04/readings/yu-shi.pdf>`_
Notes
-----
The eigenvector embedding is used to iteratively search for the
closest discrete partition. First, the eigenvector embedding is
normalized to the space of partition matrices. An optimal discrete
partition matrix closest to this normalized embedding multiplied by
an initial rotation is calculated. Fixing this discrete partition
matrix, an optimal rotation matrix is calculated. These two
calculations are performed until convergence. The discrete partition
matrix is returned as the clustering solution. Used in spectral
clustering, this method tends to be faster and more robust to random
initialization than k-means.
|
discretize
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_spectral.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_spectral.py
|
BSD-3-Clause
|
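In practice ``discretize``, like ``cluster_qr``, is selected through the estimator's ``assign_labels`` parameter; a hedged sketch comparing the three labelling strategies on made-up blobs:
import numpy as np
from sklearn.cluster import SpectralClustering

rng = np.random.RandomState(0)
X = np.vstack([rng.randn(20, 2) + [5, 5], rng.randn(20, 2) - [5, 5]])

for strategy in ("kmeans", "discretize", "cluster_qr"):
    labels = SpectralClustering(
        n_clusters=2, assign_labels=strategy, random_state=0
    ).fit(X).labels_
    # each strategy should recover the same two blobs, up to label permutation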
def spectral_clustering(
affinity,
*,
n_clusters=8,
n_components=None,
eigen_solver=None,
random_state=None,
n_init=10,
eigen_tol="auto",
assign_labels="kmeans",
verbose=False,
):
"""Apply clustering to a projection of the normalized Laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance, when clusters are
nested circles on the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts [1]_, [2]_.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
----------
affinity : {array-like, sparse matrix} of shape (n_samples, n_samples)
The affinity matrix describing the relationship of the samples to
embed. **Must be symmetric**.
Possible examples:
- adjacency matrix of a graph,
- heat kernel of the pairwise distance matrix of the samples,
- symmetric k-nearest neighbours connectivity matrix of the samples.
    n_clusters : int, default=8
Number of clusters to extract.
n_components : int, default=n_clusters
Number of eigenvectors to use for the spectral embedding.
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition method. If None then ``'arpack'`` is used.
See [4]_ for more details regarding ``'lobpcg'``.
Eigensolver ``'amg'`` runs ``'lobpcg'`` with optional
Algebraic MultiGrid preconditioning and requires pyamg to be installed.
It can be faster on very large sparse problems [6]_ and [7]_.
random_state : int, RandomState instance, default=None
A pseudo random number generator used for the initialization
of the lobpcg eigenvectors decomposition when `eigen_solver ==
'amg'`, and for the K-Means initialization. Use an int to make
the results deterministic across calls (See
:term:`Glossary <random_state>`).
.. note::
When using `eigen_solver == 'amg'`,
it is necessary to also fix the global numpy seed with
`np.random.seed(int)` to get deterministic results. See
https://github.com/pyamg/pyamg/issues/139 for further
information.
n_init : int, default=10
        Number of times the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of n_init
consecutive runs in terms of inertia. Only used if
``assign_labels='kmeans'``.
eigen_tol : float, default="auto"
Stopping criterion for eigendecomposition of the Laplacian matrix.
If `eigen_tol="auto"` then the passed tolerance will depend on the
`eigen_solver`:
- If `eigen_solver="arpack"`, then `eigen_tol=0.0`;
- If `eigen_solver="lobpcg"` or `eigen_solver="amg"`, then
`eigen_tol=None` which configures the underlying `lobpcg` solver to
automatically resolve the value according to their heuristics. See,
:func:`scipy.sparse.linalg.lobpcg` for details.
Note that when using `eigen_solver="lobpcg"` or `eigen_solver="amg"`
values of `tol<1e-5` may lead to convergence issues and should be
avoided.
.. versionadded:: 1.2
Added 'auto' option.
assign_labels : {'kmeans', 'discretize', 'cluster_qr'}, default='kmeans'
The strategy to use to assign labels in the embedding
space. There are three ways to assign labels after the Laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another
approach which is less sensitive to random initialization [3]_.
The cluster_qr method [5]_ directly extracts clusters from eigenvectors
in spectral clustering. In contrast to k-means and discretization, cluster_qr
has no tuning parameters and is not an iterative method, yet may outperform
k-means and discretization in terms of both quality and speed. For a detailed
comparison of clustering strategies, refer to the following example:
:ref:`sphx_glr_auto_examples_cluster_plot_coin_segmentation.py`.
.. versionchanged:: 1.1
Added new labeling method 'cluster_qr'.
verbose : bool, default=False
Verbosity mode.
.. versionadded:: 0.24
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
Notes
-----
    The graph should contain only one connected component; otherwise
    the results make little sense.
This algorithm solves the normalized cut for `k=2`: it is a
normalized spectral clustering.
References
----------
.. [1] :doi:`Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
<10.1109/34.868688>`
.. [2] :doi:`A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
<10.1007/s11222-007-9033-z>`
.. [3] `Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
<https://people.eecs.berkeley.edu/~jordan/courses/281B-spring04/readings/yu-shi.pdf>`_
.. [4] :doi:`Toward the Optimal Preconditioned Eigensolver:
Locally Optimal Block Preconditioned Conjugate Gradient Method, 2001
A. V. Knyazev
SIAM Journal on Scientific Computing 23, no. 2, pp. 517-541.
<10.1137/S1064827500366124>`
.. [5] :doi:`Simple, direct, and efficient multi-way spectral clustering, 2019
Anil Damle, Victor Minden, Lexing Ying
<10.1093/imaiai/iay008>`
.. [6] :doi:`Multiscale Spectral Image Segmentation Multiscale preconditioning
for computing eigenvalues of graph Laplacians in image segmentation, 2006
Andrew Knyazev
<10.13140/RG.2.2.35280.02565>`
.. [7] :doi:`Preconditioned spectral clustering for stochastic block partition
streaming graph challenge (Preliminary version at arXiv.)
David Zhuzhunashvili, Andrew Knyazev
<10.1109/HPEC.2017.8091045>`
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics.pairwise import pairwise_kernels
>>> from sklearn.cluster import spectral_clustering
>>> X = np.array([[1, 1], [2, 1], [1, 0],
... [4, 7], [3, 5], [3, 6]])
>>> affinity = pairwise_kernels(X, metric='rbf')
>>> spectral_clustering(
... affinity=affinity, n_clusters=2, assign_labels="discretize", random_state=0
... )
array([1, 1, 1, 0, 0, 0])
"""
clusterer = SpectralClustering(
n_clusters=n_clusters,
n_components=n_components,
eigen_solver=eigen_solver,
random_state=random_state,
n_init=n_init,
affinity="precomputed",
eigen_tol=eigen_tol,
assign_labels=assign_labels,
verbose=verbose,
).fit(affinity)
return clusterer.labels_
|
Apply clustering to a projection of the normalized Laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance, when clusters are
nested circles on the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts [1]_, [2]_.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
----------
affinity : {array-like, sparse matrix} of shape (n_samples, n_samples)
The affinity matrix describing the relationship of the samples to
embed. **Must be symmetric**.
Possible examples:
- adjacency matrix of a graph,
- heat kernel of the pairwise distance matrix of the samples,
- symmetric k-nearest neighbours connectivity matrix of the samples.
    n_clusters : int, default=8
Number of clusters to extract.
n_components : int, default=n_clusters
Number of eigenvectors to use for the spectral embedding.
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition method. If None then ``'arpack'`` is used.
See [4]_ for more details regarding ``'lobpcg'``.
Eigensolver ``'amg'`` runs ``'lobpcg'`` with optional
Algebraic MultiGrid preconditioning and requires pyamg to be installed.
It can be faster on very large sparse problems [6]_ and [7]_.
random_state : int, RandomState instance, default=None
A pseudo random number generator used for the initialization
of the lobpcg eigenvectors decomposition when `eigen_solver ==
'amg'`, and for the K-Means initialization. Use an int to make
the results deterministic across calls (See
:term:`Glossary <random_state>`).
.. note::
When using `eigen_solver == 'amg'`,
it is necessary to also fix the global numpy seed with
`np.random.seed(int)` to get deterministic results. See
https://github.com/pyamg/pyamg/issues/139 for further
information.
n_init : int, default=10
        Number of times the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of n_init
consecutive runs in terms of inertia. Only used if
``assign_labels='kmeans'``.
eigen_tol : float, default="auto"
Stopping criterion for eigendecomposition of the Laplacian matrix.
If `eigen_tol="auto"` then the passed tolerance will depend on the
`eigen_solver`:
- If `eigen_solver="arpack"`, then `eigen_tol=0.0`;
- If `eigen_solver="lobpcg"` or `eigen_solver="amg"`, then
`eigen_tol=None` which configures the underlying `lobpcg` solver to
automatically resolve the value according to their heuristics. See,
:func:`scipy.sparse.linalg.lobpcg` for details.
Note that when using `eigen_solver="lobpcg"` or `eigen_solver="amg"`
values of `tol<1e-5` may lead to convergence issues and should be
avoided.
.. versionadded:: 1.2
Added 'auto' option.
assign_labels : {'kmeans', 'discretize', 'cluster_qr'}, default='kmeans'
The strategy to use to assign labels in the embedding
space. There are three ways to assign labels after the Laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another
approach which is less sensitive to random initialization [3]_.
The cluster_qr method [5]_ directly extracts clusters from eigenvectors
in spectral clustering. In contrast to k-means and discretization, cluster_qr
has no tuning parameters and is not an iterative method, yet may outperform
k-means and discretization in terms of both quality and speed. For a detailed
comparison of clustering strategies, refer to the following example:
:ref:`sphx_glr_auto_examples_cluster_plot_coin_segmentation.py`.
.. versionchanged:: 1.1
Added new labeling method 'cluster_qr'.
verbose : bool, default=False
Verbosity mode.
.. versionadded:: 0.24
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
Notes
-----
    The graph should contain only one connected component; otherwise
    the results make little sense.
This algorithm solves the normalized cut for `k=2`: it is a
normalized spectral clustering.
References
----------
.. [1] :doi:`Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
<10.1109/34.868688>`
.. [2] :doi:`A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
<10.1007/s11222-007-9033-z>`
.. [3] `Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
<https://people.eecs.berkeley.edu/~jordan/courses/281B-spring04/readings/yu-shi.pdf>`_
.. [4] :doi:`Toward the Optimal Preconditioned Eigensolver:
Locally Optimal Block Preconditioned Conjugate Gradient Method, 2001
A. V. Knyazev
SIAM Journal on Scientific Computing 23, no. 2, pp. 517-541.
<10.1137/S1064827500366124>`
.. [5] :doi:`Simple, direct, and efficient multi-way spectral clustering, 2019
Anil Damle, Victor Minden, Lexing Ying
<10.1093/imaiai/iay008>`
.. [6] :doi:`Multiscale Spectral Image Segmentation Multiscale preconditioning
for computing eigenvalues of graph Laplacians in image segmentation, 2006
Andrew Knyazev
<10.13140/RG.2.2.35280.02565>`
.. [7] :doi:`Preconditioned spectral clustering for stochastic block partition
streaming graph challenge (Preliminary version at arXiv.)
David Zhuzhunashvili, Andrew Knyazev
<10.1109/HPEC.2017.8091045>`
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics.pairwise import pairwise_kernels
>>> from sklearn.cluster import spectral_clustering
>>> X = np.array([[1, 1], [2, 1], [1, 0],
... [4, 7], [3, 5], [3, 6]])
>>> affinity = pairwise_kernels(X, metric='rbf')
>>> spectral_clustering(
... affinity=affinity, n_clusters=2, assign_labels="discretize", random_state=0
... )
array([1, 1, 1, 0, 0, 0])
|
spectral_clustering
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_spectral.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_spectral.py
|
BSD-3-Clause
|
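As the body above shows, ``spectral_clustering`` is a thin wrapper around the estimator with ``affinity='precomputed'``; a hedged sketch of the equivalent estimator call on the docstring's toy data:
import numpy as np
from sklearn.cluster import SpectralClustering
from sklearn.metrics.pairwise import pairwise_kernels

X = np.array([[1, 1], [2, 1], [1, 0],
              [4, 7], [3, 5], [3, 6]])
affinity = pairwise_kernels(X, metric="rbf")
model = SpectralClustering(
    n_clusters=2, affinity="precomputed",
    assign_labels="discretize", random_state=0,
).fit(affinity)
model.labels_  # same assignment as the spectral_clustering(...) call above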
def fit(self, X, y=None):
"""Perform spectral clustering from features, or affinity matrix.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
(n_samples, n_samples)
Training instances to cluster, similarities / affinities between
instances if ``affinity='precomputed'``, or distances between
            instances if ``affinity='precomputed_nearest_neighbors'``. If a
sparse matrix is provided in a format other than ``csr_matrix``,
``csc_matrix``, or ``coo_matrix``, it will be converted into a
sparse ``csr_matrix``.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self : object
A fitted instance of the estimator.
"""
X = validate_data(
self,
X,
accept_sparse=["csr", "csc", "coo"],
dtype=np.float64,
ensure_min_samples=2,
)
allow_squared = self.affinity in [
"precomputed",
"precomputed_nearest_neighbors",
]
if X.shape[0] == X.shape[1] and not allow_squared:
warnings.warn(
"The spectral clustering API has changed. ``fit``"
"now constructs an affinity matrix from data. To use"
" a custom affinity matrix, "
"set ``affinity=precomputed``."
)
if self.affinity == "nearest_neighbors":
connectivity = kneighbors_graph(
X, n_neighbors=self.n_neighbors, include_self=True, n_jobs=self.n_jobs
)
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
elif self.affinity == "precomputed_nearest_neighbors":
estimator = NearestNeighbors(
n_neighbors=self.n_neighbors, n_jobs=self.n_jobs, metric="precomputed"
).fit(X)
connectivity = estimator.kneighbors_graph(X=X, mode="connectivity")
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
elif self.affinity == "precomputed":
self.affinity_matrix_ = X
else:
params = self.kernel_params
if params is None:
params = {}
if not callable(self.affinity):
params["gamma"] = self.gamma
params["degree"] = self.degree
params["coef0"] = self.coef0
self.affinity_matrix_ = pairwise_kernels(
X, metric=self.affinity, filter_params=True, **params
)
random_state = check_random_state(self.random_state)
n_components = (
self.n_clusters if self.n_components is None else self.n_components
)
# We now obtain the real valued solution matrix to the
# relaxed Ncut problem, solving the eigenvalue problem
# L_sym x = lambda x and recovering u = D^-1/2 x.
# The first eigenvector is constant only for fully connected graphs
# and should be kept for spectral clustering (drop_first = False)
# See spectral_embedding documentation.
maps = _spectral_embedding(
self.affinity_matrix_,
n_components=n_components,
eigen_solver=self.eigen_solver,
random_state=random_state,
eigen_tol=self.eigen_tol,
drop_first=False,
)
if self.verbose:
print(f"Computing label assignment using {self.assign_labels}")
if self.assign_labels == "kmeans":
_, self.labels_, _ = k_means(
maps,
self.n_clusters,
random_state=random_state,
n_init=self.n_init,
verbose=self.verbose,
)
elif self.assign_labels == "cluster_qr":
self.labels_ = cluster_qr(maps)
else:
self.labels_ = discretize(maps, random_state=random_state)
return self
|
Perform spectral clustering from features, or affinity matrix.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_samples)
Training instances to cluster, similarities / affinities between
instances if ``affinity='precomputed'``, or distances between
            instances if ``affinity='precomputed_nearest_neighbors'``. If a
sparse matrix is provided in a format other than ``csr_matrix``,
``csc_matrix``, or ``coo_matrix``, it will be converted into a
sparse ``csr_matrix``.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self : object
A fitted instance of the estimator.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/_spectral.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_spectral.py
|
BSD-3-Clause
|
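A hedged sketch of the nearest-neighbours branch of ``fit``: the k-NN connectivity graph is symmetrised into ``affinity_matrix_`` before the spectral embedding is computed. The data below is made up for illustration.
import numpy as np
from sklearn.cluster import SpectralClustering

rng = np.random.RandomState(0)
X = rng.randn(60, 2)

model = SpectralClustering(
    n_clusters=2, affinity="nearest_neighbors", n_neighbors=10, random_state=0
).fit(X)
model.affinity_matrix_  # sparse, symmetrised 0.5 * (knn_graph + knn_graph.T)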
def test_affinity_propagation(global_random_seed, global_dtype):
"""Test consistency of the affinity propagations."""
S = -euclidean_distances(X.astype(global_dtype, copy=False), squared=True)
preference = np.median(S) * 10
cluster_centers_indices, labels = affinity_propagation(
S, preference=preference, random_state=global_random_seed
)
n_clusters_ = len(cluster_centers_indices)
assert n_clusters == n_clusters_
|
Test consistency of the affinity propagations.
|
test_affinity_propagation
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_affinity_propagation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_affinity_propagation.py
|
BSD-3-Clause
|
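The affinity-propagation tests in this file rely on module-level fixtures (``X``, ``n_clusters``) created at the top of the test module; a hedged sketch of such a setup (the exact blob parameters here are assumptions, not the file's actual values):
import numpy as np
from sklearn.datasets import make_blobs

# assumed module-level fixtures used by the tests below
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(
    n_samples=60, n_features=2, centers=centers,
    cluster_std=0.4, shuffle=True, random_state=0,
)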
def test_affinity_propagation_precomputed():
"""Check equality of precomputed affinity matrix to internally computed affinity
matrix.
"""
S = -euclidean_distances(X, squared=True)
preference = np.median(S) * 10
af = AffinityPropagation(
preference=preference, affinity="precomputed", random_state=28
)
labels_precomputed = af.fit(S).labels_
af = AffinityPropagation(preference=preference, verbose=True, random_state=37)
labels = af.fit(X).labels_
assert_array_equal(labels, labels_precomputed)
cluster_centers_indices = af.cluster_centers_indices_
n_clusters_ = len(cluster_centers_indices)
assert np.unique(labels).size == n_clusters_
assert n_clusters == n_clusters_
|
Check equality of precomputed affinity matrix to internally computed affinity
matrix.
|
test_affinity_propagation_precomputed
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_affinity_propagation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_affinity_propagation.py
|
BSD-3-Clause
|
def test_affinity_propagation_no_copy():
"""Check behaviour of not copying the input data."""
S = -euclidean_distances(X, squared=True)
S_original = S.copy()
preference = np.median(S) * 10
assert not np.allclose(S.diagonal(), preference)
# with copy=True S should not be modified
affinity_propagation(S, preference=preference, copy=True, random_state=0)
assert_allclose(S, S_original)
assert not np.allclose(S.diagonal(), preference)
assert_allclose(S.diagonal(), np.zeros(S.shape[0]))
# with copy=False S will be modified inplace
affinity_propagation(S, preference=preference, copy=False, random_state=0)
assert_allclose(S.diagonal(), preference)
# test that copy=True and copy=False lead to the same result
S = S_original.copy()
af = AffinityPropagation(preference=preference, verbose=True, random_state=0)
labels = af.fit(X).labels_
_, labels_no_copy = affinity_propagation(
S, preference=preference, copy=False, random_state=74
)
assert_array_equal(labels, labels_no_copy)
|
Check behaviour of not copying the input data.
|
test_affinity_propagation_no_copy
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_affinity_propagation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_affinity_propagation.py
|
BSD-3-Clause
|
def test_affinity_propagation_affinity_shape():
"""Check the shape of the affinity matrix when using `affinity_propagation."""
S = -euclidean_distances(X, squared=True)
err_msg = "The matrix of similarities must be a square array"
with pytest.raises(ValueError, match=err_msg):
affinity_propagation(S[:, :-1])
|
Check the shape of the affinity matrix when using `affinity_propagation`.
|
test_affinity_propagation_affinity_shape
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_affinity_propagation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_affinity_propagation.py
|
BSD-3-Clause
|
def test_affinity_propagation_random_state():
"""Check that different random states lead to different initialisations
by looking at the center locations after two iterations.
"""
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(
n_samples=300, centers=centers, cluster_std=0.5, random_state=0
)
# random_state = 0
ap = AffinityPropagation(convergence_iter=1, max_iter=2, random_state=0)
ap.fit(X)
centers0 = ap.cluster_centers_
# random_state = 76
ap = AffinityPropagation(convergence_iter=1, max_iter=2, random_state=76)
ap.fit(X)
centers76 = ap.cluster_centers_
# check that the centers have not yet converged to the same solution
assert np.mean((centers0 - centers76) ** 2) > 1
|
Check that different random states lead to different initialisations
by looking at the center locations after two iterations.
|
test_affinity_propagation_random_state
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_affinity_propagation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_affinity_propagation.py
|
BSD-3-Clause
|
def test_affinity_propagation_convergence_warning_dense_sparse(container, global_dtype):
"""
Check that having sparse or dense `centers` format should not
influence the convergence.
Non-regression test for gh-13334.
"""
centers = container(np.zeros((1, 10)))
rng = np.random.RandomState(42)
X = rng.rand(40, 10).astype(global_dtype, copy=False)
y = (4 * rng.rand(40)).astype(int)
ap = AffinityPropagation(random_state=46)
ap.fit(X, y)
ap.cluster_centers_ = centers
with warnings.catch_warnings():
warnings.simplefilter("error", ConvergenceWarning)
assert_array_equal(ap.predict(X), np.zeros(X.shape[0], dtype=int))
|
Check that having sparse or dense `centers` format should not
influence the convergence.
Non-regression test for gh-13334.
|
test_affinity_propagation_convergence_warning_dense_sparse
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_affinity_propagation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_affinity_propagation.py
|
BSD-3-Clause
|
def test_affinity_propagation_equal_points():
"""Make sure we do not assign multiple clusters to equal points.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/pull/20043
"""
X = np.zeros((8, 1))
af = AffinityPropagation(affinity="euclidean", damping=0.5, random_state=42).fit(X)
assert np.all(af.labels_ == 0)
|
Make sure we do not assign multiple clusters to equal points.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/pull/20043
|
test_affinity_propagation_equal_points
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_affinity_propagation.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_affinity_propagation.py
|
BSD-3-Clause
|
def _do_scale_test(scaled):
"""Check that rows sum to one constant, and columns to another."""
row_sum = scaled.sum(axis=1)
col_sum = scaled.sum(axis=0)
if issparse(scaled):
row_sum = np.asarray(row_sum).squeeze()
col_sum = np.asarray(col_sum).squeeze()
assert_array_almost_equal(row_sum, np.tile(row_sum.mean(), 100), decimal=1)
assert_array_almost_equal(col_sum, np.tile(col_sum.mean(), 100), decimal=1)
|
Check that rows sum to one constant, and columns to another.
|
_do_scale_test
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_bicluster.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_bicluster.py
|
BSD-3-Clause
|
def _do_bistochastic_test(scaled):
"""Check that rows and columns sum to the same constant."""
_do_scale_test(scaled)
assert_almost_equal(scaled.sum(axis=0).mean(), scaled.sum(axis=1).mean(), decimal=1)
|
Check that rows and columns sum to the same constant.
|
_do_bistochastic_test
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_bicluster.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_bicluster.py
|
BSD-3-Clause
|
def check_threshold(birch_instance, threshold):
"""Use the leaf linked list for traversal"""
current_leaf = birch_instance.dummy_leaf_.next_leaf_
while current_leaf:
subclusters = current_leaf.subclusters_
for sc in subclusters:
assert threshold >= sc.radius
current_leaf = current_leaf.next_leaf_
|
Use the leaf linked list for traversal
|
check_threshold
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_birch.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_birch.py
|
BSD-3-Clause
|
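A hedged sketch of how this Birch test helper would be exercised, assuming ``check_threshold`` is in scope: fit a model with a given threshold and let the helper walk the leaf linked list to verify every subcluster radius.
import numpy as np
from sklearn.cluster import Birch

rng = np.random.RandomState(0)
X = rng.rand(50, 2)  # made-up data for illustration

threshold = 0.3
brc = Birch(threshold=threshold, n_clusters=None).fit(X)
check_threshold(brc, threshold)  # traverses brc.dummy_leaf_.next_leaf_ internally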
def test_both_subclusters_updated():
"""Check that both subclusters are updated when a node a split, even when there are
duplicated data points. Non-regression test for #23269.
"""
X = np.array(
[
[-2.6192791, -1.5053215],
[-2.9993038, -1.6863596],
[-2.3724914, -1.3438171],
[-2.336792, -1.3417323],
[-2.4089134, -1.3290224],
[-2.3724914, -1.3438171],
[-3.364009, -1.8846745],
[-2.3724914, -1.3438171],
[-2.617677, -1.5003285],
[-2.2960556, -1.3260119],
[-2.3724914, -1.3438171],
[-2.5459878, -1.4533926],
[-2.25979, -1.3003055],
[-2.4089134, -1.3290224],
[-2.3724914, -1.3438171],
[-2.4089134, -1.3290224],
[-2.5459878, -1.4533926],
[-2.3724914, -1.3438171],
[-2.9720619, -1.7058647],
[-2.336792, -1.3417323],
[-2.3724914, -1.3438171],
],
dtype=np.float32,
)
# no error
Birch(branching_factor=5, threshold=1e-5, n_clusters=None).fit(X)
|
Check that both subclusters are updated when a node is split, even when there are
duplicated data points. Non-regression test for #23269.
|
test_both_subclusters_updated
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_birch.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_birch.py
|
BSD-3-Clause
|
def test_three_clusters(bisecting_strategy, init):
"""Tries to perform bisect k-means for three clusters to check
if splitting data is performed correctly.
"""
X = np.array(
[[1, 1], [10, 1], [3, 1], [10, 0], [2, 1], [10, 2], [10, 8], [10, 9], [10, 10]]
)
bisect_means = BisectingKMeans(
n_clusters=3,
random_state=0,
bisecting_strategy=bisecting_strategy,
init=init,
)
bisect_means.fit(X)
expected_centers = [[2, 1], [10, 1], [10, 9]]
expected_labels = [0, 1, 0, 1, 0, 1, 2, 2, 2]
assert_allclose(
sorted(expected_centers), sorted(bisect_means.cluster_centers_.tolist())
)
assert_allclose(v_measure_score(expected_labels, bisect_means.labels_), 1.0)
|
Run bisecting k-means with three clusters and check that the data is
    split correctly.
|
test_three_clusters
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_bisect_k_means.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_bisect_k_means.py
|
BSD-3-Clause
|
def test_sparse(csr_container):
"""Test Bisecting K-Means with sparse data.
Checks if labels and centers are the same between dense and sparse.
"""
rng = np.random.RandomState(0)
X = rng.rand(20, 2)
X[X < 0.8] = 0
X_csr = csr_container(X)
bisect_means = BisectingKMeans(n_clusters=3, random_state=0)
bisect_means.fit(X_csr)
sparse_centers = bisect_means.cluster_centers_
bisect_means.fit(X)
normal_centers = bisect_means.cluster_centers_
# Check if results is the same for dense and sparse data
assert_allclose(normal_centers, sparse_centers, atol=1e-8)
|
Test Bisecting K-Means with sparse data.
Checks if labels and centers are the same between dense and sparse.
|
test_sparse
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_bisect_k_means.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_bisect_k_means.py
|
BSD-3-Clause
|
def test_n_clusters(n_clusters):
"""Test if resulting labels are in range [0, n_clusters - 1]."""
rng = np.random.RandomState(0)
X = rng.rand(10, 2)
bisect_means = BisectingKMeans(n_clusters=n_clusters, random_state=0)
bisect_means.fit(X)
assert_array_equal(np.unique(bisect_means.labels_), np.arange(n_clusters))
|
Test if resulting labels are in range [0, n_clusters - 1].
|
test_n_clusters
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_bisect_k_means.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_bisect_k_means.py
|
BSD-3-Clause
|
def test_fit_predict(csr_container):
"""Check if labels from fit(X) method are same as from fit(X).predict(X)."""
rng = np.random.RandomState(0)
X = rng.rand(10, 2)
if csr_container is not None:
X[X < 0.8] = 0
X = csr_container(X)
bisect_means = BisectingKMeans(n_clusters=3, random_state=0)
bisect_means.fit(X)
assert_array_equal(bisect_means.labels_, bisect_means.predict(X))
|
Check if labels from fit(X) method are same as from fit(X).predict(X).
|
test_fit_predict
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_bisect_k_means.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_bisect_k_means.py
|
BSD-3-Clause
|
def test_dtype_preserved(csr_container, global_dtype):
"""Check that centers dtype is the same as input data dtype."""
rng = np.random.RandomState(0)
X = rng.rand(10, 2).astype(global_dtype, copy=False)
if csr_container is not None:
X[X < 0.8] = 0
X = csr_container(X)
km = BisectingKMeans(n_clusters=3, random_state=0)
km.fit(X)
assert km.cluster_centers_.dtype == global_dtype
|
Check that centers dtype is the same as input data dtype.
|
test_dtype_preserved
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_bisect_k_means.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_bisect_k_means.py
|
BSD-3-Clause
|
def test_float32_float64_equivalence(csr_container):
"""Check that the results are the same between float32 and float64."""
rng = np.random.RandomState(0)
X = rng.rand(10, 2)
if csr_container is not None:
X[X < 0.8] = 0
X = csr_container(X)
km64 = BisectingKMeans(n_clusters=3, random_state=0).fit(X)
km32 = BisectingKMeans(n_clusters=3, random_state=0).fit(X.astype(np.float32))
assert_allclose(km32.cluster_centers_, km64.cluster_centers_)
assert_array_equal(km32.labels_, km64.labels_)
|
Check that the results are the same between float32 and float64.
|
test_float32_float64_equivalence
|
python
|
scikit-learn/scikit-learn
|
sklearn/cluster/tests/test_bisect_k_means.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/tests/test_bisect_k_means.py
|
BSD-3-Clause
|