Dataset columns (from the dataset viewer header):
- code : string, length 66 to 870k
- docstring : string, length 19 to 26.7k
- func_name : string, length 1 to 138
- language : string, 1 distinct value
- repo : string, length 7 to 68
- path : string, length 5 to 324
- url : string, length 46 to 389
- license : string, 7 distinct values
def check_random_state(seed):
    """Turn seed into a np.random.RandomState instance.

    Parameters
    ----------
    seed : None, int or instance of RandomState
        If seed is None, return the RandomState singleton used by np.random.
        If seed is an int, return a new RandomState instance seeded with seed.
        If seed is already a RandomState instance, return it.
        Otherwise raise ValueError.

    Returns
    -------
    :class:`numpy:numpy.random.RandomState`
        The random state object based on `seed` parameter.

    Examples
    --------
    >>> from sklearn.utils.validation import check_random_state
    >>> check_random_state(42)
    RandomState(MT19937) at 0x...
    """
    if seed is None or seed is np.random:
        return np.random.mtrand._rand
    if isinstance(seed, numbers.Integral):
        return np.random.RandomState(seed)
    if isinstance(seed, np.random.RandomState):
        return seed
    raise ValueError(
        "%r cannot be used to seed a numpy.random.RandomState instance" % seed
    )
Turn seed into a np.random.RandomState instance. Parameters ---------- seed : None, int or instance of RandomState If seed is None, return the RandomState singleton used by np.random. If seed is an int, return a new RandomState instance seeded with seed. If seed is already a RandomState instance, return it. Otherwise raise ValueError. Returns ------- :class:`numpy:numpy.random.RandomState` The random state object based on `seed` parameter. Examples -------- >>> from sklearn.utils.validation import check_random_state >>> check_random_state(42) RandomState(MT19937) at 0x...
check_random_state
python
scikit-learn/scikit-learn
sklearn/utils/validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
BSD-3-Clause
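A minimal usage sketch of the three accepted seed forms, via the public re-export sklearn.utils.check_random_state (the `_rand` singleton check mirrors the None branch above):

import numpy as np
from sklearn.utils import check_random_state

# int seed -> a new, deterministic RandomState each call
rs_a = check_random_state(42)
rs_b = check_random_state(42)
assert rs_a is not rs_b
assert rs_a.randint(100) == rs_b.randint(100)

# an existing RandomState passes through unchanged
assert check_random_state(rs_a) is rs_a

# None -> the singleton behind np.random
assert check_random_state(None) is np.random.mtrand._rand

# anything else raises ValueError
try:
    check_random_state("not a seed")
except ValueError as exc:
    print(exc)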
def has_fit_parameter(estimator, parameter):
    """Check whether the estimator's fit method supports the given parameter.

    Parameters
    ----------
    estimator : object
        An estimator to inspect.

    parameter : str
        The searched parameter.

    Returns
    -------
    is_parameter : bool
        Whether the parameter was found to be a named parameter of the
        estimator's fit method.

    Examples
    --------
    >>> from sklearn.svm import SVC
    >>> from sklearn.utils.validation import has_fit_parameter
    >>> has_fit_parameter(SVC(), "sample_weight")
    True
    """
    return (
        # This is used during test collection in common tests. The
        # hasattr(estimator, "fit") makes it so that we don't fail for an estimator
        # that does not have a `fit` method during collection of checks. The right
        # checks will fail later.
        hasattr(estimator, "fit") and parameter in signature(estimator.fit).parameters
    )
Check whether the estimator's fit method supports the given parameter. Parameters ---------- estimator : object An estimator to inspect. parameter : str The searched parameter. Returns ------- is_parameter : bool Whether the parameter was found to be a named parameter of the estimator's fit method. Examples -------- >>> from sklearn.svm import SVC >>> from sklearn.utils.validation import has_fit_parameter >>> has_fit_parameter(SVC(), "sample_weight") True
has_fit_parameter
python
scikit-learn/scikit-learn
sklearn/utils/validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
BSD-3-Clause
def check_symmetric(array, *, tol=1e-10, raise_warning=True, raise_exception=False):
    """Make sure that array is 2D, square and symmetric.

    If the array is not symmetric, then a symmetrized version is returned.
    Optionally, a warning or exception is raised if the matrix is not
    symmetric.

    Parameters
    ----------
    array : {ndarray, sparse matrix}
        Input object to check / convert. Must be two-dimensional and square,
        otherwise a ValueError will be raised.

    tol : float, default=1e-10
        Absolute tolerance for equivalence of arrays. Default = 1E-10.

    raise_warning : bool, default=True
        If True then raise a warning if conversion is required.

    raise_exception : bool, default=False
        If True then raise an exception if array is not symmetric.

    Returns
    -------
    array_sym : {ndarray, sparse matrix}
        Symmetrized version of the input array, i.e. the average of array
        and array.transpose(). If sparse, then duplicate entries are first
        summed and zeros are eliminated.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.utils.validation import check_symmetric
    >>> symmetric_array = np.array([[0, 1, 2], [1, 0, 1], [2, 1, 0]])
    >>> check_symmetric(symmetric_array)
    array([[0, 1, 2],
           [1, 0, 1],
           [2, 1, 0]])
    >>> from scipy.sparse import csr_matrix
    >>> sparse_symmetric_array = csr_matrix(symmetric_array)
    >>> check_symmetric(sparse_symmetric_array)
    <Compressed Sparse Row sparse matrix of dtype 'int64'
        with 6 stored elements and shape (3, 3)>
    """
    if (array.ndim != 2) or (array.shape[0] != array.shape[1]):
        raise ValueError(
            "array must be 2-dimensional and square. shape = {0}".format(array.shape)
        )

    if sp.issparse(array):
        diff = array - array.T
        # only csr, csc, and coo have `data` attribute
        if diff.format not in ["csr", "csc", "coo"]:
            diff = diff.tocsr()
        symmetric = np.all(abs(diff.data) < tol)
    else:
        symmetric = np.allclose(array, array.T, atol=tol)

    if not symmetric:
        if raise_exception:
            raise ValueError("Array must be symmetric")
        if raise_warning:
            warnings.warn(
                (
                    "Array is not symmetric, and will be converted "
                    "to symmetric by average with its transpose."
                ),
                stacklevel=2,
            )
        if sp.issparse(array):
            conversion = "to" + array.format
            array = getattr(0.5 * (array + array.T), conversion)()
        else:
            array = 0.5 * (array + array.T)

    return array
Make sure that array is 2D, square and symmetric. If the array is not symmetric, then a symmetrized version is returned. Optionally, a warning or exception is raised if the matrix is not symmetric. Parameters ---------- array : {ndarray, sparse matrix} Input object to check / convert. Must be two-dimensional and square, otherwise a ValueError will be raised. tol : float, default=1e-10 Absolute tolerance for equivalence of arrays. Default = 1E-10. raise_warning : bool, default=True If True then raise a warning if conversion is required. raise_exception : bool, default=False If True then raise an exception if array is not symmetric. Returns ------- array_sym : {ndarray, sparse matrix} Symmetrized version of the input array, i.e. the average of array and array.transpose(). If sparse, then duplicate entries are first summed and zeros are eliminated. Examples -------- >>> import numpy as np >>> from sklearn.utils.validation import check_symmetric >>> symmetric_array = np.array([[0, 1, 2], [1, 0, 1], [2, 1, 0]]) >>> check_symmetric(symmetric_array) array([[0, 1, 2], [1, 0, 1], [2, 1, 0]]) >>> from scipy.sparse import csr_matrix >>> sparse_symmetric_array = csr_matrix(symmetric_array) >>> check_symmetric(sparse_symmetric_array) <Compressed Sparse Row sparse matrix of dtype 'int64' with 6 stored elements and shape (3, 3)>
check_symmetric
python
scikit-learn/scikit-learn
sklearn/utils/validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
BSD-3-Clause
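A small sketch of the symmetrization path: an asymmetric input comes back as the average of the array and its transpose, warning by default and raising only on request (check_symmetric is public in sklearn.utils.validation):

import warnings
import numpy as np
from sklearn.utils.validation import check_symmetric

asym = np.array([[0.0, 2.0], [0.0, 0.0]])

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    fixed = check_symmetric(asym)       # averaged with its transpose
print(fixed)                            # [[0. 1.] [1. 0.]]
print(caught[0].category.__name__)      # UserWarning

# raise_exception=True turns the silent fix into a hard failure
try:
    check_symmetric(asym, raise_exception=True)
except ValueError as exc:
    print(exc)                          # Array must be symmetric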
def _is_fitted(estimator, attributes=None, all_or_any=all):
    """Determine if an estimator is fitted.

    Parameters
    ----------
    estimator : estimator instance
        Estimator instance for which the check is performed.

    attributes : str, list or tuple of str, default=None
        Attribute name(s) given as string or a list/tuple of strings
        E.g.: ``["coef_", "estimator_", ...], "coef_"``

        If `None`, `estimator` is considered fitted if there exists an
        attribute that ends with an underscore and does not start with a
        double underscore.

    all_or_any : callable, {all, any}, default=all
        Specify whether all or any of the given attributes must exist.

    Returns
    -------
    fitted : bool
        Whether the estimator is fitted.
    """
    if attributes is not None:
        if not isinstance(attributes, (list, tuple)):
            attributes = [attributes]
        return all_or_any([hasattr(estimator, attr) for attr in attributes])

    if hasattr(estimator, "__sklearn_is_fitted__"):
        return estimator.__sklearn_is_fitted__()

    fitted_attrs = [
        v for v in vars(estimator) if v.endswith("_") and not v.startswith("__")
    ]
    return len(fitted_attrs) > 0
Determine if an estimator is fitted. Parameters ---------- estimator : estimator instance Estimator instance for which the check is performed. attributes : str, list or tuple of str, default=None Attribute name(s) given as string or a list/tuple of strings E.g.: ``["coef_", "estimator_", ...], "coef_"`` If `None`, `estimator` is considered fitted if there exists an attribute that ends with an underscore and does not start with a double underscore. all_or_any : callable, {all, any}, default=all Specify whether all or any of the given attributes must exist. Returns ------- fitted : bool Whether the estimator is fitted.
_is_fitted
python
scikit-learn/scikit-learn
sklearn/utils/validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
BSD-3-Clause
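_is_fitted is private, but its default trailing-underscore heuristic is visible from outside through the public check_is_fitted. A sketch with a made-up ToyEstimator (hypothetical, for illustration only):

from sklearn.base import BaseEstimator
from sklearn.exceptions import NotFittedError
from sklearn.utils.validation import check_is_fitted

class ToyEstimator(BaseEstimator):
    def fit(self, X, y=None):
        # the trailing-underscore attribute is what marks the estimator as fitted
        self.coef_ = 0.0
        return self

est = ToyEstimator()
try:
    check_is_fitted(est)
except NotFittedError:
    print("not fitted yet")

est.fit(None)
check_is_fitted(est)  # passes silently now that coef_ exists
print("fitted")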
def check_is_fitted(estimator, attributes=None, *, msg=None, all_or_any=all):
    """Perform is_fitted validation for estimator.

    Checks if the estimator is fitted by verifying the presence of
    fitted attributes (ending with a trailing underscore) and otherwise
    raises a :class:`~sklearn.exceptions.NotFittedError` with the given message.

    If an estimator does not set any attributes with a trailing underscore, it
    can define a ``__sklearn_is_fitted__`` method returning a boolean to
    specify if the estimator is fitted or not. See
    :ref:`sphx_glr_auto_examples_developing_estimators_sklearn_is_fitted.py`
    for an example on how to use the API.

    If no `attributes` are passed, this function will pass if an estimator is
    stateless. An estimator can indicate it's stateless by setting the
    `requires_fit` tag. See :ref:`estimator_tags` for more information. Note
    that the `requires_fit` tag is ignored if `attributes` are passed.

    Parameters
    ----------
    estimator : estimator instance
        Estimator instance for which the check is performed.

    attributes : str, list or tuple of str, default=None
        Attribute name(s) given as string or a list/tuple of strings
        E.g.: ``["coef_", "estimator_", ...], "coef_"``

        If `None`, `estimator` is considered fitted if there exists an
        attribute that ends with an underscore and does not start with a
        double underscore.

    msg : str, default=None
        The default error message is, "This %(name)s instance is not fitted
        yet. Call 'fit' with appropriate arguments before using this
        estimator."

        For custom messages, if "%(name)s" is present in the message string,
        it is substituted for the estimator name.

        E.g.: "Estimator, %(name)s, must be fitted before sparsifying".

    all_or_any : callable, {all, any}, default=all
        Specify whether all or any of the given attributes must exist.

    Raises
    ------
    TypeError
        If the estimator is a class or not an estimator instance.

    NotFittedError
        If the attributes are not found.

    Examples
    --------
    >>> from sklearn.linear_model import LogisticRegression
    >>> from sklearn.utils.validation import check_is_fitted
    >>> from sklearn.exceptions import NotFittedError
    >>> lr = LogisticRegression()
    >>> try:
    ...     check_is_fitted(lr)
    ... except NotFittedError as exc:
    ...     print("Model is not fitted yet.")
    Model is not fitted yet.
    >>> lr.fit([[1, 2], [1, 3]], [1, 0])
    LogisticRegression()
    >>> check_is_fitted(lr)
    """
    if isclass(estimator):
        raise TypeError("{} is a class, not an instance.".format(estimator))
    if msg is None:
        msg = (
            "This %(name)s instance is not fitted yet. Call 'fit' with "
            "appropriate arguments before using this estimator."
        )

    if not hasattr(estimator, "fit"):
        raise TypeError("%s is not an estimator instance." % (estimator))

    tags = get_tags(estimator)

    if not tags.requires_fit and attributes is None:
        return

    if not _is_fitted(estimator, attributes, all_or_any):
        raise NotFittedError(msg % {"name": type(estimator).__name__})
Perform is_fitted validation for estimator. Checks if the estimator is fitted by verifying the presence of fitted attributes (ending with a trailing underscore) and otherwise raises a :class:`~sklearn.exceptions.NotFittedError` with the given message. If an estimator does not set any attributes with a trailing underscore, it can define a ``__sklearn_is_fitted__`` method returning a boolean to specify if the estimator is fitted or not. See :ref:`sphx_glr_auto_examples_developing_estimators_sklearn_is_fitted.py` for an example on how to use the API. If no `attributes` are passed, this function will pass if an estimator is stateless. An estimator can indicate it's stateless by setting the `requires_fit` tag. See :ref:`estimator_tags` for more information. Note that the `requires_fit` tag is ignored if `attributes` are passed. Parameters ---------- estimator : estimator instance Estimator instance for which the check is performed. attributes : str, list or tuple of str, default=None Attribute name(s) given as string or a list/tuple of strings E.g.: ``["coef_", "estimator_", ...], "coef_"`` If `None`, `estimator` is considered fitted if there exists an attribute that ends with an underscore and does not start with a double underscore. msg : str, default=None The default error message is, "This %(name)s instance is not fitted yet. Call 'fit' with appropriate arguments before using this estimator." For custom messages, if "%(name)s" is present in the message string, it is substituted for the estimator name. E.g.: "Estimator, %(name)s, must be fitted before sparsifying". all_or_any : callable, {all, any}, default=all Specify whether all or any of the given attributes must exist. Raises ------ TypeError If the estimator is a class or not an estimator instance. NotFittedError If the attributes are not found. Examples -------- >>> from sklearn.linear_model import LogisticRegression >>> from sklearn.utils.validation import check_is_fitted >>> from sklearn.exceptions import NotFittedError >>> lr = LogisticRegression() >>> try: ... check_is_fitted(lr) ... except NotFittedError as exc: ... print("Model is not fitted yet.") Model is not fitted yet. >>> lr.fit([[1, 2], [1, 3]], [1, 0]) LogisticRegression() >>> check_is_fitted(lr)
check_is_fitted
python
scikit-learn/scikit-learn
sklearn/utils/validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
BSD-3-Clause
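When an estimator keeps no trailing-underscore attributes, the docstring's escape hatch is the ``__sklearn_is_fitted__`` hook; a minimal sketch (the class name is made up):

from sklearn.base import BaseEstimator
from sklearn.exceptions import NotFittedError
from sklearn.utils.validation import check_is_fitted

class StatefulFlagEstimator(BaseEstimator):
    """Keeps fit state in a plain flag instead of *_ attributes."""

    def __init__(self):
        self._fitted = False

    def fit(self, X, y=None):
        self._fitted = True
        return self

    def __sklearn_is_fitted__(self):
        # check_is_fitted defers to this boolean
        return self._fitted

est = StatefulFlagEstimator()
try:
    check_is_fitted(est)
except NotFittedError:
    print("not fitted")

check_is_fitted(est.fit(None))
print("fitted")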
def _estimator_has(attr, *, delegates=("estimator_", "estimator")):
    """Check if we can delegate a method to the underlying estimator.

    We check the `delegates` in the order they are passed. By default, we first check
    the fitted estimator if available, otherwise we check the unfitted estimator.

    Parameters
    ----------
    attr : str
        Name of the attribute the delegate might or might not have.

    delegates : tuple of str, default=("estimator_", "estimator")
        A tuple of sub-estimator(s) to check if we can delegate the `attr` method.

    Returns
    -------
    check : function
        Function to check if the delegate has the attribute.

    Raises
    ------
    ValueError
        Raised when none of the delegates are present in the object.
    """

    def check(self):
        for delegate in delegates:
            # In meta estimators with multiple sub estimators,
            # only the attribute of the first sub estimator is checked,
            # assuming uniformity across all sub estimators.
            if hasattr(self, delegate):
                delegator = getattr(self, delegate)
                if isinstance(delegator, Sequence):
                    return getattr(delegator[0], attr)
                else:
                    return getattr(delegator, attr)

        raise ValueError(f"None of the delegates {delegates} are present in the class.")

    return check
Check if we can delegate a method to the underlying estimator. We check the `delegates` in the order they are passed. By default, we first check the fitted estimator if available, otherwise we check the unfitted estimator. Parameters ---------- attr : str Name of the attribute the delegate might or might not have. delegates: tuple of str, default=("estimator_", "estimator") A tuple of sub-estimator(s) to check if we can delegate the `attr` method. Returns ------- check : function Function to check if the delegate has the attribute. Raises ------ ValueError Raised when none of the delegates are present in the object.
_estimator_has
python
scikit-learn/scikit-learn
sklearn/utils/validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
BSD-3-Clause
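The returned check is designed to be fed to sklearn.utils.metaestimators.available_if, so a delegated method only appears on the meta-estimator when the sub-estimator exposes it. Since _estimator_has is private, here is a hedged re-implementation of the same pattern on a toy wrapper (Wrapper and _inner_has are made-up names):

from sklearn.base import BaseEstimator, clone
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.utils.metaestimators import available_if

def _inner_has(attr):
    """Mimic _estimator_has: prefer the fitted sub-estimator."""
    def check(self):
        target = getattr(self, "estimator_", self.estimator)
        getattr(target, attr)  # raises AttributeError if missing
        return True
    return check

class Wrapper(BaseEstimator):
    def __init__(self, estimator):
        self.estimator = estimator

    def fit(self, X, y):
        self.estimator_ = clone(self.estimator).fit(X, y)
        return self

    @available_if(_inner_has("predict_proba"))
    def predict_proba(self, X):
        return self.estimator_.predict_proba(X)

X, y = [[0.0], [1.0]], [0, 1]
print(hasattr(Wrapper(LogisticRegression()).fit(X, y), "predict_proba"))  # True
print(hasattr(Wrapper(LinearRegression()).fit(X, y), "predict_proba"))    # False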
def check_non_negative(X, whom):
    """
    Check if there is any negative value in an array.

    Parameters
    ----------
    X : {array-like, sparse matrix}
        Input data.

    whom : str
        Who passed X to this function.
    """
    xp, _ = get_namespace(X)
    # avoid X.min() on sparse matrix since it also sorts the indices
    if sp.issparse(X):
        if X.format in ["lil", "dok"]:
            X = X.tocsr()
        if X.data.size == 0:
            X_min = 0
        else:
            X_min = X.data.min()
    else:
        X_min = xp.min(X)

    if X_min < 0:
        raise ValueError(f"Negative values in data passed to {whom}.")
Check if there is any negative value in an array. Parameters ---------- X : {array-like, sparse matrix} Input data. whom : str Who passed X to this function.
check_non_negative
python
scikit-learn/scikit-learn
sklearn/utils/validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
BSD-3-Clause
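A short behavioral sketch; note the sparse fast path checks only the stored .data values, so the indices are never sorted as a side effect:

import numpy as np
from scipy import sparse
from sklearn.utils.validation import check_non_negative

check_non_negative(np.array([[0.0, 1.0], [2.0, 3.0]]), "demo")  # passes

# sparse input is checked via .data, not a full X.min()
check_non_negative(sparse.csr_matrix(np.eye(2)), "demo")        # passes

try:
    check_non_negative(np.array([1.0, -1e-9]), "demo")
except ValueError as exc:
    print(exc)  # Negative values in data passed to demo.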
def check_scalar(
    x,
    name,
    target_type,
    *,
    min_val=None,
    max_val=None,
    include_boundaries="both",
):
    """Validate scalar parameters type and value.

    Parameters
    ----------
    x : object
        The scalar parameter to validate.

    name : str
        The name of the parameter to be printed in error messages.

    target_type : type or tuple
        Acceptable data types for the parameter.

    min_val : float or int, default=None
        The minimum valid value the parameter can take. If None (default) it
        is implied that the parameter does not have a lower bound.

    max_val : float or int, default=None
        The maximum valid value the parameter can take. If None (default) it
        is implied that the parameter does not have an upper bound.

    include_boundaries : {"left", "right", "both", "neither"}, default="both"
        Whether the interval defined by `min_val` and `max_val` should include
        the boundaries. Possible choices are:

        - `"left"`: only `min_val` is included in the valid interval.
          It is equivalent to the interval `[ min_val, max_val )`.
        - `"right"`: only `max_val` is included in the valid interval.
          It is equivalent to the interval `( min_val, max_val ]`.
        - `"both"`: `min_val` and `max_val` are included in the valid interval.
          It is equivalent to the interval `[ min_val, max_val ]`.
        - `"neither"`: neither `min_val` nor `max_val` are included in the
          valid interval. It is equivalent to the interval `( min_val, max_val )`.

    Returns
    -------
    x : numbers.Number
        The validated number.

    Raises
    ------
    TypeError
        If the parameter's type does not match the desired type.

    ValueError
        If the parameter's value violates the given bounds.
        If `min_val`, `max_val` and `include_boundaries` are inconsistent.

    Examples
    --------
    >>> from sklearn.utils.validation import check_scalar
    >>> check_scalar(10, "x", int, min_val=1, max_val=20)
    10
    """

    def type_name(t):
        """Convert type into human-readable string."""
        module = t.__module__
        qualname = t.__qualname__
        if module == "builtins":
            return qualname
        elif t == numbers.Real:
            return "float"
        elif t == numbers.Integral:
            return "int"
        return f"{module}.{qualname}"

    if not isinstance(x, target_type):
        if isinstance(target_type, tuple):
            types_str = ", ".join(type_name(t) for t in target_type)
            target_type_str = f"{{{types_str}}}"
        else:
            target_type_str = type_name(target_type)

        raise TypeError(
            f"{name} must be an instance of {target_type_str}, not"
            f" {type(x).__qualname__}."
        )

    expected_include_boundaries = ("left", "right", "both", "neither")
    if include_boundaries not in expected_include_boundaries:
        raise ValueError(
            f"Unknown value for `include_boundaries`: {include_boundaries!r}. "
            f"Possible values are: {expected_include_boundaries}."
        )

    if max_val is None and include_boundaries == "right":
        raise ValueError(
            "`include_boundaries`='right' without specifying explicitly `max_val` "
            "is inconsistent."
        )

    if min_val is None and include_boundaries == "left":
        raise ValueError(
            "`include_boundaries`='left' without specifying explicitly `min_val` "
            "is inconsistent."
        )

    comparison_operator = (
        operator.lt if include_boundaries in ("left", "both") else operator.le
    )
    if min_val is not None and comparison_operator(x, min_val):
        raise ValueError(
            f"{name} == {x}, must be"
            f" {'>=' if include_boundaries in ('left', 'both') else '>'} {min_val}."
        )

    comparison_operator = (
        operator.gt if include_boundaries in ("right", "both") else operator.ge
    )
    if max_val is not None and comparison_operator(x, max_val):
        raise ValueError(
            f"{name} == {x}, must be"
            f" {'<=' if include_boundaries in ('right', 'both') else '<'} {max_val}."
        )

    return x
Validate scalar parameters type and value. Parameters ---------- x : object The scalar parameter to validate. name : str The name of the parameter to be printed in error messages. target_type : type or tuple Acceptable data types for the parameter. min_val : float or int, default=None The minimum valid value the parameter can take. If None (default) it is implied that the parameter does not have a lower bound. max_val : float or int, default=None The maximum valid value the parameter can take. If None (default) it is implied that the parameter does not have an upper bound. include_boundaries : {"left", "right", "both", "neither"}, default="both" Whether the interval defined by `min_val` and `max_val` should include the boundaries. Possible choices are: - `"left"`: only `min_val` is included in the valid interval. It is equivalent to the interval `[ min_val, max_val )`. - `"right"`: only `max_val` is included in the valid interval. It is equivalent to the interval `( min_val, max_val ]`. - `"both"`: `min_val` and `max_val` are included in the valid interval. It is equivalent to the interval `[ min_val, max_val ]`. - `"neither"`: neither `min_val` nor `max_val` are included in the valid interval. It is equivalent to the interval `( min_val, max_val )`. Returns ------- x : numbers.Number The validated number. Raises ------ TypeError If the parameter's type does not match the desired type. ValueError If the parameter's value violates the given bounds. If `min_val`, `max_val` and `include_boundaries` are inconsistent. Examples -------- >>> from sklearn.utils.validation import check_scalar >>> check_scalar(10, "x", int, min_val=1, max_val=20) 10
check_scalar
python
scikit-learn/scikit-learn
sklearn/utils/validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
BSD-3-Clause
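A sketch of the boundary semantics: with include_boundaries="neither" the valid interval is open, so the endpoints themselves are rejected, and type checking happens before any bound check:

from sklearn.utils.validation import check_scalar

# "neither" excludes both ends: valid interval is (0, 1)
check_scalar(0.5, "momentum", float, min_val=0, max_val=1,
             include_boundaries="neither")

try:
    check_scalar(0.0, "momentum", float, min_val=0, max_val=1,
                 include_boundaries="neither")
except ValueError as exc:
    print(exc)  # momentum == 0.0, must be > 0.

try:
    check_scalar(1, "momentum", float, min_val=0, max_val=1)
except TypeError as exc:
    print(exc)  # momentum must be an instance of float, not int.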
def type_name(t):
    """Convert type into human-readable string."""
    module = t.__module__
    qualname = t.__qualname__
    if module == "builtins":
        return qualname
    elif t == numbers.Real:
        return "float"
    elif t == numbers.Integral:
        return "int"
    return f"{module}.{qualname}"
Convert type into human-readable string.
type_name
python
scikit-learn/scikit-learn
sklearn/utils/validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
BSD-3-Clause
def _check_psd_eigenvalues(lambdas, enable_warnings=False):
    """Check the eigenvalues of a positive semidefinite (PSD) matrix.

    Checks the provided array of PSD matrix eigenvalues for numerical or
    conditioning issues and returns a fixed validated version. This method
    should typically be used if the PSD matrix is user-provided (e.g. a
    Gram matrix) or computed using a user-provided dissimilarity metric
    (e.g. kernel function), or if the decomposition process uses approximation
    methods (randomized SVD, etc.).

    It checks for three things:

    - that there are no significant imaginary parts in eigenvalues (more than
      1e-5 times the maximum real part). If this check fails, it raises a
      ``ValueError``. Otherwise all non-significant imaginary parts that may
      remain are set to zero. This operation is traced with a
      ``PositiveSpectrumWarning`` when ``enable_warnings=True``.

    - that eigenvalues are not all negative. If this check fails, it raises a
      ``ValueError``

    - that there are no significant negative eigenvalues with absolute value
      more than 1e-10 (1e-6) and more than 1e-5 (5e-3) times the largest
      positive eigenvalue in double (single) precision. If this check fails,
      it raises a ``ValueError``. Otherwise all negative eigenvalues that may
      remain are set to zero. This operation is traced with a
      ``PositiveSpectrumWarning`` when ``enable_warnings=True``.

    Finally, all the positive eigenvalues that are too small (with a value
    smaller than the maximum eigenvalue multiplied by 1e-12 (2e-7)) are set to
    zero. This operation is traced with a ``PositiveSpectrumWarning`` when
    ``enable_warnings=True``.

    Parameters
    ----------
    lambdas : array-like of shape (n_eigenvalues,)
        Array of eigenvalues to check / fix.

    enable_warnings : bool, default=False
        When this is set to ``True``, a ``PositiveSpectrumWarning`` will be
        raised when there are imaginary parts, negative eigenvalues, or
        extremely small non-zero eigenvalues. Otherwise no warning will be
        raised. In both cases, imaginary parts, negative eigenvalues, and
        extremely small non-zero eigenvalues will be set to zero.

    Returns
    -------
    lambdas_fixed : ndarray of shape (n_eigenvalues,)
        A fixed validated copy of the array of eigenvalues.

    Examples
    --------
    >>> from sklearn.utils.validation import _check_psd_eigenvalues
    >>> _check_psd_eigenvalues([1, 2])      # nominal case
    array([1, 2])
    >>> _check_psd_eigenvalues([5, 5j])     # significant imag part
    Traceback (most recent call last):
        ...
    ValueError: There are significant imaginary parts in eigenvalues (1
        of the maximum real part). Either the matrix is not PSD, or there was
        an issue while computing the eigendecomposition of the matrix.
    >>> _check_psd_eigenvalues([5, 5e-5j])  # insignificant imag part
    array([5., 0.])
    >>> _check_psd_eigenvalues([-5, -1])    # all negative
    Traceback (most recent call last):
        ...
    ValueError: All eigenvalues are negative (maximum is -1). Either the
        matrix is not PSD, or there was an issue while computing the
        eigendecomposition of the matrix.
    >>> _check_psd_eigenvalues([5, -1])     # significant negative
    Traceback (most recent call last):
        ...
    ValueError: There are significant negative eigenvalues (0.2 of the
        maximum positive). Either the matrix is not PSD, or there was an issue
        while computing the eigendecomposition of the matrix.
    >>> _check_psd_eigenvalues([5, -5e-5])  # insignificant negative
    array([5., 0.])
    >>> _check_psd_eigenvalues([5, 4e-12])  # bad conditioning (too small)
    array([5., 0.])
    """
    lambdas = np.array(lambdas)
    is_double_precision = lambdas.dtype == np.float64

    # note: the minimum value available is
    #  - single-precision: np.finfo('float32').eps = 1.2e-07
    #  - double-precision: np.finfo('float64').eps = 2.2e-16

    # the various thresholds used for validation
    # we may wish to change the value according to precision.
    significant_imag_ratio = 1e-5
    significant_neg_ratio = 1e-5 if is_double_precision else 5e-3
    significant_neg_value = 1e-10 if is_double_precision else 1e-6
    small_pos_ratio = 1e-12 if is_double_precision else 2e-7

    # Check that there are no significant imaginary parts
    if not np.isreal(lambdas).all():
        max_imag_abs = np.abs(np.imag(lambdas)).max()
        max_real_abs = np.abs(np.real(lambdas)).max()
        if max_imag_abs > significant_imag_ratio * max_real_abs:
            raise ValueError(
                "There are significant imaginary parts in eigenvalues (%g "
                "of the maximum real part). Either the matrix is not PSD, or "
                "there was an issue while computing the eigendecomposition "
                "of the matrix." % (max_imag_abs / max_real_abs)
            )

        # warn about imaginary parts being removed
        if enable_warnings:
            warnings.warn(
                "There are imaginary parts in eigenvalues (%g "
                "of the maximum real part). Either the matrix is not"
                " PSD, or there was an issue while computing the "
                "eigendecomposition of the matrix. Only the real "
                "parts will be kept." % (max_imag_abs / max_real_abs),
                PositiveSpectrumWarning,
            )

    # Remove all imaginary parts (even if zero)
    lambdas = np.real(lambdas)

    # Check that there are no significant negative eigenvalues
    max_eig = lambdas.max()
    if max_eig < 0:
        raise ValueError(
            "All eigenvalues are negative (maximum is %g). "
            "Either the matrix is not PSD, or there was an "
            "issue while computing the eigendecomposition of "
            "the matrix." % max_eig
        )

    else:
        min_eig = lambdas.min()
        if (
            min_eig < -significant_neg_ratio * max_eig
            and min_eig < -significant_neg_value
        ):
            raise ValueError(
                "There are significant negative eigenvalues (%g"
                " of the maximum positive). Either the matrix is "
                "not PSD, or there was an issue while computing "
                "the eigendecomposition of the matrix." % (-min_eig / max_eig)
            )
        elif min_eig < 0:
            # Remove all negative values and warn about it
            if enable_warnings:
                warnings.warn(
                    "There are negative eigenvalues (%g of the "
                    "maximum positive). Either the matrix is not "
                    "PSD, or there was an issue while computing the"
                    " eigendecomposition of the matrix. Negative "
                    "eigenvalues will be replaced with 0." % (-min_eig / max_eig),
                    PositiveSpectrumWarning,
                )
            lambdas[lambdas < 0] = 0

    # Check for conditioning (small positive non-zeros)
    too_small_lambdas = (0 < lambdas) & (lambdas < small_pos_ratio * max_eig)
    if too_small_lambdas.any():
        if enable_warnings:
            warnings.warn(
                "Badly conditioned PSD matrix spectrum: the largest "
                "eigenvalue is more than %g times the smallest. "
                "Small eigenvalues will be replaced with 0."
                "" % (1 / small_pos_ratio),
                PositiveSpectrumWarning,
            )
        lambdas[too_small_lambdas] = 0

    return lambdas
Check the eigenvalues of a positive semidefinite (PSD) matrix. Checks the provided array of PSD matrix eigenvalues for numerical or conditioning issues and returns a fixed validated version. This method should typically be used if the PSD matrix is user-provided (e.g. a Gram matrix) or computed using a user-provided dissimilarity metric (e.g. kernel function), or if the decomposition process uses approximation methods (randomized SVD, etc.). It checks for three things: - that there are no significant imaginary parts in eigenvalues (more than 1e-5 times the maximum real part). If this check fails, it raises a ``ValueError``. Otherwise all non-significant imaginary parts that may remain are set to zero. This operation is traced with a ``PositiveSpectrumWarning`` when ``enable_warnings=True``. - that eigenvalues are not all negative. If this check fails, it raises a ``ValueError`` - that there are no significant negative eigenvalues with absolute value more than 1e-10 (1e-6) and more than 1e-5 (5e-3) times the largest positive eigenvalue in double (single) precision. If this check fails, it raises a ``ValueError``. Otherwise all negative eigenvalues that may remain are set to zero. This operation is traced with a ``PositiveSpectrumWarning`` when ``enable_warnings=True``. Finally, all the positive eigenvalues that are too small (with a value smaller than the maximum eigenvalue multiplied by 1e-12 (2e-7)) are set to zero. This operation is traced with a ``PositiveSpectrumWarning`` when ``enable_warnings=True``. Parameters ---------- lambdas : array-like of shape (n_eigenvalues,) Array of eigenvalues to check / fix. enable_warnings : bool, default=False When this is set to ``True``, a ``PositiveSpectrumWarning`` will be raised when there are imaginary parts, negative eigenvalues, or extremely small non-zero eigenvalues. Otherwise no warning will be raised. In both cases, imaginary parts, negative eigenvalues, and extremely small non-zero eigenvalues will be set to zero. Returns ------- lambdas_fixed : ndarray of shape (n_eigenvalues,) A fixed validated copy of the array of eigenvalues. Examples -------- >>> from sklearn.utils.validation import _check_psd_eigenvalues >>> _check_psd_eigenvalues([1, 2]) # nominal case array([1, 2]) >>> _check_psd_eigenvalues([5, 5j]) # significant imag part Traceback (most recent call last): ... ValueError: There are significant imaginary parts in eigenvalues (1 of the maximum real part). Either the matrix is not PSD, or there was an issue while computing the eigendecomposition of the matrix. >>> _check_psd_eigenvalues([5, 5e-5j]) # insignificant imag part array([5., 0.]) >>> _check_psd_eigenvalues([-5, -1]) # all negative Traceback (most recent call last): ... ValueError: All eigenvalues are negative (maximum is -1). Either the matrix is not PSD, or there was an issue while computing the eigendecomposition of the matrix. >>> _check_psd_eigenvalues([5, -1]) # significant negative Traceback (most recent call last): ... ValueError: There are significant negative eigenvalues (0.2 of the maximum positive). Either the matrix is not PSD, or there was an issue while computing the eigendecomposition of the matrix. >>> _check_psd_eigenvalues([5, -5e-5]) # insignificant negative array([5., 0.]) >>> _check_psd_eigenvalues([5, 4e-12]) # bad conditioning (too small) array([5., 0.])
_check_psd_eigenvalues
python
scikit-learn/scikit-learn
sklearn/utils/validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
BSD-3-Clause
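_check_psd_eigenvalues is private (internal API, subject to change); the sketch below only exercises the documented thresholds, showing the clip-versus-raise boundary for double precision:

import numpy as np
from sklearn.utils.validation import _check_psd_eigenvalues

# tiny negative and tiny imaginary parts are clipped to zero...
print(_check_psd_eigenvalues(np.array([5.0, -5e-5])))       # [5. 0.]
print(_check_psd_eigenvalues(np.array([5.0 + 0j, 5e-5j])))  # [5. 0.]

# ...while a clearly negative eigenvalue means the matrix is not PSD
try:
    _check_psd_eigenvalues(np.array([5.0, -1.0]))
except ValueError as exc:
    print(exc)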
def _check_sample_weight(
    sample_weight, X, *, dtype=None, ensure_non_negative=False, copy=False
):
    """Validate sample weights.

    Note that passing sample_weight=None will output an array of ones.
    Therefore, in some cases, you may want to protect the call with:
    if sample_weight is not None:
        sample_weight = _check_sample_weight(...)

    Parameters
    ----------
    sample_weight : {ndarray, Number or None}, shape (n_samples,)
        Input sample weights.

    X : {ndarray, list, sparse matrix}
        Input data.

    dtype : dtype, default=None
        dtype of the validated `sample_weight`.
        If None, and `sample_weight` is an array:

        - If `sample_weight.dtype` is one of `{np.float64, np.float32}`,
          then the dtype is preserved.
        - Else the output has NumPy's default dtype: `np.float64`.

        If `dtype` is not `{np.float32, np.float64, None}`, then output will
        be `np.float64`.

    ensure_non_negative : bool, default=False
        Whether or not the weights are expected to be non-negative.

        .. versionadded:: 1.0

    copy : bool, default=False
        If True, a copy of sample_weight will be created.

    Returns
    -------
    sample_weight : ndarray of shape (n_samples,)
        Validated sample weight. It is guaranteed to be "C" contiguous.
    """
    xp, _, device = get_namespace_and_device(sample_weight, X)

    n_samples = _num_samples(X)

    max_float_type = _max_precision_float_dtype(xp, device)
    float_dtypes = (
        [xp.float32] if max_float_type == xp.float32 else [xp.float64, xp.float32]
    )
    if dtype is not None and dtype not in float_dtypes:
        dtype = max_float_type

    if sample_weight is None:
        sample_weight = xp.ones(n_samples, dtype=dtype)
    elif isinstance(sample_weight, numbers.Number):
        sample_weight = xp.full(n_samples, sample_weight, dtype=dtype)
    else:
        if dtype is None:
            dtype = float_dtypes
        sample_weight = check_array(
            sample_weight,
            accept_sparse=False,
            ensure_2d=False,
            dtype=dtype,
            order="C",
            copy=copy,
            input_name="sample_weight",
        )
        if sample_weight.ndim != 1:
            raise ValueError("Sample weights must be 1D array or scalar")

        if sample_weight.shape != (n_samples,):
            raise ValueError(
                "sample_weight.shape == {}, expected {}!".format(
                    sample_weight.shape, (n_samples,)
                )
            )

    if ensure_non_negative:
        check_non_negative(sample_weight, "`sample_weight`")

    return sample_weight
Validate sample weights. Note that passing sample_weight=None will output an array of ones. Therefore, in some cases, you may want to protect the call with: if sample_weight is not None: sample_weight = _check_sample_weight(...) Parameters ---------- sample_weight : {ndarray, Number or None}, shape (n_samples,) Input sample weights. X : {ndarray, list, sparse matrix} Input data. dtype : dtype, default=None dtype of the validated `sample_weight`. If None, and `sample_weight` is an array: - If `sample_weight.dtype` is one of `{np.float64, np.float32}`, then the dtype is preserved. - Else the output has NumPy's default dtype: `np.float64`. If `dtype` is not `{np.float32, np.float64, None}`, then output will be `np.float64`. ensure_non_negative : bool, default=False Whether or not the weights are expected to be non-negative. .. versionadded:: 1.0 copy : bool, default=False If True, a copy of sample_weight will be created. Returns ------- sample_weight : ndarray of shape (n_samples,) Validated sample weight. It is guaranteed to be "C" contiguous.
_check_sample_weight
python
scikit-learn/scikit-learn
sklearn/utils/validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
BSD-3-Clause
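A hedged sketch of the three input forms this private helper accepts (None, scalar, array), plus the two validation failures:

import numpy as np
from sklearn.utils.validation import _check_sample_weight

X = np.zeros((4, 2))

print(_check_sample_weight(None, X))   # [1. 1. 1. 1.]
print(_check_sample_weight(2.0, X))    # [2. 2. 2. 2.]

try:
    _check_sample_weight(np.ones(3), X)  # wrong length
except ValueError as exc:
    print(exc)

try:
    _check_sample_weight(np.array([1.0, -1.0, 1.0, 1.0]), X,
                         ensure_non_negative=True)
except ValueError as exc:
    print(exc)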
def _allclose_dense_sparse(x, y, rtol=1e-7, atol=1e-9):
    """Check allclose for sparse and dense data.

    Both x and y need to be either sparse or dense, they
    can't be mixed.

    Parameters
    ----------
    x : {array-like, sparse matrix}
        First array to compare.

    y : {array-like, sparse matrix}
        Second array to compare.

    rtol : float, default=1e-7
        Relative tolerance; see numpy.allclose.

    atol : float, default=1e-9
        Absolute tolerance; see numpy.allclose. Note that the default here is
        more tolerant than the default for numpy.testing.assert_allclose, where
        atol=0.
    """
    if sp.issparse(x) and sp.issparse(y):
        x = x.tocsr()
        y = y.tocsr()
        x.sum_duplicates()
        y.sum_duplicates()
        return (
            np.array_equal(x.indices, y.indices)
            and np.array_equal(x.indptr, y.indptr)
            and np.allclose(x.data, y.data, rtol=rtol, atol=atol)
        )
    elif not sp.issparse(x) and not sp.issparse(y):
        return np.allclose(x, y, rtol=rtol, atol=atol)
    raise ValueError(
        "Can only compare two sparse matrices, not a sparse matrix and an array"
    )
Check allclose for sparse and dense data. Both x and y need to be either sparse or dense, they can't be mixed. Parameters ---------- x : {array-like, sparse matrix} First array to compare. y : {array-like, sparse matrix} Second array to compare. rtol : float, default=1e-7 Relative tolerance; see numpy.allclose. atol : float, default=1e-9 Absolute tolerance; see numpy.allclose. Note that the default here is more tolerant than the default for numpy.testing.assert_allclose, where atol=0.
_allclose_dense_sparse
python
scikit-learn/scikit-learn
sklearn/utils/validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
BSD-3-Clause
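A sketch of the private helper's three branches: two sparse inputs (formats are normalized to CSR internally), two dense inputs, and the refused mixed case:

import numpy as np
from scipy import sparse
from sklearn.utils.validation import _allclose_dense_sparse

a = sparse.csr_matrix(np.eye(3))
b = sparse.coo_matrix(np.eye(3))            # converted to CSR internally
print(_allclose_dense_sparse(a, b))         # True

print(_allclose_dense_sparse(np.eye(3), np.eye(3) + 1e-12))  # True, within atol

try:
    _allclose_dense_sparse(a, np.eye(3))    # mixing sparse and dense is refused
except ValueError as exc:
    print(exc)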
def _check_response_method(estimator, response_method):
    """Check if `response_method` is available in estimator and return it.

    .. versionadded:: 1.3

    Parameters
    ----------
    estimator : estimator instance
        Classifier or regressor to check.

    response_method : {"predict_proba", "predict_log_proba", "decision_function",
            "predict"} or list of such str
        Specifies the response method used to get a prediction from an estimator
        (i.e. :term:`predict_proba`, :term:`predict_log_proba`,
        :term:`decision_function` or :term:`predict`). Possible choices are:

        - if `str`, it corresponds to the name of the method to return;
        - if a list of `str`, it provides the method names in order of
          preference. The method returned is the first method in the list
          that is implemented by `estimator`.

    Returns
    -------
    prediction_method : callable
        Prediction method of estimator.

    Raises
    ------
    AttributeError
        If `response_method` is not available in `estimator`.
    """
    if isinstance(response_method, str):
        list_methods = [response_method]
    else:
        list_methods = response_method

    prediction_method = [getattr(estimator, method, None) for method in list_methods]
    prediction_method = reduce(lambda x, y: x or y, prediction_method)
    if prediction_method is None:
        raise AttributeError(
            f"{estimator.__class__.__name__} has none of the following attributes: "
            f"{', '.join(list_methods)}."
        )

    return prediction_method
Check if `response_method` is available in estimator and return it. .. versionadded:: 1.3 Parameters ---------- estimator : estimator instance Classifier or regressor to check. response_method : {"predict_proba", "predict_log_proba", "decision_function", "predict"} or list of such str Specifies the response method used to get a prediction from an estimator (i.e. :term:`predict_proba`, :term:`predict_log_proba`, :term:`decision_function` or :term:`predict`). Possible choices are: - if `str`, it corresponds to the name of the method to return; - if a list of `str`, it provides the method names in order of preference. The method returned is the first method in the list that is implemented by `estimator`. Returns ------- prediction_method : callable Prediction method of estimator. Raises ------ AttributeError If `response_method` is not available in `estimator`.
_check_response_method
python
scikit-learn/scikit-learn
sklearn/utils/validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
BSD-3-Clause
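A sketch of the preference-order resolution (private helper; shown only to illustrate the documented behavior):

from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVR
from sklearn.utils.validation import _check_response_method

lr = LogisticRegression().fit([[0.0], [1.0]], [0, 1])

# a list is resolved in order of preference; LogisticRegression has both,
# so the first entry wins
method = _check_response_method(lr, ["decision_function", "predict_proba"])
print(method.__name__)  # decision_function

try:
    _check_response_method(SVR(), "predict_proba")
except AttributeError as exc:
    print(exc)  # SVR has none of the following attributes: predict_proba.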
def _check_method_params(X, params, indices=None):
    """Check and validate the parameters passed to a specific
    method like `fit`.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Data array.

    params : dict
        Dictionary containing the parameters passed to the method.

    indices : array-like of shape (n_samples,), default=None
        Indices to be selected if the parameter has the same size as `X`.

    Returns
    -------
    method_params_validated : dict
        Validated parameters. We ensure that the values support indexing.
    """
    from . import _safe_indexing

    method_params_validated = {}
    for param_key, param_value in params.items():
        if (
            not _is_arraylike(param_value) and not sp.issparse(param_value)
        ) or _num_samples(param_value) != _num_samples(X):
            # Non-indexable pass-through (for now for backward-compatibility).
            # https://github.com/scikit-learn/scikit-learn/issues/15805
            method_params_validated[param_key] = param_value
        else:
            # Any other method_params should support indexing
            # (e.g. for cross-validation).
            method_params_validated[param_key] = _make_indexable(param_value)
            method_params_validated[param_key] = _safe_indexing(
                method_params_validated[param_key], indices
            )

    return method_params_validated
Check and validate the parameters passed to a specific method like `fit`. Parameters ---------- X : array-like of shape (n_samples, n_features) Data array. params : dict Dictionary containing the parameters passed to the method. indices : array-like of shape (n_samples,), default=None Indices to be selected if the parameter has the same size as `X`. Returns ------- method_params_validated : dict Validated parameters. We ensure that the values support indexing.
_check_method_params
python
scikit-learn/scikit-learn
sklearn/utils/validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
BSD-3-Clause
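The pass-through-versus-index split is easy to demonstrate; a hedged sketch of this private helper (its intended use is slicing fit params, such as sample_weight, per cross-validation fold):

import numpy as np
from sklearn.utils.validation import _check_method_params

X = np.zeros((4, 2))
params = {
    "sample_weight": np.array([1.0, 2.0, 3.0, 4.0]),  # same length as X: indexed
    "verbose": True,                                  # scalar: passed through
}
out = _check_method_params(X, params=params, indices=np.array([0, 2]))
print(out["sample_weight"])  # [1. 3.]
print(out["verbose"])        # True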
def _is_pandas_df_or_series(X):
    """Return True if X is a pandas dataframe or series."""
    try:
        pd = sys.modules["pandas"]
    except KeyError:
        return False
    return isinstance(X, (pd.DataFrame, pd.Series))
Return True if X is a pandas dataframe or series.
_is_pandas_df_or_series
python
scikit-learn/scikit-learn
sklearn/utils/validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
BSD-3-Clause
def _is_pandas_df(X):
    """Return True if X is a pandas dataframe."""
    try:
        pd = sys.modules["pandas"]
    except KeyError:
        return False
    return isinstance(X, pd.DataFrame)
Return True if X is a pandas dataframe.
_is_pandas_df
python
scikit-learn/scikit-learn
sklearn/utils/validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
BSD-3-Clause
def _is_pyarrow_data(X):
    """Return True if X is a pyarrow Table, RecordBatch, Array or ChunkedArray."""
    try:
        pa = sys.modules["pyarrow"]
    except KeyError:
        return False
    return isinstance(X, (pa.Table, pa.RecordBatch, pa.Array, pa.ChunkedArray))
Return True if X is a pyarrow Table, RecordBatch, Array or ChunkedArray.
_is_pyarrow_data
python
scikit-learn/scikit-learn
sklearn/utils/validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
BSD-3-Clause
def _is_polars_df_or_series(X):
    """Return True if X is a polars dataframe or series."""
    try:
        pl = sys.modules["polars"]
    except KeyError:
        return False
    return isinstance(X, (pl.DataFrame, pl.Series))
Return True if X is a polars dataframe or series.
_is_polars_df_or_series
python
scikit-learn/scikit-learn
sklearn/utils/validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
BSD-3-Clause
def _is_polars_df(X):
    """Return True if X is a polars dataframe."""
    try:
        pl = sys.modules["polars"]
    except KeyError:
        return False
    return isinstance(X, pl.DataFrame)
Return True if X is a polars dataframe.
_is_polars_df
python
scikit-learn/scikit-learn
sklearn/utils/validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
BSD-3-Clause
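All five helpers above share one trick worth noting: they look the optional library up in sys.modules instead of importing it, so answering the isinstance question never triggers (or pays for) an import of pandas, pyarrow, or polars. A hedged generic sketch of the same pattern (the function name is made up):

import sys

def _is_instance_of_optional(X, module_name, type_names):
    """sys.modules lookup: never *imports* the optional dependency.

    If the user has not imported `module_name`, X cannot possibly be an
    instance of its types, so False is the right (and free) answer.
    """
    try:
        mod = sys.modules[module_name]
    except KeyError:
        return False
    return isinstance(X, tuple(getattr(mod, t) for t in type_names))

# False either way: pandas unimported means KeyError, a plain list fails isinstance
print(_is_instance_of_optional([1, 2], "pandas", ("DataFrame", "Series")))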
def _get_feature_names(X):
    """Get feature names from X.

    Support for other array containers should place its implementation here.

    Parameters
    ----------
    X : {ndarray, dataframe} of shape (n_samples, n_features)
        Array container to extract feature names.

        - pandas dataframe : The columns will be considered to be feature
          names. If the dataframe contains non-string feature names, `None` is
          returned.
        - All other array containers will return `None`.

    Returns
    -------
    names: ndarray or None
        Feature names of `X`. Unrecognized array containers will return `None`.
    """
    feature_names = None

    # extract feature names for supported array containers
    if _is_pandas_df(X):
        # Make sure we can inspect columns names from pandas, even with
        # versions too old to expose a working implementation of
        # __dataframe__.column_names() and avoid introducing any
        # additional copy.
        # TODO: remove the pandas-specific branch once the minimum supported
        # version of pandas has a working implementation of
        # __dataframe__.column_names() that is guaranteed to not introduce any
        # additional copy of the data without having to impose allow_copy=False
        # that could fail with other libraries. Note: in the longer term, we
        # could decide to instead rely on the __dataframe_namespace__ API once
        # adopted by our minimally supported pandas version.
        feature_names = np.asarray(X.columns, dtype=object)
    elif hasattr(X, "__dataframe__"):
        df_protocol = X.__dataframe__()
        feature_names = np.asarray(list(df_protocol.column_names()), dtype=object)

    if feature_names is None or len(feature_names) == 0:
        return

    types = sorted(t.__qualname__ for t in set(type(v) for v in feature_names))

    # mixed type of string and non-string is not supported
    if len(types) > 1 and "str" in types:
        raise TypeError(
            "Feature names are only supported if all input features have string names, "
            f"but your input has {types} as feature name / column name types. "
            "If you want feature names to be stored and validated, you must convert "
            "them all to strings, by using X.columns = X.columns.astype(str) for "
            "example. Otherwise you can remove feature / column names from your input "
            "data, or convert them all to a non-string data type."
        )

    # Only feature names of all strings are supported
    if len(types) == 1 and types[0] == "str":
        return feature_names
Get feature names from X. Support for other array containers should place its implementation here. Parameters ---------- X : {ndarray, dataframe} of shape (n_samples, n_features) Array container to extract feature names. - pandas dataframe : The columns will be considered to be feature names. If the dataframe contains non-string feature names, `None` is returned. - All other array containers will return `None`. Returns ------- names: ndarray or None Feature names of `X`. Unrecognized array containers will return `None`.
_get_feature_names
python
scikit-learn/scikit-learn
sklearn/utils/validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
BSD-3-Clause
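A sketch of the three documented outcomes of this private helper: string columns are returned, plain arrays yield None, and mixed-type column names are a hard error:

import numpy as np
import pandas as pd
from sklearn.utils.validation import _get_feature_names

df = pd.DataFrame({"age": [1, 2], "height": [3, 4]})
print(_get_feature_names(df))                # ['age' 'height'] (object dtype)

print(_get_feature_names(np.zeros((2, 2))))  # None: plain arrays have no names

# mixed string / integer column names are rejected outright
try:
    _get_feature_names(pd.DataFrame({"age": [1], 0: [2]}))
except TypeError as exc:
    print(type(exc).__name__)                # TypeError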
def _check_feature_names_in(estimator, input_features=None, *, generate_names=True):
    """Check `input_features` and generate names if needed.

    Commonly used in :term:`get_feature_names_out`.

    Parameters
    ----------
    input_features : array-like of str or None, default=None
        Input features.

        - If `input_features` is `None`, then `feature_names_in_` is
          used as feature names in. If `feature_names_in_` is not defined,
          then the following input feature names are generated:
          `["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
        - If `input_features` is an array-like, then `input_features` must
          match `feature_names_in_` if `feature_names_in_` is defined.

    generate_names : bool, default=True
        Whether to generate names when `input_features` is `None` and
        `estimator.feature_names_in_` is not defined. This is useful for
        transformers that validate `input_features` but do not require them
        in :term:`get_feature_names_out` e.g. `PCA`.

    Returns
    -------
    feature_names_in : ndarray of str or `None`
        Feature names in.
    """
    feature_names_in_ = getattr(estimator, "feature_names_in_", None)
    n_features_in_ = getattr(estimator, "n_features_in_", None)

    if input_features is not None:
        input_features = np.asarray(input_features, dtype=object)
        if feature_names_in_ is not None and not np.array_equal(
            feature_names_in_, input_features
        ):
            raise ValueError("input_features is not equal to feature_names_in_")

        if n_features_in_ is not None and len(input_features) != n_features_in_:
            raise ValueError(
                "input_features should have length equal to number of "
                f"features ({n_features_in_}), got {len(input_features)}"
            )
        return input_features

    if feature_names_in_ is not None:
        return feature_names_in_

    if not generate_names:
        return

    # Generates feature names if `n_features_in_` is defined
    if n_features_in_ is None:
        raise ValueError("Unable to generate feature names without n_features_in_")

    return np.asarray([f"x{i}" for i in range(n_features_in_)], dtype=object)
Check `input_features` and generate names if needed. Commonly used in :term:`get_feature_names_out`. Parameters ---------- input_features : array-like of str or None, default=None Input features. - If `input_features` is `None`, then `feature_names_in_` is used as feature names in. If `feature_names_in_` is not defined, then the following input feature names are generated: `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. - If `input_features` is an array-like, then `input_features` must match `feature_names_in_` if `feature_names_in_` is defined. generate_names : bool, default=True Whether to generate names when `input_features` is `None` and `estimator.feature_names_in_` is not defined. This is useful for transformers that validate `input_features` but do not require them in :term:`get_feature_names_out` e.g. `PCA`. Returns ------- feature_names_in : ndarray of str or `None` Feature names in.
_check_feature_names_in
python
scikit-learn/scikit-learn
sklearn/utils/validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
BSD-3-Clause
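Because one-to-one transformers' get_feature_names_out delegates to this helper, its outcomes can be observed through a public transformer; a sketch with StandardScaler fitted on a plain ndarray so no names are recorded:

import numpy as np
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler().fit(np.zeros((3, 2)))  # no feature names seen in fit

# with input_features=None and no feature_names_in_, names are generated
print(scaler.get_feature_names_out())            # ['x0' 'x1']
print(scaler.get_feature_names_out(["a", "b"]))  # ['a' 'b'] passes validation

try:
    scaler.get_feature_names_out(["a", "b", "c"])  # wrong length
except ValueError as exc:
    print(exc)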
def _generate_get_feature_names_out(estimator, n_features_out, input_features=None):
    """Generate feature names out for estimator using the estimator name as the prefix.

    The input feature names are validated but not used. This function is useful
    for estimators that generate their own names based on `n_features_out`,
    e.g. PCA.

    Parameters
    ----------
    estimator : estimator instance
        Estimator producing output feature names.

    n_features_out : int
        Number of feature names out.

    input_features : array-like of str or None, default=None
        Only used to validate feature names with `estimator.feature_names_in_`.

    Returns
    -------
    feature_names_out : ndarray of str
        Generated output feature names.
    """
    _check_feature_names_in(estimator, input_features, generate_names=False)
    estimator_name = estimator.__class__.__name__.lower()

    return np.asarray(
        [f"{estimator_name}{i}" for i in range(n_features_out)], dtype=object
    )
Generate feature names out for estimator using the estimator name as the prefix. The input feature names are validated but not used. This function is useful for estimators that generate their own names based on `n_features_out`, e.g. PCA. Parameters ---------- estimator : estimator instance Estimator producing output feature names. n_features_out : int Number of feature names out. input_features : array-like of str or None, default=None Only used to validate feature names with `estimator.feature_names_in_`. Returns ------- feature_names_out : ndarray of str Generated output feature names.
_generate_get_feature_names_out
python
scikit-learn/scikit-learn
sklearn/utils/validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
BSD-3-Clause
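PCA's public get_feature_names_out appears to be built on this helper, which makes the lowercase-class-name prefix easy to verify:

import numpy as np
from sklearn.decomposition import PCA

X = np.random.RandomState(0).rand(10, 4)
pca = PCA(n_components=2).fit(X)

# lowercase class name + output index, independent of input names
print(pca.get_feature_names_out())                      # ['pca0' 'pca1']
print(pca.get_feature_names_out(["a", "b", "c", "d"]))  # still ['pca0' 'pca1']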
def _check_monotonic_cst(estimator, monotonic_cst=None):
    """Check the monotonic constraints and return the corresponding array.

    This helper function should be used in the `fit` method of an estimator
    that supports monotonic constraints and called after the estimator has
    introspected input data to set the `n_features_in_` and optionally the
    `feature_names_in_` attributes.

    .. versionadded:: 1.2

    Parameters
    ----------
    estimator : estimator instance

    monotonic_cst : array-like of int, dict of str or None, default=None
        Monotonic constraints for the features.

        - If array-like, then it should contain only -1, 0 or 1. Each value
          will be checked to be in [-1, 0, 1]. If a value is -1, then the
          corresponding feature is required to be monotonically decreasing.
        - If dict, then the keys should be the feature names occurring in
          `estimator.feature_names_in_` and the values should be -1, 0 or 1.
        - If None, then an array of 0s will be allocated.

    Returns
    -------
    monotonic_cst : ndarray of int
        Monotonic constraints for each feature.
    """
    original_monotonic_cst = monotonic_cst
    if monotonic_cst is None or isinstance(monotonic_cst, dict):
        monotonic_cst = np.full(
            shape=estimator.n_features_in_,
            fill_value=0,
            dtype=np.int8,
        )
        if isinstance(original_monotonic_cst, dict):
            if not hasattr(estimator, "feature_names_in_"):
                raise ValueError(
                    f"{estimator.__class__.__name__} was not fitted on data "
                    "with feature names. Pass monotonic_cst as an integer "
                    "array instead."
                )
            unexpected_feature_names = list(
                set(original_monotonic_cst) - set(estimator.feature_names_in_)
            )
            unexpected_feature_names.sort()  # deterministic error message
            n_unexpected = len(unexpected_feature_names)
            if unexpected_feature_names:
                if len(unexpected_feature_names) > 5:
                    unexpected_feature_names = unexpected_feature_names[:5]
                    unexpected_feature_names.append("...")
                raise ValueError(
                    f"monotonic_cst contains {n_unexpected} unexpected feature "
                    f"names: {unexpected_feature_names}."
                )
            for feature_idx, feature_name in enumerate(estimator.feature_names_in_):
                if feature_name in original_monotonic_cst:
                    cst = original_monotonic_cst[feature_name]
                    if cst not in [-1, 0, 1]:
                        raise ValueError(
                            f"monotonic_cst['{feature_name}'] must be either "
                            f"-1, 0 or 1. Got {cst!r}."
                        )
                    monotonic_cst[feature_idx] = cst
    else:
        unexpected_cst = np.setdiff1d(monotonic_cst, [-1, 0, 1])
        if unexpected_cst.shape[0]:
            raise ValueError(
                "monotonic_cst must be an array-like of -1, 0 or 1. Observed "
                f"values: {unexpected_cst.tolist()}."
            )

        monotonic_cst = np.asarray(monotonic_cst, dtype=np.int8)
        if monotonic_cst.shape[0] != estimator.n_features_in_:
            raise ValueError(
                f"monotonic_cst has shape {monotonic_cst.shape} but the input data "
                f"X has {estimator.n_features_in_} features."
            )
    return monotonic_cst
Check the monotonic constraints and return the corresponding array. This helper function should be used in the `fit` method of an estimator that supports monotonic constraints and called after the estimator has introspected input data to set the `n_features_in_` and optionally the `feature_names_in_` attributes. .. versionadded:: 1.2 Parameters ---------- estimator : estimator instance monotonic_cst : array-like of int, dict of str or None, default=None Monotonic constraints for the features. - If array-like, then it should contain only -1, 0 or 1. Each value will be checked to be in [-1, 0, 1]. If a value is -1, then the corresponding feature is required to be monotonically decreasing. - If dict, then the keys should be the feature names occurring in `estimator.feature_names_in_` and the values should be -1, 0 or 1. - If None, then an array of 0s will be allocated. Returns ------- monotonic_cst : ndarray of int Monotonic constraints for each feature.
_check_monotonic_cst
python
scikit-learn/scikit-learn
sklearn/utils/validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
BSD-3-Clause
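The dict form only works when fit saw feature names; a public-facing sketch via HistGradientBoostingRegressor, which accepts a monotonic_cst dict (since scikit-learn 1.2):

import numpy as np
import pandas as pd
from sklearn.ensemble import HistGradientBoostingRegressor

rng = np.random.RandomState(0)
X = pd.DataFrame({"f0": rng.rand(100), "f1": rng.rand(100)})
y = X["f0"] - X["f1"] + 0.01 * rng.randn(100)

# dict keys must appear in feature_names_in_; unlisted features default to 0
model = HistGradientBoostingRegressor(
    monotonic_cst={"f0": 1, "f1": -1}
).fit(X, y)

# the same dict on a plain ndarray (no feature names) raises ValueError
try:
    HistGradientBoostingRegressor(monotonic_cst={"f0": 1}).fit(X.to_numpy(), y)
except ValueError as exc:
    print(exc)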
def _check_pos_label_consistency(pos_label, y_true):
    """Check if `pos_label` needs to be specified or not.

    In binary classification, we fix `pos_label=1` if the labels are in the set
    {-1, 1} or {0, 1}. Otherwise, we raise an error asking to specify the
    `pos_label` parameter.

    Parameters
    ----------
    pos_label : int, float, bool, str or None
        The positive label.

    y_true : ndarray of shape (n_samples,)
        The target vector.

    Returns
    -------
    pos_label : int, float, bool or str
        If `pos_label` can be inferred, it will be returned.

    Raises
    ------
    ValueError
        In the case that `y_true` does not have label in {-1, 1} or {0, 1},
        it will raise a `ValueError`.
    """
    # ensure binary classification if pos_label is not specified
    # classes.dtype.kind in ('O', 'U', 'S') is required to avoid
    # triggering a FutureWarning by calling np.array_equal(a, b)
    # when elements in the two arrays are not comparable.
    if pos_label is None:
        # Compute classes only if pos_label is not specified:
        classes = np.unique(y_true)
        if classes.dtype.kind in "OUS" or not (
            np.array_equal(classes, [0, 1])
            or np.array_equal(classes, [-1, 1])
            or np.array_equal(classes, [0])
            or np.array_equal(classes, [-1])
            or np.array_equal(classes, [1])
        ):
            classes_repr = ", ".join([repr(c) for c in classes.tolist()])
            raise ValueError(
                f"y_true takes value in {{{classes_repr}}} and pos_label is not "
                "specified: either make y_true take value in {0, 1} or "
                "{-1, 1} or pass pos_label explicitly."
            )
        pos_label = 1

    return pos_label
Check whether `pos_label` needs to be specified or not.

    In binary classification, we fix `pos_label=1` if the labels are in the
    set {-1, 1} or {0, 1}. Otherwise, we raise an error asking to specify the
    `pos_label` parameter.

    Parameters
    ----------
    pos_label : int, float, bool, str or None
        The positive label.

    y_true : ndarray of shape (n_samples,)
        The target vector.

    Returns
    -------
    pos_label : int, float, bool or str
        If `pos_label` can be inferred, it will be returned.

    Raises
    ------
    ValueError
        In the case that `y_true` does not take labels in {-1, 1} or {0, 1},
        it will raise a `ValueError`.
_check_pos_label_consistency
python
scikit-learn/scikit-learn
sklearn/utils/validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
BSD-3-Clause
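A brief sketch of the inference behavior, assuming scikit-learn is installed (`_check_pos_label_consistency` is private API):

import numpy as np

from sklearn.utils.validation import _check_pos_label_consistency  # private helper

# Labels in {0, 1}: pos_label=1 can be inferred.
print(_check_pos_label_consistency(None, np.array([0, 1, 1, 0])))  # 1

# String labels: inference is ambiguous, so a ValueError is raised.
# _check_pos_label_consistency(None, np.array(["ham", "spam"]))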
def _to_object_array(sequence):
    """Convert sequence to a 1-D NumPy array of object dtype.

    numpy.array constructor has a similar use but its output is ambiguous.
    It can be a 1-D NumPy array of object dtype if the input is a ragged
    array, but if the input is a list of equal length arrays, then the
    output is a 2D numpy.array.
    _to_object_array solves this ambiguity by guaranteeing that the output
    is a 1-D NumPy array of objects for any input.

    Parameters
    ----------
    sequence : array-like of shape (n_elements,)
        The sequence to be converted.

    Returns
    -------
    out : ndarray of shape (n_elements,), dtype=object
        The converted sequence into a 1-D NumPy array of object dtype.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.utils.validation import _to_object_array
    >>> _to_object_array([np.array([0]), np.array([1])])
    array([array([0]), array([1])], dtype=object)
    >>> _to_object_array([np.array([0]), np.array([1, 2])])
    array([array([0]), array([1, 2])], dtype=object)
    """
    out = np.empty(len(sequence), dtype=object)
    out[:] = sequence
    return out
Convert sequence to a 1-D NumPy array of object dtype.

    numpy.array constructor has a similar use but its output is ambiguous.
    It can be a 1-D NumPy array of object dtype if the input is a ragged
    array, but if the input is a list of equal length arrays, then the
    output is a 2D numpy.array.
    _to_object_array solves this ambiguity by guaranteeing that the output
    is a 1-D NumPy array of objects for any input.

    Parameters
    ----------
    sequence : array-like of shape (n_elements,)
        The sequence to be converted.

    Returns
    -------
    out : ndarray of shape (n_elements,), dtype=object
        The converted sequence into a 1-D NumPy array of object dtype.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.utils.validation import _to_object_array
    >>> _to_object_array([np.array([0]), np.array([1])])
    array([array([0]), array([1])], dtype=object)
    >>> _to_object_array([np.array([0]), np.array([1, 2])])
    array([array([0]), array([1, 2])], dtype=object)
_to_object_array
python
scikit-learn/scikit-learn
sklearn/utils/validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
BSD-3-Clause
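The ambiguity that motivates this helper, illustrated with equal-length inputs where `numpy.array` silently builds a 2-D array:

import numpy as np

from sklearn.utils.validation import _to_object_array  # private helper

equal_length = [np.array([0, 1]), np.array([2, 3])]
print(np.array(equal_length).shape)          # (2, 2): numpy builds a 2-D array
print(_to_object_array(equal_length).shape)  # (2,): always a 1-D object array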
def _check_feature_names(estimator, X, *, reset): """Set or check the `feature_names_in_` attribute of an estimator. .. versionadded:: 1.0 .. versionchanged:: 1.6 Moved from :class:`~sklearn.base.BaseEstimator` to :mod:`sklearn.utils.validation`. Parameters ---------- estimator : estimator instance The estimator to validate the input for. X : {ndarray, dataframe} of shape (n_samples, n_features) The input samples. reset : bool Whether to reset the `feature_names_in_` attribute. If False, the input will be checked for consistency with feature names of data provided when reset was last True. .. note:: It is recommended to call `reset=True` in `fit` and in the first call to `partial_fit`. All other methods that validate `X` should set `reset=False`. """ if reset: feature_names_in = _get_feature_names(X) if feature_names_in is not None: estimator.feature_names_in_ = feature_names_in elif hasattr(estimator, "feature_names_in_"): # Delete the attribute when the estimator is fitted on a new dataset # that has no feature names. delattr(estimator, "feature_names_in_") return fitted_feature_names = getattr(estimator, "feature_names_in_", None) X_feature_names = _get_feature_names(X) if fitted_feature_names is None and X_feature_names is None: # no feature names seen in fit and in X return if X_feature_names is not None and fitted_feature_names is None: warnings.warn( f"X has feature names, but {estimator.__class__.__name__} was fitted " "without feature names" ) return if X_feature_names is None and fitted_feature_names is not None: warnings.warn( "X does not have valid feature names, but" f" {estimator.__class__.__name__} was fitted with feature names" ) return # validate the feature names against the `feature_names_in_` attribute if len(fitted_feature_names) != len(X_feature_names) or np.any( fitted_feature_names != X_feature_names ): message = "The feature names should match those that were passed during fit.\n" fitted_feature_names_set = set(fitted_feature_names) X_feature_names_set = set(X_feature_names) unexpected_names = sorted(X_feature_names_set - fitted_feature_names_set) missing_names = sorted(fitted_feature_names_set - X_feature_names_set) def add_names(names): output = "" max_n_names = 5 for i, name in enumerate(names): if i >= max_n_names: output += "- ...\n" break output += f"- {name}\n" return output if unexpected_names: message += "Feature names unseen at fit time:\n" message += add_names(unexpected_names) if missing_names: message += "Feature names seen at fit time, yet now missing:\n" message += add_names(missing_names) if not missing_names and not unexpected_names: message += "Feature names must be in the same order as they were in fit.\n" raise ValueError(message)
Set or check the `feature_names_in_` attribute of an estimator. .. versionadded:: 1.0 .. versionchanged:: 1.6 Moved from :class:`~sklearn.base.BaseEstimator` to :mod:`sklearn.utils.validation`. Parameters ---------- estimator : estimator instance The estimator to validate the input for. X : {ndarray, dataframe} of shape (n_samples, n_features) The input samples. reset : bool Whether to reset the `feature_names_in_` attribute. If False, the input will be checked for consistency with feature names of data provided when reset was last True. .. note:: It is recommended to call `reset=True` in `fit` and in the first call to `partial_fit`. All other methods that validate `X` should set `reset=False`.
_check_feature_names
python
scikit-learn/scikit-learn
sklearn/utils/validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
BSD-3-Clause
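The user-visible effect of this check, sketched with a public estimator (any estimator whose `fit`/`predict` route through this validation behaves the same way):

import numpy as np
import pandas as pd

from sklearn.linear_model import LogisticRegression

X = pd.DataFrame({"a": [0.0, 1.0, 0.0, 1.0], "b": [1.0, 0.0, 1.0, 0.0]})
clf = LogisticRegression().fit(X, [0, 1, 0, 1])
print(clf.feature_names_in_)  # ['a' 'b']

# Predicting on a plain ndarray warns that X lacks the feature names seen in fit.
clf.predict(np.asarray(X))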
def _check_n_features(estimator, X, reset):
    """Set the `n_features_in_` attribute, or check against it on an estimator.

    .. versionchanged:: 1.6
        Moved from :class:`~sklearn.base.BaseEstimator` to
        :mod:`~sklearn.utils.validation`.

    Parameters
    ----------
    estimator : estimator instance
        The estimator to validate the input for.

    X : {ndarray, sparse matrix} of shape (n_samples, n_features)
        The input samples.

    reset : bool
        If True, the `n_features_in_` attribute is set to `X.shape[1]`.
        If False and the attribute exists, then check that it is equal to
        `X.shape[1]`. If False and the attribute does *not* exist, then
        the check is skipped.

        .. note::

           It is recommended to call reset=True in `fit` and in the first
           call to `partial_fit`. All other methods that validate `X`
           should set `reset=False`.
    """
    try:
        n_features = _num_features(X)
    except TypeError as e:
        if not reset and hasattr(estimator, "n_features_in_"):
            raise ValueError(
                "X does not contain any features, but "
                f"{estimator.__class__.__name__} is expecting "
                f"{estimator.n_features_in_} features"
            ) from e
        # If the number of features is not defined and reset=True,
        # then we skip this check
        return

    if reset:
        estimator.n_features_in_ = n_features
        return

    if not hasattr(estimator, "n_features_in_"):
        # Skip this check if the expected number of input features was not
        # recorded by calling fit first. This is typically the case for
        # stateless transformers.
        return

    if n_features != estimator.n_features_in_:
        raise ValueError(
            f"X has {n_features} features, but {estimator.__class__.__name__} "
            f"is expecting {estimator.n_features_in_} features as input."
        )
Set the `n_features_in_` attribute, or check against it on an estimator. .. versionchanged:: 1.6 Moved from :class:`~sklearn.base.BaseEstimator` to :mod:`~sklearn.utils.validation`. Parameters ---------- estimator : estimator instance The estimator to validate the input for. X : {ndarray, sparse matrix} of shape (n_samples, n_features) The input samples. reset : bool If True, the `n_features_in_` attribute is set to `X.shape[1]`. If False and the attribute exists, then check that it is equal to `X.shape[1]`. If False and the attribute does *not* exist, then the check is skipped. .. note:: It is recommended to call reset=True in `fit` and in the first call to `partial_fit`. All other methods that validate `X` should set `reset=False`.
_check_n_features
python
scikit-learn/scikit-learn
sklearn/utils/validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
BSD-3-Clause
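How the recorded feature count surfaces to users, assuming scikit-learn is installed:

from sklearn.linear_model import LinearRegression

reg = LinearRegression().fit([[0.0, 1.0], [1.0, 0.0]], [0.0, 1.0])
print(reg.n_features_in_)  # 2

# Passing a different number of features at predict time raises ValueError:
# reg.predict([[0.0, 1.0, 2.0]])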
def validate_data(
    _estimator,
    /,
    X="no_validation",
    y="no_validation",
    reset=True,
    validate_separately=False,
    skip_check_array=False,
    **check_params,
):
    """Validate input data and set or check feature names and counts of the input.

    This helper function should be used in an estimator that requires input
    validation. This mutates the estimator and sets the `n_features_in_` and
    `feature_names_in_` attributes if `reset=True`.

    .. versionadded:: 1.6

    Parameters
    ----------
    _estimator : estimator instance
        The estimator to validate the input for.

    X : {array-like, sparse matrix, dataframe} of shape \
            (n_samples, n_features), default='no_validation'
        The input samples.
        If `'no_validation'`, no validation is performed on `X`. This is
        useful for meta-estimators, which can delegate input validation to
        their underlying estimator(s). In that case `y` must be passed and
        the only accepted `check_params` are `multi_output` and
        `y_numeric`.

    y : array-like of shape (n_samples,), default='no_validation'
        The targets.

        - If `None`, :func:`~sklearn.utils.check_array` is called on `X`. If
          the estimator's `requires_y` tag is True, then an error will be
          raised.
        - If `'no_validation'`, :func:`~sklearn.utils.check_array` is called
          on `X` and the estimator's `requires_y` tag is ignored. This is a
          default placeholder and is never meant to be explicitly set. In
          that case `X` must be passed.
        - Otherwise, only `y` with `_check_y` or both `X` and `y` are
          checked with either :func:`~sklearn.utils.check_array` or
          :func:`~sklearn.utils.check_X_y` depending on
          `validate_separately`.

    reset : bool, default=True
        Whether to reset the `n_features_in_` attribute.
        If False, the input will be checked for consistency with data
        provided when reset was last True.

        .. note::

           It is recommended to call `reset=True` in `fit` and in the first
           call to `partial_fit`. All other methods that validate `X`
           should set `reset=False`.

    validate_separately : False or tuple of dicts, default=False
        Only used if `y` is not `None`.
        If `False`, call :func:`~sklearn.utils.check_X_y`. Else, it must be a
        tuple of kwargs to be used for calling
        :func:`~sklearn.utils.check_array` on `X` and `y` respectively.

        `estimator=self` is automatically added to these dicts to generate
        more informative error message in case of invalid input data.

    skip_check_array : bool, default=False
        If `True`, `X` and `y` are unchanged and only `feature_names_in_` and
        `n_features_in_` are checked. Otherwise,
        :func:`~sklearn.utils.check_array` is called on `X` and `y`.

    **check_params : kwargs
        Parameters passed to :func:`~sklearn.utils.check_array` or
        :func:`~sklearn.utils.check_X_y`. Ignored if validate_separately
        is not False.

        `estimator=self` is automatically added to these params to generate
        more informative error message in case of invalid input data.

    Returns
    -------
    out : {ndarray, sparse matrix} or tuple of these
        The validated input. A tuple is returned if both `X` and `y` are
        validated.
    """
    _check_feature_names(_estimator, X, reset=reset)
    tags = get_tags(_estimator)
    if y is None and tags.target_tags.required:
        raise ValueError(
            f"This {_estimator.__class__.__name__} estimator "
            "requires y to be passed, but the target y is None."
) no_val_X = isinstance(X, str) and X == "no_validation" no_val_y = y is None or (isinstance(y, str) and y == "no_validation") if no_val_X and no_val_y: raise ValueError("Validation should be done on X, y or both.") default_check_params = {"estimator": _estimator} check_params = {**default_check_params, **check_params} if skip_check_array: if not no_val_X and no_val_y: out = X elif no_val_X and not no_val_y: out = y else: out = X, y elif not no_val_X and no_val_y: out = check_array(X, input_name="X", **check_params) elif no_val_X and not no_val_y: out = _check_y(y, **check_params) else: if validate_separately: # We need this because some estimators validate X and y # separately, and in general, separately calling check_array() # on X and y isn't equivalent to just calling check_X_y() # :( check_X_params, check_y_params = validate_separately if "estimator" not in check_X_params: check_X_params = {**default_check_params, **check_X_params} X = check_array(X, input_name="X", **check_X_params) if "estimator" not in check_y_params: check_y_params = {**default_check_params, **check_y_params} y = check_array(y, input_name="y", **check_y_params) else: X, y = check_X_y(X, y, **check_params) out = X, y if not no_val_X and check_params.get("ensure_2d", True): _check_n_features(_estimator, X, reset=reset) return out
Validate input data and set or check feature names and counts of the input.

    This helper function should be used in an estimator that requires input
    validation. This mutates the estimator and sets the `n_features_in_` and
    `feature_names_in_` attributes if `reset=True`.

    .. versionadded:: 1.6

    Parameters
    ----------
    _estimator : estimator instance
        The estimator to validate the input for.

    X : {array-like, sparse matrix, dataframe} of shape (n_samples, n_features), default='no_validation'
        The input samples.
        If `'no_validation'`, no validation is performed on `X`. This is
        useful for meta-estimators, which can delegate input validation to
        their underlying estimator(s). In that case `y` must be passed and
        the only accepted `check_params` are `multi_output` and `y_numeric`.

    y : array-like of shape (n_samples,), default='no_validation'
        The targets.

        - If `None`, :func:`~sklearn.utils.check_array` is called on `X`. If
          the estimator's `requires_y` tag is True, then an error will be
          raised.
        - If `'no_validation'`, :func:`~sklearn.utils.check_array` is called
          on `X` and the estimator's `requires_y` tag is ignored. This is a
          default placeholder and is never meant to be explicitly set. In
          that case `X` must be passed.
        - Otherwise, only `y` with `_check_y` or both `X` and `y` are checked
          with either :func:`~sklearn.utils.check_array` or
          :func:`~sklearn.utils.check_X_y` depending on `validate_separately`.

    reset : bool, default=True
        Whether to reset the `n_features_in_` attribute.
        If False, the input will be checked for consistency with data
        provided when reset was last True.

        .. note::

           It is recommended to call `reset=True` in `fit` and in the first
           call to `partial_fit`. All other methods that validate `X` should
           set `reset=False`.

    validate_separately : False or tuple of dicts, default=False
        Only used if `y` is not `None`.
        If `False`, call :func:`~sklearn.utils.check_X_y`. Else, it must be a
        tuple of kwargs to be used for calling
        :func:`~sklearn.utils.check_array` on `X` and `y` respectively.

        `estimator=self` is automatically added to these dicts to generate
        more informative error message in case of invalid input data.

    skip_check_array : bool, default=False
        If `True`, `X` and `y` are unchanged and only `feature_names_in_` and
        `n_features_in_` are checked. Otherwise,
        :func:`~sklearn.utils.check_array` is called on `X` and `y`.

    **check_params : kwargs
        Parameters passed to :func:`~sklearn.utils.check_array` or
        :func:`~sklearn.utils.check_X_y`. Ignored if validate_separately is
        not False.

        `estimator=self` is automatically added to these params to generate
        more informative error message in case of invalid input data.

    Returns
    -------
    out : {ndarray, sparse matrix} or tuple of these
        The validated input. A tuple is returned if both `X` and `y` are
        validated.
validate_data
python
scikit-learn/scikit-learn
sklearn/utils/validation.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/validation.py
BSD-3-Clause
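A minimal sketch of a custom estimator using this helper; the `MeanPredictor` class is hypothetical and only illustrates the `reset=True`/`reset=False` convention:

import numpy as np

from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.utils.validation import validate_data


class MeanPredictor(RegressorMixin, BaseEstimator):
    # Hypothetical estimator that always predicts the training mean.
    def fit(self, X, y):
        X, y = validate_data(self, X, y)  # sets n_features_in_ (and names)
        self.mean_ = float(np.mean(y))
        return self

    def predict(self, X):
        X = validate_data(self, X, reset=False)  # checks feature count/names
        return np.full(X.shape[0], self.mean_)


print(MeanPredictor().fit([[0.0], [1.0]], [1.0, 3.0]).predict([[2.0]]))  # [2.]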
def _init_arpack_v0(size, random_state): """Initialize the starting vector for iteration in ARPACK functions. Initialize a ndarray with values sampled from the uniform distribution on [-1, 1]. This initialization model has been chosen to be consistent with the ARPACK one as another initialization can lead to convergence issues. Parameters ---------- size : int The size of the eigenvalue vector to be initialized. random_state : int, RandomState instance or None, default=None The seed of the pseudo random number generator used to generate a uniform distribution. If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- v0 : ndarray of shape (size,) The initialized vector. """ random_state = check_random_state(random_state) v0 = random_state.uniform(-1, 1, size) return v0
Initialize the starting vector for iteration in ARPACK functions. Initialize a ndarray with values sampled from the uniform distribution on [-1, 1]. This initialization model has been chosen to be consistent with the ARPACK one as another initialization can lead to convergence issues. Parameters ---------- size : int The size of the eigenvalue vector to be initialized. random_state : int, RandomState instance or None, default=None The seed of the pseudo random number generator used to generate a uniform distribution. If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- v0 : ndarray of shape (size,) The initialized vector.
_init_arpack_v0
python
scikit-learn/scikit-learn
sklearn/utils/_arpack.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_arpack.py
BSD-3-Clause
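A sketch of using the deterministic starting vector with SciPy's ARPACK wrapper (`_init_arpack_v0` is private API):

import numpy as np
from scipy.sparse.linalg import eigsh

from sklearn.utils._arpack import _init_arpack_v0  # private helper

A = np.diag(np.arange(1.0, 6.0))
v0 = _init_arpack_v0(size=A.shape[0], random_state=0)
eigenvalues, _ = eigsh(A, k=2, v0=v0)  # reproducible across runs
print(eigenvalues)  # the two largest eigenvalues: [4. 5.]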
def yield_namespaces(include_numpy_namespaces=True): """Yield supported namespace. This is meant to be used for testing purposes only. Parameters ---------- include_numpy_namespaces : bool, default=True If True, also yield numpy namespaces. Returns ------- array_namespace : str The name of the Array API namespace. """ for array_namespace in [ # The following is used to test the array_api_compat wrapper when # array_api_dispatch is enabled: in particular, the arrays used in the # tests are regular numpy arrays without any "device" attribute. "numpy", # Stricter NumPy-based Array API implementation. The # array_api_strict.Array instances always have a dummy "device" attribute. "array_api_strict", "cupy", "torch", ]: if not include_numpy_namespaces and array_namespace in _NUMPY_NAMESPACE_NAMES: continue yield array_namespace
Yield supported namespace. This is meant to be used for testing purposes only. Parameters ---------- include_numpy_namespaces : bool, default=True If True, also yield numpy namespaces. Returns ------- array_namespace : str The name of the Array API namespace.
yield_namespaces
python
scikit-learn/scikit-learn
sklearn/utils/_array_api.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_array_api.py
BSD-3-Clause
def yield_namespace_device_dtype_combinations(include_numpy_namespaces=True): """Yield supported namespace, device, dtype tuples for testing. Use this to test that an estimator works with all combinations. Use in conjunction with `ids=_get_namespace_device_dtype_ids` to give clearer pytest parametrization ID names. Parameters ---------- include_numpy_namespaces : bool, default=True If True, also yield numpy namespaces. Returns ------- array_namespace : str The name of the Array API namespace. device : str The name of the device on which to allocate the arrays. Can be None to indicate that the default value should be used. dtype_name : str The name of the data type to use for arrays. Can be None to indicate that the default value should be used. """ for array_namespace in yield_namespaces( include_numpy_namespaces=include_numpy_namespaces ): if array_namespace == "torch": for device, dtype in itertools.product( ("cpu", "cuda"), ("float64", "float32") ): yield array_namespace, device, dtype yield array_namespace, "mps", "float32" elif array_namespace == "array_api_strict": try: import array_api_strict yield array_namespace, array_api_strict.Device("CPU_DEVICE"), "float64" yield array_namespace, array_api_strict.Device("device1"), "float32" except ImportError: # Those combinations will typically be skipped by pytest if # array_api_strict is not installed but we still need to see them in # the test output. yield array_namespace, "CPU_DEVICE", "float64" yield array_namespace, "device1", "float32" else: yield array_namespace, None, None
Yield supported namespace, device, dtype tuples for testing. Use this to test that an estimator works with all combinations. Use in conjunction with `ids=_get_namespace_device_dtype_ids` to give clearer pytest parametrization ID names. Parameters ---------- include_numpy_namespaces : bool, default=True If True, also yield numpy namespaces. Returns ------- array_namespace : str The name of the Array API namespace. device : str The name of the device on which to allocate the arrays. Can be None to indicate that the default value should be used. dtype_name : str The name of the data type to use for arrays. Can be None to indicate that the default value should be used.
yield_namespace_device_dtype_combinations
python
scikit-learn/scikit-learn
sklearn/utils/_array_api.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_array_api.py
BSD-3-Clause
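A sketch of how these generators are typically consumed in a test suite; the test body is hypothetical:

import pytest

from sklearn.utils._array_api import yield_namespace_device_dtype_combinations


@pytest.mark.parametrize(
    "array_namespace, device, dtype_name",
    yield_namespace_device_dtype_combinations(),
)
def test_some_estimator(array_namespace, device, dtype_name):
    # Hypothetical test body: skip namespaces that are not installed,
    # then exercise the estimator on the given device and dtype.
    pytest.importorskip(array_namespace)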
def _check_array_api_dispatch(array_api_dispatch):
    """Check that the installed SciPy version and configuration support dispatch.

    Array API dispatch requires SciPy 1.14.0 or newer and the
    SCIPY_ARRAY_API=1 environment variable to be set before importing
    scipy or scikit-learn.
    """
    if not array_api_dispatch:
        return

    scipy_version = parse_version(scipy.__version__)
    min_scipy_version = "1.14.0"
    if scipy_version < parse_version(min_scipy_version):
        raise ImportError(
            f"SciPy must be {min_scipy_version} or newer"
            f" (found {scipy.__version__}) to dispatch array using"
            " the array API specification"
        )

    if os.environ.get("SCIPY_ARRAY_API") != "1":
        raise RuntimeError(
            "Scikit-learn array API support was enabled but scipy's own support is "
            "not enabled. Please set the SCIPY_ARRAY_API=1 environment variable "
            "before importing sklearn or scipy. More details at: "
            "https://docs.scipy.org/doc/scipy/dev/api-dev/array_api.html"
        )
Check that the installed SciPy version and configuration support dispatch.

    Array API dispatch requires SciPy 1.14.0 or newer and the
    SCIPY_ARRAY_API=1 environment variable to be set before importing
    scipy or scikit-learn.
_check_array_api_dispatch
python
scikit-learn/scikit-learn
sklearn/utils/_array_api.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_array_api.py
BSD-3-Clause
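Per the checks above, enabling dispatch requires the environment variable to be set before any scipy/sklearn import; a sketch:

import os

os.environ["SCIPY_ARRAY_API"] = "1"  # must be set before importing scipy/sklearn

import sklearn

sklearn.set_config(array_api_dispatch=True)  # triggers the checks above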
def _single_array_device(array):
    """Hardware device on which the array data resides."""
    if (
        isinstance(array, (numpy.ndarray, numpy.generic))
        or not hasattr(array, "device")
        # When array API dispatch is disabled, we expect the scikit-learn code
        # to use np.asarray so that the resulting NumPy array will implicitly use the
        # CPU. In this case, scikit-learn should stay as device neutral as possible,
        # hence the use of `device=None` which is accepted by all libraries, before
        # and after the expected conversion to NumPy via np.asarray.
        or not get_config()["array_api_dispatch"]
    ):
        return None
    else:
        return array.device
Hardware device on which the array data resides.
_single_array_device
python
scikit-learn/scikit-learn
sklearn/utils/_array_api.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_array_api.py
BSD-3-Clause
def device(*array_list, remove_none=True, remove_types=(str,)):
    """Hardware device on which the array data resides.

    If the hardware device is not the same for all arrays, an error is raised.

    Parameters
    ----------
    *array_list : arrays
        List of array instances from NumPy or an array API compatible library.

    remove_none : bool, default=True
        Whether to ignore None objects passed in array_list.

    remove_types : tuple or list, default=(str,)
        Types to ignore in array_list.

    Returns
    -------
    out : device
        `device` object (see the "Device Support" section of the array API spec).
    """
    array_list = _remove_non_arrays(
        *array_list, remove_none=remove_none, remove_types=remove_types
    )

    if not array_list:
        return None

    device_ = _single_array_device(array_list[0])

    # Note: here we cannot simply use a Python `set` as it requires
    # hashable members which is not guaranteed for Array API device
    # objects. In particular, CuPy devices are not hashable at the
    # time of writing.
    for array in array_list[1:]:
        device_other = _single_array_device(array)
        if device_ != device_other:
            raise ValueError(
                f"Input arrays use different devices: {device_}, {device_other}"
            )

    return device_
Hardware device on which the array data resides.

    If the hardware device is not the same for all arrays, an error is raised.

    Parameters
    ----------
    *array_list : arrays
        List of array instances from NumPy or an array API compatible library.

    remove_none : bool, default=True
        Whether to ignore None objects passed in array_list.

    remove_types : tuple or list, default=(str,)
        Types to ignore in array_list.

    Returns
    -------
    out : device
        `device` object (see the "Device Support" section of the array API spec).
device
python
scikit-learn/scikit-learn
sklearn/utils/_array_api.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_array_api.py
BSD-3-Clause
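Behavior with plain NumPy inputs (and with dispatch disabled), where every array reports a None device:

import numpy as np

from sklearn.utils._array_api import device  # private module

a, b = np.ones(3), np.zeros(2)
print(device(a, b, None, "ignored"))  # None: NumPy arrays carry no device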
def isdtype(dtype, kind, *, xp):
    """Returns a boolean indicating whether a provided dtype is of type "kind".

    Included in v2022.12 of the Array API spec.
    https://data-apis.org/array-api/latest/API_specification/generated/array_api.isdtype.html
    """
    if isinstance(kind, tuple):
        return any(_isdtype_single(dtype, k, xp=xp) for k in kind)
    else:
        return _isdtype_single(dtype, kind, xp=xp)
Returns a boolean indicating whether a provided dtype is of type "kind".

    Included in v2022.12 of the Array API spec.
    https://data-apis.org/array-api/latest/API_specification/generated/array_api.isdtype.html
isdtype
python
scikit-learn/scikit-learn
sklearn/utils/_array_api.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_array_api.py
BSD-3-Clause
def supported_float_dtypes(xp): """Supported floating point types for the namespace. Note: float16 is not officially part of the Array API spec at the time of writing but scikit-learn estimators and functions can choose to accept it when xp.float16 is defined. https://data-apis.org/array-api/latest/API_specification/data_types.html """ if hasattr(xp, "float16"): return (xp.float64, xp.float32, xp.float16) else: return (xp.float64, xp.float32)
Supported floating point types for the namespace. Note: float16 is not officially part of the Array API spec at the time of writing but scikit-learn estimators and functions can choose to accept it when xp.float16 is defined. https://data-apis.org/array-api/latest/API_specification/data_types.html
supported_float_dtypes
python
scikit-learn/scikit-learn
sklearn/utils/_array_api.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_array_api.py
BSD-3-Clause
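With NumPy as the namespace, `float16` is defined, so all three float dtypes are reported:

import numpy as np

from sklearn.utils._array_api import supported_float_dtypes  # private module

print(supported_float_dtypes(np))  # (float64, float32, float16)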
def ensure_common_namespace_device(reference, *arrays): """Ensure that all arrays use the same namespace and device as reference. If necessary the arrays are moved to the same namespace and device as the reference array. Parameters ---------- reference : array Reference array. *arrays : array Arrays to check. Returns ------- arrays : list Arrays with the same namespace and device as reference. """ xp, is_array_api = get_namespace(reference) if is_array_api: device_ = device(reference) # Move arrays to the same namespace and device as the reference array. return [xp.asarray(a, device=device_) for a in arrays] else: return arrays
Ensure that all arrays use the same namespace and device as reference. If necessary the arrays are moved to the same namespace and device as the reference array. Parameters ---------- reference : array Reference array. *arrays : array Arrays to check. Returns ------- arrays : list Arrays with the same namespace and device as reference.
ensure_common_namespace_device
python
scikit-learn/scikit-learn
sklearn/utils/_array_api.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_array_api.py
BSD-3-Clause
def _remove_non_arrays(*arrays, remove_none=True, remove_types=(str,)): """Filter arrays to exclude None and/or specific types. Sparse arrays are always filtered out. Parameters ---------- *arrays : array objects Array objects. remove_none : bool, default=True Whether to ignore None objects passed in arrays. remove_types : tuple or list, default=(str,) Types to ignore in the arrays. Returns ------- filtered_arrays : list List of arrays filtered as requested. An empty list is returned if no input passes the filters. """ filtered_arrays = [] remove_types = tuple(remove_types) for array in arrays: if remove_none and array is None: continue if isinstance(array, remove_types): continue if sp.issparse(array): continue filtered_arrays.append(array) return filtered_arrays
Filter arrays to exclude None and/or specific types. Sparse arrays are always filtered out. Parameters ---------- *arrays : array objects Array objects. remove_none : bool, default=True Whether to ignore None objects passed in arrays. remove_types : tuple or list, default=(str,) Types to ignore in the arrays. Returns ------- filtered_arrays : list List of arrays filtered as requested. An empty list is returned if no input passes the filters.
_remove_non_arrays
python
scikit-learn/scikit-learn
sklearn/utils/_array_api.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_array_api.py
BSD-3-Clause
def get_namespace(*arrays, remove_none=True, remove_types=(str,), xp=None):
    """Get namespace of arrays.

    Introspect `arrays` arguments and return their common Array API
    compatible namespace object, if any.

    Note that sparse arrays are filtered by default.

    See: https://numpy.org/neps/nep-0047-array-api-standard.html

    If `arrays` are regular numpy arrays, `array_api_compat.numpy` is
    returned instead.

    Namespace support is not enabled by default. To enable it, call:

      sklearn.set_config(array_api_dispatch=True)

    or:

      with sklearn.config_context(array_api_dispatch=True):
          # your code here

    Otherwise `array_api_compat.numpy` is always returned irrespective of
    whether the arrays implement the `__array_namespace__` protocol or not.

    Note that if no arrays pass the set filters, the NumPy namespace is
    returned with `is_array_api_compliant` set to False.

    Parameters
    ----------
    *arrays : array objects
        Array objects.

    remove_none : bool, default=True
        Whether to ignore None objects passed in arrays.

    remove_types : tuple or list, default=(str,)
        Types to ignore in the arrays.

    xp : module, default=None
        Precomputed array namespace module. When passed, typically from a caller
        that has already performed inspection of its own inputs, skips array
        namespace inspection.

    Returns
    -------
    namespace : module
        Namespace shared by array objects. If any of the `arrays` are not arrays,
        the namespace defaults to the NumPy namespace.

    is_array_api_compliant : bool
        True if the arrays are containers that implement the array API spec (see
        https://data-apis.org/array-api/latest/index.html). Always False when
        array_api_dispatch=False.
    """
    array_api_dispatch = get_config()["array_api_dispatch"]
    if not array_api_dispatch:
        if xp is not None:
            return xp, False
        else:
            return np_compat, False

    if xp is not None:
        return xp, True

    arrays = _remove_non_arrays(
        *arrays,
        remove_none=remove_none,
        remove_types=remove_types,
    )

    if not arrays:
        return np_compat, False

    _check_array_api_dispatch(array_api_dispatch)

    namespace, is_array_api_compliant = array_api_compat.get_namespace(*arrays), True

    if namespace.__name__ == "array_api_strict" and hasattr(
        namespace, "set_array_api_strict_flags"
    ):
        namespace.set_array_api_strict_flags(api_version="2024.12")

    return namespace, is_array_api_compliant
Get namespace of arrays.

    Introspect `arrays` arguments and return their common Array API
    compatible namespace object, if any.

    Note that sparse arrays are filtered by default.

    See: https://numpy.org/neps/nep-0047-array-api-standard.html

    If `arrays` are regular numpy arrays, `array_api_compat.numpy` is
    returned instead.

    Namespace support is not enabled by default. To enable it, call:

      sklearn.set_config(array_api_dispatch=True)

    or:

      with sklearn.config_context(array_api_dispatch=True):
          # your code here

    Otherwise `array_api_compat.numpy` is always returned irrespective of
    whether the arrays implement the `__array_namespace__` protocol or not.

    Note that if no arrays pass the set filters, the NumPy namespace is
    returned with `is_array_api_compliant` set to False.

    Parameters
    ----------
    *arrays : array objects
        Array objects.

    remove_none : bool, default=True
        Whether to ignore None objects passed in arrays.

    remove_types : tuple or list, default=(str,)
        Types to ignore in the arrays.

    xp : module, default=None
        Precomputed array namespace module. When passed, typically from a caller
        that has already performed inspection of its own inputs, skips array
        namespace inspection.

    Returns
    -------
    namespace : module
        Namespace shared by array objects. If any of the `arrays` are not arrays,
        the namespace defaults to the NumPy namespace.

    is_array_api_compliant : bool
        True if the arrays are containers that implement the array API spec (see
        https://data-apis.org/array-api/latest/index.html). Always False when
        array_api_dispatch=False.
get_namespace
python
scikit-learn/scikit-learn
sklearn/utils/_array_api.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_array_api.py
BSD-3-Clause
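With dispatch disabled (the default), the NumPy-compatible wrapper is returned and the compliance flag is False:

import numpy as np

from sklearn.utils._array_api import get_namespace  # private module

xp, is_array_api_compliant = get_namespace(np.ones(3))
print(is_array_api_compliant)    # False: array_api_dispatch is off by default
print(xp.asarray([1, 2]).sum())  # the wrapper still exposes a NumPy-like API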
def get_namespace_and_device( *array_list, remove_none=True, remove_types=(str,), xp=None ): """Combination into one single function of `get_namespace` and `device`. Parameters ---------- *array_list : array objects Array objects. remove_none : bool, default=True Whether to ignore None objects passed in arrays. remove_types : tuple or list, default=(str,) Types to ignore in the arrays. xp : module, default=None Precomputed array namespace module. When passed, typically from a caller that has already performed inspection of its own inputs, skips array namespace inspection. Returns ------- namespace : module Namespace shared by array objects. If any of the `arrays` are not arrays, the namespace defaults to NumPy. is_array_api_compliant : bool True if the arrays are containers that implement the Array API spec. Always False when array_api_dispatch=False. device : device `device` object (see the "Device Support" section of the array API spec). """ skip_remove_kwargs = dict(remove_none=False, remove_types=[]) array_list = _remove_non_arrays( *array_list, remove_none=remove_none, remove_types=remove_types, ) arrays_device = device(*array_list, **skip_remove_kwargs) if xp is None: xp, is_array_api = get_namespace(*array_list, **skip_remove_kwargs) else: xp, is_array_api = xp, True if is_array_api: return xp, is_array_api, arrays_device else: return xp, False, arrays_device
Combination into one single function of `get_namespace` and `device`. Parameters ---------- *array_list : array objects Array objects. remove_none : bool, default=True Whether to ignore None objects passed in arrays. remove_types : tuple or list, default=(str,) Types to ignore in the arrays. xp : module, default=None Precomputed array namespace module. When passed, typically from a caller that has already performed inspection of its own inputs, skips array namespace inspection. Returns ------- namespace : module Namespace shared by array objects. If any of the `arrays` are not arrays, the namespace defaults to NumPy. is_array_api_compliant : bool True if the arrays are containers that implement the Array API spec. Always False when array_api_dispatch=False. device : device `device` object (see the "Device Support" section of the array API spec).
get_namespace_and_device
python
scikit-learn/scikit-learn
sklearn/utils/_array_api.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_array_api.py
BSD-3-Clause
def _fill_or_add_to_diagonal(array, value, xp, add_value=True, wrap=False):
    """Implementation to facilitate adding or assigning specified values to the
    diagonal of a 2-d array.

    If ``add_value`` is `True` then the values will be added to the diagonal
    elements otherwise the values will be assigned to the diagonal elements.
    By default, ``add_value`` is set to `True`. This is currently only
    supported for 2-d arrays.

    The implementation is taken from the `numpy.fill_diagonal` function:
    https://github.com/numpy/numpy/blob/v2.0.0/numpy/lib/_index_tricks_impl.py#L799-L929
    """
    if array.ndim != 2:
        raise ValueError(
            f"array should be 2-d. Got array with shape {tuple(array.shape)}"
        )

    value = xp.asarray(value, dtype=array.dtype, device=device(array))
    end = None
    # Explicit, fast formula for the common case. For 2-d arrays, we
    # accept rectangular ones.
    step = array.shape[1] + 1
    if not wrap:
        end = array.shape[1] * array.shape[1]

    array_flat = xp.reshape(array, (-1,))
    if add_value:
        array_flat[:end:step] += value
    else:
        array_flat[:end:step] = value
Implementation to facilitate adding or assigning specified values to the
    diagonal of a 2-d array.

    If ``add_value`` is `True` then the values will be added to the diagonal
    elements otherwise the values will be assigned to the diagonal elements.
    By default, ``add_value`` is set to `True`. This is currently only
    supported for 2-d arrays.

    The implementation is taken from the `numpy.fill_diagonal` function:
    https://github.com/numpy/numpy/blob/v2.0.0/numpy/lib/_index_tricks_impl.py#L799-L929
_fill_or_add_to_diagonal
python
scikit-learn/scikit-learn
sklearn/utils/_array_api.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_array_api.py
BSD-3-Clause
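A sketch with a NumPy-backed array; for contiguous NumPy arrays the reshape is a view, so the helper mutates the array in place:

import numpy as np

from sklearn.utils._array_api import _fill_or_add_to_diagonal, get_namespace

a = np.zeros((3, 3))
xp, _ = get_namespace(a)
_fill_or_add_to_diagonal(a, 1.0, xp=xp, add_value=False)
print(a)  # identity matrix: the diagonal was assigned in place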
def _max_precision_float_dtype(xp, device): """Return the float dtype with the highest precision supported by the device.""" # TODO: Update to use `__array_namespace__info__()` from array-api v2023.12 # when/if that becomes more widespread. if _is_xp_namespace(xp, "torch") and str(device).startswith( "mps" ): # pragma: no cover return xp.float32 return xp.float64
Return the float dtype with the highest precision supported by the device.
_max_precision_float_dtype
python
scikit-learn/scikit-learn
sklearn/utils/_array_api.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_array_api.py
BSD-3-Clause
def _find_matching_floating_dtype(*arrays, xp): """Find a suitable floating point dtype when computing with arrays. If any of the arrays are floating point, return the dtype with the highest precision by following official type promotion rules: https://data-apis.org/array-api/latest/API_specification/type_promotion.html If there are no floating point input arrays (all integral inputs for instance), return the default floating point dtype for the namespace. """ dtyped_arrays = [xp.asarray(a) for a in arrays if hasattr(a, "dtype")] floating_dtypes = [ a.dtype for a in dtyped_arrays if xp.isdtype(a.dtype, "real floating") ] if floating_dtypes: # Return the floating dtype with the highest precision: return xp.result_type(*floating_dtypes) # If none of the input arrays have a floating point dtype, they must be all # integer arrays or containers of Python scalars: return the default # floating point dtype for the namespace (implementation specific). return xp.asarray(0.0).dtype
Find a suitable floating point dtype when computing with arrays. If any of the arrays are floating point, return the dtype with the highest precision by following official type promotion rules: https://data-apis.org/array-api/latest/API_specification/type_promotion.html If there are no floating point input arrays (all integral inputs for instance), return the default floating point dtype for the namespace.
_find_matching_floating_dtype
python
scikit-learn/scikit-learn
sklearn/utils/_array_api.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_array_api.py
BSD-3-Clause
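Promotion behavior with NumPy inputs (`_find_matching_floating_dtype` is private API):

import numpy as np

from sklearn.utils._array_api import _find_matching_floating_dtype, get_namespace

xp, _ = get_namespace(np.ones(2))
print(_find_matching_floating_dtype(np.ones(2, dtype=np.float32), np.ones(2), xp=xp))
# float64: the highest precision among the floating-point inputs
print(_find_matching_floating_dtype(np.arange(3), [0, 1], xp=xp))
# float64: no floating inputs, so the namespace default is used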
def _average(a, axis=None, weights=None, normalize=True, xp=None): """Partial port of np.average to support the Array API. It does a best effort at mimicking the return dtype rule described at https://numpy.org/doc/stable/reference/generated/numpy.average.html but only for the common cases needed in scikit-learn. """ xp, _, device_ = get_namespace_and_device(a, weights) if _is_numpy_namespace(xp): if normalize: return xp.asarray(numpy.average(a, axis=axis, weights=weights)) elif axis is None and weights is not None: return xp.asarray(numpy.dot(a, weights)) a = xp.asarray(a, device=device_) if weights is not None: weights = xp.asarray(weights, device=device_) if weights is not None and a.shape != weights.shape: if axis is None: raise TypeError( f"Axis must be specified when the shape of a {tuple(a.shape)} and " f"weights {tuple(weights.shape)} differ." ) if tuple(weights.shape) != (a.shape[axis],): raise ValueError( f"Shape of weights weights.shape={tuple(weights.shape)} must be " f"consistent with a.shape={tuple(a.shape)} and {axis=}." ) # If weights are 1D, add singleton dimensions for broadcasting shape = [1] * a.ndim shape[axis] = a.shape[axis] weights = xp.reshape(weights, shape) if xp.isdtype(a.dtype, "complex floating"): raise NotImplementedError( "Complex floating point values are not supported by average." ) if weights is not None and xp.isdtype(weights.dtype, "complex floating"): raise NotImplementedError( "Complex floating point values are not supported by average." ) output_dtype = _find_matching_floating_dtype(a, weights, xp=xp) a = xp.astype(a, output_dtype) if weights is None: return (xp.mean if normalize else xp.sum)(a, axis=axis) weights = xp.astype(weights, output_dtype) sum_ = xp.sum(xp.multiply(a, weights), axis=axis) if not normalize: return sum_ scale = xp.sum(weights, axis=axis) if xp.any(scale == 0.0): raise ZeroDivisionError("Weights sum to zero, can't be normalized") return sum_ / scale
Partial port of np.average to support the Array API. It does a best effort at mimicking the return dtype rule described at https://numpy.org/doc/stable/reference/generated/numpy.average.html but only for the common cases needed in scikit-learn.
_average
python
scikit-learn/scikit-learn
sklearn/utils/_array_api.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_array_api.py
BSD-3-Clause
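Weighted behavior, including the non-normalizing mode (`_average` is private API):

import numpy as np

from sklearn.utils._array_api import _average  # private module

a = np.asarray([1.0, 2.0, 3.0])
w = np.asarray([3.0, 1.0, 0.0])
print(_average(a, weights=w))                   # 1.25, the weighted mean
print(_average(a, weights=w, normalize=False))  # 5.0, the weighted sum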
def _asarray_with_order( array, dtype=None, order=None, copy=None, *, xp=None, device=None ): """Helper to support the order kwarg only for NumPy-backed arrays Memory layout parameter `order` is not exposed in the Array API standard, however some input validation code in scikit-learn needs to work both for classes and functions that will leverage Array API only operations and for code that inherently relies on NumPy backed data containers with specific memory layout constraints (e.g. our own Cython code). The purpose of this helper is to make it possible to share code for data container validation without memory copies for both downstream use cases: the `order` parameter is only enforced if the input array implementation is NumPy based, otherwise `order` is just silently ignored. """ xp, _ = get_namespace(array, xp=xp) if _is_numpy_namespace(xp): # Use NumPy API to support order if copy is True: array = numpy.array(array, order=order, dtype=dtype) else: array = numpy.asarray(array, order=order, dtype=dtype) # At this point array is a NumPy ndarray. We convert it to an array # container that is consistent with the input's namespace. return xp.asarray(array) else: return xp.asarray(array, dtype=dtype, copy=copy, device=device)
Helper to support the order kwarg only for NumPy-backed arrays Memory layout parameter `order` is not exposed in the Array API standard, however some input validation code in scikit-learn needs to work both for classes and functions that will leverage Array API only operations and for code that inherently relies on NumPy backed data containers with specific memory layout constraints (e.g. our own Cython code). The purpose of this helper is to make it possible to share code for data container validation without memory copies for both downstream use cases: the `order` parameter is only enforced if the input array implementation is NumPy based, otherwise `order` is just silently ignored.
_asarray_with_order
python
scikit-learn/scikit-learn
sklearn/utils/_array_api.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_array_api.py
BSD-3-Clause
def _ravel(array, xp=None):
    """Array API compliant version of np.ravel.

    For non-NumPy namespaces, it returns a flattened array that may or may
    not be a copy.
    """
    xp, _ = get_namespace(array, xp=xp)
    if _is_numpy_namespace(xp):
        array = numpy.asarray(array)
        return xp.asarray(numpy.ravel(array, order="C"))

    return xp.reshape(array, shape=(-1,))
Array API compliant version of np.ravel.

    For non-NumPy namespaces, it returns a flattened array that may or may
    not be a copy.
_ravel
python
scikit-learn/scikit-learn
sklearn/utils/_array_api.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_array_api.py
BSD-3-Clause
def _convert_to_numpy(array, xp): """Convert X into a NumPy ndarray on the CPU.""" if _is_xp_namespace(xp, "torch"): return array.cpu().numpy() elif _is_xp_namespace(xp, "cupy"): # pragma: nocover return array.get() elif _is_xp_namespace(xp, "array_api_strict"): return numpy.asarray(xp.asarray(array, device=xp.Device("CPU_DEVICE"))) return numpy.asarray(array)
Convert X into a NumPy ndarray on the CPU.
_convert_to_numpy
python
scikit-learn/scikit-learn
sklearn/utils/_array_api.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_array_api.py
BSD-3-Clause
def _estimator_with_converted_arrays(estimator, converter):
    """Create a new estimator, converting all attributes that are arrays.

    The converter is called on all NumPy arrays and arrays that support the
    `DLPack interface <https://dmlc.github.io/dlpack/latest/>`__.

    Parameters
    ----------
    estimator : Estimator
        Estimator to convert.

    converter : callable
        Callable that takes an array attribute and returns the converted array.

    Returns
    -------
    new_estimator : Estimator
        Converted estimator.
    """
    from sklearn.base import clone

    new_estimator = clone(estimator)
    for key, attribute in vars(estimator).items():
        if hasattr(attribute, "__dlpack__") or isinstance(attribute, numpy.ndarray):
            attribute = converter(attribute)
        setattr(new_estimator, key, attribute)
    return new_estimator
Create a new estimator, converting all attributes that are arrays.

    The converter is called on all NumPy arrays and arrays that support the
    `DLPack interface <https://dmlc.github.io/dlpack/latest/>`__.

    Parameters
    ----------
    estimator : Estimator
        Estimator to convert.

    converter : callable
        Callable that takes an array attribute and returns the converted array.

    Returns
    -------
    new_estimator : Estimator
        Converted estimator.
_estimator_with_converted_arrays
python
scikit-learn/scikit-learn
sklearn/utils/_array_api.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_array_api.py
BSD-3-Clause
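A sketch of moving a fitted estimator's arrays to another namespace, assuming PyTorch is installed:

import torch

from sklearn.linear_model import LinearRegression
from sklearn.utils._array_api import _estimator_with_converted_arrays  # private

reg = LinearRegression().fit([[0.0], [1.0]], [0.0, 2.0])
torch_reg = _estimator_with_converted_arrays(reg, lambda array: torch.as_tensor(array))
print(type(torch_reg.coef_))  # <class 'torch.Tensor'>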
def _atol_for_type(dtype_or_dtype_name): """Return the absolute tolerance for a given numpy dtype.""" if dtype_or_dtype_name is None: # If no dtype is specified when running tests for a given namespace, we # expect the same floating precision level as NumPy's default floating # point dtype. dtype_or_dtype_name = numpy.float64 return numpy.finfo(dtype_or_dtype_name).eps * 100
Return the absolute tolerance for a given numpy dtype.
_atol_for_type
python
scikit-learn/scikit-learn
sklearn/utils/_array_api.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_array_api.py
BSD-3-Clause
def indexing_dtype(xp):
    """Return a platform-specific integer dtype suitable for indexing.

    On 32-bit platforms, this will typically return int32, and int64
    otherwise.

    Note: using this dtype is recommended for indexing transient array
    datastructures. For long-lived arrays, such as the fitted attributes of
    estimators, it is instead recommended to use platform-independent int32 if
    we do not expect to index more than 2B elements. Using fixed dtypes
    simplifies the handling of serialized models, e.g. to deploy a model fit
    on a 64-bit platform to a target 32-bit platform such as WASM/pyodide.
    """
    # Currently this is implemented with a simple hack that assumes that the
    # following "may be" statements in the Array API spec always hold:
    # > The default integer data type should be the same across platforms, but
    # > the default may vary depending on whether Python is 32-bit or 64-bit.
    # > The default array index data type may be int32 on 32-bit platforms, but
    # > the default should be int64 otherwise.
    # https://data-apis.org/array-api/latest/API_specification/data_types.html#default-data-types
    # TODO: once sufficiently adopted, we might want to instead rely on the
    # newer inspection API: https://github.com/data-apis/array-api/issues/640
    return xp.asarray(0).dtype
Return a platform-specific integer dtype suitable for indexing.

    On 32-bit platforms, this will typically return int32, and int64
    otherwise.

    Note: using this dtype is recommended for indexing transient array
    datastructures. For long-lived arrays, such as the fitted attributes of
    estimators, it is instead recommended to use platform-independent int32 if
    we do not expect to index more than 2B elements. Using fixed dtypes
    simplifies the handling of serialized models, e.g. to deploy a model fit
    on a 64-bit platform to a target 32-bit platform such as WASM/pyodide.
indexing_dtype
python
scikit-learn/scikit-learn
sklearn/utils/_array_api.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_array_api.py
BSD-3-Clause
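Quick check of the platform-dependent result:

import numpy as np

from sklearn.utils._array_api import indexing_dtype  # private module

print(indexing_dtype(np))  # int64 on 64-bit platforms, int32 on 32-bit ones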
def _isin(element, test_elements, xp, assume_unique=False, invert=False): """Calculates ``element in test_elements``, broadcasting over `element` only. Returns a boolean array of the same shape as `element` that is True where an element of `element` is in `test_elements` and False otherwise. """ if _is_numpy_namespace(xp): return xp.asarray( numpy.isin( element=element, test_elements=test_elements, assume_unique=assume_unique, invert=invert, ) ) original_element_shape = element.shape element = xp.reshape(element, (-1,)) test_elements = xp.reshape(test_elements, (-1,)) return xp.reshape( _in1d( ar1=element, ar2=test_elements, xp=xp, assume_unique=assume_unique, invert=invert, ), original_element_shape, )
Calculates ``element in test_elements``, broadcasting over `element` only. Returns a boolean array of the same shape as `element` that is True where an element of `element` is in `test_elements` and False otherwise.
_isin
python
scikit-learn/scikit-learn
sklearn/utils/_array_api.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_array_api.py
BSD-3-Clause
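Shape-preserving membership test, here on the NumPy fast path:

import numpy as np

from sklearn.utils._array_api import _isin, get_namespace

element = np.array([[0, 2], [4, 6]])
xp, _ = get_namespace(element)
print(_isin(element, np.array([1, 2, 4]), xp=xp))
# [[False  True]
#  [ True False]]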
def _in1d(ar1, ar2, xp, assume_unique=False, invert=False):
    """Checks whether each element of an array is also present in a second array.

    Returns a boolean array the same length as `ar1` that is True
    where an element of `ar1` is in `ar2` and False otherwise.

    This function has been adapted from the original implementation in
    numpy:
    https://github.com/numpy/numpy/blob/v1.26.0/numpy/lib/arraysetops.py#L524-L758
    """
    xp, _ = get_namespace(ar1, ar2, xp=xp)

    # For small ar2, an element-wise comparison loop over ar2 is
    # significantly faster than the sort-based approach below.
    if ar2.shape[0] < 10 * ar1.shape[0] ** 0.145:
        if invert:
            mask = xp.ones(ar1.shape[0], dtype=xp.bool, device=device(ar1))
            for a in ar2:
                mask &= ar1 != a
        else:
            mask = xp.zeros(ar1.shape[0], dtype=xp.bool, device=device(ar1))
            for a in ar2:
                mask |= ar1 == a
        return mask

    if not assume_unique:
        ar1, rev_idx = xp.unique_inverse(ar1)
        ar2 = xp.unique_values(ar2)

    ar = xp.concat((ar1, ar2))
    device_ = device(ar)
    # We need this to be a stable sort.
    order = xp.argsort(ar, stable=True)
    reverse_order = xp.argsort(order, stable=True)
    sar = xp.take(ar, order, axis=0)
    if size(sar) >= 1:
        bool_ar = sar[1:] != sar[:-1] if invert else sar[1:] == sar[:-1]
    else:
        # indexing is undefined in the standard when sar is empty
        bool_ar = xp.asarray([False]) if invert else xp.asarray([True])
    flag = xp.concat((bool_ar, xp.asarray([invert], device=device_)))

    ret = xp.take(flag, reverse_order, axis=0)
    if assume_unique:
        return ret[: ar1.shape[0]]
    else:
        return xp.take(ret, rev_idx, axis=0)
Checks whether each element of an array is also present in a second array.

    Returns a boolean array the same length as `ar1` that is True
    where an element of `ar1` is in `ar2` and False otherwise.

    This function has been adapted from the original implementation in
    numpy:
    https://github.com/numpy/numpy/blob/v1.26.0/numpy/lib/arraysetops.py#L524-L758
_in1d
python
scikit-learn/scikit-learn
sklearn/utils/_array_api.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_array_api.py
BSD-3-Clause
def _count_nonzero(X, axis=None, sample_weight=None, xp=None, device=None):
    """A variant of `sklearn.utils.sparsefuncs.count_nonzero` for the Array API.

    If the array `X` is sparse and we are using the numpy namespace, then we
    simply call the original function. This function only supports 2D arrays.
    """
    from .sparsefuncs import count_nonzero

    xp, _ = get_namespace(X, sample_weight, xp=xp)
    if _is_numpy_namespace(xp) and sp.issparse(X):
        return count_nonzero(X, axis=axis, sample_weight=sample_weight)

    assert X.ndim == 2

    weights = xp.ones_like(X, device=device)
    if sample_weight is not None:
        sample_weight = xp.asarray(sample_weight, device=device)
        sample_weight = xp.reshape(sample_weight, (sample_weight.shape[0], 1))
        weights = xp.astype(weights, sample_weight.dtype) * sample_weight

    zero_scalar = xp.asarray(0, device=device, dtype=weights.dtype)
    return xp.sum(xp.where(X != 0, weights, zero_scalar), axis=axis)
A variant of `sklearn.utils.sparsefuncs.count_nonzero` for the Array API.

    If the array `X` is sparse and we are using the numpy namespace, then we
    simply call the original function. This function only supports 2D arrays.
_count_nonzero
python
scikit-learn/scikit-learn
sklearn/utils/_array_api.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_array_api.py
BSD-3-Clause
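Dense 2-D usage with per-sample weights (`_count_nonzero` is private API):

import numpy as np

from sklearn.utils._array_api import _count_nonzero, get_namespace

X = np.array([[0.0, 1.0], [2.0, 3.0]])
xp, _ = get_namespace(X)
print(_count_nonzero(X, axis=0, xp=xp))                             # [1. 2.]
print(_count_nonzero(X, axis=0, xp=xp, sample_weight=[10.0, 1.0]))  # [1. 11.]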
def _set_deprecated(self, value, *, new_key, deprecated_key, warning_message): """Set key in dictionary to be deprecated with its warning message.""" self.__dict__["_deprecated_key_to_warnings"][deprecated_key] = warning_message self[new_key] = self[deprecated_key] = value
Set key in dictionary to be deprecated with its warning message.
_set_deprecated
python
scikit-learn/scikit-learn
sklearn/utils/_bunch.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_bunch.py
BSD-3-Clause
def chunk_generator(gen, chunksize):
    """Chunk the generator ``gen`` into lists of length ``chunksize``.

    The last chunk may have a length less than ``chunksize``."""
    while True:
        chunk = list(islice(gen, chunksize))
        if chunk:
            yield chunk
        else:
            return
Chunk the generator ``gen`` into lists of length ``chunksize``. The last chunk may have a length less than ``chunksize``.
chunk_generator
python
scikit-learn/scikit-learn
sklearn/utils/_chunking.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_chunking.py
BSD-3-Clause
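The trailing short chunk in action (`chunk_generator` lives in a private module):

from sklearn.utils._chunking import chunk_generator  # private module

print(list(chunk_generator(iter(range(7)), 3)))
# [[0, 1, 2], [3, 4, 5], [6]]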
def gen_batches(n, batch_size, *, min_batch_size=0):
    """Generator to create slices containing `batch_size` elements from 0 to `n`.

    The last slice may contain less than `batch_size` elements, when
    `batch_size` does not divide `n`.

    Parameters
    ----------
    n : int
        Size of the sequence.
    batch_size : int
        Number of elements in each batch.
    min_batch_size : int, default=0
        Minimum number of elements in each batch.

    Yields
    ------
    slice of `batch_size` elements

    See Also
    --------
    gen_even_slices: Generator to create n_packs slices going up to n.

    Examples
    --------
    >>> from sklearn.utils import gen_batches
    >>> list(gen_batches(7, 3))
    [slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
    >>> list(gen_batches(6, 3))
    [slice(0, 3, None), slice(3, 6, None)]
    >>> list(gen_batches(2, 3))
    [slice(0, 2, None)]
    >>> list(gen_batches(7, 3, min_batch_size=0))
    [slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
    >>> list(gen_batches(7, 3, min_batch_size=2))
    [slice(0, 3, None), slice(3, 7, None)]
    """
    start = 0
    for _ in range(int(n // batch_size)):
        end = start + batch_size
        if end + min_batch_size > n:
            continue
        yield slice(start, end)
        start = end
    if start < n:
        yield slice(start, n)

Generator to create slices containing `batch_size` elements from 0 to `n`.

The last slice may contain less than `batch_size` elements, when
`batch_size` does not divide `n`.

Parameters
----------
n : int
    Size of the sequence.
batch_size : int
    Number of elements in each batch.
min_batch_size : int, default=0
    Minimum number of elements in each batch.

Yields
------
slice of `batch_size` elements

See Also
--------
gen_even_slices: Generator to create n_packs slices going up to n.

Examples
--------
>>> from sklearn.utils import gen_batches
>>> list(gen_batches(7, 3))
[slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
>>> list(gen_batches(6, 3))
[slice(0, 3, None), slice(3, 6, None)]
>>> list(gen_batches(2, 3))
[slice(0, 2, None)]
>>> list(gen_batches(7, 3, min_batch_size=0))
[slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
>>> list(gen_batches(7, 3, min_batch_size=2))
[slice(0, 3, None), slice(3, 7, None)]
gen_batches
python
scikit-learn/scikit-learn
sklearn/utils/_chunking.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_chunking.py
BSD-3-Clause
def gen_even_slices(n, n_packs, *, n_samples=None):
    """Generator to create `n_packs` evenly spaced slices going up to `n`.

    If `n_packs` does not divide `n`, except for the first `n % n_packs`
    slices, remaining slices may contain fewer elements.

    Parameters
    ----------
    n : int
        Size of the sequence.
    n_packs : int
        Number of slices to generate.
    n_samples : int, default=None
        Number of samples. Pass `n_samples` when the slices are to be used
        for sparse matrix indexing; slicing off-the-end raises an exception,
        while it works for NumPy arrays.

    Yields
    ------
    `slice` representing a set of indices from 0 to n.

    See Also
    --------
    gen_batches: Generator to create slices containing batch_size elements
        from 0 to n.

    Examples
    --------
    >>> from sklearn.utils import gen_even_slices
    >>> list(gen_even_slices(10, 1))
    [slice(0, 10, None)]
    >>> list(gen_even_slices(10, 10))
    [slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]
    >>> list(gen_even_slices(10, 5))
    [slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
    >>> list(gen_even_slices(10, 3))
    [slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
    """
    start = 0
    for pack_num in range(n_packs):
        this_n = n // n_packs
        if pack_num < n % n_packs:
            this_n += 1
        if this_n > 0:
            end = start + this_n
            if n_samples is not None:
                end = min(n_samples, end)
            yield slice(start, end, None)
            start = end

Generator to create `n_packs` evenly spaced slices going up to `n`.

If `n_packs` does not divide `n`, except for the first `n % n_packs`
slices, remaining slices may contain fewer elements.

Parameters
----------
n : int
    Size of the sequence.
n_packs : int
    Number of slices to generate.
n_samples : int, default=None
    Number of samples. Pass `n_samples` when the slices are to be used for
    sparse matrix indexing; slicing off-the-end raises an exception, while
    it works for NumPy arrays.

Yields
------
`slice` representing a set of indices from 0 to n.

See Also
--------
gen_batches: Generator to create slices containing batch_size elements
    from 0 to n.

Examples
--------
>>> from sklearn.utils import gen_even_slices
>>> list(gen_even_slices(10, 1))
[slice(0, 10, None)]
>>> list(gen_even_slices(10, 10))
[slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]
>>> list(gen_even_slices(10, 5))
[slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
>>> list(gen_even_slices(10, 3))
[slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
gen_even_slices
python
scikit-learn/scikit-learn
sklearn/utils/_chunking.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_chunking.py
BSD-3-Clause
def get_chunk_n_rows(row_bytes, *, max_n_rows=None, working_memory=None):
    """Calculate how many rows can be processed within `working_memory`.

    Parameters
    ----------
    row_bytes : int
        The expected number of bytes of memory that will be consumed
        during the processing of each row.
    max_n_rows : int, default=None
        The maximum return value.
    working_memory : int or float, default=None
        The number of rows to fit inside this number of MiB will be
        returned. When None (default), the value of
        ``sklearn.get_config()['working_memory']`` is used.

    Returns
    -------
    int
        The number of rows which can be processed within `working_memory`.

    Warns
    -----
    Issues a UserWarning if `row_bytes` exceeds `working_memory` MiB.
    """
    if working_memory is None:
        working_memory = get_config()["working_memory"]

    chunk_n_rows = int(working_memory * (2**20) // row_bytes)
    if max_n_rows is not None:
        chunk_n_rows = min(chunk_n_rows, max_n_rows)
    if chunk_n_rows < 1:
        warnings.warn(
            "Could not adhere to working_memory config. "
            "Currently %.0fMiB, %.0fMiB required."
            % (working_memory, np.ceil(row_bytes * 2**-20))
        )
        chunk_n_rows = 1
    return chunk_n_rows

Calculate how many rows can be processed within `working_memory`.

Parameters
----------
row_bytes : int
    The expected number of bytes of memory that will be consumed
    during the processing of each row.
max_n_rows : int, default=None
    The maximum return value.
working_memory : int or float, default=None
    The number of rows to fit inside this number of MiB will be
    returned. When None (default), the value of
    ``sklearn.get_config()['working_memory']`` is used.

Returns
-------
int
    The number of rows which can be processed within `working_memory`.

Warns
-----
Issues a UserWarning if `row_bytes` exceeds `working_memory` MiB.
get_chunk_n_rows
python
scikit-learn/scikit-learn
sklearn/utils/_chunking.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_chunking.py
BSD-3-Clause
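Usage note: a worked example of the MiB arithmetic, assuming the private `sklearn.utils._chunking` import path:

from sklearn.utils._chunking import get_chunk_n_rows

# Each row holds 1000 float64 values -> 8000 bytes per row.
# With 64 MiB of working memory: 64 * 2**20 // 8000 = 8388 rows per chunk.
print(get_chunk_n_rows(row_bytes=8000, working_memory=64))  # 8388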
def _unique(values, *, return_inverse=False, return_counts=False):
    """Helper function to find unique values with support for python objects.

    Uses pure python method for object dtype, and numpy method for
    all other dtypes.

    Parameters
    ----------
    values : ndarray
        Values to check for unknowns.

    return_inverse : bool, default=False
        If True, also return the indices of the unique values.

    return_counts : bool, default=False
        If True, also return the number of times each unique item appears in
        values.

    Returns
    -------
    unique : ndarray
        The sorted unique values.

    unique_inverse : ndarray
        The indices to reconstruct the original array from the unique array.
        Only provided if `return_inverse` is True.

    unique_counts : ndarray
        The number of times each of the unique values comes up in the original
        array. Only provided if `return_counts` is True.
    """
    if values.dtype == object:
        return _unique_python(
            values, return_inverse=return_inverse, return_counts=return_counts
        )
    # numerical
    return _unique_np(
        values, return_inverse=return_inverse, return_counts=return_counts
    )

Helper function to find unique values with support for python objects.

Uses pure python method for object dtype, and numpy method for
all other dtypes.

Parameters
----------
values : ndarray
    Values to check for unknowns.

return_inverse : bool, default=False
    If True, also return the indices of the unique values.

return_counts : bool, default=False
    If True, also return the number of times each unique item appears in
    values.

Returns
-------
unique : ndarray
    The sorted unique values.

unique_inverse : ndarray
    The indices to reconstruct the original array from the unique array.
    Only provided if `return_inverse` is True.

unique_counts : ndarray
    The number of times each of the unique values comes up in the original
    array. Only provided if `return_counts` is True.
_unique
python
scikit-learn/scikit-learn
sklearn/utils/_encode.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_encode.py
BSD-3-Clause
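Usage note: a small sketch of the object-dtype path, assuming the private `sklearn.utils._encode` import path:

import numpy as np
from sklearn.utils._encode import _unique

values = np.array(["b", "a", "b", "c"], dtype=object)
uniques, inverse = _unique(values, return_inverse=True)
print(uniques)  # expected: ['a' 'b' 'c'] (sorted)
print(inverse)  # expected: [1 0 1 2]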
def _unique_np(values, return_inverse=False, return_counts=False):
    """Helper function to find unique values for numpy arrays that correctly
    accounts for nans. See `_unique` documentation for details."""
    xp, _ = get_namespace(values)

    inverse, counts = None, None

    if return_inverse and return_counts:
        uniques, _, inverse, counts = xp.unique_all(values)
    elif return_inverse:
        uniques, inverse = xp.unique_inverse(values)
    elif return_counts:
        uniques, counts = xp.unique_counts(values)
    else:
        uniques = xp.unique_values(values)

    # np.unique will have duplicate missing values at the end of `uniques`
    # here we clip the nans and remove it from uniques
    if uniques.size and is_scalar_nan(uniques[-1]):
        nan_idx = _searchsorted(uniques, xp.nan, xp=xp)
        uniques = uniques[: nan_idx + 1]
        if return_inverse:
            inverse[inverse > nan_idx] = nan_idx

        if return_counts:
            counts[nan_idx] = xp.sum(counts[nan_idx:])
            counts = counts[: nan_idx + 1]

    ret = (uniques,)

    if return_inverse:
        ret += (inverse,)

    if return_counts:
        ret += (counts,)

    return ret[0] if len(ret) == 1 else ret
Helper function to find unique values for numpy arrays that correctly accounts for nans. See `_unique` documentation for details.
_unique_np
python
scikit-learn/scikit-learn
sklearn/utils/_encode.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_encode.py
BSD-3-Clause
def to_list(self):
    """Convert tuple to a list where None is always first."""
    output = []
    if self.none:
        output.append(None)
    if self.nan:
        output.append(np.nan)
    return output
Convert tuple to a list where None is always first.
to_list
python
scikit-learn/scikit-learn
sklearn/utils/_encode.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_encode.py
BSD-3-Clause
def _extract_missing(values):
    """Extract missing values from `values`.

    Parameters
    ----------
    values : set
        Set of values to extract missing from.

    Returns
    -------
    output : set
        Set with missing values extracted.

    missing_values : MissingValues
        Object with missing value information.
    """
    missing_values_set = {
        value for value in values if value is None or is_scalar_nan(value)
    }

    if not missing_values_set:
        return values, MissingValues(nan=False, none=False)

    if None in missing_values_set:
        if len(missing_values_set) == 1:
            output_missing_values = MissingValues(nan=False, none=True)
        else:
            # If there is more than one missing value, then it has to be
            # float('nan') or np.nan
            output_missing_values = MissingValues(nan=True, none=True)
    else:
        output_missing_values = MissingValues(nan=True, none=False)

    # create set without the missing values
    output = values - missing_values_set
    return output, output_missing_values

Extract missing values from `values`.

Parameters
----------
values : set
    Set of values to extract missing from.

Returns
-------
output : set
    Set with missing values extracted.

missing_values : MissingValues
    Object with missing value information.
_extract_missing
python
scikit-learn/scikit-learn
sklearn/utils/_encode.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_encode.py
BSD-3-Clause
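Usage note: a minimal sketch of how the two kinds of missing values are separated out, assuming the private `sklearn.utils._encode` import path:

import numpy as np
from sklearn.utils._encode import _extract_missing

values = {"a", "b", None, np.nan}
rest, missing = _extract_missing(values)
print(rest)                       # expected: {'a', 'b'} (set order may vary)
print(missing.nan, missing.none)  # expected: True True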
def _map_to_integer(values, uniques):
    """Map values based on their position in uniques."""
    xp, _ = get_namespace(values, uniques)
    table = _nandict({val: i for i, val in enumerate(uniques)})
    return xp.asarray([table[v] for v in values], device=device(values))

Map values based on their position in uniques.
_map_to_integer
python
scikit-learn/scikit-learn
sklearn/utils/_encode.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_encode.py
BSD-3-Clause
def _check_unknown(values, known_values, return_mask=False):
    """
    Helper function to check for unknowns in values to be encoded.

    Uses pure python method for object dtype, and numpy method for
    all other dtypes.

    Parameters
    ----------
    values : array
        Values to check for unknowns.
    known_values : array
        Known values. Must be unique.
    return_mask : bool, default=False
        If True, return a mask of the same shape as `values` indicating
        the valid values.

    Returns
    -------
    diff : list
        The unique values present in `values` and not in `known_values`.
    valid_mask : boolean array
        Additionally returned if ``return_mask=True``.
    """
    xp, _ = get_namespace(values, known_values)
    valid_mask = None

    if not xp.isdtype(values.dtype, "numeric"):
        values_set = set(values)
        values_set, missing_in_values = _extract_missing(values_set)

        uniques_set = set(known_values)
        uniques_set, missing_in_uniques = _extract_missing(uniques_set)
        diff = values_set - uniques_set

        nan_in_diff = missing_in_values.nan and not missing_in_uniques.nan
        none_in_diff = missing_in_values.none and not missing_in_uniques.none

        def is_valid(value):
            return (
                value in uniques_set
                or (missing_in_uniques.none and value is None)
                or (missing_in_uniques.nan and is_scalar_nan(value))
            )

        if return_mask:
            if diff or nan_in_diff or none_in_diff:
                valid_mask = xp.array([is_valid(value) for value in values])
            else:
                valid_mask = xp.ones(len(values), dtype=xp.bool)

        diff = list(diff)
        if none_in_diff:
            diff.append(None)
        if nan_in_diff:
            diff.append(np.nan)
    else:
        unique_values = xp.unique_values(values)
        diff = xpx.setdiff1d(unique_values, known_values, assume_unique=True, xp=xp)
        if return_mask:
            if diff.size:
                valid_mask = _isin(values, known_values, xp)
            else:
                valid_mask = xp.ones(len(values), dtype=xp.bool)

        # check for nans in the known_values
        if xp.any(xp.isnan(known_values)):
            diff_is_nan = xp.isnan(diff)
            if xp.any(diff_is_nan):
                # removes nan from valid_mask
                if diff.size and return_mask:
                    is_nan = xp.isnan(values)
                    valid_mask[is_nan] = 1

                # remove nan from diff
                diff = diff[~diff_is_nan]
        diff = list(diff)

    if return_mask:
        return diff, valid_mask
    return diff

Helper function to check for unknowns in values to be encoded.

Uses pure python method for object dtype, and numpy method for
all other dtypes.

Parameters
----------
values : array
    Values to check for unknowns.
known_values : array
    Known values. Must be unique.
return_mask : bool, default=False
    If True, return a mask of the same shape as `values` indicating
    the valid values.

Returns
-------
diff : list
    The unique values present in `values` and not in `known_values`.
valid_mask : boolean array
    Additionally returned if ``return_mask=True``.
_check_unknown
python
scikit-learn/scikit-learn
sklearn/utils/_encode.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_encode.py
BSD-3-Clause
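Usage note: a short sketch of the numeric path, assuming the private `sklearn.utils._encode` import path:

import numpy as np
from sklearn.utils._encode import _check_unknown

values = np.array([1, 2, 5])
known = np.array([1, 2, 3])
diff, mask = _check_unknown(values, known, return_mask=True)
print(diff)  # expected: [5] -- values never seen among the known values
print(mask)  # expected: [ True  True False]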
def _generate_items(self, items):
    """Generate items without nans. Stores the nan counts separately."""
    for item in items:
        if not is_scalar_nan(item):
            yield item
            continue
        if not hasattr(self, "nan_count"):
            self.nan_count = 0
        self.nan_count += 1
Generate items without nans. Stores the nan counts separately.
_generate_items
python
scikit-learn/scikit-learn
sklearn/utils/_encode.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_encode.py
BSD-3-Clause
def _get_counts(values, uniques):
    """Get the count of each of the `uniques` in `values`.

    The counts will use the order passed in by `uniques`. For non-object dtypes,
    `uniques` is assumed to be sorted and `np.nan` is at the end.
    """
    if values.dtype.kind in "OU":
        counter = _NaNCounter(values)
        output = np.zeros(len(uniques), dtype=np.int64)
        for i, item in enumerate(uniques):
            with suppress(KeyError):
                output[i] = counter[item]
        return output

    unique_values, counts = _unique_np(values, return_counts=True)

    # Reorder unique_values based on input: `uniques`
    uniques_in_values = np.isin(uniques, unique_values, assume_unique=True)
    if np.isnan(unique_values[-1]) and np.isnan(uniques[-1]):
        uniques_in_values[-1] = True

    unique_valid_indices = np.searchsorted(unique_values, uniques[uniques_in_values])
    output = np.zeros_like(uniques, dtype=np.int64)
    output[uniques_in_values] = counts[unique_valid_indices]
    return output
Get the count of each of the `uniques` in `values`. The counts will use the order passed in by `uniques`. For non-object dtypes, `uniques` is assumed to be sorted and `np.nan` is at the end.
_get_counts
python
scikit-learn/scikit-learn
sklearn/utils/_encode.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_encode.py
BSD-3-Clause
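Usage note: a minimal sketch of the object-dtype counting path, assuming the private `sklearn.utils._encode` import path:

import numpy as np
from sklearn.utils._encode import _get_counts

values = np.array(["a", "b", "a", "a"], dtype=object)
uniques = np.array(["b", "a"], dtype=object)
# Counts come back in the order given by `uniques`, not sorted order.
print(_get_counts(values, uniques))  # expected: [1 3]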
def _array_indexing(array, key, key_dtype, axis):
    """Index an array or scipy.sparse consistently across NumPy version."""
    xp, is_array_api = get_namespace(array)
    if is_array_api:
        return xp.take(array, key, axis=axis)
    if issparse(array) and key_dtype == "bool":
        key = np.asarray(key)
    if isinstance(key, tuple):
        key = list(key)
    return array[key, ...] if axis == 0 else array[:, key]
Index an array or scipy.sparse consistently across NumPy version.
_array_indexing
python
scikit-learn/scikit-learn
sklearn/utils/_indexing.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_indexing.py
BSD-3-Clause
def _determine_key_type(key, accept_slice=True):
    """Determine the data type of key.

    Parameters
    ----------
    key : scalar, slice or array-like
        The key from which we want to infer the data type.

    accept_slice : bool, default=True
        Whether or not to raise an error if the key is a slice.

    Returns
    -------
    dtype : {'int', 'str', 'bool', None}
        Returns the data type of key.
    """
    err_msg = (
        "No valid specification of the columns. Only a scalar, list or "
        "slice of all integers or all strings, or boolean mask is "
        "allowed"
    )

    dtype_to_str = {int: "int", str: "str", bool: "bool", np.bool_: "bool"}
    array_dtype_to_str = {
        "i": "int",
        "u": "int",
        "b": "bool",
        "O": "str",
        "U": "str",
        "S": "str",
    }

    if key is None:
        return None
    if isinstance(key, tuple(dtype_to_str.keys())):
        try:
            return dtype_to_str[type(key)]
        except KeyError:
            raise ValueError(err_msg)
    if isinstance(key, slice):
        if not accept_slice:
            raise TypeError(
                "Only array-like or scalar are supported. A Python slice was given."
            )
        if key.start is None and key.stop is None:
            return None
        key_start_type = _determine_key_type(key.start)
        key_stop_type = _determine_key_type(key.stop)
        if key_start_type is not None and key_stop_type is not None:
            if key_start_type != key_stop_type:
                raise ValueError(err_msg)
        if key_start_type is not None:
            return key_start_type
        return key_stop_type
    # TODO(1.9) remove UserList when the force_int_remainder_cols param
    # of ColumnTransformer is removed
    if isinstance(key, (list, tuple, UserList)):
        unique_key = set(key)
        key_type = {_determine_key_type(elt) for elt in unique_key}
        if not key_type:
            return None
        if len(key_type) != 1:
            raise ValueError(err_msg)
        return key_type.pop()
    if hasattr(key, "dtype"):
        xp, is_array_api = get_namespace(key)
        # NumPy arrays are special-cased in their own branch because the Array API
        # cannot handle object/string-based dtypes that are often used to index
        # columns of dataframes by names.
        if is_array_api and not _is_numpy_namespace(xp):
            if xp.isdtype(key.dtype, "bool"):
                return "bool"
            elif xp.isdtype(key.dtype, "integral"):
                return "int"
            else:
                raise ValueError(err_msg)
        else:
            try:
                return array_dtype_to_str[key.dtype.kind]
            except KeyError:
                raise ValueError(err_msg)
    raise ValueError(err_msg)

Determine the data type of key.

Parameters
----------
key : scalar, slice or array-like
    The key from which we want to infer the data type.

accept_slice : bool, default=True
    Whether or not to raise an error if the key is a slice.

Returns
-------
dtype : {'int', 'str', 'bool', None}
    Returns the data type of key.
_determine_key_type
python
scikit-learn/scikit-learn
sklearn/utils/_indexing.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_indexing.py
BSD-3-Clause
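Usage note: a few representative inputs and the key type they resolve to, assuming the private `sklearn.utils._indexing` import path:

import numpy as np
from sklearn.utils._indexing import _determine_key_type

print(_determine_key_type(3))                        # 'int'
print(_determine_key_type(["a", "b"]))               # 'str'
print(_determine_key_type(np.array([True, False])))  # 'bool'
print(_determine_key_type(slice(0, 5)))              # 'int' (from start/stop)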
def _safe_indexing(X, indices, *, axis=0):
    """Return rows, items or columns of X using indices.

    .. warning::

        This utility is documented, but **private**. This means that
        backward compatibility might be broken without any deprecation
        cycle.

    Parameters
    ----------
    X : array-like, sparse-matrix, list, pandas.DataFrame, pandas.Series
        Data from which to sample rows, items or columns. `list` are only
        supported when `axis=0`.
    indices : bool, int, str, slice, array-like
        - If `axis=0`, boolean and integer array-like, integer slice,
          and scalar integer are supported.
        - If `axis=1`:
            - to select a single column, `indices` can be of `int` type for
              all `X` types and `str` only for dataframe. The selected subset
              will be 1D, unless `X` is a sparse matrix in which case it will
              be 2D.
            - to select multiple columns, `indices` can be one of the
              following: `list`, `array`, `slice`. The type used in
              these containers can be one of the following: `int`, 'bool' and
              `str`. However, `str` is only supported when `X` is a dataframe.
              The selected subset will be 2D.
    axis : int, default=0
        The axis along which `X` will be subsampled. `axis=0` will select
        rows while `axis=1` will select columns.

    Returns
    -------
    subset
        Subset of X on axis 0 or 1.

    Notes
    -----
    CSR, CSC, and LIL sparse matrices are supported. COO sparse matrices are
    not supported.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.utils import _safe_indexing
    >>> data = np.array([[1, 2], [3, 4], [5, 6]])
    >>> _safe_indexing(data, 0, axis=0)  # select the first row
    array([1, 2])
    >>> _safe_indexing(data, 0, axis=1)  # select the first column
    array([1, 3, 5])
    """
    if indices is None:
        return X

    if axis not in (0, 1):
        raise ValueError(
            "'axis' should be either 0 (to index rows) or 1 (to index "
            " column). Got {} instead.".format(axis)
        )

    indices_dtype = _determine_key_type(indices)

    if axis == 0 and indices_dtype == "str":
        raise ValueError("String indexing is not supported with 'axis=0'")

    if axis == 1 and isinstance(X, list):
        raise ValueError("axis=1 is not supported for lists")

    if axis == 1 and (ndim := len(getattr(X, "shape", [0]))) != 2:
        raise ValueError(
            "'X' should be a 2D NumPy array, 2D sparse matrix or "
            "dataframe when indexing the columns (i.e. 'axis=1'). "
            f"Got {type(X)} instead with {ndim} dimension(s)."
        )

    if (
        axis == 1
        and indices_dtype == "str"
        and not (_is_pandas_df(X) or _use_interchange_protocol(X))
    ):
        raise ValueError(
            "Specifying the columns using strings is only supported for dataframes."
        )

    if hasattr(X, "iloc"):
        # TODO: we should probably use _is_pandas_df_or_series(X) instead but:
        # 1) Currently, it (probably) works for dataframes compliant to pandas' API.
        # 2) Updating would require updating some tests such as
        #    test_train_test_split_mock_pandas.
        return _pandas_indexing(X, indices, indices_dtype, axis=axis)
    elif _is_polars_df_or_series(X):
        return _polars_indexing(X, indices, indices_dtype, axis=axis)
    elif _is_pyarrow_data(X):
        return _pyarrow_indexing(X, indices, indices_dtype, axis=axis)
    elif _use_interchange_protocol(X):  # pragma: no cover
        # Once the dataframe X is converted into its dataframe interchange protocol
        # version by calling X.__dataframe__(), it becomes very hard to turn it back
        # into its original type, e.g., a pyarrow.Table, see
        # https://github.com/data-apis/dataframe-api/issues/85.
        warnings.warn(
            message="A data object with support for the dataframe interchange "
            "protocol was passed, but scikit-learn currently does not know how "
            "to handle this kind of data. Some array/list indexing will be tried.",
            category=UserWarning,
        )
    if hasattr(X, "shape"):
        return _array_indexing(X, indices, indices_dtype, axis=axis)
    else:
        return _list_indexing(X, indices, indices_dtype)

Return rows, items or columns of X using indices.

.. warning::

    This utility is documented, but **private**. This means that
    backward compatibility might be broken without any deprecation
    cycle.

Parameters
----------
X : array-like, sparse-matrix, list, pandas.DataFrame, pandas.Series
    Data from which to sample rows, items or columns. `list` are only
    supported when `axis=0`.
indices : bool, int, str, slice, array-like
    - If `axis=0`, boolean and integer array-like, integer slice,
      and scalar integer are supported.
    - If `axis=1`:
        - to select a single column, `indices` can be of `int` type for
          all `X` types and `str` only for dataframe. The selected subset
          will be 1D, unless `X` is a sparse matrix in which case it will
          be 2D.
        - to select multiple columns, `indices` can be one of the
          following: `list`, `array`, `slice`. The type used in
          these containers can be one of the following: `int`, 'bool' and
          `str`. However, `str` is only supported when `X` is a dataframe.
          The selected subset will be 2D.
axis : int, default=0
    The axis along which `X` will be subsampled. `axis=0` will select
    rows while `axis=1` will select columns.

Returns
-------
subset
    Subset of X on axis 0 or 1.

Notes
-----
CSR, CSC, and LIL sparse matrices are supported. COO sparse matrices are
not supported.

Examples
--------
>>> import numpy as np
>>> from sklearn.utils import _safe_indexing
>>> data = np.array([[1, 2], [3, 4], [5, 6]])
>>> _safe_indexing(data, 0, axis=0)  # select the first row
array([1, 2])
>>> _safe_indexing(data, 0, axis=1)  # select the first column
array([1, 3, 5])
_safe_indexing
python
scikit-learn/scikit-learn
sklearn/utils/_indexing.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_indexing.py
BSD-3-Clause
def _safe_assign(X, values, *, row_indexer=None, column_indexer=None):
    """Safe assignment to a numpy array, sparse matrix, or pandas dataframe.

    Parameters
    ----------
    X : {ndarray, sparse-matrix, dataframe}
        Array to be modified. It is expected to be 2-dimensional.

    values : ndarray
        The values to be assigned to `X`.

    row_indexer : array-like, dtype={int, bool}, default=None
        A 1-dimensional array to select the rows of interest. If `None`, all
        rows are selected.

    column_indexer : array-like, dtype={int, bool}, default=None
        A 1-dimensional array to select the columns of interest. If `None`, all
        columns are selected.
    """
    row_indexer = slice(None, None, None) if row_indexer is None else row_indexer
    column_indexer = (
        slice(None, None, None) if column_indexer is None else column_indexer
    )

    if hasattr(X, "iloc"):  # pandas dataframe
        with warnings.catch_warnings():
            # pandas >= 1.5 raises a warning when using iloc to set values in a column
            # that does not have the same type as the column being set. It happens
            # for instance when setting a categorical column with a string.
            # In the future the behavior won't change and the warning should disappear.
            # TODO(1.3): check if the warning is still raised or remove the filter.
            warnings.simplefilter("ignore", FutureWarning)
            X.iloc[row_indexer, column_indexer] = values
    else:  # numpy array or sparse matrix
        X[row_indexer, column_indexer] = values

Safe assignment to a numpy array, sparse matrix, or pandas dataframe.

Parameters
----------
X : {ndarray, sparse-matrix, dataframe}
    Array to be modified. It is expected to be 2-dimensional.

values : ndarray
    The values to be assigned to `X`.

row_indexer : array-like, dtype={int, bool}, default=None
    A 1-dimensional array to select the rows of interest. If `None`, all
    rows are selected.

column_indexer : array-like, dtype={int, bool}, default=None
    A 1-dimensional array to select the columns of interest. If `None`, all
    columns are selected.
_safe_assign
python
scikit-learn/scikit-learn
sklearn/utils/_indexing.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_indexing.py
BSD-3-Clause
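Usage note: a minimal sketch of an in-place column assignment on a NumPy array, assuming the private `sklearn.utils._indexing` import path:

import numpy as np
from sklearn.utils._indexing import _safe_assign

X = np.zeros((3, 2))
# Overwrite the second column in place; rows default to "all rows".
_safe_assign(X, np.array([[1.0], [2.0], [3.0]]), column_indexer=[1])
print(X)
# [[0. 1.]
#  [0. 2.]
#  [0. 3.]]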
def _get_column_indices(X, key):
    """Get feature column indices for input data X and key.

    For accepted values of `key`, see the docstring of
    :func:`_safe_indexing`.
    """
    key_dtype = _determine_key_type(key)
    if _use_interchange_protocol(X):
        return _get_column_indices_interchange(X.__dataframe__(), key, key_dtype)

    n_columns = X.shape[1]
    if isinstance(key, (list, tuple)) and not key:
        # we get an empty list
        return []
    elif key_dtype in ("bool", "int"):
        return _get_column_indices_for_bool_or_int(key, n_columns)
    else:
        try:
            all_columns = X.columns
        except AttributeError:
            raise ValueError(
                "Specifying the columns using strings is only supported for dataframes."
            )
        if isinstance(key, str):
            columns = [key]
        elif isinstance(key, slice):
            start, stop = key.start, key.stop
            if start is not None:
                start = all_columns.get_loc(start)
            if stop is not None:
                # pandas indexing with strings is endpoint included
                stop = all_columns.get_loc(stop) + 1
            else:
                stop = n_columns + 1
            return list(islice(range(n_columns), start, stop))
        else:
            columns = list(key)

        try:
            column_indices = []
            for col in columns:
                col_idx = all_columns.get_loc(col)
                if not isinstance(col_idx, numbers.Integral):
                    raise ValueError(
                        f"Selected columns, {columns}, are not unique in dataframe"
                    )
                column_indices.append(col_idx)

        except KeyError as e:
            raise ValueError("A given column is not a column of the dataframe") from e

        return column_indices
Get feature column indices for input data X and key. For accepted values of `key`, see the docstring of :func:`_safe_indexing`.
_get_column_indices
python
scikit-learn/scikit-learn
sklearn/utils/_indexing.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_indexing.py
BSD-3-Clause
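Usage note: a short sketch with a pandas dataframe (requires pandas; assumes the private `sklearn.utils._indexing` import path). String slices are endpoint-inclusive, mirroring pandas label slicing:

import pandas as pd
from sklearn.utils._indexing import _get_column_indices

X = pd.DataFrame({"age": [1], "height": [2], "weight": [3]})
print(_get_column_indices(X, ["height", "weight"]))    # [1, 2]
print(_get_column_indices(X, slice("age", "height")))  # [0, 1]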
def _get_column_indices_interchange(X_interchange, key, key_dtype):
    """Same as _get_column_indices but for X with __dataframe__ protocol."""
    n_columns = X_interchange.num_columns()

    if isinstance(key, (list, tuple)) and not key:
        # we get an empty list
        return []
    elif key_dtype in ("bool", "int"):
        return _get_column_indices_for_bool_or_int(key, n_columns)
    else:
        column_names = list(X_interchange.column_names())

        if isinstance(key, slice):
            if key.step not in [1, None]:
                raise NotImplementedError("key.step must be 1 or None")
            start, stop = key.start, key.stop
            if start is not None:
                start = column_names.index(start)
            if stop is not None:
                stop = column_names.index(stop) + 1
            else:
                stop = n_columns + 1
            return list(islice(range(n_columns), start, stop))

        selected_columns = [key] if np.isscalar(key) else key

        try:
            return [column_names.index(col) for col in selected_columns]
        except ValueError as e:
            raise ValueError("A given column is not a column of the dataframe") from e
Same as _get_column_indices but for X with __dataframe__ protocol.
_get_column_indices_interchange
python
scikit-learn/scikit-learn
sklearn/utils/_indexing.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_indexing.py
BSD-3-Clause
def resample(
    *arrays,
    replace=True,
    n_samples=None,
    random_state=None,
    stratify=None,
    sample_weight=None,
):
    """Resample arrays or sparse matrices in a consistent way.

    The default strategy implements one step of the bootstrapping
    procedure.

    Parameters
    ----------
    *arrays : sequence of array-like of shape (n_samples,) or \
            (n_samples, n_outputs)
        Indexable data-structures can be arrays, lists, dataframes or scipy
        sparse matrices with consistent first dimension.

    replace : bool, default=True
        Implements resampling with replacement. It must be set to True
        whenever sampling with non-uniform weights: a few data points with
        very large weights are expected to be sampled several times, which
        preserves the distribution induced by the weights. If False, this
        will implement (sliced) random permutations.

    n_samples : int, default=None
        Number of samples to generate. If left to None this is
        automatically set to the first dimension of the arrays.
        If replace is False it should not be larger than the length of
        arrays.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for shuffling
        the data.
        Pass an int for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.

    stratify : {array-like, sparse matrix} of shape (n_samples,) or \
            (n_samples, n_outputs), default=None
        If not None, data is split in a stratified fashion, using this as
        the class labels.

    sample_weight : array-like of shape (n_samples,), default=None
        Contains weight values to be associated with each sample. Values are
        normalized to sum to one and interpreted as probabilities for
        sampling each data point.

        .. versionadded:: 1.7

    Returns
    -------
    resampled_arrays : sequence of array-like of shape (n_samples,) or \
            (n_samples, n_outputs)
        Sequence of resampled copies of the collections. The original arrays
        are not impacted.

    See Also
    --------
    shuffle : Shuffle arrays or sparse matrices in a consistent way.

    Examples
    --------
    It is possible to mix sparse and dense arrays in the same run::

      >>> import numpy as np
      >>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
      >>> y = np.array([0, 1, 2])

      >>> from scipy.sparse import coo_matrix
      >>> X_sparse = coo_matrix(X)

      >>> from sklearn.utils import resample
      >>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)
      >>> X
      array([[1., 0.],
             [2., 1.],
             [1., 0.]])

      >>> X_sparse
      <Compressed Sparse Row sparse matrix of dtype 'float64'
          with 4 stored elements and shape (3, 2)>

      >>> X_sparse.toarray()
      array([[1., 0.],
             [2., 1.],
             [1., 0.]])

      >>> y
      array([0, 1, 0])

      >>> resample(y, n_samples=2, random_state=0)
      array([0, 1])

    Example using stratification::

      >>> y = [0, 0, 1, 1, 1, 1, 1, 1, 1]
      >>> resample(y, n_samples=5, replace=False, stratify=y,
      ...          random_state=0)
      [1, 1, 1, 0, 1]
    """
    max_n_samples = n_samples
    random_state = check_random_state(random_state)

    if len(arrays) == 0:
        return None

    first = arrays[0]
    n_samples = first.shape[0] if hasattr(first, "shape") else len(first)

    if max_n_samples is None:
        max_n_samples = n_samples
    elif (max_n_samples > n_samples) and (not replace):
        raise ValueError(
            "Cannot sample %d out of arrays with dim %d when replace is False"
            % (max_n_samples, n_samples)
        )

    check_consistent_length(*arrays)

    if sample_weight is not None and not replace:
        raise NotImplementedError(
            "Resampling with sample_weight is only implemented for replace=True."
        )
    if sample_weight is not None and stratify is not None:
        raise NotImplementedError(
            "Resampling with sample_weight is only implemented for stratify=None."
        )

    if stratify is None:
        if replace:
            if sample_weight is not None:
                sample_weight = _check_sample_weight(
                    sample_weight, first, dtype=np.float64
                )
                p = sample_weight / sample_weight.sum()
            else:
                p = None
            indices = random_state.choice(
                n_samples,
                size=max_n_samples,
                p=p,
                replace=True,
            )
        else:
            indices = np.arange(n_samples)
            random_state.shuffle(indices)
            indices = indices[:max_n_samples]
    else:
        # Code adapted from StratifiedShuffleSplit()
        y = check_array(stratify, ensure_2d=False, dtype=None)
        if y.ndim == 2:
            # for multi-label y, map each distinct row to a string repr
            # using join because str(row) uses an ellipsis if len(row) > 1000
            y = np.array([" ".join(row.astype("str")) for row in y])

        classes, y_indices = np.unique(y, return_inverse=True)
        n_classes = classes.shape[0]

        class_counts = np.bincount(y_indices)

        # Find the sorted list of instances for each class:
        # (np.unique above performs a sort, so code is O(n logn) already)
        class_indices = np.split(
            np.argsort(y_indices, kind="mergesort"), np.cumsum(class_counts)[:-1]
        )

        n_i = _approximate_mode(class_counts, max_n_samples, random_state)

        indices = []

        for i in range(n_classes):
            indices_i = random_state.choice(class_indices[i], n_i[i], replace=replace)
            indices.extend(indices_i)

        indices = random_state.permutation(indices)

    # convert sparse matrices to CSR for row-based indexing
    arrays = [a.tocsr() if issparse(a) else a for a in arrays]
    resampled_arrays = [_safe_indexing(a, indices) for a in arrays]
    if len(resampled_arrays) == 1:
        # syntactic sugar for the unit argument case
        return resampled_arrays[0]
    else:
        return resampled_arrays

Resample arrays or sparse matrices in a consistent way.

The default strategy implements one step of the bootstrapping
procedure.

Parameters
----------
*arrays : sequence of array-like of shape (n_samples,) or (n_samples, n_outputs)
    Indexable data-structures can be arrays, lists, dataframes or scipy
    sparse matrices with consistent first dimension.

replace : bool, default=True
    Implements resampling with replacement. It must be set to True
    whenever sampling with non-uniform weights: a few data points with
    very large weights are expected to be sampled several times, which
    preserves the distribution induced by the weights. If False, this
    will implement (sliced) random permutations.

n_samples : int, default=None
    Number of samples to generate. If left to None this is
    automatically set to the first dimension of the arrays.
    If replace is False it should not be larger than the length of
    arrays.

random_state : int, RandomState instance or None, default=None
    Determines random number generation for shuffling
    the data.
    Pass an int for reproducible results across multiple function calls.
    See :term:`Glossary <random_state>`.

stratify : {array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_outputs), default=None
    If not None, data is split in a stratified fashion, using this as
    the class labels.

sample_weight : array-like of shape (n_samples,), default=None
    Contains weight values to be associated with each sample. Values are
    normalized to sum to one and interpreted as probabilities for
    sampling each data point.

    .. versionadded:: 1.7

Returns
-------
resampled_arrays : sequence of array-like of shape (n_samples,) or (n_samples, n_outputs)
    Sequence of resampled copies of the collections. The original arrays
    are not impacted.

See Also
--------
shuffle : Shuffle arrays or sparse matrices in a consistent way.

Examples
--------
It is possible to mix sparse and dense arrays in the same run::

  >>> import numpy as np
  >>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
  >>> y = np.array([0, 1, 2])

  >>> from scipy.sparse import coo_matrix
  >>> X_sparse = coo_matrix(X)

  >>> from sklearn.utils import resample
  >>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)
  >>> X
  array([[1., 0.],
         [2., 1.],
         [1., 0.]])

  >>> X_sparse
  <Compressed Sparse Row sparse matrix of dtype 'float64'
      with 4 stored elements and shape (3, 2)>

  >>> X_sparse.toarray()
  array([[1., 0.],
         [2., 1.],
         [1., 0.]])

  >>> y
  array([0, 1, 0])

  >>> resample(y, n_samples=2, random_state=0)
  array([0, 1])

Example using stratification::

  >>> y = [0, 0, 1, 1, 1, 1, 1, 1, 1]
  >>> resample(y, n_samples=5, replace=False, stratify=y,
  ...          random_state=0)
  [1, 1, 1, 0, 1]
resample
python
scikit-learn/scikit-learn
sklearn/utils/_indexing.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_indexing.py
BSD-3-Clause
def shuffle(*arrays, random_state=None, n_samples=None):
    """Shuffle arrays or sparse matrices in a consistent way.

    This is a convenience alias to ``resample(*arrays, replace=False)`` to do
    random permutations of the collections.

    Parameters
    ----------
    *arrays : sequence of indexable data-structures
        Indexable data-structures can be arrays, lists, dataframes or scipy
        sparse matrices with consistent first dimension.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for shuffling
        the data.
        Pass an int for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.

    n_samples : int, default=None
        Number of samples to generate. If left to None this is
        automatically set to the first dimension of the arrays. It should
        not be larger than the length of arrays.

    Returns
    -------
    shuffled_arrays : sequence of indexable data-structures
        Sequence of shuffled copies of the collections. The original arrays
        are not impacted.

    See Also
    --------
    resample : Resample arrays or sparse matrices in a consistent way.

    Examples
    --------
    It is possible to mix sparse and dense arrays in the same run::

      >>> import numpy as np
      >>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
      >>> y = np.array([0, 1, 2])

      >>> from scipy.sparse import coo_matrix
      >>> X_sparse = coo_matrix(X)

      >>> from sklearn.utils import shuffle
      >>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
      >>> X
      array([[0., 0.],
             [2., 1.],
             [1., 0.]])

      >>> X_sparse
      <Compressed Sparse Row sparse matrix of dtype 'float64'
          with 3 stored elements and shape (3, 2)>

      >>> X_sparse.toarray()
      array([[0., 0.],
             [2., 1.],
             [1., 0.]])

      >>> y
      array([2, 1, 0])

      >>> shuffle(y, n_samples=2, random_state=0)
      array([0, 1])
    """
    return resample(
        *arrays, replace=False, n_samples=n_samples, random_state=random_state
    )

Shuffle arrays or sparse matrices in a consistent way.

This is a convenience alias to ``resample(*arrays, replace=False)`` to do
random permutations of the collections.

Parameters
----------
*arrays : sequence of indexable data-structures
    Indexable data-structures can be arrays, lists, dataframes or scipy
    sparse matrices with consistent first dimension.

random_state : int, RandomState instance or None, default=None
    Determines random number generation for shuffling
    the data.
    Pass an int for reproducible results across multiple function calls.
    See :term:`Glossary <random_state>`.

n_samples : int, default=None
    Number of samples to generate. If left to None this is
    automatically set to the first dimension of the arrays. It should
    not be larger than the length of arrays.

Returns
-------
shuffled_arrays : sequence of indexable data-structures
    Sequence of shuffled copies of the collections. The original arrays
    are not impacted.

See Also
--------
resample : Resample arrays or sparse matrices in a consistent way.

Examples
--------
It is possible to mix sparse and dense arrays in the same run::

  >>> import numpy as np
  >>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
  >>> y = np.array([0, 1, 2])

  >>> from scipy.sparse import coo_matrix
  >>> X_sparse = coo_matrix(X)

  >>> from sklearn.utils import shuffle
  >>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
  >>> X
  array([[0., 0.],
         [2., 1.],
         [1., 0.]])

  >>> X_sparse
  <Compressed Sparse Row sparse matrix of dtype 'float64'
      with 3 stored elements and shape (3, 2)>

  >>> X_sparse.toarray()
  array([[0., 0.],
         [2., 1.],
         [1., 0.]])

  >>> y
  array([2, 1, 0])

  >>> shuffle(y, n_samples=2, random_state=0)
  array([0, 1])
shuffle
python
scikit-learn/scikit-learn
sklearn/utils/_indexing.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_indexing.py
BSD-3-Clause
def _get_mask(X, value_to_mask):
    """Compute the boolean mask X == value_to_mask.

    Parameters
    ----------
    X : {ndarray, sparse matrix} of shape (n_samples, n_features)
        Input data, where ``n_samples`` is the number of samples and
        ``n_features`` is the number of features.

    value_to_mask : {int, float}
        The value which is to be masked in X.

    Returns
    -------
    X_mask : {ndarray, sparse matrix} of shape (n_samples, n_features)
        Missing mask.
    """
    if not sp.issparse(X):
        # For all cases apart of a sparse input where we need to reconstruct
        # a sparse output
        return _get_dense_mask(X, value_to_mask)

    Xt = _get_dense_mask(X.data, value_to_mask)

    sparse_constructor = sp.csr_matrix if X.format == "csr" else sp.csc_matrix
    Xt_sparse = sparse_constructor(
        (Xt, X.indices.copy(), X.indptr.copy()), shape=X.shape, dtype=bool
    )

    return Xt_sparse

Compute the boolean mask X == value_to_mask.

Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
    Input data, where ``n_samples`` is the number of samples and
    ``n_features`` is the number of features.

value_to_mask : {int, float}
    The value which is to be masked in X.

Returns
-------
X_mask : {ndarray, sparse matrix} of shape (n_samples, n_features)
    Missing mask.
_get_mask
python
scikit-learn/scikit-learn
sklearn/utils/_mask.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_mask.py
BSD-3-Clause
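Usage note: a minimal sketch of masking NaNs in a dense array, assuming the private `sklearn.utils._mask` import path:

import numpy as np
from sklearn.utils._mask import _get_mask

X = np.array([[1.0, np.nan], [np.nan, 4.0]])
# NaN is handled specially (NaN != NaN), so the helper is used rather
# than a plain equality comparison.
print(_get_mask(X, np.nan))
# [[False  True]
#  [ True False]]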
def safe_mask(X, mask):
    """Return a mask which is safe to use on X.

    Parameters
    ----------
    X : {array-like, sparse matrix}
        Data on which to apply mask.

    mask : array-like
        Mask to be used on X.

    Returns
    -------
    mask : ndarray
        Array that is safe to use on X.

    Examples
    --------
    >>> from sklearn.utils import safe_mask
    >>> from scipy.sparse import csr_matrix
    >>> data = csr_matrix([[1], [2], [3], [4], [5]])
    >>> condition = [False, True, True, False, True]
    >>> mask = safe_mask(data, condition)
    >>> data[mask].toarray()
    array([[2],
           [3],
           [5]])
    """
    mask = np.asarray(mask)
    if np.issubdtype(mask.dtype, np.signedinteger):
        return mask

    if hasattr(X, "toarray"):
        ind = np.arange(mask.shape[0])
        mask = ind[mask]
    return mask

Return a mask which is safe to use on X.

Parameters
----------
X : {array-like, sparse matrix}
    Data on which to apply mask.

mask : array-like
    Mask to be used on X.

Returns
-------
mask : ndarray
    Array that is safe to use on X.

Examples
--------
>>> from sklearn.utils import safe_mask
>>> from scipy.sparse import csr_matrix
>>> data = csr_matrix([[1], [2], [3], [4], [5]])
>>> condition = [False, True, True, False, True]
>>> mask = safe_mask(data, condition)
>>> data[mask].toarray()
array([[2],
       [3],
       [5]])
safe_mask
python
scikit-learn/scikit-learn
sklearn/utils/_mask.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_mask.py
BSD-3-Clause
def axis0_safe_slice(X, mask, len_mask):
    """Return a mask which is safer to use on X than safe_mask.

    This mask is safer than safe_mask since it returns an
    empty array, when a sparse matrix is sliced with a boolean mask
    with all False, instead of raising an unhelpful error in older
    versions of SciPy.

    See: https://github.com/scipy/scipy/issues/5361

    Also note that we can avoid doing the dot product by checking if
    the len_mask is not zero in _huber_loss_and_gradient but this
    is not going to be the bottleneck, since the number of outliers
    and non_outliers are typically non-zero and it makes the code
    tougher to follow.

    Parameters
    ----------
    X : {array-like, sparse matrix}
        Data on which to apply mask.

    mask : ndarray
        Mask to be used on X.

    len_mask : int
        The length of the mask.

    Returns
    -------
    mask : ndarray
        Array that is safe to use on X.
    """
    if len_mask != 0:
        return X[safe_mask(X, mask), :]
    return np.zeros(shape=(0, X.shape[1]))

Return a mask which is safer to use on X than safe_mask.

This mask is safer than safe_mask since it returns an
empty array, when a sparse matrix is sliced with a boolean mask
with all False, instead of raising an unhelpful error in older
versions of SciPy.

See: https://github.com/scipy/scipy/issues/5361

Also note that we can avoid doing the dot product by checking if
the len_mask is not zero in _huber_loss_and_gradient but this
is not going to be the bottleneck, since the number of outliers
and non_outliers are typically non-zero and it makes the code
tougher to follow.

Parameters
----------
X : {array-like, sparse matrix}
    Data on which to apply mask.

mask : ndarray
    Mask to be used on X.

len_mask : int
    The length of the mask.

Returns
-------
mask : ndarray
    Array that is safe to use on X.
axis0_safe_slice
python
scikit-learn/scikit-learn
sklearn/utils/_mask.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_mask.py
BSD-3-Clause
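Usage note: a short sketch of the all-False edge case on a sparse matrix, assuming the private `sklearn.utils._mask` import path:

import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils._mask import axis0_safe_slice

X = csr_matrix(np.arange(6).reshape(3, 2))
mask = np.array([False, False, False])
# An all-False mask yields an empty (0, 2) array instead of an error.
print(axis0_safe_slice(X, mask, mask.sum()).shape)  # (0, 2)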
def indices_to_mask(indices, mask_length):
    """Convert list of indices to boolean mask.

    Parameters
    ----------
    indices : list-like
        List of integers treated as indices.
    mask_length : int
        Length of boolean mask to be generated.
        This parameter must be greater than max(indices).

    Returns
    -------
    mask : 1d boolean nd-array
        Boolean array that is True where indices are present, else False.

    Examples
    --------
    >>> from sklearn.utils._mask import indices_to_mask
    >>> indices = [1, 2, 3, 4]
    >>> indices_to_mask(indices, 5)
    array([False,  True,  True,  True,  True])
    """
    if mask_length <= np.max(indices):
        raise ValueError("mask_length must be greater than max(indices)")

    mask = np.zeros(mask_length, dtype=bool)
    mask[indices] = True
    return mask

Convert list of indices to boolean mask.

Parameters
----------
indices : list-like
    List of integers treated as indices.
mask_length : int
    Length of boolean mask to be generated.
    This parameter must be greater than max(indices).

Returns
-------
mask : 1d boolean nd-array
    Boolean array that is True where indices are present, else False.

Examples
--------
>>> from sklearn.utils._mask import indices_to_mask
>>> indices = [1, 2, 3, 4]
>>> indices_to_mask(indices, 5)
array([False,  True,  True,  True,  True])
indices_to_mask
python
scikit-learn/scikit-learn
sklearn/utils/_mask.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_mask.py
BSD-3-Clause
def _raise_for_params(params, owner, method, allow=None):
    """Raise an error if metadata routing is not enabled and params are passed.

    .. versionadded:: 1.4

    Parameters
    ----------
    params : dict
        The metadata passed to a method.

    owner : object
        The object to which the method belongs.

    method : str
        The name of the method, e.g. "fit".

    allow : list of str, default=None
        A list of parameters which are allowed to be passed even if metadata
        routing is not enabled.

    Raises
    ------
    ValueError
        If metadata routing is not enabled and params are passed.
    """
    caller = (
        f"{owner.__class__.__name__}.{method}" if method else owner.__class__.__name__
    )
    allow = allow if allow is not None else {}
    if not _routing_enabled() and (params.keys() - allow):
        raise ValueError(
            f"Passing extra keyword arguments to {caller} is only supported if"
            " enable_metadata_routing=True, which you can set using"
            " `sklearn.set_config`. See the User Guide"
            " <https://scikit-learn.org/stable/metadata_routing.html> for more"
            f" details. Extra parameters passed are: {set(params)}"
        )

Raise an error if metadata routing is not enabled and params are passed.

.. versionadded:: 1.4

Parameters
----------
params : dict
    The metadata passed to a method.

owner : object
    The object to which the method belongs.

method : str
    The name of the method, e.g. "fit".

allow : list of str, default=None
    A list of parameters which are allowed to be passed even if metadata
    routing is not enabled.

Raises
------
ValueError
    If metadata routing is not enabled and params are passed.
_raise_for_params
python
scikit-learn/scikit-learn
sklearn/utils/_metadata_requests.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_metadata_requests.py
BSD-3-Clause
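Usage note: a minimal sketch of the guard's behavior with routing left at its default (disabled); assumes the private `sklearn.utils._metadata_requests` import path, and `Owner` is a hypothetical placeholder class:

from sklearn.utils._metadata_requests import _raise_for_params

class Owner:
    pass

# No-op when no extra params are passed.
_raise_for_params({}, Owner(), "fit")

# With routing disabled, passing metadata raises a ValueError.
try:
    _raise_for_params({"sample_weight": [1.0]}, Owner(), "fit")
except ValueError as exc:
    print(type(exc).__name__)  # ValueError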
def _raise_for_unsupported_routing(obj, method, **kwargs):
    """Raise when metadata routing is enabled and metadata is passed.

    This is used in meta-estimators which have not implemented metadata routing
    to prevent silent bugs. There is no need to use this function if the
    meta-estimator is not accepting any metadata, especially in `fit`, since
    if a meta-estimator accepts any metadata, they would do that in `fit` as
    well.

    Parameters
    ----------
    obj : estimator
        The estimator for which we're raising the error.

    method : str
        The method where the error is raised.

    **kwargs : dict
        The metadata passed to the method.
    """
    kwargs = {key: value for key, value in kwargs.items() if value is not None}
    if _routing_enabled() and kwargs:
        cls_name = obj.__class__.__name__
        raise NotImplementedError(
            f"{cls_name}.{method} cannot accept given metadata ({set(kwargs.keys())})"
            f" since metadata routing is not yet implemented for {cls_name}."
        )

Raise when metadata routing is enabled and metadata is passed.

This is used in meta-estimators which have not implemented metadata routing
to prevent silent bugs. There is no need to use this function if the
meta-estimator is not accepting any metadata, especially in `fit`, since
if a meta-estimator accepts any metadata, they would do that in `fit` as
well.

Parameters
----------
obj : estimator
    The estimator for which we're raising the error.

method : str
    The method where the error is raised.

**kwargs : dict
    The metadata passed to the method.
_raise_for_unsupported_routing
python
scikit-learn/scikit-learn
sklearn/utils/_metadata_requests.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_metadata_requests.py
BSD-3-Clause
def get_metadata_routing(self):
    """Raise `NotImplementedError`.

    This estimator does not support metadata routing yet."""
    raise NotImplementedError(
        f"{self.__class__.__name__} has not implemented metadata routing yet."
    )
Raise `NotImplementedError`. This estimator does not support metadata routing yet.
get_metadata_routing
python
scikit-learn/scikit-learn
sklearn/utils/_metadata_requests.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_metadata_requests.py
BSD-3-Clause
def request_is_alias(item):
    """Check if an item is a valid alias.

    Values in ``VALID_REQUEST_VALUES`` are not considered aliases in this
    context. Only a string which is a valid identifier is.

    Parameters
    ----------
    item : object
        The given item to be checked if it can be an alias.

    Returns
    -------
    result : bool
        Whether the given item is a valid alias.
    """
    if item in VALID_REQUEST_VALUES:
        return False

    # item is only an alias if it's a valid identifier
    return isinstance(item, str) and item.isidentifier()

Check if an item is a valid alias.

Values in ``VALID_REQUEST_VALUES`` are not considered aliases in this
context. Only a string which is a valid identifier is.

Parameters
----------
item : object
    The given item to be checked if it can be an alias.

Returns
-------
result : bool
    Whether the given item is a valid alias.
request_is_alias
python
scikit-learn/scikit-learn
sklearn/utils/_metadata_requests.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_metadata_requests.py
BSD-3-Clause
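Usage note: a quick sketch of the alias check, assuming the private `sklearn.utils._metadata_requests` import path:

from sklearn.utils._metadata_requests import request_is_alias

print(request_is_alias("sample_weight"))  # True: a valid Python identifier
print(request_is_alias(True))             # False: a request value, not an alias
print(request_is_alias("not an id!"))     # False: not a valid identifier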
def add_request(
    self,
    *,
    param,
    alias,
):
    """Add request info for a metadata.

    Parameters
    ----------
    param : str
        The property for which a request is set.

    alias : str, or {True, False, None}
        Specifies which metadata should be routed to `param`

        - str: the name (or alias) of metadata given to a meta-estimator that
          should be routed to this parameter.
        - True: requested
        - False: not requested
        - None: error if passed
    """
    if not request_is_alias(alias) and not request_is_valid(alias):
        raise ValueError(
            f"The alias you're setting for `{param}` should be either a "
            "valid identifier or one of {None, True, False}, but given "
            f"value is: `{alias}`"
        )

    if alias == param:
        alias = True

    if alias == UNUSED:
        if param in self._requests:
            del self._requests[param]
        else:
            raise ValueError(
                f"Trying to remove parameter {param} with UNUSED which doesn't"
                " exist."
            )
    else:
        self._requests[param] = alias

    return self

Add request info for a metadata.

Parameters
----------
param : str
    The property for which a request is set.

alias : str, or {True, False, None}
    Specifies which metadata should be routed to `param`

    - str: the name (or alias) of metadata given to a meta-estimator that
      should be routed to this parameter.
    - True: requested
    - False: not requested
    - None: error if passed
add_request
python
scikit-learn/scikit-learn
sklearn/utils/_metadata_requests.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_metadata_requests.py
BSD-3-Clause
def _get_param_names(self, return_alias):
    """Get names of all metadata that can be consumed or routed by this method.

    This method returns the names of all metadata, even the ``False``
    ones.

    Parameters
    ----------
    return_alias : bool
        Controls whether original or aliased names should be returned. If
        ``False``, aliases are ignored and original names are returned.

    Returns
    -------
    names : set of str
        A set of strings with the names of all parameters.
    """
    return set(
        alias if return_alias and not request_is_valid(alias) else prop
        for prop, alias in self._requests.items()
        if not request_is_valid(alias) or alias is not False
    )

Get names of all metadata that can be consumed or routed by this method.

This method returns the names of all metadata, even the ``False``
ones.

Parameters
----------
return_alias : bool
    Controls whether original or aliased names should be returned. If
    ``False``, aliases are ignored and original names are returned.

Returns
-------
names : set of str
    A set of strings with the names of all parameters.
_get_param_names
python
scikit-learn/scikit-learn
sklearn/utils/_metadata_requests.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_metadata_requests.py
BSD-3-Clause
def _check_warnings(self, *, params):
    """Check whether metadata is passed which is marked as WARN.

    If any metadata is passed which is marked as WARN, a warning is raised.

    Parameters
    ----------
    params : dict
        The metadata passed to a method.
    """
    params = {} if params is None else params
    warn_params = {
        prop
        for prop, alias in self._requests.items()
        if alias == WARN and prop in params
    }
    for param in warn_params:
        warn(
            f"Support for {param} has recently been added to this class. "
            "To maintain backward compatibility, it is ignored now. "
            f"Using `set_{self.method}_request({param}={{True, False}})` "
            "on this method of the class, you can set the request value "
            "to False to silence this warning, or to True to consume and "
            "use the metadata."
        )

Check whether metadata is passed which is marked as WARN.

If any metadata is passed which is marked as WARN, a warning is raised.

Parameters
----------
params : dict
    The metadata passed to a method.
_check_warnings
python
scikit-learn/scikit-learn
sklearn/utils/_metadata_requests.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_metadata_requests.py
BSD-3-Clause
def _route_params(self, params, parent, caller):
    """Prepare the given parameters to be passed to the method.

    The output of this method can be used directly as the input to the
    corresponding method as extra props.

    Parameters
    ----------
    params : dict
        A dictionary of provided metadata.

    parent : object
        Parent class object, that routes the metadata.

    caller : str
        Method from the parent class object, where the metadata is routed from.

    Returns
    -------
    params : Bunch
        A :class:`~sklearn.utils.Bunch` of {prop: value} which can be given to
        the corresponding method.
    """
    self._check_warnings(params=params)
    unrequested = dict()
    args = {arg: value for arg, value in params.items() if value is not None}
    res = Bunch()
    for prop, alias in self._requests.items():
        if alias is False or alias == WARN:
            continue
        elif alias is True and prop in args:
            res[prop] = args[prop]
        elif alias is None and prop in args:
            unrequested[prop] = args[prop]
        elif alias in args:
            res[prop] = args[alias]
    if unrequested:
        if self.method in COMPOSITE_METHODS:
            callee_methods = COMPOSITE_METHODS[self.method]
        else:
            callee_methods = [self.method]
        set_requests_on = "".join(
            [
                f".set_{method}_request({{metadata}}=True/False)"
                for method in callee_methods
            ]
        )
        message = (
            f"[{', '.join([key for key in unrequested])}] are passed but are not"
            " explicitly set as requested or not requested for"
            f" {self.owner}.{self.method}, which is used within"
            f" {parent}.{caller}. Call `{self.owner}"
            + set_requests_on
            + "` for each metadata you want to request/ignore. See the"
            " Metadata Routing User guide"
            " <https://scikit-learn.org/stable/metadata_routing.html> for more"
            " information."
        )
        raise UnsetMetadataPassedError(
            message=message,
            unrequested_params=unrequested,
            routed_params=res,
        )
    return res

Prepare the given parameters to be passed to the method.

The output of this method can be used directly as the input to the
corresponding method as extra props.

Parameters
----------
params : dict
    A dictionary of provided metadata.

parent : object
    Parent class object, that routes the metadata.

caller : str
    Method from the parent class object, where the metadata is routed from.

Returns
-------
params : Bunch
    A :class:`~sklearn.utils.Bunch` of {prop: value} which can be given to
    the corresponding method.
_route_params
python
scikit-learn/scikit-learn
sklearn/utils/_metadata_requests.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_metadata_requests.py
BSD-3-Clause
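A hedged sketch of alias resolution with the same internal classes; the owner, parent, and caller names here are hypothetical. Passing a key whose request value is still ``None`` would instead raise ``UnsetMetadataPassedError`` with the message assembled above.

# Hedged sketch: metadata passed under an alias is returned under the
# original parameter name, ready to be forwarded to the method.
from sklearn.utils._metadata_requests import MethodMetadataRequest

mmr = MethodMetadataRequest(owner="Demo", method="fit")
mmr.add_request(param="sample_weight", alias="sw")  # consumed under alias "sw"
routed = mmr._route_params(params={"sw": [1.0, 2.0]}, parent="Meta", caller="fit")
print(routed)  # {'sample_weight': [1.0, 2.0]}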
def _consumes(self, params): """Check whether the given parameters are consumed by this method. Parameters ---------- params : iterable of str An iterable of parameters to check. Returns ------- consumed : set of str A set of parameters which are consumed by this method. """ params = set(params) res = set() for prop, alias in self._requests.items(): if alias is True and prop in params: res.add(prop) elif isinstance(alias, str) and alias in params: res.add(alias) return res
Check whether the given parameters are consumed by this method. Parameters ---------- params : iterable of str An iterable of parameters to check. Returns ------- consumed : set of str A set of parameters which are consumed by this method.
_consumes
python
scikit-learn/scikit-learn
sklearn/utils/_metadata_requests.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_metadata_requests.py
BSD-3-Clause
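A short sketch, continuing with the same internal API: note that for aliased metadata the *alias* is what gets reported, since that is the name callers pass.

# Hedged sketch of _consumes with an aliased request.
from sklearn.utils._metadata_requests import MethodMetadataRequest

mmr = MethodMetadataRequest(owner="Demo", method="fit")
mmr.add_request(param="sample_weight", alias="sw")
print(mmr._consumes(params=["sw", "groups"]))  # {'sw'}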
def _route_params(self, *, params, method, parent, caller): """Prepare the given parameters to be passed to the method. The output of this method can be used directly as the input to the corresponding method as extra keyword arguments to pass metadata. Parameters ---------- params : dict A dictionary of provided metadata. method : str The name of the method for which the parameters are requested and routed. parent : object Parent class object, that routes the metadata. caller : str Method from the parent class object, where the metadata is routed from. Returns ------- params : Bunch A :class:`~sklearn.utils.Bunch` of {prop: value} which can be given to the corresponding method. """ return getattr(self, method)._route_params( params=params, parent=parent, caller=caller )
Prepare the given parameters to be passed to the method. The output of this method can be used directly as the input to the corresponding method as extra keyword arguments to pass metadata. Parameters ---------- params : dict A dictionary of provided metadata. method : str The name of the method for which the parameters are requested and routed. parent : object Parent class object, that routes the metadata. caller : str Method from the parent class object, where the metadata is routed from. Returns ------- params : Bunch A :class:`~sklearn.utils.Bunch` of {prop: value} which can be given to the corresponding method.
_route_params
python
scikit-learn/scikit-learn
sklearn/utils/_metadata_requests.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_metadata_requests.py
BSD-3-Clause
def _serialize(self): """Serialize the object. Returns ------- obj : dict A serialized version of the instance in the form of a dictionary. """ output = dict() for method in SIMPLE_METHODS: mmr = getattr(self, method) if len(mmr.requests): output[method] = mmr._serialize() return output
Serialize the object. Returns ------- obj : dict A serialized version of the instance in the form of a dictionary.
_serialize
python
scikit-learn/scikit-learn
sklearn/utils/_metadata_requests.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_metadata_requests.py
BSD-3-Clause
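A hedged sketch of the serialized form; `_serialize` is private, so the exact output may vary by version. Only methods with at least one stored request appear in the dictionary.

# Hedged sketch: serializing a MetadataRequest (scikit-learn >= 1.4 assumed).
from sklearn import set_config
from sklearn.linear_model import LogisticRegression
from sklearn.utils.metadata_routing import get_routing_for_object

set_config(enable_metadata_routing=True)
req = get_routing_for_object(
    LogisticRegression().set_fit_request(sample_weight=True)
)
# e.g. {'fit': {'sample_weight': True}, 'score': {'sample_weight': None}}
print(req._serialize())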
def add(self, *, caller, callee):
    """Add a method mapping.

    Parameters
    ----------
    caller : str
        Parent estimator's method name in which the ``callee`` is called.

    callee : str
        Child object's method name. This method is called in ``caller``.

    Returns
    -------
    self : MethodMapping
        Returns self.
    """
    if caller not in METHODS:
        raise ValueError(
            f"Given caller: {caller} is not a valid method. Valid methods are:"
            f" {METHODS}"
        )
    if callee not in METHODS:
        raise ValueError(
            f"Given callee: {callee} is not a valid method. Valid methods are:"
            f" {METHODS}"
        )
    self._routes.append(MethodPair(caller=caller, callee=callee))
    return self
Add a method mapping. Parameters ---------- caller : str Parent estimator's method name in which the ``callee`` is called. callee : str Child object's method name. This method is called in ``caller``. Returns ------- self : MethodMapping Returns self.
add
python
scikit-learn/scikit-learn
sklearn/utils/_metadata_requests.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_metadata_requests.py
BSD-3-Clause
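A sketch of building a mapping for a meta-estimator whose `fit` both fits and scores its sub-estimator. `MethodMapping` is public API (`sklearn.utils.metadata_routing`); calls chain because `add` returns `self`.

from sklearn.utils.metadata_routing import MethodMapping

mapping = (
    MethodMapping()
    .add(caller="fit", callee="fit")
    .add(caller="fit", callee="score")
)
# The serialized form mirrors the stored pairs:
# [{'caller': 'fit', 'callee': 'fit'}, {'caller': 'fit', 'callee': 'score'}]
print(mapping._serialize())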
def _serialize(self): """Serialize the object. Returns ------- obj : list A serialized version of the instance in the form of a list. """ result = list() for route in self._routes: result.append({"caller": route.caller, "callee": route.callee}) return result
Serialize the object. Returns ------- obj : list A serialized version of the instance in the form of a list.
_serialize
python
scikit-learn/scikit-learn
sklearn/utils/_metadata_requests.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_metadata_requests.py
BSD-3-Clause
def add_self_request(self, obj): """Add `self` (as a consumer) to the routing. This method is used if the router is also a consumer, and hence the router itself needs to be included in the routing. The passed object can be an estimator or a :class:`~sklearn.utils.metadata_routing.MetadataRequest`. A router should add itself using this method instead of `add` since it should be treated differently than the other objects to which metadata is routed by the router. Parameters ---------- obj : object This is typically the router instance, i.e. `self` in a ``get_metadata_routing()`` implementation. It can also be a ``MetadataRequest`` instance. Returns ------- self : MetadataRouter Returns `self`. """ if getattr(obj, "_type", None) == "metadata_request": self._self_request = deepcopy(obj) elif hasattr(obj, "_get_metadata_request"): self._self_request = deepcopy(obj._get_metadata_request()) else: raise ValueError( "Given `obj` is neither a `MetadataRequest` nor does it implement the" " required API. Inheriting from `BaseEstimator` implements the required" " API." ) return self
Add `self` (as a consumer) to the routing. This method is used if the router is also a consumer, and hence the router itself needs to be included in the routing. The passed object can be an estimator or a :class:`~sklearn.utils.metadata_routing.MetadataRequest`. A router should add itself using this method instead of `add` since it should be treated differently than the other objects to which metadata is routed by the router. Parameters ---------- obj : object This is typically the router instance, i.e. `self` in a ``get_metadata_routing()`` implementation. It can also be a ``MetadataRequest`` instance. Returns ------- self : MetadataRouter Returns `self`.
add_self_request
python
scikit-learn/scikit-learn
sklearn/utils/_metadata_requests.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_metadata_requests.py
BSD-3-Clause
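A hedged sketch of a `get_metadata_routing` implementation for a meta-estimator that is itself a consumer (e.g. its own `fit` accepts `sample_weight`); the class name `Demo` is hypothetical.

from sklearn.base import BaseEstimator, MetaEstimatorMixin
from sklearn.utils.metadata_routing import MetadataRouter, MethodMapping

class Demo(MetaEstimatorMixin, BaseEstimator):  # hypothetical meta-estimator
    def __init__(self, estimator):
        self.estimator = estimator

    def get_metadata_routing(self):
        return (
            MetadataRouter(owner=self.__class__.__name__)
            .add_self_request(self)  # self consumes metadata; not via add()
            .add(
                estimator=self.estimator,
                method_mapping=MethodMapping().add(caller="fit", callee="fit"),
            )
        )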
def add(self, *, method_mapping, **objs): """Add named objects with their corresponding method mapping. Parameters ---------- method_mapping : MethodMapping The mapping between the child and the parent's methods. **objs : dict A dictionary of objects from which metadata is extracted by calling :func:`~sklearn.utils.metadata_routing.get_routing_for_object` on them. Returns ------- self : MetadataRouter Returns `self`. """ method_mapping = deepcopy(method_mapping) for name, obj in objs.items(): self._route_mappings[name] = RouterMappingPair( mapping=method_mapping, router=get_routing_for_object(obj) ) return self
Add named objects with their corresponding method mapping. Parameters ---------- method_mapping : MethodMapping The mapping between the child and the parent's methods. **objs : dict A dictionary of objects from which metadata is extracted by calling :func:`~sklearn.utils.metadata_routing.get_routing_for_object` on them. Returns ------- self : MetadataRouter Returns `self`.
add
python
scikit-learn/scikit-learn
sklearn/utils/_metadata_requests.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_metadata_requests.py
BSD-3-Clause
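A hedged sketch of a `fit` body for the `Demo` class above (not standalone): the routing declared via `add` is typically resolved with `process_routing`, which validates the passed metadata and returns the nested Bunch described below.

from sklearn.base import clone
from sklearn.utils.metadata_routing import process_routing

def fit(self, X, y, **fit_params):
    # Resolve which metadata goes to which child method.
    routed = process_routing(self, "fit", **fit_params)
    self.estimator_ = clone(self.estimator).fit(X, y, **routed.estimator.fit)
    return self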
def consumes(self, method, params): """Check whether the given parameters are consumed by the given method. .. versionadded:: 1.4 Parameters ---------- method : str The name of the method to check. params : iterable of str An iterable of parameters to check. Returns ------- consumed : set of str A set of parameters which are consumed by the given method. """ res = set() if self._self_request: res = res | self._self_request.consumes(method=method, params=params) for _, route_mapping in self._route_mappings.items(): for caller, callee in route_mapping.mapping: if caller == method: res = res | route_mapping.router.consumes( method=callee, params=params ) return res
Check whether the given parameters are consumed by the given method. .. versionadded:: 1.4 Parameters ---------- method : str The name of the method to check. params : iterable of str An iterable of parameters to check. Returns ------- consumed : set of str A set of parameters which are consumed by the given method.
consumes
python
scikit-learn/scikit-learn
sklearn/utils/_metadata_requests.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_metadata_requests.py
BSD-3-Clause
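A runnable sketch (scikit-learn >= 1.4 assumed; the owner name "Demo" is hypothetical): only metadata requested somewhere in the routing graph is reported as consumed.

from sklearn import set_config
from sklearn.linear_model import LogisticRegression
from sklearn.utils.metadata_routing import MetadataRouter, MethodMapping

set_config(enable_metadata_routing=True)
router = MetadataRouter(owner="Demo").add(
    estimator=LogisticRegression().set_fit_request(sample_weight=True),
    method_mapping=MethodMapping().add(caller="fit", callee="fit"),
)
print(router.consumes(method="fit", params={"sample_weight", "groups"}))
# {'sample_weight'}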
def _get_param_names(self, *, method, return_alias, ignore_self_request):
    """Get names of all metadata that can be consumed or routed by the \
specified method.

    This method returns the names of all metadata whose request value is
    not ``False``; entries requested as ``None`` or ``WARN`` are included,
    as are aliased ones.

    Parameters
    ----------
    method : str
        The name of the method for which metadata names are requested.

    return_alias : bool
        Controls whether original or aliased names should be returned,
        which only applies to the stored `self`. If no `self` routing
        object is stored, this parameter has no effect.

    ignore_self_request : bool
        If `self._self_request` should be ignored. This is used in
        `_route_params`. If ``True``, ``return_alias`` has no effect.

    Returns
    -------
    names : set of str
        A set of strings with the names of all parameters.
    """
    res = set()
    if self._self_request and not ignore_self_request:
        res = res.union(
            self._self_request._get_param_names(
                method=method, return_alias=return_alias
            )
        )

    for name, route_mapping in self._route_mappings.items():
        for caller, callee in route_mapping.mapping:
            if caller == method:
                res = res.union(
                    route_mapping.router._get_param_names(
                        method=callee, return_alias=True, ignore_self_request=False
                    )
                )
    return res
Get names of all metadata that can be consumed or routed by the specified method.

This method returns the names of all metadata whose request value is
not ``False``; entries requested as ``None`` or ``WARN`` are included,
as are aliased ones.

Parameters
----------
method : str
    The name of the method for which metadata names are requested.

return_alias : bool
    Controls whether original or aliased names should be returned,
    which only applies to the stored `self`. If no `self` routing
    object is stored, this parameter has no effect.

ignore_self_request : bool
    If `self._self_request` should be ignored. This is used in
    `_route_params`. If ``True``, ``return_alias`` has no effect.

Returns
-------
names : set of str
    A set of strings with the names of all parameters.
_get_param_names
python
scikit-learn/scikit-learn
sklearn/utils/_metadata_requests.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_metadata_requests.py
BSD-3-Clause
def _route_params(self, *, params, method, parent, caller): """Prepare the given parameters to be passed to the method. This is used when a router is used as a child object of another router. The parent router then passes all parameters understood by the child object to it and delegates their validation to the child. The output of this method can be used directly as the input to the corresponding method as extra props. Parameters ---------- params : dict A dictionary of provided metadata. method : str The name of the method for which the parameters are requested and routed. parent : object Parent class object, that routes the metadata. caller : str Method from the parent class object, where the metadata is routed from. Returns ------- params : Bunch A :class:`~sklearn.utils.Bunch` of {prop: value} which can be given to the corresponding method. """ res = Bunch() if self._self_request: res.update( self._self_request._route_params( params=params, method=method, parent=parent, caller=caller, ) ) param_names = self._get_param_names( method=method, return_alias=True, ignore_self_request=True ) child_params = { key: value for key, value in params.items() if key in param_names } for key in set(res.keys()).intersection(child_params.keys()): # conflicts are okay if the passed objects are the same, but it's # an issue if they're different objects. if child_params[key] is not res[key]: raise ValueError( f"In {self.owner}, there is a conflict on {key} between what is" " requested for this estimator and what is requested by its" " children. You can resolve this conflict by using an alias for" " the child estimator(s) requested metadata." ) res.update(child_params) return res
Prepare the given parameters to be passed to the method. This is used when a router is used as a child object of another router. The parent router then passes all parameters understood by the child object to it and delegates their validation to the child. The output of this method can be used directly as the input to the corresponding method as extra props. Parameters ---------- params : dict A dictionary of provided metadata. method : str The name of the method for which the parameters are requested and routed. parent : object Parent class object, that routes the metadata. caller : str Method from the parent class object, where the metadata is routed from. Returns ------- params : Bunch A :class:`~sklearn.utils.Bunch` of {prop: value} which can be given to the corresponding method.
_route_params
python
scikit-learn/scikit-learn
sklearn/utils/_metadata_requests.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_metadata_requests.py
BSD-3-Clause
def route_params(self, *, caller, params):
    """Return the input parameters requested by child objects.

    The output of this method is a :class:`~sklearn.utils.Bunch`, which
    includes the metadata for all methods of each child object that is
    used in the router's `caller` method.

    If the router is also a consumer, the consumer's (`self`'s) requested
    metadata is additionally checked for ``WARN`` values.

    Parameters
    ----------
    caller : str
        The name of the method for which the parameters are requested and
        routed. If called inside the :term:`fit` method of a router, it
        would be `"fit"`.

    params : dict
        A dictionary of provided metadata.

    Returns
    -------
    params : Bunch
        A :class:`~sklearn.utils.Bunch` of the form
        ``{"object_name": {"method_name": {params: value}}}`` which can be
        used to pass the required metadata to corresponding methods or
        corresponding child objects.
    """
    if self._self_request:
        self._self_request._check_warnings(params=params, method=caller)

    res = Bunch()
    for name, route_mapping in self._route_mappings.items():
        router, mapping = route_mapping.router, route_mapping.mapping

        res[name] = Bunch()
        for _caller, _callee in mapping:
            if _caller == caller:
                res[name][_callee] = router._route_params(
                    params=params,
                    method=_callee,
                    parent=self.owner,
                    caller=caller,
                )
    return res
Return the input parameters requested by child objects.

The output of this method is a :class:`~sklearn.utils.Bunch`, which
includes the metadata for all methods of each child object that is
used in the router's `caller` method.

If the router is also a consumer, the consumer's (`self`'s) requested
metadata is additionally checked for ``WARN`` values.

Parameters
----------
caller : str
    The name of the method for which the parameters are requested and
    routed. If called inside the :term:`fit` method of a router, it
    would be `"fit"`.

params : dict
    A dictionary of provided metadata.

Returns
-------
params : Bunch
    A :class:`~sklearn.utils.Bunch` of the form
    ``{"object_name": {"method_name": {params: value}}}`` which can be
    used to pass the required metadata to corresponding methods or
    corresponding child objects.
route_params
python
scikit-learn/scikit-learn
sklearn/utils/_metadata_requests.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_metadata_requests.py
BSD-3-Clause
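A runnable sketch of the nested output shape, reusing the router construction from the `consumes` sketch above; "Demo" remains a hypothetical owner name. The result is keyed first by child name, then by callee method.

import numpy as np
from sklearn import set_config
from sklearn.linear_model import LogisticRegression
from sklearn.utils.metadata_routing import MetadataRouter, MethodMapping

set_config(enable_metadata_routing=True)
router = MetadataRouter(owner="Demo").add(
    estimator=LogisticRegression().set_fit_request(sample_weight=True),
    method_mapping=MethodMapping().add(caller="fit", callee="fit"),
)
w = np.ones(3)
routed = router.route_params(caller="fit", params={"sample_weight": w})
print(routed.estimator.fit)  # {'sample_weight': array([1., 1., 1.])}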
def validate_metadata(self, *, method, params): """Validate given metadata for a method. This raises a ``TypeError`` if some of the passed metadata are not understood by child objects. Parameters ---------- method : str The name of the method for which the parameters are requested and routed. If called inside the :term:`fit` method of a router, it would be `"fit"`. params : dict A dictionary of provided metadata. """ param_names = self._get_param_names( method=method, return_alias=False, ignore_self_request=False ) if self._self_request: self_params = self._self_request._get_param_names( method=method, return_alias=False ) else: self_params = set() extra_keys = set(params.keys()) - param_names - self_params if extra_keys: raise TypeError( f"{self.owner}.{method} got unexpected argument(s) {extra_keys}, which" " are not routed to any object." )
Validate given metadata for a method. This raises a ``TypeError`` if some of the passed metadata are not understood by child objects. Parameters ---------- method : str The name of the method for which the parameters are requested and routed. If called inside the :term:`fit` method of a router, it would be `"fit"`. params : dict A dictionary of provided metadata.
validate_metadata
python
scikit-learn/scikit-learn
sklearn/utils/_metadata_requests.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_metadata_requests.py
BSD-3-Clause
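A short sketch continuing the `router` and `w` from the previous example: requested metadata validates cleanly, while a key no object routes raises `TypeError` with the message built above.

router.validate_metadata(method="fit", params={"sample_weight": w})  # OK
try:
    router.validate_metadata(method="fit", params={"unknown": 0})
except TypeError as e:
    print(e)  # Demo.fit got unexpected argument(s) {'unknown'}, which are ...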
def _serialize(self): """Serialize the object. Returns ------- obj : dict A serialized version of the instance in the form of a dictionary. """ res = dict() if self._self_request: res["$self_request"] = self._self_request._serialize() for name, route_mapping in self._route_mappings.items(): res[name] = dict() res[name]["mapping"] = route_mapping.mapping._serialize() res[name]["router"] = route_mapping.router._serialize() return res
Serialize the object. Returns ------- obj : dict A serialized version of the instance in the form of a dictionary.
_serialize
python
scikit-learn/scikit-learn
sklearn/utils/_metadata_requests.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_metadata_requests.py
BSD-3-Clause
def get_routing_for_object(obj=None):
    """Get a ``Metadata{Router, Request}`` instance from the given object.

    This function returns a
    :class:`~sklearn.utils.metadata_routing.MetadataRouter` or a
    :class:`~sklearn.utils.metadata_routing.MetadataRequest` from the given input.

    This function always returns a copy or an instance constructed from the
    input, such that changing the output of this function will not change the
    original object.

    .. versionadded:: 1.3

    Parameters
    ----------
    obj : object
        - If the object provides a `get_metadata_routing` method, return a copy
          of the output of that method.
        - If the object is already a
          :class:`~sklearn.utils.metadata_routing.MetadataRequest` or a
          :class:`~sklearn.utils.metadata_routing.MetadataRouter`, return a copy
          of that.
        - Returns an empty
          :class:`~sklearn.utils.metadata_routing.MetadataRequest` otherwise.

    Returns
    -------
    obj : MetadataRequest or MetadataRouter
        A ``MetadataRequest`` or a ``MetadataRouter`` taken or created from the
        given object.
    """
    # doing this instead of a try/except since an AttributeError could be raised
    # for other reasons.
    if hasattr(obj, "get_metadata_routing"):
        return deepcopy(obj.get_metadata_routing())

    elif getattr(obj, "_type", None) in ["metadata_request", "metadata_router"]:
        return deepcopy(obj)

    return MetadataRequest(owner=None)
Get a ``Metadata{Router, Request}`` instance from the given object.

This function returns a
:class:`~sklearn.utils.metadata_routing.MetadataRouter` or a
:class:`~sklearn.utils.metadata_routing.MetadataRequest` from the given input.

This function always returns a copy or an instance constructed from the
input, such that changing the output of this function will not change the
original object.

.. versionadded:: 1.3

Parameters
----------
obj : object
    - If the object provides a `get_metadata_routing` method, return a copy
      of the output of that method.
    - If the object is already a
      :class:`~sklearn.utils.metadata_routing.MetadataRequest` or a
      :class:`~sklearn.utils.metadata_routing.MetadataRouter`, return a copy
      of that.
    - Returns an empty
      :class:`~sklearn.utils.metadata_routing.MetadataRequest` otherwise.

Returns
-------
obj : MetadataRequest or MetadataRouter
    A ``MetadataRequest`` or a ``MetadataRouter`` taken or created from the
    given object.
get_routing_for_object
python
scikit-learn/scikit-learn
sklearn/utils/_metadata_requests.py
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/_metadata_requests.py
BSD-3-Clause
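A runnable sketch of the fallback branch: plain objects carry no routing information, so an empty ``MetadataRequest`` comes back (its repr is the serialized dictionary).

from sklearn.utils.metadata_routing import get_routing_for_object

print(get_routing_for_object(object()))  # {}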