def dtypes(self, *, device=None, kind=None):
"""
The array API data types supported by PyTorch.
Note that this function only returns data types that are defined by
the array API.
Parameters
----------
device : Device, optional
The device to get the data types for.
Data types that cannot be instantiated on this device are omitted from the result.
kind : str or tuple of str, optional
The kind of data types to return. If ``None``, all data types are
returned. If a string, only data types of that kind are returned.
If a tuple, a dictionary containing the union of the given kinds
is returned. The following kinds are supported:
- ``'bool'``: boolean data types (i.e., ``bool``).
- ``'signed integer'``: signed integer data types (i.e., ``int8``,
``int16``, ``int32``, ``int64``).
- ``'unsigned integer'``: unsigned integer data types (i.e.,
``uint8``, ``uint16``, ``uint32``, ``uint64``).
- ``'integral'``: integer data types. Shorthand for ``('signed
integer', 'unsigned integer')``.
- ``'real floating'``: real-valued floating-point data types
(i.e., ``float32``, ``float64``).
- ``'complex floating'``: complex floating-point data types (i.e.,
``complex64``, ``complex128``).
- ``'numeric'``: numeric data types. Shorthand for ``('integral',
'real floating', 'complex floating')``.
Returns
-------
dtypes : dict
A dictionary mapping the names of data types to the corresponding
PyTorch data types.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.devices
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.dtypes(kind='signed integer')
{'int8': torch.int8,
'int16': torch.int16,
'int32': torch.int32,
'int64': torch.int64}
"""
res = self._dtypes(kind)
for k, v in res.copy().items():
try:
torch.empty((0,), dtype=v, device=device)
except Exception:
del res[k]
return res
# Source: https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_compat/torch/_info.py (BSD-3-Clause)
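# Illustration (not from the upstream file): the probing pattern used by
# dtypes() above, sketched standalone. Assumes a local PyTorch install;
# supported_dtypes() is a hypothetical helper name.
import torch

def supported_dtypes(candidates, device=None):
    """Keep only dtypes for which an empty tensor can be allocated on `device`."""
    res = {}
    for name, dtype in candidates.items():
        try:
            torch.empty((0,), dtype=dtype, device=device)
            res[name] = dtype
        except Exception:
            pass
    return res

# e.g. on CPU this should keep both entries:
# supported_dtypes({"int8": torch.int8, "float32": torch.float32})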
def devices(self):
"""
The devices supported by PyTorch.
Returns
-------
devices : list[Device]
The devices supported by PyTorch.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.dtypes
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.devices()
[device(type='cpu'), device(type='mps', index=0), device(type='meta')]
"""
# Torch doesn't have a straightforward way to get the list of all
# currently supported devices. To do this, we first parse the error
# message of torch.device to get the list of all possible types of
# device:
try:
torch.device('notadevice')
raise AssertionError("unreachable") # pragma: nocover
except RuntimeError as e:
# The error message is something like:
# "Expected one of cpu, cuda, ipu, xpu, mkldnn, opengl, opencl, ideep, hip, ve, fpga, ort, xla, lazy, vulkan, mps, meta, hpu, mtia, privateuseone device type at start of device string: notadevice"
devices_names = e.args[0].split('Expected one of ')[1].split(' device type')[0].split(', ')
# Next we need to check for different indices for different devices.
# device(device_name, index=index) doesn't actually check if the
# device name or index is valid. We have to try to create a tensor
# with it (which is why this function is cached).
devices = []
for device_name in devices_names:
i = 0
while True:
try:
a = torch.empty((0,), device=torch.device(device_name, index=i))
if a.device in devices:
break
devices.append(a.device)
except Exception:
break
i += 1
return devices
# Source: https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_compat/torch/_info.py (BSD-3-Clause)
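# Illustration (not from the upstream file): the error-message parsing that
# devices() relies on, runnable on its own. Assumes a local PyTorch install;
# the exact message wording may vary across Torch versions.
import torch

try:
    torch.device("notadevice")
except RuntimeError as e:
    names = e.args[0].split("Expected one of ")[1].split(" device type")[0].split(", ")
    # e.g. ['cpu', 'cuda', 'ipu', 'xpu', ...]
    print(names)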
def lazy_xp_function( # type: ignore[explicit-any]
func: Callable[..., Any],
*,
allow_dask_compute: int = 0,
jax_jit: bool = True,
static_argnums: int | Sequence[int] | None = None,
static_argnames: str | Iterable[str] | None = None,
) -> None: # numpydoc ignore=GL07
"""
Tag a function to be tested on lazy backends.
Tag a function so that when any tests are executed with ``xp=jax.numpy`` the
function is replaced with a jitted version of itself, and when it is executed with
``xp=dask.array`` the function will raise if it attempts to materialize the graph.
This will later be expanded to provide test coverage for other lazy backends.
In order for the tag to be effective, the test or a fixture must call
:func:`patch_lazy_xp_functions`.
Parameters
----------
func : callable
Function to be tested.
allow_dask_compute : int, optional
Number of times `func` is allowed to internally materialize the Dask graph. This
is typically triggered by ``bool()``, ``float()``, or ``np.asarray()``.
Set to 1 if you are aware that `func` converts the input parameters to NumPy and
want to let it do so at least for the time being, knowing that it is going to be
extremely detrimental for performance.
If a test needs values higher than 1 to pass, it is a canary that the conversion
to NumPy/bool/float is happening multiple times, which translates to multiple
computations of the whole graph. Short of making the function fully lazy, you
should at least add explicit calls to ``np.asarray()`` early in the function.
*Note:* the counter of `allow_dask_compute` resets after each call to `func`, so
a test function that invokes `func` multiple times should still work with this
parameter set to 1.
Default: 0, meaning that `func` must be fully lazy and never materialize the
graph.
jax_jit : bool, optional
Set to True to replace `func` with ``jax.jit(func)`` after calling the
:func:`patch_lazy_xp_functions` test helper with ``xp=jax.numpy``. Set to False
if `func` is only compatible with eager (non-jitted) JAX. Default: True.
static_argnums : int | Sequence[int], optional
Passed to jax.jit. Positional arguments to treat as static (compile-time
constant). Default: infer from `static_argnames` using
`inspect.signature(func)`.
static_argnames : str | Iterable[str], optional
Passed to jax.jit. Named arguments to treat as static (compile-time constant).
Default: infer from `static_argnums` using `inspect.signature(func)`.
See Also
--------
patch_lazy_xp_functions : Companion function to call from the test or fixture.
jax.jit : JAX function to compile a function for performance.
Examples
--------
In ``test_mymodule.py``::
from array_api_extra.testing import lazy_xp_function
from mymodule import myfunc
lazy_xp_function(myfunc)
def test_myfunc(xp):
a = xp.asarray([1, 2])
# When xp=jax.numpy, this is the same as `b = jax.jit(myfunc)(a)`
# When xp=dask.array, crash on compute() or persist()
b = myfunc(a)
Notes
-----
In order for this tag to be effective, the test function must be imported into the
test module globals without its namespace; alternatively its namespace must be
declared in a ``lazy_xp_modules`` list in the test module globals.
Example 1::
from mymodule import myfunc
lazy_xp_function(myfunc)
def test_myfunc(xp):
x = myfunc(xp.asarray([1, 2]))
Example 2::
import mymodule
lazy_xp_modules = [mymodule]
lazy_xp_function(mymodule.myfunc)
def test_myfunc(xp):
x = mymodule.myfunc(xp.asarray([1, 2]))
A test function can circumvent this monkey-patching system by using a namespace
outside of the two above patterns. You need to sanitize your code to make sure this
only happens intentionally.
Example 1::
import mymodule
from mymodule import myfunc
lazy_xp_function(myfunc)
def test_myfunc(xp):
a = xp.asarray([1, 2])
b = myfunc(a) # This is wrapped when xp=jax.numpy or xp=dask.array
c = mymodule.myfunc(a) # This is not
Example 2::
import mymodule
class naked:
myfunc = mymodule.myfunc
lazy_xp_modules = [mymodule]
lazy_xp_function(mymodule.myfunc)
def test_myfunc(xp):
a = xp.asarray([1, 2])
b = mymodule.myfunc(a) # This is wrapped when xp=jax.numpy or xp=dask.array
c = naked.myfunc(a) # This is not
"""
tags = {
"allow_dask_compute": allow_dask_compute,
"jax_jit": jax_jit,
"static_argnums": static_argnums,
"static_argnames": static_argnames,
}
try:
func._lazy_xp_function = tags # type: ignore[attr-defined] # pylint: disable=protected-access # pyright: ignore[reportFunctionMemberAccess]
except AttributeError: # @cython.vectorize
_ufuncs_tags[func] = tags
# Source: https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_extra/testing.py (BSD-3-Clause)
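# Illustration (not from the upstream file): the tag is nothing more than a
# dict stored on the function, later read by patch_lazy_xp_functions(). A
# minimal sketch, assuming array_api_extra is installed:
from array_api_extra.testing import lazy_xp_function

def myfunc(a):
    return a + 1

lazy_xp_function(myfunc, allow_dask_compute=1, jax_jit=False)
print(myfunc._lazy_xp_function)
# {'allow_dask_compute': 1, 'jax_jit': False,
#  'static_argnums': None, 'static_argnames': None}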
def patch_lazy_xp_functions(
request: pytest.FixtureRequest, monkeypatch: pytest.MonkeyPatch, *, xp: ModuleType
) -> None:
"""
Test lazy execution of functions tagged with :func:`lazy_xp_function`.
If ``xp==jax.numpy``, search for all functions which have been tagged with
:func:`lazy_xp_function` in the globals of the module that defines the current test,
as well as in the ``lazy_xp_modules`` list in the globals of the same module,
and wrap them with :func:`jax.jit`. Unwrap them at the end of the test.
If ``xp==dask.array``, wrap the functions with a decorator that disables
``compute()`` and ``persist()`` and ensures that exceptions and warnings are raised
eagerly.
This function should typically be called by your library's `xp` fixture that runs
tests on multiple backends::
@pytest.fixture(params=[numpy, array_api_strict, jax.numpy, dask.array])
def xp(request, monkeypatch):
patch_lazy_xp_functions(request, monkeypatch, xp=request.param)
return request.param
but it can otherwise be called by the test itself too.
Parameters
----------
request : pytest.FixtureRequest
Pytest fixture, as acquired by the test itself or by one of its fixtures.
monkeypatch : pytest.MonkeyPatch
Pytest fixture, as acquired by the test itself or by one of its fixtures.
xp : array_namespace
Array namespace to be tested.
See Also
--------
lazy_xp_function : Tag a function to be tested on lazy backends.
pytest.FixtureRequest : `request` test function parameter.
"""
mod = cast(ModuleType, request.module)
mods = [mod, *cast(list[ModuleType], getattr(mod, "lazy_xp_modules", []))]
def iter_tagged() -> ( # type: ignore[explicit-any]
Iterator[tuple[ModuleType, str, Callable[..., Any], dict[str, Any]]]
):
for mod in mods:
for name, func in mod.__dict__.items():
tags: dict[str, Any] | None = None # type: ignore[explicit-any]
with contextlib.suppress(AttributeError):
tags = func._lazy_xp_function # pylint: disable=protected-access
if tags is None:
with contextlib.suppress(KeyError, TypeError):
tags = _ufuncs_tags[func]
if tags is not None:
yield mod, name, func, tags
if is_dask_namespace(xp):
for mod, name, func, tags in iter_tagged():
n = tags["allow_dask_compute"]
wrapped = _dask_wrap(func, n)
monkeypatch.setattr(mod, name, wrapped)
elif is_jax_namespace(xp):
import jax
for mod, name, func, tags in iter_tagged():
if tags["jax_jit"]:
# suppress unused-ignore to run mypy in -e lint as well as -e dev
wrapped = cast( # type: ignore[explicit-any]
Callable[..., Any],
jax.jit(
func,
static_argnums=tags["static_argnums"],
static_argnames=tags["static_argnames"],
),
)
monkeypatch.setattr(mod, name, wrapped)
# Source: https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_extra/testing.py (BSD-3-Clause)
def _dask_wrap(
func: Callable[P, T], n: int
) -> Callable[P, T]: # numpydoc ignore=PR01,RT01
"""
Wrap `func` to raise if it attempts to call `dask.compute` more than `n` times.
After the function returns, materialize the graph in order to re-raise exceptions.
"""
import dask
func_name = getattr(func, "__name__", str(func))
n_str = f"only up to {n}" if n else "no"
msg = (
f"Called `dask.compute()` or `dask.persist()` {n + 1} times, "
f"but {n_str} calls are allowed. Set "
f"`lazy_xp_function({func_name}, allow_dask_compute={n + 1})` "
"to allow for more (but note that this will harm performance). "
)
@wraps(func)
def wrapper(*args: P.args, **kwargs: P.kwargs) -> T: # numpydoc ignore=GL08
scheduler = CountingDaskScheduler(n, msg)
with dask.config.set({"scheduler": scheduler}): # pyright: ignore[reportPrivateImportUsage]
out = func(*args, **kwargs)
# Block until the graph materializes and reraise exceptions. This allows
# `pytest.raises` and `pytest.warns` to work as expected. Note that this would
# not work on scheduler='distributed', as it would not block.
return dask.persist(out, scheduler="threads")[0] # type: ignore[attr-defined,no-untyped-call,func-returns-value,index] # pyright: ignore[reportPrivateImportUsage]
return wrapper
# Source: https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_extra/testing.py (BSD-3-Clause)
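# Illustration (not from the upstream file): CountingDaskScheduler is internal,
# but the idea can be sketched with public Dask APIs only. A Dask scheduler is
# any callable taking a graph and keys; this one counts materializations and
# fails past a limit (CountingScheduler is a hypothetical name).
import dask

class CountingScheduler:
    def __init__(self, max_count, msg):
        self.count = 0
        self.max_count = max_count
        self.msg = msg

    def __call__(self, dsk, keys, **kwargs):
        # Every compute()/persist() lands here once the scheduler is installed.
        self.count += 1
        if self.count > self.max_count:
            raise AssertionError(self.msg)
        return dask.get(dsk, keys, **kwargs)  # delegate to the synchronous scheduler

# Usage sketch, mirroring the dask.config.set() call in _dask_wrap above:
# with dask.config.set(scheduler=CountingScheduler(0, "compute not allowed")):
#     ...  # any graph materialization inside this block raises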
def isclose(
a: Array | complex,
b: Array | complex,
*,
rtol: float = 1e-05,
atol: float = 1e-08,
equal_nan: bool = False,
xp: ModuleType | None = None,
) -> Array:
"""
Return a boolean array where two arrays are element-wise equal within a tolerance.
The tolerance values are positive, typically very small numbers. The relative
difference ``(rtol * abs(b))`` and the absolute difference `atol` are added together
to compare against the absolute difference between `a` and `b`.
NaNs are treated as equal if they are in the same place and if ``equal_nan=True``.
Infs are treated as equal if they are in the same place and of the same sign in both
arrays.
Parameters
----------
a, b : Array | int | float | complex | bool
Input objects to compare. At least one must be an array.
rtol : array_like, optional
The relative tolerance parameter (see Notes).
atol : array_like, optional
The absolute tolerance parameter (see Notes).
equal_nan : bool, optional
Whether to compare NaNs as equal. If True, NaNs in `a` will be considered
equal to NaNs in `b` in the output array.
xp : array_namespace, optional
The standard-compatible namespace for `a` and `b`. Default: infer.
Returns
-------
Array
A boolean array of shape broadcasted from `a` and `b`, containing ``True`` where
`a` is close to `b`, and ``False`` otherwise.
Warnings
--------
The default `atol` is not appropriate for comparing numbers with magnitudes much
smaller than one (see notes).
See Also
--------
math.isclose : Similar function in stdlib for Python scalars.
Notes
-----
For finite values, `isclose` uses the following equation to test whether two
floating point values are equivalent::
absolute(a - b) <= (atol + rtol * absolute(b))
Unlike the built-in `math.isclose`,
the above equation is not symmetric in `a` and `b`,
so that ``isclose(a, b)`` might be different from ``isclose(b, a)`` in some rare
cases.
The default value of `atol` is not appropriate when the reference value `b` has
magnitude smaller than one. For example, it is unlikely that ``a = 1e-9`` and
``b = 2e-9`` should be considered "close", yet ``isclose(1e-9, 2e-9)`` is ``True``
with default settings. Be sure to select `atol` for the use case at hand, especially
for defining the threshold below which a non-zero value in `a` will be considered
"close" to a very small or zero value in `b`.
The comparison of `a` and `b` uses standard broadcasting, which means that `a` and
`b` need not have the same shape in order for ``isclose(a, b)`` to evaluate to
``True``.
`isclose` is not defined for non-numeric data types.
``bool`` is considered a numeric data-type for this purpose.
"""
xp = array_namespace(a, b) if xp is None else xp
if _delegate(xp, Backend.NUMPY, Backend.CUPY, Backend.DASK, Backend.JAX):
return xp.isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)
if _delegate(xp, Backend.TORCH):
a, b = asarrays(a, b, xp=xp) # Array API 2024.12 support
return xp.isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)
return _funcs.isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan, xp=xp)
# Source: https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_extra/_delegation.py (BSD-3-Clause)
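# Illustration (not from the upstream file): the asymmetry noted in the
# docstring above, with exaggerated tolerances to make it visible. Assumes a
# NumPy backend.
import numpy as np
import array_api_extra as xpx

a = np.asarray(1.0)
b = np.asarray(2.0)
print(bool(xpx.isclose(a, b, rtol=0.5, atol=0.0)))  # |a-b| = 1 <= 0.5*|b| = 1.0 -> True
print(bool(xpx.isclose(b, a, rtol=0.5, atol=0.0)))  # |b-a| = 1 >  0.5*|a| = 0.5 -> False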
def pad(
x: Array,
pad_width: int | tuple[int, int] | Sequence[tuple[int, int]],
mode: Literal["constant"] = "constant",
*,
constant_values: complex = 0,
xp: ModuleType | None = None,
) -> Array:
"""
Pad the input array.
Parameters
----------
x : array
Input array.
pad_width : int or tuple of ints or sequence of pairs of ints
Pad the input array with this many elements from each side.
If a sequence of tuples, ``[(before_0, after_0), ... (before_N, after_N)]``,
each pair applies to the corresponding axis of ``x``.
A single tuple, ``(before, after)``, is equivalent to a list of ``x.ndim``
copies of this tuple.
mode : str, optional
Only "constant" mode is currently supported, which pads with
the value passed to `constant_values`.
constant_values : python scalar, optional
Use this value to pad the input. Default is zero.
xp : array_namespace, optional
The standard-compatible namespace for `x`. Default: infer.
Returns
-------
array
The input array,
padded with ``pad_width`` elements equal to ``constant_values``.
"""
xp = array_namespace(x) if xp is None else xp
if mode != "constant":
msg = "Only `'constant'` mode is currently supported"
raise NotImplementedError(msg)
# https://github.com/pytorch/pytorch/blob/cf76c05b4dc629ac989d1fb8e789d4fac04a095a/torch/_numpy/_funcs_impl.py#L2045-L2056
if _delegate(xp, Backend.TORCH):
pad_width = xp.asarray(pad_width)
pad_width = xp.broadcast_to(pad_width, (x.ndim, 2))
pad_width = xp.flip(pad_width, axis=(0,)).flatten()
return xp.nn.functional.pad(x, tuple(pad_width), value=constant_values) # type: ignore[arg-type] # pyright: ignore[reportArgumentType]
if _delegate(xp, Backend.NUMPY, Backend.JAX, Backend.CUPY, Backend.SPARSE):
return xp.pad(x, pad_width, mode, constant_values=constant_values)
return _funcs.pad(x, pad_width, constant_values=constant_values, xp=xp)
# Source: https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_extra/_delegation.py (BSD-3-Clause)
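# Illustration (not from the upstream file): why the Torch branch flips the pad
# widths. torch.nn.functional.pad lists widths starting from the *last* axis,
# while the array-API convention starts from the first. Assumes PyTorch.
import torch

pad_width = torch.asarray([(1, 2), (3, 4)])  # [(before_0, after_0), (before_1, after_1)]
flat = torch.flip(pad_width, dims=(0,)).flatten().tolist()  # [3, 4, 1, 2]
x = torch.zeros((2, 2))
print(torch.nn.functional.pad(x, tuple(flat)).shape)  # torch.Size([5, 9])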
def __getitem__(self, idx: SetIndex, /) -> Self: # numpydoc ignore=PR01,RT01
"""
Allow for the alternate syntax ``at(x)[start:stop:step]``.
It looks prettier than ``at(x, slice(start, stop, step))``
and feels more intuitive coming from the JAX documentation.
"""
if self._idx is not _undef:
msg = "Index has already been set"
raise ValueError(msg)
return type(self)(self._x, idx)
# Source: https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_extra/_lib/_at.py (BSD-3-Clause)
def _op(
self,
at_op: _AtOp,
in_place_op: Callable[[Array, Array | complex], Array] | None,
out_of_place_op: Callable[[Array, Array], Array] | None,
y: Array | complex,
/,
copy: bool | None,
xp: ModuleType | None,
) -> Array:
"""
Implement all update operations.
Parameters
----------
at_op : _AtOp
Method of JAX's Array.at[].
in_place_op : Callable[[Array, Array | complex], Array] | None
In-place operation to apply on mutable backends::
x[idx] = in_place_op(x[idx], y)
If None::
x[idx] = y
out_of_place_op : Callable[[Array, Array], Array] | None
Out-of-place operation to apply when idx is a boolean mask and the backend
doesn't support in-place updates::
x = xp.where(idx, out_of_place_op(x, y), x)
If None::
x = xp.where(idx, y, x)
y : array or complex
Right-hand side of the operation.
copy : bool or None
Whether to copy the input array. See the class docstring for details.
xp : array_namespace, optional
The array namespace for the input array. Default: infer.
Returns
-------
Array
Updated `x`.
"""
from ._funcs import apply_where # pylint: disable=cyclic-import
x, idx = self._x, self._idx
xp = array_namespace(x, y) if xp is None else xp
if isinstance(idx, Undef):
msg = (
"Index has not been set.\n"
"Usage: either\n"
" at(x, idx).set(value)\n"
"or\n"
" at(x)[idx].set(value)\n"
"(same for all other methods)."
)
raise ValueError(msg)
if copy not in (True, False, None):
msg = f"copy must be True, False, or None; got {copy!r}"
raise ValueError(msg)
writeable = None if copy else is_writeable_array(x)
# JAX inside jax.jit doesn't support in-place updates with boolean
# masks; Dask supports __setitem__ but not in-place operators (iops).
# We can handle the common special case of 0-dimensional y
# with where(idx, y, x) instead.
if (
(is_dask_array(idx) or is_jax_array(idx))
and idx.dtype == xp.bool
and idx.shape == x.shape
):
y_xp = xp.asarray(y, dtype=x.dtype)
if y_xp.ndim == 0:
if out_of_place_op: # add(), subtract(), ...
# suppress inf warnings on Dask
out = apply_where(
idx, (x, y_xp), out_of_place_op, fill_value=x, xp=xp
)
# Undo int->float promotion on JAX after _AtOp.DIVIDE
out = xp.astype(out, x.dtype, copy=False)
else: # set()
out = xp.where(idx, y_xp, x)
if copy is False:
x[()] = out
return x
return out
# else: this will work on eager JAX and crash on jax.jit and Dask
if copy or (copy is None and not writeable):
if is_jax_array(x):
# Use JAX's at[]
func = cast(
Callable[[Array | complex], Array],
getattr(x.at[idx], at_op.value), # type: ignore[attr-defined] # pyright: ignore[reportAttributeAccessIssue,reportUnknownArgumentType]
)
out = func(y)
# Undo int->float promotion on JAX after _AtOp.DIVIDE
return xp.astype(out, x.dtype, copy=False)
# Emulate at[] behaviour for non-JAX arrays
# with a copy followed by an update
x = xp.asarray(x, copy=True)
# A copy of a read-only numpy array is writeable
# Note: this assumes that a copy of a writeable array is writeable
assert not writeable
writeable = None
if writeable is None:
writeable = is_writeable_array(x)
if not writeable:
# sparse crashes here
msg = f"Can't update read-only array {x}"
raise ValueError(msg)
if in_place_op: # add(), subtract(), ...
x[idx] = in_place_op(x[idx], y)
else: # set()
x[idx] = y
return x
# Source: https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_extra/_lib/_at.py (BSD-3-Clause)
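# Illustration (not from the upstream file): the copy/in-place semantics that
# _op() implements, exercised via the public at() wrapper on a writeable NumPy
# array.
import numpy as np
from array_api_extra import at

x = np.asarray([10.0, 20.0, 30.0])
y = at(x, x > 15.0).add(1.0, copy=True)  # out-of-place: x is untouched
print(y)                                 # [10. 21. 31.]
z = at(x)[0].set(99.0, copy=False)       # in-place on a writeable backend
print(x)                                 # [99. 20. 30.]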
def set(
self,
y: Array | complex,
/,
copy: bool | None = None,
xp: ModuleType | None = None,
) -> Array: # numpydoc ignore=PR01,RT01
"""Apply ``x[idx] = y`` and return the update array."""
return self._op(_AtOp.SET, None, None, y, copy=copy, xp=xp)
# Source: https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_extra/_lib/_at.py (BSD-3-Clause)
def add(
self,
y: Array | complex,
/,
copy: bool | None = None,
xp: ModuleType | None = None,
) -> Array: # numpydoc ignore=PR01,RT01
"""Apply ``x[idx] += y`` and return the updated array."""
# Note for this and all other methods based on _iop:
# operator.iadd and operator.add subtly differ in behaviour, as
# only iadd will trigger exceptions when y has an incompatible dtype.
return self._op(_AtOp.ADD, operator.iadd, operator.add, y, copy=copy, xp=xp)
# Source: https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_extra/_lib/_at.py (BSD-3-Clause)
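# Illustration (not from the upstream file): the iadd/add difference mentioned
# in the note inside add() above, observable directly in NumPy. The exact
# exception type may vary with the NumPy version.
import numpy as np

a = np.arange(3)   # integer dtype
print(a + 0.5)     # out-of-place add promotes: [0.5 1.5 2.5]
try:
    a += 0.5       # in-place iadd can't cast float64 back to the integer dtype
except Exception as e:
    print(type(e).__name__)  # e.g. _UFuncOutputCastingError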
def subtract(
self,
y: Array | complex,
/,
copy: bool | None = None,
xp: ModuleType | None = None,
) -> Array: # numpydoc ignore=PR01,RT01
"""Apply ``x[idx] -= y`` and return the updated array."""
return self._op(
_AtOp.SUBTRACT, operator.isub, operator.sub, y, copy=copy, xp=xp
)
# Source: https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_extra/_lib/_at.py (BSD-3-Clause)
def multiply(
self,
y: Array | complex,
/,
copy: bool | None = None,
xp: ModuleType | None = None,
) -> Array: # numpydoc ignore=PR01,RT01
"""Apply ``x[idx] *= y`` and return the updated array."""
return self._op(
_AtOp.MULTIPLY, operator.imul, operator.mul, y, copy=copy, xp=xp
)
# Source: https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_extra/_lib/_at.py (BSD-3-Clause)
def divide(
self,
y: Array | complex,
/,
copy: bool | None = None,
xp: ModuleType | None = None,
) -> Array: # numpydoc ignore=PR01,RT01
"""Apply ``x[idx] /= y`` and return the updated array."""
return self._op(
_AtOp.DIVIDE, operator.itruediv, operator.truediv, y, copy=copy, xp=xp
)
# Source: https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_extra/_lib/_at.py (BSD-3-Clause)
def power(
self,
y: Array | complex,
/,
copy: bool | None = None,
xp: ModuleType | None = None,
) -> Array: # numpydoc ignore=PR01,RT01
"""Apply ``x[idx] **= y`` and return the updated array."""
return self._op(_AtOp.POWER, operator.ipow, operator.pow, y, copy=copy, xp=xp)
# Source: https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_extra/_lib/_at.py (BSD-3-Clause)
def min(
self,
y: Array | complex,
/,
copy: bool | None = None,
xp: ModuleType | None = None,
) -> Array: # numpydoc ignore=PR01,RT01
"""Apply ``x[idx] = minimum(x[idx], y)`` and return the updated array."""
# On Dask, this function runs on the chunks, so we need to determine the
# namespace that Dask is wrapping.
# Note that da.minimum _incidentally_ works on NumPy, CuPy, and sparse
# thanks to all these meta-namespaces implementing the __array_ufunc__
# interface, but there's no guarantee that it will work for other
# wrapped libraries in the future.
xp = array_namespace(self._x) if xp is None else xp
mxp = meta_namespace(self._x, xp=xp)
y = xp.asarray(y)
return self._op(_AtOp.MIN, mxp.minimum, mxp.minimum, y, copy=copy, xp=xp)
# Source: https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_extra/_lib/_at.py (BSD-3-Clause)
def max(
self,
y: Array | complex,
/,
copy: bool | None = None,
xp: ModuleType | None = None,
) -> Array: # numpydoc ignore=PR01,RT01
"""Apply ``x[idx] = maximum(x[idx], y)`` and return the updated array."""
# See note on min()
xp = array_namespace(self._x) if xp is None else xp
mxp = meta_namespace(self._x, xp=xp)
y = xp.asarray(y)
return self._op(_AtOp.MAX, mxp.maximum, mxp.maximum, y, copy=copy, xp=xp)
# Source: https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_extra/_lib/_at.py (BSD-3-Clause)
def apply_where( # type: ignore[explicit-any] # numpydoc ignore=PR01,PR02
cond: Array,
args: Array | tuple[Array, ...],
f1: Callable[..., Array],
f2: Callable[..., Array] | None = None,
/,
*,
fill_value: Array | complex | None = None,
xp: ModuleType | None = None,
) -> Array:
"""
Run one of two elementwise functions depending on a condition.
Equivalent to ``f1(*args) if cond else fill_value`` performed elementwise
when `fill_value` is defined, otherwise to ``f1(*args) if cond else f2(*args)``.
Parameters
----------
cond : array
The condition, expressed as a boolean array.
args : Array or tuple of Arrays
Argument(s) to `f1` (and `f2`). Must be broadcastable with `cond`.
f1 : callable
Elementwise function of `args`, returning a single array.
Where `cond` is True, output will be ``f1(arg0[cond], arg1[cond], ...)``.
f2 : callable, optional
Elementwise function of `args`, returning a single array.
Where `cond` is False, output will be ``f2(arg0[cond], arg1[cond], ...)``.
Mutually exclusive with `fill_value`.
fill_value : Array or scalar, optional
If provided, value with which to fill output array where `cond` is False.
It does not need to be a scalar; however, it must be broadcastable with
`cond` and `args`.
Mutually exclusive with `f2`. You must provide one or the other.
xp : array_namespace, optional
The standard-compatible namespace for `cond` and `args`. Default: infer.
Returns
-------
Array
An array with elements from the output of `f1` where `cond` is True and either
the output of `f2` or `fill_value` where `cond` is False. The returned array has
data type determined by type promotion rules between the output of `f1` and
either `fill_value` or the output of `f2`.
Notes
-----
``xp.where(cond, f1(*args), f2(*args))`` requires explicitly evaluating `f1` even
when `cond` is False, and `f2` when cond is True. This function evaluates each
function only for their matching condition, if the backend allows for it.
On Dask, `f1` and `f2` are applied to the individual chunks and should use functions
from the namespace of the chunks.
Examples
--------
>>> import array_api_strict as xp
>>> import array_api_extra as xpx
>>> a = xp.asarray([5, 4, 3])
>>> b = xp.asarray([0, 2, 2])
>>> def f(a, b):
... return a // b
>>> xpx.apply_where(b != 0, (a, b), f, fill_value=xp.nan)
array([ nan, 2., 1.])
"""
# Parse and normalize arguments
if (f2 is None) == (fill_value is None):
msg = "Exactly one of `fill_value` or `f2` must be given."
raise TypeError(msg)
args_ = list(args) if isinstance(args, tuple) else [args]
del args
xp = array_namespace(cond, fill_value, *args_) if xp is None else xp
if isinstance(fill_value, int | float | complex | NoneType):
cond, *args_ = xp.broadcast_arrays(cond, *args_)
else:
cond, fill_value, *args_ = xp.broadcast_arrays(cond, fill_value, *args_)
if is_dask_namespace(xp):
meta_xp = meta_namespace(cond, fill_value, *args_, xp=xp)
# map_blocks doesn't descend into tuples of Arrays
return xp.map_blocks(_apply_where, cond, f1, f2, fill_value, *args_, xp=meta_xp)
return _apply_where(cond, f1, f2, fill_value, *args_, xp=xp)
# Source: https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_extra/_lib/_funcs.py (BSD-3-Clause)
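# Illustration (not from the upstream file): the two-function form of
# apply_where, complementing the fill_value example in the docstring above.
import array_api_strict as xp
import array_api_extra as xpx

a = xp.asarray([5, 4, 3])
b = xp.asarray([0, 2, 2])
# f1 runs only where cond is True, f2 only where it is False:
out = xpx.apply_where(b != 0, (a, b), lambda a, b: a // b, lambda a, b: -a)
print(out)  # [-5, 2, 1]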
def _apply_where( # type: ignore[explicit-any] # numpydoc ignore=PR01,RT01
cond: Array,
f1: Callable[..., Array],
f2: Callable[..., Array] | None,
fill_value: Array | int | float | complex | bool | None,
*args: Array,
xp: ModuleType,
) -> Array:
"""Helper of `apply_where`. On Dask, this runs on a single chunk."""
if is_jax_namespace(xp):
# jax.jit does not support assignment by boolean mask
return xp.where(cond, f1(*args), f2(*args) if f2 is not None else fill_value)
temp1 = f1(*(arr[cond] for arr in args))
if f2 is None:
dtype = xp.result_type(temp1, fill_value)
if isinstance(fill_value, int | float | complex):
out = xp.full_like(cond, dtype=dtype, fill_value=fill_value)
else:
out = xp.astype(fill_value, dtype, copy=True)
else:
ncond = ~cond
temp2 = f2(*(arr[ncond] for arr in args))
dtype = xp.result_type(temp1, temp2)
out = xp.empty_like(cond, dtype=dtype)
out = at(out, ncond).set(temp2)
return at(out, cond).set(temp1)
# Source: https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_extra/_lib/_funcs.py (BSD-3-Clause)
def atleast_nd(x: Array, /, *, ndim: int, xp: ModuleType | None = None) -> Array:
"""
Recursively expand the dimension of an array to at least `ndim`.
Parameters
----------
x : array
Input array.
ndim : int
The minimum number of dimensions for the result.
xp : array_namespace, optional
The standard-compatible namespace for `x`. Default: infer.
Returns
-------
array
An array with ``res.ndim`` >= `ndim`.
If ``x.ndim`` >= `ndim`, `x` is returned.
If ``x.ndim`` < `ndim`, `x` is expanded by prepending new axes
until ``res.ndim`` equals `ndim`.
Examples
--------
>>> import array_api_strict as xp
>>> import array_api_extra as xpx
>>> x = xp.asarray([1])
>>> xpx.atleast_nd(x, ndim=3, xp=xp)
Array([[[1]]], dtype=array_api_strict.int64)
>>> x = xp.asarray([[[1, 2],
... [3, 4]]])
>>> xpx.atleast_nd(x, ndim=1, xp=xp) is x
True
"""
if xp is None:
xp = array_namespace(x)
if x.ndim < ndim:
x = xp.expand_dims(x, axis=0)
x = atleast_nd(x, ndim=ndim, xp=xp)
return x
# Source: https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_extra/_lib/_funcs.py (BSD-3-Clause)
def broadcast_shapes(*shapes: tuple[float | None, ...]) -> tuple[int | None, ...]:
"""
Compute the shape of the broadcasted arrays.
Duplicates :func:`numpy.broadcast_shapes`, with additional support for
None and NaN sizes.
This is equivalent to ``xp.broadcast_arrays(arr1, arr2, ...)[0].shape``
without needing to worry about the backend potentially deep copying
the arrays.
Parameters
----------
*shapes : tuple[int | None, ...]
Shapes of the arrays to broadcast.
Returns
-------
tuple[int | None, ...]
The shape of the broadcasted arrays.
See Also
--------
numpy.broadcast_shapes : Equivalent NumPy function.
array_api.broadcast_arrays : Function to broadcast actual arrays.
Notes
-----
This function accepts the Array API's ``None`` for unknown sizes,
as well as Dask's non-standard ``math.nan``.
Regardless of input, the output always contains ``None`` for unknown sizes.
Examples
--------
>>> import array_api_extra as xpx
>>> xpx.broadcast_shapes((2, 3), (2, 1))
(2, 3)
>>> xpx.broadcast_shapes((4, 2, 3), (2, 1), (1, 3))
(4, 2, 3)
"""
if not shapes:
return () # Match NumPy output
ndim = max(len(shape) for shape in shapes)
out: list[int | None] = []
for axis in range(-ndim, 0):
sizes = {shape[axis] for shape in shapes if axis >= -len(shape)}
# Dask uses NaN for unknown shape, which predates the Array API spec for None
none_size = None in sizes or math.nan in sizes
sizes -= {1, None, math.nan}
if len(sizes) > 1:
msg = (
"shape mismatch: objects cannot be broadcast to a single shape: "
f"{shapes}."
)
raise ValueError(msg)
out.append(None if none_size else cast(int, sizes.pop()) if sizes else 1)
return tuple(out)
# Source: https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_extra/_lib/_funcs.py (BSD-3-Clause)
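# Illustration (not from the upstream file): the unknown-size handling from the
# Notes above. math.nan stands in for Dask's placeholder; the output always
# normalizes it to None.
import math
import array_api_extra as xpx

print(xpx.broadcast_shapes((2, None), (2, 1)))      # (2, None)
print(xpx.broadcast_shapes((math.nan, 3), (4, 1)))  # (None, 3)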
def cov(m: Array, /, *, xp: ModuleType | None = None) -> Array:
"""
Estimate a covariance matrix.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element :math:`C_{ij}` is the covariance of
:math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
of :math:`x_i`.
This provides a subset of the functionality of ``numpy.cov``.
Parameters
----------
m : array
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables.
xp : array_namespace, optional
The standard-compatible namespace for `m`. Default: infer.
Returns
-------
array
The covariance matrix of the variables.
Examples
--------
>>> import array_api_strict as xp
>>> import array_api_extra as xpx
Consider two variables, :math:`x_0` and :math:`x_1`, which
correlate perfectly, but in opposite directions:
>>> x = xp.asarray([[0, 2], [1, 1], [2, 0]]).T
>>> x
Array([[0, 1, 2],
[2, 1, 0]], dtype=array_api_strict.int64)
Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance
matrix shows this clearly:
>>> xpx.cov(x, xp=xp)
Array([[ 1., -1.],
[-1., 1.]], dtype=array_api_strict.float64)
Note that element :math:`C_{0,1}`, which shows the correlation between
:math:`x_0` and :math:`x_1`, is negative.
Further, note how `x` and `y` are combined:
>>> x = xp.asarray([-2.1, -1, 4.3])
>>> y = xp.asarray([3, 1.1, 0.12])
>>> X = xp.stack((x, y), axis=0)
>>> xpx.cov(X, xp=xp)
Array([[11.71 , -4.286 ],
[-4.286 , 2.14413333]], dtype=array_api_strict.float64)
>>> xpx.cov(x, xp=xp)
Array(11.71, dtype=array_api_strict.float64)
>>> xpx.cov(y, xp=xp)
Array(2.14413333, dtype=array_api_strict.float64)
"""
if xp is None:
xp = array_namespace(m)
m = xp.asarray(m, copy=True)
dtype = (
xp.float64 if xp.isdtype(m.dtype, "integral") else xp.result_type(m, xp.float64)
)
m = atleast_nd(m, ndim=2, xp=xp)
m = xp.astype(m, dtype)
avg = _helpers.mean(m, axis=1, xp=xp)
m_shape = eager_shape(m)
fact = m_shape[1] - 1
if fact <= 0:
warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning, stacklevel=2)
fact = 0
m -= avg[:, None]
m_transpose = m.T
if xp.isdtype(m_transpose.dtype, "complex floating"):
m_transpose = xp.conj(m_transpose)
c = m @ m_transpose
c /= fact
axes = tuple(axis for axis, length in enumerate(c.shape) if length == 1)
return xp.squeeze(c, axis=axes)
# Source: https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_extra/_lib/_funcs.py (BSD-3-Clause)
def create_diagonal(
x: Array, /, *, offset: int = 0, xp: ModuleType | None = None
) -> Array:
"""
Construct a diagonal array.
Parameters
----------
x : array
An array having shape ``(*batch_dims, k)``.
offset : int, optional
Offset from the leading diagonal (default is ``0``).
Use positive ints for diagonals above the leading diagonal,
and negative ints for diagonals below the leading diagonal.
xp : array_namespace, optional
The standard-compatible namespace for `x`. Default: infer.
Returns
-------
array
An array having shape ``(*batch_dims, k+abs(offset), k+abs(offset))`` with `x`
on the diagonal (offset by `offset`).
Examples
--------
>>> import array_api_strict as xp
>>> import array_api_extra as xpx
>>> x = xp.asarray([2, 4, 8])
>>> xpx.create_diagonal(x, xp=xp)
Array([[2, 0, 0],
[0, 4, 0],
[0, 0, 8]], dtype=array_api_strict.int64)
>>> xpx.create_diagonal(x, offset=-2, xp=xp)
Array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[2, 0, 0, 0, 0],
[0, 4, 0, 0, 0],
[0, 0, 8, 0, 0]], dtype=array_api_strict.int64)
"""
if xp is None:
xp = array_namespace(x)
if x.ndim == 0:
err_msg = "`x` must be at least 1-dimensional."
raise ValueError(err_msg)
x_shape = eager_shape(x)
batch_dims = x_shape[:-1]
n = x_shape[-1] + abs(offset)
diag = xp.zeros((*batch_dims, n**2), dtype=x.dtype, device=_compat.device(x))
target_slice = slice(
offset if offset >= 0 else abs(offset) * n,
min(n * (n - offset), diag.shape[-1]),
n + 1,
)
for index in ndindex(*batch_dims):
diag = at(diag)[(*index, target_slice)].set(x[(*index, slice(None))])
return xp.reshape(diag, (*batch_dims, n, n))
|
Construct a diagonal array.
Parameters
----------
x : array
An array having shape ``(*batch_dims, k)``.
offset : int, optional
Offset from the leading diagonal (default is ``0``).
Use positive ints for diagonals above the leading diagonal,
and negative ints for diagonals below the leading diagonal.
xp : array_namespace, optional
The standard-compatible namespace for `x`. Default: infer.
Returns
-------
array
An array having shape ``(*batch_dims, k+abs(offset), k+abs(offset))`` with `x`
on the diagonal (offset by `offset`).
Examples
--------
>>> import array_api_strict as xp
>>> import array_api_extra as xpx
>>> x = xp.asarray([2, 4, 8])
>>> xpx.create_diagonal(x, xp=xp)
Array([[2, 0, 0],
[0, 4, 0],
[0, 0, 8]], dtype=array_api_strict.int64)
>>> xpx.create_diagonal(x, offset=-2, xp=xp)
Array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[2, 0, 0, 0, 0],
[0, 4, 0, 0, 0],
[0, 0, 8, 0, 0]], dtype=array_api_strict.int64)
|
create_diagonal
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_extra/_lib/_funcs.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_extra/_lib/_funcs.py
|
BSD-3-Clause
|
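A short NumPy sketch of the flattened-index trick `create_diagonal` relies on: in a row-major (n, n) matrix, consecutive entries of the diagonal at a given offset sit exactly n + 1 apart in the flattened buffer. The helper name below is illustrative only:

import numpy as np

def diagonal_sketch(x, offset=0):
    k = x.shape[-1]
    n = k + abs(offset)
    flat = np.zeros(n * n, dtype=x.dtype)
    start = offset if offset >= 0 else -offset * n  # first flat index on the diagonal
    flat[start : n * (n - offset) : n + 1] = x      # stride n + 1 walks the diagonal
    return flat.reshape(n, n)

print(diagonal_sketch(np.array([2, 4, 8]), offset=-2))  # matches the example above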
def expand_dims(
a: Array, /, *, axis: int | tuple[int, ...] = (0,), xp: ModuleType | None = None
) -> Array:
"""
Expand the shape of an array.
Insert (a) new axis/axes that will appear at the position(s) specified by
`axis` in the expanded array shape.
This is ``xp.expand_dims`` for `axis` an int *or a tuple of ints*.
Roughly equivalent to ``numpy.expand_dims`` for NumPy arrays.
Parameters
----------
a : array
Array to have its shape expanded.
axis : int or tuple of ints, optional
Position(s) in the expanded axes where the new axis (or axes) is/are placed.
If multiple positions are provided, they should be unique (note that a position
given by a positive index could also be referred to by a negative index -
that will also result in an error).
Default: ``(0,)``.
xp : array_namespace, optional
The standard-compatible namespace for `a`. Default: infer.
Returns
-------
array
`a` with an expanded shape.
Examples
--------
>>> import array_api_strict as xp
>>> import array_api_extra as xpx
>>> x = xp.asarray([1, 2])
>>> x.shape
(2,)
The following is equivalent to ``x[xp.newaxis, :]`` or ``x[xp.newaxis]``:
>>> y = xpx.expand_dims(x, axis=0, xp=xp)
>>> y
Array([[1, 2]], dtype=array_api_strict.int64)
>>> y.shape
(1, 2)
The following is equivalent to ``x[:, xp.newaxis]``:
>>> y = xpx.expand_dims(x, axis=1, xp=xp)
>>> y
Array([[1],
[2]], dtype=array_api_strict.int64)
>>> y.shape
(2, 1)
``axis`` may also be a tuple:
>>> y = xpx.expand_dims(x, axis=(0, 1), xp=xp)
>>> y
Array([[[1, 2]]], dtype=array_api_strict.int64)
>>> y = xpx.expand_dims(x, axis=(2, 0), xp=xp)
>>> y
Array([[[1],
[2]]], dtype=array_api_strict.int64)
"""
if xp is None:
xp = array_namespace(a)
if not isinstance(axis, tuple):
axis = (axis,)
ndim = a.ndim + len(axis)
if axis != () and (min(axis) < -ndim or max(axis) >= ndim):
err_msg = (
f"a provided axis position is out of bounds for array of dimension {a.ndim}"
)
raise IndexError(err_msg)
axis = tuple(dim % ndim for dim in axis)
if len(set(axis)) != len(axis):
err_msg = "Duplicate dimensions specified in `axis`."
raise ValueError(err_msg)
for i in sorted(axis):
a = xp.expand_dims(a, axis=i)
return a
|
Expand the shape of an array.
Insert (a) new axis/axes that will appear at the position(s) specified by
`axis` in the expanded array shape.
This is ``xp.expand_dims`` for `axis` an int *or a tuple of ints*.
Roughly equivalent to ``numpy.expand_dims`` for NumPy arrays.
Parameters
----------
a : array
Array to have its shape expanded.
axis : int or tuple of ints, optional
Position(s) in the expanded axes where the new axis (or axes) is/are placed.
If multiple positions are provided, they should be unique (note that a position
given by a positive index could also be referred to by a negative index -
that will also result in an error).
Default: ``(0,)``.
xp : array_namespace, optional
The standard-compatible namespace for `a`. Default: infer.
Returns
-------
array
`a` with an expanded shape.
Examples
--------
>>> import array_api_strict as xp
>>> import array_api_extra as xpx
>>> x = xp.asarray([1, 2])
>>> x.shape
(2,)
The following is equivalent to ``x[xp.newaxis, :]`` or ``x[xp.newaxis]``:
>>> y = xpx.expand_dims(x, axis=0, xp=xp)
>>> y
Array([[1, 2]], dtype=array_api_strict.int64)
>>> y.shape
(1, 2)
The following is equivalent to ``x[:, xp.newaxis]``:
>>> y = xpx.expand_dims(x, axis=1, xp=xp)
>>> y
Array([[1],
[2]], dtype=array_api_strict.int64)
>>> y.shape
(2, 1)
``axis`` may also be a tuple:
>>> y = xpx.expand_dims(x, axis=(0, 1), xp=xp)
>>> y
Array([[[1, 2]]], dtype=array_api_strict.int64)
>>> y = xpx.expand_dims(x, axis=(2, 0), xp=xp)
>>> y
Array([[[1],
[2]]], dtype=array_api_strict.int64)
|
expand_dims
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_extra/_lib/_funcs.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_extra/_lib/_funcs.py
|
BSD-3-Clause
|
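A quick consistency check of the loop in `expand_dims` above, assuming NumPy >= 1.18 (where `np.expand_dims` itself accepts a tuple of axes): inserting the normalised axes one at a time in ascending order is equivalent to inserting them all at once:

import numpy as np

x = np.array([1, 2])
for axis in [(0, 1), (2, 0), (-1, 0)]:
    expected = np.expand_dims(x, axis)
    got = x
    ndim = x.ndim + len(axis)
    for i in sorted(dim % ndim for dim in axis):  # same normalisation as above
        got = np.expand_dims(got, i)
    assert got.shape == expected.shape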
def kron(
a: Array | complex,
b: Array | complex,
/,
*,
xp: ModuleType | None = None,
) -> Array:
"""
Kronecker product of two arrays.
Computes the Kronecker product, a composite array made of blocks of the
second array scaled by the first.
Equivalent to ``numpy.kron`` for NumPy arrays.
Parameters
----------
a, b : Array | int | float | complex
Input arrays or scalars. At least one must be an array.
xp : array_namespace, optional
The standard-compatible namespace for `a` and `b`. Default: infer.
Returns
-------
array
The Kronecker product of `a` and `b`.
Notes
-----
The function assumes that the number of dimensions of `a` and `b`
are the same, if necessary prepending the smallest with ones.
If ``a.shape = (r0,r1,..,rN)`` and ``b.shape = (s0,s1,...,sN)``,
    the Kronecker product has shape ``(r0*s0, r1*s1, ..., rN*sN)``.
The elements are products of elements from `a` and `b`, organized
explicitly by::
kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN]
where::
kt = it * st + jt, t = 0,...,N
In the common 2-D case (N=1), the block structure can be visualized::
[[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ],
[ ... ... ],
[ a[-1,0]*b, a[-1,1]*b, ... , a[-1,-1]*b ]]
Examples
--------
>>> import array_api_strict as xp
>>> import array_api_extra as xpx
>>> xpx.kron(xp.asarray([1, 10, 100]), xp.asarray([5, 6, 7]), xp=xp)
Array([ 5, 6, 7, 50, 60, 70, 500,
600, 700], dtype=array_api_strict.int64)
>>> xpx.kron(xp.asarray([5, 6, 7]), xp.asarray([1, 10, 100]), xp=xp)
Array([ 5, 50, 500, 6, 60, 600, 7,
70, 700], dtype=array_api_strict.int64)
>>> xpx.kron(xp.eye(2), xp.ones((2, 2)), xp=xp)
Array([[1., 1., 0., 0.],
[1., 1., 0., 0.],
[0., 0., 1., 1.],
[0., 0., 1., 1.]], dtype=array_api_strict.float64)
>>> a = xp.reshape(xp.arange(100), (2, 5, 2, 5))
>>> b = xp.reshape(xp.arange(24), (2, 3, 4))
>>> c = xpx.kron(a, b, xp=xp)
>>> c.shape
(2, 10, 6, 20)
>>> I = (1, 3, 0, 2)
>>> J = (0, 2, 1)
>>> J1 = (0,) + J # extend to ndim=4
>>> S1 = (1,) + b.shape
>>> K = tuple(xp.asarray(I) * xp.asarray(S1) + xp.asarray(J1))
>>> c[K] == a[I]*b[J]
Array(True, dtype=array_api_strict.bool)
"""
if xp is None:
xp = array_namespace(a, b)
a, b = asarrays(a, b, xp=xp)
singletons = (1,) * (b.ndim - a.ndim)
a = cast(Array, xp.broadcast_to(a, singletons + a.shape))
nd_b, nd_a = b.ndim, a.ndim
nd_max = max(nd_b, nd_a)
if nd_a == 0 or nd_b == 0:
return xp.multiply(a, b)
a_shape = eager_shape(a)
b_shape = eager_shape(b)
# Equalise the shapes by prepending smaller one with 1s
a_shape = (1,) * max(0, nd_b - nd_a) + a_shape
b_shape = (1,) * max(0, nd_a - nd_b) + b_shape
# Insert empty dimensions
a_arr = expand_dims(a, axis=tuple(range(nd_b - nd_a)), xp=xp)
b_arr = expand_dims(b, axis=tuple(range(nd_a - nd_b)), xp=xp)
# Compute the product
a_arr = expand_dims(a_arr, axis=tuple(range(1, nd_max * 2, 2)), xp=xp)
b_arr = expand_dims(b_arr, axis=tuple(range(0, nd_max * 2, 2)), xp=xp)
result = xp.multiply(a_arr, b_arr)
# Reshape back and return
res_shape = tuple(a_s * b_s for a_s, b_s in zip(a_shape, b_shape, strict=True))
return xp.reshape(result, res_shape)
|
Kronecker product of two arrays.
Computes the Kronecker product, a composite array made of blocks of the
second array scaled by the first.
Equivalent to ``numpy.kron`` for NumPy arrays.
Parameters
----------
a, b : Array | int | float | complex
Input arrays or scalars. At least one must be an array.
xp : array_namespace, optional
The standard-compatible namespace for `a` and `b`. Default: infer.
Returns
-------
array
The Kronecker product of `a` and `b`.
Notes
-----
The function assumes that the number of dimensions of `a` and `b`
are the same, if necessary prepending the smallest with ones.
If ``a.shape = (r0,r1,..,rN)`` and ``b.shape = (s0,s1,...,sN)``,
the Kronecker product has shape ``(r0*s0, r1*s1, ..., rN*sN)``.
The elements are products of elements from `a` and `b`, organized
explicitly by::
kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN]
where::
kt = it * st + jt, t = 0,...,N
In the common 2-D case (N=1), the block structure can be visualized::
[[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ],
[ ... ... ],
[ a[-1,0]*b, a[-1,1]*b, ... , a[-1,-1]*b ]]
Examples
--------
>>> import array_api_strict as xp
>>> import array_api_extra as xpx
>>> xpx.kron(xp.asarray([1, 10, 100]), xp.asarray([5, 6, 7]), xp=xp)
Array([ 5, 6, 7, 50, 60, 70, 500,
600, 700], dtype=array_api_strict.int64)
>>> xpx.kron(xp.asarray([5, 6, 7]), xp.asarray([1, 10, 100]), xp=xp)
Array([ 5, 50, 500, 6, 60, 600, 7,
70, 700], dtype=array_api_strict.int64)
>>> xpx.kron(xp.eye(2), xp.ones((2, 2)), xp=xp)
Array([[1., 1., 0., 0.],
[1., 1., 0., 0.],
[0., 0., 1., 1.],
[0., 0., 1., 1.]], dtype=array_api_strict.float64)
>>> a = xp.reshape(xp.arange(100), (2, 5, 2, 5))
>>> b = xp.reshape(xp.arange(24), (2, 3, 4))
>>> c = xpx.kron(a, b, xp=xp)
>>> c.shape
(2, 10, 6, 20)
>>> I = (1, 3, 0, 2)
>>> J = (0, 2, 1)
>>> J1 = (0,) + J # extend to ndim=4
>>> S1 = (1,) + b.shape
>>> K = tuple(xp.asarray(I) * xp.asarray(S1) + xp.asarray(J1))
>>> c[K] == a[I]*b[J]
Array(True, dtype=array_api_strict.bool)
|
kron
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_extra/_lib/_funcs.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_extra/_lib/_funcs.py
|
BSD-3-Clause
|
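A plain-NumPy sketch of the broadcasting scheme `kron` uses above: place the axes of `a` so they land at even positions and those of `b` at odd positions, multiply, then merge each (r_i, s_i) pair into a single axis of length r_i * s_i. Assumes NumPy >= 1.18 for tuple axes; `kron_sketch` is an illustrative name:

import numpy as np

def kron_sketch(a, b):
    nd = max(a.ndim, b.ndim)
    a = a.reshape((1,) * (nd - a.ndim) + a.shape)  # pad shapes with leading 1s
    b = b.reshape((1,) * (nd - b.ndim) + b.shape)
    a_exp = np.expand_dims(a, tuple(range(1, 2 * nd, 2)))  # a's axes land at even positions
    b_exp = np.expand_dims(b, tuple(range(0, 2 * nd, 2)))  # b's axes land at odd positions
    out = a_exp * b_exp
    return out.reshape(tuple(i * j for i, j in zip(a.shape, b.shape)))

a, b = np.arange(6).reshape(2, 3), np.ones((2, 2))
assert np.array_equal(kron_sketch(a, b), np.kron(a, b))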
def nunique(x: Array, /, *, xp: ModuleType | None = None) -> Array:
"""
Count the number of unique elements in an array.
Compatible with JAX and Dask, whose laziness would be otherwise
problematic.
Parameters
----------
x : Array
Input array.
xp : array_namespace, optional
The standard-compatible namespace for `x`. Default: infer.
Returns
-------
array: 0-dimensional integer array
The number of unique elements in `x`. It can be lazy.
"""
if xp is None:
xp = array_namespace(x)
if is_jax_array(x):
# size= is JAX-specific
# https://github.com/data-apis/array-api/issues/883
_, counts = xp.unique_counts(x, size=_compat.size(x))
return xp.astype(counts, xp.bool).sum()
_, counts = xp.unique_counts(x)
n = _compat.size(counts)
# FIXME https://github.com/data-apis/array-api-compat/pull/231
if n is None: # e.g. Dask, ndonnx
return xp.astype(counts, xp.bool).sum()
return xp.asarray(n, device=_compat.device(x))
|
Count the number of unique elements in an array.
Compatible with JAX and Dask, whose laziness would be otherwise
problematic.
Parameters
----------
x : Array
Input array.
xp : array_namespace, optional
The standard-compatible namespace for `x`. Default: infer.
Returns
-------
array: 0-dimensional integer array
The number of unique elements in `x`. It can be lazy.
|
nunique
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_extra/_lib/_funcs.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_extra/_lib/_funcs.py
|
BSD-3-Clause
|
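An eager-backend sketch of the two counting strategies in `nunique` above, using plain NumPy: counting the unique values directly agrees with summing a boolean mask over the counts, the form that still works when a lazy backend cannot report the length of `counts`:

import numpy as np

x = np.array([3, 1, 3, 2, 1])
_, counts = np.unique(x, return_counts=True)
assert counts.size == counts.astype(bool).sum() == 3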
def setdiff1d(
x1: Array | complex,
x2: Array | complex,
/,
*,
assume_unique: bool = False,
xp: ModuleType | None = None,
) -> Array:
"""
Find the set difference of two arrays.
Return the unique values in `x1` that are not in `x2`.
Parameters
----------
x1 : array | int | float | complex | bool
Input array.
x2 : array
Input comparison array.
assume_unique : bool
If ``True``, the input arrays are both assumed to be unique, which
can speed up the calculation. Default is ``False``.
xp : array_namespace, optional
The standard-compatible namespace for `x1` and `x2`. Default: infer.
Returns
-------
array
1D array of values in `x1` that are not in `x2`. The result
is sorted when `assume_unique` is ``False``, but otherwise only sorted
if the input is sorted.
Examples
--------
>>> import array_api_strict as xp
>>> import array_api_extra as xpx
>>> x1 = xp.asarray([1, 2, 3, 2, 4, 1])
>>> x2 = xp.asarray([3, 4, 5, 6])
>>> xpx.setdiff1d(x1, x2, xp=xp)
Array([1, 2], dtype=array_api_strict.int64)
"""
if xp is None:
xp = array_namespace(x1, x2)
# https://github.com/microsoft/pyright/issues/10103
x1_, x2_ = asarrays(x1, x2, xp=xp)
if assume_unique:
x1_ = xp.reshape(x1_, (-1,))
x2_ = xp.reshape(x2_, (-1,))
else:
x1_ = xp.unique_values(x1_)
x2_ = xp.unique_values(x2_)
return x1_[_helpers.in1d(x1_, x2_, assume_unique=True, invert=True, xp=xp)]
|
Find the set difference of two arrays.
Return the unique values in `x1` that are not in `x2`.
Parameters
----------
x1 : array | int | float | complex | bool
Input array.
x2 : array
Input comparison array.
assume_unique : bool
If ``True``, the input arrays are both assumed to be unique, which
can speed up the calculation. Default is ``False``.
xp : array_namespace, optional
The standard-compatible namespace for `x1` and `x2`. Default: infer.
Returns
-------
array
1D array of values in `x1` that are not in `x2`. The result
is sorted when `assume_unique` is ``False``, but otherwise only sorted
if the input is sorted.
Examples
--------
>>> import array_api_strict as xp
>>> import array_api_extra as xpx
>>> x1 = xp.asarray([1, 2, 3, 2, 4, 1])
>>> x2 = xp.asarray([3, 4, 5, 6])
>>> xpx.setdiff1d(x1, x2, xp=xp)
Array([1, 2], dtype=array_api_strict.int64)
|
setdiff1d
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_extra/_lib/_funcs.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_extra/_lib/_funcs.py
|
BSD-3-Clause
|
def _is_jax_jit_enabled(xp: ModuleType) -> bool: # numpydoc ignore=PR01,RT01
"""Return True if this function is being called inside ``jax.jit``."""
import jax # pylint: disable=import-outside-toplevel
x = xp.asarray(False)
try:
return bool(x)
except jax.errors.TracerBoolConversionError:
return True
|
Return True if this function is being called inside ``jax.jit``.
|
_is_jax_jit_enabled
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_extra/_lib/_lazy.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_extra/_lib/_lazy.py
|
BSD-3-Clause
|
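A hedged usage sketch (requires JAX, and relies on post-omnistaging behaviour where arrays created inside a jit trace are tracers): the probe converts cleanly to a Python bool when eager, and raises TracerBoolConversionError when traced:

import jax
import jax.numpy as jnp

def jit_active():
    try:
        bool(jnp.asarray(False))  # raises while tracing under jax.jit
        return False
    except jax.errors.TracerBoolConversionError:
        return True

print(jit_active())            # False: called eagerly
print(jax.jit(jit_active)())   # True: the body runs under tracing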
def _lazy_apply_wrapper( # type: ignore[explicit-any] # numpydoc ignore=PR01,RT01
func: Callable[..., Array | ArrayLike | Sequence[Array | ArrayLike]],
as_numpy: bool,
multi_output: bool,
xp: ModuleType,
) -> Callable[..., tuple[Array, ...]]:
"""
Helper of `lazy_apply`.
Given a function that accepts one or more arrays as positional arguments and returns
a single array-like or a sequence of array-likes, return a function that accepts the
    same number of Array API arrays and always returns a tuple of Array API arrays.
Any keyword arguments are passed through verbatim to the wrapped function.
"""
# On Dask, @wraps causes the graph key to contain the wrapped function's name
@wraps(func)
def wrapper( # type: ignore[decorated-any,explicit-any]
*args: Array | complex | None, **kwargs: Any
) -> tuple[Array, ...]: # numpydoc ignore=GL08
args_list = []
device = None
for arg in args:
if arg is not None and not is_python_scalar(arg):
if device is None:
device = _compat.device(arg)
if as_numpy:
import numpy as np
arg = cast(Array, np.asarray(arg)) # type: ignore[bad-cast] # noqa: PLW2901
args_list.append(arg)
assert device is not None
out = func(*args_list, **kwargs)
if multi_output:
assert isinstance(out, Sequence)
return tuple(xp.asarray(o, device=device) for o in out)
return (xp.asarray(out, device=device),)
return wrapper
|
Helper of `lazy_apply`.
Given a function that accepts one or more arrays as positional arguments and returns
a single array-like or a sequence of array-likes, return a function that accepts the
same number of Array API arrays and always returns a tuple of Array API arrays.
Any keyword arguments are passed through verbatim to the wrapped function.
|
_lazy_apply_wrapper
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_extra/_lib/_lazy.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_extra/_lib/_lazy.py
|
BSD-3-Clause
|
def _check_ns_shape_dtype(
actual: Array, desired: Array
) -> ModuleType: # numpydoc ignore=RT03
"""
Assert that namespace, shape and dtype of the two arrays match.
Parameters
----------
actual : Array
The array produced by the tested function.
desired : Array
The expected array (typically hardcoded).
Returns
-------
        The shared namespace of the arrays.
"""
actual_xp = array_namespace(actual) # Raises on scalars and lists
desired_xp = array_namespace(desired)
msg = f"namespaces do not match: {actual_xp} != f{desired_xp}"
assert actual_xp == desired_xp, msg
actual_shape = actual.shape
desired_shape = desired.shape
if is_dask_namespace(desired_xp):
# Dask uses nan instead of None for unknown shapes
if any(math.isnan(i) for i in cast(tuple[float, ...], actual_shape)):
actual_shape = actual.compute().shape # type: ignore[attr-defined] # pyright: ignore[reportAttributeAccessIssue]
if any(math.isnan(i) for i in cast(tuple[float, ...], desired_shape)):
desired_shape = desired.compute().shape # type: ignore[attr-defined] # pyright: ignore[reportAttributeAccessIssue]
msg = f"shapes do not match: {actual_shape} != f{desired_shape}"
assert actual_shape == desired_shape, msg
msg = f"dtypes do not match: {actual.dtype} != {desired.dtype}"
assert actual.dtype == desired.dtype, msg
return desired_xp
|
Assert that namespace, shape and dtype of the two arrays match.
Parameters
----------
actual : Array
The array produced by the tested function.
desired : Array
The expected array (typically hardcoded).
Returns
-------
The shared namespace of the arrays.
|
_check_ns_shape_dtype
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_extra/_lib/_testing.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_extra/_lib/_testing.py
|
BSD-3-Clause
|
def xp_assert_equal(actual: Array, desired: Array, err_msg: str = "") -> None:
"""
Array-API compatible version of `np.testing.assert_array_equal`.
Parameters
----------
actual : Array
The array produced by the tested function.
desired : Array
The expected array (typically hardcoded).
err_msg : str, optional
Error message to display on failure.
See Also
--------
xp_assert_close : Similar function for inexact equality checks.
numpy.testing.assert_array_equal : Similar function for NumPy arrays.
"""
xp = _check_ns_shape_dtype(actual, desired)
if is_cupy_namespace(xp):
xp.testing.assert_array_equal(actual, desired, err_msg=err_msg)
elif is_torch_namespace(xp):
# PyTorch recommends using `rtol=0, atol=0` like this
# to test for exact equality
xp.testing.assert_close(
actual,
desired,
rtol=0,
atol=0,
equal_nan=True,
check_dtype=False,
msg=err_msg or None,
)
else:
import numpy as np # pylint: disable=import-outside-toplevel
if is_pydata_sparse_namespace(xp):
actual = actual.todense() # type: ignore[attr-defined] # pyright: ignore[reportAttributeAccessIssue]
desired = desired.todense() # type: ignore[attr-defined] # pyright: ignore[reportAttributeAccessIssue]
actual_np = None
desired_np = None
if is_array_api_strict_namespace(xp):
# __array__ doesn't work on array-api-strict device arrays
# We need to convert to the CPU device first
actual_np = np.asarray(xp.asarray(actual, device=xp.Device("CPU_DEVICE")))
desired_np = np.asarray(xp.asarray(desired, device=xp.Device("CPU_DEVICE")))
# JAX/Dask arrays work with `np.testing`
actual_np = actual if actual_np is None else actual_np
desired_np = desired if desired_np is None else desired_np
np.testing.assert_array_equal(actual_np, desired_np, err_msg=err_msg) # pyright: ignore[reportUnknownArgumentType]
|
Array-API compatible version of `np.testing.assert_array_equal`.
Parameters
----------
actual : Array
The array produced by the tested function.
desired : Array
The expected array (typically hardcoded).
err_msg : str, optional
Error message to display on failure.
See Also
--------
xp_assert_close : Similar function for inexact equality checks.
numpy.testing.assert_array_equal : Similar function for NumPy arrays.
|
xp_assert_equal
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_extra/_lib/_testing.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_extra/_lib/_testing.py
|
BSD-3-Clause
|
def xp_assert_close(
actual: Array,
desired: Array,
*,
rtol: float | None = None,
atol: float = 0,
err_msg: str = "",
) -> None:
"""
Array-API compatible version of `np.testing.assert_allclose`.
Parameters
----------
actual : Array
The array produced by the tested function.
desired : Array
The expected array (typically hardcoded).
rtol : float, optional
Relative tolerance. Default: dtype-dependent.
atol : float, optional
Absolute tolerance. Default: 0.
err_msg : str, optional
Error message to display on failure.
See Also
--------
xp_assert_equal : Similar function for exact equality checks.
isclose : Public function for checking closeness.
numpy.testing.assert_allclose : Similar function for NumPy arrays.
Notes
-----
The default `atol` and `rtol` differ from `xp.all(xpx.isclose(a, b))`.
"""
xp = _check_ns_shape_dtype(actual, desired)
floating = xp.isdtype(actual.dtype, ("real floating", "complex floating"))
if rtol is None and floating:
# multiplier of 4 is used as for `np.float64` this puts the default `rtol`
# roughly half way between sqrt(eps) and the default for
# `numpy.testing.assert_allclose`, 1e-7
rtol = xp.finfo(actual.dtype).eps ** 0.5 * 4
elif rtol is None:
rtol = 1e-7
if is_cupy_namespace(xp):
xp.testing.assert_allclose(
actual, desired, rtol=rtol, atol=atol, err_msg=err_msg
)
elif is_torch_namespace(xp):
xp.testing.assert_close(
actual, desired, rtol=rtol, atol=atol, equal_nan=True, msg=err_msg or None
)
else:
import numpy as np # pylint: disable=import-outside-toplevel
if is_pydata_sparse_namespace(xp):
actual = actual.todense() # type: ignore[attr-defined] # pyright: ignore[reportAttributeAccessIssue]
desired = desired.todense() # type: ignore[attr-defined] # pyright: ignore[reportAttributeAccessIssue]
actual_np = None
desired_np = None
if is_array_api_strict_namespace(xp):
# __array__ doesn't work on array-api-strict device arrays
# We need to convert to the CPU device first
actual_np = np.asarray(xp.asarray(actual, device=xp.Device("CPU_DEVICE")))
desired_np = np.asarray(xp.asarray(desired, device=xp.Device("CPU_DEVICE")))
# JAX/Dask arrays work with `np.testing`
actual_np = actual if actual_np is None else actual_np
desired_np = desired if desired_np is None else desired_np
assert isinstance(rtol, float)
np.testing.assert_allclose( # pyright: ignore[reportCallIssue]
actual_np, # type: ignore[arg-type] # pyright: ignore[reportArgumentType]
desired_np, # type: ignore[arg-type] # pyright: ignore[reportArgumentType]
rtol=rtol,
atol=atol,
err_msg=err_msg,
)
|
Array-API compatible version of `np.testing.assert_allclose`.
Parameters
----------
actual : Array
The array produced by the tested function.
desired : Array
The expected array (typically hardcoded).
rtol : float, optional
Relative tolerance. Default: dtype-dependent.
atol : float, optional
Absolute tolerance. Default: 0.
err_msg : str, optional
Error message to display on failure.
See Also
--------
xp_assert_equal : Similar function for exact equality checks.
isclose : Public function for checking closeness.
numpy.testing.assert_allclose : Similar function for NumPy arrays.
Notes
-----
The default `atol` and `rtol` differ from `xp.all(xpx.isclose(a, b))`.
|
xp_assert_close
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_extra/_lib/_testing.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_extra/_lib/_testing.py
|
BSD-3-Clause
|
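A numeric illustration of the dtype-dependent default `rtol` above for float64 (eps ** 0.5 * 4 is roughly midway, on a log scale, between sqrt(eps) and `numpy.testing.assert_allclose`'s default of 1e-7):

import numpy as np

eps = np.finfo(np.float64).eps   # ~2.22e-16
print(eps ** 0.5)                # ~1.49e-8
print(eps ** 0.5 * 4)            # ~5.96e-8, the default rtol used above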
def in1d(
x1: Array,
x2: Array,
/,
*,
assume_unique: bool = False,
invert: bool = False,
xp: ModuleType | None = None,
) -> Array: # numpydoc ignore=PR01,RT01
"""
Check whether each element of an array is also present in a second array.
Returns a boolean array the same length as `x1` that is True
where an element of `x1` is in `x2` and False otherwise.
This function has been adapted using the original implementation
present in numpy:
https://github.com/numpy/numpy/blob/v1.26.0/numpy/lib/arraysetops.py#L524-L758
"""
if xp is None:
xp = array_namespace(x1, x2)
x1_shape = eager_shape(x1)
x2_shape = eager_shape(x2)
    # Fast path: when x2 is small relative to x1, looping over the elements
    # of x2 directly is significantly faster than the sort-based algorithm below.
if x2_shape[0] < 10 * x1_shape[0] ** 0.145 and isinstance(x2, Iterable):
if invert:
mask = xp.ones(x1_shape[0], dtype=xp.bool, device=_compat.device(x1))
for a in x2:
mask &= x1 != a
else:
mask = xp.zeros(x1_shape[0], dtype=xp.bool, device=_compat.device(x1))
for a in x2:
mask |= x1 == a
return mask
rev_idx = xp.empty(0) # placeholder
if not assume_unique:
x1, rev_idx = xp.unique_inverse(x1)
x2 = xp.unique_values(x2)
ar = xp.concat((x1, x2))
device_ = _compat.device(ar)
# We need this to be a stable sort.
order = xp.argsort(ar, stable=True)
reverse_order = xp.argsort(order, stable=True)
sar = xp.take(ar, order, axis=0)
ar_size = _compat.size(sar)
assert ar_size is not None, "xp.unique*() on lazy backends raises"
if ar_size >= 1:
bool_ar = sar[1:] != sar[:-1] if invert else sar[1:] == sar[:-1]
else:
bool_ar = xp.asarray([False]) if invert else xp.asarray([True])
flag = xp.concat((bool_ar, xp.asarray([invert], device=device_)))
ret = xp.take(flag, reverse_order, axis=0)
if assume_unique:
return ret[: x1.shape[0]]
return xp.take(ret, rev_idx, axis=0)
|
Check whether each element of an array is also present in a second array.
Returns a boolean array the same length as `x1` that is True
where an element of `x1` is in `x2` and False otherwise.
This function has been adapted using the original implementation
present in numpy:
https://github.com/numpy/numpy/blob/v1.26.0/numpy/lib/arraysetops.py#L524-L758
|
in1d
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_extra/_lib/_utils/_helpers.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_extra/_lib/_utils/_helpers.py
|
BSD-3-Clause
|
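A worked NumPy sketch of the sort-based membership test in `in1d` above: concatenate the deduplicated arrays, stable-sort, and flag every position whose successor holds an equal value; such a pair can only arise when an element of `x1` also occurs in `x2`:

import numpy as np

x1 = np.unique(np.array([1, 2, 3, 2, 4, 1]))   # [1 2 3 4]
x2 = np.unique(np.array([3, 4, 5, 6]))         # [3 4 5 6]
ar = np.concatenate((x1, x2))
order = np.argsort(ar, kind="stable")
sar = ar[order]
flag = np.concatenate((sar[1:] == sar[:-1], [False]))
mask = flag[np.argsort(order, kind="stable")][: x1.size]  # undo the sort
print(x1[mask])  # [3 4]: elements of x1 present in x2 (the complement of the setdiff1d example)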
def mean(
x: Array,
/,
*,
axis: int | tuple[int, ...] | None = None,
keepdims: bool = False,
xp: ModuleType | None = None,
) -> Array: # numpydoc ignore=PR01,RT01
"""
Complex mean, https://github.com/data-apis/array-api/issues/846.
"""
if xp is None:
xp = array_namespace(x)
if xp.isdtype(x.dtype, "complex floating"):
x_real = xp.real(x)
x_imag = xp.imag(x)
mean_real = xp.mean(x_real, axis=axis, keepdims=keepdims)
mean_imag = xp.mean(x_imag, axis=axis, keepdims=keepdims)
return mean_real + (mean_imag * xp.asarray(1j))
return xp.mean(x, axis=axis, keepdims=keepdims)
|
Complex mean, https://github.com/data-apis/array-api/issues/846.
|
mean
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_extra/_lib/_utils/_helpers.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_extra/_lib/_utils/_helpers.py
|
BSD-3-Clause
|
def is_python_scalar(x: object) -> TypeIs[complex]: # numpydoc ignore=PR01,RT01
"""Return True if `x` is a Python scalar, False otherwise."""
# isinstance(x, float) returns True for np.float64
# isinstance(x, complex) returns True for np.complex128
# bool is a subclass of int
return isinstance(x, int | float | complex) and not is_numpy_array(x)
|
Return True if `x` is a Python scalar, False otherwise.
|
is_python_scalar
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_extra/_lib/_utils/_helpers.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_extra/_lib/_utils/_helpers.py
|
BSD-3-Clause
|
def asarrays(
a: Array | complex,
b: Array | complex,
xp: ModuleType,
) -> tuple[Array, Array]:
"""
Ensure both `a` and `b` are arrays.
If `b` is a python scalar, it is converted to the same dtype as `a`, and vice versa.
Behavior is not specified when mixing a Python ``float`` and an array with an
integer data type; this may give ``float32``, ``float64``, or raise an exception.
Behavior is implementation-specific.
Similarly, behavior is not specified when mixing a Python ``complex`` and an array
with a real-valued data type; this may give ``complex64``, ``complex128``, or raise
an exception. Behavior is implementation-specific.
Parameters
----------
a, b : Array | int | float | complex | bool
Input arrays or scalars. At least one must be an array.
xp : array_namespace, optional
The standard-compatible namespace for `x`. Default: infer.
Returns
-------
Array, Array
The input arrays, possibly converted to arrays if they were scalars.
See Also
--------
mixing-arrays-with-python-scalars : Array API specification for the behavior.
"""
a_scalar = is_python_scalar(a)
b_scalar = is_python_scalar(b)
if not a_scalar and not b_scalar:
# This includes misc. malformed input e.g. str
return a, b # type: ignore[return-value]
swap = False
if a_scalar:
swap = True
b, a = a, b
if is_array_api_obj(a):
# a is an Array API object
# b is a int | float | complex | bool
xa = a
# https://data-apis.org/array-api/draft/API_specification/type_promotion.html#mixing-arrays-with-python-scalars
same_dtype = {
bool: "bool",
int: ("integral", "real floating", "complex floating"),
float: ("real floating", "complex floating"),
complex: "complex floating",
}
kind = same_dtype[type(cast(complex, b))] # type: ignore[index]
if xp.isdtype(a.dtype, kind):
xb = xp.asarray(b, dtype=a.dtype)
else:
# Undefined behaviour. Let the function deal with it, if it can.
xb = xp.asarray(b)
else:
# Neither a nor b are Array API objects.
# Note: we can only reach this point when one explicitly passes
# xp=xp to the calling function; otherwise we fail earlier on
# array_namespace(a, b).
xa, xb = xp.asarray(a), xp.asarray(b)
return (xb, xa) if swap else (xa, xb)
|
Ensure both `a` and `b` are arrays.
If `b` is a python scalar, it is converted to the same dtype as `a`, and vice versa.
Behavior is not specified when mixing a Python ``float`` and an array with an
integer data type; this may give ``float32``, ``float64``, or raise an exception.
Behavior is implementation-specific.
Similarly, behavior is not specified when mixing a Python ``complex`` and an array
with a real-valued data type; this may give ``complex64``, ``complex128``, or raise
an exception. Behavior is implementation-specific.
Parameters
----------
a, b : Array | int | float | complex | bool
Input arrays or scalars. At least one must be an array.
xp : array_namespace, optional
The standard-compatible namespace for `x`. Default: infer.
Returns
-------
Array, Array
The input arrays, possibly converted to arrays if they were scalars.
See Also
--------
mixing-arrays-with-python-scalars : Array API specification for the behavior.
|
asarrays
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_extra/_lib/_utils/_helpers.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_extra/_lib/_utils/_helpers.py
|
BSD-3-Clause
|
def ndindex(*x: int) -> Generator[tuple[int, ...]]:
"""
Generate all N-dimensional indices for a given array shape.
Given the shape of an array, an ndindex instance iterates over the N-dimensional
    index of the array. At each iteration a tuple of indices is returned; the last
    dimension is iterated over first.
This has an identical API to numpy.ndindex.
Parameters
----------
*x : int
The shape of the array.
"""
if not x:
yield ()
return
for i in ndindex(*x[:-1]):
for j in range(x[-1]):
yield *i, j
|
Generate all N-dimensional indices for a given array shape.
Given the shape of an array, an ndindex instance iterates over the N-dimensional
index of the array. At each iteration a tuple of indices is returned; the last
dimension is iterated over first.
This has an identical API to numpy.ndindex.
Parameters
----------
*x : int
The shape of the array.
|
ndindex
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_extra/_lib/_utils/_helpers.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_extra/_lib/_utils/_helpers.py
|
BSD-3-Clause
|
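A usage sketch confirming the iteration order of `ndindex` above against `numpy.ndindex` (row-major, last dimension fastest):

import numpy as np

def ndindex(*x):
    if not x:
        yield ()
        return
    for i in ndindex(*x[:-1]):
        for j in range(x[-1]):
            yield (*i, j)

assert list(ndindex(2, 3)) == list(np.ndindex(2, 3))
print(list(ndindex(2, 3)))  # [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]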
def eager_shape(x: Array, /) -> tuple[int, ...]:
"""
Return shape of an array. Raise if shape is not fully defined.
Parameters
----------
x : Array
Input array.
Returns
-------
tuple[int, ...]
Shape of the array.
"""
shape = x.shape
    # Dask arrays use non-standard NaN instead of None
if any(s is None or math.isnan(s) for s in shape):
msg = "Unsupported lazy shape"
raise TypeError(msg)
return cast(tuple[int, ...], shape)
|
Return shape of an array. Raise if shape is not fully defined.
Parameters
----------
x : Array
Input array.
Returns
-------
tuple[int, ...]
Shape of the array.
|
eager_shape
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_extra/_lib/_utils/_helpers.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_extra/_lib/_utils/_helpers.py
|
BSD-3-Clause
|
def meta_namespace(
*arrays: Array | complex | None, xp: ModuleType | None = None
) -> ModuleType:
"""
Get the namespace of Dask chunks.
On all other backends, just return the namespace of the arrays.
Parameters
----------
*arrays : Array | int | float | complex | bool | None
Input arrays.
xp : array_namespace, optional
The standard-compatible namespace for the input arrays. Default: infer.
Returns
-------
array_namespace
If xp is Dask, the namespace of the Dask chunks;
otherwise, the namespace of the arrays.
"""
xp = array_namespace(*arrays) if xp is None else xp
if not is_dask_namespace(xp):
return xp
# Quietly skip scalars and None's
metas = [cast(Array | None, getattr(a, "_meta", None)) for a in arrays]
return array_namespace(*metas)
|
Get the namespace of Dask chunks.
On all other backends, just return the namespace of the arrays.
Parameters
----------
*arrays : Array | int | float | complex | bool | None
Input arrays.
xp : array_namespace, optional
The standard-compatible namespace for the input arrays. Default: infer.
Returns
-------
array_namespace
If xp is Dask, the namespace of the Dask chunks;
otherwise, the namespace of the arrays.
|
meta_namespace
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/array_api_extra/_lib/_utils/_helpers.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/array_api_extra/_lib/_utils/_helpers.py
|
BSD-3-Clause
|
def parse(version: str) -> Union["LegacyVersion", "Version"]:
"""Parse the given version from a string to an appropriate class.
Parameters
----------
version : str
        Version in a string format, e.g. "0.9.1" or "1.2.dev0".
Returns
-------
version : :class:`Version` object or a :class:`LegacyVersion` object
        The returned class depends on whether the given version is a valid
        PEP 440 version or a legacy version.
"""
try:
return Version(version)
except InvalidVersion:
return LegacyVersion(version)
|
Parse the given version from a string to an appropriate class.
Parameters
----------
version : str
    Version in a string format, e.g. "0.9.1" or "1.2.dev0".
Returns
-------
version : :class:`Version` object or a :class:`LegacyVersion` object
    The returned class depends on whether the given version is a valid
    PEP 440 version or a legacy version.
|
parse
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/_packaging/version.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/_packaging/version.py
|
BSD-3-Clause
|
def _parse_local_version(local: str) -> Optional[LocalType]:
"""
Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
"""
if local is not None:
return tuple(
part.lower() if not part.isdigit() else int(part)
for part in _local_version_separators.split(local)
)
return None
|
Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
|
_parse_local_version
|
python
|
scikit-learn/scikit-learn
|
sklearn/externals/_packaging/version.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/externals/_packaging/version.py
|
BSD-3-Clause
|
def _make_edges_3d(n_x, n_y, n_z=1):
"""Returns a list of edges for a 3D image.
Parameters
----------
n_x : int
The size of the grid in the x direction.
n_y : int
The size of the grid in the y direction.
    n_z : int, default=1
        The size of the grid in the z direction.
"""
vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))
edges_deep = np.vstack((vertices[:, :, :-1].ravel(), vertices[:, :, 1:].ravel()))
edges_right = np.vstack((vertices[:, :-1].ravel(), vertices[:, 1:].ravel()))
edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))
edges = np.hstack((edges_deep, edges_right, edges_down))
return edges
|
Return the edges of a 3D image grid as an array of vertex-index pairs.
Parameters
----------
n_x : int
The size of the grid in the x direction.
n_y : int
The size of the grid in the y direction.
n_z : int, default=1
    The size of the grid in the z direction.
|
_make_edges_3d
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/image.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/image.py
|
BSD-3-Clause
|
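A worked example of the edge construction above for a 2x2 grid with n_z=1 (the "deep" edges are empty in that case, so only the right- and down-neighbour pairs remain); vertices are numbered row-major:

import numpy as np

n_x, n_y, n_z = 2, 2, 1
vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))
# Grid:  0 1
#        2 3
edges_right = np.vstack((vertices[:, :-1].ravel(), vertices[:, 1:].ravel()))
edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))
print(np.hstack((edges_right, edges_down)))
# [[0 2 0 1]
#  [1 3 2 3]]  -> edges (0,1), (2,3), (0,2), (1,3)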
def _mask_edges_weights(mask, edges, weights=None):
"""Apply a mask to edges (weighted or not)"""
inds = np.arange(mask.size)
inds = inds[mask.ravel()]
ind_mask = np.logical_and(np.isin(edges[0], inds), np.isin(edges[1], inds))
edges = edges[:, ind_mask]
if weights is not None:
weights = weights[ind_mask]
if len(edges.ravel()):
maxval = edges.max()
else:
maxval = 0
order = np.searchsorted(np.flatnonzero(mask), np.arange(maxval + 1))
edges = order[edges]
if weights is None:
return edges
else:
return edges, weights
|
Apply a mask to edges (weighted or not)
|
_mask_edges_weights
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/image.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/image.py
|
BSD-3-Clause
|
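A small sketch of the `searchsorted` renumbering used above: once masked-out vertices are dropped, the surviving vertex ids must be compacted to 0..n-1, and searching each old id against the sorted survivors yields exactly that mapping:

import numpy as np

mask = np.array([True, False, True, True])   # vertex 1 is masked out
surviving = np.flatnonzero(mask)             # [0 2 3]
order = np.searchsorted(surviving, np.arange(mask.size))
print(order)                                 # [0 1 1 2]: 0->0, 2->1, 3->2
edges = np.array([[0, 2], [2, 3]])
print(order[edges])                          # [[0 1], [1 2]]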
def img_to_graph(img, *, mask=None, return_as=sparse.coo_matrix, dtype=None):
"""Graph of the pixel-to-pixel gradient connections.
Edges are weighted with the gradient values.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
img : array-like of shape (height, width) or (height, width, channel)
2D or 3D image.
mask : ndarray of shape (height, width) or \
(height, width, channel), dtype=bool, default=None
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, \
default=sparse.coo_matrix
The class to use to build the returned adjacency matrix.
dtype : dtype, default=None
        The dtype of the returned sparse matrix. By default it is the
        dtype of `img`.
Returns
-------
graph : ndarray or a sparse matrix class
The computed adjacency matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn.feature_extraction.image import img_to_graph
>>> img = np.array([[0, 0], [0, 1]])
>>> img_to_graph(img, return_as=np.ndarray)
array([[0, 0, 0, 0],
[0, 0, 0, 1],
[0, 0, 0, 1],
[0, 1, 1, 1]])
"""
img = np.atleast_3d(img)
n_x, n_y, n_z = img.shape
return _to_graph(n_x, n_y, n_z, mask, img, return_as, dtype)
|
Graph of the pixel-to-pixel gradient connections.
Edges are weighted with the gradient values.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
img : array-like of shape (height, width) or (height, width, channel)
2D or 3D image.
mask : ndarray of shape (height, width) or (height, width, channel), dtype=bool, default=None
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, default=sparse.coo_matrix
The class to use to build the returned adjacency matrix.
dtype : dtype, default=None
    The dtype of the returned sparse matrix. By default it is the
    dtype of `img`.
Returns
-------
graph : ndarray or a sparse matrix class
The computed adjacency matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn.feature_extraction.image import img_to_graph
>>> img = np.array([[0, 0], [0, 1]])
>>> img_to_graph(img, return_as=np.ndarray)
array([[0, 0, 0, 0],
[0, 0, 0, 1],
[0, 0, 0, 1],
[0, 1, 1, 1]])
|
img_to_graph
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/image.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/image.py
|
BSD-3-Clause
|
def grid_to_graph(
n_x, n_y, n_z=1, *, mask=None, return_as=sparse.coo_matrix, dtype=int
):
"""Graph of the pixel-to-pixel connections.
Edges exist if 2 voxels are connected.
Read more in the :ref:`User Guide <connectivity_graph_image>`.
Parameters
----------
n_x : int
Dimension in x axis.
n_y : int
Dimension in y axis.
n_z : int, default=1
Dimension in z axis.
mask : ndarray of shape (n_x, n_y, n_z), dtype=bool, default=None
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, \
default=sparse.coo_matrix
The class to use to build the returned adjacency matrix.
dtype : dtype, default=int
        The dtype of the returned sparse matrix. By default it is int.
Returns
-------
graph : np.ndarray or a sparse matrix class
The computed adjacency matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn.feature_extraction.image import grid_to_graph
>>> shape_img = (4, 4, 1)
>>> mask = np.zeros(shape=shape_img, dtype=bool)
>>> mask[[1, 2], [1, 2], :] = True
>>> graph = grid_to_graph(*shape_img, mask=mask)
>>> print(graph)
<COOrdinate sparse matrix of dtype 'int64'
with 2 stored elements and shape (2, 2)>
Coords Values
(0, 0) 1
(1, 1) 1
"""
return _to_graph(n_x, n_y, n_z, mask=mask, return_as=return_as, dtype=dtype)
|
Graph of the pixel-to-pixel connections.
Edges exist if 2 voxels are connected.
Read more in the :ref:`User Guide <connectivity_graph_image>`.
Parameters
----------
n_x : int
Dimension in x axis.
n_y : int
Dimension in y axis.
n_z : int, default=1
Dimension in z axis.
mask : ndarray of shape (n_x, n_y, n_z), dtype=bool, default=None
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, default=sparse.coo_matrix
The class to use to build the returned adjacency matrix.
dtype : dtype, default=int
    The dtype of the returned sparse matrix. By default it is int.
Returns
-------
graph : np.ndarray or a sparse matrix class
The computed adjacency matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn.feature_extraction.image import grid_to_graph
>>> shape_img = (4, 4, 1)
>>> mask = np.zeros(shape=shape_img, dtype=bool)
>>> mask[[1, 2], [1, 2], :] = True
>>> graph = grid_to_graph(*shape_img, mask=mask)
>>> print(graph)
<COOrdinate sparse matrix of dtype 'int64'
with 2 stored elements and shape (2, 2)>
Coords Values
(0, 0) 1
(1, 1) 1
|
grid_to_graph
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/image.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/image.py
|
BSD-3-Clause
|
def _compute_n_patches(i_h, i_w, p_h, p_w, max_patches=None):
"""Compute the number of patches that will be extracted in an image.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
i_h : int
The image height
i_w : int
        The image width
p_h : int
The height of a patch
p_w : int
The width of a patch
max_patches : int or float, default=None
The maximum number of patches to extract. If `max_patches` is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches. If `max_patches` is None, all possible patches are extracted.
"""
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
all_patches = n_h * n_w
if max_patches:
if isinstance(max_patches, (Integral)) and max_patches < all_patches:
return max_patches
elif isinstance(max_patches, (Integral)) and max_patches >= all_patches:
return all_patches
elif isinstance(max_patches, (Real)) and 0 < max_patches < 1:
return int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches
|
Compute the number of patches that will be extracted in an image.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
i_h : int
The image height
i_w : int
    The image width
p_h : int
The height of a patch
p_w : int
The width of a patch
max_patches : int or float, default=None
The maximum number of patches to extract. If `max_patches` is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches. If `max_patches` is None, all possible patches are extracted.
|
_compute_n_patches
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/image.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/image.py
|
BSD-3-Clause
|
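A numeric illustration of the patch count above: a 4x4 image with 2x2 patches admits (4 - 2 + 1) * (4 - 2 + 1) = 9 positions, and max_patches=0.5 keeps int(0.5 * 9) = 4 of them:

i_h = i_w = 4
p_h = p_w = 2
all_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
print(all_patches)             # 9
print(int(0.5 * all_patches))  # 4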
def _extract_patches(arr, patch_shape=8, extraction_step=1):
"""Extracts patches of any n-dimensional array in place using strides.
Given an n-dimensional array it will return a 2n-dimensional array with
the first n dimensions indexing patch position and the last n indexing
the patch content. This operation is immediate (O(1)). A reshape
performed on the first n dimensions will cause numpy to copy data, leading
to a list of extracted patches.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
arr : ndarray
n-dimensional array of which patches are to be extracted
    patch_shape : int or tuple of length arr.ndim, default=8
Indicates the shape of the patches to be extracted. If an
integer is given, the shape will be a hypercube of
sidelength given by its value.
extraction_step : int or tuple of length arr.ndim, default=1
Indicates step size at which extraction shall be performed.
If integer is given, then the step is uniform in all dimensions.
Returns
-------
patches : strided ndarray
2n-dimensional array indexing patches on first n dimensions and
containing patches on the last n dimensions. These dimensions
are fake, but this way no data is copied. A simple reshape invokes
a copying operation to obtain a list of patches:
result.reshape([-1] + list(patch_shape))
"""
arr_ndim = arr.ndim
if isinstance(patch_shape, Number):
patch_shape = tuple([patch_shape] * arr_ndim)
if isinstance(extraction_step, Number):
extraction_step = tuple([extraction_step] * arr_ndim)
patch_strides = arr.strides
slices = tuple(slice(None, None, st) for st in extraction_step)
indexing_strides = arr[slices].strides
patch_indices_shape = (
(np.array(arr.shape) - np.array(patch_shape)) // np.array(extraction_step)
) + 1
shape = tuple(list(patch_indices_shape) + list(patch_shape))
strides = tuple(list(indexing_strides) + list(patch_strides))
patches = as_strided(arr, shape=shape, strides=strides)
return patches
|
Extracts patches of any n-dimensional array in place using strides.
Given an n-dimensional array it will return a 2n-dimensional array with
the first n dimensions indexing patch position and the last n indexing
the patch content. This operation is immediate (O(1)). A reshape
performed on the first n dimensions will cause numpy to copy data, leading
to a list of extracted patches.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
arr : ndarray
n-dimensional array of which patches are to be extracted
patch_shape : int or tuple of length arr.ndim, default=8
Indicates the shape of the patches to be extracted. If an
integer is given, the shape will be a hypercube of
sidelength given by its value.
extraction_step : int or tuple of length arr.ndim, default=1
Indicates step size at which extraction shall be performed.
If integer is given, then the step is uniform in all dimensions.
Returns
-------
patches : strided ndarray
2n-dimensional array indexing patches on first n dimensions and
containing patches on the last n dimensions. These dimensions
are fake, but this way no data is copied. A simple reshape invokes
a copying operation to obtain a list of patches:
result.reshape([-1] + list(patch_shape))
|
_extract_patches
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/image.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/image.py
|
BSD-3-Clause
|
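A minimal 1-D sketch of the stride trick behind `_extract_patches`: a length-5 array viewed as three overlapping windows of length 3, with no data copied:

import numpy as np
from numpy.lib.stride_tricks import as_strided

arr = np.arange(5)
patch, step = 3, 1
n = (arr.shape[0] - patch) // step + 1
windows = as_strided(
    arr,
    shape=(n, patch),                                # (positions, patch content)
    strides=(arr.strides[0] * step, arr.strides[0]),
)
print(windows)
# [[0 1 2]
#  [1 2 3]
#  [2 3 4]]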
def extract_patches_2d(image, patch_size, *, max_patches=None, random_state=None):
"""Reshape a 2D image into a collection of patches.
The resulting patches are allocated in a dedicated array.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
image : ndarray of shape (image_height, image_width) or \
(image_height, image_width, n_channels)
The original image data. For color images, the last dimension specifies
        the channel: an RGB image would have `n_channels=3`.
patch_size : tuple of int (patch_height, patch_width)
The dimensions of one patch.
max_patches : int or float, default=None
The maximum number of patches to extract. If `max_patches` is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches. If `max_patches` is None it corresponds to the total number
of patches that can be extracted.
random_state : int, RandomState instance, default=None
Determines the random number generator used for random sampling when
`max_patches` is not None. Use an int to make the randomness
deterministic.
See :term:`Glossary <random_state>`.
Returns
-------
patches : array of shape (n_patches, patch_height, patch_width) or \
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the image, where `n_patches`
is either `max_patches` or the total number of patches that can be
extracted.
Examples
--------
>>> from sklearn.datasets import load_sample_image
>>> from sklearn.feature_extraction import image
>>> # Use the array data from the first image in this dataset:
>>> one_image = load_sample_image("china.jpg")
>>> print('Image shape: {}'.format(one_image.shape))
Image shape: (427, 640, 3)
>>> patches = image.extract_patches_2d(one_image, (2, 2))
>>> print('Patches shape: {}'.format(patches.shape))
Patches shape: (272214, 2, 2, 3)
>>> # Here are just two of these patches:
>>> print(patches[1])
[[[174 201 231]
[174 201 231]]
[[173 200 230]
[173 200 230]]]
>>> print(patches[800])
[[[187 214 243]
[188 215 244]]
[[187 214 243]
[188 215 244]]]
"""
i_h, i_w = image.shape[:2]
p_h, p_w = patch_size
if p_h > i_h:
raise ValueError(
"Height of the patch should be less than the height of the image."
)
if p_w > i_w:
raise ValueError(
"Width of the patch should be less than the width of the image."
)
image = check_array(image, allow_nd=True)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
extracted_patches = _extract_patches(
image, patch_shape=(p_h, p_w, n_colors), extraction_step=1
)
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches)
if max_patches:
rng = check_random_state(random_state)
i_s = rng.randint(i_h - p_h + 1, size=n_patches)
j_s = rng.randint(i_w - p_w + 1, size=n_patches)
patches = extracted_patches[i_s, j_s, 0]
else:
patches = extracted_patches
patches = patches.reshape(-1, p_h, p_w, n_colors)
# remove the color dimension if useless
if patches.shape[-1] == 1:
return patches.reshape((n_patches, p_h, p_w))
else:
return patches
|
Reshape a 2D image into a collection of patches.
The resulting patches are allocated in a dedicated array.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
image : ndarray of shape (image_height, image_width) or (image_height, image_width, n_channels)
The original image data. For color images, the last dimension specifies
    the channel: an RGB image would have `n_channels=3`.
patch_size : tuple of int (patch_height, patch_width)
The dimensions of one patch.
max_patches : int or float, default=None
The maximum number of patches to extract. If `max_patches` is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches. If `max_patches` is None it corresponds to the total number
of patches that can be extracted.
random_state : int, RandomState instance, default=None
Determines the random number generator used for random sampling when
`max_patches` is not None. Use an int to make the randomness
deterministic.
See :term:`Glossary <random_state>`.
Returns
-------
patches : array of shape (n_patches, patch_height, patch_width) or (n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the image, where `n_patches`
is either `max_patches` or the total number of patches that can be
extracted.
Examples
--------
>>> from sklearn.datasets import load_sample_image
>>> from sklearn.feature_extraction import image
>>> # Use the array data from the first image in this dataset:
>>> one_image = load_sample_image("china.jpg")
>>> print('Image shape: {}'.format(one_image.shape))
Image shape: (427, 640, 3)
>>> patches = image.extract_patches_2d(one_image, (2, 2))
>>> print('Patches shape: {}'.format(patches.shape))
Patches shape: (272214, 2, 2, 3)
>>> # Here are just two of these patches:
>>> print(patches[1])
[[[174 201 231]
[174 201 231]]
[[173 200 230]
[173 200 230]]]
>>> print(patches[800])
[[[187 214 243]
[188 215 244]]
[[187 214 243]
[188 215 244]]]
|
extract_patches_2d
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/image.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/image.py
|
BSD-3-Clause
|
def reconstruct_from_patches_2d(patches, image_size):
"""Reconstruct the image from all of its patches.
Patches are assumed to overlap and the image is constructed by filling in
the patches from left to right, top to bottom, averaging the overlapping
regions.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patches : ndarray of shape (n_patches, patch_height, patch_width) or \
(n_patches, patch_height, patch_width, n_channels)
The complete set of patches. If the patches contain colour information,
channels are indexed along the last dimension: RGB patches would
have `n_channels=3`.
image_size : tuple of int (image_height, image_width) or \
(image_height, image_width, n_channels)
The size of the image that will be reconstructed.
Returns
-------
image : ndarray of shape image_size
The reconstructed image.
Examples
--------
>>> from sklearn.datasets import load_sample_image
>>> from sklearn.feature_extraction import image
>>> one_image = load_sample_image("china.jpg")
>>> print('Image shape: {}'.format(one_image.shape))
Image shape: (427, 640, 3)
>>> image_patches = image.extract_patches_2d(image=one_image, patch_size=(10, 10))
>>> print('Patches shape: {}'.format(image_patches.shape))
Patches shape: (263758, 10, 10, 3)
>>> image_reconstructed = image.reconstruct_from_patches_2d(
... patches=image_patches,
... image_size=one_image.shape
... )
>>> print(f"Reconstructed shape: {image_reconstructed.shape}")
Reconstructed shape: (427, 640, 3)
"""
i_h, i_w = image_size[:2]
p_h, p_w = patches.shape[1:3]
img = np.zeros(image_size)
# compute the dimensions of the patches array
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
for p, (i, j) in zip(patches, product(range(n_h), range(n_w))):
img[i : i + p_h, j : j + p_w] += p
for i in range(i_h):
for j in range(i_w):
# divide by the amount of overlap
# XXX: is this the most efficient way? memory-wise yes, cpu wise?
img[i, j] /= float(min(i + 1, p_h, i_h - i) * min(j + 1, p_w, i_w - j))
return img
|
Reconstruct the image from all of its patches.
Patches are assumed to overlap and the image is constructed by filling in
the patches from left to right, top to bottom, averaging the overlapping
regions.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patches : ndarray of shape (n_patches, patch_height, patch_width) or (n_patches, patch_height, patch_width, n_channels)
The complete set of patches. If the patches contain colour information,
channels are indexed along the last dimension: RGB patches would
have `n_channels=3`.
image_size : tuple of int (image_height, image_width) or (image_height, image_width, n_channels)
The size of the image that will be reconstructed.
Returns
-------
image : ndarray of shape image_size
The reconstructed image.
Examples
--------
>>> from sklearn.datasets import load_sample_image
>>> from sklearn.feature_extraction import image
>>> one_image = load_sample_image("china.jpg")
>>> print('Image shape: {}'.format(one_image.shape))
Image shape: (427, 640, 3)
>>> image_patches = image.extract_patches_2d(image=one_image, patch_size=(10, 10))
>>> print('Patches shape: {}'.format(image_patches.shape))
Patches shape: (263758, 10, 10, 3)
>>> image_reconstructed = image.reconstruct_from_patches_2d(
... patches=image_patches,
... image_size=one_image.shape
... )
>>> print(f"Reconstructed shape: {image_reconstructed.shape}")
Reconstructed shape: (427, 640, 3)
|
reconstruct_from_patches_2d
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/image.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/image.py
|
BSD-3-Clause
|
def transform(self, X):
"""Transform the image samples in `X` into a matrix of patch data.
Parameters
----------
X : ndarray of shape (n_samples, image_height, image_width) or \
(n_samples, image_height, image_width, n_channels)
Array of images from which to extract patches. For color images,
the last dimension specifies the channel: a RGB image would have
`n_channels=3`.
Returns
-------
patches : array of shape (n_patches, patch_height, patch_width) or \
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the images, where
`n_patches` is either `n_samples * max_patches` or the total
number of patches that can be extracted.
"""
X = validate_data(
self,
X=X,
ensure_2d=False,
allow_nd=True,
ensure_min_samples=1,
ensure_min_features=1,
reset=False,
)
random_state = check_random_state(self.random_state)
n_imgs, img_height, img_width = X.shape[:3]
if self.patch_size is None:
patch_size = img_height // 10, img_width // 10
else:
if len(self.patch_size) != 2:
raise ValueError(
"patch_size must be a tuple of two integers. Got"
f" {self.patch_size} instead."
)
patch_size = self.patch_size
X = np.reshape(X, (n_imgs, img_height, img_width, -1))
n_channels = X.shape[-1]
# compute the dimensions of the patches array
patch_height, patch_width = patch_size
n_patches = _compute_n_patches(
img_height, img_width, patch_height, patch_width, self.max_patches
)
patches_shape = (n_imgs * n_patches,) + patch_size
if n_channels > 1:
patches_shape += (n_channels,)
# extract the patches
patches = np.empty(patches_shape)
for ii, image in enumerate(X):
patches[ii * n_patches : (ii + 1) * n_patches] = extract_patches_2d(
image,
patch_size,
max_patches=self.max_patches,
random_state=random_state,
)
return patches
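# Illustrative usage sketch (editor's addition, not scikit-learn source):
# extracting a fixed number of random patches per image via the public API.
# Shapes follow the docstring above; the data values are arbitrary demo input.
import numpy as np
from sklearn.feature_extraction.image import PatchExtractor

imgs = np.random.RandomState(0).rand(2, 32, 32)  # two 32x32 grayscale images
pe = PatchExtractor(patch_size=(8, 8), max_patches=4, random_state=0)
patches = pe.fit(imgs).transform(imgs)
print(patches.shape)  # (8, 8, 8): 2 images * 4 patches each, every patch 8x8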
|
Transform the image samples in `X` into a matrix of patch data.
Parameters
----------
X : ndarray of shape (n_samples, image_height, image_width) or (n_samples, image_height, image_width, n_channels)
Array of images from which to extract patches. For color images,
the last dimension specifies the channel: a RGB image would have
`n_channels=3`.
Returns
-------
patches : array of shape (n_patches, patch_height, patch_width) or (n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the images, where
`n_patches` is either `n_samples * max_patches` or the total
number of patches that can be extracted.
|
transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/image.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/image.py
|
BSD-3-Clause
|
def _preprocess(doc, accent_function=None, lower=False):
"""Chain together an optional series of text preprocessing steps to
apply to a document.
Parameters
----------
doc: str
The string to preprocess
accent_function: callable, default=None
Function for handling accented characters. Common strategies include
normalizing and removing.
lower: bool, default=False
Whether to use str.lower to lowercase all of the text
Returns
-------
doc: str
preprocessed string
"""
if lower:
doc = doc.lower()
if accent_function is not None:
doc = accent_function(doc)
return doc
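# Illustrative sketch (editor's addition): chaining lowercasing with ASCII
# accent stripping; strip_accents_ascii is the helper from this same module.
from sklearn.feature_extraction.text import strip_accents_ascii

assert _preprocess("Déjà Vu", accent_function=strip_accents_ascii,
                   lower=True) == "deja vu"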
|
Chain together an optional series of text preprocessing steps to
apply to a document.
Parameters
----------
doc: str
The string to preprocess
accent_function: callable, default=None
Function for handling accented characters. Common strategies include
normalizing and removing.
lower: bool, default=False
Whether to use str.lower to lowercase all of the text
Returns
-------
doc: str
preprocessed string
|
_preprocess
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/text.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/text.py
|
BSD-3-Clause
|
def _analyze(
doc,
analyzer=None,
tokenizer=None,
ngrams=None,
preprocessor=None,
decoder=None,
stop_words=None,
):
"""Chain together an optional series of text processing steps to go from
a single document to ngrams, with or without tokenizing or preprocessing.
If analyzer is used, only the decoder argument is used, as the analyzer is
intended to replace the preprocessor, tokenizer, and ngrams steps.
Parameters
----------
analyzer: callable, default=None
tokenizer: callable, default=None
ngrams: callable, default=None
preprocessor: callable, default=None
decoder: callable, default=None
stop_words: list, default=None
Returns
-------
ngrams: list
A sequence of tokens, possibly with pairs, triples, etc.
"""
if decoder is not None:
doc = decoder(doc)
if analyzer is not None:
doc = analyzer(doc)
else:
if preprocessor is not None:
doc = preprocessor(doc)
if tokenizer is not None:
doc = tokenizer(doc)
if ngrams is not None:
if stop_words is not None:
doc = ngrams(doc, stop_words)
else:
doc = ngrams(doc)
return doc
|
Chain together an optional series of text processing steps to go from
a single document to ngrams, with or without tokenizing or preprocessing.
If analyzer is used, only the decoder argument is used, as the analyzer is
intended to replace the preprocessor, tokenizer, and ngrams steps.
Parameters
----------
analyzer: callable, default=None
tokenizer: callable, default=None
ngrams: callable, default=None
preprocessor: callable, default=None
decoder: callable, default=None
stop_words: list, default=None
Returns
-------
ngrams: list
A sequence of tokens, possibly with pairs, triples, etc.
|
_analyze
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/text.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/text.py
|
BSD-3-Clause
|
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart.
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
Parameters
----------
s : str
The string to strip.
Returns
-------
s : str
The stripped string.
See Also
--------
    strip_accents_ascii : Remove accented characters for any unicode symbol that
has a direct ASCII equivalent.
"""
try:
# If `s` is ASCII-compatible, then it does not contain any accented
# characters and we can avoid an expensive list comprehension
s.encode("ASCII", errors="strict")
return s
except UnicodeEncodeError:
normalized = unicodedata.normalize("NFKD", s)
return "".join([c for c in normalized if not unicodedata.combining(c)])
|
Transform accentuated unicode symbols into their simple counterparts.
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
Parameters
----------
s : str
The string to strip.
Returns
-------
s : str
The stripped string.
See Also
--------
strip_accents_ascii : Remove accented characters for any unicode symbol that
has a direct ASCII equivalent.
|
strip_accents_unicode
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/text.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/text.py
|
BSD-3-Clause
|
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing.
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
Parameters
----------
s : str
The string to strip.
Returns
-------
s : str
The stripped string.
See Also
--------
    strip_accents_unicode : Remove accented characters for any unicode symbol.
"""
nkfd_form = unicodedata.normalize("NFKD", s)
return nkfd_form.encode("ASCII", "ignore").decode("ASCII")
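# Illustrative sketch (editor's addition): symbols with no direct ASCII
# transliteration are dropped outright, unlike with strip_accents_unicode.
assert strip_accents_ascii("héllo") == "hello"
assert strip_accents_ascii("ß") == ""  # "ß" has no NFKD decomposition to ASCII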
|
Transform accentuated unicode symbols into ascii or nothing.
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
Parameters
----------
s : str
The string to strip.
Returns
-------
s : str
The stripped string.
See Also
--------
strip_accents_unicode : Remove accented characters for any unicode symbol.
|
strip_accents_ascii
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/text.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/text.py
|
BSD-3-Clause
|
def decode(self, doc):
"""Decode the input into a string of unicode symbols.
The decoding strategy depends on the vectorizer parameters.
Parameters
----------
doc : bytes or str
The string to decode.
Returns
-------
doc: str
A string of unicode symbols.
"""
if self.input == "filename":
with open(doc, "rb") as fh:
doc = fh.read()
elif self.input == "file":
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError(
"np.nan is an invalid document, expected byte or unicode string."
)
return doc
|
Decode the input into a string of unicode symbols.
The decoding strategy depends on the vectorizer parameters.
Parameters
----------
doc : bytes or str
The string to decode.
Returns
-------
doc: str
A string of unicode symbols.
|
decode
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/text.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/text.py
|
BSD-3-Clause
|
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
if min_n == 1:
# no need to do any slicing for unigrams
# just iterate through the original tokens
tokens = list(original_tokens)
min_n += 1
else:
tokens = []
n_original_tokens = len(original_tokens)
# bind method outside of loop to reduce overhead
tokens_append = tokens.append
space_join = " ".join
for n in range(min_n, min(max_n + 1, n_original_tokens + 1)):
for i in range(n_original_tokens - n + 1):
tokens_append(space_join(original_tokens[i : i + n]))
return tokens
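# Illustrative sketch (editor's addition): the same expansion is reachable
# through the public analyzer; here unigrams are kept and bigrams appended.
from sklearn.feature_extraction.text import CountVectorizer

analyze = CountVectorizer(ngram_range=(1, 2)).build_analyzer()
print(analyze("one two three"))
# ['one', 'two', 'three', 'one two', 'two three']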
|
Turn tokens into a sequence of n-grams after stop words filtering
|
_word_ngrams
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/text.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/text.py
|
BSD-3-Clause
|
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
min_n, max_n = self.ngram_range
if min_n == 1:
# no need to do any slicing for unigrams
# iterate through the string
ngrams = list(text_document)
min_n += 1
else:
ngrams = []
# bind method outside of loop to reduce overhead
ngrams_append = ngrams.append
for n in range(min_n, min(max_n + 1, text_len + 1)):
for i in range(text_len - n + 1):
ngrams_append(text_document[i : i + n])
return ngrams
|
Tokenize text_document into a sequence of character n-grams
|
_char_ngrams
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/text.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/text.py
|
BSD-3-Clause
|
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
operating only inside word boundaries. n-grams at the edges
of words are padded with space."""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
# bind method outside of loop to reduce overhead
ngrams_append = ngrams.append
for w in text_document.split():
w = " " + w + " "
w_len = len(w)
for n in range(min_n, max_n + 1):
offset = 0
ngrams_append(w[offset : offset + n])
while offset + n < w_len:
offset += 1
ngrams_append(w[offset : offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
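# Illustrative sketch (editor's addition): because every word is padded with
# spaces, char_wb n-grams never span a word boundary.
from sklearn.feature_extraction.text import CountVectorizer

analyze = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3)).build_analyzer()
print(analyze("ab cd"))
# [' ab', 'ab ', ' cd', 'cd ']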
|
Whitespace-sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
operating only inside word boundaries. n-grams at the edges
of words are padded with space.
|
_char_wb_ngrams
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/text.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/text.py
|
BSD-3-Clause
|
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization.
Returns
-------
preprocessor: callable
A function to preprocess the text before tokenization.
"""
if self.preprocessor is not None:
return self.preprocessor
# accent stripping
if not self.strip_accents:
strip_accents = None
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == "ascii":
strip_accents = strip_accents_ascii
elif self.strip_accents == "unicode":
strip_accents = strip_accents_unicode
else:
raise ValueError(
'Invalid value for "strip_accents": %s' % self.strip_accents
)
return partial(_preprocess, accent_function=strip_accents, lower=self.lowercase)
|
Return a function to preprocess the text before tokenization.
Returns
-------
preprocessor: callable
A function to preprocess the text before tokenization.
|
build_preprocessor
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/text.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/text.py
|
BSD-3-Clause
|
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens.
Returns
-------
tokenizer: callable
A function to split a string into a sequence of tokens.
"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
if token_pattern.groups > 1:
raise ValueError(
"More than 1 capturing group in token pattern. Only a single "
"group should be captured."
)
return token_pattern.findall
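# Illustrative sketch (editor's addition): the default token_pattern keeps
# runs of two or more word characters and drops punctuation; note that the
# tokenizer itself does not lowercase.
from sklearn.feature_extraction.text import CountVectorizer

tokenize = CountVectorizer().build_tokenizer()
print(tokenize("Don't split me, please!"))
# ['Don', 'split', 'me', 'please']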
|
Return a function that splits a string into a sequence of tokens.
Returns
-------
tokenizer: callable
A function to split a string into a sequence of tokens.
|
build_tokenizer
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/text.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/text.py
|
BSD-3-Clause
|
def _check_stop_words_consistency(self, stop_words, preprocess, tokenize):
"""Check if stop words are consistent
Returns
-------
is_consistent : True if stop words are consistent with the preprocessor
and tokenizer, False if they are not, None if the check
was previously performed, "error" if it could not be
performed (e.g. because of the use of a custom
preprocessor / tokenizer)
"""
if id(self.stop_words) == getattr(self, "_stop_words_id", None):
        # Stop words were previously validated
return None
# NB: stop_words is validated, unlike self.stop_words
try:
inconsistent = set()
for w in stop_words or ():
tokens = list(tokenize(preprocess(w)))
for token in tokens:
if token not in stop_words:
inconsistent.add(token)
self._stop_words_id = id(self.stop_words)
if inconsistent:
warnings.warn(
"Your stop_words may be inconsistent with "
"your preprocessing. Tokenizing the stop "
"words generated tokens %r not in "
"stop_words." % sorted(inconsistent)
)
return not inconsistent
except Exception:
# Failed to check stop words consistency (e.g. because a custom
# preprocessor or tokenizer was used)
self._stop_words_id = id(self.stop_words)
return "error"
|
Check if stop words are consistent
Returns
-------
is_consistent : True if stop words are consistent with the preprocessor
and tokenizer, False if they are not, None if the check
was previously performed, "error" if it could not be
performed (e.g. because of the use of a custom
preprocessor / tokenizer)
|
_check_stop_words_consistency
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/text.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/text.py
|
BSD-3-Clause
|
def build_analyzer(self):
"""Return a callable to process input data.
The callable handles preprocessing, tokenization, and n-grams generation.
Returns
-------
analyzer: callable
A function to handle preprocessing, tokenization
and n-grams generation.
"""
if callable(self.analyzer):
return partial(_analyze, analyzer=self.analyzer, decoder=self.decode)
preprocess = self.build_preprocessor()
if self.analyzer == "char":
return partial(
_analyze,
ngrams=self._char_ngrams,
preprocessor=preprocess,
decoder=self.decode,
)
elif self.analyzer == "char_wb":
return partial(
_analyze,
ngrams=self._char_wb_ngrams,
preprocessor=preprocess,
decoder=self.decode,
)
elif self.analyzer == "word":
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
self._check_stop_words_consistency(stop_words, preprocess, tokenize)
return partial(
_analyze,
ngrams=self._word_ngrams,
tokenizer=tokenize,
preprocessor=preprocess,
decoder=self.decode,
stop_words=stop_words,
)
else:
raise ValueError(
"%s is not a valid tokenization scheme/analyzer" % self.analyzer
)
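# Illustrative sketch (editor's addition): the returned callable folds
# decoding, preprocessing, tokenization, stop-word removal and n-gram
# generation into a single step.
from sklearn.feature_extraction.text import CountVectorizer

analyze = CountVectorizer(lowercase=True, stop_words=["the"]).build_analyzer()
print(analyze("The Quick Fox"))  # ['quick', 'fox']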
|
Return a callable to process input data.
The callable handles preprocessing, tokenization, and n-grams generation.
Returns
-------
analyzer: callable
A function to handle preprocessing, tokenization
and n-grams generation.
|
build_analyzer
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/text.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/text.py
|
BSD-3-Clause
|
def _check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fitted)"""
if not hasattr(self, "vocabulary_"):
self._validate_vocabulary()
if not self.fixed_vocabulary_:
raise NotFittedError("Vocabulary not fitted or provided")
if len(self.vocabulary_) == 0:
raise ValueError("Vocabulary is empty")
|
Check if vocabulary is empty or missing (not fitted)
|
_check_vocabulary
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/text.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/text.py
|
BSD-3-Clause
|
def fit(self, X, y=None):
"""Only validates estimator's parameters.
    This method allows one to: (i) validate the estimator's parameters and
(ii) be consistent with the scikit-learn transformer API.
Parameters
----------
X : ndarray of shape [n_samples, n_features]
Training data.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
HashingVectorizer instance.
"""
# triggers a parameter validation
if isinstance(X, str):
raise ValueError(
"Iterable over raw text documents expected, string object received."
)
self._warn_for_unused_params()
self._validate_ngram_range()
self._get_hasher().fit(X, y=y)
return self
|
Only validates estimator's parameters.
This method allows one to: (i) validate the estimator's parameters and
(ii) be consistent with the scikit-learn transformer API.
Parameters
----------
X : ndarray of shape [n_samples, n_features]
Training data.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
HashingVectorizer instance.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/text.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/text.py
|
BSD-3-Clause
|
def transform(self, X):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
Returns
-------
X : sparse matrix of shape (n_samples, n_features)
Document-term matrix.
"""
if isinstance(X, str):
raise ValueError(
"Iterable over raw text documents expected, string object received."
)
self._validate_ngram_range()
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
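# Illustrative sketch (editor's addition): hashing is stateless, so transform
# needs no vocabulary learned from the corpus beforehand.
from sklearn.feature_extraction.text import HashingVectorizer

hv = HashingVectorizer(n_features=16, norm=None)
X = hv.transform(["the cat sat", "on the mat"])
print(X.shape)  # (2, 16)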
|
Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
Returns
-------
X : sparse matrix of shape (n_samples, n_features)
Document-term matrix.
|
transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/text.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/text.py
|
BSD-3-Clause
|
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.issparse(X) and X.format == "csr":
return np.bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(X.indptr)
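# Illustrative sketch (editor's addition): per column, the document frequency
# is the number of rows with a non-zero entry.
import numpy as np
import scipy.sparse as sp

X = sp.csr_matrix(np.array([[1, 0, 2], [3, 0, 0]]))
print(_document_frequency(X))  # [2 0 1]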
|
Count the number of non-zero values for each feature in sparse X.
|
_document_frequency
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/text.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/text.py
|
BSD-3-Clause
|
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(vocabulary.items())
map_index = np.empty(len(sorted_features), dtype=X.indices.dtype)
for new_val, (term, old_val) in enumerate(sorted_features):
vocabulary[term] = new_val
map_index[old_val] = new_val
X.indices = map_index.take(X.indices, mode="clip")
return X
|
Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
|
_sort_features
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/text.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/text.py
|
BSD-3-Clause
|
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = []
indptr = []
values = _make_int_array()
indptr.append(0)
for doc in raw_documents:
feature_counter = {}
for feature in analyze(doc):
try:
feature_idx = vocabulary[feature]
if feature_idx not in feature_counter:
feature_counter[feature_idx] = 1
else:
feature_counter[feature_idx] += 1
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
j_indices.extend(feature_counter.keys())
values.extend(feature_counter.values())
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError(
"empty vocabulary; perhaps the documents only contain stop words"
)
if indptr[-1] > np.iinfo(np.int32).max: # = 2**31 - 1
if _IS_32BIT:
raise ValueError(
(
"sparse CSR array has {} non-zero "
"elements and requires 64 bit indexing, "
"which is unsupported with 32 bit Python."
).format(indptr[-1])
)
indices_dtype = np.int64
else:
indices_dtype = np.int32
j_indices = np.asarray(j_indices, dtype=indices_dtype)
indptr = np.asarray(indptr, dtype=indices_dtype)
values = np.frombuffer(values, dtype=np.intc)
X = sp.csr_matrix(
(values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype,
)
X.sort_indices()
return vocabulary, X
|
Create sparse feature matrix, and vocabulary where fixed_vocab=False
|
_count_vocab
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/text.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/text.py
|
BSD-3-Clause
|
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return document-term matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which generates either str, unicode or file objects.
y : None
This parameter is ignored.
Returns
-------
X : array of shape (n_samples, n_features)
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
if isinstance(raw_documents, str):
raise ValueError(
"Iterable over raw text documents expected, string object received."
)
self._validate_ngram_range()
self._warn_for_unused_params()
self._validate_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
if self.fixed_vocabulary_ and self.lowercase:
for term in self.vocabulary:
if any(map(str.isupper, term)):
warnings.warn(
"Upper case characters found in"
" vocabulary while 'lowercase'"
" is True. These entries will not"
" be matched with any documents"
)
break
vocabulary, X = self._count_vocab(raw_documents, self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
n_doc = X.shape[0]
max_doc_count = max_df if isinstance(max_df, Integral) else max_df * n_doc
min_doc_count = min_df if isinstance(min_df, Integral) else min_df * n_doc
if max_doc_count < min_doc_count:
raise ValueError("max_df corresponds to < documents than min_df")
if max_features is not None:
X = self._sort_features(X, vocabulary)
X = self._limit_features(
X, vocabulary, max_doc_count, min_doc_count, max_features
)
if max_features is None:
X = self._sort_features(X, vocabulary)
self.vocabulary_ = vocabulary
return X
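# Illustrative sketch (editor's addition): learning the vocabulary and the
# document-term counts in a single pass.
from sklearn.feature_extraction.text import CountVectorizer

docs = ["apple banana apple", "banana cherry"]
cv = CountVectorizer()
X = cv.fit_transform(docs)
print(cv.get_feature_names_out())  # ['apple' 'banana' 'cherry']
print(X.toarray())                 # [[2 1 0], [0 1 1]]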
|
Learn the vocabulary dictionary and return document-term matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which generates either str, unicode or file objects.
y : None
This parameter is ignored.
Returns
-------
X : array of shape (n_samples, n_features)
Document-term matrix.
|
fit_transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/text.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/text.py
|
BSD-3-Clause
|
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which generates either str, unicode or file objects.
Returns
-------
X : sparse matrix of shape (n_samples, n_features)
Document-term matrix.
"""
if isinstance(raw_documents, str):
raise ValueError(
"Iterable over raw text documents expected, string object received."
)
self._check_vocabulary()
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
|
Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which generates either str, unicode or file objects.
Returns
-------
X : sparse matrix of shape (n_samples, n_features)
Document-term matrix.
|
transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/text.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/text.py
|
BSD-3-Clause
|
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document-term matrix.
Returns
-------
X_original : list of arrays of shape (n_samples,)
List of arrays of terms.
"""
self._check_vocabulary()
# We need CSR format for fast row manipulations.
X = check_array(X, accept_sparse="csr")
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
if sp.issparse(X):
return [
inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)
]
else:
return [
inverse_vocabulary[np.flatnonzero(X[i, :])].ravel()
for i in range(n_samples)
]
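# Illustrative sketch (editor's addition): only term presence per document is
# recovered, not the original word order.
from sklearn.feature_extraction.text import CountVectorizer

cv = CountVectorizer()
X = cv.fit_transform(["red green", "green blue"])
print(cv.inverse_transform(X))
# [array(['green', 'red'], ...), array(['blue', 'green'], ...)]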
|
Return terms per document with nonzero entries in X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document-term matrix.
Returns
-------
X_original : list of arrays of shape (n_samples,)
List of arrays of terms.
|
inverse_transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/text.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/text.py
|
BSD-3-Clause
|
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Not used, present here for API consistency by convention.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
"""
self._check_vocabulary()
return np.asarray(
[t for t, i in sorted(self.vocabulary_.items(), key=itemgetter(1))],
dtype=object,
)
|
Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Not used, present here for API consistency by convention.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
|
get_feature_names_out
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/text.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/text.py
|
BSD-3-Clause
|
def fit(self, X, y=None):
"""Learn the idf vector (global term weights).
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
A matrix of term/token counts.
y : None
This parameter is not needed to compute tf-idf.
Returns
-------
self : object
Fitted transformer.
"""
# large sparse data is not supported for 32bit platforms because
# _document_frequency uses np.bincount which works on arrays of
# dtype NPY_INTP which is int32 for 32bit platforms. See #20923
X = validate_data(
self, X, accept_sparse=("csr", "csc"), accept_large_sparse=not _IS_32BIT
)
if not sp.issparse(X):
X = sp.csr_matrix(X)
dtype = X.dtype if X.dtype in (np.float64, np.float32) else np.float64
if self.use_idf:
n_samples, _ = X.shape
df = _document_frequency(X)
df = df.astype(dtype, copy=False)
# perform idf smoothing if required
df += float(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log+1 instead of log makes sure terms with zero idf don't get
# suppressed entirely.
# Force the dtype of `idf_` to be the same as `df`. In NumPy < 2, the dtype
# was depending on the value of `n_samples`.
self.idf_ = np.full_like(df, fill_value=n_samples, dtype=dtype)
self.idf_ /= df
# `np.log` preserves the dtype of `df` and thus `dtype`.
np.log(self.idf_, out=self.idf_)
self.idf_ += 1.0
return self
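# Worked example (editor's addition): with smooth_idf=True the stored vector
# follows idf = ln((1 + n) / (1 + df)) + 1, matching the code above.
import numpy as np
from sklearn.feature_extraction.text import TfidfTransformer

counts = np.array([[1, 0], [1, 1]])  # term 0 in both docs, term 1 in one
tr = TfidfTransformer(smooth_idf=True).fit(counts)
expected = np.log((1 + 2) / (1 + np.array([2.0, 1.0]))) + 1
print(np.allclose(tr.idf_, expected))  # True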
|
Learn the idf vector (global term weights).
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
A matrix of term/token counts.
y : None
This parameter is not needed to compute tf-idf.
Returns
-------
self : object
Fitted transformer.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/text.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/text.py
|
BSD-3-Clause
|
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation.
Parameters
----------
X : sparse matrix of (n_samples, n_features)
A matrix of term/token counts.
copy : bool, default=True
Whether to copy X and operate on the copy or perform in-place
operations. `copy=False` will only be effective with CSR sparse matrix.
Returns
-------
vectors : sparse matrix of shape (n_samples, n_features)
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self)
X = validate_data(
self,
X,
accept_sparse="csr",
dtype=[np.float64, np.float32],
copy=copy,
reset=False,
)
if not sp.issparse(X):
X = sp.csr_matrix(X, dtype=X.dtype)
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1.0
if hasattr(self, "idf_"):
        # the columns of X (CSR matrix) can be accessed with `X.indices` and
        # multiplied with the corresponding `idf` value
X.data *= self.idf_[X.indices]
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
|
Transform a count matrix to a tf or tf-idf representation.
Parameters
----------
X : sparse matrix of (n_samples, n_features)
A matrix of term/token counts.
copy : bool, default=True
Whether to copy X and operate on the copy or perform in-place
operations. `copy=False` will only be effective with CSR sparse matrix.
Returns
-------
vectors : sparse matrix of shape (n_samples, n_features)
Tf-idf-weighted document-term matrix.
|
transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/text.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/text.py
|
BSD-3-Clause
|
def idf_(self):
"""Inverse document frequency vector, only defined if `use_idf=True`.
Returns
-------
ndarray of shape (n_features,)
"""
if not hasattr(self, "_tfidf"):
raise NotFittedError(
f"{self.__class__.__name__} is not fitted yet. Call 'fit' with "
"appropriate arguments before using this attribute."
)
return self._tfidf.idf_
|
Inverse document frequency vector, only defined if `use_idf=True`.
Returns
-------
ndarray of shape (n_features,)
|
idf_
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/text.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/text.py
|
BSD-3-Clause
|
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
An iterable which generates either str, unicode or file objects.
y : None
This parameter is not needed to compute tfidf.
Returns
-------
self : object
Fitted vectorizer.
"""
self._check_params()
self._warn_for_unused_params()
self._tfidf = TfidfTransformer(
norm=self.norm,
use_idf=self.use_idf,
smooth_idf=self.smooth_idf,
sublinear_tf=self.sublinear_tf,
)
X = super().fit_transform(raw_documents)
self._tfidf.fit(X)
return self
|
Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
An iterable which generates either str, unicode or file objects.
y : None
This parameter is not needed to compute tfidf.
Returns
-------
self : object
Fitted vectorizer.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/text.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/text.py
|
BSD-3-Clause
|
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return document-term matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which generates either str, unicode or file objects.
y : None
This parameter is ignored.
Returns
-------
X : sparse matrix of (n_samples, n_features)
Tf-idf-weighted document-term matrix.
"""
self._check_params()
self._tfidf = TfidfTransformer(
norm=self.norm,
use_idf=self.use_idf,
smooth_idf=self.smooth_idf,
sublinear_tf=self.sublinear_tf,
)
X = super().fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
|
Learn vocabulary and idf, return document-term matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which generates either str, unicode or file objects.
y : None
This parameter is ignored.
Returns
-------
X : sparse matrix of (n_samples, n_features)
Tf-idf-weighted document-term matrix.
|
fit_transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/text.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/text.py
|
BSD-3-Clause
|
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
An iterable which generates either str, unicode or file objects.
Returns
-------
X : sparse matrix of (n_samples, n_features)
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, msg="The TF-IDF vectorizer is not fitted")
X = super().transform(raw_documents)
return self._tfidf.transform(X, copy=False)
|
Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
An iterable which generates either str, unicode or file objects.
Returns
-------
X : sparse matrix of (n_samples, n_features)
Tf-idf-weighted document-term matrix.
|
transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/text.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/text.py
|
BSD-3-Clause
|
def _add_iterable_element(
self,
f,
v,
feature_names,
vocab,
*,
fitting=True,
transforming=False,
indices=None,
values=None,
):
"""Add feature names for iterable of strings"""
for vv in v:
if isinstance(vv, str):
feature_name = "%s%s%s" % (f, self.separator, vv)
vv = 1
else:
raise TypeError(
f"Unsupported type {type(vv)} in iterable "
"value. Only iterables of string are "
"supported."
)
if fitting and feature_name not in vocab:
vocab[feature_name] = len(feature_names)
feature_names.append(feature_name)
if transforming and feature_name in vocab:
indices.append(vocab[feature_name])
values.append(self.dtype(vv))
|
Add feature names for iterable of strings
|
_add_iterable_element
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/_dict_vectorizer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/_dict_vectorizer.py
|
BSD-3-Clause
|
def fit(self, X, y=None):
"""Learn a list of feature name -> indices mappings.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
.. versionchanged:: 0.24
Accepts multiple string values for one categorical feature.
y : (ignored)
Ignored parameter.
Returns
-------
self : object
DictVectorizer class instance.
"""
feature_names = []
vocab = {}
for x in X:
for f, v in x.items():
if isinstance(v, str):
feature_name = "%s%s%s" % (f, self.separator, v)
elif isinstance(v, Number) or (v is None):
feature_name = f
elif isinstance(v, Mapping):
raise TypeError(
f"Unsupported value type {type(v)} "
f"for {f}: {v}.\n"
"Mapping objects are not supported."
)
elif isinstance(v, Iterable):
feature_name = None
self._add_iterable_element(f, v, feature_names, vocab)
if feature_name is not None:
if feature_name not in vocab:
vocab[feature_name] = len(feature_names)
feature_names.append(feature_name)
if self.sort:
feature_names.sort()
vocab = {f: i for i, f in enumerate(feature_names)}
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return self
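# Illustrative sketch (editor's addition): string values become one-hot
# "name=value" features, while numeric values keep the bare feature name.
from sklearn.feature_extraction import DictVectorizer

dv = DictVectorizer(sparse=False)
dv.fit([{"city": "London", "temp": 12.0}, {"city": "Paris", "temp": 9.0}])
print(dv.get_feature_names_out())  # ['city=London' 'city=Paris' 'temp']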
|
Learn a list of feature name -> indices mappings.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
.. versionchanged:: 0.24
Accepts multiple string values for one categorical feature.
y : (ignored)
Ignored parameter.
Returns
-------
self : object
DictVectorizer class instance.
|
fit
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/_dict_vectorizer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/_dict_vectorizer.py
|
BSD-3-Clause
|
def inverse_transform(self, X, dict_type=dict):
"""Transform array or sparse matrix X back to feature mappings.
X must have been produced by this DictVectorizer's transform or
fit_transform method; it may only have passed through transformers
that preserve the number of features and their order.
In the case of one-hot/one-of-K coding, the constructed feature
names and values are returned rather than the original ones.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Sample matrix.
dict_type : type, default=dict
Constructor for feature mappings. Must conform to the
collections.Mapping API.
Returns
-------
X_original : list of dict_type objects of shape (n_samples,)
Feature mappings for the samples in X.
"""
check_is_fitted(self, "feature_names_")
# COO matrix is not subscriptable
X = check_array(X, accept_sparse=["csr", "csc"])
n_samples = X.shape[0]
names = self.feature_names_
dicts = [dict_type() for _ in range(n_samples)]
if sp.issparse(X):
for i, j in zip(*X.nonzero()):
dicts[i][names[j]] = X[i, j]
else:
for i, d in enumerate(dicts):
for j, v in enumerate(X[i, :]):
if v != 0:
d[names[j]] = X[i, j]
return dicts
|
Transform array or sparse matrix X back to feature mappings.
X must have been produced by this DictVectorizer's transform or
fit_transform method; it may only have passed through transformers
that preserve the number of features and their order.
In the case of one-hot/one-of-K coding, the constructed feature
names and values are returned rather than the original ones.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Sample matrix.
dict_type : type, default=dict
Constructor for feature mappings. Must conform to the
collections.Mapping API.
Returns
-------
X_original : list of dict_type objects of shape (n_samples,)
Feature mappings for the samples in X.
|
inverse_transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/_dict_vectorizer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/_dict_vectorizer.py
|
BSD-3-Clause
|
def transform(self, X):
"""Transform feature->value dicts to array or sparse matrix.
Named features not encountered during fit or fit_transform will be
silently ignored.
Parameters
----------
X : Mapping or iterable over Mappings of shape (n_samples,)
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
check_is_fitted(self, ["feature_names_", "vocabulary_"])
return self._transform(X, fitting=False)
|
Transform feature->value dicts to array or sparse matrix.
Named features not encountered during fit or fit_transform will be
silently ignored.
Parameters
----------
X : Mapping or iterable over Mappings of shape (n_samples,)
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
|
transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/_dict_vectorizer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/_dict_vectorizer.py
|
BSD-3-Clause
|
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Not used, present here for API consistency by convention.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
"""
check_is_fitted(self, "feature_names_")
if any(not isinstance(name, str) for name in self.feature_names_):
feature_names = [str(name) for name in self.feature_names_]
else:
feature_names = self.feature_names_
return np.asarray(feature_names, dtype=object)
|
Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Not used, present here for API consistency by convention.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
|
get_feature_names_out
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/_dict_vectorizer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/_dict_vectorizer.py
|
BSD-3-Clause
|
def restrict(self, support, indices=False):
"""Restrict the features to those in support using feature selection.
This function modifies the estimator in-place.
Parameters
----------
support : array-like
Boolean mask or list of indices (as returned by the get_support
member of feature selectors).
indices : bool, default=False
Whether support is a list of indices.
Returns
-------
self : object
DictVectorizer class instance.
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> from sklearn.feature_selection import SelectKBest, chi2
>>> v = DictVectorizer()
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> support = SelectKBest(chi2, k=2).fit(X, [0, 1])
>>> v.get_feature_names_out()
array(['bar', 'baz', 'foo'], ...)
>>> v.restrict(support.get_support())
DictVectorizer()
>>> v.get_feature_names_out()
array(['bar', 'foo'], ...)
"""
check_is_fitted(self, "feature_names_")
if not indices:
support = np.where(support)[0]
names = self.feature_names_
new_vocab = {}
for i in support:
new_vocab[names[i]] = len(new_vocab)
self.vocabulary_ = new_vocab
self.feature_names_ = [
f for f, i in sorted(new_vocab.items(), key=itemgetter(1))
]
return self
|
Restrict the features to those in support using feature selection.
This function modifies the estimator in-place.
Parameters
----------
support : array-like
Boolean mask or list of indices (as returned by the get_support
member of feature selectors).
indices : bool, default=False
Whether support is a list of indices.
Returns
-------
self : object
DictVectorizer class instance.
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> from sklearn.feature_selection import SelectKBest, chi2
>>> v = DictVectorizer()
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> support = SelectKBest(chi2, k=2).fit(X, [0, 1])
>>> v.get_feature_names_out()
array(['bar', 'baz', 'foo'], ...)
>>> v.restrict(support.get_support())
DictVectorizer()
>>> v.get_feature_names_out()
array(['bar', 'foo'], ...)
|
restrict
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/_dict_vectorizer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/_dict_vectorizer.py
|
BSD-3-Clause
|
def transform(self, raw_X):
"""Transform a sequence of instances to a scipy.sparse matrix.
Parameters
----------
raw_X : iterable over iterable over raw features, length = n_samples
        Samples. Each sample must be an iterable (e.g., a list or tuple)
containing/generating feature names (and optionally values, see
the input_type constructor argument) which will be hashed.
raw_X need not support the len function, so it can be the result
of a generator; n_samples is determined on the fly.
Returns
-------
X : sparse matrix of shape (n_samples, n_features)
Feature matrix, for use with estimators or further transformers.
"""
raw_X = iter(raw_X)
if self.input_type == "dict":
raw_X = (_iteritems(d) for d in raw_X)
elif self.input_type == "string":
first_raw_X = next(raw_X)
if isinstance(first_raw_X, str):
raise ValueError(
"Samples can not be a single string. The input must be an iterable"
" over iterables of strings."
)
raw_X_ = chain([first_raw_X], raw_X)
raw_X = (((f, 1) for f in x) for x in raw_X_)
indices, indptr, values = _hashing_transform(
raw_X, self.n_features, self.dtype, self.alternate_sign, seed=0
)
n_samples = indptr.shape[0] - 1
if n_samples == 0:
raise ValueError("Cannot vectorize empty sequence.")
X = sp.csr_matrix(
(values, indices, indptr),
dtype=self.dtype,
shape=(n_samples, self.n_features),
)
X.sum_duplicates() # also sorts the indices
return X
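# Illustrative sketch (editor's addition): features are hashed into a fixed
# number of columns; no vocabulary is stored, so collisions are possible.
from sklearn.feature_extraction import FeatureHasher

fh = FeatureHasher(n_features=8, input_type="dict")
X = fh.transform([{"dog": 1, "cat": 2}, {"dog": 2}])
print(X.shape)  # (2, 8)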
|
Transform a sequence of instances to a scipy.sparse matrix.
Parameters
----------
raw_X : iterable over iterable over raw features, length = n_samples
    Samples. Each sample must be an iterable (e.g., a list or tuple)
containing/generating feature names (and optionally values, see
the input_type constructor argument) which will be hashed.
raw_X need not support the len function, so it can be the result
of a generator; n_samples is determined on the fly.
Returns
-------
X : sparse matrix of shape (n_samples, n_features)
Feature matrix, for use with estimators or further transformers.
|
transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/_hash.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/_hash.py
|
BSD-3-Clause
|
def test_dictvectorizer_dense_sparse_equivalence():
"""Check the equivalence between between sparse and dense DictVectorizer.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/19978
"""
movie_entry_fit = [
{"category": ["thriller", "drama"], "year": 2003},
{"category": ["animation", "family"], "year": 2011},
{"year": 1974},
]
movie_entry_transform = [{"category": ["thriller"], "unseen_feature": "3"}]
dense_vectorizer = DictVectorizer(sparse=False)
sparse_vectorizer = DictVectorizer(sparse=True)
dense_vector_fit = dense_vectorizer.fit_transform(movie_entry_fit)
sparse_vector_fit = sparse_vectorizer.fit_transform(movie_entry_fit)
assert not sp.issparse(dense_vector_fit)
assert sp.issparse(sparse_vector_fit)
assert_allclose(dense_vector_fit, sparse_vector_fit.toarray())
dense_vector_transform = dense_vectorizer.transform(movie_entry_transform)
sparse_vector_transform = sparse_vectorizer.transform(movie_entry_transform)
assert not sp.issparse(dense_vector_transform)
assert sp.issparse(sparse_vector_transform)
assert_allclose(dense_vector_transform, sparse_vector_transform.toarray())
dense_inverse_transform = dense_vectorizer.inverse_transform(dense_vector_transform)
sparse_inverse_transform = sparse_vectorizer.inverse_transform(
sparse_vector_transform
)
expected_inverse = [{"category=thriller": 1.0}]
assert dense_inverse_transform == expected_inverse
assert sparse_inverse_transform == expected_inverse
|
Check the equivalence between sparse and dense DictVectorizer.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/19978
|
test_dictvectorizer_dense_sparse_equivalence
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/tests/test_dict_vectorizer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/tests/test_dict_vectorizer.py
|
BSD-3-Clause
|
def test_dict_vectorizer_unsupported_value_type():
"""Check that we raise an error when the value associated to a feature
is not supported.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/19489
"""
    class A:
        pass
vectorizer = DictVectorizer(sparse=True)
X = [{"foo": A()}]
err_msg = "Unsupported value Type"
with pytest.raises(TypeError, match=err_msg):
vectorizer.fit_transform(X)
|
Check that we raise an error when the value associated with a feature
is not supported.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/19489
|
test_dict_vectorizer_unsupported_value_type
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/tests/test_dict_vectorizer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/tests/test_dict_vectorizer.py
|
BSD-3-Clause
|
def test_dict_vectorizer_get_feature_names_out():
"""Check that integer feature names are converted to strings in
feature_names_out."""
X = [{1: 2, 3: 4}, {2: 4}]
dv = DictVectorizer(sparse=False).fit(X)
feature_names = dv.get_feature_names_out()
assert isinstance(feature_names, np.ndarray)
assert feature_names.dtype == object
assert_array_equal(feature_names, ["1", "2", "3"])
|
Check that integer feature names are converted to strings in
feature_names_out.
|
test_dict_vectorizer_get_feature_names_out
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/tests/test_dict_vectorizer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/tests/test_dict_vectorizer.py
|
BSD-3-Clause
|
def test_dict_vectorizer_not_fitted_error(method, input):
"""Check that unfitted DictVectorizer instance raises NotFittedError.
This should be part of the common test but currently they test estimator accepting
text input.
"""
dv = DictVectorizer(sparse=False)
with pytest.raises(NotFittedError):
getattr(dv, method)(input)
|
Check that an unfitted DictVectorizer instance raises NotFittedError.
This should be part of the common tests, but those currently assume the
estimator accepts text input.
|
test_dict_vectorizer_not_fitted_error
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/tests/test_dict_vectorizer.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/tests/test_dict_vectorizer.py
|
BSD-3-Clause
|
def test_feature_hasher_single_string(raw_X):
"""FeatureHasher raises error when a sample is a single string.
Non-regression test for gh-13199.
"""
msg = "Samples can not be a single string"
feature_hasher = FeatureHasher(n_features=10, input_type="string")
with pytest.raises(ValueError, match=msg):
feature_hasher.transform(raw_X)
|
FeatureHasher raises an error when a sample is a single string.
Non-regression test for gh-13199.
|
test_feature_hasher_single_string
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/tests/test_feature_hasher.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/tests/test_feature_hasher.py
|
BSD-3-Clause
|
def test_patch_extractor_wrong_input(orange_face):
"""Check that an informative error is raised if the patch_size is not valid."""
faces = _make_images(orange_face)
err_msg = "patch_size must be a tuple of two integers"
extractor = PatchExtractor(patch_size=(8, 8, 8))
with pytest.raises(ValueError, match=err_msg):
extractor.transform(faces)
|
Check that an informative error is raised if the patch_size is not valid.
|
test_patch_extractor_wrong_input
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/tests/test_image.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/tests/test_image.py
|
BSD-3-Clause
|
def test_countvectorizer_custom_token_pattern():
"""Check `get_feature_names_out()` when a custom token pattern is passed.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/12971
"""
corpus = [
"This is the 1st document in my corpus.",
"This document is the 2nd sample.",
"And this is the 3rd one.",
"Is this the 4th document?",
]
token_pattern = r"[0-9]{1,3}(?:st|nd|rd|th)\s\b(\w{2,})\b"
vectorizer = CountVectorizer(token_pattern=token_pattern)
vectorizer.fit_transform(corpus)
expected = ["document", "one", "sample"]
feature_names_out = vectorizer.get_feature_names_out()
assert_array_equal(feature_names_out, expected)
|
Check `get_feature_names_out()` when a custom token pattern is passed.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/12971
|
test_countvectorizer_custom_token_pattern
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/tests/test_text.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/tests/test_text.py
|
BSD-3-Clause
|
def test_countvectorizer_custom_token_pattern_with_several_group():
"""Check that we raise an error if token pattern capture several groups.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/12971
"""
corpus = [
"This is the 1st document in my corpus.",
"This document is the 2nd sample.",
"And this is the 3rd one.",
"Is this the 4th document?",
]
token_pattern = r"([0-9]{1,3}(?:st|nd|rd|th))\s\b(\w{2,})\b"
err_msg = "More than 1 capturing group in token pattern"
vectorizer = CountVectorizer(token_pattern=token_pattern)
with pytest.raises(ValueError, match=err_msg):
vectorizer.fit(corpus)
|
Check that we raise an error if the token pattern captures several groups.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/12971
|
test_countvectorizer_custom_token_pattern_with_several_group
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/tests/test_text.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/tests/test_text.py
|
BSD-3-Clause
|
def test_countvectorizer_sort_features_64bit_sparse_indices(csr_container):
"""
Check that CountVectorizer._sort_features preserves the dtype of its sparse
feature matrix.
This test is skipped on 32bit platforms, see:
https://github.com/scikit-learn/scikit-learn/pull/11295
for more details.
"""
X = csr_container((5, 5), dtype=np.int64)
# force indices and indptr to int64.
INDICES_DTYPE = np.int64
X.indices = X.indices.astype(INDICES_DTYPE)
X.indptr = X.indptr.astype(INDICES_DTYPE)
vocabulary = {"scikit-learn": 0, "is": 1, "great!": 2}
Xs = CountVectorizer()._sort_features(X, vocabulary)
assert INDICES_DTYPE == Xs.indices.dtype
|
Check that CountVectorizer._sort_features preserves the dtype of its sparse
feature matrix.
This test is skipped on 32bit platforms, see:
https://github.com/scikit-learn/scikit-learn/pull/11295
for more details.
|
test_countvectorizer_sort_features_64bit_sparse_indices
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/tests/test_text.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/tests/test_text.py
|
BSD-3-Clause
|
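For context, a sketch of why the explicit casts in the test are needed: freshly constructed CSR matrices typically carry int32 index arrays (this is platform-dependent, hence the hedging):

import numpy as np
from scipy import sparse

X = sparse.csr_matrix((5, 5), dtype=np.int64)
print(X.indices.dtype)  # typically int32 on a fresh matrix
X.indices = X.indices.astype(np.int64)
X.indptr = X.indptr.astype(np.int64)
print(X.indices.dtype, X.indptr.dtype)  # int64 int64 after the casts
|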
def test_vectorizers_do_not_have_set_output(Estimator):
"""Check that vectorizers do not define set_output."""
est = Estimator()
assert not hasattr(est, "set_output")
|
Check that vectorizers do not define set_output.
|
test_vectorizers_do_not_have_set_output
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/tests/test_text.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/tests/test_text.py
|
BSD-3-Clause
|
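A sketch of the same check spelled out against concrete text vectorizers (assuming these are among the estimators the parametrized test covers); they emit sparse matrices, so the pandas-oriented set_output API is deliberately absent:

from sklearn.feature_extraction.text import (
    CountVectorizer,
    HashingVectorizer,
    TfidfVectorizer,
)

for est in (CountVectorizer(), HashingVectorizer(), TfidfVectorizer()):
    assert not hasattr(est, "set_output")
|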
def test_tfidf_transformer_copy(csr_container):
"""Check the behaviour of TfidfTransformer.transform with the copy parameter."""
X = sparse.rand(10, 20000, dtype=np.float64, random_state=42)
X_csr = csr_container(X)
# keep a copy of the original matrix for later comparison
X_csr_original = X_csr.copy()
transformer = TfidfTransformer().fit(X_csr)
X_transform = transformer.transform(X_csr, copy=True)
assert_allclose_dense_sparse(X_csr, X_csr_original)
assert X_transform is not X_csr
X_transform = transformer.transform(X_csr, copy=False)
assert X_transform is X_csr
with pytest.raises(AssertionError):
assert_allclose_dense_sparse(X_csr, X_csr_original)
|
Check the behaviour of TfidfTransformer.transform with the copy parameter.
|
test_tfidf_transformer_copy
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/tests/test_text.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/tests/test_text.py
|
BSD-3-Clause
|
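A minimal sketch of the copy semantics being asserted, assuming a small random CSR matrix:

import numpy as np
from scipy import sparse
from sklearn.feature_extraction.text import TfidfTransformer

X = sparse.rand(5, 10, density=0.5, format="csr", dtype=np.float64, random_state=0)
transformer = TfidfTransformer().fit(X)
out = transformer.transform(X, copy=False)
assert out is X  # X was rescaled in place; no new matrix was allocated
|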
def test_tfidf_vectorizer_perserve_dtype_idf(dtype):
"""Check that `idf_` has the same dtype as the input data.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/30016
"""
X = [str(uuid.uuid4()) for i in range(100_000)]
vectorizer = TfidfVectorizer(dtype=dtype).fit(X)
assert vectorizer.idf_.dtype == dtype
|
Check that `idf_` has the same dtype as the input data.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/30016
|
test_tfidf_vectorizer_perserve_dtype_idf
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_extraction/tests/test_text.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/tests/test_text.py
|
BSD-3-Clause
|
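A sketch of the expected post-fix behaviour on a toy corpus (the documents below are illustrative):

import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer

docs = ["first document", "second document", "third one"]
vectorizer = TfidfVectorizer(dtype=np.float32).fit(docs)
print(vectorizer.idf_.dtype)  # float32, matching the requested dtype
|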
def get_support(self, indices=False):
"""
Get a mask, or integer index, of the features selected.
Parameters
----------
indices : bool, default=False
If True, the return value will be an array of integers, rather
than a boolean mask.
Returns
-------
support : array
An index that selects the retained features from a feature vector.
If `indices` is False, this is a boolean array of shape
[# input features], in which an element is True iff its
corresponding feature is selected for retention. If `indices` is
True, this is an integer array of shape [# output features] whose
values are indices into the input feature vector.
"""
mask = self._get_support_mask()
return mask if not indices else np.nonzero(mask)[0]
|
Get a mask, or integer index, of the features selected.
Parameters
----------
indices : bool, default=False
If True, the return value will be an array of integers, rather
than a boolean mask.
Returns
-------
support : array
An index that selects the retained features from a feature vector.
If `indices` is False, this is a boolean array of shape
[# input features], in which an element is True iff its
corresponding feature is selected for retention. If `indices` is
True, this is an integer array of shape [# output features] whose
values are indices into the input feature vector.
|
get_support
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/_base.py
|
BSD-3-Clause
|
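A usage sketch with SelectKBest, one of the selectors that exposes this method:

from sklearn.datasets import load_iris
from sklearn.feature_selection import SelectKBest, f_classif

X, y = load_iris(return_X_y=True)
selector = SelectKBest(f_classif, k=2).fit(X, y)
print(selector.get_support())              # boolean mask over the 4 input features
print(selector.get_support(indices=True))  # integer indices of the 2 kept features
|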
def _get_support_mask(self):
"""
        Get the boolean mask indicating which features are selected.
Returns
-------
support : boolean array of shape [# input features]
An element is True iff its corresponding feature is selected for
retention.
"""
|
Get the boolean mask indicating which features are selected.
Returns
-------
support : boolean array of shape [# input features]
An element is True iff its corresponding feature is selected for
retention.
|
_get_support_mask
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/_base.py
|
BSD-3-Clause
|
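A hypothetical minimal subclass illustrating the contract (the class name is made up): once _get_support_mask is implemented, get_support and transform are inherited from the mixin:

import numpy as np
from sklearn.base import BaseEstimator
from sklearn.feature_selection import SelectorMixin

class KeepEvenColumns(SelectorMixin, BaseEstimator):
    """Toy selector retaining every other feature."""

    def fit(self, X, y=None):
        self.n_features_in_ = np.asarray(X).shape[1]
        return self

    def _get_support_mask(self):
        mask = np.zeros(self.n_features_in_, dtype=bool)
        mask[::2] = True
        return mask
|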
def transform(self, X):
"""Reduce X to the selected features.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
X_r : array of shape [n_samples, n_selected_features]
The input samples with only the selected features.
"""
# Preserve X when X is a dataframe and the output is configured to
# be pandas.
output_config_dense = _get_output_config("transform", estimator=self)["dense"]
preserve_X = output_config_dense != "default" and _is_pandas_df(X)
# note: we use get_tags instead of __sklearn_tags__ because this is a
# public Mixin.
X = validate_data(
self,
X,
dtype=None,
accept_sparse="csr",
ensure_all_finite=not get_tags(self).input_tags.allow_nan,
skip_check_array=preserve_X,
reset=False,
)
return self._transform(X)
|
Reduce X to the selected features.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
X_r : array of shape [n_samples, n_selected_features]
The input samples with only the selected features.
|
transform
|
python
|
scikit-learn/scikit-learn
|
sklearn/feature_selection/_base.py
|
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_selection/_base.py
|
BSD-3-Clause
|
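A sketch of the pandas-preserving path mentioned in the comments above, assuming output is configured for pandas via set_output:

from sklearn.datasets import load_iris
from sklearn.feature_selection import SelectKBest, f_classif

X, y = load_iris(return_X_y=True, as_frame=True)
selector = SelectKBest(f_classif, k=2).set_output(transform="pandas").fit(X, y)
print(selector.transform(X).columns.tolist())  # original names of the kept columns
|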