# global
import abc
from typing import Optional, Union
# local
import ivy
class _ArrayWithRandomExperimental(abc.ABC):
def dirichlet(
self: ivy.Array,
/,
*,
size: Optional[Union[ivy.Shape, ivy.NativeShape]] = None,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
seed: Optional[int] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.dirichlet. This method
simply wraps the function, and so the docstring for ivy.shuffle also
applies to this method with minimal changes.
Parameters
----------
self
Sequence of floats of length k
size
optional int or tuple of ints, Output shape. If the given shape is,
e.g., (m, n), then m * n * k samples are drawn. Default is None,
in which case a vector of length k is returned.
dtype
output array data type. If ``dtype`` is ``None``, the output array data
type will be the default floating-point data type. Default ``None``
        seed
            A python integer used as the seed for the random number generator.
out
optional output array, for writing the result to.
Returns
-------
ret
The drawn samples, of shape (size, k).
Examples
--------
>>> alpha = ivy.array([1.0, 2.0, 3.0])
>>> alpha.dirichlet()
ivy.array([0.10598304, 0.21537054, 0.67864642])
>>> alpha = ivy.array([1.0, 2.0, 3.0])
        >>> alpha.dirichlet(size=(2, 3))
ivy.array([[[0.48006698, 0.07472073, 0.44521229],
[0.55479872, 0.05426367, 0.39093761],
[0.19531053, 0.51675832, 0.28793114]],
[[0.12315625, 0.29823365, 0.5786101 ],
[0.15564976, 0.50542368, 0.33892656],
[0.1325352 , 0.44439589, 0.42306891]]])
"""
return ivy.dirichlet(self, size=size, dtype=dtype, seed=seed, out=out)
def beta(
self: ivy.Array,
beta: Union[int, ivy.Array, ivy.NativeArray],
/,
*,
shape: Optional[Union[ivy.Shape, ivy.NativeShape]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
seed: Optional[int] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.beta. This method simply
wraps the function, and so the docstring for ivy.beta also applies to
this method with minimal changes.
Parameters
----------
        self
            Input array; used as the first parameter (alpha) of the beta
            distribution.
        beta
            The second parameter of the beta distribution.
        shape
            optional output shape. If the given shape is, e.g., ``(m, n, k)``,
            then ``m * n * k`` samples are drawn. Default is ``None``, in which
            case the output shape follows the broadcast of the alpha and beta
            inputs.
        device
            device on which to create the array.
dtype
output array data type. If ``dtype`` is ``None``, the output array data
type will be the default data type. Default ``None``
        seed
            A python integer used as the seed for the random number generator.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
Drawn samples from the parameterized beta distribution with the shape of
the array.
"""
return ivy.beta(
self,
beta,
shape=shape,
device=device,
dtype=dtype,
seed=seed,
out=out,
)
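    # Usage sketch (illustrative, not part of the original docstring): draws
    # are random, so no concrete output values are shown; fixing ``seed``
    # makes them reproducible.
    #   >>> alpha = ivy.array([2.0, 2.0])
    #   >>> b = ivy.array([3.0, 3.0])
    #   >>> samples = alpha.beta(b, seed=42)  # one draw per (alpha, beta) pair
    # Each drawn value lies in [0, 1], the support of the beta distribution.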
def gamma(
self: ivy.Array,
beta: Union[int, ivy.Array, ivy.NativeArray],
/,
*,
shape: Optional[Union[ivy.Shape, ivy.NativeShape]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
seed: Optional[int] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.gamma. This method simply
wraps the function, and so the docstring for ivy.gamma also applies to
this method with minimal changes.
Parameters
----------
self
Input Array and the first parameter of the gamma distribution.
beta
The second parameter of the gamma distribution.
        shape
            If the given shape is, e.g., ``(m, n, k)``, then ``m * n * k`` samples
            are drawn. Default is ``None``, in which case the output shape follows
            the broadcast of the alpha and beta inputs.
device
device on which to create the array.
dtype
output array data type. If ``dtype`` is ``None``, the output array data
type will be the default data type. Default ``None``
        seed
            A python integer used as the seed for the random number generator.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
Drawn samples from the parameterized gamma distribution with the shape of
the input array.
"""
return ivy.gamma(
self,
beta,
shape=shape,
device=device,
dtype=dtype,
seed=seed,
out=out,
)
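    # Usage sketch (illustrative, not part of the original docstring): ``self``
    # supplies the first (alpha) parameter and ``beta`` the second, per the
    # signature above; fixing ``seed`` makes the draw reproducible.
    #   >>> alpha = ivy.array([2.0, 3.0])
    #   >>> b = ivy.array([1.0, 1.0])
    #   >>> samples = alpha.gamma(b, seed=42)
    # All drawn values are non-negative, the support of the gamma distribution.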
def poisson(
self: ivy.Array,
*,
shape: Optional[Union[ivy.Shape, ivy.NativeShape]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
seed: Optional[int] = None,
fill_value: Optional[Union[float, int]] = 0,
out: Optional[ivy.Array] = None,
    ) -> ivy.Array:
        """ivy.Array instance method variant of ivy.poisson. This method
        simply wraps the function, and so the docstring for ivy.poisson also
        applies to this method with minimal changes.
Parameters
----------
self
Input Array of rate parameter(s). It must have a shape that is broadcastable
to the requested shape
shape
            If the given shape is, e.g., ``(m, n, k)``, then ``m * n * k`` samples
            are drawn. Default is ``None``, in which case ``ivy.shape(lam)``
            samples are drawn.
device
device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc.
(Default value = None).
dtype
output array data type. If ``dtype`` is ``None``, the output array data
type will be the default floating-point data type. Default ``None``
        seed
            A python integer used as the seed for the random number generator.
fill_value
if lam is negative, fill the output array with this value
on that specific dimension.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
Drawn samples from the parameterized poisson distribution.
Examples
--------
>>> lam = ivy.array([1.0, 2.0, 3.0])
>>> lam.poisson()
ivy.array([1., 4., 4.])
>>> lam = ivy.array([1.0, 2.0, 3.0])
>>> lam.poisson(shape=(2,3))
ivy.array([[0., 2., 2.],
[1., 2., 3.]])
"""
return ivy.poisson(
self,
shape=shape,
device=device,
dtype=dtype,
seed=seed,
fill_value=fill_value,
out=out,
)
def bernoulli(
self: ivy.Array,
*,
logits: Optional[Union[float, ivy.Array, ivy.NativeArray]] = None,
shape: Optional[Union[ivy.Shape, ivy.NativeShape]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
seed: Optional[int] = None,
out: Optional[ivy.Array] = None,
    ) -> ivy.Array:
        """ivy.Array instance method variant of ivy.bernoulli. This method
        simply wraps the function, and so the docstring for ivy.bernoulli also
        applies to this method with minimal changes.
Parameters
----------
self
An N-D Array representing the probability of a 1 event.
Each entry in the Array parameterizes an independent Bernoulli
            distribution. Only one of logits or probs should be passed in.
logits
An N-D Array representing the log-odds of a 1 event.
Each entry in the Array parameterizes an independent Bernoulli
distribution where the probability of an event is sigmoid
(logits). Only one of logits or probs should be passed in.
shape
            If the given shape is, e.g., ``(m, n, k)``, then ``m * n * k`` samples
            are drawn. Default is ``None``, in which case ``ivy.shape(logits)``
            samples are drawn.
device
device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc.
(Default value = None).
dtype
output array data type. If ``dtype`` is ``None``, the output array data
type will be the default floating-point data type. Default ``None``
        seed
            A python integer used as the seed for the random number generator.
out
optional output array, for writing the result to. It must
have a shape that the inputs broadcast to.
Returns
-------
ret
Drawn samples from the Bernoulli distribution
"""
return ivy.bernoulli(
self,
logits=logits,
shape=shape,
device=device,
dtype=dtype,
seed=seed,
out=out,
)
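    # Usage sketch (illustrative, not part of the original docstring): ``self``
    # holds per-entry probabilities of a 1 event, so each drawn value is 0 or 1.
    #   >>> probs = ivy.array([0.1, 0.5, 0.9])
    #   >>> draws = probs.bernoulli(seed=42)
    # Passing ``logits`` instead applies a sigmoid parameterization; only one
    # of the two should be supplied, as the docstring notes.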
# File: ivy/ivy/data_classes/array/experimental/random.py
# global
import abc
from typing import Optional, Tuple
import ivy
class _ArrayWithSet(abc.ABC):
def unique_counts(self: ivy.Array) -> Tuple[ivy.Array, ivy.Array]:
"""ivy.Array instance method variant of ivy.unique_counts. This method
simply wraps the function, and so the docstring for ivy.unique_counts
also applies to this method with minimal changes.
Parameters
----------
self
input array. If ``x`` has more than one dimension, the function must flatten
``x`` and return the unique elements of the flattened array.
Returns
-------
ret
a namedtuple ``(values, counts)`` whose
- first element must have the field name ``values`` and must be an
array containing the unique elements of ``x``.
The array must have the same data type as ``x``.
- second element must have the field name ``counts`` and must be an array
containing the number of times each unique element occurs in ``x``.
The returned array must have same shape as ``values`` and must
have the default array index data type.
Examples
--------
        >>> x = ivy.array([0., 1., 2., 1., 0.])
>>> y = x.unique_counts()
>>> print(y)
Results(values=ivy.array([0.,1.,2.]),counts=ivy.array([2,2,1]))
"""
return ivy.unique_counts(self._data)
def unique_values(
self: ivy.Array, /, *, out: Optional[ivy.Array] = None
) -> ivy.Array:
"""Return the unique elements of an input array `x`.
.. admonition:: Data-dependent output shape
:class: important
The shapes of two of the output arrays for this function depend on the
data values in the input array; hence, array libraries which build
computation graphs (e.g., JAX, Dask, etc.) may find this function
difficult to implement without knowing array values. Accordingly,
such libraries may choose to omit this function.
See :ref:`data-dependent-output-shapes` section for more details.
.. note::
Uniqueness should be determined based on value equality
(i.e., ``x_i == x_j``). For input arrays having floating-point
data types, value-based equality implies the following behavior.
- As ``nan`` values compare as ``False``, ``nan`` values
should be considered distinct.
- As ``-0`` and ``+0`` compare as ``True``, signed zeros should
not be considered distinct, and the corresponding unique
element will be implementation-dependent (e.g., an
implementation could choose to return ``-0`` if ``-0`` occurs
before ``+0``).
Parameters
----------
x : ivy.Array or ivy.NativeArray
Input array. If `x` has more than one dimension, the function must flatten
`x` and return the unique elements of the flattened array.
out : ivy.Array, optional
Optional output array, for writing the result to. It must have a
shape that the inputs broadcast to.
Returns
-------
ivy.Array
An array containing the set of unique elements in `x`. The returned
array must have the same data type as `x`.
.. note::
The order of unique elements is not specified and may vary
between implementations.
Raises
------
TypeError
If `x` is not an instance of `ivy.Array` or `ivy.NativeArray`.
Examples
--------
>>> import ivy
>>> x = ivy.array([1, 2, 2, 3, 4, 4, 4])
>>> print(x.unique_values())
ivy.array([1, 2, 3, 4])
>>> x = ivy.array([[1, 2], [3, 4]])
>>> print(x.unique_values())
ivy.array([1, 2, 3, 4])
"""
return ivy.unique_values(self._data, out=out)
def unique_all(
self: ivy.Array,
/,
*,
axis: Optional[int] = None,
by_value: bool = True,
) -> Tuple[ivy.Array, ivy.Array, ivy.Array, ivy.Array]:
"""ivy.Array instance method variant of ivy.unique_all. This method
simply wraps the function, and so the docstring for ivy.unique_all also
applies to this method with minimal changes.
Parameters
----------
self
input array.
axis
the axis to apply unique on. If None, the unique elements of the flattened
``x`` are returned.
by_value
            If False, the unique elements will be sorted in the same order that they
            occur in ``x``. Otherwise, they will be sorted by value.
Returns
-------
ret
a namedtuple ``(values, indices, inverse_indices, counts)``.
The details can be found in the docstring for ivy.unique_all.
Examples
--------
>>> x = ivy.randint(0, 10, shape=(2, 2), seed=0)
>>> z = x.unique_all()
>>> print(z)
Results(values=ivy.array([1, 2, 5, 9]),
indices=ivy.array([3, 2, 1, 0]),
inverse_indices=ivy.array([[3, 2], [1, 0]]),
counts=ivy.array([1, 1, 1, 1]))
"""
return ivy.unique_all(self._data, axis=axis, by_value=by_value)
def unique_inverse(self: ivy.Array) -> Tuple[ivy.Array, ivy.Array]:
"""ivy.Array instance method variant of ivy.unique_inverse. This method
simply wraps the function, and so the docstring for ivy.unique_inverse
also applies to this method with minimal changes.
Parameters
----------
self
input array. If ``x`` has more than one dimension, the function must
flatten ``x`` and return the unique elements of the flattened array.
Returns
-------
ret
a namedtuple ``(values, inverse_indices)`` whose
- first element must have the field name ``values`` and must be an array
containing the unique elements of ``x``. The array must have the same data
type as ``x``.
- second element must have the field name ``inverse_indices`` and must be
an array containing the indices of ``values`` that reconstruct ``x``.
The array must have the same shape as ``x`` and must have the default
array index data type.
Examples
--------
>>> x = ivy.array([0.3,0.4,0.7,0.4,0.2,0.8,0.5])
>>> y = x.unique_inverse()
>>> print(y)
Results(values=ivy.array([0.2, 0.3, 0.4, 0.5, 0.7, 0.8]),
inverse_indices=ivy.array([1, 2, 4, 2, 0, 5, 3]))
"""
return ivy.unique_inverse(self._data)
# File: ivy/ivy/data_classes/array/set.py
from ivy.data_classes.container.base import ContainerBase
class _ContainerWithConversionExperimental(ContainerBase):
pass
# File: ivy/ivy/data_classes/container/experimental/conversions.py
# global
from typing import Optional, List, Union, Dict
# local
from ivy.data_classes.container.base import ContainerBase
import ivy
class _ContainerWithSortingExperimental(ContainerBase):
@staticmethod
def static_invert_permutation(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container, list, tuple],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.invert_permutation.
This method simply wraps the function, and so the docstring for
ivy.invert_permutation also applies to this method with minimal
changes.
"""
return ContainerBase.cont_multi_map_in_function(
"invert_permutation",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
def invert_permutation(
self: ivy.Container,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.invert_permutation.
This method simply wraps the function, and so the docstring for
ivy.invert_permutation also applies to this method with minimal
changes.
"""
return self.static_invert_permutation(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
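    # Usage sketch (illustrative): inverting a permutation ``p`` yields ``q``
    # with ``q[p[i]] == i``, applied leaf-wise across the container.
    #   >>> c = ivy.Container(a=ivy.array([2, 0, 1]))
    #   >>> c.invert_permutation()
    #   {
    #       a: ivy.array([1, 2, 0])
    #   }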
@staticmethod
def static_lexsort(
a: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
axis: Union[int, ivy.Container] = -1,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.lexsort. This method
simply wraps the function, and so the docstring for ivy.lexsort also
applies to this method with minimal changes.
Parameters
----------
a
array-like or container input to sort as keys.
axis
axis of each key to be indirectly sorted.
By default, sort over the last axis of each key.
out
optional output container, for writing the result to.
Returns
-------
ret
a container containing sorted input arrays.
Examples
--------
With :class:`ivy.Container` input:
        >>> a = ivy.Container(x = ivy.asarray([[9,4,0,4,0,2,1],[1,5,1,4,3,4,4]]),
        ...                   y = ivy.asarray([[1, 5, 2],[3, 4, 4]]))
>>> ivy.Container.static_lexsort(a)
{
            x: ivy.array([2, 0, 4, 6, 5, 3, 1]),
y: ivy.array([0, 2, 1])
}
"""
return ContainerBase.cont_multi_map_in_function(
"lexsort",
a,
axis=axis,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def lexsort(
self: ivy.Container,
/,
*,
axis: Union[int, ivy.Container] = -1,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.lexsort. This method
simply wraps the function, and so the docstring for ivy.lexsort also
applies to this method with minimal changes.
Parameters
----------
self
input container with array-like inputs to sort as keys.
axis
axis of each key to be indirectly sorted.
By default, sort over the last axis of each key.
out
optional output container, for writing the result to.
Returns
-------
ret
a container containing the sorted input arrays.
Examples
--------
        >>> a = ivy.Container(x = ivy.asarray([[9,4,0,4,0,2,1],[1,5,1,4,3,4,4]]),
        ...                   y = ivy.asarray([[1, 5, 2],[3, 4, 4]]))
>>> a.lexsort()
{
            x: ivy.array([2, 0, 4, 6, 5, 3, 1]),
y: ivy.array([0, 2, 1])
}
"""
return self.static_lexsort(
self,
axis=axis,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
# File: ivy/ivy/data_classes/container/experimental/sorting.py
# global
from typing import Optional, Union, Dict, Sequence
# local
import ivy
from ivy.data_classes.container.base import ContainerBase
# noinspection PyMissingConstructor
class _ContainerWithUtility(ContainerBase):
@staticmethod
def _static_all(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
axis: Optional[Union[int, Sequence[int], ivy.Container]] = None,
keepdims: Union[bool, ivy.Container] = False,
key_chains: Optional[
Union[Sequence[str], Dict[str, str], ivy.Container]
] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.all. This method simply
wraps the function, and so the docstring for ivy.all also applies to
this method with minimal changes.
Parameters
----------
x
input container.
axis
axis or axes along which to perform a logical AND reduction. By default, a
logical AND reduction must be performed over the entire array. If a tuple of
integers, logical AND reductions must be performed over multiple axes. A
valid ``axis`` must be an integer on the interval ``[-N, N)``, where ``N``
            is the rank (number of dimensions) of ``x``. If an ``axis`` is specified
as a negative integer, the function must determine the axis along which to
perform a reduction by counting backward from the last dimension (where
``-1`` refers to the last dimension). If provided an invalid ``axis``, the
function must raise an exception. Default ``None``.
keepdims
If ``True``, the reduced axes (dimensions) must be included in the result as
singleton dimensions, and, accordingly, the result must be compatible with
the input array (see :ref:`broadcasting`). Otherwise, if ``False``, the
            reduced axes (dimensions) must not be included in the result.
Default: ``False``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
if a logical AND reduction was performed over the entire array, the returned
container must be a zero-dimensional array containing the test result;
otherwise, the returned container must be a non-zero-dimensional array
containing the test results. The returned container must have a data type of
``bool``.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0, 1, 2]), b=ivy.array([0, 1, 1]))
        >>> y = ivy.Container._static_all(x)
>>> print(y)
{
a: ivy.array(False),
b: ivy.array(False)
}
"""
return ContainerBase.cont_multi_map_in_function(
"all",
x,
axis=axis,
keepdims=keepdims,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def all(
self: ivy.Container,
/,
*,
axis: Optional[Union[int, Sequence[int], ivy.Container]] = None,
keepdims: Union[bool, ivy.Container] = False,
key_chains: Optional[
Union[Sequence[str], Dict[str, str], ivy.Container]
] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.all. This method simply
wraps the function, and so the docstring for ivy.all also applies to
this method with minimal changes.
Parameters
----------
self
input container.
axis
axis or axes along which to perform a logical AND reduction. By default, a
logical AND reduction must be performed over the entire array. If a tuple of
integers, logical AND reductions must be performed over multiple axes. A
valid ``axis`` must be an integer on the interval ``[-N, N)``, where ``N``
            is the rank (number of dimensions) of ``self``. If an ``axis`` is specified
as a negative integer, the function must determine the axis along which to
perform a reduction by counting backward from the last dimension (where
``-1`` refers to the last dimension). If provided an invalid ``axis``, the
function must raise an exception. Default ``None``.
keepdims
If ``True``, the reduced axes (dimensions) must be included in the result as
singleton dimensions, and, accordingly, the result must be compatible with
the input array (see :ref:`broadcasting`). Otherwise, if ``False``, the
            reduced axes (dimensions) must not be included in the result.
Default: ``False``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
if a logical AND reduction was performed over the entire array, the returned
container must be a zero-dimensional array containing the test result;
otherwise, the returned container must have non-zero-dimensional arrays
containing the test results. The returned container must have a data type of
``bool``.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0, 1, 2]), b=ivy.array([0, 1, 1]))
>>> y = x.all()
>>> print(y)
{
a: ivy.array(False),
b: ivy.array(False)
}
"""
return self._static_all(
self,
axis=axis,
keepdims=keepdims,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
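    # Usage sketch (illustrative): reducing along a single axis collapses that
    # dimension in every leaf while preserving the container structure.
    #   >>> x = ivy.Container(a=ivy.array([[1, 2], [0, 3]]))
    #   >>> x.all(axis=1)
    #   {
    #       a: ivy.array([True, False])
    #   }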
@staticmethod
def _static_any(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
axis: Optional[Union[int, Sequence[int], ivy.Container]] = None,
keepdims: Union[bool, ivy.Container] = False,
key_chains: Optional[
Union[Sequence[str], Dict[str, str], ivy.Container]
] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.any. This method simply
wraps the function, and so the docstring for ivy.any also applies to
this method with minimal changes.
Parameters
----------
x
input container.
axis
axis or axes along which to perform a logical OR reduction. By default, a
logical OR reduction must be performed over the entire array. If a tuple of
integers, logical OR reductions must be performed over multiple axes. A
valid ``axis`` must be an integer on the interval ``[-N, N)``, where ``N``
            is the rank (number of dimensions) of ``x``. If an ``axis`` is specified
as a negative integer, the function must determine the axis along which to
perform a reduction by counting backward from the last dimension (where
``-1`` refers to the last dimension). If provided an invalid ``axis``, the
function must raise an exception. Default: ``None``.
keepdims
If ``True``, the reduced axes (dimensions) must be included in the result as
singleton dimensions, and, accordingly, the result must be compatible with
the input array (see :ref:`broadcasting`). Otherwise, if ``False``, the
            reduced axes (dimensions) must not be included in the result.
Default: ``False``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
if a logical OR reduction was performed over the entire array, the returned
container must be a zero-dimensional array containing the test result;
otherwise, the returned container must have non-zero-dimensional arrays
containing the test results. The returned container must have a data type of
``bool``.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0, 1, 2]), b=ivy.array([0, 0, 0]))
        >>> y = ivy.Container._static_any(x)
>>> print(y)
{
a: ivy.array(True),
b: ivy.array(False)
}
"""
return ContainerBase.cont_multi_map_in_function(
"any",
x,
axis=axis,
keepdims=keepdims,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def any(
self: ivy.Container,
/,
*,
axis: Optional[Union[int, Sequence[int], ivy.Container]] = None,
keepdims: Union[bool, ivy.Container] = False,
key_chains: Optional[
Union[Sequence[str], Dict[str, str], ivy.Container]
] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.any. This method simply
wraps the function, and so the docstring for ivy.any also applies to
this method with minimal changes.
Parameters
----------
self
input container.
axis
axis or axes along which to perform a logical OR reduction. By default, a
logical OR reduction must be performed over the entire array. If a tuple of
integers, logical OR reductions must be performed over multiple axes. A
valid ``axis`` must be an integer on the interval ``[-N, N)``, where ``N``
            is the rank (number of dimensions) of ``self``. If an ``axis`` is specified
as a negative integer, the function must determine the axis along which to
perform a reduction by counting backward from the last dimension (where
``-1`` refers to the last dimension). If provided an invalid ``axis``, the
function must raise an exception. Default: ``None``.
keepdims
If ``True``, the reduced axes (dimensions) must be included in the result as
singleton dimensions, and, accordingly, the result must be compatible with
the input array (see :ref:`broadcasting`). Otherwise, if ``False``, the
            reduced axes (dimensions) must not be included in the result.
Default: ``False``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
if a logical OR reduction was performed over the entire array, the returned
container must be a zero-dimensional array containing the test result;
otherwise, the returned container must have non-zero-dimensional arrays
containing the test results. The returned container must have a data type of
``bool``.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0, 1, 2]), b=ivy.array([0, 0, 0]))
>>> y = x.any()
>>> print(y)
{
a: ivy.array(True),
b: ivy.array(False)
}
"""
return self._static_any(
self,
axis=axis,
keepdims=keepdims,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
# File: ivy/ivy/data_classes/container/utility.py
[build-system]
requires = ["maturin>=1,<2"]
build-backend = "maturin"
[project]
name = "pyo3_example"
requires-python = ">=3.7"
classifiers = [
"Programming Language :: Rust",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
]
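# A typical local workflow for a maturin-based extension like this one might
# be (a sketch, assuming maturin is installed in the active Python environment):
#   maturin develop          # compile the Rust crate and install it in-place
#   maturin build --release  # produce a distributable wheel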
# File: ivy/ivy/engines/XLA/rust_api/pyproject.toml
#!/bin/bash
#pip install virtualenv
cd XLA/rust_api/
#mkdir xla_build && virtualenv xla_build
#source xla_build/bin/activate
wget https://github.com/elixir-nx/xla/releases/download/v0.4.4/xla_extension-x86_64-linux-gnu-cuda111.tar.gz
tar -xzvf xla_extension-x86_64-linux-gnu-cuda111.tar.gz
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
source "$HOME/.cargo/env"
pip install maturin
apt-get update
apt-get install -y llvm-dev libclang-dev clang
export LIBCLANG_PATH=/usr/local/lib
# maturin develop
# File: ivy/ivy/engines/setup_xla.sh
"""Collection of Jax general functions, wrapped to fit Ivy syntax and
signature."""
# global
import jax
import numpy as np
import jax.numpy as jnp
from numbers import Number
from operator import mul
from functools import reduce as _reduce
from typing import Optional, Union, Sequence, Callable, Tuple
import multiprocessing as _multiprocessing
import importlib
# local
import ivy
from ivy.func_wrapper import with_unsupported_dtypes
from ivy.functional.backends.jax.device import _to_array, _to_device
from ivy.functional.ivy.general import _broadcast_to
from ivy.functional.backends.jax import JaxArray, NativeArray
from ivy.utils.exceptions import _check_inplace_update_support
from . import backend_version
def container_types():
flat_mapping_spec = importlib.util.find_spec(
"FlatMapping", "haiku._src.data_structures"
)
if not flat_mapping_spec:
from haiku._src.data_structures import FlatMapping
else:
FlatMapping = importlib.util.module_from_spec(flat_mapping_spec)
return [FlatMapping]
def current_backend_str() -> str:
return "jax"
def is_native_array(x, /, *, exclusive=False):
if exclusive:
return isinstance(x, NativeArray)
return isinstance(
x,
(
NativeArray,
jax.interpreters.ad.JVPTracer,
jax.core.ShapedArray,
jax.interpreters.partial_eval.DynamicJaxprTracer,
),
)
def _mask_to_index(query, x):
if query.shape != x.shape:
if len(query.shape) > len(x.shape):
raise ivy.exceptions.IvyException("too many indices")
elif not len(query.shape):
query = jnp.tile(query, x.shape[0])
return jnp.where(query)
def get_item(
x: JaxArray,
/,
query: Union[JaxArray, Tuple],
*,
copy: Optional[bool] = None,
) -> JaxArray:
if ivy.is_array(query) and ivy.is_bool_dtype(query):
if not len(query.shape):
if not query:
return jnp.array([], dtype=x.dtype)
else:
return jnp.expand_dims(x, 0)
query = _mask_to_index(query, x)
elif isinstance(query, list):
query = (query,)
return x.__getitem__(query)
def set_item(
x: JaxArray,
query: Union[JaxArray, Tuple],
val: JaxArray,
/,
*,
copy: Optional[bool] = False,
) -> JaxArray:
if ivy.is_array(query) and ivy.is_bool_dtype(query):
query = _mask_to_index(query, x)
expected_shape = x[query].shape
if ivy.is_array(val):
val = _broadcast_to(val, expected_shape)._data
ret = x.at[query].set(val)
if copy:
return ret
return ivy.inplace_update(x, _to_device(ret))
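# A minimal sketch of the functional-update pattern used by ``set_item`` above
# (illustrative): JAX arrays are immutable, so ``x.at[query].set(val)`` returns
# a new array rather than mutating ``x`` in place.
#   >>> a = jnp.array([1, 2, 3])
#   >>> a.at[1].set(9)   # -> Array([1, 9, 3], dtype=int32)
#   >>> a                # unchanged: Array([1, 2, 3], dtype=int32)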
def array_equal(x0: JaxArray, x1: JaxArray, /) -> bool:
return bool(jnp.array_equal(x0, x1))
@with_unsupported_dtypes({"0.4.24 and below": ("bfloat16",)}, backend_version)
def to_numpy(x: JaxArray, /, *, copy: bool = True) -> np.ndarray:
if copy:
return np.array(_to_array(x))
else:
return np.asarray(_to_array(x))
def to_scalar(x: JaxArray, /) -> Number:
if isinstance(x, Number):
return x
else:
return _to_array(x).item()
def to_list(x: JaxArray, /) -> list:
return _to_array(x).tolist()
def gather(
params: JaxArray,
indices: JaxArray,
/,
*,
axis: int = -1,
batch_dims: int = 0,
out: Optional[JaxArray] = None,
) -> JaxArray:
axis %= len(params.shape)
batch_dims %= len(params.shape)
ivy.utils.assertions.check_gather_input_valid(params, indices, axis, batch_dims)
result = []
if batch_dims == 0:
result = jnp.take(params, indices, axis)
else:
for b in range(batch_dims):
if b == 0:
zip_list = [(p, i) for p, i in zip(params, indices)]
else:
zip_list = [
(p, i) for z in [zip(p1, i1) for p1, i1 in zip_list] for p, i in z
]
for z in zip_list:
p, i = z
r = jnp.take(p, i, axis - batch_dims)
result.append(r)
result = jnp.array(result)
result = result.reshape([*params.shape[0:batch_dims], *result.shape[1:]])
return result
def gather_nd_helper(params, indices):
indices_shape = indices.shape
params_shape = params.shape
if len(indices.shape) == 0:
num_index_dims = 1
else:
num_index_dims = indices_shape[-1]
res_dim_sizes_list = [
_reduce(mul, params_shape[i + 1 :], 1) for i in range(len(params_shape) - 1)
] + [1]
result_dim_sizes = jnp.array(res_dim_sizes_list)
implicit_indices_factor = int(result_dim_sizes[num_index_dims - 1].item())
flat_params = jnp.reshape(params, (-1,))
new_shape = [1] * (len(indices_shape) - 1) + [num_index_dims]
indices_scales = jnp.reshape(result_dim_sizes[0:num_index_dims], new_shape)
indices_for_flat_tiled = jnp.tile(
jnp.reshape(jnp.sum(indices * indices_scales, -1, keepdims=True), (-1, 1)),
(1, implicit_indices_factor),
)
implicit_indices = jnp.tile(
jnp.expand_dims(jnp.arange(implicit_indices_factor), 0),
(indices_for_flat_tiled.shape[0], 1),
)
indices_for_flat = indices_for_flat_tiled + implicit_indices
flat_indices_for_flat = jnp.reshape(indices_for_flat, (-1,)).astype(jnp.int32)
flat_gather = jnp.take(flat_params, flat_indices_for_flat, 0)
new_shape = list(indices_shape[:-1]) + list(params_shape[num_index_dims:])
ret = jnp.reshape(flat_gather, new_shape)
return ret
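# A small worked example of the flattening scheme above (illustrative): for
# ``params`` of shape (2, 3), ``result_dim_sizes`` is [3, 1], so the index
# [1, 2] collapses to the flat offset 1 * 3 + 2 * 1 = 5 and the gather reduces
# to a single ``jnp.take`` on the flattened params.
#   >>> p = jnp.arange(6).reshape(2, 3)
#   >>> gather_nd_helper(p, jnp.array([[1, 2]]))   # -> Array([5], dtype=int32)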
def gather_nd(
params: JaxArray,
indices: JaxArray,
/,
*,
batch_dims: int = 0,
out: Optional[JaxArray] = None,
) -> JaxArray:
ivy.utils.assertions.check_gather_nd_input_valid(params, indices, batch_dims)
batch_dims = batch_dims % len(params.shape)
result = []
if batch_dims == 0:
result = gather_nd_helper(params, indices)
else:
for b in range(batch_dims):
if b == 0:
zip_list = [(p, i) for p, i in zip(params, indices)]
else:
zip_list = [
(p, i) for z in [zip(p1, i1) for p1, i1 in zip_list] for p, i in z
]
for z in zip_list:
p, i = z
r = gather_nd_helper(p, i)
result.append(r)
result = jnp.array(result)
result = result.reshape([*params.shape[0:batch_dims], *result.shape[1:]])
return result
def get_num_dims(x: JaxArray, /, *, as_array: bool = False) -> Union[JaxArray, int]:
return jnp.asarray(len(jnp.shape(x))) if as_array else len(x.shape)
def inplace_arrays_supported():
return False
def inplace_decrement(
x: Union[ivy.Array, JaxArray], val: Union[ivy.Array, JaxArray]
) -> ivy.Array:
(x_native, val_native), _ = ivy.args_to_native(x, val)
if ivy.is_ivy_array(x):
x.data -= val_native
else:
x = ivy.Array(x_native - val_native)
return x
def inplace_increment(
x: Union[ivy.Array, JaxArray], val: Union[ivy.Array, JaxArray]
) -> ivy.Array:
(x_native, val_native), _ = ivy.args_to_native(x, val)
if ivy.is_ivy_array(x):
x.data += val_native
else:
x = ivy.Array(x_native + val_native)
return x
def inplace_update(
x: Union[ivy.Array, JaxArray],
val: Union[ivy.Array, JaxArray],
/,
*,
ensure_in_backend: bool = False,
keep_input_dtype: bool = False,
) -> ivy.Array:
if ivy.is_array(x) and ivy.is_array(val):
_check_inplace_update_support(x, ensure_in_backend)
if keep_input_dtype:
val = ivy.astype(val, x.dtype)
(x_native, val_native), _ = ivy.args_to_native(x, val)
if ivy.is_ivy_array(x):
x.data = val_native
# Handle view updates
if ivy.exists(x._base):
base = x._base
base_idx = ivy.arange(base.size).reshape(base.shape)
for fn, args, kwargs, index in x._manipulation_stack:
kwargs["copy"] = True
base_idx = ivy.__dict__[fn](base_idx, *args, **kwargs)
base_idx = base_idx[index] if ivy.exists(index) else base_idx
base_flat = base.data.flatten()
base_flat = base_flat.at[base_idx.data.flatten()].set(
val_native.flatten()
)
base.data = base_flat.reshape(base.shape)
for ref in base._view_refs:
view = ref()
if ivy.exists(view) and view is not x:
_update_view(view, base)
else:
for ref in x._view_refs:
view = ref()
if ivy.exists(view):
_update_view(view, x)
return x
else:
return val
def _update_view(view, base):
for fn, args, kwargs, index in view._manipulation_stack:
base = ivy.__dict__[fn](base, *args, **kwargs)
base = base[index] if ivy.exists(index) else base
view.data = base.data
return view
def inplace_variables_supported():
return False
def multiprocessing(context: Optional[str] = None):
return (
_multiprocessing if context is None else _multiprocessing.get_context(context)
)
def scatter_flat(
indices: JaxArray,
updates: JaxArray,
/,
*,
size: Optional[int] = None,
reduction: str = "sum",
out: Optional[JaxArray] = None,
) -> JaxArray:
target = out
target_given = ivy.exists(target)
if ivy.exists(size) and ivy.exists(target):
ivy.utils.assertions.check_equal(len(target.shape), 1, as_array=False)
ivy.utils.assertions.check_equal(target.shape[0], size, as_array=False)
if not target_given:
reduction = "replace"
if reduction == "sum":
target = target.at[indices].add(updates)
elif reduction == "replace":
if not target_given:
target = jnp.zeros([size], dtype=updates.dtype)
target = target.at[indices].set(updates)
elif reduction == "min":
target = target.at[indices].min(updates)
elif reduction == "max":
target = target.at[indices].max(updates)
else:
raise ivy.utils.exceptions.IvyException(
f'reduction is {reduction}, but it must be one of "sum", "min", "max" or'
' "replace"'
)
if target_given:
return ivy.inplace_update(out, target)
return target
scatter_flat.support_native_out = True
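# Usage sketch (illustrative): with no ``out`` target the reduction falls back
# to "replace", so the updates land in a fresh zero array of length ``size``.
#   >>> idx = jnp.array([0, 2])
#   >>> ups = jnp.array([5.0, 7.0])
#   >>> scatter_flat(idx, ups, size=4)   # -> Array([5., 0., 7., 0.], dtype=float32)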
def scatter_nd(
indices: JaxArray,
updates: JaxArray,
/,
shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
*,
reduction: str = "sum",
out: Optional[JaxArray] = None,
) -> JaxArray:
updates = jnp.array(
updates,
dtype=(
ivy.dtype(out, as_native=True)
if ivy.exists(out)
else ivy.default_dtype(item=updates)
),
)
indices_flat = indices.reshape(-1, indices.shape[-1]).T
indices_tuple = tuple(indices_flat) + (Ellipsis,)
target = out
target_given = ivy.exists(target)
if ivy.exists(shape) and ivy.exists(target):
ivy.utils.assertions.check_equal(
ivy.Shape(target.shape), ivy.Shape(shape), as_array=False
)
shape = list(shape) if ivy.exists(shape) else list(out.shape)
if not target_given:
target = jnp.zeros(shape, dtype=updates.dtype)
updates = _broadcast_to(updates, target[indices_tuple].shape)._data
if reduction == "sum":
target = target.at[indices_tuple].add(updates)
elif reduction == "replace":
target = target.at[indices_tuple].set(updates)
elif reduction == "min":
target = target.at[indices_tuple].min(updates)
elif reduction == "max":
target = target.at[indices_tuple].max(updates)
elif reduction == "mul":
target = target.at[indices_tuple].mul(updates)
else:
raise ivy.utils.exceptions.IvyException(
f'reduction is {reduction}, but it must be one of "sum", "min", "max",'
' "mul" or "replace"'
)
if ivy.exists(out):
return ivy.inplace_update(out, target)
return target
scatter_nd.support_native_out = True
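# Usage sketch (illustrative): the trailing dimension of ``indices`` holds the
# coordinates into ``shape``, so the (2, 1)-shaped indices below address rows
# 0 and 2 of a (3, 2) target. With the default "sum" reduction on a fresh zero
# target, this is equivalent to writing the updates in place.
#   >>> idx = jnp.array([[0], [2]])
#   >>> ups = jnp.array([[1.0, 1.0], [2.0, 2.0]])
#   >>> scatter_nd(idx, ups, shape=(3, 2))
#   # -> [[1., 1.], [0., 0.], [2., 2.]]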
def shape(
x: JaxArray,
/,
*,
as_array: bool = False,
) -> Union[ivy.Shape, ivy.Array]:
if as_array:
return ivy.array(jnp.shape(x), dtype=ivy.default_int_dtype())
else:
return ivy.Shape(x.shape)
def vmap(
func: Callable,
in_axes: Union[int, Sequence[int], Sequence[None]] = 0,
out_axes: int = 0,
) -> Callable:
func = ivy.output_to_native_arrays(func)
return ivy.inputs_to_native_arrays(
jax.vmap(func, in_axes=in_axes, out_axes=out_axes)
)
@with_unsupported_dtypes({"0.4.24 and below": ("float16", "bfloat16")}, backend_version)
def isin(
elements: JaxArray,
test_elements: JaxArray,
/,
*,
assume_unique: bool = False,
invert: bool = False,
) -> JaxArray:
return jnp.isin(elements, test_elements, assume_unique=assume_unique, invert=invert)
def itemsize(x: JaxArray) -> int:
return x.itemsize
# File: ivy/ivy/functional/backends/jax/general.py
# global
import mxnet as mx
import numpy as np
from numbers import Number
from typing import Union, List, Optional, Sequence, Tuple
# local
import ivy
from ivy.utils.exceptions import IvyNotImplementedException
from ivy.functional.ivy.creation import (
_asarray_to_native_arrays_and_back,
_asarray_infer_device,
_asarray_handle_nestable,
NestedSequence,
SupportsBufferProtocol,
_asarray_inputs_to_native_shapes,
)
def arange(
start: float,
/,
stop: Optional[float] = None,
step: float = 1,
*,
dtype: Optional[None] = None,
device: str,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
@_asarray_to_native_arrays_and_back
@_asarray_infer_device
@_asarray_handle_nestable
@_asarray_inputs_to_native_shapes
def asarray(
obj: Union[
(
None,
mx.ndarray.NDArray,
bool,
int,
float,
NestedSequence,
SupportsBufferProtocol,
np.ndarray,
)
],
/,
*,
copy: Optional[bool] = None,
dtype: Optional[None] = None,
device: str,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
ret = mx.nd.array(obj, device, dtype=dtype)
if copy:
return mx.numpy.copy(ret)
return ret
array = asarray
def empty(
*size: Union[(int, Sequence[int])],
shape: Optional[ivy.NativeShape] = None,
dtype: None,
device: str,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def empty_like(
x: Union[(None, mx.ndarray.NDArray)],
/,
*,
dtype: None,
device: str,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def eye(
n_rows: int,
n_cols: Optional[int] = None,
/,
*,
k: int = 0,
batch_shape: Optional[Union[(int, Sequence[int])]] = None,
dtype: None,
device: str,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def to_dlpack(
x: Union[(None, mx.ndarray.NDArray)],
/,
*,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
):
raise IvyNotImplementedException()
def from_dlpack(
x: Union[(None, mx.ndarray.NDArray)],
/,
*,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def full(
shape: Union[(ivy.NativeShape, Sequence[int])],
fill_value: Union[(int, float, bool)],
*,
dtype: Optional[Union[(ivy.Dtype, None)]] = None,
device: str,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def full_like(
x: Union[(None, mx.ndarray.NDArray)],
/,
fill_value: Number,
*,
dtype: None,
device: str,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def linspace(
start: Union[(None, mx.ndarray.NDArray, float)],
stop: Union[(None, mx.ndarray.NDArray, float)],
/,
num: int,
*,
axis: Optional[int] = None,
endpoint: bool = True,
dtype: None,
device: str,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
):
raise IvyNotImplementedException()
def meshgrid(
*arrays: Union[(None, mx.ndarray.NDArray)],
sparse: bool = False,
indexing: str = "xy",
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> List[Union[(None, mx.ndarray.NDArray)]]:
raise IvyNotImplementedException()
def ones(
shape: Optional[ivy.NativeShape] = None,
*,
dtype: None,
device: str,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
return mx.nd.ones(shape, dtype=dtype, ctx=device)
def ones_like(
x: Union[(None, mx.ndarray.NDArray)],
/,
*,
dtype: None,
device: str,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
return mx.nd.ones_like(x, dtype=dtype, ctx=device)
def tril(
x: Union[(None, mx.ndarray.NDArray)],
/,
*,
k: int = 0,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def triu(
x: Union[(None, mx.ndarray.NDArray)],
/,
*,
k: int = 0,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def zeros(
*size: Union[(int, Sequence[int])],
shape: Optional[ivy.NativeShape] = None,
dtype: None,
device: str,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def zeros_like(
x: Union[(None, mx.ndarray.NDArray)],
/,
*,
dtype: None,
device: str,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
if x.shape == ():
ret = mx.nd.array(0, dtype=dtype)
else:
ret = mx.ndarray.zeros_like(x, dtype=dtype)
return ivy.to_device(ret, device)
def copy_array(
x: Union[(None, mx.ndarray.NDArray)],
*,
to_ivy_array: bool = True,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
if to_ivy_array:
return ivy.to_ivy(x.copy())
return x.copy()
def one_hot(
indices: Union[(None, mx.ndarray.NDArray)],
depth: int,
/,
*,
on_value: Optional[Number] = None,
off_value: Optional[Number] = None,
axis: Optional[int] = None,
dtype: Optional[None] = None,
device: str,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def frombuffer(
buffer: bytes,
dtype: Optional[None] = float,
count: Optional[int] = (-1),
offset: Optional[int] = 0,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def triu_indices(
n_rows: int, n_cols: Optional[int] = None, k: int = 0, /, *, device: str
) -> Tuple[Union[(None, mx.ndarray.NDArray)]]:
raise IvyNotImplementedException()
# File: ivy/ivy/functional/backends/mxnet/creation.py
from typing import Union, Optional, Sequence
import mxnet as mx
import ivy
from ivy.utils.exceptions import IvyNotImplementedException
def dirichlet(
alpha: Union[(None, mx.ndarray.NDArray, float, Sequence[float])],
/,
*,
size: Optional[Union[(ivy.NativeShape, Sequence[int])]] = None,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
seed: Optional[int] = None,
dtype: Optional[None] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def beta(
alpha: Union[(float, None, mx.ndarray.NDArray)],
beta: Union[(float, None, mx.ndarray.NDArray)],
/,
*,
shape: Optional[Union[(ivy.NativeShape, Sequence[int])]] = None,
device: Optional[str] = None,
dtype: Optional[Union[(None, ivy.Dtype)]] = None,
seed: Optional[int] = None,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def gamma(
alpha: Union[(float, None, mx.ndarray.NDArray)],
beta: Union[(float, None, mx.ndarray.NDArray)],
/,
*,
shape: Optional[Union[(ivy.NativeShape, Sequence[int])]] = None,
device: Optional[str] = None,
dtype: Optional[Union[(None, ivy.Dtype)]] = None,
seed: Optional[int] = None,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def poisson(
lam: Union[(float, None, mx.ndarray.NDArray)],
*,
shape: Optional[Union[(ivy.NativeShape, Sequence[int])]] = None,
device: str,
dtype: None,
seed: Optional[int] = None,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def bernoulli(
probs: Union[(float, None, mx.ndarray.NDArray)],
*,
logits: Union[(float, None, mx.ndarray.NDArray)] = None,
shape: Optional[Union[(ivy.NativeShape, Sequence[int])]] = None,
device: str,
dtype: None,
seed: Optional[int] = None,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
# File: ivy/ivy/functional/backends/mxnet/experimental/random.py
from typing import Union, Optional, Literal, List
import mxnet as mx
import ivy
from ivy.utils.exceptions import IvyNotImplementedException
def argsort(
x: Union[(None, mx.ndarray.NDArray)],
/,
*,
axis: int = (-1),
descending: bool = False,
stable: bool = True,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def sort(
x: Union[(None, mx.ndarray.NDArray)],
/,
*,
axis: int = (-1),
descending: bool = False,
stable: bool = True,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def msort(
a: Union[(None, mx.ndarray.NDArray, list, tuple)],
/,
*,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def searchsorted(
x: Union[(None, mx.ndarray.NDArray)],
v: Union[(None, mx.ndarray.NDArray)],
/,
*,
side: Literal[("left", "right")] = "left",
sorter: Optional[Union[(ivy.Array, ivy.NativeArray, List[int])]] = None,
ret_dtype: None = None,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
# File: ivy/ivy/functional/backends/mxnet/sorting.py
from typing import Optional, Union, Tuple, List, Sequence
import numpy as np
import numpy.typing as npt
import ivy
from ivy import promote_types_of_inputs
from ivy.functional.backends.numpy.helpers import _scalar_output_to_0d_array
from ivy.func_wrapper import with_unsupported_dtypes
from . import backend_version
def amax(
x: np.ndarray,
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
axis = tuple(axis) if isinstance(axis, list) else axis
ret = np.amax(a=x, axis=axis, out=out, keepdims=keepdims)
return np.asarray(ret) if np.isscalar(ret) else ret
amax.support_native_out = True
def amin(
x: np.ndarray,
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
axis = tuple(axis) if isinstance(axis, list) else axis
ret = np.amin(a=x, axis=axis, out=out, keepdims=keepdims)
return np.asarray(ret) if np.isscalar(ret) else ret
amin.support_native_out = True
@_scalar_output_to_0d_array
@with_unsupported_dtypes({"1.26.3 and below": ("bfloat16",)}, backend_version)
def sinc(x: np.ndarray, /, *, out: Optional[np.ndarray] = None) -> np.ndarray:
return np.sinc(x).astype(x.dtype)
@_scalar_output_to_0d_array
def fmax(
x1: np.ndarray,
x2: np.ndarray,
/,
*,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
x1, x2 = promote_types_of_inputs(x1, x2)
return np.fmax(
x1,
x2,
out=None,
where=True,
casting="same_kind",
order="K",
dtype=None,
subok=True,
)
fmax.support_native_out = True
@_scalar_output_to_0d_array
def float_power(
x1: Union[np.ndarray, float, list, tuple],
x2: Union[np.ndarray, float, list, tuple],
/,
*,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
x1, x2 = promote_types_of_inputs(x1, x2)
return np.float_power(x1, x2, out=out)
float_power.support_native_out = True
@_scalar_output_to_0d_array
def copysign(
x1: npt.ArrayLike,
x2: npt.ArrayLike,
/,
*,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
x1, x2 = promote_types_of_inputs(x1, x2)
if not ivy.is_float_dtype(x1):
x1 = x1.astype(ivy.default_float_dtype(as_native=True))
x2 = x2.astype(ivy.default_float_dtype(as_native=True))
return np.copysign(x1, x2, out=out)
copysign.support_native_out = True
@_scalar_output_to_0d_array
def count_nonzero(
a: np.ndarray,
/,
*,
axis: Optional[Union[int, Tuple[int, ...]]] = None,
keepdims: bool = False,
dtype: Optional[np.dtype] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if isinstance(axis, list):
axis = tuple(axis)
ret = np.count_nonzero(a, axis=axis, keepdims=keepdims)
if np.isscalar(ret):
return np.array(ret, dtype=dtype)
return ret.astype(dtype)
count_nonzero.support_native_out = False
def nansum(
x: np.ndarray,
/,
*,
axis: Optional[Union[Tuple[int, ...], int]] = None,
dtype: Optional[np.dtype] = None,
keepdims: bool = False,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if isinstance(axis, list):
axis = tuple(axis)
return np.nansum(x, axis=axis, dtype=dtype, keepdims=keepdims, out=out)
nansum.support_native_out = True
def isclose(
a: np.ndarray,
b: np.ndarray,
/,
*,
rtol: float = 1e-05,
atol: float = 1e-08,
equal_nan: bool = False,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
ret = np.isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)
if np.isscalar(ret):
return np.array(ret, dtype="bool")
return ret
isclose.support_native_out = False
def signbit(
x: Union[np.ndarray, float, int, list, tuple],
/,
*,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
return np.signbit(x, out=out)
signbit.support_native_out = True
def hypot(
x1: np.ndarray,
x2: np.ndarray,
/,
*,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
return np.hypot(x1, x2)
def diff(
x: Union[np.ndarray, list, tuple],
/,
*,
n: int = 1,
axis: int = -1,
prepend: Optional[Union[np.ndarray, int, float, list, tuple]] = None,
append: Optional[Union[np.ndarray, int, float, list, tuple]] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
prepend = prepend if prepend is not None else np._NoValue
append = append if append is not None else np._NoValue
return np.diff(x, n=n, axis=axis, prepend=prepend, append=append)
diff.support_native_out = False
@_scalar_output_to_0d_array
def allclose(
x1: np.ndarray,
x2: np.ndarray,
/,
*,
rtol: float = 1e-05,
atol: float = 1e-08,
equal_nan: bool = False,
out: Optional[np.ndarray] = None,
) -> bool:
return np.allclose(x1, x2, rtol=rtol, atol=atol, equal_nan=equal_nan)
allclose.support_native_out = False
def fix(
x: np.ndarray,
/,
*,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
return np.fix(x, out=out)
fix.support_native_out = True
def nextafter(
x1: np.ndarray,
x2: np.ndarray,
/,
*,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
return np.nextafter(x1, x2)
nextafter.support_native_out = True
def zeta(
x: np.ndarray,
q: np.ndarray,
/,
*,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
temp = np.logical_and(np.greater(x, 0), np.equal(np.remainder(x, 2), 0))
temp = np.logical_and(temp, np.less_equal(q, 0))
temp = np.logical_and(temp, np.equal(np.remainder(q, 1), 0))
inf_indices = np.logical_or(temp, np.equal(x, 1))
temp = np.logical_and(np.not_equal(np.remainder(x, 2), 0), np.greater(x, 1))
temp = np.logical_and(temp, np.less_equal(q, 0))
nan_indices = np.logical_or(temp, np.less(x, 1))
n, res = 1, 1 / q**x
while n < 10000:
term = 1 / (q + n) ** x
n, res = n + 1, res + term
ret = np.round(res, decimals=4)
ret[nan_indices] = np.nan
ret[inf_indices] = np.inf
return ret
zeta.support_native_out = False
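# A quick sanity check of the truncated series above (illustrative): the
# Hurwitz zeta value zeta(2, 1) is the Riemann zeta(2) = pi**2 / 6 ~= 1.64493;
# the 10000-term partial sum undershoots this by roughly 1e-4, so after the
# 4-decimal rounding:
#   >>> zeta(np.array([2.0]), np.array([1.0]))   # -> array([1.6448])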
def gradient(
x: np.ndarray,
/,
*,
spacing: Union[int, list, tuple] = 1,
axis: Optional[Union[int, list, tuple]] = None,
edge_order: int = 1,
) -> Union[np.ndarray, List[np.ndarray]]:
if type(spacing) in (int, float):
return np.gradient(x, spacing, axis=axis, edge_order=edge_order)
return np.gradient(x, *spacing, axis=axis, edge_order=edge_order)
def xlogy(
x: np.ndarray, y: np.ndarray, /, *, out: Optional[np.ndarray] = None
) -> np.ndarray:
    x, y = promote_types_of_inputs(x, y)
    # xlogy is defined to be 0 wherever x == 0, even where log(y) is
    # undefined, so mask elementwise instead of special-casing only an
    # all-zero x
    with np.errstate(divide="ignore", invalid="ignore"):
        ret = x * np.log(y)
    return np.where(x == 0, np.zeros_like(ret), ret)
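# Illustrative only: the x == 0 mask is what separates xlogy from a plain
# x * np.log(y), which would yield nan at (0, 0):
#
#     >>> xlogy(np.array([0.0, 2.0]), np.array([0.0, np.e]))  # -> array([0., 2.])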
def conj(
x: np.ndarray,
/,
*,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
ret = np.conj(x, out=out)
if x.dtype == bool:
return ret.astype("bool")
return ret
def ldexp(
x1: np.ndarray,
x2: Union[np.ndarray, int, list, tuple],
/,
*,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
return np.ldexp(x1, x2, out=out)
def frexp(
x: np.ndarray, /, *, out: Optional[Tuple[np.ndarray, np.ndarray]] = None
) -> Tuple[np.ndarray, np.ndarray]:
if out is None:
return np.frexp(x, out=(None, None))
else:
return np.frexp(x, out=out)
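# Illustrative only: frexp splits x into a mantissa in [0.5, 1) and an
# integer exponent with x == mantissa * 2 ** exponent, e.g. 4.0 == 0.5 * 2**3:
#
#     >>> frexp(np.array([4.0]))  # -> (array([0.5]), array([3]))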
def modf(
x: np.ndarray,
/,
*,
out: Optional[Tuple[np.ndarray, np.ndarray]] = None,
) -> np.ndarray:
    if out is not None:
return np.modf(x, out=out)
return np.modf(x)
# ---digamma---#
kLanczosGamma = 7 # aka g
kBaseLanczosCoeff = 0.99999999999980993227684700473478
kLanczosCoefficients = np.array(
[
676.520368121885098567009190444019,
-1259.13921672240287047156078755283,
771.3234287776530788486528258894,
-176.61502916214059906584551354,
12.507343278686904814458936853,
-0.13857109526572011689554707,
9.984369578019570859563e-6,
1.50563273514931155834e-7,
]
)
def digamma(
x: np.ndarray,
/,
*,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
# Using `np.errstate` to ignore divide by zero error
# to maintain the same behaviour as other frameworks.
with np.errstate(divide="ignore", invalid="ignore"):
x = np.asarray(x, dtype=x.dtype)
zero = np.zeros_like(x)
one_half = 0.5 * np.ones_like(x)
one = np.ones_like(x)
pi = np.pi * np.ones_like(x)
lanczos_gamma = kLanczosGamma * np.ones_like(x)
lanczos_gamma_plus_one_half = (kLanczosGamma + 0.5) * np.ones_like(x)
log_lanczos_gamma_plus_one_half = np.log(kLanczosGamma + 0.5) * np.ones_like(x)
base_lanczos_coeff = kBaseLanczosCoeff * np.ones_like(x)
need_to_reflect = x < one_half
z = np.where(need_to_reflect, -x, x - one)
num = zero
denom = base_lanczos_coeff
for i in range(len(kLanczosCoefficients)):
lanczos_coefficient = kLanczosCoefficients[i] * np.ones_like(x)
index = i * np.ones_like(x)
num = num - lanczos_coefficient / ((z + index + one) * (z + index + one))
denom = denom + lanczos_coefficient / (z + index + one)
t = lanczos_gamma_plus_one_half + z
log_t = log_lanczos_gamma_plus_one_half + np.log1p(
z / lanczos_gamma_plus_one_half
)
y = log_t + num / denom - lanczos_gamma / t
reduced_x = x + np.abs(np.floor(x + 0.5))
reflection = y - pi * np.cos(pi * reduced_x) / np.sin(pi * reduced_x)
real_result = np.where(need_to_reflect, reflection, y)
return np.where(
np.logical_and(x <= zero, x == np.floor(x)), np.nan, real_result
)
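# Illustrative only: digamma(1) equals minus the Euler-Mascheroni constant:
#
#     >>> digamma(np.array([1.0]))  # -> array([-0.57721566])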
# --- LGAMMA --- #
LANCZOS_N = 13
lanczos_g = 6.024680040776729583740234375
lanczos_num_coeffs = np.array(
[
23531376880.410759688572007674451636754734846804940,
42919803642.649098768957899047001988850926355848959,
35711959237.355668049440185451547166705960488635843,
17921034426.037209699919755754458931112671403265390,
6039542586.3520280050642916443072979210699388420708,
1439720407.3117216736632230727949123939715485786772,
248874557.86205415651146038641322942321632125127801,
31426415.585400194380614231628318205362874684987640,
2876370.6289353724412254090516208496135991145378768,
186056.26539522349504029498971604569928220784236328,
8071.6720023658162106380029022722506138218516325024,
210.82427775157934587250973392071336271166969580291,
2.5066282746310002701649081771338373386264310793408,
]
)
lanczos_den_coeffs = np.array(
[
0.0,
39916800.0,
120543840.0,
150917976.0,
105258076.0,
45995730.0,
13339535.0,
2637558.0,
357423.0,
32670.0,
1925.0,
66.0,
1.0,
]
)
def sinpi(x):
y = np.abs(x) % 2.0
n = np.round(2.0 * y)
assert n >= 0
assert n <= 4
if n == 0:
r = np.sin(np.pi * y)
elif n == 1:
r = np.cos(np.pi * (y - 0.5))
elif n == 2:
r = np.sin(np.pi * (1.0 - y))
elif n == 3:
r = -np.cos(np.pi * (y - 1.5))
elif n == 4:
r = np.sin(np.pi * (y - 2.0))
else:
raise Exception("Unreachable code")
return np.copysign(1.0, x) * r
def lanczos_sum(x):
num = 0.0
den = 0.0
if x < 5.0:
for i in range(LANCZOS_N - 1, -1, -1):
num = num * x + lanczos_num_coeffs[i]
den = den * x + lanczos_den_coeffs[i]
else:
for i in range(LANCZOS_N):
num = num / x + lanczos_num_coeffs[i]
den = den / x + lanczos_den_coeffs[i]
return num / den
# TODO: Replace with native lgamma implementation when available
def lgamma(
x: np.ndarray,
/,
*,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
def func(x):
if not np.isfinite(x):
if np.isnan(x):
return x # lgamma(nan) = nan
else:
return np.inf # lgamma(+-inf) = +inf
if x == np.floor(x) and x <= 2.0:
if x <= 0.0:
return np.inf # lgamma(n) = inf for integers n <= 0
else:
return 0.0 # lgamma(1) = lgamma(2) = 0.0
absx = np.abs(x)
if absx < 1e-20:
return -np.log(absx)
# Lanczos' formula
r = np.log(lanczos_sum(absx)) - lanczos_g
r += (absx - 0.5) * (np.log(absx + lanczos_g - 0.5) - 1)
if x < 0.0:
# Use reflection formula to get value for negative x.
r = np.log(np.pi) - np.log(np.abs(sinpi(absx))) - np.log(absx) - r
if np.isinf(r):
raise OverflowError("Range error in lgamma")
return r
# Vectorize 'func' for element-wise operations on 'x', output matching 'x' dtype.
vfunc = np.vectorize(func, otypes=[x.dtype])
return vfunc(x)
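# Illustrative only: lgamma(n) == log((n - 1)!) at positive integers, so
# lgamma(5) is log(24):
#
#     >>> lgamma(np.array([5.0]))  # -> array([3.17805383])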
# --- erfc --- #
# Polynomials for computing erf/erfc. Originally from cephes library.
# https://netlib.org/cephes/doubldoc.html
kErfcPCoefficient = np.array(
[
2.46196981473530512524e-10,
5.64189564831068821977e-1,
7.46321056442269912687e0,
4.86371970985681366614e1,
1.96520832956077098242e2,
5.26445194995477358631e2,
9.34528527171957607540e2,
1.02755188689515710272e3,
5.57535335369399327526e2,
]
)
kErfcQCoefficient = np.array(
[
1.00000000000000000000e0,
1.32281951154744992508e1,
8.67072140885989742329e1,
3.54937778887819891062e2,
9.75708501743205489753e2,
1.82390916687909736289e3,
2.24633760818710981792e3,
1.65666309194161350182e3,
5.57535340817727675546e2,
]
)
kErfcRCoefficient = np.array(
[
5.64189583547755073984e-1,
1.27536670759978104416e0,
5.01905042251180477414e0,
6.16021097993053585195e0,
7.40974269950448939160e0,
2.97886665372100240670e0,
]
)
kErfcSCoefficient = np.array(
[
1.00000000000000000000e0,
2.26052863220117276590e0,
9.39603524938001434673e0,
1.20489539808096656605e1,
1.70814450747565897222e1,
9.60896809063285878198e0,
3.36907645100081516050e0,
]
)
# Evaluate the polynomial given coefficients and `x`.
# N.B. Coefficients should be supplied in decreasing order.
def _EvaluatePolynomial(x, coefficients):
poly = np.full_like(x, 0.0)
for c in coefficients:
poly = poly * x + c
return poly
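# Illustrative only: Horner evaluation with coefficients ordered from the
# highest degree down, so [2, 3, 4] at x = 10 is 2 * 10**2 + 3 * 10 + 4:
#
#     >>> _EvaluatePolynomial(np.array([10.0]), [2.0, 3.0, 4.0])  # -> array([234.])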
# TODO: Remove this once native function is available.
# Compute an approximation of the error function complement (1 - erf(x)).
def erfc(
x: np.ndarray,
/,
*,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if x.dtype not in [np.float16, np.float32, np.float64]:
raise TypeError("Input must be of type float16, float32, or float64.")
input_dtype = x.dtype
abs_x = np.abs(x)
z = np.exp(-x * x)
pp = _EvaluatePolynomial(abs_x, kErfcPCoefficient)
pq = _EvaluatePolynomial(abs_x, kErfcQCoefficient)
pr = _EvaluatePolynomial(abs_x, kErfcRCoefficient)
ps = _EvaluatePolynomial(abs_x, kErfcSCoefficient)
abs_x_small = abs_x < 8.0
y = np.where(abs_x_small, z * pp / pq, z * pr / ps)
result_no_underflow = np.where(x < 0.0, 2.0 - y, y)
def is_pos_inf(op):
return np.logical_and(np.isinf(op), op > 0)
underflow = np.logical_or(
z == 0,
np.logical_or(
np.logical_and(is_pos_inf(pq), abs_x_small),
np.logical_and(is_pos_inf(ps), np.logical_not(abs_x_small)),
),
)
result_underflow = np.where(x < 0, np.full_like(x, 2), np.full_like(x, 0))
return np.where(underflow, result_underflow, result_no_underflow).astype(
input_dtype
)
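# Illustrative only: erfc(0) = 1, and erfc tends to 0 as x -> +inf and to 2
# as x -> -inf:
#
#     >>> erfc(np.array([0.0]))  # -> array([1.]) (up to the approximation error)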
# TODO: Remove this once native function is available.
# Compute an approximation of the inverse error function, i.e. the value z
# satisfying erf(z) = x. NumPy has no native erfinv, so this sketch inverts
# the `erfc` approximation above with Newton's method, using
# d/dz erf(z) = 2 / sqrt(pi) * exp(-z ** 2).
def erfinv(
    x: np.ndarray,
    /,
    *,
    out: Optional[np.ndarray] = None,
) -> np.ndarray:
    with ivy.ArrayMode(False), np.errstate(divide="ignore", invalid="ignore"):
        # erf(z) <= 2 * z / sqrt(pi) for z >= 0, so this initial guess sits
        # below the root and the Newton iterates approach it monotonically
        z = x * np.sqrt(np.pi) / 2
        for _ in range(50):
            err = (1 - erfc(z)) - x
            z = z - err / (2 / np.sqrt(np.pi) * np.exp(-z * z))
        return z
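# Illustrative only: erfinv inverts erf, so a round trip through the erfc
# approximation above should recover the input to within the Newton and
# polynomial tolerances:
#
#     >>> 1 - erfc(erfinv(np.array([0.5])))  # -> array([0.5]) approximately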
| ivy/ivy/functional/backends/numpy/experimental/elementwise.py/0 | {
"file_path": "ivy/ivy/functional/backends/numpy/experimental/elementwise.py",
"repo_id": "ivy",
"token_count": 8161
} | 20 |
"""Collection of Numpy general functions, wrapped to fit Ivy syntax and
signature."""
# global
from typing import Optional, Union, Sequence, Callable, Tuple
import numpy as np
from operator import mul
from functools import reduce as _reduce
import multiprocessing as _multiprocessing
from numbers import Number
# local
import ivy
from ivy.functional.backends.numpy.helpers import _scalar_output_to_0d_array
from ivy.func_wrapper import with_unsupported_dtypes
from . import backend_version
from ...ivy.general import _broadcast_to
def array_equal(x0: np.ndarray, x1: np.ndarray, /) -> bool:
return np.array_equal(x0, x1)
def container_types():
return []
def current_backend_str() -> str:
return "numpy"
@_scalar_output_to_0d_array
def get_item(
x: np.ndarray,
/,
query: Union[np.ndarray, Tuple],
*,
copy: Optional[bool] = None,
) -> np.ndarray:
return x.__getitem__(query)
@_scalar_output_to_0d_array
def set_item(
x: np.ndarray,
query: Union[np.ndarray, Tuple],
val: np.ndarray,
/,
*,
copy: bool = False,
) -> np.ndarray:
if copy:
x = np.copy(x)
x.__setitem__(query, val)
return x
def to_numpy(x: np.ndarray, /, *, copy: bool = True) -> np.ndarray:
if copy:
return x.copy()
else:
return x
def to_scalar(x: np.ndarray, /) -> Number:
if isinstance(x, (float, int)):
return x
return x.item()
def to_list(x: np.ndarray, /) -> list:
return x.tolist()
def gather(
params: np.ndarray,
indices: np.ndarray,
/,
*,
axis: int = -1,
batch_dims: int = 0,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
axis %= len(params.shape)
batch_dims %= len(params.shape)
ivy.utils.assertions.check_gather_input_valid(params, indices, axis, batch_dims)
result = []
if batch_dims == 0:
result = np.take(params, indices, axis)
else:
for b in range(batch_dims):
if b == 0:
zip_list = [(p, i) for p, i in zip(params, indices)]
else:
zip_list = [
(p, i) for z in [zip(p1, i1) for p1, i1 in zip_list] for p, i in z
]
for z in zip_list:
p, i = z
r = np.take(p, i, axis - batch_dims)
result.append(r)
result = np.array(result)
result = result.reshape([*params.shape[0:batch_dims], *result.shape[1:]])
return result
def gather_nd_helper(params, indices):
indices_shape = indices.shape
params_shape = params.shape
if len(indices.shape) == 0:
num_index_dims = 1
else:
num_index_dims = indices_shape[-1]
result_dim_sizes_list = [
_reduce(mul, params_shape[i + 1 :], 1) for i in range(len(params_shape) - 1)
] + [1]
result_dim_sizes = np.array(result_dim_sizes_list)
implicit_indices_factor = int(result_dim_sizes[num_index_dims - 1].item())
flat_params = np.reshape(params, (-1,))
new_shape = [1] * (len(indices_shape) - 1) + [num_index_dims]
indices_scales = np.reshape(result_dim_sizes[0:num_index_dims], new_shape)
indices_for_flat_tiled = np.tile(
np.reshape(np.sum(indices * indices_scales, -1, keepdims=True), (-1, 1)),
(1, implicit_indices_factor),
)
implicit_indices = np.tile(
np.expand_dims(np.arange(implicit_indices_factor), 0),
(indices_for_flat_tiled.shape[0], 1),
)
indices_for_flat = indices_for_flat_tiled + implicit_indices
flat_indices_for_flat = np.reshape(indices_for_flat, (-1,)).astype(np.int32)
flat_gather = np.take(flat_params, flat_indices_for_flat, 0)
new_shape = list(indices_shape[:-1]) + list(params_shape[num_index_dims:])
res = np.reshape(flat_gather, new_shape)
return res
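# Illustrative only: each trailing index vector addresses one slice of
# params, so with params of shape (2, 2) the index [1, 0] picks params[1, 0]:
#
#     >>> gather_nd_helper(np.arange(4).reshape(2, 2), np.array([[1, 0]]))  # -> array([2])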
def gather_nd(
params: np.ndarray,
indices: np.ndarray,
/,
*,
batch_dims: int = 0,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
ivy.utils.assertions.check_gather_nd_input_valid(params, indices, batch_dims)
batch_dims %= len(params.shape)
result = []
if batch_dims == 0:
result = gather_nd_helper(params, indices)
else:
for b in range(batch_dims):
if b == 0:
zip_list = [(p, i) for p, i in zip(params, indices)]
else:
zip_list = [
(p, i) for z in [zip(p1, i1) for p1, i1 in zip_list] for p, i in z
]
for z in zip_list:
p, i = z
r = gather_nd_helper(p, np.asarray(i, indices.dtype))
result.append(r)
result = np.array(result)
result = result.reshape([*params.shape[0:batch_dims], *result.shape[1:]])
return result
def get_num_dims(x, /, *, as_array=False):
return np.asarray(len(np.shape(x))) if as_array else len(x.shape)
def inplace_arrays_supported():
return True
def inplace_decrement(
x: Union[ivy.Array, np.ndarray], val: Union[ivy.Array, np.ndarray]
) -> ivy.Array:
(x_native, val_native), _ = ivy.args_to_native(x, val)
x_native -= val_native
if ivy.is_ivy_array(x):
x.data = x_native
else:
x = ivy.Array(x_native)
return x
def inplace_increment(
x: Union[ivy.Array, np.ndarray], val: Union[ivy.Array, np.ndarray]
) -> ivy.Array:
(x_native, val_native), _ = ivy.args_to_native(x, val)
x_native += val_native
if ivy.is_ivy_array(x):
x.data = x_native
else:
x = ivy.Array(x_native)
return x
def inplace_update(
x: Union[ivy.Array, np.ndarray],
val: Union[ivy.Array, np.ndarray],
/,
*,
ensure_in_backend: bool = False,
keep_input_dtype: bool = False,
) -> ivy.Array:
ivy.utils.assertions.check_inplace_sizes_valid(x, val)
if ivy.is_array(x) and ivy.is_array(val):
if keep_input_dtype:
val = ivy.astype(val, x.dtype)
(x_native, val_native), _ = ivy.args_to_native(x, val)
# make both arrays contiguous if not already
if not x_native.flags.c_contiguous:
x_native = np.ascontiguousarray(x_native)
if not val_native.flags.c_contiguous:
val_native = np.ascontiguousarray(val_native)
if val_native.shape == x_native.shape:
if x_native.dtype != val_native.dtype:
x_native = x_native.astype(val_native.dtype)
np.copyto(x_native, val_native)
else:
x_native = val_native
if ivy.is_native_array(x):
return x_native
if ivy.is_ivy_array(x):
x.data = x_native
else:
x = ivy.Array(x_native)
return x
else:
return val
def inplace_variables_supported():
return True
def is_native_array(x, /, *, exclusive=False):
if isinstance(x, (np.ndarray, np.generic)):
return True
return False
def multiprocessing(context: Optional[str] = None):
return (
_multiprocessing if context is None else _multiprocessing.get_context(context)
)
def scatter_flat(
indices: np.ndarray,
updates: np.ndarray,
/,
*,
size: Optional[int] = None,
reduction: str = "sum",
out: Optional[np.ndarray] = None,
) -> np.ndarray:
target = out
target_given = ivy.exists(target)
if ivy.exists(size) and ivy.exists(target):
ivy.utils.assertions.check_equal(len(target.shape), 1, as_array=False)
ivy.utils.assertions.check_equal(target.shape[0], size, as_array=False)
if not target_given:
reduction = "replace"
if reduction == "sum":
np.add.at(target, indices, updates)
elif reduction == "replace":
if not target_given:
target = np.zeros([size], dtype=updates.dtype)
target = np.asarray(target).copy()
target.setflags(write=1)
target[indices] = updates
elif reduction == "min":
np.minimum.at(target, indices, updates)
elif reduction == "max":
np.maximum.at(target, indices, updates)
else:
raise ivy.utils.exceptions.IvyException(
f'reduction is {reduction}, but it must be one of "sum", "min", "max" or'
' "replace"'
)
if target_given:
return ivy.inplace_update(out, target)
return target
scatter_flat.support_native_out = True
def scatter_nd(
indices: np.ndarray,
updates: np.ndarray,
/,
shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
*,
reduction: str = "sum",
out: Optional[np.ndarray] = None,
) -> np.ndarray:
target = out
target_given = ivy.exists(target)
if ivy.exists(shape) and target_given:
ivy.utils.assertions.check_equal(
ivy.Shape(target.shape), ivy.Shape(shape), as_array=False
)
indices_flat = indices.reshape(-1, indices.shape[-1]).T
indices_tuple = tuple(indices_flat) + (Ellipsis,)
if not target_given:
shape = list(shape) if ivy.exists(shape) else list(out.shape)
target = np.zeros(shape, dtype=updates.dtype)
updates = _broadcast_to(updates, target[indices_tuple].shape)
if reduction == "sum":
np.add.at(target, indices_tuple, updates)
elif reduction == "replace":
target = np.asarray(target).copy()
target.setflags(write=1)
target[indices_tuple] = updates
elif reduction == "min":
np.minimum.at(target, indices_tuple, updates)
elif reduction == "max":
np.maximum.at(target, indices_tuple, updates)
elif reduction == "mul":
np.multiply.at(target, indices_tuple, updates)
else:
raise ivy.utils.exceptions.IvyException(
f'reduction is {reduction}, but it must be one of "sum", "min", "max",'
' "mul" or "replace"'
)
if ivy.exists(out):
return ivy.inplace_update(out, target)
return target
scatter_nd.support_native_out = True
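# Illustrative only: without an `out` target, scatter_nd writes the updates
# into a fresh zero array of the requested shape:
#
#     >>> scatter_nd(np.array([[1], [3]]), np.array([9, 10]), shape=(5,))
#     # -> array([ 0,  9,  0, 10,  0])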
def shape(
x: np.ndarray,
/,
*,
as_array: bool = False,
) -> Union[ivy.Shape, ivy.Array]:
if as_array:
return ivy.array(np.shape(x), dtype=ivy.default_int_dtype())
else:
return ivy.Shape(x.shape)
def vmap(
func: Callable,
in_axes: Union[int, Sequence[int], Sequence[None]] = 0,
out_axes: int = 0,
) -> Callable:
@ivy.output_to_native_arrays
@ivy.inputs_to_native_arrays
def _vmap(*args):
# convert args tuple to list to allow mutability using moveaxis ahead.
args = list(args)
        # if in_axes is not an integer, its length should equal the number of
        # positional arguments.
if isinstance(in_axes, (list, tuple)):
ivy.utils.assertions.check_equal(
len(args),
len(in_axes),
message="""in_axes should have a length equivalent to the number
of positional arguments to the function being vectorized or it
should be an integer""",
as_array=False,
)
# checking uniqueness of axis_size
axis_size = set()
if isinstance(in_axes, int):
for arg in args:
axis_size.add(arg.shape[in_axes])
elif isinstance(in_axes, (list, tuple)):
for arg, axis in zip(args, in_axes):
if axis is not None:
axis_size.add(arg.shape[axis])
if len(axis_size) > 1:
raise ivy.utils.exceptions.IvyException(
"""Inconsistent sizes. All mapped axes should have the same size"""
)
# Making sure not all in_axes are None
if isinstance(in_axes, (list, tuple)):
ivy.utils.assertions.check_any(
[ivy.exists(ax) for ax in in_axes],
message="At least one of the axes should be specified (not None)",
as_array=False,
)
else:
ivy.utils.assertions.check_exists(
in_axes, message="single value in_axes should not be None"
)
# Handling None in in_axes by broadcasting the axis_size
if isinstance(in_axes, (tuple, list)) and None in in_axes:
none_axis_index = []
for index, axis in enumerate(in_axes):
if axis is None:
none_axis_index.append(index)
for none_mapped_axis in none_axis_index:
args[none_mapped_axis] = np.broadcast_to(
args[none_mapped_axis],
(tuple(axis_size) + args[none_mapped_axis].shape),
)
# set up the axis to be mapped to index zero.
if isinstance(in_axes, (tuple, list)):
for i in range(len(in_axes)):
if in_axes[i] is not None:
args[i] = np.moveaxis(args[i], in_axes[i], 0)
elif isinstance(in_axes, int):
args[0] = np.moveaxis(args[0], in_axes, 0)
# vectorisation. To be optimized.
arr_results = []
for arrays in zip(*args):
single_op = func(*arrays)
arr_results.append(single_op)
res = np.stack(arr_results)
if out_axes:
res = np.moveaxis(res, 0, out_axes)
return res
return _vmap
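# Illustrative only (assumes the numpy backend is active): vmap maps a
# function over the leading axis of each input, e.g. a dot product becomes
# a batched dot product:
#
#     >>> batched_dot = vmap(lambda a, b: ivy.sum(a * b))
#     >>> batched_dot(ivy.ones((4, 3)), ivy.ones((4, 3)))  # -> ivy.array([3., 3., 3., 3.])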
@with_unsupported_dtypes({"1.26.3 and below": ("bfloat16",)}, backend_version)
def isin(
elements: np.ndarray,
test_elements: np.ndarray,
/,
*,
assume_unique: bool = False,
invert: bool = False,
) -> np.ndarray:
return np.isin(
elements,
test_elements,
assume_unique=assume_unique,
invert=invert,
)
isin.support_native_out = True
def itemsize(x: np.ndarray) -> int:
return x.itemsize
| ivy/ivy/functional/backends/numpy/general.py/0 | {
"file_path": "ivy/ivy/functional/backends/numpy/general.py",
"repo_id": "ivy",
"token_count": 6455
} | 21 |
# def if_exp(cond, if_true, if_false):
# return if_true() if cond else if_false()
def if_else(cond, body_fn, orelse_fn, vars):
cond = cond(*vars)
if cond:
return body_fn(*vars)
else:
return orelse_fn(*vars)
def while_loop(test_fn, body_fn, vars):
result = vars
if isinstance(vars, dict):
result = list(vars.values())
while test_fn(*result):
result = body_fn(*result)
if not isinstance(result, tuple):
result = (result,)
return result
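# Illustrative only: the body must return a tuple matching the loop
# variables, e.g. a counter that stops at 3:
#
#     >>> while_loop(lambda i: i < 3, lambda i: (i + 1,), (0,))  # -> (3,)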
| ivy/ivy/functional/backends/paddle/control_flow_ops.py/0 | {
"file_path": "ivy/ivy/functional/backends/paddle/control_flow_ops.py",
"repo_id": "ivy",
"token_count": 236
} | 22 |
# global
import paddle
from typing import Optional, Tuple, Union, Any
# local
from ivy.functional.ivy.experimental.linear_algebra import _check_valid_dimension_size
from ivy.func_wrapper import (
with_unsupported_device_and_dtypes,
with_supported_device_and_dtypes,
)
from ivy.utils.exceptions import IvyNotImplementedException
from .. import backend_version
@with_unsupported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("int8", "int16", "uint8", "float16", "bfloat16")}},
backend_version,
)
def diagflat(
x: paddle.Tensor,
/,
*,
offset: Optional[int] = 0,
padding_value: Optional[float] = 0,
align: Optional[str] = "RIGHT_LEFT",
num_rows: Optional[int] = None,
num_cols: Optional[int] = None,
out: Optional[paddle.Tensor] = None,
):
diag = paddle.diag(x.flatten(), padding_value=padding_value, offset=offset)
num_rows = num_rows if num_rows is not None else diag.shape[0]
num_cols = num_cols if num_cols is not None else diag.shape[1]
if num_rows < diag.shape[0]:
diag = diag[:num_rows, :]
if num_cols < diag.shape[1]:
diag = diag[:, :num_cols]
if diag.shape == [num_rows, num_cols]:
return diag
else:
return paddle.nn.Pad2D(
padding=(0, num_rows - diag.shape[0], 0, num_cols - diag.shape[1]),
mode="constant",
value=padding_value,
)(diag)
@with_unsupported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("int8", "uint8", "int16")}}, backend_version
)
def kron(
a: paddle.Tensor,
b: paddle.Tensor,
/,
*,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
return paddle.kron(a, b)
def matrix_exp(
x: paddle.Tensor,
/,
*,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
# TODO: this is elementwise exp, should be changed to matrix exp ASAP
# return paddle.exp(x)
raise IvyNotImplementedException()
def eig(
x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None
) -> Tuple[paddle.Tensor]:
return paddle.linalg.eig(x)
def eigvals(x: paddle.Tensor, /) -> paddle.Tensor:
return paddle.linalg.eig(x)[0]
def adjoint(
x: paddle.Tensor,
/,
*,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
_check_valid_dimension_size(x)
return paddle.moveaxis(x, -2, -1).conj()
@with_unsupported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("int8", "uint8", "int16", "float16")}},
backend_version,
)
def solve_triangular(
x1: paddle.Tensor,
x2: paddle.Tensor,
/,
*,
upper: bool = True,
adjoint: bool = False,
unit_diagonal: bool = False,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
# Paddle does not support complex tensors for this operation (cpu and gpu),
# so adjoint always equals transpose.
return paddle.linalg.triangular_solve(
x1, x2, upper=upper, transpose=adjoint, unitriangular=unit_diagonal
)
def cond(
x: paddle.Tensor,
/,
*,
p: Optional[Union[None, int, str]] = None,
out: Optional[paddle.Tensor] = None,
) -> Any:
raise IvyNotImplementedException()
def lu_factor(
x: paddle.Tensor,
/,
*,
pivot: Optional[bool] = True,
out: Optional[paddle.Tensor] = None,
) -> Any:
raise IvyNotImplementedException()
def lu_solve(
lu: paddle.Tensor,
p: paddle.Tensor,
b: paddle.Tensor,
/,
*,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
raise IvyNotImplementedException()
@with_supported_device_and_dtypes(
{
"2.6.0 and below": {
"cpu": (
"float32",
"float64",
),
"gpu": (
"float16",
"float32",
"float64",
),
}
},
backend_version,
)
def dot(
a: paddle.Tensor,
b: paddle.Tensor,
/,
*,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if len(a.shape) == 0 or len(b.shape) == 0:
return paddle.multiply(a, b)
if (
len(a.shape) in [1, 2]
and len(b.shape) in [1, 2]
or (len(a.shape) >= 1 and len(b.shape) == 1)
):
return paddle.matmul(a, b)
return paddle.tensordot(a, b, axes=[[-1], [-2]])
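# Illustrative only: the tensordot fallback contracts the last axis of `a`
# with the second-to-last axis of `b`, so (2, 3, 4) and (2, 4, 5) inputs
# give a (2, 3, 2, 5) result rather than a batched (2, 3, 5) matmul:
#
#     >>> dot(paddle.ones([2, 3, 4]), paddle.ones([2, 4, 5])).shape  # -> [2, 3, 2, 5]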
@with_supported_device_and_dtypes(
{
"2.6.0 and below": {
"cpu": (
"float32",
"float64",
),
"gpu": (
"float16",
"float32",
"float64",
),
}
},
backend_version,
)
def multi_dot(
x: paddle.Tensor,
/,
*,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
return paddle.linalg.multi_dot(x)
| ivy/ivy/functional/backends/paddle/experimental/linear_algebra.py/0 | {
"file_path": "ivy/ivy/functional/backends/paddle/experimental/linear_algebra.py",
"repo_id": "ivy",
"token_count": 2264
} | 23 |
from typing import Optional, Union, Literal
# global
import tensorflow as tf
from tensorflow.python.types.core import Tensor
# local
import ivy
from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes
from . import backend_version
def logit(
x: Union[tf.Tensor, tf.Variable],
/,
*,
eps: Optional[float] = None,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[Tensor] = None,
) -> Tensor:
x_dtype = x.dtype
if eps is None:
x = tf.where(tf.math.logical_or(x > 1, x < 0), ivy.nan, x)
else:
x = tf.clip_by_value(x, eps, 1 - eps)
return tf.cast(tf.math.log(x / (1 - x)), x_dtype)
@with_unsupported_dtypes({"2.15.0 and below": ("complex", "bool")}, backend_version)
def thresholded_relu(
x: Tensor,
/,
*,
threshold: Union[int, float] = 0,
out: Optional[Tensor] = None,
) -> Tensor:
threshold = tf.cast(threshold, x.dtype)
return tf.cast(tf.where(x > threshold, x, 0), x.dtype)
def relu6(x: Tensor, /, *, complex_mode="jax", out: Optional[Tensor] = None) -> Tensor:
return tf.nn.relu6(x)
@with_supported_dtypes({"2.15.0 and below": ("float",)}, backend_version)
def logsigmoid(
input: Tensor, /, *, complex_mode="jax", out: Optional[Tensor] = None
) -> Tensor:
if input.dtype in [tf.complex64, tf.complex128]:
return tf.math.log(tf.nn.sigmoid(input))
return tf.math.log_sigmoid(input)
@with_supported_dtypes({"2.15.0 and below": ("float",)}, backend_version)
def selu(x: Tensor, /, *, out: Optional[Tensor] = None) -> Tensor:
ret = tf.nn.selu(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret).astype(x.dtype)
return ivy.astype(ret, x.dtype)
@with_supported_dtypes({"2.15.0 and below": ("float",)}, backend_version)
def silu(
x: Tensor,
/,
*,
out: Optional[Tensor] = None,
) -> Tensor:
ret = tf.nn.silu(x)
if ivy.exists(out):
return ivy.inplace_update(out, ret).astype(x.dtype)
return ivy.astype(ret, x.dtype)
@with_supported_dtypes({"2.15.0 and below": ("float",)}, backend_version)
def elu(x: Tensor, /, *, alpha: float = 1.0, out: Optional[Tensor] = None) -> Tensor:
ret = tf.keras.activations.elu(x, alpha)
if ivy.exists(out):
return ivy.inplace_update(out, ret).astype(x.dtype)
return ivy.astype(ret, x.dtype)
@with_supported_dtypes({"2.15.0 and below": ("float",)}, backend_version)
def hardtanh(
x: Tensor,
/,
*,
max_val: float = 1.0,
min_val: float = -1.0,
out: Optional[Tensor] = None,
) -> Tensor:
ret = tf.where(
tf.math.greater(x, max_val),
max_val,
tf.where(tf.math.less(x, min_val), min_val, x),
)
if ivy.exists(out):
return ivy.inplace_update(out, ret).astype(x.dtype)
return ivy.astype(ret, x.dtype)
@with_supported_dtypes({"2.15.0 and below": ("float",)}, backend_version)
def tanhshrink(
x: Tensor,
/,
*,
out: Optional[Tensor] = None,
) -> Tensor:
ret = tf.math.subtract(x, tf.math.tanh(x))
if ivy.exists(out):
return ivy.inplace_update(out, ret).astype(x.dtype)
return ivy.astype(ret, x.dtype)
@with_supported_dtypes({"2.15.0 and below": ("float",)}, backend_version)
def threshold(
x: Tensor,
/,
*,
threshold: Union[int, float],
value: Union[int, float],
out: Optional[Tensor] = None,
) -> Tensor:
ret = tf.where(x > threshold, x, value)
if ivy.exists(out):
return ivy.inplace_update(out, ret).astype(x.dtype)
return ivy.astype(ret, x.dtype)
@with_supported_dtypes({"2.15.0 and below": ("float",)}, backend_version)
def softshrink(
x: Tensor,
/,
*,
lambd: float = 0.5,
out: Optional[Tensor] = None,
) -> Tensor:
ret = tf.where(
tf.math.greater(x, lambd),
x - lambd,
tf.where(tf.math.less(x, -lambd), x + lambd, 0),
)
if ivy.exists(out):
return ivy.inplace_update(out, ret).astype(x.dtype)
return ivy.astype(ret, x.dtype)
@with_unsupported_dtypes({"2.15.0 and below": ("complex",)}, backend_version)
def celu(
x: Tensor,
/,
*,
alpha: float = 1.0,
complex_mode="jax",
out: Optional[Tensor] = None,
) -> Tensor:
return tf.math.maximum(0, x) + alpha * tf.math.expm1(tf.math.minimum(0, x) / alpha)
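# Illustrative only: with the default alpha = 1 the expression above reduces
# to the standard ELU, max(0, x) + expm1(min(0, x)):
#
#     >>> celu(tf.constant([-1.0, 1.0]))  # -> [-0.63212055, 1.]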
@with_unsupported_dtypes({"2.15.0 and below": ("uint16",)}, backend_version)
def scaled_tanh(
x: Tensor,
/,
*,
alpha: float = 1.7159,
beta: float = 0.67,
out: Optional[Tensor] = None,
) -> Tensor:
return alpha * tf.nn.tanh(beta * x)
@with_supported_dtypes({"2.15.0 and below": ("float",)}, backend_version)
def hardshrink(
x: Tensor,
/,
*,
lambd: float = 0.5,
out: Optional[Tensor] = None,
) -> Tensor:
ret = tf.where(
tf.math.greater(x, lambd),
x,
tf.where(tf.math.less(x, -lambd), x, 0),
)
if ivy.exists(out):
return ivy.inplace_update(out, ret).astype(x.dtype)
return ivy.astype(ret, x.dtype)
@with_unsupported_dtypes({"2.14.0 and below": ("complex",)}, backend_version)
def hardsilu(
x: Tensor, /, *, complex_mode="jax", out: Optional[Tensor] = None
) -> Tensor:
ret = tf.multiply(x, tf.nn.relu6(tf.math.add(x, 3)) / 6)
if ivy.exists(out):
return ivy.inplace_update(out, ret).astype(x.dtype)
return ivy.astype(ret, x.dtype)
| ivy/ivy/functional/backends/tensorflow/experimental/activations.py/0 | {
"file_path": "ivy/ivy/functional/backends/tensorflow/experimental/activations.py",
"repo_id": "ivy",
"token_count": 2488
} | 24 |
# global
import tensorflow as tf
import logging
# local
import ivy
from ivy.functional.ivy.experimental.sparse_array import (
_is_data_not_indices_values_and_shape,
_verify_bsc_components,
_verify_bsr_components,
_verify_coo_components,
_verify_csc_components,
_verify_csr_components,
)
def is_native_sparse_array(x):
return isinstance(x, tf.SparseTensor)
def native_sparse_array(
data=None,
*,
coo_indices=None,
crow_indices=None,
col_indices=None,
ccol_indices=None,
row_indices=None,
values=None,
dense_shape=None,
format="coo",
):
if _is_data_not_indices_values_and_shape(
data,
coo_indices,
crow_indices,
col_indices,
ccol_indices,
row_indices,
values,
dense_shape,
format,
):
ivy.utils.assertions.check_true(
ivy.is_native_sparse_array(data), message="not a sparse array"
)
return data
format = format.lower()
if format == "coo":
_verify_coo_components(
coo_indices,
values,
dense_shape,
)
all_coordinates = []
for i in range(values.shape[0]):
coordinate = ivy.gather(coo_indices, ivy.array([[i]]))
coordinate = ivy.reshape(coordinate, (coo_indices.shape[0],))
all_coordinates.append(coordinate.to_list())
return tf.SparseTensor(
indices=all_coordinates, values=values, dense_shape=dense_shape
)
elif format == "csr":
_verify_csr_components(
crow_indices=crow_indices,
col_indices=col_indices,
values=values,
dense_shape=dense_shape,
)
elif format == "bsr":
_verify_bsr_components(
crow_indices=crow_indices,
col_indices=col_indices,
values=values,
dense_shape=dense_shape,
)
elif format == "csc":
_verify_csc_components(
ccol_indices=ccol_indices,
row_indices=row_indices,
values=values,
dense_shape=dense_shape,
)
else:
_verify_bsc_components(
ccol_indices=ccol_indices,
row_indices=row_indices,
values=values,
dense_shape=dense_shape,
)
logging.warning(
f"Tensorflow does not support {format.upper()} sparse array natively. None is"
" returned."
)
return None
def native_sparse_array_to_indices_values_and_shape(x):
if isinstance(x, tf.SparseTensor):
return {"coo_indices": x.indices}, x.values, x.dense_shape
raise ivy.utils.exceptions.IvyException("not a SparseTensor")
| ivy/ivy/functional/backends/tensorflow/experimental/sparse_array.py/0 | {
"file_path": "ivy/ivy/functional/backends/tensorflow/experimental/sparse_array.py",
"repo_id": "ivy",
"token_count": 1395
} | 25 |
from typing import Optional, Union
import tensorflow_probability as tfp
import tensorflow as tf
def trapz(
y: Union[tf.Tensor, tf.Variable],
/,
*,
x: Optional[Union[tf.Tensor, tf.Variable]] = None,
dx: float = 1.0,
axis: int = -1,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tfp.math.trapz(y, x=x, dx=dx, axis=axis, name=None)
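# Illustrative only: trapezoidal integration of y = [0, 1, 2] with unit
# spacing gives (0 + 1) / 2 + (1 + 2) / 2 = 2:
#
#     >>> trapz(tf.constant([0.0, 1.0, 2.0]))  # -> 2.0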
| ivy/ivy/functional/backends/tensorflow/sub_backends/tf_probability/elementwise.py/0 | {
"file_path": "ivy/ivy/functional/backends/tensorflow/sub_backends/tf_probability/elementwise.py",
"repo_id": "ivy",
"token_count": 173
} | 26 |
"""Collection of PyTorch general functions, wrapped to fit Ivy syntax and
signature."""
# global
from functools import reduce as _reduce
from numbers import Number
from operator import mul
from typing import Optional, Union, Sequence, Callable, List, Tuple
try:
import functorch
except ImportError:
functorch = () # for torch 1.10.1
import numpy as np
import torch
# local
import ivy
from ivy.func_wrapper import with_unsupported_dtypes, _update_torch_views
from . import backend_version, is_variable
from ...ivy.general import _broadcast_to
torch_scatter = None
def _parse_index(indices, ndims):
ind = []
for so in indices:
pre = []
for s in so:
if s == -1:
break
pre.append(s.item())
post = []
for s in reversed(so):
if s == -1:
break
post.append(s.item())
ind.append(
tuple(
pre
+ [slice(None, None, None) for _ in range(ndims - len(pre) - len(post))]
+ list(reversed(post))
)
)
return ind
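# Illustrative only: -1 entries mark where the explicit leading and trailing
# indices end and full slices begin, so [1, -1, 2] over 4 dims parses to
# (1, slice(None), slice(None), 2):
#
#     >>> _parse_index([torch.tensor([1, -1, 2])], 4)
#     # -> [(1, slice(None, None, None), slice(None, None, None), 2)]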
def is_native_array(x, /, *, exclusive=False):
if isinstance(x, torch.Tensor):
if exclusive and x.requires_grad:
return False
return True
return False
@with_unsupported_dtypes({"2.2 and below": ("complex", "bfloat16")}, backend_version)
def array_equal(x0: torch.Tensor, x1: torch.Tensor, /) -> bool:
x0, x1 = ivy.promote_types_of_inputs(x0, x1)
return torch.equal(x0, x1)
def container_types():
return []
def current_backend_str() -> str:
return "torch"
def neg_step(query):
return (
not isinstance(query, (int, bool))
and not ivy.is_array(query)
and query is not None
and query is not Ellipsis
and (
(isinstance(query, slice) and query.step is not None and query.step < 0)
or (
not isinstance(query, slice)
and any(
isinstance(q, slice) and q.step is not None and q.step < 0
for q in query
)
)
)
)
def get_item(
x: torch.Tensor,
/,
query: Union[torch.Tensor, Tuple],
*,
copy: Optional[bool] = None,
) -> torch.Tensor:
return x.__getitem__(query)
get_item.partial_mixed_handler = lambda x, query, **kwargs: not neg_step(query)
def set_item(
x: torch.Tensor,
query: Union[torch.Tensor, Tuple],
val: torch.Tensor,
/,
*,
copy: Optional[bool] = False,
) -> torch.Tensor:
if hasattr(x, "dtype") and hasattr(val, "dtype") and x.dtype != val.dtype:
val = val.to(x.dtype)
if copy:
x = x.clone()
x.__setitem__(query, val)
return x
set_item.partial_mixed_handler = (
lambda x, query, val, **kwargs: not neg_step(query) and not x.requires_grad
)
def to_numpy(
x: Union[torch.Tensor, List[torch.Tensor]], /, *, copy: bool = True
) -> Union[np.ndarray, List[np.ndarray]]:
if isinstance(x, (float, int, bool)):
return x
elif isinstance(x, np.ndarray):
if copy:
return x.copy()
else:
return x
elif torch.is_tensor(x):
x = x.resolve_neg().resolve_conj()
if copy:
# we don't use inbuilt numpy() because it blocks for
# bfloat16, which we are supporting here by importing
# ml_dtypes
# TODO: use torch's numpy() method once this feature is accepted
# https://github.com/pytorch/pytorch/issues/109873
if 0 in x.shape:
# this is necessary because tolist converts all empty shapes to (0,)
return np.empty(x.shape, dtype=ivy.as_ivy_dtype(x.dtype))
return np.array(x.tolist(), dtype=ivy.as_ivy_dtype(x.dtype))
else:
raise ivy.utils.exceptions.IvyException(
"Overwriting the same address is not supported for torch."
)
elif isinstance(x, list):
return [ivy.to_numpy(u) for u in x]
raise ivy.utils.exceptions.IvyException("Expected a pytorch tensor.")
def to_scalar(x: torch.Tensor, /) -> Number:
if isinstance(x, (float, int)):
return x
return x.item()
def to_list(x: torch.Tensor, /) -> list:
if isinstance(x, np.ndarray):
return x.tolist()
elif torch.is_tensor(x):
if x.dtype is torch.bfloat16:
default_dtype = ivy.default_float_dtype(as_native=True)
if default_dtype is torch.bfloat16:
x = x.to(torch.float32)
else:
x = x.to(default_dtype)
return x.detach().cpu().numpy().astype("bfloat16").tolist()
else:
return x.detach().cpu().numpy().tolist()
raise ivy.utils.exceptions.IvyException("Expected a pytorch tensor.")
def gather(
params: torch.Tensor,
indices: torch.Tensor,
/,
*,
axis: int = -1,
batch_dims: int = 0,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
axis %= len(params.shape)
batch_dims %= len(params.shape)
ivy.utils.assertions.check_gather_input_valid(params, indices, axis, batch_dims)
result = []
if batch_dims == 0:
result = torch.gather(params, axis, indices, sparse_grad=False, out=out)
else:
for b in range(batch_dims):
if b == 0:
zip_list = [(p, i) for p, i in zip(params, indices)]
else:
zip_list = [
(p, i) for z in [zip(p1, i1) for p1, i1 in zip_list] for p, i in z
]
for z in zip_list:
p, i = z
r = torch.gather(
                    p, (axis - batch_dims) % p.ndim, i, sparse_grad=False, out=None
)
result.append(r)
result = torch.stack(result)
result = result.reshape([*params.shape[0:batch_dims], *result.shape[1:]])
if ivy.exists(out):
return ivy.inplace_update(out, result)
return result
def gather_nd_helper(params, indices):
indices_shape = indices.shape
params_shape = params.shape
if len(indices.shape) == 0:
num_index_dims = 1
else:
num_index_dims = indices_shape[-1]
result_dim_sizes_list = [
_reduce(mul, params_shape[i + 1 :], 1) for i in range(len(params_shape) - 1)
] + [1]
result_dim_sizes = torch.tensor(result_dim_sizes_list)
implicit_indices_factor = int(result_dim_sizes[num_index_dims - 1].item())
flat_params = torch.reshape(params, (-1,))
new_shape = [1] * (len(indices_shape) - 1) + [num_index_dims]
indices_scales = torch.reshape(result_dim_sizes[0:num_index_dims], new_shape)
indices_for_flat_tiled = torch.reshape(
torch.sum(indices * indices_scales, -1, keepdim=True), (-1, 1)
).repeat(*[1, implicit_indices_factor])
implicit_indices = torch.unsqueeze(torch.arange(implicit_indices_factor), 0).repeat(
*[indices_for_flat_tiled.shape[0], 1]
)
indices_for_flat = indices_for_flat_tiled + implicit_indices
flat_indices_for_flat = torch.reshape(indices_for_flat, (-1,)).type(torch.long)
flat_gather = torch.gather(flat_params, 0, flat_indices_for_flat)
res = torch.reshape(
flat_gather, list(indices_shape[:-1]) + list(params_shape[num_index_dims:])
)
return res
def gather_nd(
params: torch.Tensor,
indices: torch.Tensor,
/,
*,
batch_dims: int = 0,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
ivy.utils.assertions.check_gather_nd_input_valid(params, indices, batch_dims)
batch_dims %= len(params.shape)
result = []
if batch_dims == 0:
result = gather_nd_helper(params, indices)
else:
for b in range(batch_dims):
if b == 0:
zip_list = [(p, i) for p, i in zip(params, indices)]
else:
zip_list = [
(p, i) for z in [zip(p1, i1) for p1, i1 in zip_list] for p, i in z
]
for z in zip_list:
p, i = z
r = gather_nd_helper(p, i)
result.append(r)
result = torch.stack(result)
result = result.reshape([*params.shape[0:batch_dims], *result.shape[1:]])
return result
def get_num_dims(
x: torch.Tensor, /, *, as_array: bool = False
) -> Union[torch.Tensor, int]:
return torch.tensor(len(x.shape)) if as_array else len(x.shape)
def inplace_arrays_supported():
return True
def inplace_decrement(
x: Union[ivy.Array, torch.Tensor],
val: Union[ivy.Array, torch.Tensor],
) -> ivy.Array:
(x_native, val_native), _ = ivy.args_to_native(x, val)
x_native.data -= val_native
if ivy.is_ivy_array(x):
x.data = x_native
else:
x = ivy.Array(x_native)
return x
def inplace_increment(
x: Union[ivy.Array, torch.Tensor],
val: Union[ivy.Array, torch.Tensor],
) -> ivy.Array:
(x_native, val_native), _ = ivy.args_to_native(x, val)
x_native.data += val_native
if ivy.is_ivy_array(x):
x.data = x_native
else:
x = ivy.Array(x_native)
return x
def inplace_update(
x: Union[ivy.Array, torch.Tensor],
val: Union[ivy.Array, torch.Tensor],
/,
*,
ensure_in_backend: bool = False,
keep_input_dtype: bool = False,
) -> ivy.Array:
ivy.utils.assertions.check_inplace_sizes_valid(x, val)
if ivy.is_array(x) and ivy.is_array(val):
if keep_input_dtype:
val = ivy.astype(val, x.dtype)
(x_native, val_native), _ = ivy.args_to_native(x, val)
if is_variable(x_native):
            # `x_native.copy_ = val_native` would only shadow the method with
            # an attribute; do the actual in-place copy, outside autograd
            with torch.no_grad():
                x_native.copy_(val_native)
else:
x_native[()] = val_native
x_native = x_native.to(val_native.device)
if ivy.is_native_array(x):
return x_native
if ivy.is_ivy_array(x):
x.data = x_native
_update_torch_views(x)
else:
x = ivy.to_ivy(x_native)
if ensure_in_backend:
x._data = val_native
return x
else:
return val
def inplace_variables_supported():
return True
def multiprocessing(context: Optional[str] = None):
import torch.multiprocessing
if context is None:
return torch.multiprocessing
return torch.multiprocessing.get_context(context)
@with_unsupported_dtypes(
{
"2.2 and below": ("bfloat16",),
},
backend_version,
)
def scatter_flat(
indices: torch.Tensor,
updates: torch.Tensor,
/,
*,
size: Optional[int] = None,
reduction: str = "sum",
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
target = out
target_given = ivy.exists(target)
if ivy.exists(size) and ivy.exists(target):
ivy.utils.assertions.check_equal(len(target.shape), 1, as_array=False)
ivy.utils.assertions.check_equal(target.shape[0], size, as_array=False)
dtype = updates.dtype
if reduction not in ["sum", "replace", "min", "max"]:
raise ivy.utils.exceptions.IvyException(
f'reduction is {reduction}, but it must be one of "sum", "min", "max" or'
' "replace"'
)
if target_given:
output = out
else:
reduction = "replace"
output = torch.zeros([size], dtype=dtype)
global torch_scatter
if torch_scatter is None:
try:
import torch_scatter as torch_scatter
except ImportError as e:
raise ivy.utils.exceptions.IvyException(
"Unable to import torch_scatter, verify this is correctly installed."
) from e
if reduction == "replace":
output[indices.type(torch.int64)] = updates
res = output
else:
res = torch_scatter.scatter(
updates, indices.type(torch.int64), out=output, reduce=reduction
)
return res
scatter_flat.support_native_out = True
@with_unsupported_dtypes(
{
"2.2 and below": (
"float16",
"bfloat16",
)
},
backend_version,
)
def scatter_nd(
indices: torch.Tensor,
updates: torch.Tensor,
/,
shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
*,
reduction: str = "sum",
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
updates = torch.tensor(
updates,
dtype=(
ivy.dtype(out, as_native=True)
if ivy.exists(out)
else ivy.default_dtype(item=updates, as_native=True)
),
)
expected_shape = (
list(indices.shape[:-1]) + list(out.shape[indices.shape[-1] :])
if ivy.exists(out)
else list(indices.shape[:-1]) + list(shape[indices.shape[-1] :])
)
updates = _broadcast_to(updates, expected_shape)._data
# implementation
target_given = ivy.exists(out)
if ivy.exists(shape) and target_given:
ivy.utils.assertions.check_equal(
ivy.Shape(out.shape), ivy.Shape(shape), as_array=False
)
shape = list(shape) if ivy.exists(shape) else list(out.shape)
dtype = updates.dtype
indices_shape = indices.shape
num_index_dims = indices_shape[-1]
result_dim_sizes_list = [
_reduce(mul, shape[i + 1 :], 1) for i in range(len(shape) - 1)
] + [1]
result_dim_sizes = torch.tensor(result_dim_sizes_list)
implicit_indices_factor = int(result_dim_sizes[num_index_dims - 1].item())
flat_result_size = _reduce(mul, shape, 1)
if reduction not in ["sum", "replace", "min", "max"]:
raise ivy.utils.exceptions.IvyException(
f'reduction is {reduction}, but it must be one of "sum", "min", "max" or'
' "replace"'
)
if target_given:
flat_output = torch.reshape(out, (flat_result_size,)).detach()
else:
flat_output = torch.zeros(flat_result_size, dtype=dtype)
flat_updates = torch.reshape(updates, (-1,))
new_shape = [1] * (len(indices_shape) - 1) + [num_index_dims]
indices_scales = torch.reshape(result_dim_sizes[0:num_index_dims], new_shape)
indices_for_flat_tiled = torch.reshape(
torch.sum(indices * indices_scales, -1, keepdim=True), (-1, 1)
).repeat(*[1, implicit_indices_factor])
implicit_indices = torch.unsqueeze(torch.arange(implicit_indices_factor), 0).repeat(
*[indices_for_flat_tiled.shape[0], 1]
)
indices_for_flat = indices_for_flat_tiled + implicit_indices
flat_indices_for_flat = torch.reshape(indices_for_flat, (-1,)).type(torch.long)
global torch_scatter
if torch_scatter is None:
try:
import torch_scatter as torch_scatter
except ImportError as e:
raise ivy.utils.exceptions.IvyException(
"Unable to import torch_scatter, verify this is correctly installed."
) from e
if reduction == "replace":
flat_output[flat_indices_for_flat] = flat_updates
flat_scatter = flat_output
else:
flat_scatter = torch_scatter.scatter(
flat_updates,
flat_indices_for_flat,
out=flat_output.clone(),
reduce=reduction,
)
res = torch.reshape(flat_scatter, list(shape))
if ivy.exists(out):
return ivy.inplace_update(out, res)
return res
scatter_nd.support_native_out = True
def shape(
x: torch.Tensor,
/,
*,
as_array: bool = False,
) -> Union[ivy.Shape, ivy.Array]:
if as_array:
return ivy.array(x.shape, dtype=ivy.default_int_dtype())
else:
return ivy.Shape(x.shape)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, backend_version)
def vmap_v_1p13p1_and_below(
func: Callable,
in_axes: Union[int, Sequence[int], Sequence[None]] = 0,
out_axes: int = 0,
) -> Callable:
@ivy.output_to_native_arrays
@ivy.inputs_to_native_arrays
def _vmap(*args):
def new_fun(*args):
return ivy.to_native(func(*args))
new_func = functorch.vmap(new_fun, in_axes, out_axes)
return new_func(*args)
return _vmap
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, backend_version)
def vmap_v_2p0p0_and_above(
func: Callable,
in_axes: Union[int, Sequence[int], Sequence[None]] = 0,
out_axes: int = 0,
) -> Callable:
@ivy.output_to_native_arrays
@ivy.inputs_to_native_arrays
def _vmap(*args):
def new_fun(*args):
return ivy.to_native(func(*args))
new_func = torch.vmap(new_fun, in_axes, out_axes)
return new_func(*args)
return _vmap
@with_unsupported_dtypes(
{"2.2 and below": ("bfloat16", "float16", "complex", "bool")}, backend_version
)
def isin(
elements: torch.tensor,
test_elements: torch.tensor,
/,
*,
assume_unique: bool = False,
invert: bool = False,
) -> torch.tensor:
return torch.isin(
elements,
test_elements,
assume_unique=assume_unique,
invert=invert,
)
isin.support_native_out = True
def itemsize(x: torch.tensor) -> int:
return x.element_size()
| ivy/ivy/functional/backends/torch/general.py/0 | {
"file_path": "ivy/ivy/functional/backends/torch/general.py",
"repo_id": "ivy",
"token_count": 8060
} | 27 |
import torch
import xformers.ops as xops
from ivy.func_wrapper import to_native_arrays_and_back
@to_native_arrays_and_back
def scaled_dot_product_attention(
q,
k,
v,
scale: float,
/,
*,
mask=None,
out=None,
):
if isinstance(mask, torch.Tensor):
mask = torch.where(mask == 0, -torch.inf, 0)
return xops.memory_efficient_attention(q, k, v, scale=scale, attn_bias=mask)
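# Illustrative only (assumes xformers is installed and the inputs use its
# (batch, seq, heads, dim) layout): the output shape matches the query, and
# zero entries of a 0/1 mask become a -inf additive bias, i.e. fully masked:
#
#     >>> q = k = v = torch.rand(1, 4, 2, 8)
#     >>> scaled_dot_product_attention(q, k, v, 8 ** -0.5).shape  # -> torch.Size([1, 4, 2, 8])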
| ivy/ivy/functional/backends/torch/sub_backends/xformers/layers.py/0 | {
"file_path": "ivy/ivy/functional/backends/torch/sub_backends/xformers/layers.py",
"repo_id": "ivy",
"token_count": 186
} | 28 |
import ivy
from ivy.functional.frontends.jax.func_wrapper import to_ivy_arrays_and_back
@to_ivy_arrays_and_back
def stop_gradient(x):
return ivy.stop_gradient(x)
| ivy/ivy/functional/frontends/jax/lax/custom_gradient_operators.py/0 | {
"file_path": "ivy/ivy/functional/frontends/jax/lax/custom_gradient_operators.py",
"repo_id": "ivy",
"token_count": 68
} | 29 |
# global
import logging
# local
import ivy
from ivy.functional.frontends.jax.func_wrapper import (
to_ivy_arrays_and_back,
)
from ivy.functional.frontends.numpy.func_wrapper import from_zero_dim_arrays_to_scalar
from ivy.func_wrapper import (
with_supported_device_and_dtypes,
with_unsupported_dtypes,
)
@to_ivy_arrays_and_back
@with_unsupported_dtypes(
{
"0.4.24 and below": (
"float16",
"bfloat16",
)
},
"jax",
)
def argmax(a, axis=None, out=None, keepdims=False):
return ivy.argmax(a, axis=axis, keepdims=keepdims, out=out, dtype=ivy.int64)
# argmin
@to_ivy_arrays_and_back
@with_supported_device_and_dtypes(
{
"0.4.20 and below": {
"cpu": (
"int16",
"int32",
"int64",
"float32",
"float64",
"uint8",
"uint16",
"uint32",
"uint64",
)
}
},
"jax",
)
def argmin(a, axis=None, out=None, keepdims=None):
if a is not None:
if isinstance(a, list):
if all(isinstance(elem, ivy.Array) for elem in a):
if len(a) == 1:
a = a[0]
else:
return [
ivy.argmin(
ivy.to_native_arrays(elem),
axis=axis,
out=out,
keepdims=keepdims,
)
for elem in a
]
else:
raise ValueError(
"Input 'a' must be an Ivy array or a list of Ivy arrays."
)
if not isinstance(a, ivy.Array):
raise TypeError("Input 'a' must be an array.")
if a.size == 0:
raise ValueError("Input 'a' must not be empty.")
return ivy.argmin(a, axis=axis, out=out, keepdims=keepdims)
else:
raise ValueError("argmin takes at least 1 argument.")
@to_ivy_arrays_and_back
def argsort(a, axis=-1, kind="stable", order=None):
if kind != "stable":
logging.warning(
"'kind' argument to argsort is ignored; only 'stable' sorts are supported."
)
if order is not None:
raise ivy.utils.exceptions.IvyError(
"'order' argument to argsort is not supported."
)
return ivy.argsort(a, axis=axis)
@to_ivy_arrays_and_back
def argwhere(a, /, *, size=None, fill_value=None):
if size is None and fill_value is None:
return ivy.argwhere(a)
result = ivy.matrix_transpose(
ivy.vstack(ivy.nonzero(a, size=size, fill_value=fill_value))
)
num_of_dimensions = a.ndim
if num_of_dimensions == 0:
return result[:0].reshape(result.shape[0], 0)
return result.reshape(result.shape[0], num_of_dimensions)
@with_unsupported_dtypes(
{
"0.4.24 and below": (
"uint8",
"int8",
"bool",
)
},
"jax",
)
@to_ivy_arrays_and_back
def count_nonzero(a, axis=None, keepdims=False):
return ivy.astype(ivy.count_nonzero(a, axis=axis, keepdims=keepdims), "int64")
@to_ivy_arrays_and_back
def extract(condition, arr):
    if not ivy.is_bool_dtype(condition):
condition = condition != 0
return arr[condition]
@to_ivy_arrays_and_back
def flatnonzero(a):
    # ivy.nonzero returns a tuple of index arrays; flatnonzero is its single
    # 1-D entry
    return ivy.nonzero(ivy.reshape(a, (-1,)))[0]
@to_ivy_arrays_and_back
def lexsort(keys, /, *, axis=-1):
return ivy.lexsort(keys, axis=axis)
@to_ivy_arrays_and_back
def msort(a):
return ivy.msort(a)
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def nanargmax(a, /, *, axis=None, out=None, keepdims=False):
if out is not None:
raise NotImplementedError(
"The 'out' argument to jnp.nanargmax is not supported."
)
nan_mask = ivy.isnan(a)
if not ivy.any(nan_mask):
return ivy.argmax(a, axis=axis, keepdims=keepdims)
a = ivy.where(nan_mask, -ivy.inf, a)
res = ivy.argmax(a, axis=axis, keepdims=keepdims)
return ivy.where(ivy.all(nan_mask, axis=axis, keepdims=keepdims), -1, res)
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def nanargmin(a, /, *, axis=None, out=None, keepdims=None):
if out is not None:
raise NotImplementedError(
"The 'out' argument to jnp.nanargmax is not supported."
)
nan_mask = ivy.isnan(a)
if not ivy.any(nan_mask):
return ivy.argmin(a, axis=axis, keepdims=keepdims)
a = ivy.where(nan_mask, ivy.inf, a)
res = ivy.argmin(a, axis=axis, keepdims=keepdims)
return ivy.where(ivy.all(nan_mask, axis=axis, keepdims=keepdims), -1, res)
@to_ivy_arrays_and_back
def nonzero(a, *, size=None, fill_value=None):
return ivy.nonzero(a, size=size, fill_value=fill_value)
@to_ivy_arrays_and_back
def searchsorted(a, v, side="left", sorter=None, *, method="scan"):
return ivy.searchsorted(a, v, side=side, sorter=sorter, ret_dtype="int32")
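# Illustrative only: the result is the insertion index that keeps `a`
# sorted, e.g. 4 lands between 3 and 5:
#
#     >>> searchsorted(ivy.array([1, 3, 5]), 4)  # -> 2 (as an int32 array)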
@to_ivy_arrays_and_back
def sort(a, axis=-1, kind="quicksort", order=None):
# todo: handle case where order is not None
return ivy.sort(a, axis=axis)
@to_ivy_arrays_and_back
def sort_complex(a):
return ivy.sort(a)
@to_ivy_arrays_and_back
def unique(
ar,
return_index=False,
return_inverse=False,
return_counts=False,
axis=None,
*,
size=None,
fill_value=None,
):
uniques = list(ivy.unique_all(ar, axis=axis))
if size is not None:
fill_value = fill_value if fill_value is not None else 1 # default fill_value 1
pad_len = size - len(uniques[0])
if pad_len > 0:
# padding
num_dims = len(uniques[0].shape) - 1
padding = [(0, 0)] * num_dims + [(0, pad_len)]
uniques[0] = ivy.pad(uniques[0], padding, constant_values=fill_value)
# padding the indices and counts with zeros
for i in range(1, len(uniques)):
if i == 2:
continue
uniques[i] = ivy.pad(uniques[i], padding[-1], constant_values=0)
else:
for i in range(len(uniques)):
uniques[i] = uniques[i][..., :size]
# constructing a list of bools for indexing
bools = [return_index, return_inverse, return_counts]
# indexing each element whose condition is True except for the values
uniques = [uniques[0]] + [uni for idx, uni in enumerate(uniques[1:]) if bools[idx]]
return uniques[0] if len(uniques) == 1 else uniques
@to_ivy_arrays_and_back
def where(condition, x=None, y=None, *, size=None, fill_value=0):
if x is None and y is None:
return nonzero(condition, size=size, fill_value=fill_value)
if x is not None and y is not None:
return ivy.where(condition, x, y)
else:
raise ValueError("Both x and y should be given.")
| ivy/ivy/functional/frontends/jax/numpy/searching_sorting.py/0 | {
"file_path": "ivy/ivy/functional/frontends/jax/numpy/searching_sorting.py",
"repo_id": "ivy",
"token_count": 3453
} | 30 |
import ivy
from ivy.functional.frontends.mxnet.func_wrapper import to_ivy_arrays_and_back
@to_ivy_arrays_and_back
def tensordot(a, b, axes=2):
return ivy.tensordot(a, b, axes=axes)
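# Illustrative only: the default axes=2 contracts the last two axes of `a`
# with the first two of `b`, e.g. shapes (3, 4, 5) and (4, 5, 6) give (3, 6):
#
#     >>> tensordot(ivy.ones((3, 4, 5)), ivy.ones((4, 5, 6))).shape  # -> (3, 6)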
| ivy/ivy/functional/frontends/mxnet/numpy/linalg.py/0 | {
"file_path": "ivy/ivy/functional/frontends/mxnet/numpy/linalg.py",
"repo_id": "ivy",
"token_count": 82
} | 31 |
from .creating_data_types import *
from .data_type_testing import *
from .data_type_information import *
from .general import *
from .miscellaneous import *
| ivy/ivy/functional/frontends/numpy/data_type_routines/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/data_type_routines/__init__.py",
"repo_id": "ivy",
"token_count": 44
} | 32 |
from . import matrix_and_vector_products
from .matrix_and_vector_products import *
from . import norms_and_other_numbers
from .norms_and_other_numbers import *
from . import decompositions
from .decompositions import *
from . import matrix_eigenvalues
from .matrix_eigenvalues import *
from . import solving_equations_and_inverting_matrices
from .solving_equations_and_inverting_matrices import *
| ivy/ivy/functional/frontends/numpy/linalg/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/linalg/__init__.py",
"repo_id": "ivy",
"token_count": 120
} | 33 |
# global
import ivy
from ivy.functional.frontends.numpy import promote_types_of_numpy_inputs
from ivy.functional.frontends.numpy.func_wrapper import (
to_ivy_arrays_and_back,
handle_numpy_casting,
handle_numpy_dtype,
from_zero_dim_arrays_to_scalar,
handle_numpy_out,
)
# --- Helpers --- #
# --------------- #
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _add(
x1,
x2,
/,
out=None,
*,
where=True,
casting="same_kind",
order="k",
dtype=None,
subok=True,
):
x1, x2 = promote_types_of_numpy_inputs(x1, x2)
ret = ivy.add(x1, x2, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _divide(
x1,
x2,
/,
out=None,
*,
where=True,
casting="same_kind",
order="k",
dtype=None,
subok=True,
):
x1, x2 = promote_types_of_numpy_inputs(x1, x2)
ret = ivy.divide(x1, x2, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _divmod(
x1,
x2,
/,
out1_2=(None, None),
out=None,
*,
where=True,
casting="same_kind",
order="K",
dtype=None,
subok=True,
):
if dtype:
x1 = ivy.astype(ivy.array(x1), ivy.as_ivy_dtype(dtype))
x2 = ivy.astype(ivy.array(x2), ivy.as_ivy_dtype(dtype))
ret = [ivy.floor_divide(x1, x2, out=out), ivy.remainder(x1, x2, out=out)]
if ivy.is_array(where):
ret = ivy.where(
where,
ret,
(
[
ivy.default(out, ivy.zeros_like(ret[0])),
ivy.default(out, ivy.zeros_like(ret[1])),
]
),
out=out,
)
return ret
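# Illustrative only: _divmod pairs the floor quotient with the remainder,
# so 7 divided by 3 yields [2, 1]:
#
#     >>> _divmod(7, 3)  # -> [2, 1]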
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _float_power(
x1,
x2,
/,
out=None,
*,
where=True,
casting="same_kind",
order="k",
dtype=None,
subok=True,
):
x1, x2 = promote_types_of_numpy_inputs(x1, x2)
ret = ivy.float_power(x1, x2, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _floor_divide(
x1,
x2,
/,
out=None,
*,
where=True,
casting="same_kind",
order="k",
dtype=None,
subok=True,
):
if dtype:
x1 = ivy.astype(ivy.array(x1), ivy.as_ivy_dtype(dtype))
x2 = ivy.astype(ivy.array(x2), ivy.as_ivy_dtype(dtype))
ret = ivy.floor_divide(x1, x2, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _fmod(
x1,
x2,
/,
out=None,
*,
where=True,
casting="same_kind",
order="K",
dtype=None,
subok=True,
):
if dtype:
x1 = ivy.astype(ivy.array(x1), ivy.as_ivy_dtype(dtype))
x2 = ivy.astype(ivy.array(x2), ivy.as_ivy_dtype(dtype))
ret = ivy.fmod(x1, x2, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _mod(
x1,
x2,
/,
out=None,
*,
where=True,
casting="same_kind",
order="K",
dtype=None,
subok=True,
):
if dtype:
x1 = ivy.astype(ivy.array(x1), ivy.as_ivy_dtype(dtype))
x2 = ivy.astype(ivy.array(x2), ivy.as_ivy_dtype(dtype))
ret = ivy.remainder(x1, x2, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _modf(
x,
/,
out1_2=(None, None),
out=None,
*,
where=True,
casting="same_kind",
order="K",
dtype=None,
subok=True,
):
if dtype:
x = ivy.astype(ivy.array(x), ivy.as_ivy_dtype(dtype))
integral_part = ivy.floor(x)
fractional_part = x - integral_part
if ivy.is_array(where):
integral_part = ivy.where(
where,
integral_part,
ivy.default(out, ivy.zeros_like(integral_part)),
out=out,
)
fractional_part = ivy.where(
where,
fractional_part,
ivy.default(out, ivy.zeros_like(fractional_part)),
out=out,
)
return fractional_part, integral_part
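# Illustrative sketch (added comment, not in the original file): `_modf` splits
# values into fractional and integral parts via floor, e.g.
#   _modf(ivy.array([2.5])) -> (ivy.array([0.5]), ivy.array([2.]))
# Note that the floor-based split matches numpy.modf only for non-negative
# inputs; numpy truncates toward zero for negatives (modf(-2.5) == (-0.5, -2.0)).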
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _multiply(
x1,
x2,
/,
out=None,
*,
where=True,
casting="same_kind",
order="k",
dtype=None,
subok=True,
):
x1, x2 = promote_types_of_numpy_inputs(x1, x2)
ret = ivy.multiply(x1, x2, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _negative(
x,
/,
out=None,
*,
where=True,
casting="same_kind",
order="K",
dtype=None,
subok=True,
):
ret = ivy.negative(x, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _positive(
x,
/,
out=None,
*,
where=True,
casting="same_kind",
order="K",
dtype=None,
subok=True,
):
ret = ivy.positive(x, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _power(
x1,
x2,
/,
out=None,
*,
where=True,
casting="same_kind",
order="k",
dtype=None,
subok=True,
):
x1, x2 = promote_types_of_numpy_inputs(x1, x2)
ret = ivy.pow(x1, x2, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _reciprocal(
x,
/,
out=None,
*,
where=True,
casting="same_kind",
order="K",
dtype=None,
subok=True,
):
if dtype is None:
dtype = ivy.as_ivy_dtype(x.dtype)
ret = ivy.reciprocal(x, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret.astype(dtype)
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _remainder(
x1,
x2,
/,
out=None,
*,
where=True,
casting="same_kind",
order="K",
dtype=None,
subok=True,
):
if dtype:
x1 = ivy.astype(ivy.array(x1), ivy.as_ivy_dtype(dtype))
x2 = ivy.astype(ivy.array(x2), ivy.as_ivy_dtype(dtype))
ret = ivy.remainder(x1, x2, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _subtract(
x1,
x2,
/,
out=None,
*,
where=True,
casting="same_kind",
order="k",
dtype=None,
subok=True,
):
x1, x2 = promote_types_of_numpy_inputs(x1, x2)
ret = ivy.subtract(x1, x2, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
# --- Main --- #
# ------------ #
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def vdot(
a,
b,
/,
):
a, b = promote_types_of_numpy_inputs(a, b)
return ivy.multiply(a, b).sum()
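# Illustrative sketch (added comment, not in the original file): for 1-D inputs
# this reduces to the ordinary dot product, e.g.
#   vdot(ivy.array([1, 2, 3]), ivy.array([4, 5, 6])) -> 32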
# --- end of source file: ivy/ivy/functional/frontends/numpy/mathematical_functions/arithmetic_operations.py ---
# global
import math

# local
import ivy
from ivy.functional.frontends.numpy.func_wrapper import (
to_ivy_arrays_and_back,
handle_numpy_out,
)
# --- Helpers --- #
# --------------- #
def _cpercentile(N, percent, key=lambda x: x):
"""Find the percentile of a list of values.
@parameter N - is a list of values. Note N MUST BE already sorted.
@parameter percent - a float value from 0.0 to 1.0.
@parameter key - optional key function to compute value from each element of N.
@return - the percentile of the values
"""
N.sort()
k = (len(N) - 1) * percent
    f = math.floor(k)
    c = math.ceil(k)
if f == c:
return key(N[int(k)])
d0 = key(N[int(f)]) * (c - k)
d1 = key(N[int(c)]) * (k - f)
return d0 + d1
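# Worked example (added comment, not in the original file): for N = [1, 2, 3, 4]
# and percent = 0.5 we get k = 3 * 0.5 = 1.5, f = 1, c = 2, so the result is
# N[1] * (2 - 1.5) + N[2] * (1.5 - 1) = 1.0 + 1.5 = 2.5, i.e. linear
# interpolation between the two nearest order statistics.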
def _quantile_is_valid(q):
# avoid expensive reductions, relevant for arrays with < O(1000) elements
if q.ndim == 1 and q.size < 10:
for i in range(q.size):
if not (0.0 <= q[i] <= 1.0):
return False
else:
if not (ivy.all(q >= 0) and ivy.all(q <= 1)):
return False
return True
# --- Main --- #
# ------------ #
def nanpercentile(
a,
/,
*,
q,
axis=None,
out=None,
overwrite_input=False,
method="linear",
keepdims=False,
interpolation=None,
):
a = ivy.array(a)
q = ivy.divide(q, 100.0)
q = ivy.array(q)
if not _quantile_is_valid(q):
        # raise ValueError("percentiles must be in the range [0, 100]")
        ivy.logging.warning("percentiles must be in the range [0, 100]")
return []
if axis is None:
resultarray = []
nanlessarray = []
for x in a:
for i in x:
if not ivy.isnan(i):
nanlessarray.append(i)
for i in q:
resultarray.append(_cpercentile(nanlessarray, i))
return resultarray
elif axis == 1:
resultarray = []
nanlessarrayofarrays = []
for i in a:
nanlessarray = []
for t in i:
if not ivy.isnan(t):
nanlessarray.append(t)
nanlessarrayofarrays.append(nanlessarray)
for i in q:
arrayofpercentiles = []
for ii in nanlessarrayofarrays:
arrayofpercentiles.append(_cpercentile(ii, i))
resultarray.append(arrayofpercentiles)
return resultarray
elif axis == 0:
resultarray = []
try:
a = ivy.swapaxes(a, 0, 1)
except ivy.utils.exceptions.IvyError:
ivy.logging.warning("axis is 0 but couldn't swap")
finally:
nanlessarrayofarrays = []
for i in a:
nanlessarray = []
for t in i:
if not ivy.isnan(t):
nanlessarray.append(t)
nanlessarrayofarrays.append(nanlessarray)
for i in q:
arrayofpercentiles = []
for ii in nanlessarrayofarrays:
arrayofpercentiles.append(_cpercentile(ii, i))
resultarray.append(arrayofpercentiles)
return resultarray
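# Illustrative sketch (added comment, not in the original file): NaNs are
# dropped before interpolating, so for a 2-D input something like
#   nanpercentile(ivy.array([[1., float("nan")], [3., 4.]]), q=[50])
# should evaluate the 50th percentile over the NaN-free values [1., 3., 4.]
# and return roughly [3.0].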
@to_ivy_arrays_and_back
@handle_numpy_out
def ptp(a, axis=None, out=None, keepdims=False):
x = ivy.max(a, axis=axis, keepdims=keepdims)
y = ivy.min(a, axis=axis, keepdims=keepdims)
ret = ivy.subtract(x, y)
return ret.astype(a.dtype, copy=False)
# --- end of source file: ivy/ivy/functional/frontends/numpy/statistics/order_statistics.py ---
# global
import ivy
import ivy.functional.frontends.paddle as paddle
from ivy.func_wrapper import (
with_unsupported_dtypes,
handle_out_argument,
with_supported_dtypes,
)
from ivy.functional.frontends.paddle.func_wrapper import (
to_ivy_arrays_and_back,
)
@with_supported_dtypes(
{
"2.6.0 and below": (
"float32",
"float64",
"bool",
"uint8",
"int8",
"int16",
"int32",
"int64",
)
},
"paddle",
)
@to_ivy_arrays_and_back
@handle_out_argument
def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
ret = ivy.allclose(x, y, rtol=rtol, atol=atol, equal_nan=equal_nan)
return paddle.to_tensor([ret])
@with_supported_dtypes(
{
"2.6.0 and below": (
"bool",
"uint8",
"int8",
"int16",
"int32",
"int64",
)
},
"paddle",
)
@to_ivy_arrays_and_back
@handle_out_argument
def bitwise_and(x, y, /, *, name=None, out=None):
return ivy.bitwise_and(x, y, out=out)
@with_supported_dtypes(
{
"2.6.0 and below": (
"bool",
"uint8",
"int8",
"int16",
"int32",
"int64",
)
},
"paddle",
)
@to_ivy_arrays_and_back
@handle_out_argument
def bitwise_not(x, out=None, name=None):
return ivy.bitwise_invert(x, out=out)
@with_supported_dtypes(
{
"2.6.0 and below": (
"bool",
"uint8",
"int8",
"int16",
"int32",
"int64",
)
},
"paddle",
)
@to_ivy_arrays_and_back
@handle_out_argument
def bitwise_or(x, y, name=None, out=None):
return ivy.bitwise_or(x, y, out=out)
@with_supported_dtypes(
{
"2.6.0 and below": (
"bool",
"uint8",
"int8",
"int16",
"int32",
"int64",
)
},
"paddle",
)
@to_ivy_arrays_and_back
@handle_out_argument
def bitwise_xor(x, y, /, *, name=None, out=None):
return ivy.bitwise_xor(x, y, out=out)
@with_unsupported_dtypes(
{"2.6.0 and below": ("uint8", "int8", "int16", "complex64", "complex128")},
"paddle",
)
@to_ivy_arrays_and_back
def equal(x, y, /, *, name=None):
return ivy.equal(x, y)
@with_unsupported_dtypes(
{
"2.6.0 and below": (
"uint8",
"int8",
"int16",
"float16",
"complex64",
"complex128",
)
},
"paddle",
)
@to_ivy_arrays_and_back
def equal_all(x, y, /, *, name=None):
return paddle.to_tensor([ivy.array_equal(x, y)])
@with_unsupported_dtypes(
{"2.6.0 and below": ("bool", "uint8", "int8", "int16", "complex64", "complex128")},
"paddle",
)
@to_ivy_arrays_and_back
def greater_equal(x, y, /, *, name=None):
return ivy.greater_equal(x, y)
@with_unsupported_dtypes(
{"2.6.0 and below": ("bool", "uint8", "int8", "int16", "complex64", "complex128")},
"paddle",
)
@to_ivy_arrays_and_back
def greater_than(x, y, /, *, name=None):
return ivy.greater(x, y)
@with_unsupported_dtypes(
{"2.6.0 and below": ("uint8", "int8", "int16", "complex64", "complex128")},
"paddle",
)
@to_ivy_arrays_and_back
def is_empty(x, name=None):
return ivy.is_empty(x)
@to_ivy_arrays_and_back
def is_tensor(x):
return ivy.is_array(x)
@with_supported_dtypes(
{
"2.6.0 and below": (
"float32",
"float64",
)
},
"paddle",
)
@to_ivy_arrays_and_back
def isclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
return ivy.isclose(x, y, rtol=rtol, atol=atol, equal_nan=equal_nan)
@with_unsupported_dtypes(
{"2.6.0 and below": ("bool", "uint8", "int8", "int16", "complex64", "complex128")},
"paddle",
)
@to_ivy_arrays_and_back
def less_equal(x, y, /, *, name=None):
return ivy.less_equal(x, y)
@with_supported_dtypes(
{"2.6.0 and below": ("bool", "float16", "float32", "float64", "int32", "int64")},
"paddle",
)
@to_ivy_arrays_and_back
def less_than(x, y, /, *, name=None):
return ivy.astype(ivy.less(x, y), ivy.bool)
@with_supported_dtypes(
{
"2.6.0 and below": (
"bool",
"int8",
"int16",
"int32",
"int64",
"float32",
"float64",
)
},
"paddle",
)
@to_ivy_arrays_and_back
@handle_out_argument
def logical_and(x, y, /, *, name=None, out=None):
return ivy.logical_and(x, y, out=out)
@with_supported_dtypes(
{
"2.6.0 and below": (
"bool",
"int8",
"int16",
"int32",
"int64",
"float32",
"float64",
)
},
"paddle",
)
@to_ivy_arrays_and_back
@handle_out_argument
def logical_not(x, /, *, name=None, out=None):
return ivy.logical_not(x, out=out)
@with_supported_dtypes(
{
"2.6.0 and below": (
"bool",
"int8",
"int16",
"int32",
"int64",
"float32",
"float64",
)
},
"paddle",
)
@to_ivy_arrays_and_back
@handle_out_argument
def logical_or(x, y, /, *, name=None, out=None):
return ivy.logical_or(x, y, out=out)
@with_supported_dtypes(
{
"2.6.0 and below": (
"bool",
"int8",
"int16",
"int32",
"int64",
"float32",
"float64",
)
},
"paddle",
)
@to_ivy_arrays_and_back
@handle_out_argument
def logical_xor(x, y, /, *, name=None, out=None):
return ivy.logical_xor(x, y, out=out)
@with_unsupported_dtypes(
{"2.6.0 and below": ("uint8", "int8", "int16", "complex64", "complex128")},
"paddle",
)
@to_ivy_arrays_and_back
def not_equal(x, y, /, *, name=None):
if ivy.is_float_dtype(x):
diff = ivy.abs(ivy.subtract(x, y))
res = ivy.not_equal(x, y)
return ivy.where(diff < 1e-8, False, res)
return ivy.not_equal(x, y)
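# Note (added comment, not in the original file): for float inputs this
# frontend treats values within 1e-8 of each other as equal, so comparing 1.0
# with 1.0 + 1e-9 yields False from not_equal even though the raw values differ.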
# --- end of source file: ivy/ivy/functional/frontends/paddle/logic.py ---
# global
import ivy
from ivy.func_wrapper import with_supported_dtypes
from ivy.functional.frontends.paddle.func_wrapper import (
to_ivy_arrays_and_back,
)
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64", "int16", "int32", "int64", "uint8")},
"paddle",
)
@to_ivy_arrays_and_back
def argmax(x, /, *, axis=None, keepdim=False, dtype="int64", name=None):
return ivy.argmax(x, axis=axis, keepdims=keepdim, dtype=dtype)
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64", "int16", "int32", "int64", "uint8")},
"paddle",
)
@to_ivy_arrays_and_back
def argmin(x, /, *, axis=None, keepdim=False, dtype="int64", name=None):
return ivy.argmin(x, axis=axis, keepdims=keepdim, dtype=dtype)
@with_supported_dtypes(
{"2.4.2 and below": ("float32", "float64", "int16", "int32", "int64", "uint8")},
"paddle",
)
@to_ivy_arrays_and_back
def argsort(x, /, *, axis=-1, descending=False, name=None):
return ivy.argsort(x, axis=axis, descending=descending)
@with_supported_dtypes(
{"2.6.0 and below": ("int32", "int64", "float32", "float64")},
"paddle",
)
@to_ivy_arrays_and_back
def index_sample(x, index):
return x[ivy.arange(x.shape[0])[:, None], index]
# kthvalue
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64", "int32", "int64")}, "paddle"
)
@to_ivy_arrays_and_back
def kthvalue(x, k, axis=None, keepdim=False, name=None):
if axis is None:
axis = -1
sorted_input = ivy.sort(x, axis=axis)
sort_indices = ivy.argsort(x, axis=axis)
values = ivy.gather(sorted_input, ivy.array(k - 1), axis=axis)
indices = ivy.gather(sort_indices, ivy.array(k - 1), axis=axis)
if keepdim:
values = ivy.expand_dims(values, axis=axis)
indices = ivy.expand_dims(indices, axis=axis)
ret = (values, indices)
return ret
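# Illustrative sketch (added comment, not in the original file): kthvalue
# returns the k-th smallest value along the axis, with its index in the
# *original* (unsorted) array:
#   kthvalue(ivy.array([1., 3., 2.]), 2) -> (ivy.array(2.), ivy.array(2))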
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64", "int32", "int64")},
"paddle",
)
@to_ivy_arrays_and_back
def masked_select(x, mask, name=None):
return ivy.flatten(x[mask])
@with_supported_dtypes(
{"2.4.2 and below": ("float32", "float64", "int16", "int32", "int64", "uint8")},
"paddle",
)
@to_ivy_arrays_and_back
def nonzero(input, /, *, as_tuple=False):
ret = ivy.nonzero(input)
if as_tuple is False:
ret = ivy.matrix_transpose(ivy.stack(ret))
return ret
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64", "int32", "int64")},
"paddle",
)
@to_ivy_arrays_and_back
def searchsorted(sorted_sequence, values, out_int32=False, right=False, name=None):
if right:
side = "right"
else:
side = "left"
ret = ivy.searchsorted(sorted_sequence, values, side=side)
if out_int32:
ret = ivy.astype(ret, "int32")
return ret
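# Illustrative sketch (added comment, not in the original file):
#   searchsorted(ivy.array([1, 3, 5, 7]), ivy.array([4])) -> ivy.array([2]),
# the left insertion point that keeps the sequence sorted; right=True would
# return the right insertion point instead.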
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64", "int32", "int64")},
"paddle",
)
@to_ivy_arrays_and_back
def sort(x, /, *, axis=-1, descending=False, name=None):
return ivy.sort(x, axis=axis, descending=descending)
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64", "int32", "int64")},
"paddle",
)
@to_ivy_arrays_and_back
def topk(x, k, axis=None, largest=True, sorted=True, name=None):
return ivy.top_k(x, k, axis=axis, largest=largest, sorted=sorted)
# where
@with_supported_dtypes(
{"2.6.0 and below": ("float32", "float64", "int32", "int64")},
"paddle",
)
@to_ivy_arrays_and_back
def where(condition, x, y, name=None):
return ivy.where(condition, x, y)
# --- end of source file: ivy/ivy/functional/frontends/paddle/search.py ---
from .generic import NDFrame
import ivy
from .series import Series
from ivy.functional.frontends.pandas.index import Index
class DataFrame(NDFrame):
def __init__(
self,
data=None,
index=None,
columns=None,
dtype=None,
copy=False,
name=None,
*args,
**kwargs,
):
super().__init__(
data,
index=index,
dtype=dtype,
copy=copy,
name=None,
columns=None,
*args,
**kwargs,
)
if isinstance(self.orig_data, dict):
# if data is a dict the underlying array needs to be extended to match the
# index as a 2d array
self.columns = list(self.orig_data.keys()) if columns is None else columns
array_data = list(self.orig_data.values())
self.array = ivy.array([array_data for _ in range(len(self.index))])
elif isinstance(self.orig_data, Series):
self.array = self.array.expand_dims()
self.columns = [0]
elif columns is None:
self.columns = ivy.arange(self.array.shape[1]).tolist()
else:
self.columns = columns
assert self.array.ndim == 2, "DataFrame Data must be 2-dimensional"
def __getitem__(self, col):
# turn labels (strings) into numbered indexing so that self.array columns can
# be accessed.
if isinstance(col, (tuple, list)):
numbered_col = [self.columns.index(i) for i in col]
return DataFrame(
self.array[:, numbered_col],
index=self.index,
dtype=self.dtype,
columns=col,
)
col = self.columns.index(col)
return Series(
self.array[:, col],
index=self.index,
dtype=self.dtype,
)
def __getattr__(self, item):
if item in self.columns:
item_index = self.columns.index(item)
return Series(
self.array[:, item_index],
index=self.index,
dtype=self.dtype,
)
else:
return super().__getattr__(item)
def __repr__(self):
        return (
            f"frontends.pandas.DataFrame ({self.array.to_list()}, "
            f"index={self.index}, columns={self.columns})"
        )
def sum(self, axis=None, skipna=True, level=None, numeric_only=None, min_count=0):
_array = self.array
if axis is None or axis == "index":
axis = 0 # due to https://github.com/pandas-dev/pandas/issues/54547. TODO: remove this when fixed # noqa: E501
elif axis == "columns":
axis = 1
if min_count > 0:
if ivy.has_nans(_array):
number_values = _array.size - ivy.sum(ivy.isnan(_array))
else:
number_values = _array.size
if min_count > number_values:
return ivy.nan
if skipna:
ret = ivy.nansum(_array, axis=axis)
else:
ret = _array.sum(axis=axis)
return Series(ret, index=self.columns if axis in (0, "index") else self.index)
def mean(self, axis=0, skipna=True, numeric_only=None, **kwargs):
_array = ivy.astype(self.array, ivy.default_float_dtype())
axis = 0 if axis == "index" else 1 if axis == "columns" else axis
if skipna:
ret = ivy.nanmean(_array, axis=axis)
else:
ret = _array.mean(axis=axis)
if axis is None:
return ret # scalar case
return Series(
ret, index=Index(self.columns) if axis in (0, "index") else self.index
)
def get(self, key, default=None):
if key in self.columns:
return self[key]
return default
def keys(self):
return self.columns
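# Illustrative usage sketch (added comment, not in the original file; assumes a
# default integer index is built by NDFrame when none is given):
#   df = DataFrame(ivy.array([[1, 2], [3, 4]]), columns=["a", "b"])
#   df["a"]          # Series over column "a" -> values [1, 3]
#   df.sum(axis=0)   # Series([4, 6]) indexed by the column labels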
# --- end of source file: ivy/ivy/functional/frontends/pandas/dataframe.py ---
from .integrate import *
# --- end of source file: ivy/ivy/functional/frontends/scipy/integrate/__init__.py ---
from .sparse import *
from . import csgraph
from . import linalg
# --- end of source file: ivy/ivy/functional/frontends/scipy/sparse/__init__.py ---
from . import multiclass
from .multiclass import *
from . import validation
from .validation import *
# --- end of source file: ivy/ivy/functional/frontends/sklearn/utils/__init__.py ---
import functools
import ivy
import ivy.functional.frontends.tensorflow as tf_frontend
from ivy.functional.frontends.tensorflow.func_wrapper import (
_ivy_array_to_tensorflow,
_to_ivy_array,
to_ivy_arrays_and_back,
)
def bias_add(x, bias, data_format=None):
if data_format is None:
data_format = "channels_last"
bias_shape = bias.shape
if len(bias_shape) == 1:
if data_format == "channels_first":
return tf_frontend.nn.bias_add(x, bias, data_format="NC...")
return tf_frontend.nn.bias_add(x, bias, data_format="N...C")
if x.ndim in (3, 4, 5):
if data_format == "channels_first":
bias_reshape_axis = (1, bias_shape[-1]) + bias_shape[:-1]
return x + tf_frontend.reshape(bias, bias_reshape_axis)
return x + tf_frontend.reshape(bias, (1,) + bias_shape)
return tf_frontend.nn.bias_add(x, bias)
@to_ivy_arrays_and_back
def depthwise_conv2d(
x,
depthwise_kernel,
strides=(1, 1),
padding="valid",
data_format=None,
dilation_rate=(1, 1),
):
data_format = "channels_last" if data_format is None else data_format
if data_format not in {"channels_first", "channels_last"}:
raise ValueError("Unknown data_format: " + str(data_format))
tf_data_format = "NHWC"
permuted_x = False
if data_format == "channels_first":
if ivy.dev(x) == "cpu":
x = tf_frontend.transpose(x, (0, 2, 3, 1)) # NCHW -> NHWC
permuted_x = True
else:
tf_data_format = "NCHW"
padding = padding.upper()
if padding not in {"VALID", "SAME"}:
raise ValueError("Unknown padding: " + str(padding))
if tf_data_format == "NHWC":
strides = (1,) + strides + (1,)
else:
strides = (1, 1) + strides
x = tf_frontend.nn.depthwise_conv2d(
x,
depthwise_kernel,
strides=strides,
padding=padding,
dilations=dilation_rate,
data_format=tf_data_format,
)
if permuted_x:
x = tf_frontend.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
@to_ivy_arrays_and_back
def dot(x, y):
return ivy.dot(x, y)
def mean(x, axis=None, keepdims=False):
return tf_frontend.reduce_mean(x, axis, keepdims)
@to_ivy_arrays_and_back
def rnn(
step_function,
inputs,
initial_states,
go_backwards=False,
mask=None,
constants=None,
unroll=False,
input_length=None,
time_major=False,
zero_output_for_mask=False,
return_all_outputs=True,
):
@functools.wraps(step_function)
def _new_step_function(*args, **kwargs):
frontend_args = ivy.nested_map(
_ivy_array_to_tensorflow, args, include_derived=True, shallow=False
)
frontend_kwargs = ivy.nested_map(
_ivy_array_to_tensorflow, kwargs, include_derived=True, shallow=False
)
ret = step_function(*frontend_args, **frontend_kwargs)
return ivy.nested_map(_to_ivy_array, ret, include_derived=True)
return ivy.rnn(
_new_step_function,
inputs,
initial_states,
go_backwards=go_backwards,
mask=mask,
constants=constants,
unroll=unroll,
input_length=input_length,
time_major=time_major,
zero_output_for_mask=zero_output_for_mask,
return_all_outputs=return_all_outputs,
)
# --- end of source file: ivy/ivy/functional/frontends/tensorflow/keras/backend.py ---
from . import convolution_functions
from .convolution_functions import *
from . import distance_functions
from .distance_functions import *
from . import dropout_functions
from .dropout_functions import *
from . import layer_functions
from .layer_functions import *
from . import linear_functions
from .linear_functions import *
from . import loss_functions
from .loss_functions import *
from . import non_linear_activation_functions
from .non_linear_activation_functions import *
from . import pooling_functions
from .pooling_functions import *
from . import sparse_functions
from .sparse_functions import *
from . import vision_functions
from .vision_functions import *
from . import norms
from .norms import *
# --- end of source file: ivy/ivy/functional/frontends/torch/nn/functional/__init__.py ---
import ivy
from ivy.func_wrapper import with_supported_dtypes
from ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back
@with_supported_dtypes(
{
"2.2 and below": (
"float32",
"float64",
)
},
"torch",
)
@to_ivy_arrays_and_back
def bernoulli(input, p, *, generator=None, out=None):
seed = generator.initial_seed() if generator is not None else None
return ivy.bernoulli(p, logits=input, seed=seed, out=out)
@to_ivy_arrays_and_back
def manual_seed(seed: int):
ivy.seed(seed_value=seed)
return None
@with_supported_dtypes(
{
"2.2 and below": (
"float32",
"float64",
)
},
"torch",
)
@to_ivy_arrays_and_back
def multinomial(input, num_samples, replacement=False, *, generator=None, out=None):
seed = generator.initial_seed() if generator is not None else None
return ivy.multinomial(
num_samples + 1, # doesn't matter because `probs` is provided, but should be
# greater than the number of samples
num_samples,
probs=input,
replace=replacement,
seed=seed,
out=out,
)
@with_supported_dtypes(
{
"2.2 and below": (
"float32",
"float64",
)
},
"torch",
)
@to_ivy_arrays_and_back
def normal(mean, std, *, generator=None, out=None):
seed = generator.initial_seed() if generator is not None else None
return ivy.random_normal(mean=mean, std=std, seed=seed, out=out)
@with_supported_dtypes(
{
"2.2 and below": (
"float32",
"float64",
)
},
"torch",
)
@to_ivy_arrays_and_back
def poisson(input, generator=None):
seed = generator.initial_seed() if generator is not None else None
return ivy.poisson(input, seed=seed, shape=None)
@to_ivy_arrays_and_back
def rand(
*size,
generator=None,
out=None,
dtype=None,
layout=None,
device=None,
requires_grad=False,
pin_memory=False,
**kwargs,
):
if not size and "size" not in kwargs:
raise ValueError("Missing 1 required positional/keyword argument: size")
size = size if size else kwargs["size"]
if (
isinstance(size, (list, tuple))
and len(size) == 1
and isinstance(size[0], (list, tuple, ivy.Shape))
):
size = size[0]
seed = generator.initial_seed() if generator is not None else None
return ivy.random_uniform(
shape=size,
seed=seed,
out=out,
dtype=dtype,
device=device,
)
@to_ivy_arrays_and_back
def rand_like(
input,
*,
dtype=None,
layout=None,
device=None,
requires_grad=False,
memory_format=False,
):
shape = input.shape
if not dtype:
dtype = input.dtype
return ivy.random_uniform(
shape=shape,
dtype=dtype,
device=device,
)
@to_ivy_arrays_and_back
def randint(
low,
high,
size,
*,
generator=None,
out=None,
dtype=None,
layout=None,
device=None,
requires_grad=False,
):
seed = generator.initial_seed() if generator is not None else None
return ivy.randint(
low,
high,
shape=size,
seed=seed,
out=out,
dtype=dtype,
device=device,
)
@to_ivy_arrays_and_back
def randint_like(
input,
low,
high,
*,
dtype=None,
layout=None,
device=None,
requires_grad=False,
memory_format=None,
):
shape = input.shape
return ivy.randint(
low,
high,
shape=shape,
device=device,
dtype=dtype,
)
@to_ivy_arrays_and_back
def randn(
*size,
generator=None,
out=None,
dtype=None,
layout=None,
device=None,
requires_grad=False,
pin_memory=False,
**kwargs,
):
if not size and "size" not in kwargs:
raise ValueError("Missing 1 required positional/keyword argument: size")
size = size if size else kwargs["size"]
if (
isinstance(size, (list, tuple))
and len(size) == 1
and isinstance(size[0], (list, tuple, ivy.Shape))
):
size = size[0]
seed = generator.initial_seed() if generator is not None else None
return ivy.random_normal(
shape=size,
seed=seed,
out=out,
dtype=dtype,
device=device,
)
@to_ivy_arrays_and_back
def randn_like(
input,
*,
dtype=None,
layout=None,
device=None,
requires_grad=False,
memory_format=None,
):
shape = input.shape
if not dtype:
dtype = input.dtype
return ivy.random_normal(
shape=shape,
dtype=dtype,
device=device,
)
@to_ivy_arrays_and_back
def randperm(
n,
*,
generator=None,
out=None,
dtype=ivy.int64,
layout=None,
device=None,
requires_grad=False,
pin_memory=False,
):
seed = generator.initial_seed() if generator is not None else None
arr = ivy.arange(n, device=device, dtype=dtype)
ret = ivy.shuffle(arr, seed=seed, out=out)
return ret
# ToDo: will need to create a Generator class to be able to fully test these functions
def seed() -> int:
"""Return a 64 bit number used to seed the RNG."""
return int(ivy.randint(-(2**63), 2**63 - 1))
@with_supported_dtypes(
{"2.2 and below": ("uint8",)},
"torch",
)
@to_ivy_arrays_and_back
def set_rng_state(new_state):
return ivy.seed(seed_value=new_state)
# --- end of source file: ivy/ivy/functional/frontends/torch/random_sampling.py ---
import ivy
from ivy.functional.frontends.xgboost.linear.coordinate_common import (
get_bias_gradient,
coordinate_delta_bias,
update_bias_residual,
coordinate_delta,
)
def coordinate_updater(gpair, data, lr, weight, n_feat, n_iter, reg_alpha, reg_lambda):
"""Implements one step of coordinate descent. The original optimizer
implements parallel calculations. The below code is an approximation of the
original one, but rather than computing the update direction for a single
parameter at a time using a for loop and cumulative gradients, it does the
update in parallel by means of matrix-vector multiplications. Given that
xgboost's updater is non-deterministic, the approximated and original
implementations converge to pretty the same optima, resulting in metrics'
values(accuracy, f1-score) differing at a level of 0.001(for separate runs
metrics may end up being the same).
Parameters
----------
gpair
Array of shape (n_samples, 2) holding gradient-hessian pairs.
data
Training data of shape (n_samples, n_features).
lr
Learning rate.
weight
Array of shape (n_features+1, n_output_group) holding feature weights
and biases.
n_feat
Number of features in the training data.
n_iter
Number of current iteration.
reg_alpha
Denormalized regularization parameter alpha.
reg_lambda
Denormalized regularization parameter lambda.
Returns
-------
Updated weights of shape (n_features+1, n_output_group).
"""
# update biases for all groups
bias_grad = get_bias_gradient(gpair)
dbias = lr * coordinate_delta_bias(bias_grad[0], bias_grad[1])
bias_weights = weight[-1] + dbias
# upd gradients with bias delta and extract hessians
grad = update_bias_residual(dbias, gpair)
hess = ivy.expand_dims(gpair[:, 1], axis=1)
# don't update where hessian is less than zero
mask = ivy.where(hess < 0.0, 0.0, 1.0)
sum_hess = ivy.sum(ivy.square(data) * hess * mask, axis=0, keepdims=True)
sum_grad = ivy.sum(data * grad * mask, axis=0, keepdims=True)
# we transpose the arrays to convert (1, n_features) to (n_features, 1)
dw = lr * coordinate_delta(
sum_grad.T, sum_hess.T, weight[:-1, :], reg_alpha, reg_lambda
)
feature_weights = weight[:-1] + dw
    # stack the updated weights instead of assigning in place, since some
    # backends don't support in-place updates; this also speeds up training
    # because we don't need to create copies implicitly
return ivy.vstack([feature_weights, bias_weights])
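# Shape sketch (added comment, not in the original file; sizes are made up):
# with n_samples=4, n_feat=2 and one output group, `gpair` is (4, 2) holding
# (gradient, hessian) pairs, `data` is (4, 2), and `weight` is (3, 1) -- two
# feature weight rows plus a trailing bias row; the returned array keeps the
# same (3, 1) layout.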
# --- end of source file: ivy/ivy/functional/frontends/xgboost/linear/updater_coordinate.py ---
# global
from typing import Union, Tuple, Optional, Sequence, Iterable, Generator
import warnings
# local
import ivy
from ivy.utils.backend import current_backend
from ivy.utils.exceptions import handle_exceptions
from ivy.func_wrapper import (
outputs_to_ivy_arrays,
handle_nestable,
to_native_arrays_and_back,
handle_out_argument,
infer_dtype,
handle_array_like_without_promotion,
inputs_to_ivy_arrays,
handle_device,
handle_backend_invalid,
handle_array_function,
)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_out_argument
@to_native_arrays_and_back
@infer_dtype
@handle_device
def vorbis_window(
window_length: Union[ivy.Array, ivy.NativeArray],
*,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return an array that contains a vorbis power complementary window of
size window_length.
Parameters
----------
window_length
the length of the vorbis window.
dtype
data type of the returned array. By default float32.
out
optional output array, for writing the result to.
Returns
-------
ret
Input array with the vorbis window.
Examples
--------
>>> ivy.vorbis_window(3)
ivy.array([0.38268346, 1. , 0.38268352])
>>> ivy.vorbis_window(5)
ivy.array([0.14943586, 0.8563191 , 1. , 0.8563191, 0.14943568])
"""
return ivy.current_backend().vorbis_window(window_length, dtype=dtype, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_out_argument
@to_native_arrays_and_back
@infer_dtype
@handle_device
def hann_window(
size: int,
*,
periodic: bool = True,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Generate a Hann window. The Hanning window is a taper formed by using a
weighted cosine.
Parameters
----------
size
the size of the returned window.
periodic
If True, returns a window to be used as periodic function.
If False, return a symmetric window.
dtype
The data type to produce. Must be a floating point type.
out
optional output array, for writing the result to.
Returns
-------
ret
The array containing the window.
Examples
--------
>>> ivy.hann_window(4, periodic = True)
ivy.array([0. , 0.5, 1. , 0.5])
>>> ivy.hann_window(7, periodic = False)
ivy.array([0. , 0.25, 0.75, 1. , 0.75, 0.25, 0. ])
"""
return ivy.current_backend().hann_window(
size, periodic=periodic, dtype=dtype, out=out
)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_out_argument
@to_native_arrays_and_back
@infer_dtype
@handle_device
def kaiser_window(
window_length: int,
periodic: bool = True,
beta: float = 12.0,
*,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute the Kaiser window with window length window_length and shape
beta.
Parameters
----------
window_length
an int defining the length of the window.
periodic
If True, returns a periodic window suitable for use in spectral analysis.
If False, returns a symmetric window suitable for use in filter design.
beta
a float used as shape parameter for the window.
dtype
data type of the returned array.
out
optional output array, for writing the result to.
Returns
-------
ret
The array containing the window.
Examples
--------
>>> ivy.kaiser_window(5)
    ivy.array([5.2773e-05, 1.0172e-01, 7.9294e-01, 7.9294e-01, 1.0172e-01])
>>> ivy.kaiser_window(5, True, 5)
ivy.array([0.0367, 0.4149, 0.9138, 0.9138, 0.4149])
>>> ivy.kaiser_window(5, False, 5)
ivy.array([0.0367, 0.5529, 1.0000, 0.5529, 0.0367])
"""
return ivy.current_backend().kaiser_window(
window_length, periodic, beta, dtype=dtype, out=out
)
@handle_exceptions
@handle_nestable
@handle_out_argument
@infer_dtype
def kaiser_bessel_derived_window(
window_length: int,
beta: float = 12.0,
*,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute the Kaiser bessel derived window with window length
window_length and shape beta.
Parameters
----------
window_length
an int defining the length of the window.
beta
a float used as shape parameter for the window.
dtype
data type of the returned array
out
optional output array, for writing the result to.
Returns
-------
ret
The array containing the window.
Examples
--------
>>> ivy.kaiser_bessel_derived_window(5)
ivy.array([0.00726415, 0.9999736 , 0.9999736 , 0.00726415])
>>> ivy.kaiser_bessel_derived_window(5, 5)
ivy.array([0.18493208, 0.9827513 , 0.9827513 , 0.18493208])
"""
if window_length < 2:
result = ivy.array([], dtype=dtype)
if ivy.exists(out):
ivy.inplace_update(out, result)
return result
half_len = window_length // 2
kaiser_w = ivy.kaiser_window(half_len + 1, False, beta, dtype=dtype)
kaiser_w_csum = ivy.cumsum(kaiser_w)
half_w = ivy.sqrt(kaiser_w_csum[:-1] / kaiser_w_csum[-1:])
window = ivy.concat((half_w, half_w[::-1]), axis=0)
result = window.astype(dtype)
return result
@handle_exceptions
@handle_nestable
@infer_dtype
def hamming_window(
window_length: int,
*,
periodic: bool = True,
alpha: float = 0.54,
beta: float = 0.46,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute the Hamming window with window length window_length.
Parameters
----------
window_length
an int defining the length of the window.
periodic
If True, returns a window to be used as periodic function.
If False, return a symmetric window.
alpha
The coefficient alpha in the hamming window equation
beta
The coefficient beta in the hamming window equation
dtype
data type of the returned array.
out
optional output array, for writing the result to.
Returns
-------
ret
The array containing the window.
Examples
--------
>>> ivy.hamming_window(5)
ivy.array([0.0800, 0.3979, 0.9121, 0.9121, 0.3979])
>>> ivy.hamming_window(5, periodic=False)
ivy.array([0.0800, 0.5400, 1.0000, 0.5400, 0.0800])
>>> ivy.hamming_window(5, periodic=False, alpha=0.2, beta=2)
ivy.array([-1.8000, 0.2000, 2.2000, 0.2000, -1.8000])
"""
if window_length < 2:
return ivy.ones([window_length], dtype=dtype, out=out)
if periodic:
count = ivy.arange(window_length) / window_length
else:
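        # linspace(0, N, N) yields angles k * N / (N - 1) for k = 0..N-1, and
        # cos(2*pi*k*N/(N-1)) == cos(2*pi*k/(N-1)), so this is equivalent to
        # the standard symmetric Hamming formulation.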
count = ivy.linspace(0, window_length, window_length)
result = (alpha - beta * ivy.cos(2 * ivy.pi * count)).astype(dtype)
if ivy.exists(out):
result = ivy.inplace_update(out, result)
return result
hamming_window.mixed_backend_wrappers = {
"to_add": (
"handle_backend_invalid",
"handle_out_argument",
"handle_device",
),
"to_skip": (),
}
@handle_exceptions
@handle_nestable
@outputs_to_ivy_arrays
@handle_device
def tril_indices(
n_rows: int,
n_cols: Optional[int] = None,
k: int = 0,
*,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
) -> Tuple[ivy.Array, ...]:
"""Return the indices of the lower triangular part of a row by col matrix
in a 2-by-N shape (tuple of two N dimensional arrays), where the first row
contains row coordinates of all indices and the second row contains column
coordinates. Indices are ordered based on rows and then columns. The lower
triangular part of the matrix is defined as the elements on and below the
diagonal. The argument k controls which diagonal to consider. If k = 0,
all elements on and below the main diagonal are retained. A positive value
    includes just as many diagonals above the main diagonal, and similarly a
    negative value excludes just as many diagonals below the main diagonal. The
    main diagonal is the set of indices {(i,i)} for i∈[0,min{n_rows,
n_cols}−1].
Notes
-----
Primary purpose of this function is to slice an array of shape (n,m). See
https://numpy.org/doc/stable/reference/generated/numpy.tril_indices.html
for examples
Tensorflow does not support slicing 2-D tensor with tuple of tensor of indices
Parameters
----------
n_rows
number of rows in the 2-d matrix.
n_cols
number of columns in the 2-d matrix. If None n_cols will be the same as n_rows
k
number of shifts from the main diagonal. k = 0 includes main diagonal,
k > 0 moves downward and k < 0 moves upward
device
device on which to place the created array. Default: ``None``.
Returns
-------
ret
        a 2-by-N shape tuple of two N-dimensional arrays, where the first
        subarray (i.e. ret[0]) contains the row coordinates of all indices and
        the second subarray (i.e. ret[1]) contains the column indices.

    This function is *nestable*, and therefore also accepts
    :class:`ivy.Container` instances in place of any of the arguments.
Examples
--------
>>> x = ivy.tril_indices(4,4,0)
>>> print(x)
(ivy.array([0, 1, 1, 2, 2, 2, 3, 3, 3, 3]),
ivy.array([0, 0, 1, 0, 1, 2, 0, 1, 2, 3]))
>>> x = ivy.tril_indices(4,4,1)
>>> print(x)
(ivy.array([0, 0, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3]),
ivy.array([0, 1, 0, 1, 2, 0, 1, 2, 3, 0, 1, 2, 3]))
>>> x = ivy.tril_indices(4,4,-2)
>>> print(x)
(ivy.array([2, 3, 3]), ivy.array([0, 0, 1]))
>>> x = ivy.tril_indices(4,2,0)
>>> print(x)
(ivy.array([0, 1, 1, 2, 2, 3, 3]),
ivy.array([0, 0, 1, 0, 1, 0, 1]))
>>> x = ivy.tril_indices(2,4,0)
>>> print(x)
(ivy.array([0, 1, 1]), ivy.array([0, 0, 1]))
>>> x = ivy.tril_indices(4,-4,0)
>>> print(x)
(ivy.array([]), ivy.array([]))
>>> x = ivy.tril_indices(4,4,100)
>>> print(x)
(ivy.array([0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3]),
ivy.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]))
>>> x = ivy.tril_indices(2,4,-100)
>>> print(x)
(ivy.array([]), ivy.array([]))
"""
return current_backend().tril_indices(n_rows, n_cols, k, device=device)
@handle_exceptions
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@inputs_to_ivy_arrays
@infer_dtype
@handle_device
def eye_like(
x: Union[ivy.Array, ivy.NativeArray],
*,
k: int = 0,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return a 2D array filled with ones on the k diagonal and zeros
elsewhere. having the same ``shape`` as the first and last dim of input
array ``x``. input array ``x`` should to be 2D.
Parameters
----------
x
input array from which to derive the output array shape.
k
index of the diagonal. A positive value refers to an upper diagonal, a negative
value to a lower diagonal, and 0 to the main diagonal. Default: ``0``.
dtype
output array data type. If dtype is None, the output array data type must be the
default floating-point data type. Default: ``None``.
device
the device on which to place the created array.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array having the same shape as ``x`` and filled with ``ones`` in
diagonal ``k`` and ``zeros`` elsewhere.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances as a replacement to any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x1 = ivy.array([[0, 1],[2, 3]])
>>> y1 = ivy.eye_like(x1)
>>> print(y1)
ivy.array([[1., 0.],
[0., 1.]])
>>> x1 = ivy.array([[0, 1, 2],[3, 4, 5],[6, 7, 8]])
>>> y1 = ivy.eye_like(x1, k=1)
>>> print(y1)
ivy.array([[0., 1., 0.],
[0., 0., 1.],
[0., 0., 0.]])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([[3, 8],[0, 2]]), b=ivy.array([[0, 2], [8, 5]]))
>>> y = x.eye_like()
>>> print(y)
{
a: ivy.array([[1., 0.],
[0., 1.]]),
b: ivy.array([[1., 0.],
[0., 1.]])
}
"""
shape = ivy.shape(x, as_array=True)
dim = len(shape)
if dim <= 1:
cols = dim
else:
cols = int(shape[-1])
rows = 0 if dim < 1 else int(shape[0])
return ivy.eye(
rows,
cols,
k=k,
dtype=dtype,
device=device,
out=out,
)
def _iter_product(*args, repeat=1):
    # pure-Python equivalent of itertools.product
pools = [tuple(pool) for pool in args] * repeat
result = [[]]
for pool in pools:
result = [x + [y] for x in result for y in pool]
for prod in result:
yield tuple(prod)
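# Usage sketch (added comment, not in the original file):
#   list(_iter_product((0, 1), (0, 1))) -> [(0, 0), (0, 1), (1, 0), (1, 1)],
# the same ordering itertools.product produces.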
@handle_exceptions
@inputs_to_ivy_arrays
def ndenumerate(
input: Iterable,
) -> Generator:
"""Multidimensional index iterator.
Parameters
----------
input
Input array to iterate over.
Returns
-------
ret
An iterator yielding pairs of array coordinates and values.
Examples
--------
>>> a = ivy.array([[1, 2], [3, 4]])
>>> for index, x in ivy.ndenumerate(a):
>>> print(index, x)
(0, 0) 1
(0, 1) 2
(1, 0) 3
(1, 1) 4
"""
def _ndenumerate(input):
if ivy.is_ivy_array(input) and input.shape == ():
yield (), ivy.to_scalar(input)
else:
i = [range(k) for k in input.shape]
for idx in _iter_product(*i):
yield idx, input[idx]
input = input if ivy.is_ivy_array(input) else ivy.array(input)
return _ndenumerate(input)
@handle_exceptions
def ndindex(
shape: Tuple,
) -> Generator:
"""Multidimensional index iterator.
Parameters
----------
shape
The shape of the array to iterate over.
Returns
-------
ret
An iterator yielding array coordinates.
Examples
--------
>>> a = ivy.array([[1, 2], [3, 4]])
>>> for index in ivy.ndindex(a):
>>> print(index)
(0, 0)
(0, 1)
(1, 0)
(1, 1)
"""
args = [range(k) for k in shape]
return _iter_product(*args)
@handle_exceptions
def indices(
dimensions: Sequence[int],
*,
dtype: Union[ivy.Dtype, ivy.NativeDtype] = ivy.int64,
sparse: bool = False,
) -> Union[ivy.Array, Tuple[ivy.Array, ...]]:
"""Return an array representing the indices of a grid.
Parameters
----------
dimensions
The shape of the grid.
dtype
The data type of the result.
sparse
Return a sparse representation of the grid instead of a dense representation.
Returns
-------
ret
If sparse is False, returns one grid indices array of shape
(len(dimensions),) + tuple(dimensions).
If sparse is True, returns a tuple of arrays each of shape
(1, ..., 1, dimensions[i], 1, ..., 1) with dimensions[i] in the ith place.
Examples
--------
>>> ivy.indices((3, 2))
ivy.array([[[0 0]
[1 1]
[2 2]]
[[0 1]
[0 1]
[0 1]]])
>>> ivy.indices((3, 2), sparse=True)
(ivy.array([[0], [1], [2]]), ivy.array([[0, 1]]))
"""
if sparse:
return tuple(
ivy.arange(dim)
.expand_dims(
axis=[j for j in range(len(dimensions)) if i != j],
)
.astype(dtype)
for i, dim in enumerate(dimensions)
)
else:
grid = ivy.meshgrid(*[ivy.arange(dim) for dim in dimensions], indexing="ij")
return ivy.stack(grid, axis=0).astype(dtype)
indices.mixed_backend_wrappers = {
"to_add": ("handle_device",),
"to_skip": (),
}
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@to_native_arrays_and_back
def unsorted_segment_min(
data: Union[ivy.Array, ivy.NativeArray],
segment_ids: Union[ivy.Array, ivy.NativeArray],
num_segments: Union[int, ivy.Array, ivy.NativeArray],
) -> ivy.Array:
"""Compute the minimum along segments of an array. Segments are defined by
an integer array of segment IDs.
Note
----
If the given segment ID `i` is negative, then the corresponding
value is dropped, and will not be included in the result.
Parameters
----------
data
The array from which to gather values.
segment_ids
        Must have the same size as the first dimension of `data`. Has to be
of integer data type. The index-th element of `segment_ids` array is
the segment identifier for the index-th element of `data`.
num_segments
An integer or array representing the total number of distinct segment IDs.
Returns
-------
ret
The output array, representing the result of a segmented min operation.
For each segment, it computes the min value in `data` where `segment_ids`
equals to segment ID.
"""
return ivy.current_backend().unsorted_segment_min(data, segment_ids, num_segments)
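# Illustrative sketch (added comment, not in the original file):
#   ivy.unsorted_segment_min(ivy.array([4., 1., 3., 2.]),
#                            ivy.array([0, 0, 1, 1]), 2)
# gathers the per-segment minima -> ivy.array([1., 2.]).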
@handle_exceptions
@handle_nestable
@to_native_arrays_and_back
def unsorted_segment_sum(
data: Union[ivy.Array, ivy.NativeArray],
segment_ids: Union[ivy.Array, ivy.NativeArray],
num_segments: Union[int, ivy.Array, ivy.NativeArray],
) -> ivy.Array:
"""Compute the sum of elements along segments of an array. Segments are
defined by an integer array of segment IDs.
Parameters
----------
data
The array from which to gather values.
segment_ids
        Must have the same size as the first dimension of `data`. Has to be
of integer data type. The index-th element of `segment_ids` array is
the segment identifier for the index-th element of `data`.
num_segments
An integer or array representing the total number of distinct segment IDs.
Returns
-------
ret
The output array, representing the result of a segmented sum operation.
For each segment, it computes the sum of values in `data` where `segment_ids`
equals to segment ID.
"""
return ivy.current_backend().unsorted_segment_sum(data, segment_ids, num_segments)
@handle_exceptions
@handle_nestable
@handle_out_argument
@to_native_arrays_and_back
@infer_dtype
@handle_device
def blackman_window(
size: int,
*,
periodic: bool = True,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Generate a Blackman window. The Blackman window is a taper formed by
using the first three terms of a summation of cosines. It was designed to
have close to the minimal leakage possible. It is close to optimal, only
slightly worse than a Kaiser window.
Parameters
----------
    size
        the size of the returned window.
periodic
If True, returns a window to be used as periodic function.
If False, return a symmetric window.
dtype
The data type to produce. Must be a floating point type.
out
optional output array, for writing the result to.
Returns
-------
ret
The array containing the window.
Examples
--------
>>> ivy.blackman_window(4, periodic = True)
ivy.array([-1.38777878e-17, 3.40000000e-01, 1.00000000e+00, 3.40000000e-01])
>>> ivy.blackman_window(7, periodic = False)
ivy.array([-1.38777878e-17, 1.30000000e-01, 6.30000000e-01, 1.00000000e+00,
6.30000000e-01, 1.30000000e-01, -1.38777878e-17])
"""
return ivy.current_backend().blackman_window(
size, periodic=periodic, dtype=dtype, out=out
)
@handle_exceptions
@handle_nestable
@infer_dtype
def random_tucker(
shape: Sequence[int],
rank: Sequence[int],
/,
*,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
full: Optional[bool] = False,
orthogonal: Optional[bool] = False,
seed: Optional[int] = None,
non_negative: Optional[bool] = False,
) -> Union[ivy.TuckerTensor, ivy.Array]:
"""Generate a random Tucker tensor.
Parameters
----------
shape
shape of the tensor to generate
rank
rank of the Tucker decomposition
if int, the same rank is used for each mode
otherwise, dimension of each mode
full
if True, a full tensor is returned
otherwise, the decomposed tensor is returned
orthogonal
if True, creates a tensor with orthogonal components
seed
seed for generating random numbers
    non_negative
        if True, the factors and core are made non-negative by taking
        absolute values
Returns
-------
ivy.TuckerTensor
"""
rank = ivy.TuckerTensor.validate_tucker_rank(shape, rank)
if orthogonal:
for i, (s, r) in enumerate(zip(shape, rank)):
if r > s:
warnings.warn(
"Selected orthogonal=True, but selected a rank larger than the"
f" tensor size for mode {{0}}: rank[{i}]={r} > shape[{i}]={s}."
)
factors = []
for s, r in zip(shape, rank):
if orthogonal:
factor = ivy.random_uniform(shape=(s, s), seed=seed, dtype=dtype)
Q, _ = ivy.qr(factor)
factors.append(ivy.array(Q[:, :r]))
else:
factors.append(ivy.random_uniform(shape=(s, r), seed=seed, dtype=dtype))
core = ivy.random_uniform(shape=rank, seed=seed, dtype=dtype)
if non_negative:
factors = [ivy.abs(f) for f in factors]
core = ivy.abs(core)
if full:
return ivy.TuckerTensor.tucker_to_tensor((core, factors))
else:
return ivy.TuckerTensor((core, factors))
@handle_exceptions
@handle_nestable
@infer_dtype
def random_cp(
shape: Sequence[int],
rank: int,
/,
*,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
full: Optional[bool] = False,
orthogonal: Optional[bool] = False,
seed: Optional[int] = None,
normalise_factors: Optional[bool] = True,
) -> Union[ivy.CPTensor, ivy.Array]:
"""Generate a random CP tensor.
Parameters
----------
shape
shape of the tensor to generate
rank
rank of the CP decomposition
full
if True, a full tensor is returned
otherwise, the decomposed tensor is returned
orthogonal
if True, creates a tensor with orthogonal components
seed
seed for generating random numbers
Returns
-------
ivy.CPTensor
"""
rank = ivy.CPTensor.validate_cp_rank(shape, rank)
if (rank > min(shape)) and orthogonal:
warnings.warn(
"Can only construct orthogonal tensors when rank <= min(shape) but got "
f"a tensor with min(shape)={min(shape)} < rank={rank}"
)
factors = [
(ivy.random_uniform(shape=(s, rank), dtype=dtype, seed=seed)) for s in shape
]
weights = ivy.ones((rank,), dtype=dtype)
if orthogonal:
factors = [ivy.qr(factor)[0] for factor in factors]
if full:
return ivy.CPTensor.cp_to_tensor((weights, factors))
elif normalise_factors:
return ivy.CPTensor.cp_normalize((weights, factors))
else:
return ivy.CPTensor((weights, factors))
@handle_exceptions
@handle_nestable
@infer_dtype
def random_tr(
shape: Sequence[int],
rank: Sequence[int],
/,
*,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
full: Optional[bool] = False,
seed: Optional[int] = None,
) -> Union[ivy.TRTensor, ivy.Array]:
"""Generate a random TR tensor.
Parameters
----------
shape : tuple
shape of the tensor to generate
rank : Sequence[int]
rank of the TR decomposition
must verify rank[0] == rank[-1] (boundary conditions)
and len(rank) == len(shape)+1
full : bool, optional, default is False
if True, a full tensor is returned
otherwise, the decomposed tensor is returned
    seed : int, optional
        seed for generating random numbers
Returns
-------
ivy.TRTensor or ivy.Array if full is True
"""
rank = ivy.TRTensor.validate_tr_rank(shape, rank)
# Make sure it's not a tuple but a list
rank = list(rank)
_check_first_and_last_rank_elements_are_equal(rank)
factors = [
ivy.random_uniform(shape=(rank[i], s, rank[i + 1]), dtype=dtype, seed=seed)
for i, s in enumerate(shape)
]
if full:
return ivy.TRTensor.tr_to_tensor(factors)
else:
return ivy.TRTensor(factors)
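# Usage sketch (added comment, not in the original file): a TR decomposition
# of a (3, 4) tensor with rank (1, 2, 1) yields two cores of shapes (1, 3, 2)
# and (2, 4, 1):
#   cores = ivy.random_tr((3, 4), (1, 2, 1))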
def _check_first_and_last_rank_elements_are_equal(rank):
if rank[0] != rank[-1]:
message = (
f"Provided rank[0] == {rank[0]} and rank[-1] == {rank[-1]} "
"but boundary conditions dictate rank[0] == rank[-1]."
)
raise ValueError(message)
@handle_exceptions
@handle_nestable
@infer_dtype
def random_parafac2(
shapes: Sequence[int],
rank: int,
/,
*,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
full: Optional[bool] = False,
seed: Optional[int] = None,
normalise_factors: Optional[bool] = True,
) -> Union[ivy.Parafac2Tensor, ivy.Array]:
"""Generate a random PARAFAC2 tensor.
Parameters
----------
    shapes
        sequence of shapes of the matrices to generate
rank
rank of the Parafac2 decomposition
full
if True, a full tensor is returned otherwise,
the decomposed tensor is returned
seed
seed for generating random numbers
Returns
-------
ivy.Parafac2Tensor
"""
if any(shape[1] != shapes[0][1] for shape in shapes):
raise ValueError("All matrices must have equal number of columns.")
projection_matrices = [
ivy.qr(ivy.random_uniform(shape=(shape[0], rank), dtype=dtype, seed=seed))[0]
for shape in shapes
]
weights, factors = ivy.random_cp(
[len(shapes), rank, shapes[0][1]],
rank,
normalise_factors=False,
seed=seed,
dtype=dtype,
)
parafac2_tensor = ivy.Parafac2Tensor((weights, factors, projection_matrices))
if normalise_factors:
parafac2_tensor = ivy.Parafac2Tensor.parafac2_normalise(parafac2_tensor)
if full:
return ivy.Parafac2Tensor.parafac2_to_tensor(parafac2_tensor)
else:
return parafac2_tensor
@handle_exceptions
@handle_nestable
@infer_dtype
def random_tt(
shape: Sequence[int],
rank: Union[Sequence[int], int],
/,
*,
full: Optional[bool] = False,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
seed: Optional[int] = None,
) -> Union[ivy.TTTensor, ivy.Array]:
"""Generate a random TT/MPS tensor.
Parameters
----------
shape
shape of the tensor to generate
rank
rank of the TT decomposition
        must verify rank[0] == rank[-1] == 1 (boundary conditions)
and len(rank) == len(shape)+1
full
if True, a full tensor is returned
otherwise, the decomposed tensor is returned
seed
seed for generating random numbers
Returns
-------
ivy.TTTensor
"""
rank = ivy.TTTensor.validate_tt_rank(shape, rank)
rank = list(rank)
if rank[0] != 1:
message = (
f"Provided rank[0] == {rank[0]} but boundaring conditions dictatate rank[0]"
" == rank[-1] == 1."
)
raise ValueError(message)
if rank[-1] != 1:
message = (
f"Provided rank[-1] == {rank[-1]} but boundaring conditions dictatate"
" rank[0] == rank[-1] == 1."
)
raise ValueError(message)
factors = [
(ivy.random_uniform(shape=(rank[i], s, rank[i + 1]), dtype=dtype, seed=seed))
for i, s in enumerate(shape)
]
if full:
return ivy.TTTensor.tt_to_tensor(factors)
else:
return ivy.TTTensor(factors)
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def trilu(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
k: int = 0,
upper: bool = True,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""
Return the upper or lower triangular part of a matrix
(or a stack of matrices) ``x``.
    .. note::
The upper triangular part of the matrix is defined as the elements
on and above the specified diagonal ``k``. The lower triangular part
of the matrix is defined as the elements on and below the specified
diagonal ``k``.
Parameters
----------
x
input array having shape (..., M, N) and whose innermost two dimensions form MxN
        matrices.
k
diagonal below or above which to zero elements. If k = 0, the diagonal is the
main diagonal. If k < 0, the diagonal is below the main diagonal. If k > 0, the
diagonal is above the main diagonal. Default: ``0``.
upper
indicates whether upper or lower part of matrix is retained. Default: ``True``.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array containing the upper or lower triangular part(s). The returned array
must have the same shape and data type as x. All elements below or above the
specified diagonal k must be zeroed. The returned array should be allocated on
the same device as x.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
"""
return current_backend(x).trilu(x, k=k, upper=upper, out=out)
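# Illustrative sketch (added comment, not in the original file):
#   ivy.trilu(ivy.array([[1, 2], [3, 4]]), k=0, upper=True)
# keeps the elements on and above the main diagonal:
#   ivy.array([[1, 2], [0, 4]])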
@handle_exceptions
@handle_nestable
@to_native_arrays_and_back
def mel_weight_matrix(
num_mel_bins: int,
dft_length: int,
sample_rate: int,
lower_edge_hertz: float = 0.0,
upper_edge_hertz: float = 3000.0,
):
"""Generate a MelWeightMatrix that can be used to re-weight a Tensor
    containing a linearly sampled frequency spectrum (from a DFT or STFT) into
num_mel_bins frequency information based on the [lower_edge_hertz,
upper_edge_hertz]
range on the mel scale. This function defines the mel scale in terms of a frequency
in hertz according to the following formula: mel(f) = 2595 * log10(1 + f/700)
Parameters
----------
num_mel_bins
The number of bands in the mel spectrum.
dft_length
        The size of the one-sided DFT, i.e. (n_fft / 2 + 1).
sample_rate
Samples per second of the input signal.
lower_edge_hertz
Lower bound on the frequencies to be included in the mel spectrum.
upper_edge_hertz
The desired top edge of the highest frequency band.
Returns
-------
ret
        MelWeightMatrix of shape: [dft_length, num_mel_bins].
Examples
--------
>>> ivy.mel_weight_matrix(3,3,8000)
ivy.array([[0. ,0. , 0.],
[0. ,0. , 0.75694758],
[0. ,0. , 0. ]])
"""
return ivy.current_backend().mel_weight_matrix(
num_mel_bins,
dft_length,
sample_rate,
lower_edge_hertz,
upper_edge_hertz,
)
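# The hertz-to-mel mapping quoted in the docstring, as a standalone sketch:
#
#     >>> import math
#     >>> def hz_to_mel(f):
#     ...     return 2595.0 * math.log10(1.0 + f / 700.0)
#     >>> round(hz_to_mel(700.0), 1)  # 2595 * log10(2)
#     781.2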
# unsorted_segment_mean
@handle_exceptions
@handle_nestable
@to_native_arrays_and_back
def unsorted_segment_mean(
data: Union[ivy.Array, ivy.NativeArray],
segment_ids: Union[ivy.Array, ivy.NativeArray],
num_segments: Union[int, ivy.Array, ivy.NativeArray],
) -> ivy.Array:
"""Compute the mean of elements along segments of an array. Segments are
defined by an integer array of segment IDs.
Parameters
----------
data : Union[ivy.Array, ivy.NativeArray]
The array from which to gather values.
segment_ids : Union[ivy.Array, ivy.NativeArray]
        Must have the same size as the first dimension of `data`, and must be
        of an integer data type. The i-th element of `segment_ids` is the
        segment identifier for the i-th element of `data`.
num_segments : Union[int, ivy.Array, ivy.NativeArray]
An integer or array representing the total number of distinct segment IDs.
Returns
-------
ivy.Array
The output array, representing the result of a segmented mean operation.
        For each segment, it computes the mean of the values in `data` whose
        entry in `segment_ids` equals that segment's ID.
"""
return ivy.current_backend().unsorted_segment_mean(data, segment_ids, num_segments)
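# Worked example for unsorted_segment_mean (deterministic): elements sharing a
# segment ID are averaged together:
#
#     >>> data = ivy.array([1.0, 2.0, 3.0, 4.0])
#     >>> segment_ids = ivy.array([0, 0, 1, 1])
#     >>> ivy.unsorted_segment_mean(data, segment_ids, 2)
#     ivy.array([1.5, 3.5])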
@handle_exceptions
@handle_nestable
@handle_array_function
@to_native_arrays_and_back
def polyval(
coeffs: Union[ivy.Array, ivy.NativeArray],
x: Union[ivy.Array, ivy.NativeArray],
):
"""Evaluate and return a polynomial at specific given values.
Parameters
----------
coeffs
        Polynomial coefficients (including zero coefficients), ordered from
        highest degree to the constant term.
x
The value of the indeterminate variable at which to evaluate the polynomial.
Returns
-------
ret
        The value of the polynomial obtained by substituting ``x`` into the
        coefficients.
Examples
--------
>>> ivy.polyval([3, 0, 1], 5)
ivy.array(76)
"""
return ivy.current_backend().polyval(
coeffs,
x,
)
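# The docstring example evaluates 3*x**2 + 0*x + 1 at x = 5; the same value
# falls out of Horner's scheme in plain Python:
#
#     >>> from functools import reduce
#     >>> reduce(lambda acc, c: acc * 5 + c, [3, 0, 1], 0)
#     76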
# --- end of file: ivy/ivy/functional/ivy/experimental/creation.py ---
# global
from typing import Union, Optional
# local
import ivy
from ivy.func_wrapper import (
to_native_arrays_and_back,
handle_out_argument,
handle_nestable,
handle_device,
handle_backend_invalid,
)
from ivy.utils.exceptions import handle_exceptions
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@to_native_arrays_and_back
@handle_device
def invert_permutation(
x: Union[ivy.Array, ivy.NativeArray, list, tuple],
/,
) -> ivy.Array:
"""Compute the inverse of an index permutation.
Parameters
----------
x
        1-D integer array-like representing the indices of a zero-based array,
        intended to be used to permute that array.
Returns
-------
ret
        the inverse of the index permutation represented by ``x``.
Examples
--------
>>> a = ivy.asarray([0, 3, 1, 2])
>>> ivy.invert_permutation(a)
ivy.array([0, 2, 3, 1])
"""
return ivy.current_backend().invert_permutation(x)
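# Sanity check for the docstring example above: the inverse q of a permutation
# p satisfies q[p[i]] == i for every index i:
#
#     >>> p = [0, 3, 1, 2]
#     >>> q = [0, 2, 3, 1]
#     >>> all(q[p[i]] == i for i in range(len(p)))
#     True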
# Array API Standard #
# -------------------#
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def lexsort(
keys: Union[ivy.Array, ivy.NativeArray],
/,
*,
axis: int = -1,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Perform an indirect stable sort with an array of keys in ascending
order, with the last key used as primary sort order, second-to-last for
secondary, and so on. Each row of the key must have the same length, which
will also be the length of the returned array of integer indices, which
describes the sort order.
Parameters
----------
keys
        array-like input of size (k, N), where N is the shape of each key;
        keys may be multi-dimensional.
axis
axis of each key to be indirectly sorted.
By default, sort over the last axis of each key.
out
optional output array, for writing the result to.
Returns
-------
ret
        array of integer (int64) indices with shape N that sort the keys.
Examples
--------
>>> a = [1,5,1,4,3,4,4] # First column
>>> b = [9,4,0,4,0,2,1] # Second column
>>> ivy.lexsort([b, a]) # Sort by a, then by b
array([2, 0, 4, 6, 5, 3, 1])
"""
return ivy.current_backend().lexsort(keys, axis=axis, out=out)
# --- end of file: ivy/ivy/functional/ivy/experimental/sorting.py ---
# global
from typing import Union, Optional, Literal, List
# local
import ivy
from ivy.func_wrapper import (
handle_array_function,
to_native_arrays_and_back,
handle_out_argument,
handle_nestable,
handle_array_like_without_promotion,
handle_device,
handle_backend_invalid,
)
from ivy.utils.exceptions import handle_exceptions
# Array API Standard #
# -------------------#
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def argsort(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
axis: int = -1,
descending: bool = False,
stable: bool = True,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return the indices that sort an array ``x`` along a specified axis.
Parameters
----------
x
input array.
axis
axis along which to sort. If set to ``-1``, the function must sort along the
last axis. Default: ``-1``.
descending
sort order. If ``True``, the returned indices sort ``x`` in descending order
(by value). If ``False``, the returned indices sort ``x`` in ascending order
(by value). Default: ``False``.
stable
sort stability. If ``True``, the returned indices must maintain the relative
order of ``x`` values which compare as equal. If ``False``, the returned indices
may or may not maintain the relative order of ``x`` values which compare as
equal (i.e., the relative order of ``x`` values which compare as equal is
implementation-dependent). Default: ``True``.
out
optional output array, for writing the result to. It must have the same shape
as ``x``.
Returns
-------
ret
an array of indices. The returned array must have the same shape as ``x``. The
returned array must have the default array index data type.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.argsort.html>`_
in the standard.
    Both the description and the type hints above assume an array input for
    simplicity, but this function is *nestable*, and therefore also accepts
    :class:`ivy.Container` instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([3,1,2])
>>> y = ivy.argsort(x)
>>> print(y)
ivy.array([1,2,0])
>>> x = ivy.array([4,3,8])
>>> y = ivy.argsort(x, descending=True)
>>> print(y)
ivy.array([2,0,1])
>>> x = ivy.array([[1.5, 3.2], [2.3, 2.3]])
>>> ivy.argsort(x, axis=0, descending=True, stable=False, out=x)
>>> print(x)
ivy.array([[1, 0], [0, 1]])
>>> x = ivy.array([[[1,3], [3,2]], [[2,4], [2,0]]])
>>> y = ivy.argsort(x, axis=1, descending=False, stable=True)
>>> print(y)
ivy.array([[[0, 1], [1, 0]], [[0, 1], [1, 0]]])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([5,1,3]), b=ivy.array([[0, 3], [3, 2]]))
>>> y = ivy.argsort(x)
>>> print(y)
{
a: ivy.array([1, 2, 0]),
b: ivy.array([[0, 1], [1, 0]])
}
>>> x = ivy.Container(a=ivy.array([[3.5, 5],[2.4, 1]]))
>>> y = ivy.argsort(x)
>>> print(y)
{
a: ivy.array([[0,1],[1,0]])
}
>>> x = ivy.Container(a=ivy.array([4,3,6]), b=ivy.array([[4, 5], [2, 4]]))
>>> y = ivy.argsort(x, descending=True)
>>> print(y)
{
a: ivy.array([2, 0, 1]),
b: ivy.array([[1, 0], [1, 0]])
}
>>> x = ivy.Container(a=ivy.array([[1.5, 3.2],[2.3, 4]]),
... b=ivy.array([[[1,3],[3,2],[2,0]]]))
>>> y = x.argsort(axis=-1, descending=True, stable=False)
>>> print(y)
{
a: ivy.array([[1,0],[1,0]]),
b: ivy.array([[[1,0],[0, 1],[0, 1]]])
}
"""
return ivy.current_backend(x).argsort(
x, axis=axis, descending=descending, stable=stable, out=out
)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def sort(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
axis: int = -1,
descending: bool = False,
stable: bool = True,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return a sorted copy of an array.
Parameters
----------
x
input array
axis
axis along which to sort. If set to ``-1``, the function must sort along the
last axis. Default: ``-1``.
descending
        sort order. If ``True``, sort the values in descending order; otherwise
        sort in ascending order. Default: ``False``.
stable
sort stability. If ``True``,
the returned indices must maintain the relative order of ``x`` values which
compare as equal. If ``False``, the returned indices may or may not maintain the
relative order of ``x`` values which compare as equal (i.e., the relative order
of ``x`` values which compare as equal is implementation-dependent).
Default: ``True``.
out
optional output array, for writing the result to. It must have the same shape
as ``x``.
Returns
-------
ret
An array with the same dtype and shape as ``x``, with the elements sorted
along the given `axis`.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.sort.html>`_
in the standard.
    Both the description and the type hints above assume an array input for
    simplicity, but this function is *nestable*, and therefore also accepts
    :class:`ivy.Container` instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([7, 8, 6])
>>> y = ivy.sort(x)
>>> print(y)
ivy.array([6, 7, 8])
>>> x = ivy.array([[[8.9,0], [19,5]],[[6,0.3], [19,0.5]]])
>>> y = ivy.sort(x, axis=1, descending=True, stable=False)
>>> print(y)
ivy.array([[[19. , 5. ],[ 8.9, 0. ]],[[19. , 0.5],[ 6. , 0.3]]])
>>> x = ivy.array([1.5, 3.2, 0.7, 2.5])
    >>> y = ivy.zeros(4)
>>> ivy.sort(x, descending=True, stable=False, out=y)
>>> print(y)
ivy.array([3.2, 2.5, 1.5, 0.7])
>>> x = ivy.array([[1.1, 2.2, 3.3],[-4.4, -5.5, -6.6]])
>>> ivy.sort(x, out=x)
>>> print(x)
ivy.array([[ 1.1, 2.2, 3.3],
[-6.6, -5.5, -4.4]])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([8, 6, 6]),b=ivy.array([[9, 0.7], [0.4, 0]]))
>>> y = ivy.sort(x, descending=True)
>>> print(y)
{
a: ivy.array([8, 6, 6]),
b: ivy.array([[9., 0.7], [0.4, 0.]])
}
>>> x = ivy.Container(a=ivy.array([3, 0.7, 1]),b=ivy.array([[4, 0.9], [0.6, 0.2]]))
>>> y = ivy.sort(x, descending=False, stable=False)
>>> print(y)
{
a: ivy.array([0.7, 1., 3.]),
b: ivy.array([[0.9, 4.], [0.2, 0.6]])
}
"""
return ivy.current_backend(x).sort(
x, axis=axis, descending=descending, stable=stable, out=out
)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def msort(
a: Union[ivy.Array, ivy.NativeArray, list, tuple],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return a copy of an array sorted along the first axis.
Parameters
----------
a
array-like input.
out
optional output array, for writing the result to.
Returns
-------
ret
sorted array of the same type and shape as a
Examples
--------
>>> a = ivy.asarray([[8, 9, 6],[6, 2, 6]])
>>> ivy.msort(a)
ivy.array(
[[6, 2, 6],
[8, 9, 6]]
)
"""
return ivy.current_backend(a).msort(a, out=out)
# Extra #
# ------#
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def searchsorted(
x: Union[ivy.Array, ivy.NativeArray],
v: Union[ivy.Array, ivy.NativeArray],
/,
*,
side: Literal["left", "right"] = "left",
sorter: Optional[Union[ivy.Array, ivy.NativeArray, List[int]]] = None,
ret_dtype: Union[ivy.Dtype, ivy.NativeDtype] = ivy.int64,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return the indices of the inserted elements in a sorted array.
Parameters
----------
x
Input array. If `sorter` is None, then it must be sorted in ascending order,
otherwise `sorter` must be an array of indices that sort it.
v
        specific elements whose insertion points into the sorted array ``x``
        are to be found.
    side
        whether the returned index is on the 'left' or the 'right' side in the
        sorted array ``x``. If 'left', the index of the first suitable
        location is returned; if 'right', the last such index is returned.
ret_dtype
        the data type for the return value. Default: ``ivy.int64``; only
        integer data types are allowed.
sorter
optional array of integer indices that sort array x into ascending order,
typically the result of argsort.
out
optional output array, for writing the result to.
Returns
-------
ret
An array of insertion points.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([1, 2, 3])
>>> v = ivy.array([2])
>>> y = ivy.searchsorted(x, v)
>>> print(y)
ivy.array([1])
>>> x = ivy.array([0, 1, 2, 3])
>>> v = ivy.array([3])
>>> y = ivy.searchsorted(x, v, side='right')
>>> print(y)
ivy.array([4])
>>> x = ivy.array([0, 1, 2, 3, 4, 5])
>>> v = ivy.array([[3, 1], [10, 3], [-2, -1]])
>>> y = ivy.searchsorted(x, v)
>>> print(y)
ivy.array([[3, 1],
[6, 3],
[0, 0]])
"""
return ivy.current_backend(x, v).searchsorted(
x,
v,
side=side,
sorter=sorter,
out=out,
ret_dtype=ret_dtype,
)
# --- end of file: ivy/ivy/functional/ivy/sorting.py ---
import ivy
# Helpers #
# ------- #
def _broadcast_inputs(x1, x2):
x1_, x2_ = x1, x2
iterables = (list, tuple, ivy.Shape)
if not isinstance(x1_, iterables):
x1_, x2_ = x2, x1
if not isinstance(x1_, iterables):
return [x1], [x2]
if not isinstance(x2_, iterables):
x1 = [x1] * len(x2)
return x1, x2
# General with Custom Message #
# --------------------------- #
def check_less(x1, x2, allow_equal=False, message="", as_array=True):
def comp_fn(x1, x2):
return ivy.any(x1 > x2), ivy.any(x1 >= x2)
if not as_array:
def iter_comp_fn(x1_, x2_):
return any(x1 > x2 for x1, x2 in zip(x1_, x2_)), any(
x1 >= x2 for x1, x2 in zip(x1_, x2_)
)
def comp_fn(x1, x2): # noqa F811
return iter_comp_fn(*_broadcast_inputs(x1, x2))
gt, gt_eq = comp_fn(x1, x2)
# less_equal
if allow_equal and gt:
raise ivy.utils.exceptions.IvyException(
f"{x1} must be lesser than or equal to {x2}" if message == "" else message
)
elif not allow_equal and gt_eq:
raise ivy.utils.exceptions.IvyException(
f"{x1} must be lesser than {x2}" if message == "" else message
)
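# Minimal usage sketch for check_less (scalar path, so no backend is needed):
#
#     >>> check_less(1, 2, as_array=False)                    # passes silently
#     >>> check_less(2, 2, allow_equal=True, as_array=False)  # passes
#     >>> check_less(3, 2, as_array=False)                    # raises IvyException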
def check_greater(x1, x2, allow_equal=False, message="", as_array=True):
def comp_fn(x1, x2):
return ivy.any(x1 < x2), ivy.any(x1 <= x2)
if not as_array:
def iter_comp_fn(x1_, x2_):
return any(x1 < x2 for x1, x2 in zip(x1_, x2_)), any(
x1 <= x2 for x1, x2 in zip(x1_, x2_)
)
def comp_fn(x1, x2): # noqa F811
return iter_comp_fn(*_broadcast_inputs(x1, x2))
lt, lt_eq = comp_fn(x1, x2)
# greater_equal
if allow_equal and lt:
raise ivy.utils.exceptions.IvyException(
f"{x1} must be greater than or equal to {x2}" if message == "" else message
)
elif not allow_equal and lt_eq:
raise ivy.utils.exceptions.IvyException(
f"{x1} must be greater than {x2}" if message == "" else message
)
def check_equal(x1, x2, inverse=False, message="", as_array=True):
# not_equal
def eq_fn(x1, x2):
return x1 == x2 if inverse else x1 != x2
def comp_fn(x1, x2):
return ivy.any(eq_fn(x1, x2))
if not as_array:
def iter_comp_fn(x1_, x2_):
return any(eq_fn(x1, x2) for x1, x2 in zip(x1_, x2_))
def comp_fn(x1, x2): # noqa F811
return iter_comp_fn(*_broadcast_inputs(x1, x2))
eq = comp_fn(x1, x2)
if inverse and eq:
raise ivy.utils.exceptions.IvyException(
f"{x1} must not be equal to {x2}" if message == "" else message
)
elif not inverse and eq:
raise ivy.utils.exceptions.IvyException(
f"{x1} must be equal to {x2}" if message == "" else message
)
def check_isinstance(x, allowed_types, message=""):
if not isinstance(x, allowed_types):
raise ivy.utils.exceptions.IvyException(
f"type of x: {type(x)} must be one of the allowed types: {allowed_types}"
if message == ""
else message
)
def check_exists(x, inverse=False, message=""):
# not_exists
if inverse and ivy.exists(x):
raise ivy.utils.exceptions.IvyException(
"arg must be None" if message == "" else message
)
# exists
elif not inverse and not ivy.exists(x):
raise ivy.utils.exceptions.IvyException(
"arg must not be None" if message == "" else message
)
def check_elem_in_list(elem, list, inverse=False, message=""):
if inverse and elem in list:
raise ivy.utils.exceptions.IvyException(
message if message != "" else f"{elem} must not be one of {list}"
)
elif not inverse and elem not in list:
raise ivy.utils.exceptions.IvyException(
message if message != "" else f"{elem} must be one of {list}"
)
def check_true(expression, message="expression must be True"):
if not expression:
raise ivy.utils.exceptions.IvyException(message)
def check_false(expression, message="expression must be False"):
if expression:
raise ivy.utils.exceptions.IvyException(message)
def check_all(results, message="one of the args is False", as_array=True):
if (as_array and not ivy.all(results)) or (not as_array and not all(results)):
raise ivy.utils.exceptions.IvyException(message)
def check_any(results, message="all of the args are False", as_array=True):
if (as_array and not ivy.any(results)) or (not as_array and not any(results)):
raise ivy.utils.exceptions.IvyException(message)
def check_all_or_any_fn(
*args,
fn,
type="all",
limit=(0,),
message="args must exist according to type and limit given",
as_array=True,
):
if type == "all":
check_all([fn(arg) for arg in args], message, as_array=as_array)
elif type == "any":
count = 0
for arg in args:
count = count + 1 if fn(arg) else count
if count not in limit:
raise ivy.utils.exceptions.IvyException(message)
else:
raise ivy.utils.exceptions.IvyException("type must be all or any")
def check_shape(x1, x2, message=""):
message = (
message
if message != ""
else (
f"{x1} and {x2} must have the same shape ({ivy.shape(x1)} vs"
f" {ivy.shape(x2)})"
)
)
if ivy.shape(x1)[:] != ivy.shape(x2)[:]:
raise ivy.utils.exceptions.IvyException(message)
def check_same_dtype(x1, x2, message=""):
if ivy.dtype(x1) != ivy.dtype(x2):
message = (
message
if message != ""
else (
f"{x1} and {x2} must have the same dtype ({ivy.dtype(x1)} vs"
f" {ivy.dtype(x2)})"
)
)
raise ivy.utils.exceptions.IvyException(message)
# Creation #
# -------- #
def check_unsorted_segment_valid_params(data, segment_ids, num_segments):
if not isinstance(num_segments, int):
raise TypeError("num_segments must be of integer type")
valid_dtypes = [
ivy.int32,
ivy.int64,
]
if ivy.backend == "torch":
import torch
valid_dtypes = [
torch.int32,
torch.int64,
]
if isinstance(num_segments, torch.Tensor):
num_segments = num_segments.item()
elif ivy.backend == "paddle":
import paddle
valid_dtypes = [
paddle.int32,
paddle.int64,
]
if isinstance(num_segments, paddle.Tensor):
num_segments = num_segments.item()
if segment_ids.dtype not in valid_dtypes:
raise TypeError("segment_ids must have an integer dtype")
if data.shape[0] != segment_ids.shape[0]:
raise ValueError("The length of segment_ids should be equal to data.shape[0].")
if ivy.max(segment_ids) >= num_segments:
error_message = (
f"segment_ids[{ivy.argmax(segment_ids)}] = "
f"{ivy.max(segment_ids)} is out of range [0, {num_segments})"
)
raise ValueError(error_message)
if num_segments <= 0:
raise ValueError("num_segments must be positive")
# General #
# ------- #
def check_gather_input_valid(params, indices, axis, batch_dims):
if batch_dims > axis:
raise ivy.utils.exceptions.IvyException(
f"batch_dims ({batch_dims}) must be less than or equal to axis ({axis})."
)
if params.shape[0:batch_dims] != indices.shape[0:batch_dims]:
raise ivy.utils.exceptions.IvyException(
"batch dimensions must match in `params` and `indices`; saw"
f" {params.shape[0:batch_dims]} vs. {indices.shape[0:batch_dims]}"
)
def check_gather_nd_input_valid(params, indices, batch_dims):
if batch_dims >= len(params.shape):
raise ivy.utils.exceptions.IvyException(
f"batch_dims = {batch_dims} must be less than rank(`params`) ="
f" {len(params.shape)}."
)
if batch_dims >= len(indices.shape):
raise ivy.utils.exceptions.IvyException(
f"batch_dims = {batch_dims} must be less than rank(`indices`) ="
f" {len(indices.shape)}."
)
if params.shape[0:batch_dims] != indices.shape[0:batch_dims]:
raise ivy.utils.exceptions.IvyException(
"batch dimensions must match in `params` and `indices`; saw"
f" {params.shape[0:batch_dims]} vs. {indices.shape[0:batch_dims]}"
)
if indices.shape[-1] > (len(params.shape[batch_dims:])):
raise ivy.utils.exceptions.IvyException(
"index innermost dimension length must be <= rank(`params[batch_dims:]`);"
f" saw: {indices.shape[-1]} vs. {len(params.shape[batch_dims:])} ."
)
def check_one_way_broadcastable(x1, x2):
if len(x1) > len(x2):
return False
for a, b in zip(x1[::-1], x2[::-1]):
if a in (1, b):
pass
else:
return False
return True
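# Deterministic examples for the one-way broadcast check above: x1 may be
# broadcast to x2, but not the other way around:
#
#     >>> check_one_way_broadcastable((3, 1), (2, 3, 4))
#     True
#     >>> check_one_way_broadcastable((2, 3, 4), (3, 4))
#     False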
def check_inplace_sizes_valid(var, data):
if not check_one_way_broadcastable(data.shape, var.shape):
raise ivy.utils.exceptions.IvyException(
f"Could not output values of shape {var.shape} into array with shape"
f" {data.shape}."
)
def check_shapes_broadcastable(var, data):
if not check_one_way_broadcastable(var, data):
raise ivy.utils.exceptions.IvyBroadcastShapeError(
f"Could not broadcast shape {data} to shape {var}."
)
def check_dimensions(x):
if len(x.shape) <= 1:
raise ivy.utils.exceptions.IvyException(
f"input must have greater than one dimension; {x} has"
f" {len(x.shape)} dimensions"
)
def check_kernel_padding_size(kernel_size, padding_size):
for i in range(len(kernel_size)):
if (
padding_size[i][0] > kernel_size[i] // 2
or padding_size[i][1] > kernel_size[i] // 2
):
raise ValueError(
"Padding size should be less than or equal to half of the kernel size."
f" Got kernel_size: {kernel_size} and padding_size: {padding_size}"
)
def check_dev_correct_formatting(device):
assert device[0:3] in ["gpu", "tpu", "cpu"]
if device != "cpu":
assert device[3] == ":"
assert device[4:].isnumeric()
# Jax Specific #
# ------- #
def _check_jax_x64_flag(dtype):
if (
ivy.backend == "jax"
and not ivy.functional.backends.jax.jax.config.jax_enable_x64
):
ivy.utils.assertions.check_elem_in_list(
dtype,
["float64", "int64", "uint64", "complex128"],
inverse=True,
message=(
f"{dtype} output not supported while jax_enable_x64"
" is set to False, please import jax and enable the flag using "
"jax.config.update('jax_enable_x64', True)"
),
)
# --- end of file: ivy/ivy/utils/assertions.py ---
import os
import logging
import json
from urllib import request
import importlib
import ivy
folder_path = os.sep.join(__file__.split(os.sep)[:-3])
wrappers_path = os.path.join(folder_path, "wrappers.json")
if os.path.exists(wrappers_path):
    with open(wrappers_path) as f:
        wrappers = json.load(f)
wrappers_dir = os.path.join(folder_path, "ivy/wrappers")
def download_cython_wrapper(func_name: str):
"""Get the wrapper for the given function name."""
if func_name + ".so" not in wrappers["ivy"]["functional"]:
logging.warning(f"Wrapper for {func_name} not found.")
return False
try:
response = request.urlopen(
"https://raw.githubusercontent.com/unifyai"
+ "/binaries/cython_wrappers/wrappers/"
+ func_name
+ ".so"
)
        os.makedirs(wrappers_dir, exist_ok=True)
        with open(os.path.join(wrappers_dir, func_name + ".so"), "wb") as f:
f.write(response.read())
print("Downloaded wrapper for " + func_name)
return True
except request.HTTPError:
logging.warning(f"Unable to download wrapper for {func_name}.")
return False
def wrapper_exists(func_name: str):
"""Check if the wrapper for the given function name exists."""
return func_name + ".so" in wrappers["ivy"]["functional"]
def load_one_wrapper(func_name: str):
"""Load the wrapper for the given function name."""
module_name = func_name
dir_path = os.path.dirname(os.path.realpath(__file__))
# check if file exists
if os.path.isfile(os.path.join(dir_path, module_name + ".so")):
ivy.wrappers.__dict__[module_name] = importlib.import_module(module_name)
ivy.wrappers.__dict__[module_name + "_wrapper"] = getattr(
ivy.wrappers.__dict__[module_name], module_name + "_wrapper"
)
ivy.wrappers.__all__.append(module_name + "_wrapper")
return True
else:
return False
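# Typical flow tying the three helpers together (a sketch with a hypothetical
# function name; requires the wrappers.json manifest and network access to the
# binaries repo):
#
#     >>> if wrapper_exists("add"):
#     ...     if download_cython_wrapper("add"):
#     ...         load_one_wrapper("add")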
# --- end of file: ivy/ivy/wrappers/utils.py ---
# global
import numpy as np
import hypothesis.extra.numpy as nph
from hypothesis import strategies as st
from hypothesis.internal.floats import float_of
from functools import reduce as _reduce
from operator import mul
import sys
import string
import ml_dtypes # noqa
# local
import ivy_tests.test_ivy.helpers.globals as test_globals
from ..pipeline_helper import WithBackendContext
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers.hypothesis_helpers.dtype_helpers import get_dtypes
from . import general_helpers as gh
from . import dtype_helpers, number_helpers
from ..globals import mod_backend
@st.composite
def array_bools(
draw, *, size=st.shared(number_helpers.ints(min_value=1, max_value=4), key="size")
):
"""Draws a list of booleans with a given size.
Parameters
----------
draw
special function that draws data randomly (but is reproducible) from a given
data-set (ex. list).
size
size of the list.
Returns
-------
ret
A strategy that draws a list.
Examples
--------
>>> array_bools(size=5)
[False, True, False, False, False]
>>> array_bools(size=5)
[False, False, False, False, False]
>>> array_bools(size=5)
[True, False, False, False, False]
>>> array_bools(size=1)
[True]
>>> array_bools(size=1)
[False]
>>> array_bools(size=1)
[True]
>>> array_bools()
[False, False, False, False]
>>> array_bools()
[True, True, True, False]
>>> array_bools()
[True]
"""
if not isinstance(size, int):
size = draw(size)
return draw(st.lists(st.booleans(), min_size=size, max_size=size))
def list_of_size(*, x, size):
"""Return a list of the given length with elements drawn randomly from x.
Parameters
----------
x
a list to draw elements from.
size
length of the list.
Returns
-------
ret
A strategy that draws a list.
Examples
--------
>>> list_of_size(
... x=st.sampled_from([-1, 5, 9]),
... size=4,
... )
[-1, 5, -1, -1]
>>> list_of_size(
... x=st.sampled_from([-1, 5, 9]),
... size=4,
... )
[9, -1, -1, -1]
>>> list_of_size(
... x=st.sampled_from([-1, 5, 9]),
... size=4,
... )
[9, 9, -1, 9]
>>> list_of_size(
... x=st.integers(min_value=0, max_value=4),
... size=10,
... )
[3, 0, 0, 0, 0, 0, 0, 0, 0, 0]
>>> list_of_size(
... x=st.integers(min_value=0, max_value=4),
... size=10,
... )
[3, 3, 2, 4, 1, 0, 4, 2, 1, 2]
>>> list_of_size(
... x=st.integers(min_value=0, max_value=4),
... size=10,
... )
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
>>> list_of_size(
... x=st.booleans(),
... size=3,
... )
[False, False, False]
>>> list_of_size(
... x=st.booleans(),
... size=3,
... )
[True, True, False]
>>> list_of_size(
... x=st.booleans(),
... size=3,
... )
[False, True, False]
"""
return lists(x=x, min_size=size, max_size=size)
@st.composite
def lists(
draw,
*,
x,
min_size=None,
max_size=None,
size_bounds=None,
):
"""Draws a list with a random bounded size from the data-set x.
Parameters
----------
draw
special function that draws data randomly (but is reproducible) from a given
data-set (ex. list).
x
data-set of elements.
min_size
minimum size of the list.
max_size
max size of the list.
size_bounds
if min_size or max_size is None, draw them randomly from the range
[size_bounds[0], size_bounds[1]].
Returns
-------
ret
A strategy that draws a list.
Examples
--------
>>> lists(
... x=st.sampled_from([-1, 5, 9]),
... min_size=4,
... max_size=5,
... )
[5, 5, 5, 9, 9]
>>> lists(
... x=st.sampled_from([-1, 5, 9]),
... min_size=4,
... max_size=5,
... )
[5, 9, -1, -1]
>>> lists(
... x=st.sampled_from([-1, 5, 9]),
... min_size=4,
... max_size=5,
... )
[5, 9, 5, 9]
>>> lists(
... x=st.integers(min_value=0, max_value=4),
... size_bounds=(9, 10),
... )
[0, 2, 4, 3, 3, 3, 3, 2, 1, 4]
>>> lists(
... x=st.integers(min_value=0, max_value=4),
... size_bounds=(9, 10),
... )
[1, 0, 1, 2, 1, 4, 1, 3, 1]
>>> lists(
... x=st.integers(min_value=0, max_value=4),
... size_bounds=(9, 10),
... )
[1, 0, 1, 2, 1, 4, 1, 3, 1]
>>> lists(
... x=st.integers(min_value=0, max_value=4),
... size_bounds=[9, 10],
... )
[1, 3, 0, 2, 0, 0, 1, 4, 2, 3]
>>> lists(
... x=st.integers(min_value=0, max_value=4),
... size_bounds=[9, 10],
... )
[0, 0, 0, 0, 0, 0, 0, 0, 0]
>>> lists(
... x=st.integers(min_value=0, max_value=4),
... size_bounds=[9, 10],
... )
[1, 2, 4, 1, 1, 1, 4, 3, 2]
>>> lists(
... x=st.floats(
... min_value=1,
... max_value=3,
... exclude_max=True,
... ),
... min_size=5,
... max_size=5,
... )
[1.1, 1.0, 1.0, 1.0, 1.0]
>>> lists(
... x=st.floats(
... min_value=1,
... max_value=3,
... exclude_max=True,
... ),
... min_size=5,
... max_size=5,
... )
[2.00001, 2.00001, 1.0, 2.999999999999999, 1.9394938006792373]
>>> lists(
... x=st.floats(
... min_value=1,
... max_value=3,
... exclude_max=True,
... ),
... min_size=5,
... max_size=5,
... )
[1.0, 2.00001, 1.0, 2.999999999999999, 1.9394938006792373]
"""
if not isinstance(min_size, int) or not isinstance(max_size, int):
integers = (
number_helpers.ints(min_value=size_bounds[0], max_value=size_bounds[1])
if size_bounds
else number_helpers.ints()
)
if not isinstance(min_size, int):
min_size = draw(st.shared(integers, key=min_size))
if not isinstance(max_size, int):
max_size = draw(st.shared(integers, key=max_size))
return draw(st.lists(x, min_size=min_size, max_size=max_size))
@st.composite
def dtype_and_values(
draw,
*,
available_dtypes=get_dtypes("valid"),
num_arrays=1,
abs_smallest_val=None,
min_value=None,
max_value=None,
large_abs_safety_factor=1.1,
small_abs_safety_factor=1.1,
safety_factor_scale="linear",
allow_inf=False,
allow_nan=False,
exclude_min=False,
exclude_max=False,
min_num_dims=0,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
shape=None,
shared_dtype=False,
ret_shape=False,
dtype=None,
array_api_dtypes=False,
shape_key="shape",
):
"""Draws a list of arrays with elements from the given corresponding data
types.
Parameters
----------
draw
special function that draws data randomly (but is reproducible) from a given
data-set (ex. list).
available_dtypes
if dtype is None, data types are drawn from this list randomly.
num_arrays
Number of arrays to be drawn.
abs_smallest_val
sets the absolute smallest value to be generated for float data types,
this has no effect on integer data types. If none, the default data type
absolute smallest value is used.
min_value
minimum value of elements in each array.
max_value
maximum value of elements in each array.
large_abs_safety_factor
A safety factor of 1 means that all values are included without limitation,
when a "linear" safety factor scaler is used, a safety factor of 2 means
that only 50% of the range is included, a safety factor of 3 means that
only 33% of the range is included etc.
when a "log" safety factor scaler is used, a data type with maximum
value of 2^32 and a safety factor of 2 transforms the maximum to 2^16.
small_abs_safety_factor
A safety factor of 1 means that all values are included without limitation,
this has no effect on integer data types.
when a "linear" safety factor scaler is used, a data type with minimum
representable number of 0.0001 and a safety factor of 2 transforms the
minimum to 0.0002, a safety factor of 3 transforms the minimum to 0.0003 etc.
when a "log" safety factor scaler is used, a data type with minimum
representable number of 0.5 * 2^-16 and a safety factor of 2 transforms the
minimum to 0.5 * 2^-8, a safety factor of 3 transforms the minimum to 0.5 * 2^-4
safety_factor_scale
The operation to use for the safety factor scaling. Can be "linear" or "log".
Default value = "linear".
allow_inf
if True, allow inf in the arrays.
allow_nan
if True, allow Nans in the arrays.
exclude_min
if True, exclude the minimum limit.
exclude_max
if True, exclude the maximum limit.
min_num_dims
minimum size of the shape tuple.
max_num_dims
maximum size of the shape tuple.
min_dim_size
minimum value of each integer in the shape tuple.
max_dim_size
maximum value of each integer in the shape tuple.
shape
shape of the arrays in the list.
shared_dtype
if True, if dtype is None, a single shared dtype is drawn for all arrays.
ret_shape
if True, the shape of the arrays is also returned.
dtype
A list of data types for the given arrays.
array_api_dtypes
if True, use data types that can be promoted with the array_api_promotion
table.
Returns
-------
ret
A strategy that draws a tuple of a list of dtypes and a list
of their respective arrays.
Examples
--------
>>> dtype_and_values(
... num_arrays=3,
... )
(['uint16', 'float16', 'uint16'], [array([37915, 6322, 26765, 12413,
26986, 34665], dtype=uint16), array([-5.000e-01, -5.000e-01,
-2.000e+00, -6.711e-05, -1.100e+00, -5.955e+04], dtype=float16),
array([40817, 56193, 29200, 0, 5851, 9746], dtype=uint16)])
>>> dtype_and_values(
... num_arrays=3,
... )
(['bool', 'uint32', 'bool'], [array(False), array(0, dtype=uint32),
array(False)])
>>> dtype_and_values(
... num_arrays=3,
... )
(['int8', 'int8', 'int8'], [array(0, dtype=int8), array(0, dtype=int8),
array(0, dtype=int8)])
>>> dtype_and_values(
... available_dtypes=get_dtypes("numeric"),
... min_value=-10,
... max_value=10,
... num_arrays=2,
... shared_dtype=True,
... ),
(['float32', 'float32'], [array([1.1, 1.5], dtype=float32),
array([-5.9604645e-08, 5.9604645e-08], dtype=float32)])
>>> dtype_and_values(
... available_dtypes=get_dtypes("numeric"),
... min_value=-10,
... max_value=10,
... num_arrays=2,
... shared_dtype=True,
... ),
(['int32', 'int32'], [array(-5, dtype=int32), array(-1, dtype=int32)])
>>> dtype_and_values(
... available_dtypes=get_dtypes("numeric"),
... min_value=-10,
... max_value=10,
... num_arrays=2,
... shared_dtype=True,
... ),
(['uint64', 'uint64'], [array([0], dtype=uint64), array([0],
dtype=uint64)])
>>> dtype_and_values(
... available_dtypes=get_dtypes("numeric"),
... num_arrays=2,
... ret_shape=True
... )
(['int8', 'int32'], [array([27], dtype=int8), array([192],
dtype=int32)], (1,))
>>> dtype_and_values(
... available_dtypes=get_dtypes("numeric"),
... num_arrays=2,
... ret_shape=True
... )
(['int32', 'int16'], [array(0, dtype=int32), array(0,
dtype=int16)], ())
>>> dtype_and_values(
... available_dtypes=get_dtypes("numeric"),
... num_arrays=2,
... ret_shape=True
... )
(['int32', 'int16'], [array([[-103, 12, -41795, 1170789994,
44251, 44209, 433075925]], dtype=int32), array([[24791,
-24691, 24892, 16711, 7696, 972, 15357]], dtype=int16)],
(1, 7))
>>> dtype_and_values(
... available_dtypes=get_dtypes("numeric"),
... num_arrays=1,
... ret_shape=True,
... )
(['uint8'], [array([0], dtype=uint8)], (1,))
>>> dtype_and_values(
... available_dtypes=get_dtypes("numeric"),
... num_arrays=1,
... ret_shape=True,
... )
(['float32'], [array(-1., dtype=float32)], ())
>>> dtype_and_values(
... available_dtypes=get_dtypes("numeric"),
... num_arrays=1,
... ret_shape=True,
... )
(['int64'], [array(72057594037927936)], ())
"""
if isinstance(min_dim_size, st._internal.SearchStrategy):
min_dim_size = draw(min_dim_size)
if isinstance(max_dim_size, st._internal.SearchStrategy):
max_dim_size = draw(max_dim_size)
if isinstance(available_dtypes, st._internal.SearchStrategy) and dtype is None:
available_dtypes = draw(available_dtypes)
if not isinstance(num_arrays, int):
num_arrays = draw(num_arrays)
if dtype is None:
dtype = draw(
dtype_helpers.array_dtypes(
num_arrays=num_arrays,
available_dtypes=available_dtypes,
shared_dtype=shared_dtype,
array_api_dtypes=array_api_dtypes,
)
)
if shape is not None:
if not isinstance(shape, (tuple, list)):
shape = draw(shape)
else:
shape = draw(
st.shared(
gh.get_shape(
min_num_dims=min_num_dims,
max_num_dims=max_num_dims,
min_dim_size=min_dim_size,
max_dim_size=max_dim_size,
),
key=shape_key,
)
)
values = []
for i in range(num_arrays):
values.append(
draw(
array_values(
dtype=dtype[i],
shape=shape,
abs_smallest_val=abs_smallest_val,
min_value=min_value,
max_value=max_value,
allow_inf=allow_inf,
allow_nan=allow_nan,
exclude_min=exclude_min,
exclude_max=exclude_max,
large_abs_safety_factor=large_abs_safety_factor,
small_abs_safety_factor=small_abs_safety_factor,
safety_factor_scale=safety_factor_scale,
)
)
)
if ret_shape:
return dtype, values, shape
return dtype, values
@st.composite
def dtype_values_axis(
draw,
*,
available_dtypes,
num_arrays=1,
abs_smallest_val=None,
min_value=None,
max_value=None,
large_abs_safety_factor=1.1,
small_abs_safety_factor=1.1,
safety_factor_scale="linear",
allow_inf=False,
allow_nan=False,
exclude_min=False,
exclude_max=False,
min_num_dims=0,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
shape=None,
shared_dtype=False,
min_axis=None,
max_axis=None,
valid_axis=False,
allow_neg_axes=True,
min_axes_size=1,
max_axes_size=None,
force_int_axis=False,
force_tuple_axis=False,
ret_shape=False,
):
"""Draws a list of arrays with elements from the given data type, and a
random axis of the arrays.
Parameters
----------
draw
special function that draws data randomly (but is reproducible) from a given
data-set (ex. list).
available_dtypes
if dtype is None, data type is drawn from this list randomly.
num_arrays
Number of arrays to be drawn.
abs_smallest_val
sets the absolute smallest value to be generated for float data types,
this has no effect on integer data types. If none, the default data type
absolute smallest value is used.
min_value
minimum value of elements in the array.
max_value
maximum value of elements in the array.
large_abs_safety_factor
A safety factor of 1 means that all values are included without limitation,
when a "linear" safety factor scaler is used, a safety factor of 2 means
that only 50% of the range is included, a safety factor of 3 means that
only 33% of the range is included etc.
when a "log" safety factor scaler is used, a data type with maximum
value of 2^32 and a safety factor of 2 transforms the maximum to 2^16.
small_abs_safety_factor
A safety factor of 1 means that all values are included without limitation,
this has no effect on integer data types.
when a "linear" safety factor scaler is used, a data type with minimum
representable number of 0.0001 and a safety factor of 2 transforms the
minimum to 0.0002, a safety factor of 3 transforms the minimum to 0.0003 etc.
when a "log" safety factor scaler is used, a data type with minimum
representable number of 0.5 * 2^-16 and a safety factor of 2 transforms the
minimum to 0.5 * 2^-8, a safety factor of 3 transforms the minimum to 0.5 * 2^-4
safety_factor_scale
The operation to use when calculating the maximum value of the list. Can be
"linear" or "log". Default value = "linear".
allow_inf
if True, allow inf in the array.
allow_nan
if True, allow Nans in the arrays.
exclude_min
if True, exclude the minimum limit.
exclude_max
if True, exclude the maximum limit.
min_num_dims
minimum size of the shape tuple.
max_num_dims
maximum size of the shape tuple.
min_dim_size
minimum value of each integer in the shape tuple.
max_dim_size
maximum value of each integer in the shape tuple.
valid_axis
if True, a valid axis will be drawn from the array dimensions.
allow_neg_axes
if True, returned axes may include negative axes.
min_axes_size
minimum size of the axis tuple.
max_axes_size
maximum size of the axis tuple.
force_tuple_axis
if true, all axis will be returned as a tuple.
force_int_axis
if true, the returned axis will be an int.
shape
shape of the array. if None, a random shape is drawn.
shared_dtype
if True, if dtype is None, a single shared dtype is drawn for all arrays.
min_axis
if shape is None, axis is drawn from the range [min_axis, max_axis].
max_axis
if shape is None, axis is drawn from the range [min_axis, max_axis].
ret_shape
if True, the shape of the arrays is also returned.
Returns
-------
ret
A strategy that draws a tuple of a list of dtypes,
a list of arrays, and an axis.
Examples
--------
>>> dtype_values_axis()
(['int16'], [array(29788, dtype=int16)], 0)
>>> dtype_values_axis()
(['complex128'], [array(1.62222885e+156-2.68281172e-257j)], -1)
>>> dtype_values_axis()
(['float64'], [array(-1.40129846e-45)], 3)
>>> dtype_values_axis(
... available_dtypes=get_dtypes("numeric"),
... num_arrays=2,
... )
(['int8', 'int16'], [array([[0]], dtype=int8), array([[1]], dtype=int16)], 0)
>>> dtype_values_axis(
... available_dtypes=get_dtypes("numeric"),
... num_arrays=2,
... )
(['uint16', 'uint16'], [array(0, dtype=uint16), array(0, dtype=uint16)], 0)
>>> dtype_values_axis(
... available_dtypes=get_dtypes("numeric"),
... num_arrays=2,
... )
(['float64', 'int16'], [array(-2.44758124e-308), array(0, dtype=int16)], 0)
>>> dtype_values_axis(
... available_dtypes=get_dtypes("float"),
... min_num_dims=2,
... max_num_dims=3,
... min_dim_size=2,
... max_dim_size=5,
... min_axis=-2,
... max_axis=1,
... )
(['float64'], [array([[1.90000000e+000, 1.63426649e+308],
[-1.50000000e+000, -1.91931887e+234]])], -1)
>>> dtype_values_axis(
... available_dtypes=get_dtypes("float"),
... min_num_dims=2,
... max_num_dims=3,
... min_dim_size=2,
... max_dim_size=5,
... min_axis=-2,
... max_axis=1,
... )
(['bfloat16'], [array([[-1.29488e-38, -1.29488e-38],
[-1.29488e-38, -1.29488e-38]], dtype=bfloat16)], 0)
>>> dtype_values_axis(
... available_dtypes=get_dtypes("float"),
... min_num_dims=2,
... max_num_dims=3,
... min_dim_size=2,
... max_dim_size=5,
... min_axis=-2,
... max_axis=1,
... )
(['float64'], [array([[-2.44758124e-308, -2.44758124e-308],
[-2.44758124e-308, -2.44758124e-308]])], 0)
>>> dtype_values_axis(
... available_dtypes=get_dtypes("numeric"),
... num_arrays=1,
... allow_inf=True,
... allow_nan=True,
... )
(['float64'], [array([inf, -5.14361019e+16, 5.96046448e-08, 1.50000000e+00])], -51)
>>> dtype_values_axis(
... available_dtypes=get_dtypes("numeric"),
... num_arrays=1,
... allow_inf=True,
... allow_nan=True,
... )
(['int16'], [array(12445, dtype=int16)], 171)
>>> dtype_values_axis(
... available_dtypes=get_dtypes("numeric"),
... num_arrays=1,
... allow_inf=True,
... allow_nan=True,
... )
(['uint32'], [array([0], dtype=uint32)], 0)
"""
results = draw(
dtype_and_values(
available_dtypes=available_dtypes,
num_arrays=num_arrays,
abs_smallest_val=abs_smallest_val,
min_value=min_value,
max_value=max_value,
large_abs_safety_factor=large_abs_safety_factor,
small_abs_safety_factor=small_abs_safety_factor,
safety_factor_scale=safety_factor_scale,
allow_inf=allow_inf,
allow_nan=allow_nan,
exclude_min=exclude_min,
exclude_max=exclude_max,
min_num_dims=min_num_dims,
max_num_dims=max_num_dims,
min_dim_size=min_dim_size,
max_dim_size=max_dim_size,
shape=shape,
shared_dtype=shared_dtype,
ret_shape=True,
)
)
dtype, values, arr_shape = results
if valid_axis or shape:
if values[0].ndim == 0:
axis = None
else:
axis = draw(
gh.get_axis(
shape=arr_shape,
min_size=min_axes_size,
max_size=max_axes_size,
allow_neg=allow_neg_axes,
force_int=force_int_axis,
force_tuple=force_tuple_axis,
)
)
else:
axis = draw(number_helpers.ints(min_value=min_axis, max_value=max_axis))
if ret_shape:
return dtype, values, axis, arr_shape
return dtype, values, axis
@st.composite
def array_indices_axis(
draw,
*,
array_dtypes,
indices_dtypes=get_dtypes("valid"),
abs_smallest_val=None,
min_value=None,
max_value=None,
large_abs_safety_factor=1.1,
small_abs_safety_factor=1.1,
safety_factor_scale="linear",
disable_random_axis=False,
axis_zero=False,
allow_inf=False,
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
first_dimension_only=False,
indices_same_dims=False,
valid_bounds=True,
):
"""Generate two arrays x & indices, the values in the indices array are
indices of the array x. Draws an integers randomly from the minimum and
maximum number of positional arguments a given function can take.
Parameters
----------
draw
special function that draws data randomly (but is reproducible) from a given
data-set (ex. list).
array_dtypes
list of data type to draw the array dtype from.
indices_dtypes
list of data type to draw the indices dtype from.
abs_smallest_val
sets the absolute smallest value to be generated for float data types,
this has no effect on integer data types. If none, the default data type
absolute smallest value is used.
min_value
minimum value of elements in the array.
max_value
maximum value of elements in the array.
large_abs_safety_factor
A safety factor of 1 means that all values are included without limitation,
when a "linear" safety factor scaler is used, a safety factor of 2 means
that only 50% of the range is included, a safety factor of 3 means that
only 33% of the range is included etc.
when a "log" safety factor scaler is used, a data type with maximum
value of 2^32 and a safety factor of 2 transforms the maximum to 2^16.
small_abs_safety_factor
A safety factor of 1 means that all values are included without limitation,
this has no effect on integer data types.
when a "linear" safety factor scaler is used, a data type with minimum
representable number of 0.0001 and a safety factor of 2 transforms the
minimum to 0.0002, a safety factor of 3 transforms the minimum to 0.0003 etc.
when a "log" safety factor scaler is used, a data type with minimum
representable number of 0.5 * 2^-16 and a safety factor of 2 transforms the
minimum to 0.5 * 2^-8, a safety factor of 3 transforms the minimum to 0.5 * 2^-4
safety_factor_scale
The operation to use for the safety factor scaling. Can be "linear" or "log".
Default value = "linear".
disable_random_axis
axis is randomly generated with hypothesis if False. If True, axis is set
to 0 if axis_zero is True, -1 otherwise.
axis_zero
If True, axis is set to zero if disable_random_axis is True.
allow_inf
inf values are allowed to be generated in the values array when True.
min_num_dims
The minimum number of dimensions the arrays can have.
max_num_dims
The maximum number of dimensions the arrays can have.
min_dim_size
The minimum size of the dimensions of the arrays.
max_dim_size
The maximum size of the dimensions of the arrays.
indices_same_dims
Set x and indices dimensions to be the same
valid_bounds
If False, the strategy may produce out-of-bounds indices.
Returns
-------
ret
A strategy that can be used in the @given hypothesis
decorator which generates arrays of values and indices.
Examples
--------
@given(
array_indices_axis=array_indices_axis(
array_dtypes=helpers.get_dtypes("valid"),
indices_dtypes=helpers.get_dtypes("integer"),
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10
)
)
>>> array_indices_axis(
... array_dtypes=get_dtypes("valid"),
... indices_dtypes=["int64"],
... max_num_dims=1,
... indices_same_dims=True,
... disable_random_axis=True,
... axis_zero=True,
... )
(['int64', 'int64'], array([-65536]), array([0]))
>>> array_indices_axis(
... array_dtypes=get_dtypes("valid"),
... indices_dtypes=["int64"],
... max_num_dims=1,
... indices_same_dims=True,
... disable_random_axis=True,
... axis_zero=True,
... )
(['bool', 'int64'], array([False, False, False, True,
False, False, False, False]), array([0, 0, 2, 4,
0, 0, 0, 1]))
>>> array_indices_axis(
... array_dtypes=get_dtypes("valid"),
... indices_dtypes=["int64"],
... max_num_dims=1,
... indices_same_dims=True,
... disable_random_axis=True,
... axis_zero=True,
... )
(['int64', 'int64'], array([0]), array([0]))
>>> array_indices_axis(
... array_dtypes=get_dtypes("valid"),
... indices_dtypes=get_dtypes("integer"),
... disable_random_axis=True,
... first_dimension_only=True,
... )
(['float64', 'uint64'], array([-2.44758124e-308]),
array([0], dtype=uint64))
>>> array_indices_axis(
... array_dtypes=get_dtypes("valid"),
... indices_dtypes=get_dtypes("integer"),
... disable_random_axis=True,
... first_dimension_only=True,
... )
(['bool', 'uint64'], array([False]), array([0], dtype=uint64))
>>> array_indices_axis(
... array_dtypes=get_dtypes("valid"),
... indices_dtypes=get_dtypes("integer"),
... disable_random_axis=True,
... first_dimension_only=True,
... )
(['bool', 'int8'], array([False]), array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]], dtype=int8))
>>> array_indices_axis(
... array_dtypes=get_dtypes("valid"),
... indices_dtypes=["int64"],
... max_num_dims=1,
... indices_same_dims=True,
... )
(['float16', 'int64'], array([-256.], dtype=float16),
array([0]), 0, 0)
>>> array_indices_axis(
... array_dtypes=get_dtypes("valid"),
... indices_dtypes=["int64"],
... max_num_dims=1,
... indices_same_dims=True,
... )
(['uint8', 'int64'], array([1], dtype=uint8),
array([0]), -1, 0)
>>> array_indices_axis(
... array_dtypes=get_dtypes("valid"),
... indices_dtypes=["int64"],
... max_num_dims=1,
... indices_same_dims=True,
... )
(['uint64', 'int64'], array([0], dtype=uint64),
array([0]), 0, 0)
"""
x_dtype, x, x_shape = draw(
dtype_and_values(
available_dtypes=array_dtypes,
allow_inf=allow_inf,
ret_shape=True,
min_num_dims=min_num_dims,
max_num_dims=max_num_dims,
min_dim_size=min_dim_size,
max_dim_size=max_dim_size,
abs_smallest_val=abs_smallest_val,
min_value=min_value,
max_value=max_value,
large_abs_safety_factor=large_abs_safety_factor,
small_abs_safety_factor=small_abs_safety_factor,
safety_factor_scale=safety_factor_scale,
)
)
x_dtype = x_dtype[0]
x = x[0]
if disable_random_axis:
if axis_zero:
axis = 0
else:
axis = -1
batch_dims = 0
batch_shape = x_shape[0:0]
else:
axis = draw(
number_helpers.ints(
min_value=-1 * len(x_shape),
max_value=len(x_shape) - 1,
)
)
batch_dims = draw(
number_helpers.ints(
min_value=0,
max_value=max(0, axis),
)
)
batch_shape = x_shape[0:batch_dims]
if indices_same_dims:
indices_shape = x_shape
else:
shape_var = draw(
gh.get_shape(
allow_none=False,
min_num_dims=min_num_dims,
max_num_dims=max_num_dims - batch_dims,
min_dim_size=min_dim_size,
max_dim_size=max_dim_size,
)
)
indices_shape = batch_shape + shape_var
if first_dimension_only:
max_axis = max(x_shape[0] - 1, 0)
else:
max_axis = max(x_shape[axis] - 1, 0)
if not valid_bounds:
max_axis = max_axis + 10
indices_dtype, indices = draw(
dtype_and_values(
available_dtypes=indices_dtypes,
allow_inf=False,
min_value=0,
max_value=max_axis,
shape=indices_shape,
)
)
indices_dtype = indices_dtype[0]
indices = indices[0]
if disable_random_axis:
return [x_dtype, indices_dtype], x, indices
return [x_dtype, indices_dtype], x, indices, axis, batch_dims
@st.composite
def array_indices_put_along_axis(
draw,
*,
array_dtypes,
indices_dtypes=get_dtypes("valid"),
disable_random_axis=False,
axis_zero=False,
allow_inf=False,
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
first_dimension_only=False,
indices_same_dims=False,
valid_bounds=True,
values=None,
values_dtypes=get_dtypes("valid"),
):
"""Generate two arrays x & indices, the values in the indices array are
indices of the array x. Draws an integers randomly from the minimum and
maximum number of positional arguments a given function can take.
Parameters
----------
draw
special function that draws data randomly (but is reproducible) from a given
data-set (ex. list).
array_dtypes
list of data type to draw the array dtype from.
indices_dtypes
list of data type to draw the indices dtype from.
disable_random_axis
axis is randomly generated with hypothesis if False. If True, axis is set
to 0 if axis_zero is True, -1 otherwise.
axis_zero
If True, axis is set to zero if disable_random_axis is True.
allow_inf
inf values are allowed to be generated in the values array when True.
min_num_dims
The minimum number of dimensions the arrays can have.
max_num_dims
The maximum number of dimensions the arrays can have.
min_dim_size
The minimum size of the dimensions of the arrays.
max_dim_size
The maximum size of the dimensions of the arrays.
indices_same_dims
Set x and indices dimensions to be the same
valid_bounds
If False, the strategy may produce out-of-bounds indices.
values
Custom values array to use instead of randomly generated values.
values_dtypes : Union[None, List[str]]
        A list of dtypes to draw the values dtype from. Defaults to the dtypes
        returned by ``get_dtypes("valid")``.
Returns
-------
ret
A strategy that can be used in the @given hypothesis
decorator which generates arrays of values and indices.
Examples
--------
@given(
array_indices_axis=array_indices_axis(
array_dtypes=helpers.get_dtypes("valid"),
indices_dtypes=helpers.get_dtypes("integer"),
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10
)
)
>>> array_indices_axis(
... array_dtypes=get_dtypes("valid"),
... indices_dtypes=["int64"],
... max_num_dims=1,
... indices_same_dims=True,
... disable_random_axis=True,
... axis_zero=True,
... )
(['int64', 'int64'], array([-65536]), array([0]))
(['bool', 'int64'], array([False, False, False, True,
False, False, False, False]), array([0, 0, 2, 4,
0, 0, 0, 1]))
(['int64', 'int64'], array([0]), array([0]))
>>> array_indices_axis(
... array_dtypes=get_dtypes("valid"),
... indices_dtypes=get_dtypes("integer"),
... disable_random_axis=True,
... first_dimension_only=True,
... )
(['float64', 'uint64'], array([-2.44758124e-308]),
array([0], dtype=uint64))
(['bool', 'uint64'], array([False]), array([0], dtype=uint64))
(['bool', 'int8'], array([False]), array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]], dtype=int8))
>>> array_indices_axis(
... array_dtypes=get_dtypes("valid"),
... indices_dtypes=["int64"],
... max_num_dims=1,
... indices_same_dims=True,
... )
(['float16', 'int64'], array([-256.], dtype=float16),
array([0]), 0, 0)
(['uint8', 'int64'], array([1], dtype=uint8),
array([0]), -1, 0)
(['uint64', 'int64'], array([0], dtype=uint64),
array([0]), 0, 0)
"""
x_dtype, x, x_shape = draw(
dtype_and_values(
available_dtypes=array_dtypes,
allow_inf=allow_inf,
ret_shape=True,
min_num_dims=min_num_dims,
max_num_dims=max_num_dims,
min_dim_size=min_dim_size,
max_dim_size=max_dim_size,
)
)
x_dtype = x_dtype[0]
x = x[0]
if disable_random_axis:
if axis_zero:
axis = 0
else:
axis = -1
batch_dims = 0
batch_shape = x_shape[0:0]
else:
axis = draw(
number_helpers.ints(
min_value=-1 * len(x_shape),
max_value=len(x_shape) - 1,
)
)
batch_dims = draw(
number_helpers.ints(
min_value=0,
max_value=max(0, axis),
)
)
batch_shape = x_shape[0:batch_dims]
if indices_same_dims:
indices_shape = x_shape
else:
shape_var = draw(
gh.get_shape(
allow_none=False,
min_num_dims=min_num_dims,
max_num_dims=max_num_dims - batch_dims,
min_dim_size=min_dim_size,
max_dim_size=max_dim_size,
)
)
indices_shape = batch_shape + shape_var
if first_dimension_only:
max_axis = max(x_shape[0] - 1, 0)
else:
max_axis = max(x_shape[axis] - 1, 0)
if not valid_bounds:
max_axis = max_axis + 10
indices_dtype, indices = draw(
dtype_and_values(
available_dtypes=indices_dtypes,
allow_inf=False,
min_value=0,
max_value=max_axis,
shape=indices_shape,
)
)
indices_dtype = indices_dtype[0]
indices = indices[0]
if disable_random_axis:
return [x_dtype, indices_dtype], x, indices
values_shape = indices_shape
values_dtype, values = draw(
dtype_and_values(
available_dtypes=values_dtypes,
allow_inf=False,
shape=values_shape,
)
)
values_dtype = values_dtype[0]
values = values[0]
return [x_dtype, indices_dtype, values_dtype], x, indices, axis, values, batch_dims
@st.composite
def arrays_and_axes(
draw,
available_dtypes=get_dtypes("float"),
allow_none=False,
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
num=2,
return_dtype=False,
force_int_axis=False,
):
"""Generate a list of arrays and axes.
Parameters
----------
draw
special function that draws data randomly (but is reproducible) from a given
data-set (ex. list).
available_dtypes
if dtype is None, data type is drawn from this list randomly.
allow_none
if True, one of the dimensions can be None
min_num_dims
The minimum number of dimensions the arrays can have.
max_num_dims
The maximum number of dimensions the arrays can have.
min_dim_size
The minimum size of the dimensions of the arrays.
max_dim_size
The maximum size of the dimensions of the arrays.
num
The number of arrays to be generated
return_dtype
If `True`, return a tuple of the form `(dtype, arrays, axes)` instead of
`(arrays, axes)`
force_int_axis
If `True` and only one axis is drawn for each array, the returned axis will be
an integer instead of a tuple containing one integer or `None`
Returns
-------
A strategy that draws arrays and their axes
Examples
--------
>>> arrays_and_axes(
... allow_none=False,
... min_num_dims=1,
... max_num_dims=2,
... min_dim_size=2,
... max_dim_size=4,
... num=2,
... return_dtype=True,
... )
(['float16', 'float16'], [array([[-1., -1.],
[-1., -1.]], dtype=float16), array([[-1., -1.],
[-1., -1.]], dtype=float16)], (None, None))
>>> arrays_and_axes(
... allow_none=False,
... min_num_dims=1,
... max_num_dims=2,
... min_dim_size=2,
... max_dim_size=4,
... num=2,
... return_dtype=True,
... )
(['float16', 'float32'],
[array([ 1.5 , -8.33], dtype=float16),
array([8.26e+00, 9.10e+00, 6.72e-05], dtype=float16)],
(0, None))
>>> arrays_and_axes(
... allow_none=False,
... min_num_dims=1,
... max_num_dims=2,
... min_dim_size=2,
... max_dim_size=4,
... num=2,
... return_dtype=True,
... )
(['float64', 'float32'],
[array([-1.1, -12.24322108]),
array([[-2.44758124e-308, 8.26446279e+000, 5.96046448e-008],
[1.17549435e-038, 1.06541027e-001, 1.13725760e+001]])],
(None, None))
>>> arrays_and_axes(
... num=1,
... force_int_axis=True,
... )
([array([0.07143888])], 0)
>>> arrays_and_axes(
... num=1,
... force_int_axis=True,
... )
([array([-2.44758124e-308])], None)
>>> arrays_and_axes(
... num=1,
... force_int_axis=True,
... )
([array([-6.72e-05, -6.72e-05, -6.72e-05, -6.72e-05, -6.72e-05],
dtype=float16)], 0)
"""
shapes = []
for _ in range(num):
shape = draw(
gh.get_shape(
allow_none=False,
min_num_dims=min_num_dims,
max_num_dims=max_num_dims,
min_dim_size=min_dim_size,
max_dim_size=max_dim_size,
)
)
shapes.append(shape)
if isinstance(available_dtypes, st._internal.SearchStrategy):
available_dtypes = draw(available_dtypes)
dtype = draw(
dtype_helpers.array_dtypes(num_arrays=num, available_dtypes=available_dtypes)
)
arrays = []
for shape in shapes:
arrays.append(
draw(array_values(dtype=dtype[0], shape=shape, min_value=-20, max_value=20))
)
if force_int_axis:
        # ToDo: the code below uses `shape`, which after the loop above always
        # refers to the last element of `shapes`, not to each array's shape.
        # This is a bug that should be fixed in the future.
if len(shape) <= 2:
axes = draw(st.one_of(st.integers(0, len(shape) - 1), st.none()))
else:
axes = draw(st.integers(0, len(shape) - 1))
else:
all_axes_ranges = []
for shape in shapes:
if None in all_axes_ranges:
all_axes_ranges.append(st.integers(0, len(shape) - 1))
else:
all_axes_ranges.append(
st.one_of(st.none(), st.integers(0, len(shape) - 1))
)
axes = draw(st.tuples(*all_axes_ranges))
if return_dtype:
return dtype, arrays, axes
return arrays, axes
def _clamp_value(x, dtype_info):
# 0th index is max, 1st index is min
if x > dtype_info[0]:
return dtype_info[0]
if x < dtype_info[1]:
return dtype_info[1]
return x
@st.composite
def array_values(
draw,
*,
dtype,
shape,
abs_smallest_val=None,
min_value=None,
max_value=None,
allow_nan=False,
allow_subnormal=False,
allow_inf=False,
exclude_min=True,
exclude_max=True,
large_abs_safety_factor=1.1,
small_abs_safety_factor=1.1,
safety_factor_scale="linear",
):
"""Draws a list (of lists) of a given shape containing values of a given
data type.
Parameters
----------
draw
special function that draws data randomly (but is reproducible) from a given
data-set (ex. list).
dtype
data type of the elements of the list.
shape
shape of the required list.
abs_smallest_val
sets the absolute smallest value to be generated for float data types,
this has no effect on integer data types. If none, the default data type
absolute smallest value is used.
min_value
minimum value of elements in the list.
max_value
maximum value of elements in the list.
allow_nan
if True, allow Nans in the list.
allow_subnormal
if True, allow subnormals in the list.
allow_inf
if True, allow inf in the list.
exclude_min
if True, exclude the minimum limit.
exclude_max
if True, exclude the maximum limit.
large_abs_safety_factor
A safety factor of 1 means that all values are included without limitation,
when a "linear" safety factor scaler is used, a safety factor of 2 means
that only 50% of the range is included, a safety factor of 3 means that
only 33% of the range is included etc.
when a "log" safety factor scaler is used, a data type with maximum
value of 2^32 and a safety factor of 2 transforms the maximum to 2^16.
small_abs_safety_factor
A safety factor of 1 means that all values are included without limitation,
this has no effect on integer data types.
when a "linear" safety factor scaler is used, a data type with minimum
representable number of 0.0001 and a safety factor of 2 transforms the
minimum to 0.0002, a safety factor of 3 transforms the minimum to 0.0003 etc.
when a "log" safety factor scaler is used, a data type with minimum
representable number of 0.5 * 2^-16 and a safety factor of 2 transforms the
minimum to 0.5 * 2^-8, a safety factor of 3 transforms the minimum to 0.5 * 2^-4
safety_factor_scale
The operation to use when calculating the maximum value of the list. Can be
"linear" or "log". Default value = "linear".
        If min_value or max_value lies outside the valid range of the data
        type, the invalid value is replaced by the data type limit; in that
        case the original range of the numbers is not preserved.
Returns
-------
A strategy that draws a list.
Examples
--------
>>> array_values(
... dtype=get_dtypes("valid"),
... shape=get_shape(),
... )
[1806 87 36912 6955 59576]
>>> array_values(
... dtype=get_dtypes("valid"),
... shape=get_shape(),
... )
1025
"""
assert small_abs_safety_factor >= 1, "small_abs_safety_factor must be >= 1"
    assert large_abs_safety_factor >= 1, "large_abs_safety_factor must be >= 1"
if isinstance(shape, st._internal.SearchStrategy):
shape = draw(shape)
size = 1
if isinstance(shape, int):
size = shape
else:
for dim in shape:
size *= dim
if isinstance(dtype, st._internal.SearchStrategy):
dtype = draw(dtype)
dtype = dtype[0] if isinstance(dtype, list) else draw(dtype)
if "float" in dtype or "complex" in dtype:
kind_dtype = "float"
if mod_backend[test_globals.CURRENT_BACKEND]:
proc, input_queue, output_queue = mod_backend[test_globals.CURRENT_BACKEND]
input_queue.put(
("dtype_info_helper", test_globals.CURRENT_BACKEND, kind_dtype, dtype)
)
dtype_info = output_queue.get()
else:
dtype_info = array_helpers_dtype_info_helper(
backend=test_globals.CURRENT_BACKEND, kind_dtype=kind_dtype, dtype=dtype
)
elif "int" in dtype:
kind_dtype = "int"
if mod_backend[test_globals.CURRENT_BACKEND]:
proc, input_queue, output_queue = mod_backend[test_globals.CURRENT_BACKEND]
input_queue.put(
("dtype_info_helper", test_globals.CURRENT_BACKEND, kind_dtype, dtype)
)
dtype_info = output_queue.get()
else:
dtype_info = array_helpers_dtype_info_helper(
backend=test_globals.CURRENT_BACKEND, kind_dtype=kind_dtype, dtype=dtype
)
elif "bool" in dtype:
kind_dtype = "bool"
else:
raise TypeError(
f"{dtype} is not a valid data type that can be generated,"
" only integers, floats and booleans are allowed."
)
if kind_dtype != "bool":
if min_value is not None:
min_value = _clamp_value(min_value, dtype_info)
if max_value is not None:
max_value = _clamp_value(max_value, dtype_info)
min_value, max_value, abs_smallest_val = gh.apply_safety_factor(
dtype,
backend=test_globals.CURRENT_BACKEND,
min_value=min_value,
max_value=max_value,
abs_smallest_val=abs_smallest_val,
small_abs_safety_factor=small_abs_safety_factor,
large_abs_safety_factor=large_abs_safety_factor,
safety_factor_scale=safety_factor_scale,
)
assert max_value >= min_value
if kind_dtype == "int":
if exclude_min:
min_value += 1
if exclude_max:
max_value -= 1
values = draw(list_of_size(x=st.integers(min_value, max_value), size=size))
elif kind_dtype == "float":
floats_info = {
"float16": {"cast_type": "float16", "width": 16},
"bfloat16": {"cast_type": "float32", "width": 32},
"float32": {"cast_type": "float32", "width": 32},
"float64": {"cast_type": "float64", "width": 64},
"complex64": {"cast_type": "complex64", "width": 32},
"complex128": {"cast_type": "complex128", "width": 64},
}
        # the smallest magnitude is already constrained by min_value or
        # max_value, so abs_smallest_val is not needed to split the range
if min_value > -abs_smallest_val or max_value < abs_smallest_val:
float_strategy = st.floats(
min_value=float_of(min_value, floats_info[dtype]["width"]),
max_value=float_of(max_value, floats_info[dtype]["width"]),
allow_nan=allow_nan,
allow_subnormal=allow_subnormal,
allow_infinity=allow_inf,
width=floats_info[dtype]["width"],
exclude_min=exclude_min,
exclude_max=exclude_max,
)
# kind of a hack to not use the calculated max and min values
elif allow_inf or allow_nan:
float_strategy = st.floats(
allow_nan=allow_nan,
allow_subnormal=allow_subnormal,
allow_infinity=allow_inf,
width=floats_info[dtype]["width"],
)
else:
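            # draw from the union of [min_value, -abs_smallest_val] and
            # [abs_smallest_val, max_value], skipping the tiny band around zero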
float_strategy = st.one_of(
st.floats(
min_value=float_of(min_value, floats_info[dtype]["width"]),
max_value=float_of(
-abs_smallest_val, floats_info[dtype]["width"]
),
allow_nan=allow_nan,
allow_subnormal=allow_subnormal,
allow_infinity=allow_inf,
width=floats_info[dtype]["width"],
exclude_min=exclude_min,
exclude_max=exclude_max,
),
st.floats(
min_value=float_of(
abs_smallest_val, floats_info[dtype]["width"]
),
max_value=float_of(max_value, floats_info[dtype]["width"]),
allow_nan=allow_nan,
allow_subnormal=allow_subnormal,
allow_infinity=allow_inf,
width=floats_info[dtype]["width"],
exclude_min=exclude_min,
exclude_max=exclude_max,
),
)
if "complex" in dtype:
float_strategy = st.tuples(float_strategy, float_strategy)
values = draw(list_of_size(x=float_strategy, size=size))
if "complex" in dtype:
values = [complex(*v) for v in values]
else:
values = draw(list_of_size(x=st.booleans(), size=size))
array = np.asarray(values, dtype=dtype)
if isinstance(shape, (tuple, list)):
return array.reshape(shape)
    return array
def array_helpers_dtype_info_helper(backend, kind_dtype, dtype):
with WithBackendContext(backend) as ivy_backend:
if kind_dtype == "float":
return (
ivy_backend.finfo(dtype).max,
ivy_backend.finfo(dtype).min,
getattr(ivy_backend.finfo(dtype), "smallest_normal", None),
)
elif kind_dtype == "int":
return (
ivy_backend.iinfo(dtype).max,
ivy_backend.iinfo(dtype).min,
getattr(ivy_backend.iinfo(dtype), "smallest_normal", None),
)
# From array-api repo #
# ---------------------------- #
def _broadcast_shapes(shape1, shape2):
"""Broadcasts `shape1` and `shape2`"""
N1 = len(shape1)
N2 = len(shape2)
N = max(N1, N2)
shape = [None for _ in range(N)]
i = N - 1
while i >= 0:
n1 = N1 - N + i
if N1 - N + i >= 0:
d1 = shape1[n1]
else:
d1 = 1
n2 = N2 - N + i
if N2 - N + i >= 0:
d2 = shape2[n2]
else:
d2 = 1
if d1 == 1:
shape[i] = d2
elif d2 == 1:
shape[i] = d1
elif d1 == d2:
shape[i] = d1
else:
raise Exception("Broadcast error")
i = i - 1
return tuple(shape)
# from array-api repo
def broadcast_shapes(*shapes):
if len(shapes) == 0:
raise ValueError("shapes=[] must be non-empty")
elif len(shapes) == 1:
return shapes[0]
result = _broadcast_shapes(shapes[0], shapes[1])
for i in range(2, len(shapes)):
result = _broadcast_shapes(result, shapes[i])
return result
# np.prod and others have overflow and math.prod is Python 3.8+ only
def prod(seq):
return _reduce(mul, seq, 1)
# from array-api repo
def mutually_broadcastable_shapes(
num_shapes: int,
*,
base_shape=(),
min_dims: int = 1,
max_dims: int = 4,
min_side: int = 1,
max_side: int = 4,
):
if max_dims is None:
max_dims = min(max(len(base_shape), min_dims) + 5, 32)
if max_side is None:
max_side = max(base_shape[-max_dims:] + (min_side,)) + 5
return (
nph.mutually_broadcastable_shapes(
num_shapes=num_shapes,
base_shape=base_shape,
min_dims=min_dims,
max_dims=max_dims,
min_side=min_side,
max_side=max_side,
)
.map(lambda BS: BS.input_shapes)
.filter(lambda shapes: all(prod(i for i in s if i > 0) < 1000 for s in shapes))
)
@st.composite
def array_and_broadcastable_shape(draw, dtype):
"""Return an array and a shape that the array can be broadcast to."""
if isinstance(dtype, st._internal.SearchStrategy):
dtype = draw(dtype)
dtype = dtype[0] if isinstance(dtype, list) else draw(dtype)
in_shape = draw(nph.array_shapes(min_dims=1, max_dims=4))
x = draw(array_values(shape=in_shape, dtype=dtype))
to_shape = draw(
mutually_broadcastable_shapes(1, base_shape=in_shape)
.map(lambda S: S[0])
.filter(lambda s: broadcast_shapes(in_shape, s) == s),
label="shape",
)
return x, to_shape
@st.composite
def arrays_for_pooling(
draw,
min_dims,
max_dims,
min_side,
max_side,
explicit_or_str_padding=False,
only_explicit_padding=False,
return_dilation=False,
mixed_fn_compos=True,
data_format="channel_last",
return_data_format=False,
):
in_shape = draw(
nph.array_shapes(
min_dims=min_dims, max_dims=max_dims, min_side=min_side, max_side=max_side
)
)
dtype, x = draw(
dtype_and_values(
available_dtypes=get_dtypes("float", mixed_fn_compos=mixed_fn_compos),
shape=in_shape,
num_arrays=1,
max_value=100,
min_value=-100,
)
)
if not isinstance(data_format, str):
data_format = draw(data_format)
array_dim = x[0].ndim
if array_dim == 5:
kernel = draw(
st.tuples(
st.integers(1, in_shape[1]),
st.integers(1, in_shape[2]),
st.integers(1, in_shape[3]),
)
)
if array_dim == 4:
kernel = draw(
st.tuples(st.integers(1, in_shape[1]), st.integers(1, in_shape[2]))
)
if array_dim == 3:
kernel = draw(st.tuples(st.integers(1, in_shape[1])))
if return_dilation:
dilations = []
for i in range(len(kernel)):
if kernel[i] > 1:
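                # largest dilation keeping the dilated kernel inside the
                # input: (kernel[i] - 1) * dilation + 1 <= in_shape[i + 1]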
max_dilation = (in_shape[i + 1] - kernel[i]) // (kernel[i] - 1) + 1
dilations.append(draw(st.integers(1, max_dilation)))
else:
dilations.append(1)
if explicit_or_str_padding or only_explicit_padding:
if draw(st.booleans()):
max_pad = min(kernel[i] // 2 for i in range(array_dim - 2))
padding = draw(st.integers(0, max_pad))
else:
padding = []
for i in range(array_dim - 2):
max_pad = kernel[i] // 2
padding.append(
draw(
st.tuples(
st.integers(0, max_pad),
st.integers(0, max_pad),
)
)
)
if explicit_or_str_padding:
padding = draw(
st.one_of(st.just(padding), st.sampled_from(["VALID", "SAME"]))
)
else:
padding = draw(st.sampled_from(["VALID", "SAME"]))
# We do this to avoid this error in the tf backend
# ValueError: `strides > 1` not supported in conjunction with `dilation_rate > 1`
# TODO: Explore fully compositional implementation for pooling to bypass this in tf.
if return_dilation:
strides = (
draw(st.tuples(st.integers(1, min(kernel))))
if max(dilations) <= 1
else (1,)
)
else:
strides = draw(st.tuples(st.integers(1, min(kernel))))
if data_format == "channel_first":
dim = len(in_shape)
x[0] = np.transpose(x[0], (0, dim - 1, *range(1, dim - 1)))
out = [dtype, x, kernel, strides, padding]
if return_dilation:
out.append(dilations)
if return_data_format:
out.append(data_format)
return tuple(out)
@st.composite
def dtype_array_query(
draw,
*,
available_dtypes,
min_num_dims=1,
max_num_dims=3,
min_dim_size=0,
max_dim_size=10,
allow_mask=True,
allow_neg_step=True,
):
dtype = draw(
helpers.array_dtypes(
num_arrays=1,
available_dtypes=available_dtypes,
)
)
shape = draw(
helpers.get_shape(
min_num_dims=min_num_dims,
max_num_dims=max_num_dims,
min_dim_size=min_dim_size,
max_dim_size=max_dim_size,
)
)
array = draw(
helpers.array_values(
dtype=dtype[0],
shape=shape,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="log",
)
)
if allow_mask and draw(st.booleans()):
mask_shape = shape[: draw(st.integers(0, len(shape)))]
index = draw(
helpers.array_values(
dtype="bool",
shape=mask_shape,
).filter(lambda x: np.sum(x) > 0)
)
return dtype + ["bool"], array, index
supported_index_types = ["int", "slice", "list", "array"]
index_types = draw(
st.lists(
st.sampled_from(supported_index_types),
min_size=0,
max_size=len(shape),
)
)
index_types = [v if shape[i] > 0 else "slice" for i, v in enumerate(index_types)]
index = []
empty_array = prod(shape) == 0
for s, index_type in zip(shape, index_types):
if index_type == "int":
new_index = draw(st.integers(min_value=-s + 1, max_value=s - 1))
        elif index_type == "list":
new_index = draw(
st.lists(
st.integers(min_value=-s + 1, max_value=s - 1),
min_size=1 if not empty_array else 0,
max_size=20 if not empty_array else 0,
)
)
elif index_type == "array":
_, new_index = draw(
helpers.dtype_and_values(
min_value=-s + 1,
max_value=s - 1,
dtype=["int64"],
max_num_dims=4,
min_dim_size=1 if not empty_array else 0,
max_dim_size=10 if not empty_array else 0,
)
)
new_index = new_index[0]
else:
start = draw(
st.one_of(
st.integers(min_value=-2 * s, max_value=2 * s),
st.just(None),
)
)
end = draw(
st.one_of(
st.integers(min_value=-2 * s, max_value=2 * s),
st.just(None),
)
)
step = draw(
st.one_of(
(
st.integers(min_value=1, max_value=1 + 2 * s)
if not allow_neg_step
else st.integers(
min_value=-1 - 2 * s, max_value=1 + 2 * s
).filter(lambda x: x != 0)
),
st.just(None),
)
)
new_index = slice(start, end, step)
index += [new_index]
if len(index_types) and draw(st.booleans()):
start = draw(st.integers(min_value=0, max_value=len(index) - 1))
min_ = len(index) if len(index_types) < len(shape) else start
max_ = len(index) if len(index_types) < len(shape) else len(index) - 1
end = draw(st.integers(min_value=min_, max_value=max_))
if start != end:
index = index[:start] + [Ellipsis] + index[end:]
for _ in range(draw(st.integers(min_value=0, max_value=3))):
index.insert(draw(st.integers(0, len(index))), None)
index = tuple(index)
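    # a drawn query can end up like (2, slice(0, 3, 1), Ellipsis, None):
    # ints, slices, lists or arrays, optionally with an Ellipsis and
    # inserted None axes (illustrative, the structure is random)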
if len(index) == 1 and draw(st.booleans()):
index = index[0]
return dtype + ["int64"] * index_types.count("array"), array, index
@st.composite
def dtype_array_query_val(
draw,
*,
available_dtypes,
min_num_dims=1,
max_num_dims=3,
min_dim_size=0,
max_dim_size=10,
allow_mask=True,
allow_neg_step=True,
):
input_dtype, x, query = draw(
helpers.dtype_array_query(
available_dtypes=available_dtypes,
min_num_dims=min_num_dims,
max_num_dims=max_num_dims,
min_dim_size=min_dim_size,
max_dim_size=max_dim_size,
allow_mask=allow_mask,
allow_neg_step=allow_neg_step,
)
)
real_shape = x[query].shape
if len(real_shape):
val_shape = real_shape[draw(st.integers(0, len(real_shape))) :]
else:
val_shape = real_shape
val_dtype, val = draw(
helpers.dtype_and_values(
dtype=[input_dtype[0]],
shape=val_shape,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="log",
)
)
val_dtype = draw(
helpers.get_castable_dtype(
draw(available_dtypes), input_dtype[0], x if 0 not in x.shape else None
)
)[-1]
val = val[0].astype(val_dtype)
return input_dtype + [val_dtype], x, query, val
@st.composite
def create_nested_input(draw, dimensions, leaf_values):
if len(dimensions) != 1:
return [
draw(create_nested_input(dimensions[1:], leaf_values))
for _ in range(dimensions[0])
]
value = draw(st.sampled_from(leaf_values))
return [value for _ in range(dimensions[0])]
@st.composite
def cond_data_gen_helper(draw):
dtype_x = helpers.dtype_and_values(
available_dtypes=["float32", "float64"],
shape=helpers.ints(min_value=2, max_value=5).map(lambda x: (x, x)),
max_value=10,
min_value=-10,
allow_nan=False,
shared_dtype=True,
).filter(lambda x: np.linalg.cond(x[1][0].tolist()) < 1 / sys.float_info.epsilon)
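    # keep only well-conditioned matrices; near-singular inputs with a
    # condition number of 1 / eps or more give unstable results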
p = draw(
st.sampled_from([None, 2, -2, 1, -1, "fro", "nuc", float("inf"), -float("inf")])
)
dtype, x = draw(dtype_x)
return dtype, (x[0], p)
# helpers for tests (core and frontend) related to solve function
@st.composite
def get_first_solve_matrix(draw, adjoint=True):
# batch_shape, random_size, shared
# float16 causes a crash when filtering out matrices
# for which `np.linalg.cond` is large.
input_dtype_strategy = st.shared(
st.sampled_from(draw(helpers.get_dtypes("float"))).filter(
lambda x: "float16" not in x
),
key="shared_dtype",
)
input_dtype = draw(input_dtype_strategy)
shared_size = draw(
st.shared(helpers.ints(min_value=2, max_value=4), key="shared_size")
)
matrix = draw(
helpers.array_values(
dtype=input_dtype,
shape=(shared_size, shared_size),
min_value=2,
max_value=5,
).filter(lambda x: np.linalg.cond(x) < 1 / sys.float_info.epsilon)
)
if adjoint:
adjoint = draw(st.booleans())
if adjoint:
matrix = np.transpose(np.conjugate(matrix))
return input_dtype, matrix, adjoint
@st.composite
def get_second_solve_matrix(draw):
# batch_shape, shared, random_size
# float16 causes a crash when filtering out matrices
# for which `np.linalg.cond` is large.
input_dtype_strategy = st.shared(
st.sampled_from(draw(helpers.get_dtypes("float"))).filter(
lambda x: "float16" not in x
),
key="shared_dtype",
)
input_dtype = draw(input_dtype_strategy)
shared_size = draw(
st.shared(helpers.ints(min_value=2, max_value=4), key="shared_size")
)
return input_dtype, draw(
helpers.array_values(
dtype=input_dtype, shape=(shared_size, 1), min_value=2, max_value=5
)
)
@st.composite
def einsum_helper(draw):
# Todo: generalize to n equations and arrays
# Generate unique dimensions for both arrays
dims_1 = draw(st.integers(min_value=1, max_value=5))
dims_2 = draw(st.integers(min_value=1, max_value=5))
eq_1 = draw(
st.lists(
st.sampled_from(string.ascii_lowercase),
min_size=dims_1,
max_size=dims_1,
unique=True,
)
)
eq_2 = draw(
st.lists(
st.sampled_from(string.ascii_lowercase),
min_size=dims_2,
max_size=dims_2,
unique=True,
)
)
# Decide which dimensions are common and update eq_2 accordingly
common_dims = min(dims_1, dims_2)
for i in range(common_dims):
change = draw(st.booleans())
if change:
eq_2[i] = eq_1[i]
# Generate shapes according to the dimensions
shape_1 = [draw(st.integers(min_value=1, max_value=5)) for _ in eq_1]
shape_2 = [
(
draw(st.integers(min_value=1, max_value=5))
if eq_2[i] not in eq_1
else shape_1[eq_1.index(eq_2[i])]
)
for i in range(len(eq_2))
]
# Generate arrays and dtypes
dtype_1, value_1 = draw(
dtype_and_values(
available_dtypes=["float64"], shape=shape_1, min_value=-10, max_value=50
)
)
dtype_2, value_2 = draw(
dtype_and_values(
available_dtypes=["float64"], shape=shape_2, min_value=-10, max_value=50
)
)
# Determine output equation
common_symbols = set(eq_1).intersection(set(eq_2))
output_eq = []
if common_symbols:
output_length = draw(st.integers(min_value=1, max_value=len(common_symbols)))
output_eq = draw(
st.lists(
st.sampled_from(list(common_symbols)),
min_size=output_length,
max_size=output_length,
unique=True,
)
)
output_eq = "".join(output_eq)
eq = "".join(eq_1) + "," + "".join(eq_2) + "->" + output_eq
return eq, (value_1[0], value_2[0]), [dtype_1[0], dtype_2[0]]
@st.composite
def create_concatenable_arrays_dtypes(
draw,
min_num_dims,
max_num_dims,
min_num_arrays,
max_num_arrays,
concat_dim,
dtypes,
common_shape=None,
):
"""Draws a random number of arrays with concatenable or stackable
dimensions. Arrays have same number of dimensions, but their shape can
differ along a specified dimension (concat_dim). If concat_dim is None,
arrays have the same shape. Dtypes of arrays can differ.
Parameters
----------
min_num_dims
minimum number of dimensions
max_num_dims
maximum number of dimensions
min_num_arrays
minimum number of arrays
max_num_arrays
maximum number of arrays
concat_dim
dimension along which the shape of arrays can differ,
if None all the arrays will have the same shape
    dtypes
        list of dtypes from which the array dtypes will be drawn;
        each array can have a different dtype
    common_shape
        if not None, specifies the shape of the arrays
        (dimension concat_dim can still be modified)
"""
num_arrays = draw(helpers.ints(min_value=min_num_arrays, max_value=max_num_arrays))
if common_shape is None:
num_dims = draw(helpers.ints(min_value=min_num_dims, max_value=max_num_dims))
common_shape = draw(
helpers.list_of_size(
x=helpers.ints(min_value=1, max_value=5),
size=num_dims,
)
)
else:
num_dims = len(common_shape)
input_dtypes = draw(
helpers.array_dtypes(num_arrays=num_arrays, available_dtypes=dtypes)
)
array_shapes = [common_shape.copy() for i in range(num_arrays)]
if num_dims > 0 and concat_dim is not None:
unique_dims = draw(
helpers.list_of_size(
x=helpers.ints(min_value=1, max_value=5),
size=num_arrays,
)
)
for i in range(num_arrays):
array_shapes[i][concat_dim] = unique_dims[i]
xs = []
for sh, dt in zip(array_shapes, input_dtypes):
x = draw(
helpers.array_values(
shape=sh,
dtype=dt,
)
)
xs.append(x)
return xs, input_dtypes
# helpers for tests (core and frontend) related to solve function
@st.composite
def get_first_solve_batch_matrix(draw, choose_adjoint=False):
"""Generate non-singular left hand side of equation system possibly with a
single batch dimension at the beginning. Use get_second_solve_batch_matrix
to get the right hand side.
Parameters
----------
    choose_adjoint
        if True, a boolean value is randomly generated for the adjoint
        output; otherwise that output is False
Returns
-------
dtype
Data type of the array
matrix
Generated array
adjoint
boolean value specifying whether the system should be solved for
adjoint of array
"""
# float16 causes a crash when filtering out matrices
# for which `np.linalg.cond` is large.
input_dtype_strategy = st.shared(
st.sampled_from(draw(helpers.get_dtypes("float"))).filter(
lambda x: "float16" not in x
),
key="shared_dtype",
)
input_dtype = draw(input_dtype_strategy)
shared_size = draw(
st.shared(helpers.ints(min_value=2, max_value=4), key="shared_size")
)
batch_size = draw(
st.shared(helpers.ints(min_value=0, max_value=3), key="shared_batch_size")
)
    # the adjoint of a non-singular matrix is also non-singular
matrix = draw(
helpers.array_values(
dtype=input_dtype,
shape=tuple(
([] if batch_size == 0 else [batch_size]) + [shared_size, shared_size]
),
min_value=2,
max_value=5,
).filter(lambda x: np.all(np.linalg.cond(x) < 1 / sys.float_info.epsilon))
)
adjoint = False
if choose_adjoint:
adjoint = draw(st.booleans())
return input_dtype, matrix, adjoint
@st.composite
def get_second_solve_batch_matrix(draw, allow_simplified=True, choose_side=False):
"""Generate right hand side of equation system. Possible with a batch
dimension and possibly with several columns of values. Use
get_first_solve_batch_matrix to generate the left hand side.
Parameters
----------
allow_simplified
if True, a 1D vector with correct length can be returned as the right hand side
choose_side
Randomly choose if the system to be solved is AX=B or XA=B,
where X is the unknown solution
Returns
-------
dtype
Data type of the generated array
matrix
Generated array
left
If True, the system is AX=B, otherwise it is XA=B
"""
# float16 causes a crash when filtering out matrices
# for which `np.linalg.cond` is large.
input_dtype_strategy = st.shared(
st.sampled_from(draw(helpers.get_dtypes("float"))).filter(
lambda x: "float16" not in x
),
key="shared_dtype",
)
input_dtype = draw(input_dtype_strategy)
shared_size = draw(
st.shared(helpers.ints(min_value=2, max_value=4), key="shared_size")
)
batch_size = draw(
st.shared(helpers.ints(min_value=0, max_value=3), key="shared_batch_size")
)
num_systems = draw(st.shared(helpers.ints(min_value=1, max_value=4)))
left = True
if choose_side:
left = draw(st.booleans())
shape = []
if batch_size > 0:
shape += [batch_size]
if left:
if (
allow_simplified
and batch_size == 0
and num_systems == 1
and draw(st.booleans())
):
shape = tuple(shape + [shared_size])
else:
shape = tuple(shape + [shared_size, num_systems])
else:
shape = tuple(shape + [num_systems, shared_size])
return (
input_dtype,
draw(
helpers.array_values(
dtype=input_dtype, shape=shape, min_value=2, max_value=5
)
),
left,
)
| ivy/ivy_tests/test_ivy/helpers/hypothesis_helpers/array_helpers.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/helpers/hypothesis_helpers/array_helpers.py",
"repo_id": "ivy",
"token_count": 36564
} | 51 |
from .base import FrontendConfig, SupportedDeviecs, SupportedDtypes
import numpy as np
import pandas as pd
def get_config():
return PandasFrontendConfig()
class PandasFrontendConfig(FrontendConfig):
PandasArray = pd.core.arrays.numpy_.PandasArray
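    # pandas backs numpy-dtype Series with this extension-array wrapper;
    # Series.array returns it (used by native_array below)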
Dtype = np.dtype
Device = str
valid_devices = "cpu"
invalid_devices = ("gpu", "tpu")
valid_dtypes = [
"int8",
"int16",
"int32",
"int64",
"uint8",
"uint16",
"uint32",
"uint64",
"float16",
"float32",
"float64",
"complex64",
"complex128",
"bool",
]
invalid_dtypes = ["bfloat16"]
valid_numeric_dtypes = [
"int8",
"int16",
"int32",
"int64",
"uint8",
"uint16",
"uint32",
"uint64",
"float16",
"float32",
"float64",
"complex64",
"complex128",
]
invalid_numeric_dtypes = ["bfloat16"]
valid_int_dtypes = [
"int8",
"int16",
"int32",
"int64",
"uint8",
"uint16",
"uint32",
"uint64",
]
invalid_int_dtypes = []
valid_uint_dtypes = [
"uint8",
"uint16",
"uint32",
"uint64",
]
invalid_uint_dtypes = []
valid_float_dtypes = [
"float16",
"float32",
"float64",
]
invalid_float_dtypes = ["bfloat16"]
valid_complex_dtypes = [
"complex64",
"complex128",
]
invalid_complex_dtypes = []
# todo: add extension types
@property
def supported_devices(self):
return SupportedDeviecs(
valid_devices=self.valid_devices, invalid_devices=self.invalid_devices
)
@property
def supported_dtypes(self):
return SupportedDtypes(
valid_dtypes=self.valid_dtypes,
invalid_dtypes=self.invalid_dtypes,
valid_numeric_dtypes=self.valid_numeric_dtypes,
invalid_numeric_dtypes=self.invalid_numeric_dtypes,
valid_int_dtypes=self.valid_int_dtypes,
invalid_int_dtypes=self.invalid_int_dtypes,
valid_uint_dtypes=self.valid_uint_dtypes,
invalid_uint_dtypes=self.invalid_uint_dtypes,
valid_float_dtypes=self.valid_float_dtypes,
invalid_float_dtypes=self.invalid_float_dtypes,
valid_complex_dtypes=self.valid_complex_dtypes,
invalid_complex_dtypes=self.invalid_complex_dtypes,
)
def native_array(self, x):
return x.array
def is_native_array(self, x):
return isinstance(x.array, self.PandasArray)
def to_numpy(self, x):
return x.to_numpy()
def as_native_dtype(self, dtype: str):
return np.dtype(dtype)
def as_native_device(self, device: str):
return device
def isscalar(self, x):
return pd.api.types.is_scalar(x)
| ivy/ivy_tests/test_ivy/test_frontends/config/pandas.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/config/pandas.py",
"repo_id": "ivy",
"token_count": 1491
} | 52 |
# global
from hypothesis import strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
@handle_frontend_test(
fn_tree="jax.lax.cond",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
min_dim_size=1,
large_abs_safety_factor=2,
safety_factor_scale="log",
),
pred_cond=st.booleans(),
test_with_out=st.just(False),
)
def test_jax_cond(
*,
dtype_and_x,
pred_cond,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
def _test_true_fn(x):
return x + x
def _test_false_fn(x):
return x * x
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
pred=pred_cond,
true_fun=_test_true_fn,
false_fun=_test_false_fn,
operand=x[0],
)
@handle_frontend_test(
fn_tree="jax.lax.fori_loop",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_value=-1000,
max_value=1000,
min_num_dims=1,
min_dim_size=1,
),
lower=st.integers(min_value=-10, max_value=10),
upper=st.integers(min_value=-10, max_value=10),
test_with_out=st.just(False),
)
def test_jax_fori_loop(
*,
dtype_and_x,
lower,
upper,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
def _test_body_fn(x, y):
return x + y
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
lower=lower,
upper=upper,
body_fun=_test_body_fn,
init_val=x[0],
)
@handle_frontend_test(
fn_tree="jax.lax.map",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
min_dim_size=1,
),
test_with_out=st.just(False),
)
def test_jax_map(
*,
dtype_and_x,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
def _test_map_fn(x):
return x + x
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
f=_test_map_fn,
xs=x[0],
)
@handle_frontend_test(
fn_tree="jax.lax.scan",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_value=-1000,
max_value=1000,
min_num_dims=1,
min_dim_size=1,
),
length=st.integers(min_value=-10, max_value=10),
init=st.integers(min_value=-10, max_value=10),
test_with_out=st.just(False),
)
def test_jax_scan(
*,
dtype_and_x,
length,
init,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
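    # jax.lax.scan requires `length`, when given, to match the leading
    # dimension of `xs`, so skip draws where they disagree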
if length == 0 or length != len(dtype_and_x[1][0]):
return
def _test_scan_fn(carry, x):
return carry + x, x * 2
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
f=_test_scan_fn,
init=init,
xs=x[0],
length=length,
)
@handle_frontend_test(
fn_tree="jax.lax.switch",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
min_dim_size=1,
),
index=helpers.ints(min_value=-10, max_value=10),
test_with_out=st.just(False),
)
def test_jax_switch(
*,
dtype_and_x,
index,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
def _test_branch_1(x):
return x + x
def _test_branch_2(x):
return x * x
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
index=index,
branches=[_test_branch_1, _test_branch_2],
operand=x[0],
)
@handle_frontend_test(
fn_tree="jax.lax.while_loop",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_value=-1000,
max_value=1000,
min_num_dims=1,
min_dim_size=1,
),
)
def test_jax_while_loop(
*,
dtype_and_x,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
def _test_cond_fn(x):
def any_negative_real(arr):
for elem in arr:
if isinstance(elem, (int, float)) and elem < 0:
return True
elif isinstance(elem, complex):
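                    # complex values are unordered, so stop scanning and
                    # treat the input as containing no negative reals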
return False
elif isinstance(elem, (list, tuple)):
if any_negative_real(elem):
return True
return False
return any_negative_real(x)
def _test_body_fn(x):
return x + 1
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
cond_fun=_test_cond_fn,
body_fun=_test_body_fn,
init_val=x[0],
)
| ivy/ivy_tests/test_ivy/test_frontends/test_jax/test_lax/test_control_flow_operators.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_jax/test_lax/test_control_flow_operators.py",
"repo_id": "ivy",
"token_count": 3141
} | 53 |
# global
from hypothesis import strategies as st
import numpy as np
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
from ivy_tests.test_ivy.test_functional.test_core.test_searching import (
_broadcastable_trio,
)
from ...test_numpy.test_sorting_searching_counting.test_searching import (
_broadcastable_trio as _where_helper,
)
# --- Helpers --- #
# --------------- #
# searchsorted
@st.composite
def _searchsorted(draw):
dtype_x, x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes(
"numeric", full=False, key="searchsorted"
),
shape=(draw(st.integers(min_value=1, max_value=10)),),
),
)
dtype_v, v = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes(
"numeric", full=False, key="searchsorted"
),
min_num_dims=1,
)
)
input_dtypes = dtype_x + dtype_v
xs = x + v
side = draw(st.sampled_from(["left", "right"]))
sorter = None
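    # searchsorted assumes the first array is sorted when `sorter` is None,
    # so sort it along the last axis up front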
xs[0] = np.sort(xs[0], axis=-1)
return input_dtypes, xs, side, sorter
# unique
@st.composite
def _unique_helper(draw):
arr_dtype, arr, shape = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes(
"numeric", full=False, key="searchsorted"
),
min_num_dims=1,
min_dim_size=2,
ret_shape=True,
)
)
axis = draw(st.sampled_from(list(range(len(shape))) + [None]))
return_index = draw(st.booleans())
return_inverse = draw(st.booleans())
return_counts = draw(st.booleans())
return arr_dtype, arr, return_index, return_inverse, return_counts, axis
# --- Main --- #
# ------------ #
# argmax
@handle_frontend_test(
fn_tree="jax.numpy.argmax",
dtype_and_x=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
force_int_axis=True,
min_num_dims=1,
valid_axis=True,
),
keepdims=st.booleans(),
)
def test_jax_argmax(
*,
dtype_and_x,
keepdims,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x, axis = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
axis=axis,
out=None,
keepdims=keepdims,
)
# argmin
@handle_frontend_test(
fn_tree="jax.numpy.argmin",
dtype_and_x=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
force_int_axis=True,
min_num_dims=1,
valid_axis=True,
),
keepdims=st.booleans(),
)
def test_jax_argmin(
*,
dtype_and_x,
keepdims,
on_device,
fn_tree,
frontend,
backend_fw,
test_flags,
):
input_dtype, x, axis = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
axis=axis,
out=None,
keepdims=keepdims,
)
# argsort
@handle_frontend_test(
fn_tree="jax.numpy.argsort",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
min_axis=-1,
max_axis=0,
min_num_dims=1,
force_int_axis=True,
),
test_with_out=st.just(False),
)
def test_jax_argsort(
*,
dtype_x_axis,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
input_dtype, x, axis = dtype_x_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
axis=axis,
)
# argwhere
@handle_frontend_test(
fn_tree="jax.numpy.argwhere",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
test_with_out=st.just(False),
)
def test_jax_argwhere(
dtype_and_x,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
size=None,
fill_value=None,
)
# count_nonzero
@handle_frontend_test(
fn_tree="jax.numpy.count_nonzero",
dtype_input_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
min_num_dims=1,
force_int_axis=True,
valid_axis=True,
allow_neg_axes=True,
),
keepdims=st.booleans(),
test_with_out=st.just(False),
)
def test_jax_count_nonzero(
dtype_input_axis,
keepdims,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x, axis = dtype_input_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
axis=axis,
keepdims=keepdims,
)
# extract
@handle_frontend_test(
fn_tree="jax.numpy.extract",
broadcastables=_broadcastable_trio(),
)
def test_jax_extract(
broadcastables,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
cond, xs, dtype = broadcastables
helpers.test_frontend_function(
input_dtypes=dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
condition=cond,
arr=xs[0],
)
# flatnonzero
@handle_frontend_test(
fn_tree="jax.numpy.flatnonzero",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
),
test_with_out=st.just(False),
)
def test_jax_flatnonzero(
dtype_and_x,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
)
# nanargmax
@handle_frontend_test(
fn_tree="jax.numpy.nanargmax",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
min_axis=-1,
max_axis=0,
min_num_dims=1,
force_int_axis=True,
),
keep_dims=st.booleans(),
test_with_out=st.just(False),
)
def test_jax_nanargmax(
dtype_x_axis,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
keep_dims,
):
input_dtype, x, axis = dtype_x_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
axis=axis,
keepdims=keep_dims,
)
# nanargmin
@handle_frontend_test(
fn_tree="jax.numpy.nanargmin",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
min_axis=-1,
max_axis=0,
min_num_dims=1,
force_int_axis=True,
),
keep_dims=st.booleans(),
test_with_out=st.just(False),
)
def test_jax_nanargmin(
dtype_x_axis,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
keep_dims,
):
input_dtype, x, axis = dtype_x_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
axis=axis,
keepdims=keep_dims,
)
# msort
# @handle_frontend_test(
# fn_tree="jax.numpy.msort",
# dtype_and_x=helpers.dtype_and_values(
# available_dtypes=helpers.get_dtypes("numeric"),
# min_num_dims=2,
# min_dim_size=2,
# ),
# test_with_out=st.just(False),
# )
# def test_jax_msort(
# dtype_and_x,
# frontend,
# test_flags,
# fn_tree,
# ):
# input_dtype, x = dtype_and_x
# helpers.test_frontend_function(
# input_dtypes=input_dtype,
# frontend=frontend,
# test_flags=test_flags,
# fn_tree=fn_tree,
# a=x[0],
# )
# TODO: deprecated since jax 0.4.1.
# Uncomment with multiversion testing pipeline enabled.
# nonzero
@handle_frontend_test(
fn_tree="jax.numpy.nonzero",
dtype_and_a=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
test_with_out=st.just(False),
)
def test_jax_nonzero(
dtype_and_a,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
dtype, a = dtype_and_a
helpers.test_frontend_function(
input_dtypes=dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=a[0],
)
@handle_frontend_test(
fn_tree="jax.numpy.searchsorted",
dtype_x_v_side_sorter=_searchsorted(),
test_with_out=st.just(False),
)
def test_jax_searchsorted(
dtype_x_v_side_sorter,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
input_dtypes, xs, side, sorter = dtype_x_v_side_sorter
helpers.test_frontend_function(
input_dtypes=input_dtypes,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=xs[0],
v=xs[1],
side=side,
sorter=sorter,
)
# sort
@handle_frontend_test(
fn_tree="jax.numpy.sort",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
min_axis=-1,
max_axis=0,
min_num_dims=1,
force_int_axis=True,
),
test_with_out=st.just(False),
)
def test_jax_sort(
*,
dtype_x_axis,
frontend,
backend_fw,
fn_tree,
on_device,
test_flags,
):
input_dtype, x, axis = dtype_x_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
axis=axis,
)
# sort_complex
@handle_frontend_test(
fn_tree="jax.numpy.sort_complex",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
min_dim_size=1,
min_axis=-1,
max_axis=0,
),
test_with_out=st.just(False),
)
def test_jax_sort_complex(
*,
dtype_x_axis,
frontend,
backend_fw,
test_flags,
fn_tree,
on_device,
):
input_dtype, x, axis = dtype_x_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
test_values=False,
)
@handle_frontend_test(
fn_tree="jax.numpy.unique", fn_inputs=_unique_helper(), test_with_out=st.just(False)
)
def test_jax_unique(fn_inputs, backend_fw, frontend, test_flags, fn_tree, on_device):
arr_dtype, arr, return_index, return_inverse, return_counts, axis = fn_inputs
helpers.test_frontend_function(
input_dtypes=arr_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
ar=arr[0],
return_index=return_index,
return_inverse=return_inverse,
return_counts=return_counts,
axis=axis,
)
# where
@handle_frontend_test(
fn_tree="jax.numpy.where",
broadcastables=_where_helper(),
only_cond=st.booleans(),
size=st.integers(min_value=1, max_value=20),
fill_value=st.one_of(st.integers(-10, 10), st.floats(-10, 10), st.booleans()),
)
def test_jax_where(
*,
broadcastables,
only_cond,
size,
fill_value,
frontend,
backend_fw,
fn_tree,
on_device,
test_flags,
):
cond, x1, x2, dtype = broadcastables
if only_cond:
x1, x2 = None, None
else:
size, fill_value = None, None
helpers.test_frontend_function(
input_dtypes=["bool", dtype],
fn_tree=fn_tree,
on_device=on_device,
test_flags=test_flags,
frontend=frontend,
backend_to_test=backend_fw,
condition=cond,
x=x1,
y=x2,
size=size,
fill_value=fill_value,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_jax/test_numpy/test_searching_sorting.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_jax/test_numpy/test_searching_sorting.py",
"repo_id": "ivy",
"token_count": 6813
} | 54 |
# global
from hypothesis import strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
from ivy_tests.test_ivy.test_functional.test_core.test_linalg import _diag_helper
# --- Helpers --- #
# --------------- #
@st.composite
def _diag_flat_helper(draw):
x_shape = draw(
helpers.get_shape(
min_num_dims=1, max_num_dims=2, min_dim_size=1, max_dim_size=10
)
)
dtype_and_x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
shape=x_shape,
small_abs_safety_factor=2,
large_abs_safety_factor=2,
safety_factor_scale="log",
)
)
k = draw(helpers.ints(min_value=-5, max_value=5))
return dtype_and_x[0], dtype_and_x[1], k
# --- Main --- #
# ------------ #
# diag
@handle_frontend_test(
fn_tree="numpy.diag",
dtype_and_x_k=_diag_helper(),
)
def test_numpy_diag(
dtype_and_x_k,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x, k = dtype_and_x_k
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
v=x[0],
k=k,
)
# diagflat
@handle_frontend_test(
fn_tree="numpy.diagflat",
dtype_and_x_k=_diag_flat_helper(),
)
def test_numpy_diagflat(
dtype_and_x_k,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x, k = dtype_and_x_k
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
v=x[0],
k=k,
)
# tri
@handle_frontend_test(
fn_tree="numpy.tri",
rows=helpers.ints(min_value=3, max_value=10),
cols=helpers.ints(min_value=3, max_value=10),
k=helpers.ints(min_value=-10, max_value=10),
dtype=helpers.get_dtypes("valid", full=False),
test_with_out=st.just(False),
)
def test_numpy_tri(
rows,
cols,
k,
dtype,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
N=rows,
M=cols,
k=k,
dtype=dtype[0],
)
# tril
@handle_frontend_test(
fn_tree="numpy.tril",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=1,
min_num_dims=2,
max_num_dims=5,
min_dim_size=1,
max_dim_size=5,
),
k=helpers.ints(min_value=-10, max_value=10),
test_with_out=st.just(False),
)
def test_numpy_tril(
dtype_and_x,
k,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
m=x[0],
k=k,
)
# triu
@handle_frontend_test(
fn_tree="numpy.triu",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=1,
min_num_dims=2,
max_num_dims=5,
min_dim_size=1,
max_dim_size=5,
),
k=helpers.ints(min_value=-10, max_value=10),
test_with_out=st.just(False),
)
def test_numpy_triu(
dtype_and_x,
k,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
m=x[0],
k=k,
)
@handle_frontend_test(
fn_tree="numpy.vander",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
shape=st.tuples(
helpers.ints(min_value=1, max_value=10),
),
large_abs_safety_factor=15,
small_abs_safety_factor=15,
safety_factor_scale="log",
),
N=st.integers(min_value=1, max_value=10) | st.none(),
increasing=st.booleans(),
test_with_out=st.just(False),
)
def test_numpy_vander(
*, fn_tree, dtype_and_x, N, increasing, test_flags, backend_fw, frontend, on_device
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
on_device=on_device,
frontend=frontend,
fn_tree=fn_tree,
x=x[0],
N=N,
increasing=increasing,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_creation_routines/test_building_matrices.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_creation_routines/test_building_matrices.py",
"repo_id": "ivy",
"token_count": 2650
} | 55 |
# global
from hypothesis import strategies as st
import numpy as np
# local
import ivy_tests.test_ivy.helpers as helpers
import ivy_tests.test_ivy.test_frontends.test_numpy.helpers as np_frontend_helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test, BackendHandler
import ivy.functional.frontends.numpy as np_frontend
# --- Helpers --- #
# --------------- #
@st.composite
def _helper_c_(draw):
dim = draw(st.integers(1, 3))
num_of_elems = draw(st.integers(1, 5))
elem_shape = draw(helpers.get_shape(min_num_dims=dim, max_num_dims=dim))
ret = []
if dim == 1:
start = draw(st.integers(min_value=-100, max_value=100))
step = draw(st.integers(1, 3))
stop = start + 1 + (tuple(elem_shape)[0] - 1) * step
elem = slice(start, stop, step)
ret.append(elem)
input_dtypes, x, casting, dtype = draw(
np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
shape=elem_shape,
num_arrays=num_of_elems,
shared_dtype=True,
)
],
),
)
return x + ret
@st.composite
def _helper_r_(draw):
elems_in_last_dim = draw(st.integers(min_value=2, max_value=8))
num_of_elems = draw(st.integers(min_value=1, max_value=4))
dim = draw(st.one_of(st.just(1), st.integers(2, 4)))
first_elem_str = draw(st.booleans())
ret = []
if first_elem_str:
to_mat = draw(st.booleans())
if to_mat:
elem = draw(st.sampled_from(["c", "r"]))
dim = min(dim, 2)
else:
num = draw(st.integers(1, 3))
elem = ""
if num == 1:
elem += str(draw(st.integers(-1, dim - 1)))
elif num >= 2:
ndmin = draw(st.integers(1, 6))
elem += str(draw(st.integers(-1, ndmin - 1)))
elem += "," + str(ndmin)
if num == 3:
elem += "," + str(draw(st.integers(-1, ndmin - 1)))
ret.append(elem)
if "ndmin" not in locals():
ndmin = False
if dim == 1:
while num_of_elems > 0:
num_of_elems -= 1
elem_type = draw(st.sampled_from(["array", "slice"]))
if elem_type == "array":
if not ndmin:
shape = (draw(st.integers(1, 5)),)
else:
shape = (elems_in_last_dim,)
elem = draw(
helpers.array_values(
dtype=helpers.get_dtypes("valid"),
shape=shape,
)
)
if len(elem) == 1 and draw(st.booleans()):
elem = elem[0]
else:
start = draw(st.integers(min_value=-100, max_value=100))
step = draw(st.integers(1, 3))
if not ndmin:
stop = draw(st.integers(start + 1, start + 10))
else:
stop = start + 1 + (elems_in_last_dim - 1) * step
elem = slice(start, stop, step)
ret.append(elem)
else:
elem_shape = draw(helpers.get_shape(min_num_dims=dim, max_num_dims=dim))
input_dtypes, x, casting, dtype = draw(
np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
shape=elem_shape,
num_arrays=num_of_elems,
shared_dtype=True,
)
],
),
)
ret += x
return ret, elems_in_last_dim, dim
# --- Main --- #
# ------------ #
@handle_frontend_test(fn_tree="numpy.add", inputs=_helper_c_()) # dummy fn_tree
def test_numpy_c_(inputs, backend_fw):
ret_gt = np.c_.__getitem__(tuple(inputs))
with BackendHandler.update_backend(backend_fw):
ret = np_frontend.c_.__getitem__(tuple(inputs))
if isinstance(inputs[0], str) and inputs[0] in ["r", "c"]:
ret = ret._data
else:
ret = ret.ivy_array
assert np.allclose(ret, ret_gt)
@handle_frontend_test(
fn_tree="numpy.fill_diagonal",
dtype_x_axis=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=2,
min_dim_size=2,
max_num_dims=2,
),
val=helpers.floats(min_value=-10, max_value=10),
wrap=helpers.get_dtypes(kind="bool"),
test_with_out=st.just(False),
)
def test_numpy_fill_diagonal(
dtype_x_axis,
wrap,
val,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_x_axis
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtype,
on_device=on_device,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
a=x[0],
val=val,
wrap=wrap,
)
@handle_frontend_test(fn_tree="numpy.add", inputs=_helper_r_()) # dummy fn_tree
def test_numpy_r_(inputs, backend_fw):
inputs, elems_in_last_dim, dim = inputs
ret_gt = np.r_.__getitem__(tuple(inputs))
with BackendHandler.update_backend(backend_fw):
ret = np_frontend.r_.__getitem__(tuple(inputs))
if isinstance(inputs[0], str) and inputs[0] in ["r", "c"]:
ret = ret._data
else:
ret = ret.ivy_array
assert np.allclose(ret, ret_gt)
| ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_indexing_routines/test_inserting_data_into_arrays.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_indexing_routines/test_inserting_data_into_arrays.py",
"repo_id": "ivy",
"token_count": 3054
} | 56 |
# global
from hypothesis import strategies as st, assume
import numpy as np
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
from ivy_tests.test_ivy.test_functional.test_core.test_manipulation import ( # noqa
_get_splits,
)
# array_split
@handle_frontend_test(
fn_tree="numpy.array_split",
dtype_value=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"),
),
indices_or_sections=_get_splits(
min_num_dims=1, allow_none=False, is_mod_split=True
),
axis=st.shared(
helpers.get_axis(
shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"),
force_int=True,
),
key="target_axis",
),
test_with_out=st.just(False),
)
def test_numpy_array_split(
*,
dtype_value,
indices_or_sections,
axis,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, value = dtype_value
assume(isinstance(indices_or_sections, int))
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
ary=value[0],
indices_or_sections=indices_or_sections,
axis=axis,
)
# dsplit
@handle_frontend_test(
fn_tree="numpy.dsplit",
dtype_value=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=st.shared(helpers.get_shape(min_num_dims=3), key="value_shape"),
),
indices_or_sections=_get_splits(
min_num_dims=3, axis=2, allow_none=False, is_mod_split=True
),
test_with_out=st.just(False),
)
def test_numpy_dsplit(
*,
dtype_value,
indices_or_sections,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, value = dtype_value
if isinstance(indices_or_sections, np.ndarray):
assume(indices_or_sections.ndim == 0)
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
ary=value[0],
indices_or_sections=indices_or_sections,
)
# hsplit
@handle_frontend_test(
fn_tree="numpy.hsplit",
dtype_value=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=st.shared(helpers.get_shape(min_num_dims=2), key="value_shape"),
),
indices_or_sections=_get_splits(
min_num_dims=2, axis=1, allow_none=False, is_mod_split=True
),
test_with_out=st.just(False),
)
def test_numpy_hsplit(
*,
dtype_value,
indices_or_sections,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, value = dtype_value
if isinstance(indices_or_sections, np.ndarray):
assume(indices_or_sections.ndim == 0)
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
ary=value[0],
indices_or_sections=indices_or_sections,
)
# split
@handle_frontend_test(
fn_tree="numpy.split",
dtype_value=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"),
),
indices_or_sections=_get_splits(
min_num_dims=1, allow_none=False, is_mod_split=True
),
axis=st.shared(
helpers.get_axis(
shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"),
force_int=True,
),
key="target_axis",
),
test_with_out=st.just(False),
)
def test_numpy_split(
*,
dtype_value,
indices_or_sections,
axis,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, value = dtype_value
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
ary=value[0],
indices_or_sections=indices_or_sections,
axis=axis,
)
# vsplit
@handle_frontend_test(
fn_tree="numpy.vsplit",
dtype_value=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=st.shared(helpers.get_shape(min_num_dims=2), key="value_shape"),
),
indices_or_sections=_get_splits(
min_num_dims=2, axis=0, allow_none=False, is_mod_split=True
),
test_with_out=st.just(False),
)
def test_numpy_vsplit(
*,
dtype_value,
indices_or_sections,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, value = dtype_value
if isinstance(indices_or_sections, np.ndarray):
assume(indices_or_sections.ndim == 0)
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
ary=value[0],
indices_or_sections=indices_or_sections,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_manipulation_routines/test_splitting_arrays.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_manipulation_routines/test_splitting_arrays.py",
"repo_id": "ivy",
"token_count": 2641
} | 57 |
# global
from hypothesis import strategies as st
# local
from ivy_tests.test_ivy.test_functional.test_core.test_statistical import (
_statistical_dtype_values,
)
import ivy_tests.test_ivy.helpers as helpers
import ivy_tests.test_ivy.test_frontends.test_numpy.helpers as np_frontend_helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
# nanpercentile
@handle_frontend_test(
fn_tree="numpy.nanpercentile",
dtype_values_axis=_statistical_dtype_values(function="nanpercentile"),
where=np_frontend_helpers.where(),
keep_dims=st.booleans(),
)
def test_numpy_nanpercentile(
dtype_values_axis,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
keep_dims,
):
input_dtypes, values, axis = dtype_values_axis
if isinstance(axis, tuple):
axis = axis[0]
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
a=values[0][0],
q=values[0][1],
axis=axis,
out=None,
backend_to_test=backend_fw,
overwrite_input=None,
method=None,
keepdims=keep_dims,
interpolation=None,
frontend=frontend,
fn_tree=fn_tree,
test_flags=test_flags,
input_dtypes=input_dtypes,
)
# ptp
@handle_frontend_test(
fn_tree="numpy.ptp",
dtype_values_axis=_statistical_dtype_values(function="ptp"),
keep_dims=st.booleans(),
test_with_out=st.just(False),
)
def test_numpy_ptp(
dtype_values_axis,
frontend,
backend_fw,
test_flags,
fn_tree,
keep_dims,
):
input_dtypes, values, axis = dtype_values_axis
if isinstance(axis, tuple):
axis = axis[0]
helpers.test_frontend_function(
a=values[0],
axis=axis,
out=None,
keepdims=keep_dims,
frontend=frontend,
backend_to_test=backend_fw,
fn_tree=fn_tree,
test_flags=test_flags,
input_dtypes=input_dtypes,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_statistics/test_order_statistics.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_statistics/test_order_statistics.py",
"repo_id": "ivy",
"token_count": 990
} | 58 |
# global
from hypothesis import strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
from ivy_tests.test_ivy.test_functional.test_experimental.test_core.test_manipulation import ( # noqa
_get_dtype_values_k_axes_for_rot90,
)
# --- Helpers --- #
# --------------- #
@st.composite
def _arrays_dim_idx_n_dtypes(draw):
num_dims = draw(st.shared(helpers.ints(min_value=1, max_value=4), key="num_dims"))
num_arrays = 2
common_shape = draw(
helpers.lists(
x=helpers.ints(min_value=2, max_value=3),
min_size=num_dims - 1,
max_size=num_dims - 1,
)
)
_dim = draw(helpers.ints(min_value=0, max_value=num_dims - 1))
unique_dims = draw(
helpers.lists(
x=helpers.ints(min_value=2, max_value=3),
min_size=num_arrays,
max_size=num_arrays,
)
)
min_dim = min(unique_dims)
max_dim = max(unique_dims)
_idx = draw(
helpers.array_values(
shape=min_dim,
dtype="int64",
min_value=0,
max_value=max_dim,
exclude_min=False,
)
)
xs = []
# available_input_types = draw(helpers.get_dtypes("integer"))
# available_input_types = ["int32", "int64", "float16", "float32", "float64"]
available_input_types = ["int32", "int64"]
input_dtypes = draw(
helpers.array_dtypes(
available_dtypes=available_input_types,
num_arrays=num_arrays,
shared_dtype=True,
)
)
for ud, dt in zip(unique_dims, input_dtypes):
x = draw(
helpers.array_values(
shape=common_shape[:_dim] + [ud] + common_shape[_dim:],
dtype=dt,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
)
)
xs.append(x)
return xs, input_dtypes, _dim, _idx
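# Illustrative draw from the helper above (hypothetical values): with
# common_shape=[2, 3], _dim=1 and unique_dims=[2, 3], the two arrays get
# shapes (2, 2, 3) and (2, 3, 3), identical except along dimension _dim,
# which is the axis that index_add_ operates over.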
@st.composite
def dtypes_x_reshape_(draw):
shape = draw(helpers.get_shape(min_num_dims=1))
dtypes, x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
shape=shape,
)
)
return dtypes, x, shape
# --- Main --- #
# ------------ #
@handle_frontend_test(
fn_tree="paddle.tensor.manipulation.index_add_",
xs_dtypes_dim_idx=_arrays_dim_idx_n_dtypes(),
)
def test_paddle_index_add_(
*,
xs_dtypes_dim_idx,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
xs, input_dtypes, axis, indices = xs_dtypes_dim_idx
if xs[0].shape[axis] < xs[1].shape[axis]:
source, input = xs
else:
input, source = xs
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
frontend=frontend,
on_device=on_device,
x=input,
index=indices,
axis=axis,
value=source,
)
# reshape_
@handle_frontend_test(
fn_tree="paddle.tensor.manipulation.reshape_",
dtypes_x_reshape=dtypes_x_reshape_(),
)
def test_paddle_reshape_(
*,
dtypes_x_reshape,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, shape = dtypes_x_reshape
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
shape=shape,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_manipulation.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_manipulation.py",
"repo_id": "ivy",
"token_count": 1877
} | 59 |
# import tensorflow
from ivy_tests.test_ivy.test_frontends import NativeClass
tensorflow_classes_to_ivy_classes = {}
def convtensor(argument):
"""Convert NativeClass in argument to ivy frontend counterpart for
tensorflow."""
if isinstance(argument, NativeClass):
return tensorflow_classes_to_ivy_classes.get(argument._native_class)
return argument
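# Hedged usage sketch, kept as a comment; the NativeClass construction below
# is schematic and not necessarily the real call signature:
#
#   native = NativeClass(tf.Variable)
#   tensorflow_classes_to_ivy_classes[native._native_class] = FrontendVariable
#   convtensor(native)  # -> FrontendVariable, via the mapping above
#   convtensor(1.5)     # -> 1.5, non-NativeClass arguments pass through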
| ivy/ivy_tests/test_ivy/test_frontends/test_tensorflow/__init__.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_tensorflow/__init__.py",
"repo_id": "ivy",
"token_count": 126
} | 60 |
# global
import numpy as np
from hypothesis import strategies as st
import ivy
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
# --- Helpers --- #
# --------------- #
@st.composite
def _binary_focal_args(draw):
shape = st.tuples(st.integers(1, 10), st.integers(1, 10), st.integers(1, 10))
common_float_dtype = helpers.get_dtypes("float", full=False)
from_logits = draw(
helpers.dtype_and_values(
available_dtypes=draw(helpers.get_dtypes("bool")), shape=(1,)
)
)
    # branch on the drawn boolean value (element 1), not the dtype list (element 0)
    if from_logits[1][0]:
min_value = -10.0
max_value = 10.0
else:
min_value = 0.0
max_value = 1.0
dtype_y_true = draw(
helpers.dtype_and_values(
available_dtypes=draw(helpers.get_dtypes("integer")),
min_value=0,
max_value=2,
exclude_max=True,
shape=draw(st.shared(shape, key="shape")),
)
)
dtype_y_pred = draw(
helpers.dtype_and_values(
dtype=draw(st.shared(common_float_dtype, key="float_dtype")),
min_value=min_value,
max_value=max_value,
shape=draw(st.shared(shape, key="shape")),
)
)
dtype_label_smoothing = draw(
helpers.dtype_and_values(
dtype=draw(st.shared(common_float_dtype, key="float_dtype")),
min_value=0.0,
max_value=1.0,
exclude_min=False,
exclude_max=False,
shape=(1,),
)
)
dtype_gamma = draw(
helpers.dtype_and_values(
dtype=draw(st.shared(common_float_dtype, key="float_dtype")),
min_value=0.0,
max_value=10.0,
shape=(1,),
)
)
# attr = Tidx:type, default = DT_INT32, allowed = [DT_INT32, DT_INT64] > [Op:Mean]
dtype_axis = draw(
helpers.dtype_and_values(
available_dtypes=[ivy.int32, ivy.int64],
min_value=-len(draw(st.shared(shape, key="shape"))),
max_value=len(draw(st.shared(shape, key="shape"))),
shape=(1,),
)
)
dtype_true, y_true = dtype_y_true
dtype_pred, y_pred = dtype_y_pred
dtype_gamma, gamma = dtype_gamma
dtype_from_logits, from_logits = from_logits
dtype_label_smoothing, label_smoothing = dtype_label_smoothing
dtype_axis, axis = dtype_axis
dtypes = [
dtype_true[0],
dtype_pred[0],
dtype_gamma[0],
dtype_from_logits[0],
dtype_label_smoothing[0],
dtype_axis[0],
]
values = [
y_true[0],
y_pred[0],
gamma[0],
from_logits[0],
label_smoothing[0],
axis[0],
]
return dtypes, values
@st.composite
def _dtype_pred_and_labels(
draw,
*,
dtype=None,
available_dtypes=helpers.get_dtypes("numeric"),
min_pred_val=0,
    max_pred_val=1,  # prediction array output as probabilities
label_set=None,
min_label_val=0,
max_label_val=None,
allow_inf=False,
allow_nan=False,
exclude_min=False,
exclude_max=False,
sparse_label=False,
min_num_dims=0,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
shape=None,
):
if isinstance(min_dim_size, st._internal.SearchStrategy):
min_dim_size = draw(min_dim_size)
if isinstance(max_dim_size, st._internal.SearchStrategy):
max_dim_size = draw(max_dim_size)
if isinstance(available_dtypes, st._internal.SearchStrategy):
available_dtypes = draw(available_dtypes)
if dtype is None:
assert available_dtypes is not None, "Unspecified dtype or available_dtypes."
dtype = draw(
helpers.array_dtypes(
num_arrays=1,
available_dtypes=available_dtypes,
)
)
dtype.append("int32")
# initialize shapes for pred and label
if shape is not None:
if not isinstance(shape, (tuple, list)):
shape = draw(shape)
else:
shape = draw(
st.shared(
helpers.get_shape(
min_num_dims=min_num_dims,
max_num_dims=max_num_dims,
min_dim_size=min_dim_size,
max_dim_size=max_dim_size,
),
key="shape",
)
)
if not sparse_label:
label_shape = shape
else:
label_shape = shape[:-1]
pred = draw(
helpers.array_values(
dtype=dtype[0],
shape=shape,
min_value=min_pred_val,
max_value=max_pred_val,
allow_inf=allow_inf,
allow_nan=allow_nan,
exclude_min=exclude_min,
exclude_max=exclude_max,
)
)
# generate labels by restriction
if label_set is not None:
        length = 1
        for dim in label_shape:
            length *= dim
indices = draw(
helpers.list_of_size(
x=st.integers(min_value=0, max_value=len(label_set) - 1),
size=length,
)
)
        values = [label_set[i] for i in indices]
array = np.array(values)
labels = array.reshape(label_shape).tolist()
else:
labels = draw(
helpers.array_values(
dtype=dtype[1],
shape=label_shape,
min_value=min_label_val,
max_value=max_label_val,
allow_inf=allow_inf,
allow_nan=allow_nan,
exclude_min=exclude_min,
exclude_max=exclude_max,
)
)
return dtype, pred, labels
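# Hypothetical draw from the strategy above, for illustration only: with
# shape=(2, 3) and label_set=[-1, 1], one possible output is
#
#   dtype  = ["float32", "int32"]
#   pred   = [[0.1, 0.7, 0.3], [0.9, 0.2, 0.5]]
#   labels = [[-1, 1, 1], [1, -1, -1]]
#
# i.e. predictions bounded by min/max_pred_val and labels drawn from the
# restricted label set (with sparse_label=True the labels drop the last dim).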
# --- Main --- #
# ------------ #
# binary_accuracy
@handle_frontend_test(
fn_tree="tensorflow.keras.metrics.binary_accuracy",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
min_num_dims=1,
),
threshold=st.floats(min_value=0.0, max_value=1.0),
test_with_out=st.just(False),
)
def test_tensorflow_binary_accuracy(
*,
dtype_and_x,
threshold,
frontend,
test_flags,
fn_tree,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
y_true=x[0],
y_pred=x[1],
threshold=threshold,
)
# binary_crossentropy
@handle_frontend_test(
fn_tree="tensorflow.keras.metrics.binary_crossentropy",
dtype_pred_and_labels=_dtype_pred_and_labels(
available_dtypes=helpers.get_dtypes("float"),
min_pred_val=1e-6,
max_label_val=5,
min_dim_size=1,
min_num_dims=1,
),
from_logits=st.booleans(),
label_smoothing=helpers.floats(min_value=0.0, max_value=1.0),
test_with_out=st.just(False),
)
def test_tensorflow_binary_crossentropy(
*,
dtype_pred_and_labels,
from_logits,
label_smoothing,
frontend,
test_flags,
fn_tree,
on_device,
backend_fw,
):
input_dtype, y_pred, y_true = dtype_pred_and_labels
helpers.test_frontend_function(
input_dtypes=input_dtype[::-1],
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-1,
atol=1e-1,
y_true=y_true,
y_pred=y_pred,
from_logits=from_logits,
label_smoothing=label_smoothing,
)
# binary_focal_crossentropy
@handle_frontend_test(
fn_tree="tensorflow.keras.metrics.binary_focal_crossentropy",
binary_focal_args=_binary_focal_args(),
test_with_out=st.just(False),
)
def test_tensorflow_binary_focal_crossentropy(
*,
binary_focal_args,
frontend,
test_flags,
fn_tree,
on_device,
backend_fw,
):
dtypes, values = binary_focal_args
helpers.test_frontend_function(
input_dtypes=dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
y_true=values[0],
y_pred=values[1],
gamma=values[2],
from_logits=values[3],
label_smoothing=values[4],
axis=values[5],
)
# categorical_accuracy
@handle_frontend_test(
fn_tree="tensorflow.keras.metrics.categorical_accuracy",
dtype_and_y=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
shape=helpers.get_shape(
allow_none=False,
min_num_dims=1,
),
),
test_with_out=st.just(False),
)
def test_tensorflow_categorical_accuracy(
*,
dtype_and_y,
frontend,
test_flags,
fn_tree,
on_device,
backend_fw,
):
input_dtype, y = dtype_and_y
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
y_true=y[0],
y_pred=y[1],
)
# categorical_crossentropy
@handle_frontend_test(
fn_tree="tensorflow.keras.metrics.categorical_crossentropy",
y_true=st.lists(
st.integers(min_value=0, max_value=4), min_size=1, max_size=1
), # ToDo: we should be using the helpers
dtype_y_pred=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
shape=(5,),
min_value=-10,
max_value=10,
),
from_logits=st.booleans(),
label_smoothing=helpers.floats(min_value=0.0, max_value=1.0),
test_with_out=st.just(False),
)
def test_tensorflow_categorical_crossentropy(
*,
y_true,
dtype_y_pred,
from_logits,
label_smoothing,
frontend,
test_flags,
fn_tree,
on_device,
backend_fw,
):
y_true = ivy.array(y_true, dtype=ivy.float32)
dtype, y_pred = dtype_y_pred
# Perform softmax on prediction if it's not a probability distribution.
if not from_logits:
y_pred = ivy.exp(y_pred) / ivy.sum(ivy.exp(y_pred))
helpers.test_frontend_function(
        input_dtypes=[ivy.float32] + dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
y_true=y_true,
y_pred=y_pred,
from_logits=from_logits,
label_smoothing=label_smoothing,
)
# Cosine Similarity
@handle_frontend_test(
fn_tree="tensorflow.keras.metrics.cosine_similarity",
d_type=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"), shared_dtype=True, num_arrays=2
),
y_true=helpers.array_values(
dtype=ivy.int32, shape=(1, 5), min_value=1, max_value=5
),
y_pred=helpers.array_values(
dtype=ivy.int32, shape=(1, 5), min_value=5, max_value=10
),
test_with_out=st.just(False),
)
def test_tensorflow_cosine_similarity(
*,
d_type,
y_true,
y_pred,
frontend,
test_flags,
fn_tree,
on_device,
backend_fw,
):
helpers.test_frontend_function(
input_dtypes=d_type[0],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
y_true=y_true,
y_pred=y_pred,
)
# hinge
@handle_frontend_test(
fn_tree="tensorflow.keras.metrics.hinge",
dtype_pred_and_labels=_dtype_pred_and_labels(
available_dtypes=helpers.get_dtypes("float"),
label_set=[-1, 1],
min_num_dims=2,
min_dim_size=2,
),
test_with_out=st.just(False),
)
def test_tensorflow_hinge(
*,
dtype_pred_and_labels,
frontend,
test_flags,
fn_tree,
on_device,
backend_fw,
):
input_dtype, y_pred, y_true = dtype_pred_and_labels
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
y_pred=y_pred,
y_true=y_true,
)
# kl_divergence
@handle_frontend_test(
fn_tree="tensorflow.keras.metrics.kl_divergence",
aliases=["tensorflow.keras.metrics.kld"],
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
min_num_dims=1,
),
)
def test_tensorflow_kl_divergence(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
y_true=x[0],
y_pred=x[1],
)
# log_cosh
@handle_frontend_test(
fn_tree="tensorflow.keras.metrics.log_cosh",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=False,
min_num_dims=1,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
),
test_with_out=st.just(False),
)
def test_tensorflow_log_cosh(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
y_true=x[0],
y_pred=x[1],
)
# mean_absolute_error
@handle_frontend_test(
fn_tree="tensorflow.keras.metrics.mean_absolute_error",
aliases=["tensorflow.keras.metrics.mae"],
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
min_num_dims=1,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="log",
),
)
def test_tensorflow_mean_absolute_error(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
y_true=x[0],
y_pred=x[1],
)
# mean_absolute_percentage_error
@handle_frontend_test(
fn_tree="tensorflow.keras.metrics.mean_absolute_percentage_error",
aliases=["tensorflow.keras.metrics.mape"],
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
min_num_dims=1,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
),
)
def test_tensorflow_mean_absolute_percentage_error(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
y_true=x[0],
y_pred=x[1],
)
# mean_squared_error
@handle_frontend_test(
fn_tree="tensorflow.keras.metrics.mean_squared_error",
aliases=["tensorflow.keras.metrics.mse"],
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
min_num_dims=1,
large_abs_safety_factor=2,
small_abs_safety_factor=2,
),
)
def test_tensorflow_mean_squared_error(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
y_true=x[0],
y_pred=x[1],
)
# mean_squared_logarithmic_error
@handle_frontend_test(
fn_tree="tensorflow.keras.metrics.mean_squared_logarithmic_error",
aliases=["tensorflow.keras.metrics.msle"],
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
min_num_dims=1,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_tensorflow_metrics_mean_squared_logarithmic_error(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
y_true=x[0],
y_pred=x[1],
)
# poisson
@handle_frontend_test(
fn_tree="tensorflow.keras.metrics.poisson",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
shared_dtype=True,
min_num_dims=1,
),
test_with_out=st.just(False),
)
def test_tensorflow_poisson(
*,
dtype_and_x,
frontend,
test_flags,
fn_tree,
on_device,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
y_true=x[0],
y_pred=x[1],
)
# sparse_categorical_crossentropy
@handle_frontend_test(
fn_tree="tensorflow.keras.metrics.sparse_categorical_crossentropy",
y_true=st.lists(st.integers(min_value=0, max_value=4), min_size=1, max_size=1),
dtype_y_pred=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
shape=(5,),
min_value=-10,
max_value=10,
),
from_logits=st.booleans(),
test_with_out=st.just(False),
)
def test_tensorflow_sparse_categorical_crossentropy(
*,
y_true,
dtype_y_pred,
from_logits,
frontend,
test_flags,
fn_tree,
on_device,
backend_fw,
):
y_true = ivy.array(y_true, dtype=ivy.int32)
dtype, y_pred = dtype_y_pred
y_pred = y_pred[0]
# Perform softmax on prediction if it's not a probability distribution.
if not from_logits:
y_pred = ivy.exp(y_pred) / ivy.sum(ivy.exp(y_pred))
helpers.test_frontend_function(
input_dtypes=[ivy.int32] + dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
y_true=y_true,
        y_pred=y_pred,  # already unpacked above; avoid double indexing
from_logits=from_logits,
)
# sparse_top_k_categorical_accuracy
@handle_frontend_test(
fn_tree="tensorflow.keras.metrics.sparse_top_k_categorical_accuracy",
dtype_pred_and_labels=_dtype_pred_and_labels(
available_dtypes=helpers.get_dtypes("float"),
min_pred_val=1e-6,
max_label_val=5,
sparse_label=True,
shape=(5, 10),
),
k=st.integers(min_value=3, max_value=10),
test_with_out=st.just(False),
)
def test_tensorflow_sparse_top_k_categorical_accuracy(
*,
dtype_pred_and_labels,
k,
frontend,
test_flags,
fn_tree,
on_device,
backend_fw,
):
input_dtype, y_pred, y_true = dtype_pred_and_labels
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
y_true=y_true,
y_pred=y_pred,
k=k,
)
# squared_hinge
@handle_frontend_test(
fn_tree="tensorflow.keras.metrics.squared_hinge",
dtype_pred_and_labels=_dtype_pred_and_labels(
available_dtypes=helpers.get_dtypes("float"),
label_set=[-1, 1],
min_num_dims=2,
min_dim_size=2,
),
test_with_out=st.just(False),
)
def test_tensorflow_squared_hinge(
*,
dtype_pred_and_labels,
frontend,
test_flags,
fn_tree,
on_device,
backend_fw,
):
input_dtype, y_pred, y_true = dtype_pred_and_labels
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
y_pred=y_pred,
y_true=y_true,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_keras/test_metrics.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_keras/test_metrics.py",
"repo_id": "ivy",
"token_count": 10873
} | 61 |
import pytest
@pytest.fixture(scope="session")
def frontend():
return "torch"
| ivy/ivy_tests/test_ivy/test_frontends/test_torch/conftest.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_torch/conftest.py",
"repo_id": "ivy",
"token_count": 31
} | 62 |
# global
from hypothesis import strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
# --- Helpers --- #
# --------------- #
@st.composite
def _x_and_linear(draw, dtypes):
dtype = draw(dtypes)
in_features = draw(helpers.ints(min_value=1, max_value=2))
out_features = draw(helpers.ints(min_value=1, max_value=2))
x_shape = (
1,
1,
in_features,
)
weight_shape = (out_features, in_features)
bias_shape = (out_features,)
x = draw(
helpers.array_values(dtype=dtype[0], shape=x_shape, min_value=0, max_value=1)
)
weight = draw(
helpers.array_values(
dtype=dtype[0], shape=weight_shape, min_value=0, max_value=1
)
)
bias = draw(
helpers.array_values(dtype=dtype[0], shape=bias_shape, min_value=0, max_value=1)
)
return dtype, x, weight, bias
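# The shapes drawn above follow the usual linear-layer contract, so for
# x: (1, 1, in_features), weight: (out_features, in_features) and
# bias: (out_features,), the frontend is expected to compute roughly
#
#   y = x @ weight.T + bias    # y.shape == (1, 1, out_features)
#
# (a descriptive note on the assumed semantics, not part of the test)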
# --- Main --- #
# ------------ #
# linear
@handle_frontend_test(
fn_tree="torch.nn.functional.linear",
dtype_x_weight_bias=_x_and_linear(
dtypes=helpers.get_dtypes("float", full=False),
),
)
def test_torch_linear(
*,
dtype_x_weight_bias,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, x, weight, bias = dtype_x_weight_bias
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x,
weight=weight,
bias=bias,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_nn/test_functional/test_linear_functions.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_nn/test_functional/test_linear_functions.py",
"repo_id": "ivy",
"token_count": 760
} | 63 |
import pytest
@pytest.fixture(scope="session")
def frontend():
return "torchvision"
| ivy/ivy_tests/test_ivy/test_frontends/test_torchvision/conftest.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_torchvision/conftest.py",
"repo_id": "ivy",
"token_count": 32
} | 64 |
"""Collection of tests for unified general functions."""
# global
import copy
import warnings
import pytest
import numpy as np
# local
import ivy
# --- Helpers --- #
# --------------- #
def _mnai(n, idx, fn):
if len(idx) == 1:
n[idx[0]] = fn(n[idx[0]])
else:
_mnai(n[idx[0]], idx[1:], fn)
def _pnai(n, idx):
if len(idx) == 1:
del n[idx[0]]
else:
_pnai(n[idx[0]], idx[1:])
def _snai(n, idx, v):
if len(idx) == 1:
n[idx[0]] = v
else:
_snai(n[idx[0]], idx[1:], v)
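# A minimal illustration of the three recursive helpers above, kept as a
# comment so it does not run at collection time (hypothetical values):
#
#   nest = {"a": [[0], [1]]}
#   _snai(nest, ("a", 0, 0), 9)                # {"a": [[9], [1]]}
#   _mnai(nest, ("a", 1, 0), lambda v: v + 1)  # {"a": [[9], [2]]}
#   _pnai(nest, ("a", 0))                      # {"a": [[2]]}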
# --- Main --- #
# ------------ #
# only checking for dicts here, but other nested containers could be handled
# the same way via collections.abc (Sequence/Mapping/Iterable); a sketch
# follows map_nested_dicts below
def apply_fn_to_list(item, fun):
if isinstance(item, list):
return [apply_fn_to_list(x, fun) for x in item]
else:
return fun(item)
def map_nested_dicts(ob, func):
for k, v in ob.items():
if isinstance(v, dict):
map_nested_dicts(v, func)
else:
ob[k] = apply_fn_to_list(v, func)
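# A rough sketch of that generalisation, dispatching on collections.abc types
# instead of concrete dict/list (illustrative only, unused by the tests below):
#
#   from collections.abc import Mapping, Sequence
#
#   def map_nested(ob, func):
#       if isinstance(ob, Mapping):
#           return {k: map_nested(v, func) for k, v in ob.items()}
#       if isinstance(ob, Sequence) and not isinstance(ob, str):
#           return [map_nested(v, func) for v in ob]
#       return func(ob)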
# all_nested_indices
@pytest.mark.parametrize(
"nest", [{"a": [[0], [1]], "b": {"c": [[[2], [4]], [[6], [8]]]}}]
)
def test_all_nested_indices(nest):
indices = ivy.all_nested_indices(nest)
assert indices[0] == ["a", 0, 0]
assert indices[1] == ["a", 1, 0]
assert indices[2] == ["b", "c", 0, 0, 0]
assert indices[3] == ["b", "c", 0, 1, 0]
assert indices[4] == ["b", "c", 1, 0, 0]
assert indices[5] == ["b", "c", 1, 1, 0]
# all_nested_indices_w_nest_checks
@pytest.mark.parametrize(
"nest", [{"a": [[0], [1]], "b": {"c": [[[2], [4]], [[6], [8]]]}}]
)
def test_all_nested_indices_w_nest_checks(nest):
indices = ivy.all_nested_indices(nest, True)
assert indices[0] == ["a", 0, 0]
assert indices[1] == ["a", 0]
assert indices[2] == ["a", 1, 0]
assert indices[3] == ["a", 1]
assert indices[4] == ["a"]
assert indices[5] == ["b", "c", 0, 0, 0]
assert indices[6] == ["b", "c", 0, 0]
assert indices[7] == ["b", "c", 0, 1, 0]
assert indices[8] == ["b", "c", 0, 1]
assert indices[9] == ["b", "c", 0]
assert indices[10] == ["b", "c", 1, 0, 0]
assert indices[11] == ["b", "c", 1, 0]
assert indices[12] == ["b", "c", 1, 1, 0]
assert indices[13] == ["b", "c", 1, 1]
assert indices[14] == ["b", "c", 1]
assert indices[15] == ["b", "c"]
assert indices[16] == ["b"]
# copy_nest
def test_copy_nest():
nest = {
"a": [ivy.array([0]), ivy.array([1])],
"b": {"c": [ivy.array([[2], [4]]), ivy.array([[6], [8]])]},
}
nest_copy = ivy.copy_nest(nest)
# copied nests
assert nest["a"] is not nest_copy["a"]
assert nest["b"] is not nest_copy["b"]
assert nest["b"]["c"] is not nest_copy["b"]["c"]
# non-copied arrays
assert nest["a"][0] is nest_copy["a"][0]
assert nest["a"][1] is nest_copy["a"][1]
assert nest["b"]["c"][0] is nest_copy["b"]["c"][0]
assert nest["b"]["c"][1] is nest_copy["b"]["c"][1]
from collections import namedtuple
NAMEDTUPLE = namedtuple("OutNamedTuple", ["x", "y"])
nest = NAMEDTUPLE(x=ivy.array([1.0]), y=ivy.array([2.0]))
copied_nest = ivy.copy_nest(nest, include_derived=True)
assert isinstance(copied_nest, NAMEDTUPLE)
# duplicate_array_index_chains
@pytest.mark.parametrize("x", [[-1.0]])
@pytest.mark.parametrize("y", [[1.0]])
@pytest.mark.parametrize(
"nest", [[{"a": None, "b": {"c": None, "d": None}}, [None, None]]]
)
def test_duplicate_array_index_chains(nest, x, y):
x = ivy.array(x)
y = ivy.array(y)
nest[0]["a"] = nest[0]["b"]["d"] = nest[1][0] = x
nest[0]["b"]["c"] = nest[1][1] = y
duplicate_index_chains = ivy.duplicate_array_index_chains(nest)
assert duplicate_index_chains[0] == [[0, "a"], [0, "b", "d"], [1, 0]]
assert duplicate_index_chains[1] == [[0, "b", "c"], [1, 1]]
# Tests #
# ------#
# index_nest
@pytest.mark.parametrize(
"nest", [{"a": [[0], [1]], "b": {"c": (((2,), (4,)), ((6,), (8,)))}}]
)
@pytest.mark.parametrize(
"index", [("a", 0, 0), ("a", 1, 0), ("b", "c", 0), ("b", "c", 1, 0)]
)
def test_index_nest(nest, index):
ret = ivy.index_nest(nest, index)
true_ret = nest
for i in index:
true_ret = true_ret[i]
assert ret == true_ret
# insert_into_nest_at_indices
@pytest.mark.parametrize(
"nest", [{"a": [[0], [1]], "b": {"c": [[[2], [4]], [[6], [8]]]}}]
)
@pytest.mark.parametrize("indices", [(("a", 0, 0), ("b", "c", 1, 0))])
@pytest.mark.parametrize("values", [(1, 2)])
def test_insert_into_nest_at_indices(nest, indices, values):
ivy.insert_into_nest_at_indices(nest, indices, values)
def indices_nest(nest, indices):
ret = tuple(ivy.index_nest(nest, index) for index in indices)
return ret
assert indices_nest(nest, indices) == values
# insert_into_nest_at_index
@pytest.mark.parametrize(
"nest", [{"a": [[0], [1]], "b": {"c": [[[2], [4]], [[6], [8]]]}}]
)
@pytest.mark.parametrize("index", [("a", 0, 0), ("a", 1, 0), ("b", "c", 0)])
@pytest.mark.parametrize("value", [1])
def test_insert_into_nest_index(nest, index, value):
ivy.insert_into_nest_at_index(nest, index, value)
assert ivy.index_nest(nest, index) == value
# map_nest_at_index
@pytest.mark.parametrize(
"nest", [{"a": [[0], [1]], "b": {"c": [[[2], [4]], [[6], [8]]]}}]
)
@pytest.mark.parametrize(
"index", [("a", 0, 0), ("a", 1, 0), ("b", "c", 0, 0, 0), ("b", "c", 1, 0, 0)]
)
@pytest.mark.parametrize("fn", [lambda x: x + 2])
@pytest.mark.parametrize("shallow", [True, False])
def test_map_nest_at_index(nest, index, fn, shallow):
nest_copy = copy.deepcopy(nest)
result = ivy.map_nest_at_index(nest, index, fn, shallow=shallow)
_mnai(nest_copy, index, fn)
assert result == nest_copy
if shallow:
assert nest == nest_copy
else:
assert nest != nest_copy
# map_nest_at_indices
@pytest.mark.parametrize(
"nest", [{"a": [[0], [1]], "b": {"c": [[[2], [4]], [[6], [8]]]}}]
)
@pytest.mark.parametrize(
"indices", [(("a", 0, 0), ("a", 1, 0)), (("b", "c", 0, 0, 0), ("b", "c", 1, 0, 0))]
)
@pytest.mark.parametrize("fn", [lambda x: x + 2, lambda x: x**2])
@pytest.mark.parametrize("shallow", [True, False])
def test_map_nest_at_indices(nest, indices, fn, shallow):
nest_copy = copy.deepcopy(nest)
result = ivy.map_nest_at_indices(nest, indices, fn, shallow)
    def mnais(n, idxs, func):
        [_mnai(n, index, func) for index in idxs]
mnais(nest_copy, indices, fn)
assert result == nest_copy
if shallow:
assert nest == nest_copy
else:
assert nest != nest_copy
# multi_index_nest
@pytest.mark.parametrize(
"nest", [{"a": [[0], [1]], "b": {"c": (((2,), (4,)), ((6,), (8,)))}}]
)
@pytest.mark.parametrize(
"multi_indices", [(("a", 0, 0), ("a", 1, 0)), (("b", "c", 0), ("b", "c", 1, 0))]
)
def test_multi_index_nest(nest, multi_indices):
rets = ivy.multi_index_nest(nest, multi_indices)
true_rets = []
for indices in multi_indices:
true_ret = nest
for i in indices:
true_ret = true_ret[i]
true_rets.append(true_ret)
assert rets == true_rets
# nested_any
@pytest.mark.parametrize("x", [{"a": [[0, 1], [2, 3]], "b": {"c": [[0], [1]]}}])
@pytest.mark.parametrize("fn", [lambda x: True if x % 2 == 0 else False])
def test_nested_any(x, fn):
x_copy = copy.deepcopy(x)
x_bool = ivy.nested_any(x, fn)
map_nested_dicts(x_copy, fn)
def is_true_any(ob):
for v in ob.values():
if isinstance(v, dict):
is_true_any(v)
if isinstance(v, list):
for i, item in enumerate(v):
return item.count(True) == 1
x_copy_bool = is_true_any(x_copy)
assert x_copy_bool == x_bool
# nested_argwhere
@pytest.mark.parametrize(
"nest", [{"a": [[0], [1]], "b": {"c": [[[2], [4]], [[6], [8]]]}}]
)
def test_nested_argwhere(nest):
indices = ivy.nested_argwhere(nest, lambda x: x < 5)
assert indices[0] == ["a", 0, 0]
assert indices[1] == ["a", 1, 0]
assert indices[2] == ["b", "c", 0, 0, 0]
assert indices[3] == ["b", "c", 0, 1, 0]
# nested_argwhere_w_nest_checks
@pytest.mark.parametrize(
"nest", [{"a": [[0], [1]], "b": {"c": [[[2], [4]], [[6], [8]]]}}]
)
def test_nested_argwhere_w_nest_checks(nest):
indices = ivy.nested_argwhere(
nest, lambda x: isinstance(x, list) or (isinstance(x, int) and x < 5), True
)
assert indices[0] == ["a", 0, 0]
assert indices[1] == ["a", 0]
assert indices[2] == ["a", 1, 0]
assert indices[3] == ["a", 1]
assert indices[4] == ["a"]
assert indices[5] == ["b", "c", 0, 0, 0]
assert indices[6] == ["b", "c", 0, 0]
assert indices[7] == ["b", "c", 0, 1, 0]
assert indices[8] == ["b", "c", 0, 1]
assert indices[9] == ["b", "c", 0]
assert indices[10] == ["b", "c", 1, 0]
assert indices[11] == ["b", "c", 1, 1]
assert indices[12] == ["b", "c", 1]
assert indices[13] == ["b", "c"]
# nested_map
@pytest.mark.parametrize("x", [{"a": [[0, 1], [2, 3]], "b": {"c": [[0], [1]]}}])
@pytest.mark.parametrize("fn", [lambda x: x**2])
@pytest.mark.parametrize("shallow", [True, False])
def test_nested_map(x, fn, shallow):
x_copy = copy.deepcopy(x)
result = ivy.nested_map(fn, x, shallow=shallow)
map_nested_dicts(x_copy, fn)
assert result == x_copy
if shallow:
assert x == x_copy
else:
assert x != x_copy
# nested_multi_map
@pytest.mark.parametrize("func", [lambda x, _: x[0] - x[1]])
@pytest.mark.parametrize(
"nests",
[
[
np.asarray([-1.82, 1.25, -2.91, 0.109, 0.76, 1.7, 0.231, 4.45]),
np.asarray([-3.98, -3.86, 7.94, 2.08, 9.3, 2.35, 9.37, 1.7]),
]
],
)
def test_nested_multi_map(func, nests):
nests = ivy.nested_map(
lambda x: ivy.array(x) if isinstance(x, np.ndarray) else x,
nests,
include_derived=True,
shallow=False,
)
# without index_chains specification
nested_multi_map_res = ivy.nested_multi_map(func, nests)
# modify this to test for other functions
nests_without_multi_map_res = nests[0] - nests[1]
assert ivy.all_equal(nested_multi_map_res, nests_without_multi_map_res)
# prune_empty
@pytest.mark.parametrize("nest", [{"a": [{}, {}], "b": {"c": [1], "d": []}}])
def test_prune_empty(nest):
ret = ivy.prune_empty(ivy.copy_nest(nest))
assert ret == {"b": {"c": [1]}}
# prune_nest_at_index
@pytest.mark.parametrize(
"nest", [{"a": [[0], [1]], "b": {"c": [[[2], [4]], [[6], [8]]]}}]
)
@pytest.mark.parametrize(
"index", [("a", 0, 0), ("a", 1, 0), ("b", "c", 0), ("b", "c", 1, 0)]
)
def test_prune_nest_at_index(nest, index):
nest_copy = copy.deepcopy(nest)
# handling cases where there is nothing to prune
try:
ivy.prune_nest_at_index(nest, index)
_pnai(nest_copy, index)
except Exception:
warnings.warn("Nothing to delete.")
assert nest == nest_copy
# prune_nest_at_indices
@pytest.mark.parametrize(
"nest", [{"a": [[0], [1]], "b": {"c": [[[2], [4]], [[6], [8]]]}}]
)
@pytest.mark.parametrize("indices", [(("a", 0), ("a", 0, 0), ("a", 1), ("b", "c", 0))])
def test_prune_nest_at_indices(nest, indices):
nest_copy = copy.deepcopy(nest)
ivy.prune_nest_at_indices(nest_copy, indices)
print(nest_copy)
for idx in indices:
try:
ele_org = ivy.index_nest(nest, idx)
ele_new = ivy.index_nest(nest_copy, idx)
except ivy.utils.exceptions.IvyIndexError:
return
else:
assert ele_org != ele_new
# set_nest_at_index
@pytest.mark.parametrize(
"nest", [{"a": [[0], [1]], "b": {"c": [[[2], [4]], [[6], [8]]]}}]
)
@pytest.mark.parametrize(
"index", [("a", 0, 0), ("a", 1, 0), ("b", "c", 0), ("b", "c", 1, 0)]
)
@pytest.mark.parametrize("value", [-1])
@pytest.mark.parametrize("shallow", [True, False])
def test_set_nest_at_index(nest, index, value, shallow):
nest_copy = copy.deepcopy(nest)
result = ivy.set_nest_at_index(nest, index, value, shallow=shallow)
_snai(nest_copy, index, value)
assert result == nest_copy
if shallow:
assert nest == nest_copy
else:
assert nest != nest_copy
# set_nest_at_indices
@pytest.mark.parametrize(
"nest", [{"a": [[0], [1]], "b": {"c": [[[2], [4]], [[6], [8]]]}}]
)
@pytest.mark.parametrize(
"indices", [(("a", 0, 0), ("a", 1, 0)), (("b", "c", 0), ("b", "c", 1, 0))]
)
@pytest.mark.parametrize("values", [(1, 2)])
@pytest.mark.parametrize("shallow", [False, True])
def test_set_nest_at_indices(nest, indices, values, shallow):
nest_copy = copy.deepcopy(nest)
result = ivy.set_nest_at_indices(nest, indices, values, shallow=shallow)
def snais(n, idxs, vs):
[_snai(n, index, value) for index, value in zip(idxs, vs)]
snais(nest_copy, indices, values)
assert result == nest_copy
if shallow:
assert nest == nest_copy
else:
assert nest != nest_copy
| ivy/ivy_tests/test_ivy/test_functional/test_core/test_nest.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_functional/test_core/test_nest.py",
"repo_id": "ivy",
"token_count": 6272
} | 65 |
"""Collection of tests for unified neural network activation functions."""
# global
from hypothesis import strategies as st, assume
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_test
# gelu
@handle_test(
fn_tree="functional.ivy.gelu",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex"),
min_value=-1e4,
max_value=1e4,
),
approximate=st.booleans(),
complex_mode=st.sampled_from(["jax", "split", "magnitude"]),
)
def test_gelu(
*,
dtype_and_x,
approximate,
complex_mode,
test_flags,
backend_fw,
fn_name,
on_device,
):
dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_name=fn_name,
on_device=on_device,
atol_=1e-2,
rtol_=1e-2,
x=x[0],
approximate=approximate,
complex_mode=complex_mode,
)
# hardswish
@handle_test(
fn_tree="functional.ivy.hardswish",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex"),
large_abs_safety_factor=8,
small_abs_safety_factor=8,
safety_factor_scale="log",
),
complex_mode=st.sampled_from(["jax", "split", "magnitude"]),
)
def test_hardswish(
*,
dtype_and_x,
complex_mode,
test_flags,
backend_fw,
fn_name,
on_device,
):
dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_name=fn_name,
on_device=on_device,
x=x[0],
complex_mode=complex_mode,
)
# leaky_relu
@handle_test(
fn_tree="functional.ivy.leaky_relu",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes(
"float_and_complex", full=False, key="leaky_relu"
),
large_abs_safety_factor=16,
small_abs_safety_factor=16,
safety_factor_scale="log",
),
alpha=st.floats(min_value=-1e-4, max_value=1e-4),
complex_mode=st.sampled_from(["jax", "split", "magnitude"]),
)
def test_leaky_relu(
*, dtype_and_x, alpha, complex_mode, test_flags, backend_fw, fn_name, on_device
):
dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-2,
atol_=1e-2,
x=x[0],
alpha=alpha,
complex_mode=complex_mode,
)
# log_softmax
@handle_test(
fn_tree="functional.ivy.log_softmax",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex"),
min_num_dims=2,
large_abs_safety_factor=12,
small_abs_safety_factor=12,
safety_factor_scale="log",
min_value=-2,
),
axis=helpers.ints(min_value=-1, max_value=0),
)
def test_log_softmax(*, dtype_and_x, axis, test_flags, backend_fw, fn_name, on_device):
dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-02,
atol_=1e-02,
x=x[0],
axis=axis,
)
# mish
@handle_test(
fn_tree="functional.ivy.mish",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex"),
large_abs_safety_factor=25,
small_abs_safety_factor=25,
min_dim_size=2,
safety_factor_scale="log",
),
ground_truth_backend="jax",
)
def test_mish(*, dtype_and_x, test_flags, backend_fw, fn_name, on_device):
dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-02,
atol_=1e-02,
x=x[0],
)
# relu
@handle_test(
fn_tree="functional.ivy.relu",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
large_abs_safety_factor=8,
small_abs_safety_factor=8,
safety_factor_scale="log",
),
complex_mode=st.sampled_from(["jax", "split", "magnitude"]),
)
def test_relu(*, dtype_and_x, complex_mode, test_flags, backend_fw, fn_name, on_device):
dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_name=fn_name,
on_device=on_device,
x=x[0],
complex_mode=complex_mode,
)
# sigmoid
@handle_test(
fn_tree="functional.ivy.sigmoid",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex"),
large_abs_safety_factor=8,
small_abs_safety_factor=8,
safety_factor_scale="log",
),
complex_mode=st.sampled_from(["jax", "split", "magnitude"]),
ground_truth_backend="jax",
)
def test_sigmoid(
*, dtype_and_x, complex_mode, test_flags, backend_fw, fn_name, on_device
):
dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_name=fn_name,
on_device=on_device,
atol_=1e-02,
rtol_=1e-02,
x=x[0],
complex_mode=complex_mode,
)
# softmax
@handle_test(
fn_tree="functional.ivy.softmax",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex"),
min_num_dims=1,
large_abs_safety_factor=8,
small_abs_safety_factor=4,
safety_factor_scale="log",
),
axis=st.one_of(
helpers.ints(min_value=-1, max_value=0),
st.none(),
),
)
def test_softmax(*, dtype_and_x, axis, test_flags, backend_fw, fn_name, on_device):
dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-02,
atol_=1e-02,
x=x[0],
axis=axis,
)
# softplus
@handle_test(
fn_tree="functional.ivy.softplus",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
large_abs_safety_factor=4,
small_abs_safety_factor=4,
safety_factor_scale="log",
),
beta=st.one_of(helpers.number(min_value=0.1, max_value=10), st.none()),
threshold=st.one_of(helpers.number(min_value=0.1, max_value=30), st.none()),
complex_mode=st.sampled_from(["jax", "split", "magnitude"]),
)
def test_softplus(
*,
dtype_and_x,
beta,
threshold,
complex_mode,
test_flags,
backend_fw,
fn_name,
on_device,
):
assume(beta != 0)
assume(threshold != 0)
dtype, x = dtype_and_x
helpers.test_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_name=fn_name,
on_device=on_device,
rtol_=1e-02,
atol_=1e-02,
x=x[0],
beta=beta,
threshold=threshold,
complex_mode=complex_mode,
)
| ivy/ivy_tests/test_ivy/test_functional/test_nn/test_activations.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_functional/test_nn/test_activations.py",
"repo_id": "ivy",
"token_count": 3777
} | 66 |
import ivy
import numpy as np
import pytest
# These tests have been adapted from Tensorly
# https://github.com/tensorly/tensorly/blob/main/tensorly/tests/test_tt_tensor.py
@pytest.mark.parametrize("n_pad", [1, 2])
def test_pad_tt_rank(n_pad):
rank = (1, 2, 2, 2, 1)
tt = ivy.random_tt((4, 3, 5, 2), rank)
padded_tt = ivy.TTTensor(
ivy.TTTensor.pad_tt_rank(tt, n_padding=n_pad, pad_boundaries=False)
)
rec = tt.to_tensor()
rec_padded = padded_tt.to_tensor()
np.testing.assert_array_almost_equal(rec, rec_padded, decimal=4)
np.testing.assert_(padded_tt.rank == (1, *[i + n_pad for i in rank[1:-1]], 1))
# TODO: Uncomment once ivy.tensor_train is implemented
# @pytest.mark.parametrize(
# "shape, rank",
# [((3, 4, 5, 6, 2, 10), 10)],
# )
# def test_tt_to_tensor_random(shape, rank):
# tensor = ivy.random_uniform(shape)
# tensor_shape = tensor.shape
# factors = ivy.tensor_train(tensor, rank)
# reconstructed_tensor = ivy.TTTensor.tt_to_tensor(factors)
# np.testing.assert_(ivy.shape(reconstructed_tensor) == tensor_shape)
# D = len(factors)
# for k in range(D):
# (r_prev, _, r_k) = factors[k].shape
# assert r_prev <= rank, "TT rank with index " + str(k) + "exceeds rank"
# assert r_k <= rank, "TT rank with index " + str(k + 1) + "exceeds rank"
@pytest.mark.parametrize(
("shape", "rank"),
[((4, 5, 4, 8, 5), (1, 3, 2, 2, 4, 1))],
)
def test_tt_n_param(shape, rank):
factors = ivy.random_tt(shape, rank)
true_n_param = ivy.sum([ivy.prod(f.shape) for f in factors])
n_param = ivy.TTTensor._tt_n_param(shape, rank)
np.testing.assert_equal(n_param, true_n_param)
@pytest.mark.parametrize(
("n1", "n2", "n3", "shape1", "shape2", "shape3"),
[(3, 4, 2, (1, 3, 2), (2, 4, 2), (2, 2, 1))],
)
def test_tt_to_tensor(n1, n2, n3, shape1, shape2, shape3):
tensor = ivy.zeros((n1, n2, n3))
for i in range(n1):
for j in range(n2):
for k in range(n3):
tensor[i][j][k] = (i + 1) + (j + 1) + (k + 1)
tensor = ivy.array(tensor)
factors = [None] * 3
factors[0] = ivy.zeros(shape1)
factors[1] = ivy.zeros(shape2)
factors[2] = ivy.zeros(shape3)
for i in range(3):
for j in range(4):
for k in range(2):
factors[0][0][i][0] = i + 1
factors[0][0][i][1] = 1
factors[1][0][j][0] = 1
factors[1][0][j][1] = 0
factors[1][1][j][0] = j + 1
factors[1][1][j][1] = 1
factors[2][0][k][0] = 1
factors[2][1][k][0] = k + 1
factors = [ivy.array(f) for f in factors]
np.testing.assert_array_almost_equal(tensor, ivy.TTTensor.tt_to_tensor(factors))
@pytest.mark.parametrize(
"coef",
[(0.2)],
)
def test_validate_tt_rank(coef):
tensor_shape = tuple(ivy.random.randint(5, 10, shape=(4,)))
n_param_tensor = ivy.prod(tensor_shape)
rank = ivy.TTTensor.validate_tt_rank(tensor_shape, coef, rounding="floor")
n_param = ivy.TTTensor._tt_n_param(tensor_shape, rank)
np.testing.assert_(n_param <= n_param_tensor * coef)
rank = ivy.TTTensor.validate_tt_rank(tensor_shape, coef, rounding="ceil")
n_param = ivy.TTTensor._tt_n_param(tensor_shape, rank)
np.testing.assert_(n_param >= n_param_tensor * coef)
@pytest.mark.parametrize(
("true_shape", "true_rank"),
[
(
(3, 4, 5),
(1, 3, 2, 1),
)
],
)
def test_validate_tt_tensor(true_shape, true_rank):
factors = ivy.random_tt(true_shape, true_rank).factors
shape, rank = ivy.TTTensor.validate_tt_tensor(factors)
np.testing.assert_equal(
shape,
true_shape,
err_msg=f"Returned incorrect shape (got {shape}, expected {true_shape})",
)
np.testing.assert_equal(
rank,
true_rank,
err_msg=f"Returned incorrect rank (got {rank}, expected {true_rank})",
)
factors[0] = ivy.random_uniform(shape=(4, 4))
with np.testing.assert_raises(ValueError):
ivy.TTTensor.validate_tt_tensor(factors)
factors[0] = ivy.random_uniform(shape=(3, 3, 2))
with np.testing.assert_raises(ValueError):
ivy.TTTensor.validate_tt_tensor(factors)
| ivy/ivy_tests/test_ivy/test_misc/test_factorized_tensor/test_tt_tensor.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_misc/test_factorized_tensor/test_tt_tensor.py",
"repo_id": "ivy",
"token_count": 2119
} | 67 |
"""Collection of tests for normalization layers."""
# global
from hypothesis import strategies as st
# local
import ivy
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_method
# --- Helpers --- #
# --------------- #
@st.composite
def _generate_batchnorm_data(draw):
batch_size = draw(st.integers(min_value=2, max_value=5))
num_features = draw(st.integers(min_value=2, max_value=3))
num_dims = draw(st.integers(min_value=1, max_value=3))
dims = [draw(st.integers(1, 5)) for i in range(num_dims)]
x_shape = [batch_size] + [*dims] + [num_features]
dtype, inputs = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float", full=True),
shape=x_shape,
min_value=0,
max_value=1,
).filter(lambda x: x[0][0] not in ["float64"])
)
return dtype, inputs, num_features
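# The generated input is channels-last: e.g. num_dims=2 can yield
# x_shape == [batch_size, d0, d1, num_features], i.e. the (N, H, W, C)
# layout BatchNorm2D is exercised with here (illustrative shape only).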
# --- Main --- #
# ------------ #
@handle_method(
method_tree="BatchNorm2D.__call__",
dtype_and_x_features=_generate_batchnorm_data(),
momentum=st.floats(min_value=0.0, max_value=1.0, exclude_min=True),
init_with_v=st.booleans(),
method_with_v=st.booleans(),
)
def test_batch_norm_2d_layer(
*,
dtype_and_x_features,
momentum,
init_with_v,
method_with_v,
test_gradients,
on_device,
class_name,
method_name,
backend_fw,
ground_truth_backend,
init_flags,
method_flags,
):
input_dtype, x, features = dtype_and_x_features
helpers.test_method(
backend_to_test=backend_fw,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={
"num_features": features,
"eps": ivy.min_base,
"affine": True,
"momentum": momentum,
"track_running_stats": True,
"device": on_device,
"dtype": input_dtype[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={"inputs": x[0]},
class_name=class_name,
method_name=method_name,
init_with_v=init_with_v,
method_with_v=method_with_v,
test_gradients=test_gradients,
rtol_=1e-02,
atol_=1e-02,
on_device=on_device,
)
@handle_method(
method_tree="LayerNorm.__call__",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"), min_num_dims=2
),
new_std=st.floats(min_value=0.0, max_value=1.0),
init_with_v=st.booleans(),
method_with_v=st.booleans(),
)
def test_layer_norm_layer(
*,
dtype_and_x,
new_std,
init_with_v,
method_with_v,
test_gradients,
on_device,
class_name,
method_name,
backend_fw,
ground_truth_backend,
init_flags,
method_flags,
):
input_dtype, x = dtype_and_x
helpers.test_method(
backend_to_test=backend_fw,
ground_truth_backend=ground_truth_backend,
init_flags=init_flags,
method_flags=method_flags,
init_all_as_kwargs_np={
"normalized_shape": x[0].shape,
"eps": ivy.min_base,
"elementwise_affine": True,
"new_std": new_std,
"device": on_device,
"dtype": input_dtype[0],
},
method_input_dtypes=input_dtype,
method_all_as_kwargs_np={"inputs": x[0]},
class_name=class_name,
method_name=method_name,
init_with_v=init_with_v,
method_with_v=method_with_v,
test_gradients=test_gradients,
on_device=on_device,
)
| ivy/ivy_tests/test_ivy/test_stateful/test_norms.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_stateful/test_norms.py",
"repo_id": "ivy",
"token_count": 1796
} | 68 |
import pickle # noqa
from pydriller import Repository
import os # noqa
import bz2
import _pickle as cPickle
BACKENDS = ["numpy", "jax", "tensorflow", "torch"]
def get_tests(_tests_file, _line):
tests_file_line = set()
if 0 <= _line < len(_tests_file):
tests_file_line = _tests_file[_line]
return set() if len(tests_file_line) >= MAX_TESTS else tests_file_line
def determine_tests_line(_tests_file, _line, _tests_to_run):
tests_file_line = get_tests(_tests_file, _line)
tests_file_prev = get_tests(_tests_file, _line - 1)
tests_file_next = get_tests(_tests_file, _line + 1)
_tests_to_run.update(tests_file_line)
_tests_to_run.update(tests_file_prev)
_tests_to_run.update(tests_file_next)
return _tests_to_run
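# Worked example of the windowing above (assumed inputs, comment only):
#
#   tests_file = [{0}, {1}, {2, 3}]   # per-line sets of test indices
#   determine_tests_line(tests_file, 1, set())  # -> {0, 1, 2, 3}
#
# i.e. the tests mapped to the edited line plus both neighbours, since an
# edit can shift the coverage of adjacent lines.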
MAX_TESTS = 10
if __name__ == "__main__":
tests = bz2.BZ2File("tests.pbz2", "rb")
tests = cPickle.load(tests)
ref_commit_hash = tests["commit"]
print("Reference Commit: ", ref_commit_hash)
tests_to_run = set()
for commit in Repository(".", single=ref_commit_hash).traverse_commits():
ref_commit = commit._c_object
break
for commit in Repository(".", order="reverse").traverse_commits():
tests["commit"] = commit.hash
diff_index = ref_commit.diff(commit._c_object, create_patch=True)
modified_files = commit._parse_diff(diff_index)
for file in modified_files:
try:
file_name = f"{file.new_path},cover"
except Exception:
continue
if file_name not in tests.keys():
continue
tests_file = tests[file_name]
change = file.diff_parsed
added = {x - 1 for (x, _) in change["added"]}
deleted = {x - 1 for (x, _) in change["deleted"]}
updated = added.intersection(deleted)
added = added.difference(updated)
deleted = deleted.difference(updated)
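            # e.g. added={3, 7}, deleted={3, 5} -> updated={3}, added={7},
            # deleted={5}: a line present in both diffs counts as an in-place
            # update rather than a pure insertion/removal (illustrative values)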
# Now Update the Mapping and compute the tests to run
for line in deleted:
tests_to_run = determine_tests_line(tests_file, line, tests_to_run)
for line in sorted(deleted, reverse=True):
if line < len(tests_file):
del tests_file[line]
for line in added:
top = -1
bottom = -1
if 0 <= line - 1 < len(tests_file):
top = tests_file[line - 1]
if 0 <= line + 1 < len(tests_file):
bottom = tests_file[line + 1]
tests_line = set()
if top != -1 and bottom != -1:
tests_line = top.intersection(bottom)
elif top != -1:
tests_line = top
elif bottom != -1:
tests_line = bottom
tests_file.insert(line, tests_line)
tests[file_name] = tests_file
# Now Compute the Tests to Run
for line in updated:
tests_to_run = determine_tests_line(tests_file, line, tests_to_run)
for line in added:
tests_to_run = determine_tests_line(tests_file, line, tests_to_run)
break
with bz2.BZ2File("tests.pbz2", "w") as f:
cPickle.dump(tests, f)
print("----- Determined Tests -----")
print(len(tests_to_run))
for test_index in tests_to_run:
print(tests["index_mapping"][test_index])
print("----------------------------")
with open("tests_to_run", "w") as f:
for test_index in tests_to_run:
test = tests["index_mapping"][test_index]
f.write(test + "\n")
| ivy/scripts/determine_tests/array_api_determine_tests.py/0 | {
"file_path": "ivy/scripts/determine_tests/array_api_determine_tests.py",
"repo_id": "ivy",
"token_count": 1751
} | 69 |
import os
import sys
import git
import bz2
import _pickle as cPickle
# The path to your Mapping directory
mapping_dir = "Mapping/"
# Check if the directory exists
if not os.path.exists(mapping_dir):
print(f"Directory does not exist: {mapping_dir}")
sys.exit(1)
# Create a Repo object to interact with the Git repositories
current_repo = git.Repo("ivy/")
mapping_repo = git.Repo(mapping_dir)
# Get the commit history of the current repository (limit to top 100 commits)
current_repo_commits = [
commit.hexsha for commit in current_repo.iter_commits(max_count=100)
]
# The path to the tests.pbz2 file
test_file_path = os.path.join(mapping_dir, "tests.pbz2")
# Go back in the history of the Mapping repository
for commit in mapping_repo.iter_commits():
print(commit.hexsha)
try:
mapping_repo.git.checkout(commit)
except git.GitCommandError as e:
print(f"Error checking out commit: {commit.hexsha}\n{str(e)}")
continue
# Check if the tests.pbz2 file exists
if not os.path.isfile(test_file_path):
print(f"File does not exist in this commit: {commit.hexsha}")
continue
# Unpickle the file
tests = bz2.BZ2File(test_file_path, "rb")
tests = cPickle.load(tests)
# Get the commit hash
commit_hash = tests.get("commit")
print("Commit:", commit_hash)
if commit_hash is None:
print("Commit hash not found in the test dictionary.")
continue
# Check if the commit hash exists in the current repository's history
if commit_hash in current_repo_commits:
print(f"Found matching commit hash in current repository: {commit_hash}")
break
| ivy/scripts/setup_tests/clone-mapping.py/0 | {
"file_path": "ivy/scripts/setup_tests/clone-mapping.py",
"repo_id": "ivy",
"token_count": 618
} | 70 |
git config pull.rebase true
git stash
git pull
git stash apply
| ivy/scripts/shell/stash_pull.sh/0 | {
"file_path": "ivy/scripts/shell/stash_pull.sh",
"repo_id": "ivy",
"token_count": 18
} | 71 |
{
"name": "Ivy Development Environment (image)",
"image": "unifyai/ivy:latest",
"customizations": {
"vscode": {
"extensions": [
"ms-python.python"
],
"settings": {
"python.defaultInterpreterPath": "/opt/miniconda/envs/multienv/bin/python3"
}
}
},
"postCreateCommand": {
"post_create": "bash .devcontainer/post_create_commands.sh",
"bashrc": "echo \"alias python=python3\" >> ~/.bashrc"
},
"initializeCommand": "docker pull unifyai/ivy:latest",
// Use 'forwardPorts' to make a list of ports inside the container available locally.
// "forwardPorts": [],
    // Uncomment when using a ptrace-based debugger for languages like C++, Go, and Rust
// "runArgs": [ "--cap-add=SYS_PTRACE", "--security-opt", "seccomp=unconfined" ],
// Uncomment to use the Docker CLI from inside the container. See https://aka.ms/vscode-remote/samples/docker-from-docker.
// "mounts": [ "source=/var/run/docker.sock,target=/var/run/docker.sock,type=bind" ],
// Uncomment to connect as a non-root user if you've added one. See https://aka.ms/vscode-remote/containers/non-root.
// "remoteUser": "vscode",
"features": {
"ghcr.io/devcontainers/features/common-utils:2": {
"installZsh": true,
"configureZshAsDefaultShell": true,
"installOhMyZsh": true,
"upgradePackages": false
},
"ghcr.io/devcontainers/features/docker-outside-of-docker:1": {
"moby": true,
"installDockerBuildx": true,
"version": "20.10",
"dockerDashComposeVersion": "v2"
},
"ghcr.io/devcontainers/features/github-cli:1": {
"installDirectlyFromGitHubRelease": true,
"version": "latest"
}
}
}
| ivy/.devcontainer/devcontainer.json/0 | {
"file_path": "ivy/.devcontainer/devcontainer.json",
"repo_id": "ivy",
"token_count": 652
} | 0 |
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="VcsDirectoryMappings">
<mapping directory="$PROJECT_DIR$" vcs="Git" />
</component>
</project>
| ivy/.idea/vcs.xml/0 | {
"file_path": "ivy/.idea/vcs.xml",
"repo_id": "ivy",
"token_count": 67
} | 1 |
FROM debian:buster
WORKDIR /ivy
ARG fw
ARG pycon=3.10
# Install miniconda
ENV CONDA_DIR /opt/miniconda/
RUN apt clean && \
rm -rf /var/lib/apt/lists/* && \
apt-get update && \
apt-get install -y wget && \
wget --quiet https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh && \
/bin/bash ~/miniconda.sh -b -p /opt/miniconda
ENV PATH=$CONDA_DIR/bin:$PATH
RUN conda create --name multienv python==$pycon
# to fix protobuf conflicts
ENV PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION python
ENV PATH=/opt/miniconda/envs/multienv/bin:$PATH
RUN apt-get update && \
apt-get install -y python3-pip python3-tk && \
apt-get install -y libsm6 libxext6 libxrender-dev libgl1-mesa-glx && \
apt-get install -y git && \
apt-get install -y rsync && \
apt-get install -y libusb-1.0-0 && \
apt-get install -y libglib2.0-0 && \
pip3 install pip-autoremove && \
pip3 install --upgrade pip && \
pip3 install setuptools==58.5.3
# Install Ivy Upstream
RUN git clone --progress --recurse-submodules https://github.com/unifyai/ivy --depth 1 && \
cd ivy && \
cd ivy_tests/array_api_testing/test_array_api && \
pip3 install --no-cache-dir -r requirements.txt
COPY /docker/multiversion_framework_directory.py .
COPY /docker/requirement_mappings_multiversion.json .
COPY /docker/multiversion_testing_requirements.txt .
# requirement mappings directs which dependency to be installed and where
SHELL ["/bin/bash", "-c"]
RUN python3 multiversion_framework_directory.py $fw && \
pip install -r multiversion_testing_requirements.txt && \
pip-autoremove torch -y && \
pip-autoremove tensorflow -y && \
pip-autoremove jax -y
ENV PATH=/opt/miniconda/envs/multienv/bin:$PATH
| ivy/docker/DockerfileMultiversion/0 | {
"file_path": "ivy/docker/DockerfileMultiversion",
"repo_id": "ivy",
"token_count": 699
} | 2 |
{% extends "top_level_module.rst" %}
{% block toctree -%}
{% if functions %}
.. autosummary::
:toctree: {{name}}
:template: functional_module.rst
:hide-table:
:recursive:
{% for function in functions %}
{% if not function.startswith('_') %}
{{ fullname }}.{{ function }}
{% endif %}
{%- endfor %}
{% endif %}
{% if modules %}
.. autosummary::
:toctree: {{name}}
:template: top_functional_module.rst
:recursive:
{% for module in modules %}
{{ module }}
{%- endfor %}
{% endif %}
{% endblock %}
{% block options %}
:special-members: __init__
:show-inheritance:
{% endblock %}
{% block custom_content %}
{% for function in functions %}
.. autofunction:: ivy.{{ function }}
{% endfor %}
{% for class in classes %}
.. autoclass:: ivy.{{ class }}
{% endfor %}
{% for attribute in attributes %}
.. autoivydata:: {{ fullname }}.{{ attribute }}
{% endfor %}
{% if not functions and not classes and not attributes and not modules %}
There are no functions in this module yet. 🚧
{% endif %}
{% endblock %}
| ivy/docs/_templates/top_functional_module.rst/0 | {
"file_path": "ivy/docs/_templates/top_functional_module.rst",
"repo_id": "ivy",
"token_count": 393
} | 3 |
Arrays
======
.. _`inputs_to_native_arrays`: https://github.com/unifyai/ivy/blob/1eb841cdf595e2bb269fce084bd50fb79ce01a69/ivy/func_wrapper.py#L149
.. _`outputs_to_ivy_arrays`: https://github.com/unifyai/ivy/blob/1eb841cdf595e2bb269fce084bd50fb79ce01a69/ivy/func_wrapper.py#L209
.. _`empty class`: https://github.com/unifyai/ivy/blob/529c8c0f128ff28331da7c8f52912d777d786cbe/ivy/__init__.py#L8
.. _`overwritten`: https://github.com/unifyai/ivy/blob/529c8c0f128ff28331da7c8f52912d777d786cbe/ivy/functional/backends/torch/__init__.py#L11
.. _`self._data`: https://github.com/unifyai/ivy/blob/529c8c0f128ff28331da7c8f52912d777d786cbe/ivy/array/__init__.py#L89
.. _`ArrayWithElementwise`: https://github.com/unifyai/ivy/blob/529c8c0f128ff28331da7c8f52912d777d786cbe/ivy/array/elementwise.py#L12
.. _`ivy.Array.add`: https://github.com/unifyai/ivy/blob/63d9c26acced9ef40e34f7b4fc1c1a75017f9c69/ivy/array/elementwise.py#L22
.. _`programmatically`: https://github.com/unifyai/ivy/blob/529c8c0f128ff28331da7c8f52912d777d786cbe/ivy/__init__.py#L148
.. _`backend type hints`: https://github.com/unifyai/ivy/blob/8605c0a50171bb4818d0fb3e426cec874de46baa/ivy/functional/backends/torch/elementwise.py#L219
.. _`Ivy type hints`: https://github.com/unifyai/ivy/blob/8605c0a50171bb4818d0fb3e426cec874de46baa/ivy/functional/ivy/elementwise.py#L1342
.. _`__setitem__`: https://github.com/unifyai/ivy/blob/8605c0a50171bb4818d0fb3e426cec874de46baa/ivy/array/__init__.py#L234
.. _`function wrapping`: https://github.com/unifyai/ivy/blob/0f131178be50ea08ec818c73078e6e4c88948ab3/ivy/func_wrapper.py#L170
.. _`inherits`: https://github.com/unifyai/ivy/blob/8cbffbda9735cf16943f4da362ce350c74978dcb/ivy/array/__init__.py#L44
.. _`is the case`: https://data-apis.org/array-api/latest/API_specification/array_object.html
.. _`__add__`: https://github.com/unifyai/ivy/blob/e4d9247266f5d99faad59543923bb24b88a968d9/ivy/array/__init__.py#L291
.. _`__sub__`: https://github.com/unifyai/ivy/blob/e4d9247266f5d99faad59543923bb24b88a968d9/ivy/array/__init__.py#L299
.. _`__mul__`: https://github.com/unifyai/ivy/blob/e4d9247266f5d99faad59543923bb24b88a968d9/ivy/array/__init__.py#L307
.. _`__truediv__`: https://github.com/unifyai/ivy/blob/e4d9247266f5d99faad59543923bb24b88a968d9/ivy/array/__init__.py#L319
.. _`repo`: https://github.com/unifyai/ivy
.. _`discord`: https://discord.gg/sXyFF8tDtm
.. _`arrays thread`: https://discord.com/channels/799879767196958751/1189905906905919609
.. _`wrapped logic`: https://github.com/unifyai/ivy/blob/6a729004c5e0db966412b00aa2fce174482da7dd/ivy/func_wrapper.py#L95
.. _`NumPy's`: https://numpy.org/doc/stable/user/basics.dispatch.html#basics-dispatch
.. _`PyTorch's`: https://pytorch.org/docs/stable/notes/extending.html#extending-torch
There are two types of arrays in Ivy: the :class:`ivy.NativeArray` and the :class:`ivy.Array`.
Native Array
------------
The :class:`ivy.NativeArray` is simply a placeholder class for a backend-specific array class, such as :class:`np.ndarray`, :class:`tf.Tensor`, :class:`torch.Tensor` or :class:`jaxlib.xla_extension.DeviceArray`.
When no framework is set, this is an `empty class`_.
When a framework is set, this is `overwritten`_ with the backend-specific array class.
Ivy Array
---------
The :class:`ivy.Array` is a simple wrapper class, which wraps around the :class:`ivy.NativeArray`, storing it in `self._data`_.
All functions in the Ivy functional API which accept *at least one array argument* in the input are implemented as instance methods in the :class:`ivy.Array` class.
The only exceptions to this are functions in the `nest <https://github.com/unifyai/ivy/blob/906ddebd9b371e7ae414cdd9b4bf174fd860efc0/ivy/functional/ivy/nest.py>`_ module and the `meta <https://github.com/unifyai/ivy/blob/906ddebd9b371e7ae414cdd9b4bf174fd860efc0/ivy/functional/ivy/meta.py>`_ module, which have no instance method implementations.
The organization of these instance methods follows the same organizational structure as the files in the functional API.
The :class:`ivy.Array` class `inherits`_ from many category-specific array classes, such as `ArrayWithElementwise`_, each of which implements the category-specific instance methods.
Each instance method simply calls the functional API function internally, but passes in :code:`self._data` as the first *array* argument.
`ivy.Array.add`_ is a good example.
However, it's important to bear in mind that this is *not necessarily the first argument*, although in most cases it will be.
We also **do not** set the :code:`out` argument to :code:`self` for instance methods.
If the only array argument is the :code:`out` argument, then we do not implement this instance method.
For example, we do not implement an instance method for `ivy.zeros <https://github.com/unifyai/ivy/blob/1dba30aae5c087cd8b9ffe7c4b42db1904160873/ivy/functional/ivy/creation.py#L116>`_.
Given the simple set of rules which underpin how these instance methods should all be implemented, if a source-code implementation is not found, then this instance method is added `programmatically`_.
This serves as a helpful backup in cases where some methods are accidentally missed out.
The benefit of the source code implementations is that this makes the code much more readable, with important methods not being entirely absent from the code.
It also enables other helpful perks, such as auto-completions in the IDE etc.
Most special methods also simply wrap a corresponding function in the functional API, as `is the case`_ in the Array API Standard.
Examples include `__add__`_, `__sub__`_, `__mul__`_ and `__truediv__`_ which directly call :func:`ivy.add`, :func:`ivy.subtract`, :func:`ivy.multiply` and :func:`ivy.divide` respectively.
However, for some special methods such as `__setitem__`_, there are substantial differences between the backend frameworks which must be addressed in the :class:`ivy.Array` implementation.
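As a quick sketch of this unified behaviour, item assignment works even under backends whose native arrays are immutable (the backend choice below is just an assumption for illustration):
.. code-block:: python
    import ivy
    ivy.set_backend("jax")  # jax arrays are natively immutable
    x = ivy.array([1, 2, 3])
    x[0] = 5  # dispatches to ivy.Array.__setitem__, unified across backends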
Array Handling
--------------
When calling backend-specific functions such as :func:`torch.sin`, we must pass in :class:`ivy.NativeArray` instances.
For example, :func:`torch.sin` will throw an error if we try to pass in an :class:`ivy.Array` instance.
It must be provided with a :class:`torch.Tensor`, and this is reflected in the `backend type hints`_.
However, all Ivy functions must return :class:`ivy.Array` instances, which is reflected in the `Ivy type hints`_.
The reason we always return :class:`ivy.Array` instances from Ivy functions is to ensure that any subsequent Ivy code is fully framework-agnostic, with all operators performed on the returned array being handled by the special methods of the :class:`ivy.Array` class, and not the special methods of the backend :class:`ivy.NativeArray` class.
For example, calling any of (:code:`+`, :code:`-`, :code:`*`, :code:`/` etc.) on the array will result in (:meth:`__add__`, :meth:`__sub__`, :meth:`__mul__`, :meth:`__truediv__` etc.) being called on the array class.
For most special methods, calling them on the :class:`ivy.NativeArray` would not be a problem because all backends are generally quite consistent, but as explained above, for some functions such as `__setitem__`_ there are substantial differences which must be addressed in the :class:`ivy.Array` implementation in order to guarantee unified behaviour.
Given that all Ivy functions return :class:`ivy.Array` instances, all Ivy functions must also support :class:`ivy.Array` instances in the input, otherwise it would be impossible to chain functions together!
Therefore, most functions in Ivy must adopt the following pipeline:
#. convert all :class:`ivy.Array` instances in the input arguments to :class:`ivy.NativeArray` instances
#. call the backend-specific function, passing in these :class:`ivy.NativeArray` instances
#. convert all of the :class:`ivy.NativeArray` instances which are returned from the backend function back into :class:`ivy.Array` instances, and return
Given the repeating nature of these steps, this is all entirely handled in the `inputs_to_native_arrays`_ and `outputs_to_ivy_arrays`_ wrappers, as explained in the `Function Wrapping <function_wrapping.rst>`_ section.
All Ivy functions *also* accept :class:`ivy.NativeArray` instances in the input.
This is for a couple of reasons.
Firstly, :class:`ivy.Array` instances must be converted to :class:`ivy.NativeArray` instances anyway, and so supporting them in the input is not a problem.
Secondly, this makes it easier to combine backend-specific code with Ivy code, without needing to explicitly wrap any arrays before calling sections of Ivy code.
Therefore, all input arrays to Ivy functions have type :code:`Union[ivy.Array, ivy.NativeArray]`, whereas the output arrays have type :class:`ivy.Array`.
This is further explained in the `Function Arguments <function_arguments.rst>`_ section.
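To make this concrete, here is a minimal sketch (assuming a NumPy backend is installed):
.. code-block:: python
    import numpy as np
    import ivy
    ivy.set_backend("numpy")
    x = np.array([0.0, 1.0, 2.0])  # an ivy.NativeArray under this backend
    y = ivy.sin(x)                 # native arrays are accepted in the input
    assert ivy.is_ivy_array(y)     # ...but the return is always an ivy.Array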
However, :class:`ivy.NativeArray` instances are not permitted for the :code:`out` argument, which is used in most functions.
This is because the :code:`out` argument dictates the array to which the result should be written, and so it effectively serves the same purpose as the function return.
This is further explained in the `Inplace Updates <inplace_updates.rst>`_ section.
As a final point, extra attention is required for *compositional* functions, as these do not directly defer to a backend implementation.
If the first line of code in a compositional function performs operations on the input array, then this will call the special methods on an :class:`ivy.NativeArray` and not on an :class:`ivy.Array`.
For the reasons explained above, this would be a problem.
Therefore, all compositional functions have a separate piece of `wrapped logic`_ to ensure that all :class:`ivy.NativeArray` instances are converted to :class:`ivy.Array` instances before entering into the compositional function.
Integrating custom classes with Ivy
-----------------------------------
Ivy's functional API and its functions can easily be integrated with non-Ivy classes. Whether these classes inherit from Ivy or are completely standalone custom classes, Ivy's functions can handle inputs of those types through Ivy's :code:`__ivy_array_function__`.
To make use of this feature, the class must implement the relevant Ivy functions, and it must also implement the method :code:`__ivy_array_function__`. If a non-Ivy class is passed to an Ivy function, a call to this class's :code:`__ivy_array_function__` is made, which directs Ivy's function to handle that input type correctly. This allows users to define custom implementations for any of the functions found in Ivy's functional API, which in turn makes it easy to integrate those classes with other Ivy projects.
**Note**
This functionality is inspired by `NumPy's`_ :code:`__array_function__` and `PyTorch's`_ :code:`__torch_function__`.
As an example, consider the following class :code:`MyArray` with the following definition:
.. code-block:: python
class MyArray:
def __init__(self, data=None):
self.data = data
Running any of Ivy’s functions using a :code:`MyArray` object as input will throw an :code:`IvyBackendException` since Ivy’s functions do not support this class type as input. This is where :code:`__ivy_array_function__` comes into play. Let’s add the method to our :code:`MyArray` class to see how it works.
There are different ways to do so. One way is to use a global dict :code:`HANDLED_FUNCTIONS` which will map Ivy’s functions to the custom variant functions:
.. code-block:: python
HANDLED_FUNCTIONS = {}
class MyArray:
def __init__(self, data=None):
self.data = data
def __ivy_array_function__(self, func, types, args, kwargs):
if func not in HANDLED_FUNCTIONS:
return NotImplemented
if not all(issubclass(t, (MyArray, ivy.Array, ivy.NativeArray)) for t in types):
return NotImplemented
return HANDLED_FUNCTIONS[func](*args, **kwargs)
:code:`__ivy_array_function__` accepts four parameters: :code:`func` representing a reference to the array API function being
overridden, :code:`types` a list of the types of objects implementing :code:`__ivy_array_function__`, :code:`args` a tuple of arguments supplied to the function, and :code:`kwargs` being a dictionary of keyword arguments passed to the function.
While this class contains an implementation for :code:`__ivy_array_function__`, it is still not enough as it is necessary to implement any needed Ivy functions with the new :code:`MyArray` class as input(s) for the code to run successfully.
We will define a decorator function :code:`implements` that can be used to add functions to :code:`HANDLED_FUNCTIONS`:
.. code-block:: python
def implements(ivy_function):
def decorator(func):
HANDLED_FUNCTIONS[ivy_function] = func
return func
return decorator
Lastly, we need to apply that decorator to the override function. Let’s consider for example a function that overrides :code:`ivy.abs`:
.. code-block:: python
    @implements(ivy.abs)
    def my_abs(my_array):
        my_array.data = abs(my_array.data)
        return my_array
Now that we have added the function to :code:`HANDLED_FUNCTIONS`, we can now use :code:`ivy.abs` with :code:`MyArray` objects:
.. code-block:: python
X = MyArray(-3)
X = ivy.abs(X)
Of course :code:`ivy.abs` is an example of a function that is easy to override since it only requires one operand. The same approach can be used to override functions with multiple operands, including arrays or array-like objects that define :code:`__ivy_array_function__`.
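For example, a hypothetical two-operand override of :code:`ivy.add` could be registered in exactly the same way (this helper is illustrative, not part of Ivy):
.. code-block:: python
    @implements(ivy.add)
    def my_add(arr1, arr2):
        return MyArray(arr1.data + arr2.data)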
It is worth mentioning again that any function not stored inside the dict :code:`HANDLED_FUNCTIONS` will not work, and it is also important to note that the operands passed to the function must match those of the function stored in the dict. For instance, :code:`my_abs` takes only one parameter, a :code:`MyArray` object, so passing any other operands to the function will result in an :code:`IvyBackendException` being thrown. Lastly, for a custom class to be covered completely by Ivy's functional API, it is necessary to create an implementation for all the relevant functions within the API that will be used by this custom class. That can be all the functions in the API or only a subset of them.
**Round Up**
This should have hopefully given you a good feel for the different types of arrays, and how these are handled in Ivy.
If you have any questions, please feel free to reach out on `discord`_ in the `arrays thread`_!
**Video**
.. raw:: html
<iframe width="420" height="315" allow="fullscreen;"
src="https://www.youtube.com/embed/tAlDPnWcLDE" class="video">
</iframe>
| ivy/docs/overview/deep_dive/arrays.rst/0 | {
"file_path": "ivy/docs/overview/deep_dive/arrays.rst",
"repo_id": "ivy",
"token_count": 4740
} | 4 |
Inplace Updates
===============
.. _`backend setting`: https://github.com/unifyai/ivy/blob/1eb841cdf595e2bb269fce084bd50fb79ce01a69/ivy/backend_handler.py#L204
.. _`handle_out_argument`: https://github.com/unifyai/ivy/blob/dcfec8b85de3c422dc0ca1970d67cb620cae62a4/ivy/func_wrapper.py#L340
.. _`torch.tan`: https://pytorch.org/docs/stable/generated/torch.tan.html
.. _`numpy.tan`: https://numpy.org/doc/stable/reference/generated/numpy.tan.html
.. _`tf.math.tan`: https://www.tensorflow.org/api_docs/python/tf/math/tan
.. _`jax.numpy.tan`: https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.tan.html?highlight=tan
.. _`presence of this attribute`: https://github.com/unifyai/ivy/blob/8ded4a5fc13a278bcbf2d76d1fa58ab41f5797d0/ivy/func_wrapper.py#L341
.. _`by the backend function`: https://github.com/unifyai/ivy/blob/8ded4a5fc13a278bcbf2d76d1fa58ab41f5797d0/ivy/func_wrapper.py#L372
.. _`handled by the wrapper`: https://github.com/unifyai/ivy/blob/8ded4a5fc13a278bcbf2d76d1fa58ab41f5797d0/ivy/func_wrapper.py#L373
.. _`_wrap_fn`: https://github.com/unifyai/ivy/blob/6497b8a3d6b0d8aac735a158cd03c8f98eb288c2/ivy/container/wrapping.py#L69
.. _`NON_WRAPPED_FUNCTIONS`: https://github.com/unifyai/ivy/blob/fdaea62380c9892e679eba37f26c14a7333013fe/ivy/func_wrapper.py#L9
.. _`Array API Standard`: https://data-apis.org/array-api/latest/
.. _`ivy.reshape`: https://github.com/unifyai/ivy/blob/633eb420c5006a0a17c238bfa794cf5b6add8598/ivy/functional/ivy/manipulation.py#L418
.. _`ivy.astype`: https://github.com/unifyai/ivy/blob/8482eb3fcadd0721f339a1a55c3f3b9f5c86d8ba/ivy/functional/ivy/data_type.py#L46
.. _`ivy.asarray`: https://github.com/unifyai/ivy/blob/8482eb3fcadd0721f339a1a55c3f3b9f5c86d8ba/ivy/functional/ivy/creation.py#L114
.. _`repo`: https://github.com/unifyai/ivy
.. _`discord`: https://discord.gg/sXyFF8tDtm
.. _`inplace updates thread`: https://discord.com/channels/799879767196958751/1189906590166437938
.. _`example`: https://github.com/unifyai/ivy/blob/0ef2888cbabeaa8f61ce8aaea4f1175071f7c396/ivy/functional/ivy/layers.py#L169-L176
Inplace updates enable users to overwrite the contents of existing arrays with new data.
This enables much more control over the memory-efficiency of the program, preventing old unused arrays from being kept in memory for any longer than is strictly necessary.
The function :func:`ivy.inplace_update` enables explicit inplace updates.
:func:`ivy.inplace_update` is a *primary* function, and the backend-specific implementations for each backend are presented below.
We also explain the rationale for why each implementation is the way it is, and the important differences.
This is one particular area of the Ivy code where, technically speaking, the function :func:`ivy.inplace_update` will result in subtly different behaviour for each backend, unless the :code:`ensure_in_backend` flag is set to :code:`True`.
While :class:`ivy.Array` instances will always be inplace updated consistently, in some cases it is simply not possible to also inplace update the :class:`ivy.NativeArray` which :class:`ivy.Array` wraps, due to design choices made by each backend.
**NOTE:** Native inplace updates do not modify the dtype of the array being updated; as such, the :code:`keep_input_dtype` flag should normally be set to :code:`True`, so that inplace updating behaviour is consistent between backends.
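As a minimal usage sketch (the backend choice below is just an assumption for illustration):
.. code-block:: python
    import ivy
    ivy.set_backend("torch")
    x = ivy.array([1, 2, 3])
    val = ivy.array([4.0, 5.0, 6.0])
    ivy.inplace_update(x, val, keep_input_dtype=True)
    print(x)  # ivy.array([4, 5, 6]), keeping the original integer dtype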
**JAX**:
.. code-block:: python
def inplace_update(
x: Union[ivy.Array, JaxArray],
val: Union[ivy.Array, JaxArray],
/,
*,
ensure_in_backend: bool = False,
keep_input_dtype: bool = False,
) -> ivy.Array:
if ivy.is_array(x) and ivy.is_array(val):
if ensure_in_backend:
raise ivy.utils.exceptions.IvyException(
"JAX does not natively support inplace updates"
)
if keep_input_dtype:
val = ivy.astype(val, x.dtype)
(x_native, val_native), _ = ivy.args_to_native(x, val)
if ivy.is_ivy_array(x):
x.data = val_native
# Handle view updates
if ivy.exists(x._base):
base = x._base
base_idx = ivy.arange(base.size).reshape(base.shape)
for fn, args, kwargs, index in x._manipulation_stack:
kwargs["copy"] = True
base_idx = ivy.__dict__[fn](base_idx, *args, **kwargs)
base_idx = base_idx[index] if ivy.exists(index) else base_idx
base_flat = base.data.flatten()
base_flat = base_flat.at[base_idx.data.flatten()].set(
val_native.flatten()
)
base.data = base_flat.reshape(base.shape)
for ref in base._view_refs:
view = ref()
if ivy.exists(view) and view is not x:
_update_view(view, base)
else:
for ref in x._view_refs:
view = ref()
if ivy.exists(view):
_update_view(view, x)
else:
raise ivy.utils.exceptions.IvyException(
"JAX does not natively support inplace updates"
)
return x
else:
return val
JAX **does not** natively support inplace updates, and so there is no way of actually inplace updating the :code:`JaxArray` instance :code:`x_native`.
Therefore, an inplace update is only performed on :class:`ivy.Array` instances provided in the input.
JAX functions also never return views, so additional logic is added to functionally support multiple variables referencing the same memory (further explained in a later section).
**NumPy**:
.. code-block:: python
def inplace_update(
x: Union[ivy.Array, np.ndarray],
val: Union[ivy.Array, np.ndarray],
/,
*,
ensure_in_backend: bool = False,
keep_input_dtype: bool = False,
) -> ivy.Array:
ivy.utils.assertions.check_inplace_sizes_valid(x, val)
if ivy.is_array(x) and ivy.is_array(val):
if keep_input_dtype:
val = ivy.astype(val, x.dtype)
(x_native, val_native), _ = ivy.args_to_native(x, val)
# make both arrays contiguous if not already
if not x_native.flags.c_contiguous:
x_native = np.ascontiguousarray(x_native)
if not val_native.flags.c_contiguous:
val_native = np.ascontiguousarray(val_native)
if val_native.shape == x_native.shape:
if x_native.dtype != val_native.dtype:
x_native = x_native.astype(val_native.dtype)
np.copyto(x_native, val_native)
else:
x_native = val_native
if ivy.is_ivy_array(x):
x.data = x_native
else:
x = ivy.Array(x_native)
return x
else:
return val
NumPy **does** natively support inplace updates, and so :code:`x_native` is updated inplace with :code:`val_native`.
Following this, an inplace update is then also performed on the :class:`ivy.Array` instance, if provided in the input.
**TensorFlow**:
.. code-block:: python
def inplace_update(
x: Union[ivy.Array, tf.Tensor],
val: Union[ivy.Array, tf.Tensor],
/,
*,
ensure_in_backend: bool = False,
keep_input_dtype: bool = False,
) -> ivy.Array:
if ivy.is_array(x) and ivy.is_array(val):
if keep_input_dtype:
val = ivy.astype(val, x.dtype)
(x_native, val_native), _ = ivy.args_to_native(x, val)
if _is_variable(x_native):
x_native.assign(val_native)
if ivy.is_ivy_array(x):
x.data = x_native
else:
x = ivy.Array(x_native)
elif ensure_in_backend:
raise ivy.utils.exceptions.IvyException(
"TensorFlow does not support inplace updates of the tf.Tensor"
)
elif ivy.is_ivy_array(x):
x.data = val_native
# Handle view updates
if ivy.exists(x._base):
base = x._base
base_idx = ivy.arange(base.size).reshape(base.shape)
for fn, args, kwargs, index in x._manipulation_stack:
kwargs["copy"] = True
base_idx = ivy.__dict__[fn](base_idx, *args, **kwargs)
base_idx = base_idx[index] if ivy.exists(index) else base_idx
base_flat = tf.reshape(base.data, -1)
base_flat = tf.tensor_scatter_nd_update(
base_flat,
tf.reshape(base_idx.data, (-1, 1)),
tf.reshape(val_native, -1),
)
base.data = tf.reshape(base_flat, base.shape)
for ref in base._view_refs:
view = ref()
if ivy.exists(view) and view is not x:
_update_view(view, base)
else:
for ref in x._view_refs:
view = ref()
if ivy.exists(view):
_update_view(view, x)
else:
x = ivy.to_ivy(x_native)
return x
else:
return val
TensorFlow **does not** natively support inplace updates for :class:`tf.Tensor` instances, and so in such cases there is no way of actually inplace updating the :class:`tf.Tensor` instance :code:`x_native`.
However, TensorFlow **does** natively support inplace updates for :class:`tf.Variable` instances.
Therefore, if :code:`x_native` is a :class:`tf.Variable`, then :code:`x_native` is updated inplace with :code:`val_native`.
Irrespective of whether the native array is a :class:`tf.Tensor` or a :class:`tf.Variable`, an inplace update is then also performed on the :class:`ivy.Array` instance, if provided in the input.
TensorFlow functions also never return views, so additional logic is added to functionally support multiple variables referencing the same memory (further explained in a later section).
**PyTorch**:
.. code-block:: python
def inplace_update(
x: Union[ivy.Array, torch.Tensor],
val: Union[ivy.Array, torch.Tensor],
/,
*,
ensure_in_backend: bool = False,
keep_input_dtype: bool = False,
) -> ivy.Array:
ivy.utils.assertions.check_inplace_sizes_valid(x, val)
if ivy.is_array(x) and ivy.is_array(val):
if keep_input_dtype:
val = ivy.astype(val, x.dtype)
(x_native, val_native), _ = ivy.args_to_native(x, val)
if is_variable(x_native):
x_native.data = val_native
else:
x_native[()] = val_native
if ivy.is_ivy_array(x):
x.data = x_native
_update_torch_views(x)
else:
x = ivy.to_ivy(x_native)
if ensure_in_backend:
x._data = val_native
return x
else:
return val
PyTorch **does** natively support inplace updates, and so :code:`x_native` is updated inplace with :code:`val_native`.
Following this, an inplace update is then also performed on the :class:`ivy.Array` instance, if provided in the input.
PyTorch also supports views for most manipulation and indexing operations, as NumPy does, but it lacks that functionality for a few functions such as :func:`flip`.
Additional logic had to be added to support view functionality for those functions (described in a section below).
The function :func:`ivy.inplace_update` is also *nestable*, meaning it can accept :class:`ivy.Container` instances in the input.
If an :class:`ivy.Container` instance is provided for the argument :code:`x`, then along with the arrays at all of the leaves, the container :code:`x` is **also** inplace updated, meaning that a new :class:`ivy.Container` instance is not created for the function return.
out argument
------------
Most functions in Ivy support inplace updates via the inclusion of a keyword-only :code:`out` argument.
This enables users to specify the array to which they would like the output of a function to be written.
This could for example be the input array itself, but can also be any other array of choice.
All Ivy functions which return a single array should support inplace updates via the :code:`out` argument.
The type hint of the :code:`out` argument is :code:`Optional[ivy.Array]`.
However, as discussed above, if the function is *nestable* then :class:`ivy.Container` instances are also supported.
:class:`ivy.Container` is omitted from the type hint in such cases, as explained in the `Function Arguments <function_arguments.rst>`_ section.
When the :code:`out` argument is unspecified, then the return is simply provided in a newly created :class:`ivy.Array` (or :class:`ivy.Container` if *nestable*).
However, when :code:`out` is specified, then the return is provided as an inplace update of the :code:`out` argument provided.
This can for example be the same as the input to the function, resulting in a simple inplace update of the input.
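For example, a simple sketch with :func:`ivy.tan`:
.. code-block:: python
    x = ivy.array([1.0, 2.0, 3.0])
    y = ivy.zeros_like(x)
    ivy.tan(x, out=y)  # the result is written into y inplace
    ivy.tan(x, out=x)  # equivalently, an inplace update of the input itself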
In the case of :class:`ivy.Array` return types, the :code:`out` argument is predominantly handled in `handle_out_argument`_.
As explained in the `Function Wrapping <function_wrapping.rst>`_ section, this wrapping is applied to every function with the :code:`@handle_out_argument` decorator dynamically during `backend setting`_.
**Primary Functions**
In the case of *primary* functions, `handle_out_argument`_ does not handle the backend-specific inplace updates in cases where the backend function being wrapped supports them directly, such as `torch.tan`_ and `numpy.tan`_, which both support the :code:`out` argument directly.
When implementing backend-specific functions, the attribute :code:`support_native_out` should be added to all functions which wrap a function in the backend supporting inplace updates directly.
`tf.math.tan`_ and `jax.numpy.tan`_ for example do **not** support inplace updates, and so the :code:`support_native_out` attribute should **not** be added to the :code:`tan` implementations.
The implementations of :func:`ivy.tan` for each backend are as follows.
**JAX** (no :code:`support_native_out` attribute):
.. code-block:: python
def tan(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:
return jnp.tan(x)
**NumPy** (includes :code:`support_native_out` attribute):
.. code-block:: python
@_scalar_output_to_0d_array
def tan(x: np.ndarray, /, *, out: Optional[np.ndarray] = None) -> np.ndarray:
return np.tan(x, out=out)
tan.support_native_out = True
**TensorFlow** (no :code:`support_native_out` attribute):
.. code-block:: python
def tan(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.tan(x)
**PyTorch** (includes :code:`support_native_out` attribute):
.. code-block:: python
def tan(x: torch.Tensor, /, *, out: Optional[torch.Tensor] = None) -> torch.Tensor:
x = _cast_for_unary_op(x)
return torch.tan(x, out=out)
tan.support_native_out = True
It's very important to ensure the :code:`support_native_out` attribute is not added to backend implementations that do not handle the :code:`out` argument, as the `presence of this attribute`_ dictates whether the argument should be handled `by the backend function`_ or `by the wrapper <function_wrapping.rst>`_.
This distinction only concerns how the inplace update is applied to the native array, which is operated upon directly by the backend.
If :code:`out` is specified in an Ivy function, then an inplace update is always **also** performed on the :class:`ivy.Array` instance itself, which is how :code:`out` is provided to the function originally.
The inplace update of this :class:`ivy.Array` is always `handled by the wrapper`_.
Alternatively, if :code:`out` is an :class:`ivy.Container`, then the inplace update is always handled by `_wrap_fn`_ in the container wrapping module.
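For instance, a sketch of the container case, where every leaf of :code:`out` is inplace updated:
.. code-block:: python
    x = ivy.Container(a=ivy.array([0.0]), b=ivy.array([1.0]))
    out = ivy.Container(a=ivy.array([9.0]), b=ivy.array([9.0]))
    ivy.tan(x, out=out)  # out, and each of its leaves, is inplace updated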
**Special Case**
Take a function which has multiple possible "paths" through the code:
.. code-block:: python
def cholesky(
x: torch.Tensor, /, *, upper: bool = False, out: Optional[torch.Tensor] = None
) -> torch.Tensor:
if not upper:
return torch.linalg.cholesky(x, out=out)
else:
ret = torch.transpose(
torch.linalg.cholesky(
torch.transpose(x, dim0=len(x.shape) - 1, dim1=len(x.shape) - 2)
),
dim0=len(x.shape) - 1,
dim1=len(x.shape) - 2,
)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
cholesky.support_native_out = True
Here we still have the :attr:`support_native_out` attribute since we want to take advantage of the native inplace update enabled by :func:`torch.linalg.cholesky` in the first condition.
However, in the :code:`else` statement, the last operation is :func:`torch.transpose` which does not support the :code:`out` argument, and so the native inplace update can't be performed by torch here.
This is why we need to call :func:`ivy.inplace_update` explicitly here, to ensure the native inplace update is performed, as well as the :class:`ivy.Array` inplace update.
Another case where we need to use :func:`ivy.inplace_update` with a function that has :attr:`support_native_out` is the :code:`torch` backend implementation of the :func:`ivy.remainder` function:
.. code-block:: python
def remainder(
x1: Union[float, torch.Tensor],
x2: Union[float, torch.Tensor],
/,
*,
modulus: bool = True,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
if not modulus:
res = x1 / x2
res_floored = torch.where(res >= 0, torch.floor(res), torch.ceil(res))
diff = res - res_floored
diff, x2 = ivy.promote_types_of_inputs(diff, x2)
if ivy.exists(out):
if out.dtype != x2.dtype:
return ivy.inplace_update(
out, torch.round(torch.mul(diff, x2)).to(out.dtype)
)
return torch.round(torch.mul(diff, x2), out=out).to(x1.dtype)
return torch.remainder(x1, x2, out=out).to(x1.dtype)
remainder.support_native_out = True
Here, even though :func:`torch.round` natively supports the :code:`out` argument, we need to use :func:`ivy.inplace_update` whenever the :code:`dtype` of the :code:`out` argument differs
from the :code:`dtype` of the result of the function, while still utilizing the native :code:`out` argument whenever
the :code:`dtype` is the same, for the maximum possible extent of the native inplace update.
**Compositional Functions**
For *compositional* functions, the :code:`out` argument should **always** be handled in the compositional implementation, with no wrapping applied at all.
This is for a few reasons:
#. we need to show the :code:`out` argument in the compositional function signature, as this is the only function implementation in the codebase.
Adding an argument unused in the implementation could cause some confusion.
#. generally, inplace updates are performed because memory management is an area of concern for the user.
   By handling the :code:`out` argument in the compositional implementation itself, we can maximize the memory efficiency of the function, using inplace updates in as many of the inner Ivy functions as possible.
#. this enables us to make use of backend-specific :code:`out` argument handling.
The second and third points are the most important points.
We'll use :func:`ivy.cross_entropy` as an example:
.. code-block:: python
def cross_entropy(
true: Union[ivy.Array, ivy.NativeArray],
pred: Union[ivy.Array, ivy.NativeArray],
/,
*,
axis: int = -1,
epsilon: float = 1e-7,
reduction: str = "mean",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
ivy.utils.assertions.check_elem_in_list(reduction, ["none", "sum", "mean"])
pred = ivy.clip(pred, epsilon, 1 - epsilon)
log_pred = ivy.log(pred)
return _reduce_loss(reduction, log_pred * true, axis, out=out)
By handling the :code:`out` argument in the function, we are able to get the benefits outlined above.
Firstly, the return of :code:`_reduce_loss`, which performs the final reduction, has the same shape and type as the return of the entire function, and so we can write this output to the :code:`out` argument inplace.
This minimizes the number of arrays created during the execution of the function, which is generally the intention when specifying the :code:`out` argument.
Additionally, with a PyTorch backend, the inner reduction defers to the :code:`out` argument of the corresponding :code:`torch` function directly, which is the most efficient inplace update possible, making use of backend-specific optimizations.
If we had instead simply used the wrapper `handle_out_argument`_, then we would not leverage any of these benefits, and instead simply call :func:`ivy.inplace_update` at the very end of the function call.
For some compositional functions, the internal function which generates the final return value does not itself support the :code:`out` argument.
For example, `ivy.multi_head_attention <https://github.com/unifyai/ivy/blob/2045db570d7977830681a7498a3c1045fb5bcc79/ivy/functional/ivy/layers.py#L165>`_ includes support for arbitrary functions passed in the input, including :code:`to_out_fn` which, if specified, is applied to the outputs before returning.
For such functions, the inplace update should just be performed using :func:`ivy.inplace_update` at the end of the function, like `so <https://github.com/unifyai/ivy/blob/2045db570d7977830681a7498a3c1045fb5bcc79/ivy/functional/ivy/layers.py#L254>`_.
Technically, this could be handled using the `handle_out_argument`_ wrapping, but we opt to implement this in the compositional function itself, due to point 1 mentioned above.
**Mixed Functions**
As explained in the `Function Types <function_types.rst>`_ section, *mixed* functions can effectively behave as either compositional or primary functions, depending on the backend that is selected. We must add :code:`handle_out_argument` to the :code:`add_wrappers` key of
the :code:`mixed_backend_wrappers` attribute so that the decorator gets added to the primary implementation when the backend is set. Here's an `example`_ from the linear function.
copy argument
-------------
As well as the :code:`out` argument, many also support the :code:`copy` argument.
The functions which support the :code:`copy` argument either belong to the `Array API Standard`_, which mandates the inclusion of :code:`copy` in each case,
or they are expected to return views with specific backends (hence being decorated with the :code:`@handle_view` wrapper), and :code:`copy` is added to provide a way to prevent views from being created.
The :code:`copy` argument dictates whether a new copy should be created, or whether the input array should be updated inplace.
When :code:`copy` is not specified explicitly, then an inplace update is performed with the same behaviour as :code:`copy=False`.
Setting :code:`copy=False` is equivalent to passing :code:`out=input_array`.
If only one of :code:`copy` or :code:`out` is specified, then this specified argument is given priority.
If both are specified, then priority is given to the more general :code:`out` argument.
As with the :code:`out` argument, the :code:`copy` argument is also handled `by the wrapper <function_wrapping.rst>`_.
Views
------------
Many functions in NumPy and PyTorch return views instead of copies, these functions are mostly manipulation routines or indexing routines.
Views are arrays which access the same data buffer as another array but view it with different metadata like :code:`stride`.
More information about these arrays can be found in `NumPy's documentation <https://numpy.org/doc/stable/user/basics.copies.html>`_.
This essentially means that any inplace update on the original array or any of its views will cause all the other views to be updated as well since they reference the same memory buffer.
We want to keep supporting NumPy and PyTorch inplace updates whenever we can, and to superset backend behaviour; however, it is not trivial to replicate this in JAX and TensorFlow.
The main reason is because these frameworks do not natively support inplace updates so even if multiple native arrays are referencing the same memory buffer, you would never be able to update it once for all of them.
Therefore views and their updates must be tracked through Ivy and extra logic has been added to update views in the case an inplace update happens to any array which is meant to be referencing the same memory.
We call views tracked and updated by Ivy functional views as they work with a functional paradigm.
Which functions return views is mostly dictated by NumPy, since it has the most expansive support for them; any function which returns views in NumPy or PyTorch should be decorated with the :code:`@handle_view` wrapper, except :func:`get_item`, which has its own :code:`@handle_view_indexing` wrapper.
Every function with this wrapper should also have a :code:`copy` argument such that Ivy maintains a way to prevent views from being created if necessary.
What that wrapper does is update a few :class:`ivy.Array` attributes which help keep track of views, how they were created, and which arrays should be updated together.
These attributes are then used in the :func:`ivy.inplace_update` to update all the arrays which are meant to be referencing the same memory, at least to NumPy's standard.
Of course, these are normally only used with a JAX and TensorFlow backend since NumPy and PyTorch natively update their views and Ivy does not need to do any extra handling except for a few functions where PyTorch fails to return views when NumPy does.
The functions currently implemented in the Ivy API where PyTorch fails to return views at the time of writing are :func:`ivy.flip`, :func:`ivy.rot90`, :func:`ivy.flipud`, and :func:`ivy.fliplr`.
In the case one of those functions is used with a PyTorch backend, additional logic has been added to make their returns behave as views of the original array that made them.
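As a small sketch of the view behaviour described above (assuming a backend, such as NumPy, where :func:`ivy.reshape` returns a view):
.. code-block:: python
    x = ivy.array([1.0, 2.0, 3.0, 4.0])
    v = ivy.reshape(x, (2, 2))  # v references the same memory as x
    ivy.inplace_update(x, ivy.array([5.0, 6.0, 7.0, 8.0]))
    print(v)  # the view reflects the update made to its base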
Here's a brief description of the additional attributes added to :class:`ivy.Array` and their usage:
#. Base (:code:`._base`): the original array being referenced (array all views stem from)
#. Manipulation stack (:code:`._manipulation_stack`): store of operations that were done on the original to get to the current shape (manipulation or indexing)
#. Reference stack :code:`._view_refs`: Weak references to the arrays that reference the original as view, only populated for base arrays.
#. PyTorch Base (:code:`._torch_base`): Keeps track of functional view (array created from the listed functions above) that made it, otherwise stores original array
#. PyTorch reference stack (:code:`._torch_view_refs`): Functional views referencing this array in its PyTorch base, only populated for original arrays or functional views.
#. PyTorch manipulation cache (:code:`._torch_manipulation`): Tuple storing array or view and function which made the functional view, only populated for functional views
.. note::
    Parts of an array's metadata, like :code:`stride`, are attributed to the low-level memory layout of arrays, while views in :code:`ivy` operate at a higher level of abstraction.
    As a result, :func:`ivy.strides` isn't guaranteed to produce an output reflective of the underlying memory layout if the :class:`ivy.Array` passed in is a view (in other words, if it has a :code:`_base`).
Here's a brief description of how the :code:`@handle_view` wrapper populates these attributes:
#. When an array is made using a function decorated by this wrapper its base becomes the array that made it, or if the array that made it is also a view, its base.
#. The view is then added to the reference stack of the base array (weakly), and the operation that created the array is also added to the manipulation stack of the array.
#. The way the PyTorch-specific attributes are updated should be adequately explained above.
Here's a brief description of what happens during an inplace operation with a JAX and TensorFlow backend:
#. If the base is inplace updated, then it goes through all the arrays in the reference stack, and through their manipulation, then inplace updates every array respectively.
#. If a view gets inplace updated, an index array is created of the shape of the base array, which then is passed through the manipulation stack of the updated array.
#. The updated array and the index array are then flattened and they then update the original array by performing a scatter update on a flattened version of the original array, which then gets reshaped into the correct shape.
#. Then all the views stemming from the original are updated as described in the first point.
Here's a brief description of what happens during an inplace operation with a PyTorch backend:
#. The array being updated checks if it has a populated reference stack; if it does, it inplace updates each functional view in the stack with the output of the stored function called with the array that made it.
It then checks if the functional view has a reference stack and continues recursively until it reaches a point where it exhausts all reference stacks.
#. If the reference stack is empty or exhausted, it checks if it has a manipulation stack.
   If populated, it performs the reverse functional operation with itself as the input and inplace updates the view that made it (reversing the operation that made it).
   If the manipulation stack is empty or already exhausted, it goes to the array's PyTorch base and repeats the process recursively until everything is exhausted and the base is None.
#. All other views are expected to be updated automatically through PyTorch's native view handling.
**Round Up**
This should have hopefully given you a good feel for inplace updates, and how these are handled in Ivy.
If you have any questions, please feel free to reach out on `discord`_ in the `inplace updates thread`_!
**Video**
.. raw:: html
<iframe width="420" height="315" allow="fullscreen;"
src="https://www.youtube.com/embed/n8ko-Ig2eZ0" class="video">
</iframe>
| ivy/docs/overview/deep_dive/inplace_updates.rst/0 | {
"file_path": "ivy/docs/overview/deep_dive/inplace_updates.rst",
"repo_id": "ivy",
"token_count": 11506
} | 5 |
Get Started
===========
..
If you want to use **Ivy's tracer and transpiler**, make sure to follow the
:ref:`setting up instructions for the API key <overview/get_started:Ivy's tracer and transpiler>`
after installing Ivy!
Depending on your preferred environment, you can install Ivy in various ways:
Installing using pip
--------------------
The easiest way to set up Ivy is to install it using pip with the following command:
.. code-block:: bash
pip install ivy
Keep in mind that this **won't** install any framework other than NumPy!
Docker
------
If you prefer to use containers, we also have pre-built Docker images with all the
supported frameworks and some relevant packages already installed, which you can pull from:
.. code-block:: bash
docker pull unifyai/ivy:latest
If you are working on a GPU device, you can pull from:
.. code-block:: bash
docker pull unifyai/ivy:latest-gpu
Installing from source
----------------------
You can also install Ivy from source if you want to take advantage of the latest
changes, but we can't ensure everything will work as expected!
.. code-block:: bash
git clone https://github.com/unifyai/ivy.git
cd ivy
pip install --user -e .
If you are planning to contribute, you want to run the tests, or you are looking
for more in-depth instructions, it's probably best to check out
the `Contributing - Setting Up <contributing/setting_up.rst>`_ page,
where OS-specific and IDE-specific instructions and video tutorials to install Ivy are available!
Ivy's tracer and transpiler
-----------------------------
To use Ivy's tracer and transpiler, you'll need an **API key**. If you don't have one yet, you can
register in `the console <https://console.unify.ai/>`_ to get it!
Ivy Folder
~~~~~~~~~~
When importing Ivy for the first time, a ``.ivy`` folder will be created in your
working directory. If you want to keep this folder in a different location,
you can set an ``IVY_ROOT`` environment variable with the path of your ``.ivy`` folder.
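For example, in a bash shell:
.. code-block:: bash
    export IVY_ROOT=/path/to/your/.ivy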
Setting Up the API key
~~~~~~~~~~~~~~~~~~~~~~
Once the ``.ivy`` folder has been created (either manually or automatically by
importing Ivy), you will have to paste your API key as the content of the ``key.pem`` file.
For reference, this would be equivalent to:
.. code-block:: bash
echo -n API_KEY > .ivy/key.pem
Issues and Questions
~~~~~~~~~~~~~~~~~~~~
If you find any issue or bug while using the tracer and/or the transpiler, please
raise an `issue in GitHub <https://github.com/unifyai/ivy/issues>`_ and add the ``tracer``
or the ``transpiler`` label accordingly. A member of the team will get back to you ASAP!
Otherwise, if you haven't found a bug but want to ask a question, suggest something, or get help
from the team directly, feel free to open a new post at the ``pilot-access`` forum in
`Ivy's discord server! <https://discord.com/invite/sXyFF8tDtm>`_
| ivy/docs/overview/get_started.rst/0 | {
"file_path": "ivy/docs/overview/get_started.rst",
"repo_id": "ivy",
"token_count": 818
} | 6 |
.. _`RWorks Graph Tracers`:
Graph Tracers
=============
.. _`TensorFlow`: https://tensorflow.org/
.. _`JAX`: https://jax.readthedocs.io/
.. _`PyTorch`: https://pytorch.org/
.. _`FX`: https://pytorch.org/docs/stable/fx.html
.. _`discord`: https://discord.gg/sXyFF8tDtm
Graph tracers enable acyclic directed computation graphs to be extracted from functions which operate on the tensors, expressed as source code in the framework.
There is inevitably some overlap with the role of the lower level compilers here, but for the purpose of this discussion, we consider tracers as being any tool which executes the function to be traced and produces a computation graph consisting solely of the lowest level functions defined within the framework itself, without going any lower.
In this light, the tracer does not need to know about the hardware, the compiler instruction set, or anything else lower level.
It simply creates an acyclic directed graph which maps the inputs of a function to the outputs of a function, as a composition of the low level functions defined within the framework.
This is a very useful representation which can then make subsequent compilation simpler, and so this graph representation often sits between the raw source code and the lower level compilers which compile to specific hardware.
tf.Graph
--------
The :code:`tf.Graph` class represents an arbitrary `TensorFlow`_ computation, represented as a dataflow graph.
It is used by :code:`tf.function` to represent the function's computations.
Each graph contains a set of :code:`tf.Operation` instances, which represent units of computation; and :code:`tf.Tensor` instances, which represent the units of data that flow between operations.
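As a minimal sketch, a :code:`tf.Graph` can be obtained from a :code:`tf.function` via a concrete function:
.. code-block:: python
    import tensorflow as tf
    @tf.function
    def fn(x):
        return tf.tan(x) + 1.0
    graph = fn.get_concrete_function(tf.constant([1.0])).graph  # a tf.Graph
    print([op.name for op in graph.get_operations()])  # its tf.Operation instances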
Jaxpr
-----
Conceptually, one can think of `JAX`_ transformations as first trace-specializing the Python function to be transformed into a small and well-behaved intermediate form that is then interpreted with transformation-specific interpretation rules.
It uses the actual Python interpreter to do most of the heavy lifting to distill the essence of the computation into a simple statically-typed expression language with limited higher-order features.
That language is the jaxpr language.
A :code:`jax.core.Jaxpr` instance represents a function with one or more typed parameters (input variables) and one or more typed results.
The results depend only on the input variables; there are no free variables captured from enclosing scopes.
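As a minimal sketch, a jaxpr can be inspected with :code:`jax.make_jaxpr`:
.. code-block:: python
    import jax
    import jax.numpy as jnp
    def fn(x):
        return jnp.tan(x) + 1.0
    print(jax.make_jaxpr(fn)(jnp.ones(3)))  # the jaxpr traced from fn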
torch.jit
---------
:code:`torch.jit.trace` and :code:`torch.jit.trace_module` enable a module or Python function to be traced in `PyTorch`_, returning an executable which will be optimized using just-in-time compilation.
Example inputs must be provided, and then the function is run, with a recording of the operations performed on all the tensors.
The resulting recording of a standalone function produces a :code:`ScriptFunction` instance.
The resulting recording of :code:`nn.Module.forward` or :code:`nn.Module` produces a :code:`ScriptModule` instance.
This module also contains any parameters that the original module had as well.
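As a minimal tracing sketch:
.. code-block:: python
    import torch
    def fn(x):
        return torch.tan(x) + 1.0
    traced = torch.jit.trace(fn, torch.randn(3))  # example inputs are required
    print(traced.graph)  # the recorded computation graph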
torch.fx
--------
`FX`_ is a toolkit for developers to use to transform :code:`torch.nn.Module` instances in `PyTorch`_.
FX consists of three main components: a symbolic tracer, an intermediate representation, and Python code generation.
The symbolic tracer performs a “symbolic execution” of the Python code.
It feeds fake values, called Proxies, through the code.
Operations on these Proxies are recorded.
The intermediate representation is the container for the operations that were recorded during symbolic tracing.
It consists of a list of Nodes that represent function inputs, call-sites (to functions, methods, or :code:`torch.nn.Module` instances), and return values.
The IR is the format in which transformations are applied.
Python code generation is what makes FX a Python-to-Python (or Module-to-Module) transformation toolkit.
For each Graph IR, valid Python code matching the Graph’s semantics can be created.
This functionality is wrapped up in GraphModule, which is a :code:`torch.nn.Module` instance that holds a Graph as well as a forward method generated from the Graph.
Taken together, this pipeline of components (symbolic tracing -> intermediate representation -> transforms -> Python code generation) constitutes the Python-to-Python transformation pipeline of FX.
In addition, these components can be used separately.
For example, symbolic tracing can be used in isolation to capture a form of the code for analysis (and not transformation) purposes.
Code generation can be used for programmatically generating models, for example from a config file.
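As a minimal sketch of this pipeline:
.. code-block:: python
    import torch
    import torch.fx
    class M(torch.nn.Module):
        def forward(self, x):
            return x + x
    gm = torch.fx.symbolic_trace(M())  # a GraphModule holding the Graph IR
    print(gm.graph)  # the intermediate representation
    print(gm.code)   # Python code regenerated from the Graph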
| ivy/docs/overview/related_work/graph_tracers.rst/0 | {
"file_path": "ivy/docs/overview/related_work/graph_tracers.rst",
"repo_id": "ivy",
"token_count": 1109
} | 7 |
from . import array
from . import container
from . import nested_array
from . import factorized_tensor
| ivy/ivy/data_classes/__init__.py/0 | {
"file_path": "ivy/ivy/data_classes/__init__.py",
"repo_id": "ivy",
"token_count": 26
} | 8 |
# global
import abc
from typing import Union, Callable, Sequence
# local
import ivy
class _ArrayWithGeneralExperimental(abc.ABC):
def reduce(
self: ivy.Array,
init_value: Union[int, float],
computation: Callable,
/,
*,
axes: Union[int, Sequence[int]] = 0,
keepdims: bool = False,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.reduce. This method simply
wraps the function, and so the docstring for ivy.reduce also applies to
this method with minimal changes.
Parameters
----------
self
The array to act on.
init_value
The value with which to start the reduction.
computation
The reduction function.
axes
The dimensions along which the reduction is performed.
keepdims
If this is set to True, the axes which are reduced are left in the result as
dimensions with size one.
Returns
-------
ret
The reduced array.
Examples
--------
>>> x = ivy.array([[1, 2, 3], [4, 5, 6]])
        >>> x.reduce(0, ivy.add, axes=1)
ivy.array([6, 15])
"""
return ivy.reduce(self, init_value, computation, axes=axes, keepdims=keepdims)
| ivy/ivy/data_classes/array/experimental/general.py/0 | {
"file_path": "ivy/ivy/data_classes/array/experimental/general.py",
"repo_id": "ivy",
"token_count": 594
} | 9 |
# global
import abc
# ToDo: implement all methods here as public instance methods
class _ArrayWithImage(abc.ABC):
pass
| ivy/ivy/data_classes/array/image.py/0 | {
"file_path": "ivy/ivy/data_classes/array/image.py",
"repo_id": "ivy",
"token_count": 38
} | 10 |
# global
import operator
# local
import ivy
from .activations import _ContainerWithActivations
from .base import ContainerBase
from .conversions import _ContainerWithConversions
from .creation import _ContainerWithCreation
from .data_type import _ContainerWithDataTypes
from .device import _ContainerWithDevice
from .elementwise import _ContainerWithElementwise
from .general import _ContainerWithGeneral
from .gradients import _ContainerWithGradients
from .image import _ContainerWithImage
from .layers import _ContainerWithLayers
from .linear_algebra import _ContainerWithLinearAlgebra
from .losses import _ContainerWithLosses
from .manipulation import _ContainerWithManipulation
from .norms import _ContainerWithNorms
from .random import _ContainerWithRandom
from .searching import _ContainerWithSearching
from .set import _ContainerWithSet
from .sorting import _ContainerWithSorting
from .statistical import _ContainerWithStatistical
from .utility import _ContainerWithUtility
from ivy.data_classes.container.experimental import (
_ContainerWithActivationExperimental,
_ContainerWithConversionExperimental,
_ContainerWithCreationExperimental,
_ContainerWithData_typeExperimental,
_ContainerWithDeviceExperimental,
_ContainerWithElementWiseExperimental,
_ContainerWithGeneralExperimental,
_ContainerWithGradientsExperimental,
_ContainerWithImageExperimental,
_ContainerWithLayersExperimental,
_ContainerWithLinearAlgebraExperimental,
_ContainerWithManipulationExperimental,
_ContainerWithNormsExperimental,
_ContainerWithRandomExperimental,
_ContainerWithSearchingExperimental,
_ContainerWithSetExperimental,
_ContainerWithSortingExperimental,
_ContainerWithStatisticalExperimental,
_ContainerWithUtilityExperimental,
_ContainerWithLossesExperimental,
)
class Container(
_ContainerWithActivations,
_ContainerWithConversions,
_ContainerWithCreation,
_ContainerWithDataTypes,
_ContainerWithDevice,
_ContainerWithElementwise,
_ContainerWithGeneral,
_ContainerWithGradients,
_ContainerWithImage,
_ContainerWithLayers,
_ContainerWithLinearAlgebra,
_ContainerWithLosses,
_ContainerWithManipulation,
_ContainerWithNorms,
_ContainerWithRandom,
_ContainerWithSearching,
_ContainerWithSet,
_ContainerWithSorting,
_ContainerWithStatistical,
_ContainerWithUtility,
_ContainerWithActivationExperimental,
_ContainerWithConversionExperimental,
_ContainerWithCreationExperimental,
_ContainerWithData_typeExperimental,
_ContainerWithDeviceExperimental,
_ContainerWithElementWiseExperimental,
_ContainerWithGeneralExperimental,
_ContainerWithGradientsExperimental,
_ContainerWithImageExperimental,
_ContainerWithLayersExperimental,
_ContainerWithLinearAlgebraExperimental,
_ContainerWithManipulationExperimental,
_ContainerWithNormsExperimental,
_ContainerWithRandomExperimental,
_ContainerWithSearchingExperimental,
_ContainerWithSetExperimental,
_ContainerWithSortingExperimental,
_ContainerWithStatisticalExperimental,
_ContainerWithUtilityExperimental,
_ContainerWithLossesExperimental,
):
def __init__(
self,
dict_in=None,
queues=None,
queue_load_sizes=None,
container_combine_method="list_join",
queue_timeout=None,
print_limit=10,
key_length_limit=None,
print_indent=4,
print_line_spacing=0,
ivyh=None,
default_key_color="green",
keyword_color_dict=None,
rebuild_child_containers=False,
types_to_iteratively_nest=None,
alphabetical_keys=True,
dynamic_backend=None,
**kwargs
):
ContainerBase.__init__(
self,
dict_in,
queues,
queue_load_sizes,
container_combine_method,
queue_timeout,
print_limit,
key_length_limit,
print_indent,
print_line_spacing,
ivyh,
default_key_color,
keyword_color_dict,
rebuild_child_containers,
types_to_iteratively_nest,
alphabetical_keys,
dynamic_backend,
**kwargs
)
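# Illustrative construction sketch (the keyword values here are arbitrary):
# >>> c = ivy.Container(dict_in={"a": ivy.array([1, 2])},
# ...                   print_limit=5, alphabetical_keys=False)
# >>> c.a
# ivy.array([1, 2])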
# Built-ins #
# ----------#
def __pos__(self):
return self
def __neg__(self):
return self.cont_map(lambda x, kc: -x, map_sequences=True)
def __pow__(self, power):
"""ivy.Container special method for the power operator, calling
:code:`operator.pow` for each of the corresponding leaves of the two
containers.
Parameters
----------
self
input container. Should have a numeric data type.
power
input array or container of powers. Must be compatible
with ``self`` (see :ref:`broadcasting`). Should have a numeric
data type.
Returns
-------
ret
a container containing the element-wise powers. The returned array must have a
data type determined by :ref:`type-promotion`.
Examples
--------
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0, 1]), b=ivy.array([2, 3]))
>>> y = x ** 2
>>> print(y)
{
a: ivy.array([0, 1]),
b: ivy.array([4, 9])
}
>>> x = ivy.Container(a=ivy.array([0, 1.2]), b=ivy.array([2.2, 3.]))
>>> y = x ** 3.1
>>> print(y)
{
a: ivy.array([0., 1.75979435]),
b: ivy.array([11.52153397, 30.13532257])
}
"""
if isinstance(power, ivy.Container):
return ivy.Container.cont_multi_map(
lambda xs, _: operator.pow(xs[0], xs[1]), [self, power], map_nests=True
)
return self.cont_map(lambda x, kc: x**power, map_sequences=True)
def __rpow__(self, power):
return self.cont_map(lambda x, kc: power**x, map_sequences=True)
def __ipow__(self, power):
if isinstance(power, ivy.Container):
return ivy.Container.cont_multi_map(
lambda xs, _: operator.ipow(xs[0], xs[1]), [self, power], map_nests=True
)
return self.cont_map(lambda x, _: operator.ipow(x, power), map_sequences=True)
def __add__(self, other):
"""ivy.Container special method for the add operator, calling
:code:`operator.add` for each of the corresponding leaves of the two
containers.
Parameters
----------
self
first input container. Should have a numeric data type.
other
second input array or container. Must be compatible with ``self``
(see :ref:`broadcasting`). Should have a numeric data type.
Returns
-------
ret
a container containing the element-wise sums. The returned array must have a
data type determined by :ref:`type-promotion`.
Examples
--------
With :code:`Number` instances at the leaves:
>>> x = ivy.Container(a=1, b=2)
>>> y = ivy.Container(a=3, b=4)
>>> z = x + y
>>> print(z)
{
a: 4,
b: 6
}
With :class:`ivy.Array` instances at the leaves:
>>> x = ivy.Container(a=ivy.array([1, 2, 3]),
... b=ivy.array([2, 3, 4]))
>>> y = ivy.Container(a=ivy.array([4, 5, 6]),
... b=ivy.array([5, 6, 7]))
>>> z = x + y
>>> print(z)
{
a: ivy.array([5, 7, 9]),
b: ivy.array([7, 9, 11])
}
With a mix of :class:`ivy.Container` and :class:`ivy.Array` instances:
>>> x = ivy.Container(a=ivy.array([[4.], [5.], [6.]]),
... b=ivy.array([[5.], [6.], [7.]]))
>>> y = ivy.array([[1.1, 2.3, -3.6]])
>>> z = x + y
>>> print(z)
{
a: ivy.array([[5.1, 6.3, 0.4],
[6.1, 7.3, 1.4],
[7.1, 8.3, 2.4]]),
b: ivy.array([[6.1, 7.3, 1.4],
[7.1, 8.3, 2.4],
[8.1, 9.3, 3.4]])
}
"""
return ivy.Container.cont_multi_map(
lambda xs, _: operator.add(xs[0], xs[1]), [self, other], map_nests=True
)
def __radd__(self, other):
"""ivy.Container reverse special method for the add operator, calling
:code:`operator.add` for each of the corresponding leaves of the two
containers.
Parameters
----------
self
first input container. Should have a numeric data type.
other
second input array or container. Must be compatible with ``self``
(see :ref:`broadcasting`). Should have a numeric data type.
Returns
-------
ret
a container containing the element-wise sums. The returned array must have a
data type determined by :ref:`type-promotion`.
Examples
--------
>>> x = 1
>>> y = ivy.Container(a=3, b=4)
>>> z = x + y
>>> print(z)
{
a: 4,
b: 5
}
"""
return ivy.Container.cont_multi_map(
lambda xs, _: operator.add(xs[0], xs[1]), [other, self], map_nests=True
)
def __iadd__(self, other):
return ivy.Container.cont_multi_map(
lambda xs, _: operator.iadd(xs[0], xs[1]), [self, other], map_nests=True
)
def __sub__(self, other):
"""ivy.Container special method for the subtract operator, calling
:code:`operator.sub` for each of the corresponding leaves of the two
containers.
Parameters
----------
self
first input container. Should have a numeric data type.
other
second input array or container. Must be compatible with ``self``
(see :ref:`broadcasting`). Should have a numeric data type.
Returns
-------
ret
a container containing the element-wise differences. The returned array must
have a data type determined by :ref:`type-promotion`.
Examples
--------
With :code:`Number` instances at the leaves:
>>> x = ivy.Container(a=1, b=2)
>>> y = ivy.Container(a=3, b=4)
>>> z = x - y
>>> print(z)
{
a: -2,
b: -2
}
With :class:`ivy.Array` instances at the leaves:
>>> x = ivy.Container(a=ivy.array([1, 2, 3]),
... b=ivy.array([4, 3, 2]))
>>> y = ivy.Container(a=ivy.array([4, 5, 6]),
... b=ivy.array([6, 5, 4]))
>>> z = x - y
>>> print(z)
{
a: ivy.array([-3, -3, -3]),
b: ivy.array([-2, -2, -2])
}
With a mix of :class:`ivy.Container` and :class:`ivy.Array` instances:
>>> x = ivy.Container(a=ivy.array([[4.], [5.], [6.]]),
... b=ivy.array([[5.], [6.], [7.]]))
>>> y = ivy.array([[1.1, 2.3, -3.6]])
>>> z = x - y
>>> print(z)
{
a: ivy.array([[2.9, 1.7, 7.6],
[3.9, 2.7, 8.6],
[4.9, 3.7, 9.6]]),
b: ivy.array([[3.9, 2.7, 8.6],
[4.9, 3.7, 9.6],
[5.9, 4.7, 10.6]])
}
"""
return ivy.Container.cont_multi_map(
lambda xs, _: operator.sub(xs[0], xs[1]), [self, other], map_nests=True
)
def __isub__(self, other):
return ivy.Container.cont_multi_map(
lambda xs, _: operator.isub(xs[0], xs[1]), [self, other], map_nests=True
)
def __rsub__(self, other):
"""ivy.Container reverse special method for the subtract operator,
calling :code:`operator.sub` for each of the corresponding leaves of
the two containers.
Parameters
----------
self
first input container. Should have a numeric data type.
other
second input array or container. Must be compatible with ``self``
(see :ref:`broadcasting`). Should have a numeric data type.
Returns
-------
ret
a container containing the element-wise differences. The returned array must
have a data type determined by :ref:`type-promotion`.
Examples
--------
>>> x = 1
>>> y = ivy.Container(a=3, b=4)
>>> z = x - y
>>> print(z)
{
a: -2,
b: -3
}
"""
return ivy.Container.cont_multi_map(
lambda xs, _: operator.sub(xs[0], xs[1]), [other, self], map_nests=True
)
def __mul__(self, other):
return ivy.Container.cont_multi_map(
lambda xs, _: operator.mul(xs[0], xs[1]), [self, other], map_nests=True
)
def __rmul__(self, other):
return ivy.Container.cont_multi_map(
lambda xs, _: operator.mul(xs[0], xs[1]), [other, self], map_nests=True
)
def __imul__(self, other):
return ivy.Container.cont_multi_map(
lambda xs, _: operator.imul(xs[0], xs[1]), [self, other], map_nests=True
)
def __mod__(self, other):
return ivy.Container.cont_multi_map(
lambda xs, _: operator.mod(xs[0], xs[1]), [self, other], map_nests=True
)
def __rmod__(self, other):
return ivy.Container.cont_multi_map(
lambda xs, _: operator.mod(xs[0], xs[1]), [other, self], map_nests=True
)
def __imod__(self, other):
return ivy.Container.cont_multi_map(
lambda xs, _: operator.imod(xs[0], xs[1]), [self, other], map_nests=True
)
def __divmod__(self, other):
return ivy.Container.cont_multi_map(
lambda xs, _: (operator.floordiv(xs[0], xs[1]), operator.mod(xs[0], xs[1])),
[self, other],
map_nests=True,
)
def __rdivmod__(self, other):
return ivy.Container.cont_multi_map(
lambda xs, _: (operator.floordiv(xs[0], xs[1]), operator.mod(xs[0], xs[1])),
[other, self],
map_nests=True,
)
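# Illustrative sketch: divmod is applied leaf-wise, so each result leaf holds
# a (quotient, remainder) pair (values shown assume integer leaves):
# >>> divmod(ivy.Container(a=7, b=9), 2)
# {
#     a: (3, 1),
#     b: (4, 1)
# }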
def __truediv__(self, other):
"""ivy.Container special method for the divide operator, calling
:code:`operator.truediv` for each of the corresponding leaves of the
two containers.
Parameters
----------
self
first input container. Should have a numeric data type.
other
second input array or container. Must be compatible with ``self``
(see :ref:`broadcasting`). Should have a numeric data type.
Returns
-------
ret
a container containing the element-wise differences. The returned array must
have a data type determined by :ref:`type-promotion`.
Examples
--------
With :code:`Number` instances at the leaves:
>>> x = ivy.Container(a=1, b=2)
>>> y = ivy.Container(a=5, b=4)
>>> z = x / y
>>> print(z)
{
a: 0.2,
b: 0.5
}
With :class:`ivy.Array` instances at the leaves:
>>> x = ivy.Container(a=ivy.array([1, 2, 3]),
... b=ivy.array([4, 3, 2]))
>>> y = ivy.Container(a=ivy.array([4, 5, 6]),
... b=ivy.array([6, 5, 4]))
>>> z = x / y
>>> print(z)
{
a: ivy.array([0.25, 0.40000001, 0.5]),
b: ivy.array([0.66666669, 0.60000002, 0.5])
}
"""
return ivy.Container.cont_multi_map(
lambda xs, _: operator.truediv(xs[0], xs[1]), [self, other], map_nests=True
)
def __rtruediv__(self, other):
return ivy.Container.cont_multi_map(
lambda xs, _: operator.truediv(xs[0], xs[1]), [other, self], map_nests=True
)
def __itruediv__(self, other):
return ivy.Container.cont_multi_map(
lambda xs, _: operator.itruediv(xs[0], xs[1]), [self, other], map_nests=True
)
def __floordiv__(self, other):
if isinstance(other, ivy.Container):
return ivy.Container.cont_multi_map(
lambda xs, _: operator.floordiv(xs[0], xs[1]),
[self, other],
map_nests=True,
)
return self.cont_map(lambda x, kc: x // other, map_sequences=True)
def __rfloordiv__(self, other):
return self.cont_map(lambda x, kc: other // x, map_sequences=True)
def __ifloordiv__(self, other):
if isinstance(other, ivy.Container):
return ivy.Container.cont_multi_map(
lambda xs, _: operator.ifloordiv(xs[0], xs[1]),
[self, other],
map_nests=True,
)
return self.cont_map(
lambda x, kc: operator.ifloordiv(x, other), map_sequences=True
)
def __matmul__(self, other):
if isinstance(other, ivy.Container):
return ivy.Container.cont_multi_map(
lambda xs, _: operator.matmul(xs[0], xs[1]),
[self, other],
map_nests=True,
)
return self.cont_map(
lambda x, kc: operator.matmul(x, other), map_sequences=True
)
def __rmatmul__(self, other):
if isinstance(other, ivy.Container):
return ivy.Container.cont_multi_map(
lambda xs, _: operator.matmul(xs[0], xs[1]),
[other, self],
map_nests=True,
)
return self.cont_map(
lambda x, kc: operator.matmul(other, x), map_sequences=True
)
def __imatmul__(self, other):
if isinstance(other, ivy.Container):
return ivy.Container.cont_multi_map(
lambda xs, _: operator.imatmul(xs[0], xs[1]),
[self, other],
map_nests=True,
)
return self.cont_map(
lambda x, kc: operator.imatmul(x, other), map_sequences=True
)
def __abs__(self):
"""ivy.Container special method for the abs operator, calling
:code:`operator.abs` for each of the corresponding leaves of the two
containers.
Parameters
----------
self
input Container. Should have leaves with numeric data type.
Returns
-------
ret
A container containing the element-wise results.
Examples
--------
With :class:`ivy.Container` instances:
>>> x = ivy.Container(a=ivy.array([1, -2, 3]),
... b=ivy.array([-1, 0, 5]))
>>> y = abs(x)
>>> print(y)
{
a: ivy.array([1, 2, 3]),
b: ivy.array([1, 0, 5])
}
"""
return self.cont_map(lambda x, kc: operator.abs(x), map_sequences=True)
def __lt__(self, other):
"""ivy.Container special method for the less operator, calling
:code:`operator.lt` for each of the corresponding leaves of the two
containers.
Parameters
----------
self
first input Container. May have any data type.
other
second input Container. Must be compatible with ``self`` (see :ref:`broadcasting`).
May have any data type.
Returns
-------
ret
A container containing the element-wise results. Any returned array inside
must have a data type of bool.
Examples
--------
With :class:`ivy.Container` instances:
>>> x = ivy.Container(a=ivy.array([4, 5, 6]),b=ivy.array([2, 3, 4]))
>>> y = ivy.Container(a=ivy.array([1, 5, 3]),b=ivy.array([5, 3, 7]))
>>> z = x < y
>>> print(z)
{
a: ivy.array([False, False, False]),
b: ivy.array([True, False, True])
}
"""
if isinstance(other, ivy.Container):
return ivy.Container.cont_multi_map(
lambda xs, _: operator.lt(xs[0], xs[1]), [self, other], map_nests=True
)
return self.cont_map(lambda x, kc: x < other, map_sequences=True)
def __le__(self, other):
"""ivy.Container special method for the less_equal operator, calling
:code:`operator.le` for each of the corresponding leaves of the two
containers.
Parameters
----------
self
first input Container. May have any data type.
other
second input Container. Must be compatible with ``self`` (see :ref:`broadcasting`).
May have any data type.
Returns
-------
ret
A container containing the element-wise results. Any returned array inside
must have a data type of bool.
Examples
--------
With :class:`ivy.Container` instances:
>>> x = ivy.Container(a=ivy.array([4, 5, 6]),b=ivy.array([2, 3, 4]))
>>> y = ivy.Container(a=ivy.array([1, 5, 3]),b=ivy.array([5, 3, 7]))
>>> z = x <= y
>>> print(z)
{
a: ivy.array([False, True, False]),
b: ivy.array([True, True, True])
}
"""
if isinstance(other, ivy.Container):
return ivy.Container.cont_multi_map(
lambda xs, _: operator.le(xs[0], xs[1]), [self, other], map_nests=True
)
return self.cont_map(lambda x, kc: x <= other, map_sequences=True)
def __eq__(self, other):
"""ivy.Container special method for the equal operator, calling
:code:`operator.eq` for each of the corresponding leaves of the two
containers.
Parameters
----------
self
first input Container. May have any data type.
other
second input Container. Must be compatible with ``self`` (see :ref:`broadcasting`).
May have any data type.
Returns
-------
ret
A container containing the element-wise results. Any returned array inside
must have a data type of bool.
Examples
--------
With :class:`ivy.Container` instances:
>>> x1 = ivy.Container(a=ivy.array([1, 2, 3]),
... b=ivy.array([1, 3, 5]))
>>> x2 = ivy.Container(a=ivy.array([1, 2, 3]),
... b=ivy.array([1, 4, 5]))
>>> y = x1 == x2
>>> print(y)
{
a: ivy.array([True, True, True]),
b: ivy.array([True, False, True])
}
>>> x1 = ivy.Container(a=ivy.array([1.0, 2.0, 3.0]),
... b=ivy.array([1, 4, 5]))
>>> x2 = ivy.Container(a=ivy.array([1, 3, 3.0]),
... b=ivy.array([1.0, 4.0, 5.0]))
>>> y = x1 == x2
>>> print(y)
{
a: ivy.array([True, False, True]),
b: ivy.array([True, True, True])
}
>>> x1 = ivy.Container(a=ivy.array([1.0, 2.0, 3.0]),
... b=ivy.array([1, 4, 5]))
>>> x2 = ivy.Container(a=ivy.array([1, 2, 3.0]),
... b=ivy.array([1.0, 4.0, 5.0]))
>>> y = x1 == x2
>>> print(y)
{
a: ivy.array([True, True, True]),
b: ivy.array([True, True, True])
}
"""
if isinstance(other, ivy.Container):
return ivy.Container.cont_multi_map(
lambda xs, _: operator.eq(xs[0], xs[1]), [self, other], map_nests=True
)
return self.cont_map(lambda x, kc: x == other, map_sequences=True)
def __ne__(self, other):
"""ivy.Container special method for the not_equal operator, calling
:code:`operator.ne` for each of the corresponding leaves of the two
containers.
Parameters
----------
self
first input Container. May have any data type.
other
second input Container. Must be compatible with ``self`` (see :ref:`broadcasting`).
May have any data type.
Returns
-------
ret
A container containing the element-wise results. Any returned array inside
must have a data type of bool.
Examples
--------
With :class:`ivy.Container` instances:
>>> x1 = ivy.Container(a=ivy.array([1, 2, 3]),
... b=ivy.array([1, 3, 5]))
>>> x2 = ivy.Container(a=ivy.array([1, 2, 3]),
... b=ivy.array([1, 4, 5]))
>>> y = x1 != x2
>>> print(y)
{
a: ivy.array([False, False, False]),
b: ivy.array([False, True, False])
}
>>> x1 = ivy.Container(a=ivy.array([1.0, 2.0, 3.0]),
... b=ivy.array([1, 4, 5]))
>>> x2 = ivy.Container(a=ivy.array([1, 3, 3.0]),
... b=ivy.array([1.0, 4.0, 5.0]))
>>> y = x1 != x2
>>> print(y)
{
a: ivy.array([False, True, False]),
b: ivy.array([False, False, False])
}
>>> x1 = ivy.Container(a=ivy.array([1.0, 2.0, 3.0]),
... b=ivy.array([1, 4, 5]))
>>> x2 = ivy.Container(a=ivy.array([1, 2, 3.0]),
... b=ivy.array([1.0, 4.0, 5.0]))
>>> y = x1 != x2
>>> print(y)
{
a: ivy.array([False, False, False]),
b: ivy.array([False, False, False])
}
"""
if isinstance(other, ivy.Container):
return ivy.Container.cont_multi_map(
lambda xs, _: operator.ne(xs[0], xs[1]), [self, other], map_nests=True
)
return self.cont_map(lambda x, kc: x != other, map_sequences=True)
def __gt__(self, other):
"""ivy.Container special method for the greater operator, calling
:code:`operator.gt` for each of the corresponding leaves of the two
containers.
Parameters
----------
self
first input Container. May have any data type.
other
second input Container. Must be compatible with ``self`` (see :ref:`broadcasting`).
May have any data type.
Returns
-------
ret
A container containing the element-wise results. Any returned array inside
must have a data type of bool.
Examples
--------
With :class:`ivy.Container` instances:
>>> x = ivy.Container(a=ivy.array([4, 5, 6]),b=ivy.array([2, 3, 4]))
>>> y = ivy.Container(a=ivy.array([1, 5, 3]),b=ivy.array([5, 3, 7]))
>>> z = x > y
>>> print(z)
{
a: ivy.array([True, False, True]),
b: ivy.array([False, False, False])
}
"""
if isinstance(other, ivy.Container):
return ivy.Container.cont_multi_map(
lambda xs, _: operator.gt(xs[0], xs[1]), [self, other], map_nests=True
)
return self.cont_map(lambda x, kc: x > other, map_sequences=True)
def __ge__(self, other):
"""ivy.Container special method for the greater_equal operator, calling
:code:`operator.ge` for each of the corresponding leaves of the two
containers.
Parameters
----------
self
first input Container. May have any data type.
other
second input Container. Must be compatible with ``self`` (see :ref:`broadcasting`).
May have any data type.
Returns
-------
ret
A container containing the element-wise results. Any returned array inside
must have a data type of bool.
Examples
--------
With :class:`ivy.Container` instances:
>>> x = ivy.Container(a=ivy.array([4, 5, 6]),b=ivy.array([2, 3, 4]))
>>> y = ivy.Container(a=ivy.array([1, 5, 3]),b=ivy.array([5, 3, 7]))
>>> z = x >= y
>>> print(z)
{
a: ivy.array([True, True, True]),
b: ivy.array([False, True, False])
}
"""
if isinstance(other, ivy.Container):
return ivy.Container.cont_multi_map(
lambda xs, _: operator.ge(xs[0], xs[1]), [self, other], map_nests=True
)
return self.cont_map(lambda x, kc: x >= other, map_sequences=True)
def __and__(self, other):
if isinstance(other, ivy.Container):
return ivy.Container.cont_multi_map(
lambda xs, _: operator.and_(xs[0], xs[1]), [self, other], map_nests=True
)
return self.cont_map(lambda x, kc: operator.and_(x, other), map_sequences=True)
def __iand__(self, other):
if isinstance(other, ivy.Container):
return ivy.Container.cont_multi_map(
lambda xs, _: operator.iand(xs[0], xs[1]), [self, other], map_nests=True
)
return self.cont_map(lambda x, kc: operator.iand(x, other), map_sequences=True)
def __rand__(self, other):
return self.cont_map(lambda x, kc: operator.and_(other, x), map_sequences=True)
def __or__(self, other):
if isinstance(other, ivy.Container):
return ivy.Container.cont_multi_map(
lambda xs, _: operator.or_(xs[0], xs[1]), [self, other], map_nests=True
)
return self.cont_map(lambda x, kc: operator.or_(x, other), map_sequences=True)
def __ror__(self, other):
return self.cont_map(lambda x, kc: operator.or_(other, x), map_sequences=True)
def __ior__(self, other):
return ivy.Container.cont_multi_map(
lambda xs, _: operator.ior(xs[0], xs[1]), [self, other], map_nests=True
)
def __invert__(self):
return self.cont_map(lambda x, kc: operator.invert(x), map_sequences=True)
def __xor__(self, other):
"""ivy.Container special method for the ge operator, calling
:code:`operator.ge` for each of the corresponding leaves of the two
containers.
Parameters
----------
self
first input Container.
other
second input Container. Arrays inside must be compatible with ``self``
(see :ref:`broadcasting`). Should have an integer or boolean data type.
Returns
-------
ret
a container containing the element-wise results. Any returned arrays inside
must have a data type determined by :ref:`type-promotion`.
Examples
--------
With :class:`ivy.Container` instances:
>>> x = ivy.Container(a=ivy.array([89]), b=ivy.array([2]))
>>> y = ivy.Container(a=ivy.array([12]), b=ivy.array([3]))
>>> z = x ^ y
>>> print(z)
{
a: ivy.array([85]),
b: ivy.array([1])
}
"""
if isinstance(other, ivy.Container):
return ivy.Container.cont_multi_map(
lambda xs, _: operator.xor(xs[0], xs[1]), [self, other], map_nests=True
)
return self.cont_map(lambda x, kc: operator.xor(x, other), map_sequences=True)
def __rxor__(self, other):
return self.cont_map(lambda x, kc: operator.xor(other, x), map_sequences=True)
def __ixor__(self, other):
if isinstance(other, ivy.Container):
return ivy.Container.cont_multi_map(
lambda xs, _: operator.ixor(xs[0], xs[1]), [self, other], map_nests=True
)
return self.cont_map(lambda x, kc: operator.ixor(x, other), map_sequences=True)
def __lshift__(self, other):
if isinstance(other, ivy.Container):
return ivy.Container.cont_multi_map(
lambda xs, _: operator.lshift(xs[0], xs[1]),
[self, other],
map_nests=True,
)
return self.cont_map(
lambda x, kc: operator.lshift(x, other), map_sequences=True
)
def __rlshift__(self, other):
return self.cont_map(
lambda x, kc: operator.lshift(other, x), map_sequences=True
)
def __ilshift__(self, other):
if isinstance(other, ivy.Container):
return ivy.Container.cont_multi_map(
lambda xs, _: operator.ilshift(xs[0], xs[1]),
[self, other],
map_nests=True,
)
return self.cont_map(
lambda x, kc: operator.ilshift(x, other), map_sequences=True
)
def __rshift__(self, other):
"""ivy.Container special method for the right shift operator, calling
:code:`operator.rshift` for each of the corresponding leaves of the two
containers.
Parameters
----------
self
first input container. Should have an integer data type.
other
second input array or container. Must be compatible with ``self``
(see :ref:`broadcasting`). Should have an integer data type.
Each element must be greater than or equal to ``0``.
Returns
-------
ret
a container containing the element-wise results. The returned array
must have a data type determined by :ref:`type-promotion`.
Examples
--------
With :code:`Number` instances at the leaves:
>>> x = ivy.Container(a=128, b=43)
>>> y = ivy.Container(a=5, b=3)
>>> z = x >> y
>>> print(z)
{
a: 4,
b: 5
}
With :class:`ivy.Array` instances at the leaves:
>>> x = ivy.Container(a=ivy.array([16, 40, 120]),
... b=ivy.array([15, 45, 143]))
>>> y = ivy.Container(a=ivy.array([1, 2, 3]),
... b=ivy.array([0, 3, 4]))
>>> z = x >> y
>>> print(z)
{
a: ivy.array([8, 10, 15]),
b: ivy.array([15, 5, 8])
}
With a mix of :class:`ivy.Container` and :class:`ivy.Array` instances:
>>> x = ivy.Container(a=ivy.array([16, 40, 120]),
... b=ivy.array([15, 45, 143]))
>>> y = ivy.array([1, 2, 3])
>>> z = x >> y
>>> print(z)
{
a: ivy.array([8, 10, 15]),
b: ivy.array([7, 11, 17])
}
"""
if isinstance(other, ivy.Container):
return ivy.Container.cont_multi_map(
lambda xs, _: operator.rshift(xs[0], xs[1]),
[self, other],
map_nests=True,
)
return self.cont_map(
lambda x, kc: operator.rshift(x, other), map_sequences=True
)
def __rrshift__(self, other):
"""ivy.Container reverse special method for the right shift operator,
calling :code:`operator.rshift` for each of the corresponding leaves of
the two containers.
Parameters
----------
self
first input container. Should have an integer data type.
other
second input array or container. Must be compatible with ``self``
(see :ref:`broadcasting`). Should have an integer data type. Each element
must be greater than or equal to ``0``.
Returns
-------
ret
a container containing the element-wise results. The returned array
must have a data type determined by :ref:`type-promotion`.
Examples
--------
>>> a = 64
>>> b = ivy.Container(a = ivy.array([0, 1, 2]),
... b = ivy.array([3, 4, 5]))
>>> y = a >> b
>>> print(y)
{
a: ivy.array([64, 32, 16]),
b: ivy.array([8, 4, 2])
}
"""
return self.cont_map(lambda x, kc: other >> x, map_sequences=True)
def __irshift__(self, other):
if isinstance(other, ivy.Container):
return ivy.Container.cont_multi_map(
lambda xs, _: operator.irshift(xs[0], xs[1]),
[self, other],
map_nests=True,
)
return self.cont_map(
lambda x, kc: operator.irshift(x, other), map_sequences=True
)
| ivy/ivy/data_classes/container/container.py/0 | {
"file_path": "ivy/ivy/data_classes/container/container.py",
"repo_id": "ivy",
"token_count": 17895
} | 11 |
# global
from typing import Optional, Union, List, Dict, Tuple, Literal, Sequence, Callable
# local
import ivy
from ivy.data_classes.container.base import ContainerBase
class _ContainerWithLayersExperimental(ContainerBase):
@staticmethod
def static_max_pool1d(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
kernel: Union[int, Tuple[int, ...], ivy.Container],
strides: Union[int, Tuple[int, ...], ivy.Container],
padding: Union[str, int, Tuple[int], List[Tuple[int, int]], ivy.Container],
/,
*,
data_format: Union[str, ivy.Container] = "NWC",
dilation: Union[int, Tuple[int], ivy.Container] = 1,
ceil_mode: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.max_pool1d. This method
simply wraps the function, and so the docstring for ivy.max_pool1d also
applies to this method with minimal changes.
Parameters
----------
x
Container of input images *[batch_size, w, d_in]*.
kernel
Size of the kernel i.e., the sliding window for each
dimension of input. *[w]*.
strides
The stride of the sliding window for each dimension of input.
padding
"SAME" or "VALID" indicating the algorithm, or list
indicating the per-dimension paddings.
data_format
"NWC" or "NCW". Defaults to "NWC".
dilation
The stride between elements within a sliding window, must be > 0.
ceil_mode
If True, ceil is used instead of floor to compute the output shape.
This ensures that every element is covered by a sliding window.
out
optional output array, for writing the result to.
Returns
-------
ret
The result of the pooling operation.
Examples
--------
>>> a = ivy.arange(12.).reshape((2,2,3))
>>> b = ivy.arange(24.).reshape((2,3,4))
>>> x = ivy.Container({'a': a, 'b': b})
>>> print(ivy.Container.static_max_pool1d(x,2, 2, "VALID"))
{
a: ivy.array([[[3., 4., 5.]],
[[9., 10., 11.]]]),
b: ivy.array([[[4., 5., 6., 7.]],
[[16., 17., 18., 19.]]])
}
"""
return ContainerBase.cont_multi_map_in_function(
"max_pool1d",
x,
kernel,
strides,
padding,
data_format=data_format,
dilation=dilation,
ceil_mode=ceil_mode,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def max_pool1d(
self: ivy.Container,
kernel: Union[int, Tuple[int, ...], ivy.Container],
strides: Union[int, Tuple[int, ...], ivy.Container],
padding: Union[str, int, Tuple[int], List[Tuple[int, int]], ivy.Container],
/,
*,
data_format: Union[str, ivy.Container] = "NWC",
dilation: Union[int, Tuple[int], ivy.Container] = 1,
ceil_mode: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of `ivy.max_pool1d`. This
method simply wraps the function, and so the docstring for
`ivy.max_pool1d` also applies to this method with minimal changes.
Parameters
----------
self
Container of input images *[batch_size, w, d_in]*.
kernel
Size of the kernel i.e., the sliding window for each
dimension of input. *[w]*.
strides
The stride of the sliding window for each dimension of input.
padding
SAME" or "VALID" indicating the algorithm, or list
indicating the per-dimension paddings.
data_format
"NWC" or "NCW". Defaults to "NWC".
dilation
The stride between elements within a sliding window, must be > 0.
ceil_mode
If True, ceil is used instead of floor to compute the output shape.
This ensures that every element is covered by a sliding window.
out
optional output array, for writing the result to.
Returns
-------
ret
The result of the pooling operation.
Examples
--------
>>> a = ivy.arange(12.).reshape((2,2,3))
>>> b = ivy.arange(24.).reshape((2,3,4))
>>> x = ivy.Container({'a': a, 'b': b})
>>> print(x.max_pool1d(2, 2, "VALID"))
{
a: ivy.array([[[3., 4., 5.]],
[[9., 10., 11.]]]),
b: ivy.array([[[4., 5., 6., 7.]],
[[16., 17., 18., 19.]]])
}
"""
return self.static_max_pool1d(
self,
kernel,
strides,
padding,
data_format=data_format,
dilation=dilation,
ceil_mode=ceil_mode,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def static_max_pool2d(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
kernel: Union[int, Tuple[int, ...], ivy.Container],
strides: Union[int, Tuple[int, ...], ivy.Container],
padding: Union[str, int, Tuple[int], List[Tuple[int, int]], ivy.Container],
/,
*,
data_format: Union[str, ivy.Container] = "NHWC",
dilation: Union[int, Tuple[int, ...], ivy.Container] = 1,
ceil_mode: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.max_pool2dd. This method
simply wraps the function, and so the docstring for ivy.max_pool2d also
applies to this method with minimal changes.
Parameters
----------
x
Input image *[batch_size,h,w,d_in]*.
kernel
The size of the window to take a max over.
strides
The stride of the sliding window for each dimension of input.
padding
"SAME" or "VALID" indicating the algorithm, or list indicating
the per-dimension paddings.
data_format
"NHWC" or "NCHW". Defaults to "NHWC".
dilation
The stride between elements within a sliding window, must be > 0.
ceil_mode
If True, ceil is used instead of floor to compute the output shape.
This ensures that every element is covered by a sliding window.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
The result of the pooling operation.
Examples
--------
>>> a = ivy.arange(12).reshape((2, 1, 3, 2))
>>> b = ivy.arange(48).reshape((2, 4, 3, 2))
>>> x = ivy.Container({'a': a, 'b': b})
>>> print(ivy.Container.static_max_pool2d(x, (2, 2), (1, 1), "SAME"))
{
a: (<class ivy.array.array.Array> shape=[2, 1, 3, 2]),
b: (<class ivy.array.array.Array> shape=[2, 4, 3, 2])
}
"""
return ContainerBase.cont_multi_map_in_function(
"max_pool2d",
x,
kernel,
strides,
padding,
data_format=data_format,
dilation=dilation,
ceil_mode=ceil_mode,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def max_pool2d(
self: ivy.Container,
kernel: Union[int, Tuple[int, ...], ivy.Container],
strides: Union[int, Tuple[int, ...], ivy.Container],
padding: Union[str, int, Tuple[int], List[Tuple[int, int]], ivy.Container],
/,
*,
data_format: Union[str, ivy.Container] = "NHWC",
dilation: Union[int, Tuple[int, ...], ivy.Container] = 1,
ceil_mode: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of `ivy.max_pool2d`. This
method simply wraps the function, and so the docstring for
`ivy.max_pool2d` also applies to this method with minimal changes.
Parameters
----------
self
Input image *[batch_size,h,w,d_in]*.
kernel
The size of the window to take a max over.
strides
The stride of the sliding window for each dimension of input.
padding
"SAME" or "VALID" indicating the algorithm, or list indicating
the per-dimension paddings.
data_format
"NHWC" or "NCHW". Defaults to "NHWC".
dilation
The stride between elements within a sliding window, must be > 0.
ceil_mode
If True, ceil is used instead of floor to compute the output shape.
This ensures that every element is covered by a sliding window.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
The result of the pooling operation.
Examples
--------
>>> a = ivy.arange(24.).reshape((1, 2, 3, 4))
>>> b = ivy.arange(48.).reshape((2, 4, 3, 2))
>>> x = ivy.Container(a=a, b=b)
>>> y = x.max_pool2d(3, 1, "VALID")
>>> print(y)
{
a: ivy.array([], shape=(1, 0, 1, 4)),
b: ivy.array([[[[16., 17.]],
[[22., 23.]]],
[[[40., 41.]],
[[46., 47.]]]])
}
"""
return self.static_max_pool2d(
self,
kernel,
strides,
padding,
data_format=data_format,
dilation=dilation,
ceil_mode=ceil_mode,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def static_max_pool3d(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
kernel: Union[int, Tuple[int, ...], ivy.Container],
strides: Union[int, Tuple[int, ...], ivy.Container],
padding: Union[str, int, Tuple[int], List[Tuple[int, int]], ivy.Container],
/,
*,
data_format: Union[str, ivy.Container] = "NDHWC",
dilation: Union[int, Tuple[int, ...], ivy.Container] = 1,
ceil_mode: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.max_pool3d. This method
simply wraps the function, and so the docstring for ivy.max_pool3d also
applies to this method with minimal changes.
Parameters
----------
x
Input volume *[batch_size,d,h,w,d_in]*.
kernel
Convolution filters *[d,h,w]*.
strides
The stride of the sliding window for each dimension of input.
padding
SAME" or "VALID" indicating the algorithm, or list indicating
the per-dimension paddings.
data_format
"NDHWC" or "NCDHW". Defaults to "NDHWC".
dilation
The stride between elements within a sliding window, must be > 0.
ceil_mode
If True, ceil is used instead of floor to compute the output shape.
This ensures that every element is covered by a sliding window.
out
optional output array, for writing the result to. It must
have a shape that the inputs broadcast to.
Returns
-------
ret
The result of the pooling operation.
Examples
--------
>>> a = ivy.arange(12).reshape((1, 2, 1, 3, 2))
>>> b = ivy.arange(48).reshape((2, 2, 2, 3, 2))
>>> x = ivy.Container({'a': a, 'b': b})
>>> print(ivy.Container.static_max_pool3d(x, 2, 1, "VALID"))
{
a: ivy.array([], shape=(1, 1, 0, 2, 2)),
b: ivy.array([[[[[20, 21],
[22, 23]]]],
[[[[44, 45],
[46, 47]]]]])
}
"""
return ContainerBase.cont_multi_map_in_function(
"max_pool3d",
x,
kernel,
strides,
padding,
data_format=data_format,
dilation=dilation,
ceil_mode=ceil_mode,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def max_pool3d(
self: ivy.Container,
kernel: Union[int, Tuple[int, ...], ivy.Container],
strides: Union[int, Tuple[int, ...], ivy.Container],
padding: Union[str, int, Tuple[int], List[Tuple[int, int]], ivy.Container],
/,
*,
data_format: Union[str, ivy.Container] = "NDHWC",
dilation: Union[int, Tuple[int, ...], ivy.Container] = 1,
ceil_mode: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.max_pool3d. This method
simply wraps the function, and so the docstring for ivy.max_pool3d also
applies to this method with minimal changes.
Parameters
----------
self
Input volume *[batch_size,d,h,w,d_in]*.
kernel
Convolution filters *[d,h,w]*.
strides
The stride of the sliding window for each dimension of input.
padding
SAME" or "VALID" indicating the algorithm, or list indicating
the per-dimension paddings.
data_format
"NDHWC" or "NCDHW". Defaults to "NDHWC".
dilation
The stride between elements within a sliding window, must be > 0.
ceil_mode
If True, ceil is used instead of floor to compute the output shape.
This ensures that every element is covered by a sliding window.
out
optional output array, for writing the result to. It must
have a shape that the inputs broadcast to.
Returns
-------
ret
The result of the pooling operation.
Examples
--------
>>> a = ivy.arange(24.).reshape((1, 2, 3, 4, 1))
>>> b = ivy.arange(48.).reshape((2, 4, 3, 2, 1))
>>> x = ivy.Container(a=a, b=b)
>>> print(x.max_pool3d(3, 1, "VALID"))
{
a: ivy.array([], shape=(1, 0, 1, 2, 1)),
b: ivy.array([], shape=(2, 2, 1, 0, 1))
}
"""
return self.static_max_pool3d(
self,
kernel,
strides,
padding,
data_format=data_format,
dilation=dilation,
ceil_mode=ceil_mode,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def static_avg_pool1d(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
kernel: Union[int, Tuple[int], ivy.Container],
strides: Union[int, Tuple[int], ivy.Container],
padding: Union[str, ivy.Container],
/,
*,
data_format: Union[str, ivy.Container] = "NWC",
count_include_pad: Union[bool, ivy.Container] = False,
ceil_mode: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.avg_pool1d. This method
simply wraps the function, and so the docstring for ivy.avg_pool1d also
applies to this method with minimal changes.
Parameters
----------
x
Container of input images *[batch_size, w, d_in]*.
kernel
Size of the kernel i.e., the sliding window for each
dimension of input. *[w]*.
strides
The stride of the sliding window for each dimension of input.
padding
SAME" or "VALID" indicating the algorithm, or list
indicating the per-dimension paddings.
data_format
NWC" or "NCW". Defaults to "NWC".
count_include_pad
Whether to include padding in the averaging calculation.
ceil_mode
Whether to use ceil or floor for creating the output shape.
out
optional output array, for writing the result to.
Returns
-------
ret
The result of the pooling operation.
Examples
--------
>>> a = ivy.arange(12.).reshape((2,2,3))
>>> b = ivy.arange(24.).reshape((2,3,4))
>>> x = ivy.Container({'a': a, 'b': b})
>>> print(ivy.Container.static_avg_pool1d(x,2, 2, "VALID"))
{
a: ivy.array([[[1.5, 2.5, 3.5]],
[[7.5, 8.5, 9.5]]]),
b: ivy.array([[[2., 3., 4., 5.]],
[[14., 15., 16., 17.]]])
}
"""
return ContainerBase.cont_multi_map_in_function(
"avg_pool1d",
x,
kernel,
strides,
padding,
data_format=data_format,
count_include_pad=count_include_pad,
ceil_mode=ceil_mode,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def avg_pool1d(
self: ivy.Container,
kernel: Union[int, Tuple[int], ivy.Container],
strides: Union[int, Tuple[int], ivy.Container],
padding: Union[str, ivy.Container],
/,
*,
data_format: Union[str, ivy.Container] = "NWC",
count_include_pad: Union[bool, ivy.Container] = False,
ceil_mode: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of `ivy.avg_pool1d`. This
method simply wraps the function, and so the docstring for
`ivy.avg_pool1d` also applies to this method with minimal changes.
Parameters
----------
self
Container of input images *[batch_size, w, d_in]*.
kernel
Size of the kernel i.e., the sliding window for each
dimension of input. *[w]*.
strides
The stride of the sliding window for each dimension of input.
padding
SAME" or "VALID" indicating the algorithm, or list
indicating the per-dimension paddings.
data_format
NWC" or "NCW". Defaults to "NWC".
count_include_pad
Whether to include padding in the averaging calculation.
ceil_mode
Whether to use ceil or floor for creating the output shape.
out
optional output array, for writing the result to.
Returns
-------
ret
The result of the pooling operation.
Examples
--------
>>> a = ivy.arange(12.).reshape((2,2,3))
>>> b = ivy.arange(24.).reshape((2,3,4))
>>> x = ivy.Container({'a': a, 'b': b})
>>> print(x.avg_pool1d(2, 2, "VALID"))
{
a: ivy.array([[[1.5, 2.5, 3.5]],
[[7.5, 8.5, 9.5]]]),
b: ivy.array([[[2., 3., 4., 5.]],
[[14., 15., 16., 17.]]])
}
"""
return self.static_avg_pool1d(
self,
kernel,
strides,
padding,
data_format=data_format,
count_include_pad=count_include_pad,
ceil_mode=ceil_mode,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def static_avg_pool2d(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
kernel: Union[int, Tuple[int], Tuple[int, int], ivy.Container],
strides: Union[int, Tuple[int], Tuple[int, int], ivy.Container],
padding: Union[str, ivy.Container],
/,
*,
data_format: Union[str, ivy.Container] = "NHWC",
count_include_pad: Union[bool, ivy.Container] = False,
ceil_mode: Union[bool, ivy.Container] = False,
divisor_override: Optional[Union[int, ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.avg_pool2d. This method
simply wraps the function, and so the docstring for ivy.avg_pool2d also
applies to this method with minimal changes.
Parameters
----------
x
Input image *[batch_size,h,w,d_in]*.
kernel
The size of the window to take a max over.
strides
The stride of the sliding window for each dimension of input.
padding
"SAME" or "VALID" indicating the algorithm, or list indicating
the per-dimension paddings.
data_format
"NHWC" or "NCHW". Defaults to "NHWC".
count_include_pad
Whether to include padding in the averaging calculation.
ceil_mode
Whether to use ceil or floor for creating the output shape.
divisor_override
If specified, it will be used as divisor,
otherwise kernel_size will be used.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
The result of the pooling operation.
Examples
--------
>>> a = ivy.arange(12).reshape((2, 1, 3, 2))
>>> b = ivy.arange(48).reshape((2, 4, 3, 2))
>>> x = ivy.Container({'a': a, 'b': b})
>>> y = ivy.Container.static_avg_pool2d(x, (2, 2), (1, 1), "SAME")
>>> print(y)
{
a: (<class ivy.data_classes.array.array.Array> shape=[2, 1, 3, 2]),
b: (<class ivy.data_classes.array.array.Array> shape=[2, 4, 3, 2])
}
"""
return ContainerBase.cont_multi_map_in_function(
"avg_pool2d",
x,
kernel,
strides,
padding,
data_format=data_format,
count_include_pad=count_include_pad,
ceil_mode=ceil_mode,
divisor_override=divisor_override,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def avg_pool2d(
self: ivy.Container,
kernel: Union[int, Tuple[int], Tuple[int, int], ivy.Container],
strides: Union[int, Tuple[int], Tuple[int, int], ivy.Container],
padding: Union[str, ivy.Container],
/,
*,
data_format: Union[str, ivy.Container] = "NHWC",
count_include_pad: Union[bool, ivy.Container] = False,
ceil_mode: Union[bool, ivy.Container] = False,
divisor_override: Optional[Union[int, ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of `ivy.avg_pool2d`. This
method simply wraps the function, and so the docstring for
`ivy.avg_pool2d` also applies to this method with minimal changes.
Parameters
----------
self
Input image *[batch_size,h,w,d_in]*.
kernel
The size of the window to take a max over.
strides
The stride of the sliding window for each dimension of input.
padding
"SAME" or "VALID" indicating the algorithm, or list indicating
the per-dimension paddings.
data_format
"NHWC" or "NCHW". Defaults to "NHWC".
count_include_pad
Whether to include padding in the averaging calculation.
ceil_mode
Whether to use ceil or floor for creating the output shape.
divisor_override
If specified, it will be used as divisor,
otherwise kernel_size will be used.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
The result of the pooling operation.
Examples
--------
>>> a = ivy.arange(12).reshape((2, 1, 3, 2))
>>> b = ivy.arange(48).reshape((2, 4, 3, 2))
>>> x = ivy.Container({'a': a, 'b': b})
>>> y = x.avg_pool2d(2, 1, "SAME")
>>> print(y)
{
a: (<class ivy.data_classes.array.array.Array> shape=[2, 1, 3, 2]),
b: (<class ivy.data_classes.array.array.Array> shape=[2, 4, 3, 2])
}
"""
return self.static_avg_pool2d(
self,
kernel,
strides,
padding,
data_format=data_format,
count_include_pad=count_include_pad,
ceil_mode=ceil_mode,
divisor_override=divisor_override,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def static_avg_pool3d(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
kernel: Union[int, Tuple[int], Tuple[int, int, int], ivy.Container],
strides: Union[int, Tuple[int], Tuple[int, int, int], ivy.Container],
padding: Union[str, ivy.Container],
/,
*,
data_format: Union[str, ivy.Container] = "NDHWC",
count_include_pad: Union[bool, ivy.Container] = False,
ceil_mode: Union[bool, ivy.Container] = False,
divisor_override: Optional[Union[int, ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.avg_pool3d. This method
simply wraps the function, and so the docstring for ivy.avg_pool3d also
applies to this method with minimal changes.
Parameters
----------
x
Input volume *[batch_size,d,h,w,d_in]*.
kernel
Convolution filters *[d,h,w]*.
strides
The stride of the sliding window for each dimension of input.
padding
SAME" or "VALID" indicating the algorithm, or list indicating
the per-dimension paddings.
data_format
NDHWC" or "NCDHW". Defaults to "NDHWC".
count_include_pad
Whether to include padding in the averaging calculation.
ceil_mode
Whether to use ceil or floor for creating the output shape.
divisor_override
If specified, it will be used as the divisor, otherwise kernel_size will be used.
out
optional output array, for writing the result to. It must
have a shape that the inputs broadcast to.
Returns
-------
ret
The result of the pooling operation.
Examples
--------
>>> a = ivy.arange(12).reshape((1, 2, 1, 3, 2))
>>> b = ivy.arange(48).reshape((2, 2, 2, 3, 2))
>>> x = ivy.Container({'a': a, 'b': b})
>>> print(ivy.Container.static_avg_pool3d(x, 2, 1, "VALID"))
{
a: ivy.array([], shape=(1, 1, 0, 2, 2)),
b: ivy.array([[[[[10., 11.],
[12., 13.]]]],
[[[[34., 35.],
[36., 37.]]]]])
}
"""
return ContainerBase.cont_multi_map_in_function(
"avg_pool3d",
x,
kernel,
strides,
padding,
data_format=data_format,
count_include_pad=count_include_pad,
ceil_mode=ceil_mode,
divisor_override=divisor_override,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def avg_pool3d(
self: ivy.Container,
kernel: Union[int, Tuple[int], Tuple[int, int, int], ivy.Container],
strides: Union[int, Tuple[int], Tuple[int, int, int], ivy.Container],
padding: Union[str, ivy.Container],
/,
*,
data_format: Union[str, ivy.Container] = "NDHWC",
count_include_pad: Union[bool, ivy.Container] = False,
ceil_mode: Union[bool, ivy.Container] = False,
divisor_override: Optional[Union[int, ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.avg_pool3d. This method
simply wraps the function, and so the docstring for ivy.avg_pool3d also
applies to this method with minimal changes.
Parameters
----------
self
Input volume *[batch_size,d,h,w,d_in]*.
kernel
Convolution filters *[d,h,w]*.
strides
The stride of the sliding window for each dimension of input.
padding
SAME" or "VALID" indicating the algorithm, or list indicating
the per-dimension paddings.
data_format
NDHWC" or "NCDHW". Defaults to "NDHWC".
count_include_pad
Whether to include padding in the averaging calculation.
ceil_mode
Whether to use ceil or floor for creating the output shape.
divisor_override
If specified, it will be used as the divisor, otherwise kernel_size will be used.
out
optional output array, for writing the result to. It must
have a shape that the inputs broadcast to.
Returns
-------
ret
The result of the pooling operation.
Examples
--------
>>> a = ivy.arange(24.).reshape((1, 2, 3, 4, 1))
>>> b = ivy.arange(48.).reshape((2, 4, 3, 2, 1))
>>> x = ivy.Container(a=a, b=b)
>>> print(x.avg_pool3d(2, 1, "VALID"))
{
a: ivy.array([[[[[8.5],
[9.5],
[10.5]],
[[12.5],
[13.5],
[14.5]]]]]),
b: (<class ivy.data_classes.array.array.Array> shape=[2, 3, 2, 1, 1])
}
"""
return self.static_avg_pool3d(
self,
kernel,
strides,
padding,
data_format=data_format,
count_include_pad=count_include_pad,
ceil_mode=ceil_mode,
divisor_override=divisor_override,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def static_dct(
x: ivy.Container,
/,
*,
type: Union[Literal[1, 2, 3, 4], ivy.Container] = 2,
n: Optional[Union[int, ivy.Container]] = None,
axis: Union[int, ivy.Container] = -1,
norm: Optional[Union[Literal["ortho"], ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.dct. This method simply
wraps the function, and so the docstring for ivy.dct also applies to
this method with minimal changes.
Parameters
----------
x
Container with the input signals.
type
The type of the dct. Must be 1, 2, 3 or 4.
n
The length of the transform. If n is less than the input signal length,
then x is truncated; if n is larger, then x is zero-padded.
norm
The type of normalization to be applied. Must be either None or "ortho".
out
optional output container, for writing the result to.
Returns
-------
ret
The transformed input.
Examples
--------
With one :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([8, 16, 24, 32, 40, 48, 56, 64]),
... b=ivy.array([1, 2, 3, 4, 5, 6, 7, 8]))
>>> ivy.Container.static_dct(x, type=2, norm='ortho')
{
a: ivy.array([102., -51.5, 0., -5.39, 0., -1.61, 0.,
-0.406]),
b: ivy.array([12.7, -6.44, 0., -0.673, 0., -0.201, 0.,
-0.0507])
}
With multiple :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([ 8, 16, 24, 32, 40, 48, 56, 64]),
... b=ivy.array([11., 54, 23., 13., 255., 255., 132., 182.]))
>>> n = ivy.Container(a=9, b=5)
>>> type = ivy.Container(a=2, b=4)
>>> norm = ivy.Container(a="ortho", b=None)
>>> ivy.Container.static_dct(x, type=type, n=n, norm=norm)
{
a: ivy.array([96., -28.2, -31.9, 22.9, -26., 19.8, -17., 10.9,
-5.89]),
b: ivy.array([242., -253., 286., -515., 467.])
}
"""
return ContainerBase.cont_multi_map_in_function(
"dct",
x,
type=type,
n=n,
axis=axis,
norm=norm,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def dct(
self: ivy.Container,
/,
*,
type: Union[Literal[1, 2, 3, 4], ivy.Container] = 2,
n: Optional[Union[int, ivy.Container]] = None,
axis: Union[int, ivy.Container] = -1,
norm: Optional[Union[Literal["ortho"], ivy.Container]] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.dct. This method simply
wraps the function, and so the docstring for ivy.dct also applies to
this method with minimal changes.
Parameters
----------
self
Container with the input signals.
type
The type of the dct. Must be 1, 2, 3 or 4.
n
The length of the transform. If n is less than the input signal length,
then x is truncated; if n is larger, then x is zero-padded.
norm
The type of normalization to be applied. Must be either None or "ortho".
out
optional output container, for writing the result to.
Returns
-------
ret
The transformed input.
Examples
--------
>>> x = ivy.Container(a=ivy.array([8, 16, 24, 32, 40, 48, 56, 64]),
... b=ivy.array([1, 2, 3, 4, 5, 6, 7, 8]))
>>> x.dct(type=2, norm='ortho')
{
a: ivy.array([102., -51.5, 0., -5.39, 0., -1.61, 0.,
-0.406]),
b: ivy.array([12.7, -6.44, 0., -0.673, 0., -0.201, 0.,
-0.0507])
}
"""
return self.static_dct(
self,
type=type,
n=n,
axis=axis,
norm=norm,
out=out,
)
@staticmethod
def static_idct(
x: ivy.Container,
/,
*,
type: Union[Literal[1, 2, 3, 4], ivy.Container] = 2,
n: Optional[Union[int, ivy.Container]] = None,
axis: Union[int, ivy.Container] = -1,
norm: Optional[Union[Literal["ortho"], ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.idct. This method simply
wraps the function, and so the docstring for ivy.idct also applies to
this method with minimal changes.
Parameters
----------
x
Container with the input signals.
type
The type of the dct. Must be 1, 2, 3 or 4.
n
The length of the transform. If n is less than the input signal length,
then x is truncated; if n is larger, then x is zero-padded.
norm
The type of normalization to be applied. Must be either None or "ortho".
out
optional output container, for writing the result to.
Returns
-------
ret
The transformed input.
Examples
--------
With one :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([8, 16, 24, 32, 40, 48, 56, 64]),
... b=ivy.array([1, 2, 3, 4, 5, 6, 7, 8]))
>>> ivy.Container.static_idct(x, type=2, norm='ortho')
{
a: ivy.array([79.49862671, -70.37691498, 30.00390816, -23.58938599,
13.92713165, -10.078475, 5.19664812, -1.95411837]),
b: ivy.array([9.93732834, -8.79711437, 3.75048852, -2.94867325, 1.74089146,
-1.25980937, 0.64958102, -0.2442648])
}
With multiple :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([ 8, 16, 24, 32, 40, 48, 56, 64]),
... b=ivy.array([11., 54, 23., 13., 255., 255., 132., 182.]))
>>> n = ivy.Container(a=9, b=5)
>>> type = ivy.Container(a=2, b=4)
>>> norm = ivy.Container(a="ortho", b=None)
>>> ivy.Container.static_idct(x, type=type, n=n, norm=norm)
{
a: ivy.array([86.29723358, -66.6950531, 9.93914509, 2.88008738,
-16.18951225, 18.06697273, -17.57439804, 11.68861485,
-4.41308832]),
b: ivy.array([242.0700836, -253.2449036, 285.6711426, -514.501709,
467.4924011])
}
"""
return ContainerBase.cont_multi_map_in_function(
"idct",
x,
type=type,
n=n,
axis=axis,
norm=norm,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def idct(
self: ivy.Container,
/,
*,
type: Union[Literal[1, 2, 3, 4], ivy.Container] = 2,
n: Optional[Union[int, ivy.Container]] = None,
axis: Union[int, ivy.Container] = -1,
norm: Optional[Union[Literal["ortho"], ivy.Container]] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.idct. This method
simply wraps the function, and so the docstring for ivy.idct also
applies to this method with minimal changes.
Parameters
----------
self
Container with the input signals.
type
The type of the idct. Must be 1, 2, 3 or 4.
n
The length of the transform. If n is less than the input signal length,
then x is truncated; if n is larger, then x is zero-padded.
norm
The type of normalization to be applied. Must be either None or "ortho".
out
optional output container, for writing the result to.
Returns
-------
ret
The transformed input.
Examples
--------
>>> x = ivy.Container(a=ivy.array([8, 16, 24, 32, 40, 48, 56, 64]),
... b=ivy.array([1, 2, 3, 4, 5, 6, 7, 8]))
>>> x.idct(type=2, norm='ortho')
{
a: ivy.array([79.49862671, -70.37691498, 30.00390816, -23.58938599,
13.92713165, -10.078475, 5.19664812, -1.95411837]),
b: ivy.array([9.93732834, -8.79711437, 3.75048852, -2.94867325, 1.74089146,
-1.25980937, 0.64958102, -0.2442648])
}
"""
return self.static_idct(
self,
type=type,
n=n,
axis=axis,
norm=norm,
out=out,
)
@staticmethod
def _static_fft(
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
dim: Union[int, ivy.Container],
/,
*,
norm: Union[str, ivy.Container] = "backward",
n: Optional[Union[int, Tuple[int], ivy.Container]] = None,
out: Optional[ivy.Container] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.fft. This method simply
wraps the function, and so the docstring for ivy.fft also applies to
this method with minimal changes.
Parameters
----------
x
Container containing input volumes *[...,d_in,...]*,
where d_in indicates the dimension that needs FFT.
dim
The dimension along which to take the one dimensional FFT.
norm
Optional argument, "backward", "ortho" or "forward". Defaults to be
"backward".
"backward" indicates no normalization.
"ortho" indicates normalization by 1/sqrt(n).
"forward" indicates normalization by 1/n.
n
Optional argument indicating the sequence length, if given, the input
would be padded with zero or truncated to length n before performing FFT.
Should be an integer greater than 1.
out
Optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
The transformed input.
Examples
--------
>>> import numpy as np
>>> a = ivy.array(np.array([ 6.+0.j, -2.+2.j, -2.+0.j, -2.-2.j]))
>>> b = ivy.array(np.exp(2j * np.pi * np.arange(8) / 8))
>>> c = ivy.Container(a=a, b=b)
>>> dims = ivy.Container(a=0, b=0)
>>> ivy.Container.static_fft(c, dims)
{
a: ivy.array([0.+0.j, 12.+0.j, 8.+0.j, 4.+0.j]),
b: ivy.array([-3.44509285e-16+1.14423775e-17j, 8.00000000e+00-8.11483250e-16j,
2.33486982e-16+1.22464680e-16j, 0.00000000e+00+1.22464680e-16j,
9.95799250e-17+2.33486982e-16j, 0.00000000e+00+7.66951701e-17j,
1.14423775e-17+1.22464680e-16j, 0.00000000e+00+1.22464680e-16j])
}
"""
return ContainerBase.cont_multi_map_in_function(
"fft",
x,
dim,
norm=norm,
n=n,
out=out,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
def fft(
self: ivy.Container,
dim: Union[int, ivy.Container],
/,
*,
norm: Union[str, ivy.Container] = "backward",
n: Optional[Union[int, Tuple[int], ivy.Container]] = None,
out: Optional[Union[ivy.Array, ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.fft. This method simply
wraps the function, and so the docstring for ivy.fft also applies to
this method with minimal changes.
Parameters
----------
self
Container containing input volumes *[...,d_in,...]*,
where d_in indicates the dimension that needs FFT.
dim
The dimension along which to take the one dimensional FFT.
norm
Optional argument, "backward", "ortho" or "forward". Defaults to be
"backward".
"backward" indicates no normalization.
"ortho" indicates normalization by 1/sqrt(n).
"forward" indicates normalization by 1/n.
n
Optional argument indicating the sequence length, if given, the input would
be padded with zero or truncated to length n before performing FFT.
Should be an integer greater than 1.
out
Optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
Container containing the transformed inputs.
Examples
--------
>>> import numpy as np
>>> a = ivy.array(np.array([ 6.+0.j, -2.+2.j, -2.+0.j, -2.-2.j]))
>>> b = ivy.array(np.exp(2j * np.pi * np.arange(8) / 8))
>>> c = ivy.Container(a=a, b=b)
>>> dims = ivy.Container(a=0, b=0)
>>> c.fft(dims)
{
a: ivy.array([0.+0.j, 12.+0.j, 8.+0.j, 4.+0.j]),
b: ivy.array([-3.44509285e-16+1.14423775e-17j, 8.00000000e+00-8.11483250e-16j,
2.33486982e-16+1.22464680e-16j, 0.00000000e+00+1.22464680e-16j,
9.95799250e-17+2.33486982e-16j, 0.00000000e+00+7.66951701e-17j,
1.14423775e-17+1.22464680e-16j, 0.00000000e+00+1.22464680e-16j])
}
"""
return self._static_fft(
self,
dim,
norm=norm,
n=n,
out=out,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
@staticmethod
def static_ifft(
x: ivy.Container,
dim: Union[int, ivy.Container],
*,
norm: Union[str, ivy.Container] = "backward",
n: Optional[Union[int, Tuple[int], ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.ifft. This method simply
wraps the function, and so the docstring for ivy.ifft also applies to
this method with minimal changes.
Parameters
----------
x
Container containing input volumes *[...,d_in,...]*,
where d_in indicates the dimension that needs IFFT.
dim
The dimension along which to take the one dimensional IFFT.
norm
Optional argument, "backward", "ortho" or "forward". Defaults to be
"backward".
"backward" indicates no normalization.
"ortho" indicates normalization by 1/sqrt(n).
"forward" indicates normalization by 1/n.
n
Optional argument indicating the sequence length, if given, the input would
be padded with zero or truncated to length n before performing IFFT.
Should be an integer greater than 1.
out
Optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
The transformed input.
Examples
--------
>>> import numpy as np
>>> a = ivy.array(np.array([ 6.+0.j, -2.+2.j, -2.+0.j, -2.-2.j]))
>>> b = ivy.array(np.exp(2j * np.pi * np.arange(8) / 8))
>>> c = ivy.Container(a=a, b=b)
>>> dims = ivy.Container(a=0, b=0)
>>> ivy.Container.static_ifft(c, dims)
{
a: ivy.array([0.+0.j, 1.+0.j, 2.+0.j, 3.+0.j]),
b: ivy.array([-4.30636606e-17+1.43029718e-18j, 0.00000000e+00+1.53080850e-17j,
1.43029718e-18+1.53080850e-17j, 0.00000000e+00+9.58689626e-18j,
1.24474906e-17+2.91858728e-17j, 0.00000000e+00+1.53080850e-17j,
2.91858728e-17+1.53080850e-17j, 1.00000000e+00-1.01435406e-16j])
}
"""
return ContainerBase.cont_multi_map_in_function(
"ifft",
x,
dim,
norm=norm,
n=n,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def ifft(
self: ivy.Container,
dim: Union[int, ivy.Container],
*,
norm: Union[str, ivy.Container] = "backward",
n: Optional[Union[int, Tuple[int], ivy.Container]] = None,
out: Optional[Union[ivy.Array, ivy.Container]] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.ifft. This method
simply wraps the function, and so the docstring for ivy.ifft also
applies to this method with minimal changes.
Parameters
----------
self
Container containing input volumes *[...,d_in,...]*,
where d_in indicates the dimension that needs IFFT.
dim
The dimension along which to take the one dimensional IFFT.
norm
Optional argument, "backward", "ortho" or "forward". Defaults to be
"backward".
"backward" indicates no normalization.
"ortho" indicates normalization by 1/sqrt(n).
"forward" indicates normalization by 1/n.
n
Optional argument indicating the sequence length, if given, the input
would be padded with zero or truncated to length n before performing IFFT.
Should be an integer greater than 1.
out
Optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
Container containing the transformed inputs.
Examples
--------
>>> import numpy as np
>>> a = ivy.array(np.array([ 6.+0.j, -2.+2.j, -2.+0.j, -2.-2.j]))
>>> b = ivy.array(np.exp(2j * np.pi * np.arange(8) / 8))
>>> c = ivy.Container(a=a, b=b)
>>> dims = ivy.Container(a=0, b=0)
>>> c.ifft(dims)
{
a: ivy.array([0.+0.j, 1.+0.j, 2.+0.j, 3.+0.j]),
b: ivy.array([-4.30636606e-17+1.43029718e-18j, 0.00000000e+00+1.53080850e-17j,
1.43029718e-18+1.53080850e-17j, 0.00000000e+00+9.58689626e-18j,
1.24474906e-17+2.91858728e-17j, 0.00000000e+00+1.53080850e-17j,
2.91858728e-17+1.53080850e-17j, 1.00000000e+00-1.01435406e-16j])
}
"""
return self.static_ifft(
self,
dim,
norm=norm,
n=n,
out=out,
)
@staticmethod
def static_embedding(
weight: Union[ivy.Array, ivy.NativeArray, ivy.Container],
indices: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
max_norm: Optional[Union[int, ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
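"""ivy.Container static method variant of ivy.embedding. This method
simply wraps the function, and so the docstring for ivy.embedding also
applies to this method with minimal changes.
Parameters
----------
weight
The embedding matrix (one row per embedding vector).
indices
The indices of the embedding vectors to retrieve from ``weight``.
max_norm
If given, each embedding vector with norm larger than ``max_norm``
is renormalized to have norm ``max_norm``.
out
optional output container, for writing the result to.
Returns
-------
ret
The embedding vectors gathered at the given indices.
Examples
--------
A minimal, illustrative sketch (the sample values are arbitrary, and
the printed output is omitted since its formatting is backend-dependent):
>>> weight = ivy.Container(a=ivy.array([[0., 1.], [2., 3.], [4., 5.]]))
>>> indices = ivy.array([0, 2])
>>> y = ivy.Container.static_embedding(weight, indices)
"""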
return ContainerBase.cont_multi_map_in_function(
"embedding",
weight,
indices,
max_norm=max_norm,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def embedding(
self: Union[ivy.Array, ivy.NativeArray, ivy.Container],
indices: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
max_norm: Optional[Union[int, ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
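"""ivy.Container instance method variant of ivy.embedding. This method
simply wraps the function, and so the docstring for ivy.embedding also
applies to this method with minimal changes.
Parameters
----------
self
The embedding matrix (one row per embedding vector).
indices
The indices of the embedding vectors to retrieve from ``self``.
max_norm
If given, each embedding vector with norm larger than ``max_norm``
is renormalized to have norm ``max_norm``.
out
optional output container, for writing the result to.
Returns
-------
ret
The embedding vectors gathered at the given indices.
"""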
return self.static_embedding(
self,
indices,
max_norm=max_norm,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def static_dft(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
axis: Union[int, ivy.Container] = 1,
inverse: Union[bool, ivy.Container] = False,
onesided: Union[bool, ivy.Container] = False,
dft_length: Optional[Union[int, Tuple[int], ivy.Container]] = None,
norm: Union[str, ivy.Container] = "backward",
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[Union[ivy.Array, ivy.Container]] = None,
) -> ivy.Container:
"""
Parameters
----------
x
Input array or container of input volumes.
axis
The axis on which to perform the DFT. Default is ``1``.
inverse
Whether to perform the inverse discrete Fourier transform.
Default is ``False``.
onesided
If ``True``, only the non-redundant half of the spectrum is returned,
which is valid for real-valued inputs. Default is ``False``.
dft_length
The length of the signal. If greater than the input length along
``axis``, the input is zero-padded; if smaller, it is truncated.
norm
Optional argument, "backward", "ortho" or "forward".
Defaults to "backward".
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
map_sequences
Whether to also map method to sequences (lists, tuples).
out
optional output container, for writing the result to.
Returns
-------
ret
The transformed input.
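Examples
--------
A minimal, illustrative sketch (the sample values are arbitrary; the
output is omitted since exact values are backend-dependent):
>>> x = ivy.Container(a=ivy.array([[1.+0.j, 2.+0.j, 3.+0.j, 4.+0.j]]))
>>> y = ivy.Container.static_dft(x, axis=1)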
"""
return ContainerBase.cont_multi_map_in_function(
"dft",
x,
axis=axis,
inverse=inverse,
onesided=onesided,
dft_length=dft_length,
norm=norm,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def dft(
self: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
axis: Union[int, ivy.Container] = 1,
inverse: Union[bool, ivy.Container] = False,
onesided: Union[bool, ivy.Container] = False,
dft_length: Optional[Union[int, Tuple[int], ivy.Container]] = None,
norm: Union[str, ivy.Container] = "backward",
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[Union[ivy.Array, ivy.Container]] = None,
) -> ivy.Container:
"""
Parameters
----------
self
Input container of input volumes.
axis
The axis on which to perform the DFT. Default is ``1``.
inverse
Whether to perform the inverse discrete Fourier transform.
Default is ``False``.
onesided
If ``True``, only the non-redundant half of the spectrum is returned,
which is valid for real-valued inputs. Default is ``False``.
dft_length
The length of the signal. If greater than the input length along
``axis``, the input is zero-padded; if smaller, it is truncated.
norm
Optional argument, "backward", "ortho" or "forward".
Defaults to "backward".
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
map_sequences
Whether to also map method to sequences (lists, tuples).
out
optional output container, for writing the result to.
Returns
-------
ret
The transformed input.
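Examples
--------
A minimal, illustrative sketch (the sample values are arbitrary; the
output is omitted since exact values are backend-dependent):
>>> x = ivy.Container(a=ivy.array([[1.+0.j, 2.+0.j, 3.+0.j, 4.+0.j]]))
>>> y = x.dft(axis=1)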
"""
return self.static_dft(
self,
axis=axis,
inverse=inverse,
onesided=onesided,
dft_length=dft_length,
norm=norm,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def static_interpolate(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
size: Union[Sequence[int], int, ivy.Container],
/,
*,
mode: Union[
Literal[
"linear",
"bilinear",
"trilinear",
"nearest",
"area",
"nearest_exact",
"tf_area",
"bicubic",
],
ivy.Container,
] = "linear",
scale_factor: Optional[Union[Sequence[int], int, ivy.Container]] = None,
recompute_scale_factor: Optional[Union[bool, ivy.Container]] = None,
align_corners: Optional[Union[bool, ivy.Container]] = None,
antialias: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""Down/up samples the input to the given size. The algorithm used for
interpolation is determined by mode.
Parameters
----------
x
Input array. Must have the shape
[batch x channels x [optional depth] x [optional height] x width].
size
Output size.
mode
Interpolation mode. Can be one of the following:
- linear
- bilinear
- trilinear
- nearest
- area
- tf_area
- bicubic
- mitchellcubic
- lanczos3
- lanczos5
- gaussian
scale_factor
Multiplier for spatial size that defines the output
size (overwriting `size`).
align_corners
If True, the corner pixels of the input and output tensors are aligned,
thus preserving the values at the corner pixels. If False, the corner
pixels are not aligned, and the interpolation uses edge value padding for
out-of-boundary values. This only has an effect when mode is 'linear',
'bilinear', 'bicubic' or 'trilinear'. Default: False
antialias
If True, antialiasing is applied when downsampling an image.
Supported modes: 'bilinear', 'bicubic'.
out
Optional output array, for writing the result to. It must
have a shape that the inputs broadcast to.
Returns
-------
resized array
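Examples
--------
A minimal, illustrative sketch (the input follows the
[batch x channels x width] layout described above; the output is omitted
since exact values depend on the backend's interpolation kernels):
>>> x = ivy.Container(a=ivy.array([[[1., 2., 3., 4.]]]))
>>> y = ivy.Container.static_interpolate(x, 8, mode="linear")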
"""
return ContainerBase.cont_multi_map_in_function(
"interpolate",
x,
size,
mode=mode,
scale_factor=scale_factor,
recompute_scale_factor=recompute_scale_factor,
align_corners=align_corners,
antialias=antialias,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def interpolate(
self: ivy.Container,
size: Union[Sequence[int], int, ivy.Container],
/,
*,
mode: Union[
Literal[
"linear",
"bilinear",
"trilinear",
"nearest",
"area",
"nearest_exact",
"tf_area",
"bicubic",
],
ivy.Container,
] = "linear",
scale_factor: Optional[Union[Sequence[int], int, ivy.Container]] = None,
recompute_scale_factor: Optional[Union[bool, ivy.Container]] = None,
align_corners: Optional[Union[bool, ivy.Container]] = None,
antialias: Union[bool, ivy.Container] = False,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""Down/up samples the input to the given size. The algorithm used for
interpolation is determined by mode.
Parameters
----------
self
Input container. Must have the shape
[batch x channels x [optional depth] x [optional height] x width].
size
Output size.
mode
Interpolation mode. Can be one of the following:
- linear
- bilinear
- trilinear
- nearest
- area
- tf_area
- bicubic
- mitchellcubic
- lanczos3
- lanczos5
- gaussian
scale_factor
Multiplier for spatial size that defines the output
size (overwriting `size`).
align_corners
If True, the corner pixels of the input and output tensors are aligned,
thus preserving the values at the corner pixels. If False, the corner
pixels are not aligned, and the interpolation uses edge value padding for
out-of-boundary values. This only has an effect when mode is 'linear',
'bilinear', 'bicubic' or 'trilinear'. Default: False
antialias
If True, antialiasing is applied when downsampling an image.
Supported modes: 'bilinear', 'bicubic'.
out
Optional output array, for writing the result to. It must
have a shape that the inputs broadcast to.
Returns
-------
resized array
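Examples
--------
A minimal, illustrative sketch (sample values are arbitrary; the output
is omitted since exact values are backend-dependent):
>>> x = ivy.Container(a=ivy.array([[[1., 2., 3., 4.]]]))
>>> y = x.interpolate(2, mode="linear")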
"""
return self.static_interpolate(
self,
size,
mode=mode,
scale_factor=scale_factor,
recompute_scale_factor=recompute_scale_factor,
align_corners=align_corners,
antialias=antialias,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def static_adaptive_avg_pool1d(
input: Union[ivy.Array, ivy.NativeArray, ivy.Container],
output_size: Union[int, ivy.Container],
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.adaptive_avg_pool1d. This
method simply wraps the function, and so the docstring for
ivy.adaptive_avg_pool1d also applies to this method with minimal
changes.
Parameters
----------
input
Input array. Must have shape (N, C, L_in) or (C, L_in) where N is
the batch dimension, C is the feature dimension, and L_in is the spatial
dimension.
output_size
Spatial output size.
Returns
-------
The result of the pooling operation. Will have shape (N, C, L_out) or
(C, L_out), where L_out = `output_size`
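Examples
--------
A minimal, illustrative sketch (sample values are arbitrary; the output
is omitted since exact values are backend-dependent):
>>> x = ivy.Container(a=ivy.array([[[1., 2., 3., 4., 5., 6.]]]))
>>> y = ivy.Container.static_adaptive_avg_pool1d(x, 2)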
"""
return ContainerBase.cont_multi_map_in_function(
"adaptive_avg_pool1d",
input,
output_size,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
def adaptive_avg_pool1d(
self: ivy.Container,
output_size: Union[int, ivy.Container],
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> ivy.Container:
"""Apply a 1D adaptive average pooling over an input signal composed of
several input planes.
Parameters
----------
self
Input container.
output_size
Spatial output size.
Returns
-------
The result of the pooling operation.
"""
return self.static_adaptive_avg_pool1d(
self,
output_size,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
@staticmethod
def static_adaptive_avg_pool2d(
input: Union[ivy.Array, ivy.NativeArray, ivy.Container],
output_size: Union[Sequence[int], int, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
data_format: str = "NHWC",
) -> ivy.Container:
"""ivy.Container static method variant of ivy.adaptive_avg_pool2d. This
method simply wraps the function, and so the docstring for
ivy.adaptive_avg_pool2d also applies to this method with minimal
changes.
Parameters
----------
input
A 3D or 4D input array. Should have a floating-point data type.
output_size
Spatial output size.
data_format
"NHWC" or "NCHW". Defaults to "NHWC".
Returns
-------
The result of the pooling operation. Will have shape (N, C, S_0, S_1) or
(C, S_0, S_1), where S = `output_size`
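Examples
--------
A minimal, illustrative sketch (random input values; the output is
omitted since exact values are backend-dependent):
>>> x = ivy.Container(a=ivy.random_normal(shape=(1, 3, 8, 8)))
>>> y = ivy.Container.static_adaptive_avg_pool2d(x, (2, 2), data_format="NCHW")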
"""
return ContainerBase.cont_multi_map_in_function(
"adaptive_avg_pool2d",
input,
output_size,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
data_format=data_format,
)
def adaptive_avg_pool2d(
self: ivy.Container,
output_size: Union[int, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
data_format: str = "NHWC",
) -> ivy.Container:
"""Apply a 2D adaptive average pooling over an input signal composed of
several input planes.
Parameters
----------
self
Input container.
output_size
Spatial output size.
data_format
"NHWC" or "NCHW". Defaults to "NHWC".
Returns
-------
The result of the pooling operation.
"""
return self.static_adaptive_avg_pool2d(
self,
output_size,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
data_format=data_format,
)
@staticmethod
def static_adaptive_max_pool2d(
input: Union[ivy.Array, ivy.NativeArray, ivy.Container],
output_size: Union[Sequence[int], int, ivy.Container],
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.adaptive_max_pool2d. This
method simply wraps the function, and so the docstring for
ivy.adaptive_max_pool2d also applies to this method with minimal
changes.
Parameters
----------
input
Input array. Must have shape (N, C, H_in, W_in) or (C, H_in, W_in) where N
is the batch dimension, C is the feature dimension, and H_in and W_in are
the 2 spatial dimensions.
output_size
Spatial output size.
Returns
-------
The result of the pooling operation. Will have shape (N, C, S_0, S_1) or
(C, S_0, S_1), where S = `output_size`
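Examples
--------
A minimal, illustrative sketch (random input values; the output is
omitted since exact values are backend-dependent):
>>> x = ivy.Container(a=ivy.random_normal(shape=(1, 3, 8, 8)))
>>> y = ivy.Container.static_adaptive_max_pool2d(x, (2, 2))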
"""
return ContainerBase.cont_multi_map_in_function(
"adaptive_max_pool2d",
input,
output_size,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
def adaptive_max_pool2d(
self: ivy.Container,
output_size: Union[int, ivy.Container],
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> ivy.Container:
"""Apply a 2D adaptive maximum pooling over an input signal composed of
several input planes.
Parameters
----------
self
Input container.
output_size
Spatial output size.
Returns
-------
The result of the pooling operation.
"""
return self.static_adaptive_max_pool2d(
self,
output_size,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
@staticmethod
def static_adaptive_max_pool3d(
input: Union[ivy.Array, ivy.NativeArray, ivy.Container],
output_size: Union[Sequence[int], int, ivy.Container],
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> ivy.Container:
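"""ivy.Container static method variant of ivy.adaptive_max_pool3d. This
method simply wraps the function, and so the docstring for
ivy.adaptive_max_pool3d also applies to this method with minimal
changes.
Parameters
----------
input
Input array. Must have shape (N, C, D_in, H_in, W_in) or
(C, D_in, H_in, W_in) where N is the batch dimension, C is the feature
dimension, and D_in, H_in and W_in are the 3 spatial dimensions.
output_size
Spatial output size.
Returns
-------
The result of the pooling operation.
Examples
--------
A minimal, illustrative sketch (random input values; the output is
omitted since exact values are backend-dependent):
>>> x = ivy.Container(a=ivy.random_normal(shape=(1, 3, 4, 4, 4)))
>>> y = ivy.Container.static_adaptive_max_pool3d(x, 2)
"""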
return ContainerBase.cont_multi_map_in_function(
"adaptive_max_pool3d",
input,
output_size,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
def adaptive_max_pool3d(
self: ivy.Container,
output_size: Union[int, ivy.Container],
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> ivy.Container:
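"""Apply a 3D adaptive maximum pooling over an input signal composed of
several input planes.
Parameters
----------
self
Input container.
output_size
Spatial output size.
Returns
-------
The result of the pooling operation.
"""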
return self.static_adaptive_max_pool3d(
self,
output_size,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
@staticmethod
def static_ifftn(
x: ivy.Container,
s: Optional[Union[int, Tuple[int, ...], ivy.Container]] = None,
axes: Optional[Union[int, Tuple[int, ...], ivy.Container]] = None,
*,
norm: Union[str, ivy.Container] = "backward",
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.ifftn.
This method simply wraps the function, and so the docstring for
ivy.ifftn also applies to this method with minimal changes.
Parameters
----------
x
Input array of complex numbers.
s
sequence of ints, optional
Shape (length of transformed axis) of the output (`s[0]` refers to axis 0,
`s[1]` to axis 1, etc.). If given shape is smaller than that of the input,
the input is cropped. If larger, input is padded with zeros. If `s` is not
given, shape of input along axes specified by axes is used.
axes
axes over which to compute the IFFT. If not given, last `len(s)` axes are
used, or all axes if `s` is also not specified. Repeated indices in axes
means inverse transform over that axis is performed multiple times.
norm
Optional argument, "backward", "ortho" or "forward".
Defaults to be "backward".
"backward" indicates no normalization.
"ortho" indicates normalization by 1/sqrt(n).
"forward" indicates normalization by 1/n.
out
Optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
The truncated or zero-padded input, transformed along the axes indicated
by `axes`, or by a combination of `s` and `axes`, as explained in the
parameters section above.
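Examples
--------
A minimal, illustrative sketch (the complex sample values are arbitrary;
the output is omitted since exact values are backend-dependent):
>>> x = ivy.Container(a=ivy.array([[1.+1.j, 2.-1.j], [3.+0.j, 4.+2.j]]))
>>> y = ivy.Container.static_ifftn(x, s=(2, 2), axes=(0, 1))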
"""
return ContainerBase.cont_multi_map_in_function(
"ifftn",
x,
s=s,
axes=axes,
norm=norm,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def ifftn(
self: ivy.Container,
s: Optional[Union[int, Tuple[int, ...], ivy.Container]] = None,
axes: Optional[Union[int, Tuple[int, ...], ivy.Container]] = None,
*,
norm: Union[str, ivy.Container] = "backward",
out: Optional[Union[ivy.Array, ivy.Container]] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.ifftn.
This method simply wraps the function, and so the docstring for
ivy.ifftn also applies to this method with minimal changes.
Parameters
----------
self
Input array of complex numbers.
s
sequence of ints, optional
Shape (length of transformed axis) of the output (`s[0]` refers to axis 0,
`s[1]` to axis 1, etc.). If given shape is smaller than that of the input,
the input is cropped. If larger, input is padded with zeros. If `s` is not
given, shape of input along axes specified by axes is used.
axes
axes over which to compute the IFFT. If not given, last `len(s)` axes are
used, or all axes if `s` is also not specified. Repeated indices in axes
means inverse transform over that axis is performed multiple times.
norm
Optional argument, "backward", "ortho" or "forward".
Defaults to be "backward".
"backward" indicates no normalization.
"ortho" indicates normalization by 1/sqrt(n).
"forward" indicates normalization by 1/n.
out
Optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
Container containing the transformed inputs
Examples
--------
>>> x = ivy.Container(
... a=ivy.array([[0.247306+0.908323j, 0.494955+0.90395j,
... 0.98193269+0.49560517j],
... [0.93280757+0.48075343j, 0.28526384+0.3351205j,
... 0.2343787 +0.83528011j],
... [0.18791352+0.30690572j, 0.82115787+0.96195183j,
... 0.44719226+0.72654048j]]),
... b=ivy.array([[0.24730653+0.90832391j, 0.49495562+0.9039565j,
... 0.98193269+0.49560517j],
... [0.93280757+0.48075343j, 0.28526384+0.3351205j,
... 0.2343787 +0.83528011j],
... [0.18791352+0.30690572j, 0.82115787+0.96195183j,
... 0.44719226+0.72654048j]]),
... )
>>> y = x.ifftn(s=[2, 1], axes=[0, 1], norm='ortho')
>>> print(y)
{
a: ivy.array([[0.8344667+0.98222595j],
[-0.48472244+0.30233797j]]),
b: ivy.array([[0.8344667+0.98222595j],
[-0.48472244+0.30233797j]])
}
"""
return self.static_ifftn(
self,
s=s,
axes=axes,
norm=norm,
out=out,
)
@staticmethod
def static_rfft(
x: ivy.Container,
/,
*,
n: Optional[Union[int, ivy.Container]] = None,
axis: Union[int, ivy.Container] = -1,
norm: Union[
Literal["backward", "ortho", "forward"], ivy.Container
] = "backward",
out: Optional[Union[ivy.Array, ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.rfft.
This method simply wraps the function, and so the docstring for
ivy.rfft also applies to this method with minimal changes.
Parameters
----------
x
input array. Must have a real-valued floating-point data type.
n
length of the transformed axis of the input. If
- n is greater than the length of the input array, the input array
is zero-padded to length n.
- n is less than the length of the input array, the input array is
trimmed to length n.
- n is not provided, the length of the transformed axis of the
output must equal the length of the input along the axis specified
by axis. Default is ``None``.
axis
axis (dimension) over which to compute the Fourier transform.
If not set, the last axis (dimension) is used. Default is ``-1``.
norm
normalization mode. Should be one of the following modes:
- 'backward': no normalization.
- 'ortho': normalize by 1/sqrt(n) (i.e., make the FFT orthonormal).
- 'forward': normalize by 1/n.
Default is ``backward``.
out
Optional output array, for writing the result to. It must
have a shape that the inputs broadcast to.
key_chains
The key-chains to apply or not apply the method to.
Default is ``None``.
to_apply
If True, the method will be applied to key_chains,
otherwise key_chains will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was
not applied. Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
Returns
-------
ret
an array transformed along the axis (dimension) indicated by axis.
The returned array must have a complex-valued floating-point
data type determined by Type Promotion Rules.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0.,1.,2.]),
... b=ivy.array([3.,4.,5.]))
>>> y = ivy.Container.static_rfft(x)
>>> print(y)
{
a: ivy.array([3.+0.j, -1.5+0.8660254j]),
b: ivy.array([12.+0.j, -1.5+0.8660254j])
}
"""
return ContainerBase.cont_multi_map_in_function(
"rfft",
x,
n=n,
axis=axis,
norm=norm,
out=out,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
def rfft(
self: ivy.Container,
/,
*,
n: Optional[Union[int, ivy.Container]] = None,
axis: Union[int, ivy.Container] = -1,
norm: Union[
Literal["backward", "ortho", "forward"], ivy.Container
] = "backward",
out: Optional[Union[ivy.Array, ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.rfft. This method
simply wraps the function, and so the docstring for ivy.rfft also
applies to this method with minimal changes.
Parameters
----------
self
input array. Must have a real-valued floating-point data type.
n
length of the transformed axis of the input. If
- n is greater than the length of the input array, the input array
is zero-padded to length n.
- n is less than the length of the input array, the input array is
trimmed to length n.
- n is not provided, the length of the transformed axis of the
output must equal the length of the input along the axis specified
by axis. Default is ``None``.
axis
axis (dimension) over which to compute the Fourier transform.
If not set, the last axis (dimension) is used. Default is ``-1``.
norm
normalization mode. Should be one of the following modes:
- 'backward': no normalization.
- 'ortho': normalize by 1/sqrt(n) (i.e., make the FFT orthonormal).
- 'forward': normalize by 1/n.
Default is ``backward``.
out
Optional output array, for writing the result to. It must
have a shape that the inputs broadcast to.
key_chains
The key-chains to apply or not apply the method to.
Default is ``None``.
to_apply
If True, the method will be applied to key_chains,
otherwise key_chains will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was
not applied. Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
Returns
-------
ret
an array transformed along the axis (dimension) indicated by axis.
The returned array must have a complex-valued floating-point
data type determined by Type Promotion Rules.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0.,1.,2.]),
... b=ivy.array([3.,4.,5.]))
>>> y = x.rfft()
>>> print(y)
{
a: ivy.array([3.+0.j, -1.5+0.8660254j]),
b: ivy.array([12.+0.j, -1.5+0.8660254j])
}
"""
return self.static_rfft(
self,
n=n,
axis=axis,
norm=norm,
out=out,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
@staticmethod
def static_rfftn(
x: ivy.Container,
s: Optional[Union[int, Tuple[int, ...], ivy.Container]] = None,
axes: Optional[Union[int, Tuple[int, ...], ivy.Container]] = None,
*,
norm: Union[str, ivy.Container] = "backward",
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.rfftn.
This method simply wraps the function, and so the docstring for
ivy.rfftn also applies to this method with minimal changes.
Parameters
----------
x
Input array of real numbers.
s
sequence of ints, optional
Shape (length of transformed axis) to use from the input (`s[0]` refers to
axis 0,`s[1]` to axis 1, etc.). The final element of `s` corresponds to `n`
for `rfft(x, n)`, while for the remaining axes, it corresponds to `n` for
`fft(x, n)`. Along any axis, if the given shape is smaller than that of the
input, the input is cropped. If it is larger, the input is padded with zeros.
If `s` is not given, the shape of the input along the axes specified by
`axes` is used.
axes
sequence of ints, optional
Axes over which to compute the FFT. If not given, the last `len(s)` axes
are used, or all axes if `s` is also not specified.
norm
Optional argument, "backward", "ortho" or "forward".
Defaults to be "backward".
"backward" indicates no normalization.
"ortho" indicates normalization by 1/sqrt(n).
"forward" indicates normalization by 1/n.
out
Optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
The truncated or zero-padded input, transformed along the axes indicated
by `axes`, or by a combination of `s` and `axes`, as explained in the
parameters section above.
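Examples
--------
A minimal, illustrative sketch (real-valued input, as rfftn requires;
the output is omitted since exact values are backend-dependent):
>>> x = ivy.Container(a=ivy.array([[0., 1.], [2., 3.]]))
>>> y = ivy.Container.static_rfftn(x, s=(2, 2), axes=(0, 1))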
"""
return ContainerBase.cont_multi_map_in_function(
"rfftn",
x,
s=s,
axes=axes,
norm=norm,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def rfftn(
self: Union[ivy.Array, ivy.NativeArray, ivy.Container],
s: Optional[Union[Sequence[int], ivy.Container]] = None,
axes: Optional[Union[int, Tuple[int], ivy.Container]] = None,
*,
norm: Union[str, ivy.Container] = "backward",
out: Optional[Union[ivy.Array, ivy.Container]] = None,
) -> ivy.Container:
"""Compute the n-dimensional discrete Fourier Transform for real input.
Parameters
----------
s : sequence of ints, optional
Shape (length of each transformed axis) of the output. Along each axis,
if the given shape is smaller than that of the input, the input is
cropped. If it is larger, the input is padded with zeros.
axes : int or tuple of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if ``s`` is also not specified.
norm : {'backward', 'ortho', 'forward'}, optional
Normalization mode. Default is 'backward'.
out : array-like, optional
Output array. Must have the same shape and type as the expected output.
Returns
-------
transformed : Container
The n-dimensional discrete Fourier Transform of the input.
"""
return self.static_rfftn(
self,
s=s,
axes=axes,
norm=norm,
out=out,
)
@staticmethod
def static_stft(
signals: ivy.Container,
frame_length: Union[int, ivy.Container],
frame_step: Union[int, ivy.Container],
/,
*,
fft_length: Optional[Union[int, ivy.Container]] = None,
window_fn: Optional[Union[Callable, ivy.Container]] = None,
pad_end: Optional[Union[bool, ivy.Container]] = False,
name: Optional[Union[str, ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.stft.
This method simply wraps the function, and so the docstring for
ivy.stft also applies to this method with minimal changes.
Parameters
----------
signals
Input Arrays.
frame_length
An integer scalar Tensor. The window length in samples.
frame_step
An integer scalar Tensor. The number of samples to step.
fft_length, optional
An integer scalar Tensor. The size of the FFT to apply.
If not provided, uses the smallest power of 2 enclosing frame_length.
window_fn, optional
A callable that takes a window length
and a dtype keyword argument and returns a [window_length]
Tensor of samples in the provided datatype.
If set to None, no windowing is used.
pad_end, optional
Whether to pad the end of signals with zeros when the provided frame length
and step produces a frame that lies partially past its end.
name, optional
An optional name for the operation.
out, optional
Optional output array for writing the result.
Returns
-------
ret
A [..., frames, fft_unique_bins] Tensor of
complex64/complex128 STFT values where fft_unique_bins is
fft_length // 2 + 1 (the unique components of the FFT).
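Examples
--------
A minimal, illustrative sketch (the signal values are arbitrary; the
output is omitted since the exact complex values are backend-dependent):
>>> signals = ivy.Container(a=ivy.array([0., 1., 2., 3., 4., 5., 6., 7.]))
>>> y = ivy.Container.static_stft(signals, 4, 2)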
"""
return ContainerBase.cont_multi_map_in_function(
"stft",
signals,
frame_length,
frame_step,
fft_length=fft_length,
window_fn=window_fn,
pad_end=pad_end,
name=name,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def stft(
self: Union[ivy.Array, ivy.NativeArray, ivy.Container],
frame_length: Union[int, ivy.Container],
frame_step: Union[int, ivy.Container],
/,
*,
fft_length: Optional[Union[int, ivy.Container]] = None,
window_fn: Optional[Union[Callable, ivy.Container]] = None,
pad_end: Optional[Union[bool, ivy.Container]] = False,
name: Optional[Union[str, ivy.Container]] = None,
out: Optional[Union[ivy.Array, ivy.Container]] = None,
) -> ivy.Container:
"""Compute the Short-time Fourier Transform of signals.
Parameters
----------
self
Input Arrays.
frame_length
An integer scalar Tensor. The window length in samples.
frame_step
An integer scalar Tensor. The number of samples to step.
fft_length
An integer scalar Tensor. The size of the FFT to apply.
If not provided, uses the smallest power of 2 enclosing frame_length.
window_fn
A callable that takes a window length and
a dtype keyword argument and returns a [window_length] Tensor of
samples in the provided datatype. If set to None, no windowing is used.
pad_end
Whether to pad the end of signals with zeros when the provided frame length
and step produces a frame that lies partially past its end.
name
An optional name for the operation.
out
Optional output array for writing the result.
Returns
-------
ret
A [..., frames, fft_unique_bins] Tensor of
complex64/complex128 STFT values where fft_unique_bins is
fft_length // 2 + 1 (the unique components of the FFT).
"""
return self.static_stft(
self,
frame_length,
frame_step,
fft_length=fft_length,
window_fn=window_fn,
pad_end=pad_end,
name=name,
out=out,
)
@staticmethod
def _static_sliding_window(
input: Union[ivy.Array, ivy.NativeArray, ivy.Container],
window_size: Union[int, Tuple[int, int], Tuple[int, int, int], ivy.Container],
/,
*,
stride: Union[int, Tuple[int, int], ivy.Container] = 1,
dilation: Union[int, Tuple[int, int], ivy.Container] = 1,
padding: Union[str, int, Sequence[Tuple[int, int]], ivy.Container] = "VALID",
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.sliding_window. This
method simply wraps the function, and so the docstring for
ivy.sliding_window also applies to this method with minimal changes.
Parameters
----------
input
An array representing the base area over which the window is going
to slide.
window_size
Size of the sliding window for each dimension of the input.
stride
The stride of the sliding window for each dimension of input.
padding
Either the string "SAME" (padding with zeros evenly), the string "VALID"
(no padding), or a sequence of n (low, high) integer pairs that give the
padding to apply before and after each spatial dimension.
dilation
The stride between elements within a sliding window, must be > 0.
Returns
-------
ret
The result of the sliding window operation.
Examples
--------
>>> x = ivy.Container(
... a=ivy.array([[1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12]]),
... b=ivy.array([[13, 14, 15, 16],
... [17, 18, 19, 20],
... [21, 22, 23, 24]])
... )
>>> result = ivy.Container._static_sliding_window(x, (2, 2))
>>> print(result)
{
a: ivy.array([[[ 1, 2, 5, 6],
[ 2, 3, 6, 7],
[ 3, 4, 7, 8]],
[[ 5, 6, 9, 10],
[ 6, 7, 10, 11],
[ 7, 8, 11, 12]]]),
b: ivy.array([[[13, 14, 17, 18],
[14, 15, 18, 19],
[15, 16, 19, 20]],
[[17, 18, 21, 22],
[18, 19, 22, 23],
[19, 20, 23, 24]]])
}
"""
return ContainerBase.cont_multi_map_in_function(
"sliding_window",
input,
window_size,
stride=stride,
dilation=dilation,
padding=padding,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
def sliding_window(
self: Union[ivy.Array, ivy.NativeArray, ivy.Container],
window_size: Union[int, Tuple[int, int], Tuple[int, int, int], ivy.Container],
/,
*,
stride: Union[int, Tuple[int, int], ivy.Container] = 1,
dilation: Union[int, Tuple[int, int], ivy.Container] = 1,
padding: Union[str, int, Sequence[Tuple[int, int]], ivy.Container] = "VALID",
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.sliding_window. This
method simply wraps the function, and so the docstring for
ivy.sliding_window also applies to this method with minimal changes.
Parameters
----------
self
Input container of arrays representing the base area over which the
window is going to slide.
window_size
Size of the sliding window for each dimension of the input.
stride
The stride of the sliding window for each dimension of input.
padding
Either the string "SAME" (padding with zeros evenly), the string "VALID"
(no padding), or a sequence of n (low, high) integer pairs that give the
padding to apply before and after each spatial dimension.
dilation
The stride between elements within a sliding window, must be > 0.
Returns
-------
ret
The result of the sliding window operation.
Examples
--------
>>> x = ivy.Container(
... a=ivy.array([[1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12]]),
... b=ivy.array([[13, 14, 15, 16],
... [17, 18, 19, 20],
... [21, 22, 23, 24]])
... )
>>> x.sliding_window((2, 2))
{
a: ivy.array([[[ 1, 2, 5, 6],
[ 2, 3, 6, 7],
[ 3, 4, 7, 8]],
[[ 5, 6, 9, 10],
[ 6, 7, 10, 11],
[ 7, 8, 11, 12]]]),
b: ivy.array([[[13, 14, 17, 18],
[14, 15, 18, 19],
[15, 16, 19, 20]],
[[17, 18, 21, 22],
[18, 19, 22, 23],
[19, 20, 23, 24]]])
}
"""
return self._static_sliding_window(
self,
window_size,
stride=stride,
dilation=dilation,
padding=padding,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
@staticmethod
def static_max_unpool1d(
input: ivy.Container,
indices: ivy.Container,
kernel_size: Union[Tuple[int], int],
/,
*,
strides: Optional[Union[int, Tuple[int]]] = None,
padding: Union[int, Tuple[int]] = 0,
data_format: Union[str, ivy.Container] = "NCW",
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.max_unpool1d.
Parameters
----------
input
Pooled input image *[batch_size, w, d_in]*.
indices
Indices obtained from the corresponding max pooling operation.
kernel_size
Size of the kernel i.e., the sliding window for each
dimension of input. *[w]*.
strides
The stride of the sliding window for each dimension of input.
padding
"SAME" or "VALID" indicating the algorithm, or list
indicating the per-dimension paddings.
data_format
"NWC" or "NCW". Defaults to "NCW".
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
Returns
-------
ret
The result of the unpooling operation.
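Examples
--------
A minimal, illustrative sketch, constructing the pooled values and the
indices by hand (in practice these come from the corresponding max
pooling call; the output is omitted since its exact layout is
backend-dependent):
>>> pooled = ivy.Container(a=ivy.array([[[2., 4.]]]))
>>> indices = ivy.Container(a=ivy.array([[[1, 3]]]))
>>> y = ivy.Container.static_max_unpool1d(pooled, indices, 2)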
"""
return ContainerBase.cont_multi_map_in_function(
"max_unpool1d",
input,
indices,
kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
def max_unpool1d(
self,
indices: Union[ivy.Array, ivy.NativeArray, ivy.Container],
kernel_size: Union[Tuple[int], int],
/,
*,
strides: Optional[Union[int, Tuple[int]]] = None,
padding: Union[int, Tuple[int]] = 0,
data_format: Optional[str] = "NCW",
) -> ivy.Container:
"""Compute a 1-D max unpooling given the 1-D pooled input x and its
indices.
Parameters
----------
self
Pooled input image *[batch_size, w, d_in]*.
indices
Indices obtained from the corresponding max pooling operation.
kernel_size
Size of the kernel i.e., the sliding window for each
dimension of input. *[w]*.
strides
The stride of the sliding window for each dimension of input.
padding
"SAME" or "VALID" indicating the algorithm, or list
indicating the per-dimension paddings.
data_format
"NWC" or "NCW". Defaults to "NCW".
Returns
-------
ret
The result of the unpooling operation.
"""
return self.static_max_unpool1d(
self,
indices,
kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
)
@staticmethod
def static_rnn(
step_function: Callable,
inputs: ivy.Array,
initial_states: List[ivy.Array],
/,
*,
go_backwards: bool = False,
mask: Optional[ivy.Array] = None,
constants: Optional[ivy.Array] = None,
unroll: bool = False,
input_length: Optional[int] = None,
time_major: bool = False,
zero_output_for_mask: bool = False,
return_all_outputs: bool = True,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.rnn.
Parameters
----------
step_function
RNN step function.
inputs
Array of temporal data of shape (samples, time, ...).
initial_states
Array with shape (samples, state_size).
go_backwards
If True, do the iteration over the time dimension in reverse order and
return the reversed sequence.
mask
Binary array with shape (samples, time, 1), with a zero for every element
that is masked.
constants
List of constant values passed at each step.
unroll
Whether to use a pythonic while loop or ivy.while_loop
input_length
An integer or 1-D array, depending on whether the time dimension is
fixed-length. In case of variable length input, it is used for masking in
case there is no mask specified.
time_major
If True, the inputs and outputs will be in shape (timesteps, batch, ...)
whereas in the False case, it will be (batch, timesteps, ...).
zero_output_for_mask
If True, the output for masked timesteps will be zeros, whereas in the
False case, the output from the previous timestep is returned.
return_all_outputs
If True, return the recurrent outputs for all timesteps in the sequence. If
False, only return the output for the last timestep.
Returns
-------
ret
A tuple of
- the latest output of the rnn of shape (samples, ...)
- the output of the rnn of shape (samples, time, ...) if
return_all_outputs=True else (samples, 1, ...)
- list of tensors, latest states returned by the step function, of shape
(samples, ...)
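Examples
--------
A minimal, illustrative sketch with a hypothetical pass-through step
function (the shapes are arbitrary and the output is omitted):
>>> def step(inputs, states):
...     return inputs, states
>>> inputs = ivy.random_normal(shape=(2, 3, 4))
>>> initial_states = [ivy.zeros((2, 4))]
>>> outputs = ivy.Container.static_rnn(step, inputs, initial_states)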
"""
return ContainerBase.cont_multi_map_in_function(
"rnn",
step_function,
inputs,
initial_states,
go_backwards=go_backwards,
mask=mask,
constants=constants,
unroll=unroll,
input_length=input_length,
time_major=time_major,
zero_output_for_mask=zero_output_for_mask,
return_all_outputs=return_all_outputs,
)
# global
from typing import Optional, Union, List, Dict
# local
import ivy
from ivy.data_classes.container.base import ContainerBase
class _ContainerWithLosses(ContainerBase):
@staticmethod
def _static_cross_entropy(
true: Union[ivy.Container, ivy.Array, ivy.NativeArray],
pred: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
axis: Union[int, ivy.Container] = -1,
epsilon: Union[float, ivy.Container] = 1e-7,
reduction: Union[str, ivy.Container] = "mean",
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.cross_entropy. This
method simply wraps the function, and so the docstring for
ivy.cross_entropy also applies to this method with minimal changes.
Parameters
----------
true
input array or container containing true labels.
pred
input array or container containing the predicted labels.
axis
the axis along which to compute the cross-entropy. If axis is ``-1``,
the cross-entropy will be computed along the last dimension.
Default: ``-1``.
epsilon
a float in [0.0, 1.0] specifying the amount of smoothing when calculating
the loss. If epsilon is ``0``, no smoothing will be applied.
Default: ``1e-7``.
reduction
``'none'``, ``'mean'`` or ``'sum'``, specifying the type of reduction
to apply to the output. Default: ``'mean'``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
The cross-entropy loss between the given distributions.
Examples
--------
With :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([0, 0, 1]), b=ivy.array([1, 1, 0]))
>>> y = ivy.Container(a=ivy.array([0.6, 0.2, 0.3]),b=ivy.array([0.8, 0.2, 0.2]))
>>> z = ivy.Container.static_cross_entropy(x, y)
>>> print(z)
{
a: ivy.array(1.20397282),
b: ivy.array(1.83258148)
}
With a mix of :class:`ivy.Array` and :class:`ivy.Container` inputs:
>>> x = ivy.array([0, 0, 1])
>>> y = ivy.Container(a=ivy.array([0.6, 0.2, 0.3]),b=ivy.array([0.8, 0.2, 0.2]))
>>> z = ivy.Container.static_cross_entropy(x, y)
>>> print(z)
{
a: ivy.array(1.20397282),
b: ivy.array(1.60943794)
}
"""
return ContainerBase.cont_multi_map_in_function(
"cross_entropy",
true,
pred,
axis=axis,
epsilon=epsilon,
reduction=reduction,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def cross_entropy(
self: ivy.Container,
pred: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
axis: Union[int, ivy.Container] = -1,
epsilon: Union[float, ivy.Container] = 1e-7,
reduction: Union[str, ivy.Container] = "mean",
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.cross_entropy. This
method simply wraps the function, and so the docstring for
ivy.cross_entropy also applies to this method with minimal changes.
Parameters
----------
self
input container containing true labels.
pred
input array or container containing the predicted labels.
axis
the axis along which to compute the cross-entropy. If axis is ``-1``,
the cross-entropy will be computed along the last dimension.
Default: ``-1``.
epsilon
a float in [0.0, 1.0] specifying the amount of smoothing when calculating
the loss. If epsilon is ``0``, no smoothing will be applied.
Default: ``1e-7``.
        reduction
            ``'none'``: No reduction will be applied to the output.
            ``'mean'``: The output will be averaged.
            ``'sum'``: The output will be summed. Default: ``'mean'``.
        key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
The cross-entropy loss between the given distributions.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1, 0, 0]),b=ivy.array([0, 0, 1]))
>>> y = ivy.Container(a=ivy.array([0.6, 0.2, 0.3]),b=ivy.array([0.8, 0.2, 0.2]))
>>> z = x.cross_entropy(y)
>>> print(z)
{
a: ivy.array(0.17027519),
b: ivy.array(0.53647931)
}
"""
return self._static_cross_entropy(
self,
pred,
axis=axis,
epsilon=epsilon,
reduction=reduction,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_binary_cross_entropy(
true: Union[ivy.Container, ivy.Array, ivy.NativeArray],
pred: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
from_logits: Union[bool, ivy.Container] = False,
epsilon: Union[float, ivy.Container] = 0.0,
reduction: Union[str, ivy.Container] = "mean",
pos_weight: Optional[Union[ivy.Container, ivy.Array, ivy.NativeArray]] = None,
axis: Optional[Union[int, ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.binary_cross_entropy.
This method simply wraps the function, and so the docstring for
ivy.binary_cross_entropy also applies to this method with minimal
changes.
Parameters
----------
true
input array or container containing true labels.
pred
            input array or container containing the predicted labels.
from_logits
Whether `pred` is expected to be a logits tensor. By
default, we assume that `pred` encodes a probability distribution.
epsilon
a float in [0.0, 1.0] specifying the amount of smoothing when calculating
the loss. If epsilon is ``0``, no smoothing will be applied. Default: ``0``.
reduction
``'none'``: No reduction will be applied to the output.
``'mean'``: The output will be averaged.
            ``'sum'``: The output will be summed. Default: ``'mean'``.
pos_weight
a weight for positive examples. Must be an array with length equal
to the number of classes.
axis
            Axis along which to compute the cross-entropy.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
The binary cross entropy between the given distributions.
Examples
--------
With :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([1, 0, 0]),b=ivy.array([0, 0, 1]))
>>> y = ivy.Container(a=ivy.array([0.6, 0.2, 0.3]),b=ivy.array([0.8, 0.2, 0.2]))
>>> z = ivy.Container.static_binary_cross_entropy(x, y)
>>> print(z)
{
a: ivy.array([0.511, 0.223, 0.357]),
b: ivy.array([1.61, 0.223, 1.61])
}
With a mix of :class:`ivy.Array` and :class:`ivy.Container` inputs:
>>> x = ivy.array([1 , 1, 0])
>>> y = ivy.Container(a=ivy.array([0.7, 0.8, 0.2]),b=ivy.array([0.2, 0.6, 0.7]))
>>> z = ivy.Container.static_binary_cross_entropy(x, y)
>>> print(z)
{
a: ivy.array([0.357, 0.223, 0.223]),
b: ivy.array([1.61, 0.511, 1.2])
}
"""
return ContainerBase.cont_multi_map_in_function(
"binary_cross_entropy",
true,
pred,
epsilon=epsilon,
from_logits=from_logits,
reduction=reduction,
pos_weight=pos_weight,
axis=axis,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def binary_cross_entropy(
self: ivy.Container,
pred: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
from_logits: Union[bool, ivy.Container] = False,
epsilon: Union[float, ivy.Container] = 0.0,
reduction: Union[str, ivy.Container] = "mean",
pos_weight: Optional[Union[ivy.Container, ivy.Array, ivy.NativeArray]] = None,
axis: Optional[Union[int, ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.binary_cross_entropy.
This method simply wraps the function, and so the docstring for
ivy.binary_cross_entropy also applies to this method with minimal
changes.
Parameters
----------
self
input container containing true labels.
pred
            input array or container containing the predicted labels.
from_logits
Whether `pred` is expected to be a logits tensor. By
default, we assume that `pred` encodes a probability distribution.
epsilon
a float in [0.0, 1.0] specifying the amount of smoothing when
calculating the loss. If epsilon is ``0``, no smoothing will be applied.
Default: ``0``.
reduction
``'none'``: No reduction will be applied to the output.
``'mean'``: The output will be averaged.
            ``'sum'``: The output will be summed. Default: ``'mean'``.
pos_weight
a weight for positive examples. Must be an array with length equal
to the number of classes.
axis
            Axis along which to compute the cross-entropy.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
The binary cross entropy between the given distributions.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1, 0, 0]),b=ivy.array([0, 0, 1]))
>>> y = ivy.Container(a=ivy.array([0.6, 0.2, 0.3]),b=ivy.array([0.8, 0.2, 0.2]))
>>> z = x.binary_cross_entropy(y)
>>> print(z)
{
a: ivy.array(0.36354783),
b: ivy.array(1.14733934)
}
"""
return self._static_binary_cross_entropy(
self,
pred,
epsilon=epsilon,
from_logits=from_logits,
reduction=reduction,
pos_weight=pos_weight,
axis=axis,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_sparse_cross_entropy(
true: Union[ivy.Container, ivy.Array, ivy.NativeArray],
pred: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
axis: Union[int, ivy.Container] = -1,
epsilon: Union[float, ivy.Container] = 1e-7,
reduction: Union[str, ivy.Container] = "mean",
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.sparse_cross_entropy.
This method simply wraps the function, and so the docstring for
ivy.sparse_cross_entropy also applies to this method with minimal
changes.
Parameters
----------
        true
            input array or container containing the true class indices.
        pred
            input array or container containing the predicted probabilities.
axis
the axis along which to compute the cross-entropy. If axis is ``-1``, the
cross-entropy will be computed along the last dimension. Default: ``-1``.
epsilon
a float in [0.0, 1.0] specifying the amount of smoothing when calculating
the loss. If epsilon is ``0``, no smoothing will be applied.
Default: ``1e-7``.
        reduction
            ``'none'``: No reduction will be applied to the output.
            ``'mean'``: The output will be averaged.
            ``'sum'``: The output will be summed. Default: ``'mean'``.
        key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
The sparse cross-entropy loss between the given distributions.
Examples
--------
With :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([1, 0, 0]),b=ivy.array([0, 0, 1]))
>>> y = ivy.Container(a=ivy.array([0.6, 0.2, 0.3]),b=ivy.array([0.8, 0.2, 0.2]))
>>> z = ivy.Container.static_sparse_cross_entropy(x, y)
>>> print(z)
{
a: ivy.array([1.61, 0.511, 0.511]),
b: ivy.array([0.223, 0.223, 1.61])
}
With a mix of :class:`ivy.Array` and :class:`ivy.Container` inputs:
>>> x = ivy.array([1 , 1, 0])
>>> y = ivy.Container(a=ivy.array([0.7, 0.8, 0.2]),b=ivy.array([0.2, 0.6, 0.7]))
>>> z = ivy.Container.static_sparse_cross_entropy(x, y)
>>> print(z)
{
a: ivy.array([0.223, 0.223, 0.357]),
b: ivy.array([0.511, 0.511, 1.61])
}
"""
return ContainerBase.cont_multi_map_in_function(
"sparse_cross_entropy",
true,
pred,
axis=axis,
epsilon=epsilon,
reduction=reduction,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def sparse_cross_entropy(
self: ivy.Container,
pred: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
axis: Union[int, ivy.Container] = -1,
epsilon: Union[float, ivy.Container] = 1e-7,
reduction: Union[str, ivy.Container] = "mean",
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.sparse_cross_entropy.
This method simply wraps the function, and so the docstring for
ivy.sparse_cross_entropy also applies to this method with minimal
changes.
Parameters
----------
        self
            input container containing the true class indices.
        pred
            input array or container containing the predicted probabilities.
axis
the axis along which to compute the cross-entropy. If axis is ``-1``, the
cross-entropy will be computed along the last dimension. Default: ``-1``.
epsilon
a float in [0.0, 1.0] specifying the amount of smoothing when calculating
the loss. If epsilon is ``0``, no smoothing will be applied.
Default: ``1e-7``.
        reduction
            ``'none'``: No reduction will be applied to the output.
            ``'mean'``: The output will be averaged.
            ``'sum'``: The output will be summed. Default: ``'mean'``.
        key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
The sparse cross-entropy loss between the given distributions.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1, 0, 0]),b=ivy.array([0, 0, 1]))
>>> y = ivy.Container(a=ivy.array([0.6, 0.2, 0.3]),b=ivy.array([0.8, 0.2, 0.2]))
>>> z = x.sparse_cross_entropy(y)
>>> print(z)
{
a: ivy.array([0.53647929, 0.1702752, 0.1702752]),
b: ivy.array([0.07438118, 0.07438118, 0.53647929])
}
"""
return self._static_sparse_cross_entropy(
self,
pred,
axis=axis,
epsilon=epsilon,
reduction=reduction,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
| ivy/ivy/data_classes/container/losses.py/0 | {
"file_path": "ivy/ivy/data_classes/container/losses.py",
"repo_id": "ivy",
"token_count": 9830
} | 13 |
# local
from .base import FactorizedTensor
import ivy
# global
from copy import deepcopy
import warnings
def _bisection_root_finder(fun, a, b, tol=1e-6, max_iter=100):
if fun(a) * fun(b) >= 0:
raise ValueError(
"Function values at the interval endpoints must have opposite signs"
)
for _ in range(max_iter):
c = (a + b) / 2
if fun(c) == 0 or (b - a) / 2 < tol:
return c
if fun(c) * fun(a) < 0:
b = c
else:
a = c
raise RuntimeError("Bisection algorithm did not converge")
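# Illustrative usage (not part of the original module): finding the root of
# f(x) = x**2 - 2 on [0, 2], which converges to sqrt(2) ~ 1.41421:
# >>> _bisection_root_finder(lambda v: v**2 - 2, 0.0, 2.0)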
class TuckerTensor(FactorizedTensor):
def __init__(self, tucker_tensor):
super().__init__()
shape, rank = TuckerTensor.validate_tucker_tensor(tucker_tensor)
core, factors = tucker_tensor
self.shape = tuple(shape)
self.rank = tuple(rank)
self.factors = factors
self.core = core
# Built-ins #
# ----------#
def __getitem__(self, index):
if index == 0:
return self.core
elif index == 1:
return self.factors
else:
raise IndexError(
f"You tried to access index {index} of a Tucker tensor.\n"
"You can only access index 0 and 1 of a Tucker tensor"
"(corresponding respectively to core and factors)"
)
def __setitem__(self, index, value):
if index == 0:
self.core = value
elif index == 1:
self.factors = value
else:
raise IndexError(
f"You tried to set index {index} of a Tucker tensor.\n"
"You can only set index 0 and 1 of a Tucker tensor"
"(corresponding respectively to core and factors)"
)
def __iter__(self):
yield self.core
yield self.factors
def __len__(self):
return 2
def __repr__(self):
message = f"Decomposed rank-{self.rank} TuckerTensor of shape {self.shape} "
return message
# Public Methods #
# ---------------#
def to_tensor(self):
return TuckerTensor.tucker_to_tensor(self)
def to_unfolded(self, mode):
return TuckerTensor.tucker_to_unfolded(self, mode)
def tucker_copy(self):
return TuckerTensor(
(
deepcopy(self.core),
[deepcopy(self.factors[i]) for i in range(len(self.factors))],
)
)
def to_vec(self):
return TuckerTensor.tucker_to_vec(self)
def mode_dot(
self,
matrix_or_vector,
mode,
keep_dim,
copy,
):
return TuckerTensor.tucker_mode_dot(
self, matrix_or_vector, mode, keep_dim=keep_dim, copy=copy
)
# Properties #
# ---------------#
@property
def n_param(self):
core, factors = self.core, self.factors
total_params = sum(int(ivy.prod(tensor.shape)) for tensor in [core] + factors)
return total_params
# Class Methods #
# ---------------#
@staticmethod
def validate_tucker_tensor(tucker_tensor):
core, factors = tucker_tensor
if len(factors) < 2:
raise ValueError(
"A Tucker tensor should be composed of at least two factors and a core."
f"However, {len(factors)} factor was given."
)
if len(factors) != len(core.shape):
raise ValueError(
"Tucker decompositions should have one factor per mode of the core"
f" tensor.However, core has {len(core.shape)} modes but"
f" {len(factors)} factors have been provided"
)
shape = []
rank = []
for i, factor in enumerate(factors):
current_shape, current_rank = factor.shape
if current_rank != ivy.shape(core)[i]:
raise ValueError(
"Factor `n` of Tucker decomposition should"
" verify:\nfactors[n].shape[1] = core.shape[n].However,"
f" factors[{i}].shape[1]={ivy.shape(factor)[1]} but"
f" core.shape[{i}]={ivy.shape(core)[i]}."
)
shape.append(current_shape)
rank.append(current_rank)
return tuple(shape), tuple(rank)
@staticmethod
def tucker_to_tensor(
tucker_tensor,
skip_factor=None,
transpose_factors=False,
):
core, factors = tucker_tensor
return ivy.multi_mode_dot(
core, factors, skip=skip_factor, transpose=transpose_factors
)
@staticmethod
def tucker_normalize(tucker_tensor):
core, factors = tucker_tensor
normalized_factors = []
for i, factor in enumerate(factors):
scales = ivy.sqrt(ivy.sum(ivy.abs(factor) ** 2, axis=0))
scales_non_zero = ivy.where(
scales == 0, ivy.ones(ivy.shape(scales), dtype=factor[0].dtype), scales
)
core = core * ivy.reshape(
scales, (1,) * i + (-1,) + (1,) * (len(core.shape) - i - 1)
)
normalized_factors.append(factor / ivy.reshape(scales_non_zero, (1, -1)))
return TuckerTensor((core, normalized_factors))
@staticmethod
def tucker_to_unfolded(
tucker_tensor,
mode=0,
skip_factor=None,
transpose_factors=False,
):
return ivy.unfold(
TuckerTensor.tucker_to_tensor(
tucker_tensor,
skip_factor=skip_factor,
transpose_factors=transpose_factors,
),
mode,
)
@staticmethod
def tucker_to_vec(
tucker_tensor,
skip_factor=None,
transpose_factors=False,
):
return ivy.reshape(
TuckerTensor.tucker_to_tensor(
tucker_tensor,
skip_factor=skip_factor,
transpose_factors=transpose_factors,
),
(-1,),
)
@staticmethod
def tucker_mode_dot(
tucker_tensor,
matrix_or_vector,
mode,
keep_dim=False,
copy=False,
):
shape, _ = TuckerTensor.validate_tucker_tensor(tucker_tensor)
core, factors = tucker_tensor
contract = False
if len(matrix_or_vector.shape) == 2: # Tensor times matrix
# Test for the validity of the operation
if matrix_or_vector.shape[1] != shape[mode]:
raise ValueError(
f"shapes {shape} and {matrix_or_vector.shape} not aligned in"
f" mode-{mode} multiplication: {shape[mode]} (mode = {mode}) !="
f" {matrix_or_vector.shape[1]} (dim 1 of matrix)"
)
elif len(matrix_or_vector.shape) == 1: # Tensor times vector
if matrix_or_vector.shape[0] != shape[mode]:
raise ValueError(
f"shapes {shape} and {matrix_or_vector.shape} not aligned for"
f" mode-{mode} multiplication: {shape[mode]} (mode = {mode}) !="
f" {matrix_or_vector.shape[0]} (vector size)"
)
if not keep_dim:
contract = True # Contract over that mode
else:
raise ValueError("Can only take n_mode_product with a vector or a matrix.")
if copy:
factors = [deepcopy(f) for f in factors]
core = deepcopy(core)
        if contract:
            # contract over the chosen mode: fold the factor into the core
            f = factors.pop(mode)
core = ivy.mode_dot(core, ivy.dot(matrix_or_vector, f), mode=mode)
else:
factors[mode] = ivy.dot(matrix_or_vector, factors[mode])
return TuckerTensor((core, factors))
@staticmethod
def validate_tucker_rank(
tensor_shape, rank="same", rounding="round", fixed_modes=None
):
if rounding == "ceil":
rounding_fun = ivy.ceil
elif rounding == "floor":
rounding_fun = ivy.floor
elif rounding == "round":
rounding_fun = ivy.round
else:
raise ValueError(
f"Rounding should be round, floor or ceil, but got {rounding}"
)
# rank is 'same' or float: choose rank so as to
# preserve a fraction of the original #parameters
if rank == "same":
rank = float(1)
if isinstance(rank, float):
n_modes_compressed = len(tensor_shape)
n_param_tensor = int(ivy.prod(tensor_shape))
if fixed_modes is not None:
tensor_shape = list(tensor_shape)
                # sorted to be careful with the order when popping
                # and reinserting, so we don't remove/add at the wrong index.
                # list of (mode, shape) pairs that were removed; these modes
                # are kept as-is, i.e. rank[mode] = shape[mode].
fixed_modes = [
(mode, tensor_shape.pop(mode))
for mode in sorted(fixed_modes, reverse=True)
][::-1]
# number of parameters coming from the fixed modes
                # (these don't have a variable size as a function of fraction_param)
n_fixed_params = ivy.sum(
[s**2 for _, s in fixed_modes]
) # size of the factors
n_modes_compressed -= len(fixed_modes)
else:
n_fixed_params = 0
# Doesn't contain fixed_modes,
# those factors are accounted for in fixed_params
squared_dims = ivy.sum([s**2 for s in tensor_shape])
def fun(x):
return (
n_param_tensor * x**n_modes_compressed
+ squared_dims * x
+ n_fixed_params * x
- rank * n_param_tensor
)
# fraction_param = brentq(fun, 0.0, max(rank, 1.0))
fraction_param = _bisection_root_finder(fun, 0.0, max(rank, 1.0))
rank = [max(int(rounding_fun(s * fraction_param)), 1) for s in tensor_shape]
if fixed_modes is not None:
for mode, size in fixed_modes:
rank.insert(mode, size)
elif isinstance(rank, int):
n_modes = len(tensor_shape)
warnings.warn(
"Given only one int for 'rank' for decomposition a tensor of order"
f" {n_modes}. Using this rank for all modes."
)
if fixed_modes is None:
rank = [rank] * n_modes
else:
rank = [
rank if i not in fixed_modes else s
for (i, s) in enumerate(tensor_shape)
] # *n_mode
return rank
@staticmethod
def tucker_n_param(shape, rank):
core_params = ivy.prod(rank)
factors_params = ivy.sum([r * s for (r, s) in zip(rank, shape)])
return int(core_params + factors_params)
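    # Illustrative check (not part of the original module): for shape (5, 6, 7)
    # and rank (2, 3, 4), tucker_n_param gives 2*3*4 + (2*5 + 3*6 + 4*7) = 80.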
| ivy/ivy/data_classes/factorized_tensor/tucker_tensor.py/0 | {
"file_path": "ivy/ivy/data_classes/factorized_tensor/tucker_tensor.py",
"repo_id": "ivy",
"token_count": 5625
} | 14 |
use crate::{c_lib, Result};
use std::marker::PhantomData;
/// A device attached to a [`super::PjRtClient`].
pub struct PjRtDevice<'a> {
pub(super) device: c_lib::pjrt_device,
pub(super) marker: PhantomData<&'a super::PjRtClient>,
}
impl PjRtDevice<'_> {
/// The device unique identifier.
pub fn id(&self) -> usize {
(unsafe { c_lib::pjrt_device_id(self.device) }) as usize
}
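    /// The index of the process that this device belongs to.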
pub fn process_index(&self) -> usize {
(unsafe { c_lib::pjrt_device_process_index(self.device) }) as usize
}
pub fn local_hardware_id(&self) -> usize {
(unsafe { c_lib::pjrt_device_local_hardware_id(self.device) }) as usize
}
#[allow(clippy::inherent_to_string)]
pub fn to_string(&self) -> String {
unsafe {
let ptr = c_lib::pjrt_device_to_string(self.device);
super::c_ptr_to_string(ptr)
}
}
pub fn kind(&self) -> String {
unsafe {
let ptr = c_lib::pjrt_device_kind(self.device);
super::c_ptr_to_string(ptr)
}
}
pub fn debug_string(&self) -> String {
unsafe {
let ptr = c_lib::pjrt_device_debug_string(self.device);
super::c_ptr_to_string(ptr)
}
}
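    /// Transfer the given literal to the device's infeed queue.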
pub fn transfer_to_infeed(&self, src: &super::Literal) -> Result<()> {
let status = unsafe { c_lib::pjrt_device_transfer_to_infeed(self.device, src.0) };
super::handle_status(status)?;
Ok(())
}
    /// Transfer a value with the shape of `dst` from the outfeed queue,
    /// writing it into `dst`.
pub fn transfer_from_outfeed(&self, dst: &mut super::Literal) -> Result<()> {
let status = unsafe { c_lib::pjrt_device_transfer_from_outfeed(self.device, dst.0) };
super::handle_status(status)?;
Ok(())
}
}
| ivy/ivy/engines/XLA/rust_api/src/wrappers/pjrt_device.rs/0 | {
"file_path": "ivy/ivy/engines/XLA/rust_api/src/wrappers/pjrt_device.rs",
"repo_id": "ivy",
"token_count": 818
} | 15 |
# global
import numpy as np
import jax.numpy as jnp
from typing import Optional, Union, Sequence, List
# local
import ivy
from ivy.functional.backends.jax import JaxArray
from ivy.functional.ivy.data_type import _handle_nestable_dtype_info
ivy_dtype_dict = {
jnp.dtype("int8"): "int8",
jnp.dtype("int16"): "int16",
jnp.dtype("int32"): "int32",
jnp.dtype("int64"): "int64",
jnp.dtype("uint8"): "uint8",
jnp.dtype("uint16"): "uint16",
jnp.dtype("uint32"): "uint32",
jnp.dtype("uint64"): "uint64",
jnp.dtype("bfloat16"): "bfloat16",
jnp.dtype("float16"): "float16",
jnp.dtype("float32"): "float32",
jnp.dtype("float64"): "float64",
jnp.dtype("complex64"): "complex64",
jnp.dtype("complex128"): "complex128",
jnp.dtype("bool"): "bool",
jnp.int8: "int8",
jnp.int16: "int16",
jnp.int32: "int32",
jnp.int64: "int64",
jnp.uint8: "uint8",
jnp.uint16: "uint16",
jnp.uint32: "uint32",
jnp.uint64: "uint64",
jnp.bfloat16: "bfloat16",
jnp.float16: "float16",
jnp.float32: "float32",
jnp.float64: "float64",
jnp.complex64: "complex64",
jnp.complex128: "complex128",
jnp.bool_: "bool",
}
native_dtype_dict = {
"int8": jnp.dtype("int8"),
"int16": jnp.dtype("int16"),
"int32": jnp.dtype("int32"),
"int64": jnp.dtype("int64"),
"uint8": jnp.dtype("uint8"),
"uint16": jnp.dtype("uint16"),
"uint32": jnp.dtype("uint32"),
"uint64": jnp.dtype("uint64"),
"bfloat16": jnp.dtype("bfloat16"),
"float16": jnp.dtype("float16"),
"float32": jnp.dtype("float32"),
"float64": jnp.dtype("float64"),
"complex64": jnp.dtype("complex64"),
"complex128": jnp.dtype("complex128"),
"bool": jnp.dtype("bool"),
}
char_rep_dtype_dict = {
"?": "bool",
"i": int,
"i1": "int8",
"i2": "int16",
"i4": "int32",
"i8": "int64",
"f": float,
"f2": "float16",
"f4": "float32",
"f8": "float64",
"c": complex,
"c8": "complex64",
"c16": "complex128",
"u": "uint32",
"u1": "uint8",
"u2": "uint16",
"u4": "uint32",
"u8": "uint64",
}
class Finfo:
def __init__(self, jnp_finfo: jnp.finfo):
self._jnp_finfo = jnp_finfo
def __repr__(self):
return repr(self._jnp_finfo)
@property
def bits(self):
return self._jnp_finfo.bits
@property
def eps(self):
return float(self._jnp_finfo.eps)
@property
def max(self):
return float(self._jnp_finfo.max)
@property
def min(self):
return float(self._jnp_finfo.min)
@property
def smallest_normal(self):
return float(self._jnp_finfo.tiny)
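# Illustrative usage (not part of the original module):
# >>> Finfo(jnp.finfo(jnp.float32)).bits  # 32
# >>> Finfo(jnp.finfo(jnp.float32)).eps   # ~1.1920929e-07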
# Array API Standard #
# -------------------#
def astype(
x: JaxArray,
dtype: jnp.dtype,
/,
*,
copy: bool = True,
out: Optional[JaxArray] = None,
) -> JaxArray:
dtype = ivy.as_native_dtype(dtype)
ivy.utils.assertions._check_jax_x64_flag(dtype)
if x.dtype == dtype:
return jnp.copy(x) if copy else x
return x.astype(dtype)
def broadcast_arrays(*arrays: JaxArray) -> List[JaxArray]:
try:
return jnp.broadcast_arrays(*arrays)
except ValueError as e:
raise ivy.utils.exceptions.IvyBroadcastShapeError(e) from e
def broadcast_to(
x: JaxArray,
/,
shape: Union[ivy.NativeShape, Sequence[int]],
*,
out: Optional[JaxArray] = None,
) -> JaxArray:
ivy.utils.assertions.check_shapes_broadcastable(x.shape, shape)
if x.ndim > len(shape):
return jnp.broadcast_to(x.reshape(-1), shape)
return jnp.broadcast_to(x, shape)
@_handle_nestable_dtype_info
def finfo(type: Union[jnp.dtype, str, JaxArray, np.ndarray], /) -> Finfo:
if isinstance(type, np.ndarray):
type = type.dtype.name
return Finfo(jnp.finfo(ivy.as_native_dtype(type)))
@_handle_nestable_dtype_info
def iinfo(type: Union[jnp.dtype, str, JaxArray, np.ndarray], /) -> np.iinfo:
if isinstance(type, np.ndarray):
type = type.dtype.name
return jnp.iinfo(ivy.as_native_dtype(type))
def result_type(*arrays_and_dtypes: Union[JaxArray, jnp.dtype]) -> ivy.Dtype:
if len(arrays_and_dtypes) <= 1:
        return jnp.result_type(*arrays_and_dtypes)
result = jnp.result_type(arrays_and_dtypes[0], arrays_and_dtypes[1])
for i in range(2, len(arrays_and_dtypes)):
result = jnp.result_type(result, arrays_and_dtypes[i])
return as_ivy_dtype(result)
# Extra #
# ------#
def as_ivy_dtype(
dtype_in: Union[jnp.dtype, str, int, float, complex, bool, np.dtype],
/,
) -> ivy.Dtype:
if dtype_in is int:
return ivy.default_int_dtype()
if dtype_in is float:
return ivy.default_float_dtype()
if dtype_in is complex:
return ivy.default_complex_dtype()
if dtype_in is bool:
return ivy.Dtype("bool")
if isinstance(dtype_in, np.dtype):
dtype_in = dtype_in.name
if isinstance(dtype_in, str):
if dtype_in in char_rep_dtype_dict:
return as_ivy_dtype(char_rep_dtype_dict[dtype_in])
if dtype_in in native_dtype_dict:
dtype_str = dtype_in
else:
raise ivy.utils.exceptions.IvyException(
"Cannot convert to ivy dtype."
f" {dtype_in} is not supported by JAX backend."
)
else:
dtype_str = ivy_dtype_dict[dtype_in]
if "uint" in dtype_str:
return ivy.UintDtype(dtype_str)
elif "int" in dtype_str:
return ivy.IntDtype(dtype_str)
elif "float" in dtype_str:
return ivy.FloatDtype(dtype_str)
elif "complex" in dtype_str:
return ivy.ComplexDtype(dtype_str)
elif "bool" in dtype_str:
return ivy.Dtype("bool")
else:
raise ivy.utils.exceptions.IvyException(
f"Cannot recognize {dtype_str} as a valid Dtype."
)
def as_native_dtype(
dtype_in: Union[jnp.dtype, str, bool, int, float, np.dtype],
) -> jnp.dtype:
if dtype_in is int:
return ivy.default_int_dtype(as_native=True)
if dtype_in is float:
return ivy.default_float_dtype(as_native=True)
if dtype_in is complex:
return ivy.default_complex_dtype(as_native=True)
if dtype_in is bool:
return jnp.dtype("bool")
if isinstance(dtype_in, np.dtype):
dtype_in = dtype_in.name
if not isinstance(dtype_in, str):
return dtype_in
if dtype_in in char_rep_dtype_dict:
return as_native_dtype(char_rep_dtype_dict[dtype_in])
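    # NOTE: this membership test works because numpy/jax dtype objects compare
    # equal to their canonical name strings, e.g. jnp.dtype("int8") == "int8".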
if dtype_in in native_dtype_dict.values():
return native_dtype_dict[ivy.Dtype(dtype_in)]
else:
raise ivy.utils.exceptions.IvyException(
f"Cannot convert to Jax dtype. {dtype_in} is not supported by Jax."
)
def dtype(x: Union[JaxArray, np.ndarray], *, as_native: bool = False) -> ivy.Dtype:
if as_native:
return ivy.as_native_dtype(x.dtype)
return as_ivy_dtype(x.dtype)
def dtype_bits(dtype_in: Union[jnp.dtype, str, np.dtype], /) -> int:
dtype_str = as_ivy_dtype(dtype_in)
if "bool" in dtype_str:
return 1
return int(
dtype_str.replace("uint", "")
.replace("int", "")
.replace("bfloat", "")
.replace("float", "")
.replace("complex", "")
)
def is_native_dtype(dtype_in: Union[jnp.dtype, str], /) -> bool:
if not ivy.is_hashable_dtype(dtype_in):
return False
return dtype_in in ivy_dtype_dict
| ivy/ivy/functional/backends/jax/data_type.py/0 | {
"file_path": "ivy/ivy/functional/backends/jax/data_type.py",
"repo_id": "ivy",
"token_count": 3595
} | 16 |
import jax.numpy as jnp
from typing import Optional
from ivy.functional.backends.jax import JaxArray
def l1_normalize(
x: JaxArray,
/,
*,
axis: Optional[int] = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
    if not isinstance(x, JaxArray):
        x = jnp.array(x)
    if axis is None:
        norm = jnp.sum(jnp.abs(jnp.ravel(x)))
    else:
        norm = jnp.sum(jnp.abs(x), axis=axis, keepdims=True)
    # clamp the norm to avoid division by zero for all-zero slices
    denorm = jnp.maximum(norm, 1e-12)
    return jnp.divide(x, denorm)
def l2_normalize(
x: JaxArray,
/,
*,
axis: Optional[int] = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
if axis is None:
denorm = jnp.linalg.norm(x.flatten(), 2, axis)
else:
denorm = jnp.linalg.norm(x, 2, axis, keepdims=True)
denorm = jnp.maximum(denorm, 1e-12)
return x / denorm
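# Illustrative check (not part of the original module): l2_normalize scales to
# unit L2 norm, e.g. l2_normalize(jnp.array([3.0, 4.0])) -> [0.6, 0.8].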
def lp_normalize(
x: JaxArray,
/,
*,
p: float = 2,
axis: Optional[int] = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
if axis is None:
denorm = jnp.linalg.norm(x.flatten(), axis=axis, ord=p)
else:
denorm = jnp.linalg.norm(x, axis=axis, ord=p, keepdims=True)
denorm = jnp.maximum(denorm, 1e-12)
return jnp.divide(x, denorm)
| ivy/ivy/functional/backends/jax/experimental/norms.py/0 | {
"file_path": "ivy/ivy/functional/backends/jax/experimental/norms.py",
"repo_id": "ivy",
"token_count": 640
} | 17 |
# global
import jax.numpy as jnp
from typing import Tuple, Optional
from collections import namedtuple
# local
from ivy.functional.backends.jax import JaxArray
import ivy
def unique_all(
x: JaxArray,
/,
*,
axis: Optional[int] = None,
by_value: bool = True,
) -> Tuple[JaxArray, JaxArray, JaxArray, JaxArray]:
Results = namedtuple(
"Results",
["values", "indices", "inverse_indices", "counts"],
)
values, indices, inverse_indices, counts = jnp.unique(
x,
return_index=True,
return_counts=True,
return_inverse=True,
axis=axis,
)
nan_count = jnp.sum(jnp.isnan(x)).item()
if nan_count > 1:
values = jnp.concatenate(
(
values,
jnp.full(
fill_value=jnp.nan, shape=(nan_count - 1,), dtype=values.dtype
),
),
axis=0,
)
counts = jnp.concatenate(
(
counts[:-1],
jnp.full(fill_value=1, shape=(nan_count,), dtype=counts.dtype),
),
axis=0,
)
nan_idx = jnp.where(jnp.isnan(x.flatten()))[0]
indices = jnp.concatenate((indices[:-1], nan_idx), axis=0).astype(indices.dtype)
if not by_value:
sort_idx = jnp.argsort(indices)
values = jnp.take(values, sort_idx, axis=axis)
counts = jnp.take(counts, sort_idx)
indices = jnp.take(indices, sort_idx)
inv_sort_idx = ivy.current_backend().invert_permutation(sort_idx)
inverse_indices = jnp.vectorize(lambda y: jnp.take(inv_sort_idx, y))(
inverse_indices
)
return Results(
values.astype(x.dtype),
indices,
inverse_indices,
counts,
)
def unique_counts(
x: JaxArray,
/,
) -> Tuple[JaxArray, JaxArray]:
v, c = jnp.unique(x, return_counts=True)
nan_count = jnp.count_nonzero(jnp.isnan(x))
if nan_count > 1:
nan_idx = jnp.where(jnp.isnan(v))
c = c.at[nan_idx].set(1)
v = jnp.append(v, jnp.full(nan_count - 1, jnp.nan)).astype(x.dtype)
c = jnp.append(c, jnp.full(nan_count - 1, 1)).astype("int32")
Results = namedtuple("Results", ["values", "counts"])
return Results(v, c)
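# Illustrative behaviour (assumed, not from the original source): NaNs are
# treated as distinct values, so for x = jnp.array([jnp.nan, jnp.nan, 1.0])
# unique_counts returns values [1., nan, nan] with counts [1, 1, 1].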
def unique_inverse(
x: JaxArray,
/,
*,
axis: Optional[int] = None,
) -> Tuple[JaxArray, JaxArray]:
Results = namedtuple("Results", ["values", "inverse_indices"])
values, inverse_indices = jnp.unique(x, return_inverse=True, axis=axis)
nan_count = jnp.count_nonzero(jnp.isnan(x))
if nan_count > 1:
values = jnp.append(values, jnp.full(nan_count - 1, jnp.nan), axis=0).astype(
x.dtype
)
inverse_indices = jnp.reshape(inverse_indices, x.shape)
return Results(values, inverse_indices)
def unique_values(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray:
nan_count = jnp.count_nonzero(jnp.isnan(x))
if nan_count > 1:
unique = jnp.append(
jnp.unique(x.flatten()), jnp.full(nan_count - 1, jnp.nan)
).astype(x.dtype)
else:
unique = jnp.unique(x.flatten()).astype(x.dtype)
return unique
| ivy/ivy/functional/backends/jax/set.py/0 | {
"file_path": "ivy/ivy/functional/backends/jax/set.py",
"repo_id": "ivy",
"token_count": 1626
} | 18 |
"""Collection of MXNet gradient functions, wrapped to fit Ivy syntax and
signature."""
# global
from typing import Sequence, Union
import mxnet as mx
# local
from ivy.utils.exceptions import IvyNotImplementedException
def variable(x, /):
return x
def is_variable(x, /, *, exclusive=False):
return isinstance(x, mx.ndarray.NDArray)
def variable_data(x, /):
raise IvyNotImplementedException()
def execute_with_gradients(
func,
xs,
/,
*,
retain_grads: bool = False,
xs_grad_idxs: Sequence[Sequence[Union[str, int]]] = ((0,),),
ret_grad_idxs: Sequence[Sequence[Union[str, int]]] = ((0,),),
):
raise IvyNotImplementedException()
def value_and_grad(func):
raise IvyNotImplementedException()
def jac(func):
raise IvyNotImplementedException()
def grad(func, argnums=0):
raise IvyNotImplementedException()
def stop_gradient(x, /, *, preserve_type=True, out=None):
raise IvyNotImplementedException()
| ivy/ivy/functional/backends/mxnet/gradients.py/0 | {
"file_path": "ivy/ivy/functional/backends/mxnet/gradients.py",
"repo_id": "ivy",
"token_count": 357
} | 19 |
# global
from typing import Optional, Union, Sequence, List
import numpy as np
# local
import ivy
from ivy.func_wrapper import with_unsupported_dtypes
from ivy.functional.ivy.data_type import _handle_nestable_dtype_info
from . import backend_version
ivy_dtype_dict = {
np.dtype("int8"): "int8",
np.dtype("int16"): "int16",
np.dtype("int32"): "int32",
np.dtype("int64"): "int64",
np.dtype("uint8"): "uint8",
np.dtype("uint16"): "uint16",
np.dtype("uint32"): "uint32",
np.dtype("uint64"): "uint64",
# np.dtype("bfloat16"): "bfloat16",
np.dtype("float16"): "float16",
np.dtype("float32"): "float32",
np.dtype("float64"): "float64",
np.dtype("complex64"): "complex64",
np.dtype("complex128"): "complex128",
np.dtype("bool"): "bool",
np.int8: "int8",
np.int16: "int16",
np.int32: "int32",
np.int64: "int64",
np.uint8: "uint8",
np.uint16: "uint16",
np.uint32: "uint32",
np.uint64: "uint64",
np.float16: "float16",
np.float32: "float32",
np.float64: "float64",
np.complex64: "complex64",
np.complex128: "complex128",
np.bool_: "bool",
}
native_dtype_dict = {
"int8": np.dtype("int8"),
"int16": np.dtype("int16"),
"int32": np.dtype("int32"),
"int64": np.dtype("int64"),
"uint8": np.dtype("uint8"),
"uint16": np.dtype("uint16"),
"uint32": np.dtype("uint32"),
"uint64": np.dtype("uint64"),
"float16": np.dtype("float16"),
"float32": np.dtype("float32"),
"float64": np.dtype("float64"),
"complex64": np.dtype("complex64"),
"complex128": np.dtype("complex128"),
"bool": np.dtype("bool"),
}
char_rep_dtype_dict = {
"?": "bool",
"i": int,
"i1": "int8",
"i2": "int16",
"i4": "int32",
"i8": "int64",
"f": float,
"f2": "float16",
"f4": "float32",
"f8": "float64",
"c": complex,
"c8": "complex64",
"c16": "complex128",
"u": "uint32",
"u1": "uint8",
"u2": "uint16",
"u4": "uint32",
"u8": "uint64",
}
class Finfo:
def __init__(self, np_finfo: np.finfo):
self._np_finfo = np_finfo
def __repr__(self):
return repr(self._np_finfo)
@property
def bits(self):
return self._np_finfo.bits
@property
def eps(self):
return float(self._np_finfo.eps)
@property
def max(self):
return float(self._np_finfo.max)
@property
def min(self):
return float(self._np_finfo.min)
@property
def smallest_normal(self):
return float(self._np_finfo.tiny)
# Array API Standard #
# -------------------#
def astype(
x: np.ndarray,
dtype: np.dtype,
/,
*,
copy: bool = True,
out: Optional[ivy.Array] = None,
) -> np.ndarray:
dtype = ivy.as_native_dtype(dtype)
if x.dtype == dtype:
return np.copy(x) if copy else x
return x.astype(dtype)
def broadcast_arrays(*arrays: np.ndarray) -> List[np.ndarray]:
try:
return np.broadcast_arrays(*arrays)
except ValueError as e:
raise ivy.utils.exceptions.IvyBroadcastShapeError(e) from e
@with_unsupported_dtypes({"1.26.3 and below": ("complex",)}, backend_version)
def broadcast_to(
x: np.ndarray,
/,
shape: Union[ivy.NativeShape, Sequence[int]],
*,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
ivy.utils.assertions.check_shapes_broadcastable(x.shape, shape)
if x.ndim > len(shape):
return np.broadcast_to(x.reshape([-1]), shape)
return np.broadcast_to(x, shape)
@_handle_nestable_dtype_info
def finfo(type: Union[np.dtype, str, np.ndarray], /) -> Finfo:
if isinstance(type, np.ndarray):
type = type.dtype
return Finfo(np.finfo(ivy.as_native_dtype(type)))
@_handle_nestable_dtype_info
def iinfo(type: Union[np.dtype, str, np.ndarray], /) -> np.iinfo:
if isinstance(type, np.ndarray):
type = type.dtype
return np.iinfo(ivy.as_native_dtype(type))
def result_type(*arrays_and_dtypes: Union[np.ndarray, np.dtype]) -> ivy.Dtype:
if len(arrays_and_dtypes) <= 1:
        return np.result_type(*arrays_and_dtypes)
result = np.result_type(arrays_and_dtypes[0], arrays_and_dtypes[1])
for i in range(2, len(arrays_and_dtypes)):
result = np.result_type(result, arrays_and_dtypes[i])
return as_ivy_dtype(result)
# Extra #
# ------#
def as_ivy_dtype(
dtype_in: Union[np.dtype, str, int, float, complex, bool],
/,
) -> ivy.Dtype:
if dtype_in is int:
return ivy.default_int_dtype()
if dtype_in is float:
return ivy.default_float_dtype()
if dtype_in is complex:
return ivy.default_complex_dtype()
if dtype_in is bool:
return ivy.Dtype("bool")
if isinstance(dtype_in, str):
if dtype_in in char_rep_dtype_dict:
return as_ivy_dtype(char_rep_dtype_dict[dtype_in])
if dtype_in in native_dtype_dict:
dtype_str = dtype_in
else:
raise ivy.utils.exceptions.IvyException(
"Cannot convert to ivy dtype."
f" {dtype_in} is not supported by NumPy backend."
)
else:
dtype_str = ivy_dtype_dict[dtype_in]
if "uint" in dtype_str:
return ivy.UintDtype(dtype_str)
elif "int" in dtype_str:
return ivy.IntDtype(dtype_str)
elif "float" in dtype_str:
return ivy.FloatDtype(dtype_str)
elif "complex" in dtype_str:
return ivy.ComplexDtype(dtype_str)
elif "bool" in dtype_str:
return ivy.Dtype("bool")
else:
raise ivy.utils.exceptions.IvyException(
f"Cannot recognize {dtype_str} as a valid Dtype."
)
@with_unsupported_dtypes({"1.26.3 and below": ("bfloat16",)}, backend_version)
def as_native_dtype(dtype_in: Union[np.dtype, str, bool, int, float], /) -> np.dtype:
if dtype_in is int:
return ivy.default_int_dtype(as_native=True)
if dtype_in is float:
return ivy.default_float_dtype(as_native=True)
if dtype_in is complex:
return ivy.default_complex_dtype(as_native=True)
if dtype_in is bool:
return np.dtype("bool")
if not isinstance(dtype_in, str):
return dtype_in
if dtype_in in char_rep_dtype_dict:
return as_native_dtype(char_rep_dtype_dict[dtype_in])
if dtype_in in native_dtype_dict.values():
return native_dtype_dict[ivy.Dtype(dtype_in)]
else:
raise ivy.utils.exceptions.IvyException(
f"Cannot convert to numpy dtype. {dtype_in} is not supported by NumPy."
)
def dtype(x: np.ndarray, *, as_native: bool = False) -> ivy.Dtype:
if as_native:
return ivy.to_native(x).dtype
return as_ivy_dtype(x.dtype)
def dtype_bits(dtype_in: Union[np.dtype, str], /) -> int:
dtype_str = as_ivy_dtype(dtype_in)
if "bool" in dtype_str:
return 1
return int(
dtype_str.replace("uint", "")
.replace("int", "")
.replace("bfloat", "")
.replace("float", "")
.replace("complex", "")
)
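# Illustrative examples (not part of the original module):
# dtype_bits("float32") -> 32, dtype_bits("uint8") -> 8, dtype_bits("bool") -> 1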
def is_native_dtype(dtype_in: Union[np.dtype, str], /) -> bool:
if not ivy.is_hashable_dtype(dtype_in):
return False
return dtype_in in ivy_dtype_dict
| ivy/ivy/functional/backends/numpy/data_type.py/0 | {
"file_path": "ivy/ivy/functional/backends/numpy/data_type.py",
"repo_id": "ivy",
"token_count": 3414
} | 20 |
import numpy as np
from typing import Optional
from ivy.func_wrapper import with_unsupported_dtypes
from . import backend_version
@with_unsupported_dtypes({"1.26.3 and below": ("float16",)}, backend_version)
def l1_normalize(
x: np.ndarray,
/,
*,
axis: Optional[int] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
    if axis is None:
        norm = np.sum(np.abs(np.reshape(x, -1)))
    else:
        norm = np.sum(np.abs(x), axis=axis, keepdims=True)
    # clamp the norm to avoid division by zero for all-zero slices
    denorm = np.maximum(norm, 1e-12)
    return np.divide(x, denorm)
def l2_normalize(
x: np.ndarray,
/,
*,
axis: Optional[int] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if axis is None:
denorm = np.linalg.norm(x.flatten(), 2, axis)
else:
denorm = np.linalg.norm(x, 2, axis, keepdims=True)
denorm = np.maximum(denorm, 1e-12)
return x / denorm
def lp_normalize(
x: np.ndarray,
/,
*,
p: float = 2,
axis: Optional[int] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if axis is None:
denorm = np.linalg.norm(x.flatten(), axis=axis, ord=p)
else:
denorm = np.linalg.norm(x, axis=axis, ord=p, keepdims=True)
denorm = np.maximum(denorm, 1e-12)
return np.divide(x, denorm, out=out)
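# Illustrative check (not part of the original module): with p=1 the result has
# unit L1 norm, e.g. lp_normalize(np.array([1.0, 3.0]), p=1) -> [0.25, 0.75].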
| ivy/ivy/functional/backends/numpy/experimental/norms.py/0 | {
"file_path": "ivy/ivy/functional/backends/numpy/experimental/norms.py",
"repo_id": "ivy",
"token_count": 641
} | 21 |
from numbers import Number
from typing import Optional, Tuple, Union
import numpy as np
import ivy
# Array API Standard #
# ------------------ #
def argmax(
x: np.ndarray,
/,
*,
axis: Optional[int] = None,
keepdims: bool = False,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
select_last_index: bool = False,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if select_last_index:
x = np.flip(x, axis=axis)
ret = np.argmax(x, axis=axis, keepdims=keepdims)
if axis is not None:
ret = np.array(x.shape[axis] - ret - 1)
else:
ret = np.array(x.size - ret - 1)
else:
ret = np.array(np.argmax(x, axis=axis, keepdims=keepdims))
if dtype:
dtype = ivy.as_native_dtype(dtype)
ret = ret.astype(dtype)
return ret
def argmin(
x: np.ndarray,
/,
*,
axis: Optional[int] = None,
keepdims: bool = False,
dtype: Optional[np.dtype] = None,
select_last_index: bool = False,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if select_last_index:
x = np.flip(x, axis=axis)
ret = np.argmin(x, axis=axis, keepdims=keepdims)
if axis is not None:
ret = np.array(x.shape[axis] - ret - 1)
else:
ret = np.array(x.size - ret - 1)
else:
ret = np.array(np.argmin(x, axis=axis, keepdims=keepdims))
if dtype:
dtype = ivy.as_native_dtype(dtype)
return ret.astype(dtype)
return ret
def nonzero(
x: np.ndarray,
/,
*,
as_tuple: bool = True,
size: Optional[int] = None,
fill_value: Number = 0,
) -> Union[np.ndarray, Tuple[np.ndarray]]:
res = np.nonzero(x)
if size is not None:
if isinstance(fill_value, float):
res = np.asarray(res, dtype=np.float64)
diff = size - res[0].shape[0]
if diff > 0:
res = np.pad(res, ((0, 0), (0, diff)), constant_values=fill_value)
elif diff < 0:
res = np.array(res)[:, :size]
if as_tuple:
return tuple(res)
return np.stack(res, axis=1)
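# Illustrative behaviour (not part of the original module): for
# x = np.array([0, 1, 0, 2]), nonzero(x, size=3, fill_value=-1) pads the
# index array to the requested size, returning (array([ 1,  3, -1]),).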
def where(
condition: np.ndarray,
x1: np.ndarray,
x2: np.ndarray,
/,
*,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
return ivy.astype(np.where(condition, x1, x2), x1.dtype, copy=False)
# Extra #
# ----- #
def argwhere(
x: np.ndarray,
/,
*,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
return np.argwhere(x)
| ivy/ivy/functional/backends/numpy/searching.py/0 | {
"file_path": "ivy/ivy/functional/backends/numpy/searching.py",
"repo_id": "ivy",
"token_count": 1251
} | 22 |
import ivy
from ivy.functional.ivy.experimental.sparse_array import (
_verify_coo_components,
_verify_csr_components,
_is_data_not_indices_values_and_shape,
)
from ivy.func_wrapper import (
with_unsupported_device_and_dtypes,
)
from ivy.utils.exceptions import IvyNotImplementedException
import paddle
# local
from .. import backend_version
def is_native_sparse_array(x: paddle.Tensor) -> bool:
return x.is_sparse_coo() or x.is_sparse_csr()
@with_unsupported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("int8",)}}, backend_version
)
def native_sparse_array(
data=None,
*,
coo_indices=None,
crow_indices=None,
col_indices=None,
ccol_indices=None,
row_indices=None,
values=None,
dense_shape=None,
format="coo",
) -> paddle.Tensor:
format = format.lower()
if format not in ["coo", "csr"]:
raise IvyNotImplementedException(
"paddle only supports 'coo' and 'csr' sparse formats."
)
if _is_data_not_indices_values_and_shape(
data,
coo_indices,
crow_indices,
col_indices,
ccol_indices,
row_indices,
values,
dense_shape,
):
ivy.utils.assertions.check_true(
ivy.is_native_sparse_array(data), message="not a sparse array"
)
return data
if format == "coo":
_verify_coo_components(
indices=coo_indices, values=values, dense_shape=dense_shape
)
return paddle.sparse.sparse_coo_tensor(
indices=coo_indices,
values=values,
shape=dense_shape,
)
else:
_verify_csr_components(
crow_indices=crow_indices,
col_indices=col_indices,
values=values,
dense_shape=dense_shape,
)
return paddle.sparse.sparse_csr_tensor(
crows=crow_indices,
cols=col_indices,
values=values,
shape=dense_shape,
)
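# Illustrative COO construction (assumed values, not from the original source):
# native_sparse_array(
#     coo_indices=[[0, 1], [2, 0]], values=[9.0, 8.0], dense_shape=[2, 3]
# ) builds a 2x3 sparse tensor with entries 9.0 at (0, 2) and 8.0 at (1, 0).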
def native_sparse_array_to_indices_values_and_shape(x):
if not is_native_sparse_array(x):
raise ivy.utils.exceptions.IvyException("not a Paddle Sparse Array")
if x.is_sparse_coo():
return {"coo_indices": x.indices()}, x.values(), x.shape
else:
return (
{"crow_indices": x.crows(), "col_indices": x.cols()},
x.values(),
x.shape,
)
| ivy/ivy/functional/backends/paddle/experimental/sparse_array.py/0 | {
"file_path": "ivy/ivy/functional/backends/paddle/experimental/sparse_array.py",
"repo_id": "ivy",
"token_count": 1197
} | 23 |
# global
import sys
import logging
import tensorflow as tf
for device in tf.config.experimental.list_physical_devices("GPU"):
try:
tf.config.experimental.set_memory_growth(device, True)
except RuntimeError as e:
logging.warning(f"can not set {device} to dynamically allocate memory. {e}")
from tensorflow.python.framework.dtypes import DType
from tensorflow.python.framework.tensor_shape import TensorShape
from tensorflow.python.types.core import Tensor
# local
import ivy
from ivy.func_wrapper import _dtype_from_version
backend_version = {"version": tf.__version__}
# noinspection PyUnresolvedReferences
if not ivy.is_local():
_module_in_memory = sys.modules[__name__]
else:
_module_in_memory = sys.modules[ivy.import_module_path].import_cache[__name__]
use = ivy.utils.backend.ContextManager(_module_in_memory)
# wrap dunder methods of native tensors to return NotImplemented to prioritize Ivy array methods.
def dunder_wrapper(func):
def rep_method(*args, **kwargs):
for arg in args:
if ivy.is_ivy_array(arg):
return NotImplemented
return func(*args, **kwargs)
return rep_method
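# For example, after patching, `tf_tensor + ivy_array` makes the native __add__
# return NotImplemented, so Python falls back to `ivy.Array.__radd__` instead.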
# check for previously imported tensorflow modules
modules_to_patch = []
tensors_to_patch = []
tmp_globals = dict(globals())
for name, value in tmp_globals.items():
if value == "tensorflow.python.framework.ops.Tensor":
tensors_to_patch.append(name)
try:
if value.__name__ == "tensorflow":
modules_to_patch.append(name)
except AttributeError:
pass
methods_to_patch = [
"__add__",
"__sub__",
"__mul__",
"__div__",
"__truediv__",
"__floordiv__",
"__mod__",
"__lt__",
"__le__",
"__gt__",
"__ge__",
"__ne__",
"__eq__",
"__and__",
"__or__",
"__xor__",
"__pow__",
"__matmul__",
]
for module in modules_to_patch:
for method in methods_to_patch:
exec(
module
+ ".Tensor."
+ method
+ " = dunder_wrapper("
+ module
+ ".Tensor."
+ method
+ ")"
)
for tensor in tensors_to_patch:
for method in methods_to_patch:
exec(tensor + "." + method + " = dunder_wrapper(" + tensor + "." + method + ")")
NativeArray = Tensor
NativeDevice = str
NativeDtype = DType
NativeShape = TensorShape
NativeSparseArray = tf.SparseTensor
# devices
valid_devices = ("cpu", "gpu")
invalid_devices = ("tpu",)
# native data types
native_int8 = tf.int8
native_int16 = tf.int16
native_int32 = tf.int32
native_int64 = tf.int64
native_uint8 = tf.uint8
native_uint16 = tf.uint16
native_uint32 = tf.uint32
native_uint64 = tf.uint64
native_bfloat16 = tf.bfloat16
native_float16 = tf.float16
native_float32 = tf.float32
native_float64 = tf.float64
native_complex64 = tf.complex64
native_complex128 = tf.complex128
native_double = native_float64
native_bool = tf.bool
# valid data types
# ToDo: Add complex dtypes to valid_dtypes and fix all resulting failures.
# update these to add new dtypes
valid_dtypes = {
"2.15.0 and below": (
ivy.int8,
ivy.int16,
ivy.int32,
ivy.int64,
ivy.uint8,
ivy.uint16,
ivy.uint32,
ivy.uint64,
ivy.bfloat16,
ivy.float16,
ivy.float32,
ivy.float64,
ivy.complex64,
ivy.complex128,
ivy.bool,
)
}
valid_numeric_dtypes = {
"2.15.0 and below": (
ivy.int8,
ivy.int16,
ivy.int32,
ivy.int64,
ivy.uint8,
ivy.uint16,
ivy.uint32,
ivy.uint64,
ivy.bfloat16,
ivy.float16,
ivy.float32,
ivy.float64,
ivy.complex64,
ivy.complex128,
)
}
valid_int_dtypes = {
"2.15.0 and below": (
ivy.int8,
ivy.int16,
ivy.int32,
ivy.int64,
ivy.uint8,
ivy.uint16,
ivy.uint32,
ivy.uint64,
)
}
valid_float_dtypes = {
"2.15.0 and below": (ivy.bfloat16, ivy.float16, ivy.float32, ivy.float64)
}
valid_uint_dtypes = {
"2.15.0 and below": (ivy.uint8, ivy.uint16, ivy.uint32, ivy.uint64)
}
valid_complex_dtypes = {"2.15.0 and below": (ivy.complex64, ivy.complex128)}
# leave these untouched
valid_dtypes = _dtype_from_version(valid_dtypes, backend_version)
valid_numeric_dtypes = _dtype_from_version(valid_numeric_dtypes, backend_version)
valid_int_dtypes = _dtype_from_version(valid_int_dtypes, backend_version)
valid_float_dtypes = _dtype_from_version(valid_float_dtypes, backend_version)
valid_uint_dtypes = _dtype_from_version(valid_uint_dtypes, backend_version)
valid_complex_dtypes = _dtype_from_version(valid_complex_dtypes, backend_version)
# invalid data types
# update these to add new dtypes
invalid_dtypes = {"2.15.0 and below": ()}
invalid_numeric_dtypes = {"2.15.0 and below": ()}
invalid_int_dtypes = {"2.15.0 and below": ()}
invalid_float_dtypes = {"2.15.0 and below": ()}
invalid_uint_dtypes = {"2.15.0 and below": ()}
invalid_complex_dtypes = {"2.15.0 and below": ()}
# leave these untouched
invalid_dtypes = _dtype_from_version(invalid_dtypes, backend_version)
invalid_numeric_dtypes = _dtype_from_version(invalid_numeric_dtypes, backend_version)
invalid_int_dtypes = _dtype_from_version(invalid_int_dtypes, backend_version)
invalid_float_dtypes = _dtype_from_version(invalid_float_dtypes, backend_version)
invalid_uint_dtypes = _dtype_from_version(invalid_uint_dtypes, backend_version)
invalid_complex_dtypes = _dtype_from_version(invalid_complex_dtypes, backend_version)
native_inplace_support = False
supports_gradients = True
def closest_valid_dtype(type=None, /, as_native=False):
if type is None:
type = ivy.default_dtype()
return ivy.as_ivy_dtype(type) if not as_native else ivy.as_native_dtype(type)
backend = "tensorflow"
# local sub-modules
from . import activations
from .activations import *
from . import creation
from .creation import *
from . import data_type
from .data_type import *
from . import device
from .device import *
from . import elementwise
from .elementwise import *
from . import general
from .general import *
from . import gradients
from .gradients import *
from . import layers
from .layers import *
from . import linear_algebra as linalg
from .linear_algebra import *
from . import manipulation
from .manipulation import *
from . import random
from .random import *
from . import searching
from .searching import *
from . import set
from .set import *
from . import sorting
from .sorting import *
from . import statistical
from .statistical import *
from . import utility
from .utility import *
from . import experimental
from .experimental import *
from . import control_flow_ops
from .control_flow_ops import *
# sub-backends
from . import sub_backends
from .sub_backends import *
from . import module
from .module import Model
NativeModule = Model
| ivy/ivy/functional/backends/tensorflow/__init__.py/0 | {
"file_path": "ivy/ivy/functional/backends/tensorflow/__init__.py",
"repo_id": "ivy",
"token_count": 2876
} | 24 |
from typing import Union, Optional, Tuple, List, Sequence
import tensorflow as tf
from functools import reduce as _reduce
from collections import namedtuple
import ivy
from ivy.functional.ivy.experimental.linear_algebra import _check_valid_dimension_size
from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes
from .. import backend_version
@with_unsupported_dtypes(
{"2.15.0 and below": ("int", "float16", "bfloat16")}, backend_version
)
def eigh_tridiagonal(
alpha: Union[tf.Tensor, tf.Variable],
beta: Union[tf.Tensor, tf.Variable],
/,
*,
eigvals_only: bool = True,
select: str = "a",
select_range: Optional[
Union[Tuple[int, int], List[int], tf.Tensor, tf.Variable]
] = None,
tol: Optional[float] = None,
) -> Union[
tf.Tensor,
tf.Variable,
Tuple[Union[tf.Tensor, tf.Variable], Union[tf.Tensor, tf.Variable]],
]:
return tf.linalg.eigh_tridiagonal(
alpha,
beta,
eigvals_only=eigvals_only,
select=select,
select_range=select_range,
tol=tol,
)
def diagflat(
x: Union[tf.Tensor, tf.Variable],
/,
*,
offset: int = 0,
padding_value: float = 0,
align: str = "RIGHT_LEFT",
num_rows: Optional[int] = None,
num_cols: Optional[int] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
):
if len(x.shape) > 1:
x = tf.reshape(x, [-1])
if num_rows is None:
num_rows = -1
if num_cols is None:
num_cols = -1
ret = tf.linalg.diag(
x,
name="diagflat",
k=offset,
num_rows=num_rows,
num_cols=num_cols,
padding_value=padding_value,
align=align,
)
if ivy.exists(out):
ivy.inplace_update(out, ret)
return ret
def kron(
a: Union[tf.Tensor, tf.Variable],
b: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.experimental.numpy.kron(a, b)
def matrix_exp(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.linalg.expm(x)
@with_supported_dtypes(
{
"2.15.0 and below": (
"complex",
"float32",
"float64",
)
},
backend_version,
)
def eig(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Tuple[tf.Tensor]:
return tf.linalg.eig(x)
@with_supported_dtypes(
{
"2.15.0 and below": (
"complex",
"float32",
"float64",
)
},
backend_version,
)
def eigvals(
x: Union[tf.Tensor, tf.Variable],
/,
) -> Union[tf.Tensor, tf.Variable]:
return tf.linalg.eigvals(x)
def adjoint(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
_check_valid_dimension_size(x)
return tf.linalg.adjoint(x)
@with_unsupported_dtypes(
{"2.13.0 and below": ("int", "float16", "bfloat16", "float64")}, backend_version
)
def solve_triangular(
x1: Union[tf.Tensor, tf.Variable],
x2: Union[tf.Tensor, tf.Variable],
/,
*,
upper: bool = True,
adjoint: bool = False,
unit_diagonal: bool = False,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
# Multiplying with a mask matrix can stop gradients on the diagonal.
if unit_diagonal:
w = tf.constant(tf.eye(x1.shape[-2], batch_shape=x1.shape[:-2], dtype=x1.dtype))
x1 = w + (1 - w) * x1
return tf.linalg.triangular_solve(x1, x2, lower=not upper, adjoint=adjoint)
@with_supported_dtypes(
{
"2.15.0 and below": (
"bfloat16",
"float16",
"float32",
"float64",
"int32",
"int64",
)
},
backend_version,
)
def multi_dot(
x: Sequence[Union[tf.Tensor, tf.Variable]],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> tf.Tensor:
    # This implementation simply chains tf.matmul calls via functools.reduce
# TODO: reimplement this function once tf adds multi_dot or inplace updates
if len(x) < 2:
raise ValueError("Expecting at least two tensors.")
dot_out = _reduce(tf.matmul, x)
return dot_out
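# Illustrative usage (not part of the original module): for matrices a, b and c
# of shapes (2, 3), (3, 4) and (4, 2), multi_dot([a, b, c]) returns the (2, 2)
# product a @ b @ c.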
@with_unsupported_dtypes({"2.15.0 and below": ("float16", "bfloat16")}, backend_version)
def cond(
x: Union[tf.Tensor, tf.Variable],
/,
*,
p: Optional[Union[None, int, str]] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
svd = tf.linalg.svd(x, compute_uv=False)
    # check the p == -1 case first, otherwise its branch would be unreachable
    if len(x.shape) >= 3 and p == -1:
        ax = [-1, -2]
    elif len(x.shape) >= 3:
        ax = len(x.shape) // 2
    else:
        ax = None
if p is None or p == 2:
k = tf.reduce_max(svd, axis=ax) / tf.reduce_min(svd, axis=ax)
elif p == "nuc":
svd_inv = tf.linalg.svd(tf.linalg.inv(x), compute_uv=False)
k = tf.reduce_sum(svd, axis=ax) * tf.reduce_sum(svd_inv, axis=ax)
elif p == "fro":
k = tf.norm(x, ord="euclidean", axis=[-2, -1]) * tf.norm(
tf.linalg.inv(x), ord="euclidean", axis=[-2, -1]
)
elif p < 0:
if p == -1:
k = tf.reduce_min(
tf.reduce_sum(tf.abs(x), axis=0), axis=ax
) * tf.reduce_min(tf.reduce_sum(tf.abs(tf.linalg.inv(x)), axis=0), axis=ax)
elif p == -2:
k = tf.reduce_min(svd, axis=ax) / tf.reduce_max(svd, axis=ax)
elif p == -float("inf"):
k = tf.reduce_min(
tf.reduce_sum(tf.abs(x), axis=1), axis=ax
) * tf.reduce_min(tf.reduce_sum(tf.abs(tf.linalg.inv(x)), axis=1), axis=ax)
else:
k = tf.norm(x, ord=p, axis=[-2, -1]) * tf.norm(
tf.linalg.inv(x), ord=p, axis=[-2, -1]
)
return k
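# Illustrative sanity check (the identity matrix has condition number 1):
# >>> float(cond(tf.eye(2)))
# 1.0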
@with_unsupported_dtypes(
{"2.15.0 and below": ("integer", "float16", "bfloat16")}, backend_version
)
def lu_factor(
x: Union[tf.Tensor, tf.Variable],
/,
*,
pivot: Optional[bool] = True,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Tuple[tf.Tensor, tf.Tensor]:
ret = tf.linalg.lu(x)
ret_tuple = namedtuple("lu_factor", ["LU", "p"])
return ret_tuple(ret.lu, ret.p)
def lu_solve(
lu: Union[tf.Tensor, tf.Variable],
p: Union[tf.Tensor, tf.Variable],
b: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.linalg.lu_solve(lu, p, b)
@with_supported_dtypes(
{
"2.15.0 and below": (
"bfloat16",
"float16",
"float32",
"float64",
"int32",
"int64",
"complex64",
"complex128",
"bfloat16",
)
},
backend_version,
)
def dot(
a: tf.Tensor,
b: tf.Tensor,
/,
*,
out: Optional[tf.Tensor] = None,
) -> tf.Tensor:
a, b = ivy.promote_types_of_inputs(a, b)
return tf.experimental.numpy.dot(a, b)
| ivy/ivy/functional/backends/tensorflow/experimental/linear_algebra.py/0 | {
"file_path": "ivy/ivy/functional/backends/tensorflow/experimental/linear_algebra.py",
"repo_id": "ivy",
"token_count": 3586
} | 25 |
# global
from __future__ import annotations
import re
import os
import tensorflow as tf
import functools
import logging
from tensorflow.python.util import nest
from typing import NamedTuple, Callable, Any, Tuple, List, Dict, Type, Union
# A NodeDef holds two callables:
# - flatten_fn should take the collection and return a flat list of values.
# It can also return some context that is used in reconstructing the
# collection.
# - unflatten_fn should take a flat list of values and some context
# (returned by flatten_fn). It returns the collection by reconstructing
# it from the list and the context.
Context = Any
PyTree = Any
FlattenFunc = Callable[[PyTree], Tuple[List, Context]]
UnflattenFunc = Callable[[List, Context], PyTree]
class NodeDef(NamedTuple):
flatten_fn: FlattenFunc
unflatten_fn: UnflattenFunc
SUPPORTED_NODES: Dict[Type[Any], NodeDef] = {}
def _register_pytree_node(
typ: Any, flatten_fn: FlattenFunc, unflatten_fn: UnflattenFunc
) -> None:
SUPPORTED_NODES[typ] = NodeDef(flatten_fn, unflatten_fn)
def _dict_flatten(d: Dict[Any, Any]) -> Tuple[List[Any], Context]:
return list(d.values()), list(d.keys())
def _dict_unflatten(values: List[Any], context: Context) -> Dict[Any, Any]:
return {key: value for key, value in zip(context, values)}
_register_pytree_node(dict, _dict_flatten, _dict_unflatten)
def _get_node_type(pytree: Any) -> Any:
return type(pytree)
# A leaf is defined as anything that is not a Node.
def _is_leaf(pytree: PyTree) -> bool:
return _get_node_type(pytree) not in SUPPORTED_NODES.keys()
# A TreeSpec represents the structure of a pytree. It holds:
# "type": the type of root Node of the pytree
# context: some context that is useful in unflattening the pytree
# children_specs: specs for each child of the root Node
# num_leaves: the number of leaves
class TreeSpec:
def __init__(self, type, context, children_specs):
self.type: Any = type
self.context: Context = context
self.children_specs: List["TreeSpec"] = children_specs
self.num_leaves: int = sum([spec.num_leaves for spec in self.children_specs])
def get_keychains(self, prefix="", sep="/"):
keychains = []
for key, child_spec in zip(self.context, self.children_specs):
new_prefix = prefix + key + sep if prefix else key + sep
if child_spec.children_specs: # Non-leaf node
keychains.extend(child_spec.get_keychains(new_prefix, sep))
else: # Leaf node
keychains.append(new_prefix[: -len(sep)])
return keychains
def __repr__(self, indent: int = 0) -> str:
repr_prefix: str = f"TreeSpec({self.type.__name__}, {self.context}, ["
children_specs_str: str = ""
if len(self.children_specs):
indent += len(repr_prefix)
children_specs_str += self.children_specs[0].__repr__(indent)
children_specs_str += "," if len(self.children_specs) > 1 else ""
children_specs_str += ",".join(
[
"\n" + " " * indent + child.__repr__(indent)
for child in self.children_specs[1:]
]
)
repr_suffix: str = f"{children_specs_str}])"
return repr_prefix + repr_suffix
class LeafSpec(TreeSpec):
def __init__(self) -> None:
super().__init__(None, None, [])
self.num_leaves = 1
def __repr__(self, indent: int = 0) -> str:
return "*"
def tree_flatten(pytree: PyTree) -> Tuple[List[Any], TreeSpec]:
"""Flattens a pytree into a list of values and a TreeSpec that can be used
to reconstruct the pytree."""
if _is_leaf(pytree):
return [pytree], LeafSpec()
node_type = _get_node_type(pytree)
flatten_fn = _dict_flatten
child_pytrees, context = flatten_fn(pytree)
# Recursively flatten the children
result: List[Any] = []
children_specs: List["TreeSpec"] = []
for child in child_pytrees:
flat, child_spec = tree_flatten(child)
result += flat
children_specs.append(child_spec)
return result, TreeSpec(node_type, context, children_specs)
def tree_unflatten(values: List[Any], spec: TreeSpec) -> PyTree:
"""Given a list of values and a TreeSpec, builds a pytree.
This is the inverse operation of `tree_flatten`.
"""
if not isinstance(spec, TreeSpec):
raise TypeError(
f"tree_unflatten(values, spec): Expected `spec` to be instance of "
f"TreeSpec but got item of type {type(spec)}."
)
if len(values) != spec.num_leaves:
raise TypeError(
f"tree_unflatten(values, spec): `values` has length {len(values)} "
f"but the spec refers to a pytree that holds {spec.num_leaves} "
f"items ({spec})."
)
if isinstance(spec, LeafSpec):
return values[0]
unflatten_fn = _dict_unflatten
# Recursively unflatten the children
start = 0
end = 0
child_pytrees = []
for child_spec in spec.children_specs:
end += child_spec.num_leaves
child_pytrees.append(tree_unflatten(values[start:end], child_spec))
start = end
return unflatten_fn(child_pytrees, spec.context)
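# A quick round-trip sketch (illustrative; only ``dict`` nodes are registered
# above, so every other type is treated as a leaf):
#
# >>> leaves, spec = tree_flatten({"a": 1, "b": {"c": 2}})
# >>> leaves
# [1, 2]
# >>> tree_unflatten(leaves, spec)
# {'a': 1, 'b': {'c': 2}}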
class ModelHelpers:
@staticmethod
@tf.autograph.experimental.do_not_convert
def _get_first_array(*args, **kwargs):
arr = None
flattened_args = tf.nest.flatten((args, kwargs))
arr_candidates = tf.nest.map_structure(
lambda x: x if isinstance(x, (tf.Tensor, tf.Variable)) else False,
flattened_args,
)
for arr_candidate in arr_candidates:
if arr_candidate is not False:
arr = arr_candidate
break
return arr
@staticmethod
@tf.autograph.experimental.do_not_convert
def _get_input_shapes(*args):
input_shapes = []
for x in args:
if isinstance(x, (tf.Tensor, tf.Variable)):
input_shapes.append(x.shape)
else:
try:
x = tf.convert_to_tensor(x)
input_shapes.append(x.shape)
except Exception:
input_shapes.append(None)
return input_shapes
@staticmethod
@tf.autograph.experimental.do_not_convert
def _extract_v(v, keychain_mappings: dict, orig_key_chain, /):
if ModelHelpers._dict_has_key_chain(v, orig_key_chain):
ret_cont = ModelHelpers._dict_at_key_chain(v, orig_key_chain)
else:
ret_cont = dict()
for old_kc, new_kc in keychain_mappings.items():
if orig_key_chain in old_kc:
# Check if `v` contains `new_kc` before replacing in `ret_cont`
if ModelHelpers._dict_has_key_chain(v, new_kc):
ret_cont = ModelHelpers._dict_set_at_key_chain(
ret_cont,
"/".join(old_kc.split("/")[1:]),
ModelHelpers._dict_at_key_chain(v, new_kc),
)
else:
continue
return ret_cont
@staticmethod
@tf.autograph.experimental.do_not_convert
def _remove_duplicate_variables(vs, created, /):
created_ids = tf.nest.map_structure(lambda x: id(x), created)
vs_ids = tf.nest.map_structure(lambda x: id(x), vs)
ids = {}
duplicate_keychains = []
keychain_mappings = {}
def unique_callback(x, kc):
ids[x] = kc
return x
def found_dup_callback(x, kc):
if ids[x] == kc:
return x
duplicate_keychains.append(kc)
keychain_mappings[kc] = ids[x]
return x
created_ids = nest.map_structure_with_paths(
lambda kc, x: unique_callback(x, kc), created_ids
)
vs_ids = nest.map_structure_with_paths(
lambda kc, x: (
unique_callback(x, kc) if x not in ids else found_dup_callback(x, kc)
),
vs_ids,
)
for dup_kc in duplicate_keychains:
vs = ModelHelpers._dict_prune_key_chain(vs, dup_kc)
return vs, keychain_mappings
@staticmethod
@tf.autograph.experimental.do_not_convert
def _dict_set_at_key_chain(in_dict, key_chain, val, inplace=False):
keys = re.split("[/.]", key_chain)
if inplace:
cont = in_dict
else:
cont = in_dict
sub_cont = cont
for key in keys[:-1]:
if key not in sub_cont:
sub_cont[key] = dict()
sub_cont = sub_cont[key]
sub_cont[keys[-1]] = val
return cont
@staticmethod
@tf.autograph.experimental.do_not_convert
def _dict_at_key_chain(dict, key_chain, ignore_key_errors=False):
keys = re.split("[/.]", key_chain)
ret = dict
for key in keys:
try:
ret = ret[key]
except KeyError as e:
if ignore_key_errors:
return
raise Exception(repr(e))
return ret
@staticmethod
@tf.autograph.experimental.do_not_convert
def _dict_has_key_chain(dict, key_chain):
keys = re.split("[/.]", key_chain)
ret = dict
for key in keys:
try:
ret = ret[key]
except KeyError:
return False
return True
@staticmethod
@tf.autograph.experimental.do_not_convert
def _dict_prune_key_chain(in_dict, key_chain):
keys_in_chain = re.split("[/.]", key_chain)
out_dict = {}
for key, value in in_dict.items():
if isinstance(value, dict):
if key == keys_in_chain[0]:
if len(keys_in_chain) == 1:
new_val = []
else:
new_val = ModelHelpers._dict_prune_key_chain(
value,
"/".join(keys_in_chain[1:]),
)
if len(new_val) > 0:
out_dict[key] = new_val
else:
if len(value) > 0:
out_dict[key] = value
else:
if len(keys_in_chain) != 1 or key != keys_in_chain[0]:
out_dict[key] = value
return out_dict
@staticmethod
@tf.autograph.experimental.do_not_convert
def _addindent(s_, numSpaces):
s = s_.split("\n")
# don't do anything for single-line stuff
if len(s) == 1:
return s_
first = s.pop(0)
s = [(numSpaces * " ") + line for line in s]
s = "\n".join(s)
s = first + "\n" + s
return s
class Model(tf.keras.Model, ModelHelpers):
_build_mode = None
_with_partial_v = None
_store_vars = True
_built = False
_v = None
_buffers = None
_module_dict = None
_args = None
_kwargs = None
_module_graph = None
_target = None
_lazy_traced = False
_training = None
_dynamic_backend = None
_device = None
_dtype = None
def __init__(
self,
/,
*args,
v=None,
buffers=None,
build_mode="on_init",
store_vars=True,
with_partial_v=False,
dynamic_backend=None,
training=True,
dtype=None,
device=None,
**kwargs,
):
super(Model, self).__init__(
trainable=training,
dtype=dtype,
)
self._build_mode = build_mode
self._with_partial_v = with_partial_v
self._store_vars = store_vars
self._built = False
self._v_from_constructor = v if isinstance(v, dict) or v is None else dict(v)
self._v = v if v is not None else dict()
self._buffers = dict(buffers or {})
self._module_dict = dict()
self._args = args
self._kwargs = kwargs
self._module_graph = None
self._target = None
self._lazy_traced = False
self._training = training
self._dynamic_backend = dynamic_backend
self._device = device or "cpu"
self._dtype = dtype or tf.float32
if build_mode != "on_init":
return
self.build(*args, dynamic_backend=dynamic_backend, **kwargs)
@tf.autograph.experimental.do_not_convert
def _find_variables(
self,
/,
*,
obj=None,
without_initialisation=False,
_visited=None,
):
_visited = _visited or {}
vs = dict()
if id(obj) in _visited:
return vs
_visited[id(obj)] = True
if isinstance(obj, Model) and obj is not self:
if not obj._built and without_initialisation:
return lambda: obj._build_and_return_v(
*obj._args, dynamic_backend=self._dynamic_backend, **obj._kwargs
)
return obj._build_and_return_v(
*obj._args, dynamic_backend=obj._dynamic_backend, **obj._kwargs
)
elif isinstance(obj, (list, tuple)):
for i, v in enumerate(obj):
ret = self._find_variables(
obj=v,
without_initialisation=without_initialisation,
_visited=_visited,
)
if ret:
vs[f"v{str(i)}"] = ret
return vs
elif isinstance(obj, dict):
for k, v in obj.items():
ret = self._find_variables(
obj=v,
without_initialisation=without_initialisation,
_visited=_visited,
)
if ret:
vs[k[1:] if k[0] == "_" else k] = ret
return vs
elif not hasattr(obj, "__dict__"):
return vs
for k, v in obj.__dict__.items():
if (
v is not None
and k[0:2] != "__"
and not k.startswith(
(
"_module_dict",
"_self_",
)
)
):
ret = self._find_variables(
obj=v,
without_initialisation=without_initialisation,
_visited=_visited,
)
if ret:
vs[k[1:] if k[0] == "_" else k] = ret
return vs
@tf.autograph.experimental.do_not_convert
def _find_buffers(self):
if hasattr(self, "_module_dict"):
for key, sub_module in self._module_dict.items():
if len(sub_module._buffers) > 0:
self._buffers[key] = sub_module._buffers
@tf.autograph.experimental.do_not_convert
def _build_and_return_v(self, *args, **kwargs):
if not self._built:
self.build(*args, **kwargs)
return self.v
@tf.autograph.experimental.do_not_convert
def _assign_weights(self):
model_weights = {}
existing_ids = [id(w) for w in self.weights]
# trainable weights
flattened_v, v_spec = tree_flatten(self.v)
flattened_kc = v_spec.get_keychains()
new_weights = [None] * len(flattened_v)
for i, (kc, x) in enumerate(zip(flattened_kc, flattened_v)):
new_weights[i] = (
self.add_weight(name=kc, shape=x.shape, dtype=x.dtype, trainable=True)
if x is not None and id(x) not in existing_ids
else x
)
if isinstance(x, tf.Variable):
new_weights[i].assign(x.value())
if new_weights[i] is not None:
model_weights[id(new_weights[i])] = new_weights[i].numpy()
self.v = tree_unflatten(new_weights, v_spec)
# non-trainable weights
flattened_buf, buf_spec = tree_flatten(self.buffers)
flattened_kc = buf_spec.get_keychains()
new_buf = [None] * len(flattened_buf)
for i, (kc, x) in enumerate(zip(flattened_kc, flattened_buf)):
new_buf[i] = (
self.add_weight(name=kc, shape=x.shape, dtype=x.dtype, trainable=False)
if x is not None and id(x) not in existing_ids
else x
)
if isinstance(x, tf.Variable):
new_buf[i].assign(x.value())
if new_buf[i] is not None:
model_weights[id(new_buf[i])] = new_buf[i].numpy()
self.buffers = tree_unflatten(new_buf, buf_spec)
def _sort_weights(model_weights, weights):
sorted_weights = []
for weight in weights:
sorted_weights.append(model_weights[id(weight)])
return sorted_weights
if model_weights:
self.set_weights(_sort_weights(model_weights, self.weights))
@tf.autograph.experimental.do_not_convert
def build(
self,
*args,
from_call=False,
device=None,
dtype=None,
dynamic_backend=None,
**kwargs,
):
self._device = device or self._device
self._dtype = dtype or self._dtype
self._dynamic_backend = dynamic_backend or self._dynamic_backend
# return False if not from_call but build_mode is on_call
if not from_call and self._build_mode == "on_call":
return self.v
# build local Module, and any child modules flagged with "explicit" build mode
# this gets the child modules initialised at best, their weights
# remain un-generated
built = self._build(*args, **kwargs) or True
# this creates weights for this Module only
created = self._create_variables(device=self._device, dtype=dtype)
created = (
created.cont_to_dict() if hasattr(created, "cont_to_dict") else created
)
# build variables based on locally built layers, if v not passed in constructor
created_n_found = dict(
**self._find_variables(
obj=self,
without_initialisation=(
True
if self._v_from_constructor and not self._with_partial_v
else False
),
),
**created,
)
if self._v_from_constructor:
# TODO: Add logic here for when `v` is passed in the constructor
raise Exception("TODO: Implement this logic")
else:
self._v = created_n_found
# remove duplicates
self._v, keychain_mappings = self._remove_duplicate_variables(self._v, created)
# build any child 'on_call' layers
if not built and from_call:
# TODO: Add logic for here
raise Exception("TODO: Implement this logic")
# flag built and remove local variables if specified
self._built = bool(built)
v_ret = self.v
if not self._store_vars:
# ToDo: verify variables in self.v are released once this method exits
self._v = dict()
# compute the module dict
self._compute_module_dict()
# once all variables built, find and assign buffers
self._find_buffers()
# also assign the keras model trainable and non-trainable weights now
self._assign_weights()
# wrap call methods if the model is fully built
if built:
self._wrap_call_methods(keychain_mappings, obj=self)
return v_ret if bool(v_ret) or isinstance(built, bool) else built
@tf.autograph.experimental.do_not_convert
def _wrap_call_methods(
self, keychain_mappings, /, *, key="", obj=None, _visited=None
):
_visited = _visited or {}
if id(obj) in _visited or not isinstance(key, str):
return
_visited[id(obj)] = True
if isinstance(obj, Model) and obj is not self:
orig_key_chain = key[1:] if key[0] == "_" else key
obj.__call__ = self._fn_with_var_arg(
obj.__call__, self._extract_v, keychain_mappings, orig_key_chain
)
return
elif isinstance(obj, (list, tuple)):
for i, val in enumerate(obj):
self._wrap_call_methods(
keychain_mappings,
key=f"{key}/v{str(i)}",
obj=val,
_visited=_visited,
)
return
elif isinstance(obj, dict):
for k, val in obj.items():
k = f"{key}/{k}" if key != "" and isinstance(k, str) else k
self._wrap_call_methods(
keychain_mappings, key=k, obj=val, _visited=_visited
)
return
for k, val in obj.module_dict.items():
if k.startswith(("__", "_self_")):
continue
k = f"{key}/{k}" if key != "" else k
if val is not None:
self._wrap_call_methods(
keychain_mappings, key=k, obj=val, _visited=_visited
)
return
@tf.autograph.experimental.do_not_convert
def _call(self, *args, v=None, buffers=None, **kwargs):
if not self._built or not self.built:
if not self._built:
first_arr = self._get_first_array(*args, **kwargs)
self.build(
*args,
**kwargs,
from_call=True,
dtype=first_arr.dtype if first_arr is not None else tf.float32,
)
if not self.built:
# Don't use `keras` build method
if os.environ.get("USE_KERAS_BUILD", "False").lower() == "false":
self.inputs = tf.nest.flatten(args)
input_shapes = self._get_input_shapes(*args)
if len(input_shapes) == 0:
input_shapes = tf.TensorShape(None)
elif len(input_shapes) == 1:
input_shapes = input_shapes[0]
super(Model, self).build(input_shapes) # noqa: UP008
# If `v` was provided, replace with the module's v
replace_v = False
if v is not None:
v_orig = self.v
self._v = v
replace_v = True
# If `buffers` were provided, replace with the module's buffers
replace_buffers = False
if buffers is not None:
buffers_orig = self.buffers
self._buffers = buffers
replace_buffers = True
if replace_v or replace_buffers:
# Call the forward pass
ret = super(Model, self).__call__(*args, **kwargs) # noqa: UP008
# Replace v, buffers if needed
self._v = v_orig if replace_v else self._v
self._buffers = buffers_orig if replace_buffers else self._buffers
return ret
elif hasattr(self.__call__, "wrapped"):
return self.__call__(*args, **kwargs)
return super(Model, self).__call__(*args, **kwargs) # noqa: UP008
@tf.autograph.experimental.do_not_convert
def _rebuild(self):
logging.warning(
"Building the module again as a trainable module was modified, "
'please use the "explicit" or "on_call" build_modes instead '
'of "on_init" to avoid repetitive building after each addition'
)
self._v = dict()
self._built = False
self.build(*self._args, **self._kwargs)
@tf.autograph.experimental.do_not_convert
def _compute_module_dict(self):
self._module_dict = dict()
for key, value in self.__dict__.items():
if isinstance(value, Model):
if "stateful" in value.__module__ or hasattr(value, "_frontend_module"):
self._module_dict[key] = value
else:
self._module_dict[key] = value._module_dict
@tf.autograph.experimental.do_not_convert
def _fn_with_var_arg_wrapper(
self, *a, fn, v_fn, keychain_mappings, orig_key_chain, **kw
):
if "v" in kw:
del kw["v"]
v = v_fn(self.v, keychain_mappings, orig_key_chain)
return fn(*a, **kw, v=v)
@tf.autograph.experimental.do_not_convert
def _fn_with_var_arg(self, fn, v_fn, /, keychain_mappings, orig_key_chain):
_fn_with_var_arg_wrapper = functools.partial(
self._fn_with_var_arg_wrapper,
fn=fn,
v_fn=v_fn,
keychain_mappings=keychain_mappings,
orig_key_chain=orig_key_chain,
)
_fn_with_var_arg_wrapper.wrapped = True
return _fn_with_var_arg_wrapper
@tf.autograph.experimental.do_not_convert
def register_buffer(self, name: str, value: Union[tf.Tensor, tf.Variable]):
if value is not None:
self._buffers.update({name: value})
else:
self.__setattr__(name, value)
@tf.autograph.experimental.do_not_convert
def register_parameter(self, name: str, value: Union[tf.Tensor, tf.Variable]):
self._v.update({name: value})
@tf.autograph.experimental.do_not_convert
def train(self, mode: bool = True):
self._training = mode
for module in self.children():
module.train(mode)
self.trainable = mode
return self
@tf.autograph.experimental.do_not_convert
def eval(self):
return self.train(mode=False)
@tf.autograph.experimental.do_not_convert
def call(self, inputs, training=None, mask=None):
raise NotImplementedError(
"When subclassing the `Model` class, you should implement a `call` method."
)
# Methods to be Optionally Overridden #
# -----------------------------------#
@tf.autograph.experimental.do_not_convert
def _create_variables(self, *, device=None, dtype=None):
return {}
@tf.autograph.experimental.do_not_convert
def _build(self, *args, **kwargs) -> bool:
return True
@tf.autograph.experimental.do_not_convert
def _forward(self, *args, **kwargs):
raise NotImplementedError(
"When subclassing the `Model` class, you should "
"implement a `_forward` method."
)
@tf.autograph.experimental.do_not_convert
def _extra_repr(self) -> str:
return ""
# Properties #
# -----------#
@property
def device(self):
return self._device
@property
def dtype(self):
return self._dtype
@property
def build_mode(self):
return self._build_mode
@property
def training(self):
return self._training
@property
def v(self):
return self._v
@property
def buffers(self):
return self._buffers
@property
def state_dict(self):
return {**self.v, **self.buffers}
@property
def module_dict(self):
return self._module_dict
# Dunder Methods #
# ---------------#
@tf.autograph.experimental.do_not_convert
def __call__(
self,
*args,
v=None,
buffers=None,
**kwargs,
):
        # TODO: Temp workaround to avoid `call` from being transformed by AutoGraph
if not hasattr(self.__class__.call, "autograph_info__"):
setattr(self.__class__.call, "autograph_info__", True)
ret = self._call(*args, v=v, buffers=buffers, **kwargs)
return ret
@tf.autograph.experimental.do_not_convert
def __getattr__(self, name):
if name == "v":
if not super().__getattribute__("_v") and not getattr( # noqa: E501
self, "_built", False
):
return self._build_and_return_v(
*self._args, dynamic_backend=self._dynamic_backend, **self._kwargs
)
_dict = super().__getattribute__("__dict__")
if name in _dict:
return _dict[name]
elif "_v" in _dict and name in _dict["_v"]:
return _dict["_v"][name]
return super().__getattribute__(name)
@tf.autograph.experimental.do_not_convert
def __setattr__(self, name, value):
if name in ["v", "buffers"]:
name = "_" + name
if isinstance(value, Model):
ret = super().__setattr__(name, value)
if (
hasattr(self, "_build_mode")
and self.build_mode == "on_init"
and getattr(self, "_built", False)
):
self._rebuild()
return ret
elif isinstance(value, tf.Variable) and not name.startswith("_"):
ret = self.register_parameter(name, value)
if (
hasattr(self, "_build_mode")
and self.build_mode == "on_init"
and getattr(self, "_built", False)
):
self._rebuild()
return ret
return super().__setattr__(name, value)
@tf.autograph.experimental.do_not_convert
def __delattr__(self, name):
if hasattr(self, name):
if isinstance(getattr(self, name), Model):
super().__delattr__(name)
if self.build_mode == "on_init":
self._rebuild()
return
super().__delattr__(name)
@tf.autograph.experimental.do_not_convert
def __repr__(self):
extra_lines = []
extra_repr = self._extra_repr()
if extra_repr:
extra_lines = extra_repr.split("\n")
child_lines = []
for key in self.v.keys():
if isinstance(getattr(self, key, None), Model):
mod_str = repr(getattr(self, key))
mod_str = self._addindent(mod_str, 2)
child_lines.append(f"({key}): {mod_str}")
lines = extra_lines + child_lines
main_str = f"{self.__class__.__name__}("
if lines:
# simple one-liner info, which most builtin Modules will use
if len(extra_lines) == 1 and not child_lines:
main_str += extra_lines[0]
else:
main_str += "\n " + "\n ".join(lines) + "\n"
main_str += ")"
return main_str
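# A minimal usage sketch (illustrative only; ``Dense`` is a hypothetical
# subclass, not part of this module):
#
# class Dense(Model):
#     def _create_variables(self, *, device=None, dtype=None):
#         # variables returned here are collected into ``self.v`` by ``build``
#         return {
#             "w": tf.Variable(tf.random.normal((4, 2), dtype=dtype or tf.float32))
#         }
#
#     def call(self, x):
#         return x @ self.v["w"]
#
# layer = Dense()              # build_mode defaults to "on_init"
# out = layer(tf.ones((1, 4)))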
| ivy/ivy/functional/backends/tensorflow/module.py/0 | {
"file_path": "ivy/ivy/functional/backends/tensorflow/module.py",
"repo_id": "ivy",
"token_count": 15068
} | 26 |
# global
from typing import Optional, Union, Sequence, List
import numpy as np
import torch
# local
import ivy
from ivy.func_wrapper import with_unsupported_dtypes
from ivy.functional.ivy.data_type import _handle_nestable_dtype_info
from . import backend_version
ivy_dtype_dict = {
torch.int8: "int8",
torch.int16: "int16",
torch.int32: "int32",
torch.int64: "int64",
torch.uint8: "uint8",
torch.bfloat16: "bfloat16",
torch.float16: "float16",
torch.float32: "float32",
torch.float64: "float64",
torch.complex64: "complex64",
torch.complex128: "complex128",
torch.bool: "bool",
}
native_dtype_dict = {
"int8": torch.int8,
"int16": torch.int16,
"int32": torch.int32,
"int64": torch.int64,
"uint8": torch.uint8,
"bfloat16": torch.bfloat16,
"float16": torch.float16,
"float32": torch.float32,
"float64": torch.float64,
"complex64": torch.complex64,
"complex128": torch.complex128,
"bool": torch.bool,
}
class Finfo:
def __init__(self, torch_finfo: torch.finfo):
self._torch_finfo = torch_finfo
def __repr__(self):
return repr(self._torch_finfo)
@property
def bits(self):
return self._torch_finfo.bits
@property
def eps(self):
return self._torch_finfo.eps
@property
def max(self):
return self._torch_finfo.max
@property
def min(self):
return self._torch_finfo.min
@property
def smallest_normal(self):
return self._torch_finfo.tiny
# Array API Standard #
# -------------------#
@with_unsupported_dtypes({"2.2 and below": ("bfloat16", "float16")}, backend_version)
def astype(
x: torch.Tensor,
dtype: torch.dtype,
/,
*,
copy: bool = True,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
dtype = ivy.as_native_dtype(dtype)
if x.dtype == dtype:
return x.clone() if copy else x
return x.to(dtype)
def broadcast_arrays(*arrays: torch.Tensor) -> List[torch.Tensor]:
try:
return list(torch.broadcast_tensors(*arrays))
except RuntimeError as e:
raise ivy.utils.exceptions.IvyBroadcastShapeError(e) from e
def broadcast_to(
x: torch.Tensor,
/,
shape: Union[ivy.NativeShape, Sequence[int]],
*,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
ivy.utils.assertions.check_shapes_broadcastable(x.shape, shape)
if x.ndim > len(shape):
return torch.broadcast_to(x.reshape(-1), shape)
return torch.broadcast_to(x, shape)
@_handle_nestable_dtype_info
def finfo(type: Union[torch.dtype, str, torch.Tensor, np.ndarray], /) -> Finfo:
if isinstance(type, (torch.Tensor, np.ndarray)):
type = type.dtype
return Finfo(torch.finfo(ivy.as_native_dtype(type)))
@_handle_nestable_dtype_info
def iinfo(type: Union[torch.dtype, str, torch.Tensor, np.ndarray], /) -> torch.iinfo:
if isinstance(type, (torch.Tensor, np.ndarray)):
type = type.dtype
return torch.iinfo(ivy.as_native_dtype(type))
def result_type(*arrays_and_dtypes: Union[torch.tensor, torch.dtype]) -> ivy.Dtype:
    input = []
    for val in arrays_and_dtypes:
        torch_val = as_native_dtype(val)
        if isinstance(torch_val, torch.dtype):
            torch_val = torch.tensor(1, dtype=torch_val)
        input.append(torch_val)
    if len(input) == 1:
        # a single array or dtype simply promotes to itself
        return as_ivy_dtype(input[0].dtype)
    result = torch.tensor(1, dtype=torch.result_type(input[0], input[1]))
    for i in range(2, len(input)):
        result = torch.tensor(1, dtype=torch.result_type(result, input[i]))
    return as_ivy_dtype(result.dtype)
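# Illustrative promotion behaviour:
# >>> result_type(torch.tensor([1], dtype=torch.int32), torch.float32)
# 'float32'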
# Extra #
# ------#
def as_ivy_dtype(
dtype_in: Union[torch.dtype, str, int, float, complex, bool, np.dtype],
/,
) -> ivy.Dtype:
if dtype_in is int:
return ivy.default_int_dtype()
if dtype_in is float:
return ivy.default_float_dtype()
if dtype_in is complex:
return ivy.default_complex_dtype()
if dtype_in is bool:
return ivy.Dtype("bool")
if isinstance(dtype_in, np.dtype):
dtype_in = dtype_in.name
if isinstance(dtype_in, str):
if dtype_in in native_dtype_dict:
dtype_str = dtype_in
else:
raise ivy.utils.exceptions.IvyException(
"Cannot convert to ivy dtype."
f" {dtype_in} is not supported by PyTorch backend."
)
else:
dtype_str = ivy_dtype_dict[dtype_in]
if "uint" in dtype_str:
return ivy.UintDtype(dtype_str)
elif "int" in dtype_str:
return ivy.IntDtype(dtype_str)
elif "float" in dtype_str:
return ivy.FloatDtype(dtype_str)
elif "complex" in dtype_str:
return ivy.ComplexDtype(dtype_str)
elif "bool" in dtype_str:
return ivy.Dtype("bool")
else:
raise ivy.utils.exceptions.IvyException(
f"Cannot recognize {dtype_str} as a valid Dtype."
)
@with_unsupported_dtypes({"2.2 and below": ("uint16",)}, backend_version)
def as_native_dtype(
dtype_in: Union[torch.dtype, str, bool, int, float, np.dtype],
) -> torch.dtype:
if dtype_in is int:
return ivy.default_int_dtype(as_native=True)
if dtype_in is float:
return ivy.default_float_dtype(as_native=True)
if dtype_in is complex:
return ivy.default_complex_dtype(as_native=True)
if dtype_in is bool:
return torch.bool
if isinstance(dtype_in, np.dtype):
dtype_in = dtype_in.name
if not isinstance(dtype_in, str):
return dtype_in
if dtype_in in native_dtype_dict:
return native_dtype_dict[ivy.Dtype(dtype_in)]
else:
raise ivy.utils.exceptions.IvyException(
f"Cannot convert to PyTorch dtype. {dtype_in} is not supported by PyTorch."
)
def dtype(x: Union[torch.tensor, np.ndarray], *, as_native: bool = False) -> ivy.Dtype:
if as_native:
return ivy.as_native_dtype(x.dtype)
return as_ivy_dtype(x.dtype)
def dtype_bits(dtype_in: Union[torch.dtype, str, np.dtype], /) -> int:
dtype_str = as_ivy_dtype(dtype_in)
if "bool" in dtype_str:
return 1
return int(
dtype_str.replace("torch.", "")
.replace("uint", "")
.replace("int", "")
.replace("bfloat", "")
.replace("float", "")
.replace("complex", "")
)
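# e.g.:
# >>> dtype_bits("float32")
# 32
# >>> dtype_bits(torch.bfloat16)
# 16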
def is_native_dtype(dtype_in: Union[torch.dtype, str], /) -> bool:
if not ivy.is_hashable_dtype(dtype_in):
return False
return bool(dtype_in in ivy_dtype_dict and isinstance(dtype_in, torch.dtype))
| ivy/ivy/functional/backends/torch/data_type.py/0 | {
"file_path": "ivy/ivy/functional/backends/torch/data_type.py",
"repo_id": "ivy",
"token_count": 2986
} | 27 |
import torch
from typing import Literal, Optional, Tuple
from ivy.func_wrapper import with_supported_dtypes, with_unsupported_dtypes
from .. import backend_version
def l1_normalize(
x: torch.Tensor,
/,
*,
axis: Optional[int] = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
return torch.nn.functional.normalize(x, p=1, dim=axis, out=out)
l1_normalize.support_native_out = True
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, backend_version)
def l2_normalize(
x: torch.Tensor,
/,
*,
axis: Optional[int] = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
return torch.nn.functional.normalize(x, p=2, dim=axis, out=out)
l2_normalize.support_native_out = True
@with_supported_dtypes({"2.2 and below": ("float",)}, backend_version)
def local_response_norm(
x: torch.Tensor,
size,
/,
*,
bias: Optional[float] = 1.0,
alpha: Optional[float] = 1.0,
beta: Optional[float] = 0.5,
average: bool = False,
data_format: Optional[Literal["NHWC", "NCHW"]] = "NHWC",
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if data_format == "NHWC":
x = torch.permute(x, (0, 3, 1, 2))
alpha = alpha * size if not average else alpha
ret = torch.nn.functional.local_response_norm(
x, size, alpha=alpha, beta=beta, k=bias
)
if data_format == "NHWC":
ret = torch.permute(ret, (0, 2, 3, 1))
return ret
@with_unsupported_dtypes({"2.2 and below": ("bfloat16", "float16")}, backend_version)
def batch_norm(
x: torch.Tensor,
mean: torch.Tensor,
variance: torch.Tensor,
/,
*,
scale: Optional[torch.Tensor] = None,
offset: Optional[torch.Tensor] = None,
training: Optional[bool] = False,
eps: Optional[float] = 1e-5,
momentum: Optional[float] = 1e-1,
data_format: Optional[str] = "NSC",
out: Optional[Tuple[torch.Tensor, torch.Tensor, torch.Tensor]] = None,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
xdims = x.ndim
if data_format == "NSC":
x = torch.permute(x, dims=(0, xdims - 1, *range(1, xdims - 1)))
runningmean = mean.detach().clone()
runningvariance = variance.detach().clone()
xnormalized = torch.nn.functional.batch_norm(
x,
runningmean,
runningvariance,
weight=scale,
bias=offset,
training=training,
eps=eps,
momentum=momentum,
)
if data_format == "NSC":
xnormalized = torch.permute(xnormalized, dims=(0, *range(2, xdims), 1))
return xnormalized, runningmean, runningvariance
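# The handler below routes to this native implementation only when the
# statistics (and the optional scale/offset) are rank-1 vectors; any other
# layout falls back to ivy's compositional implementation.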
batch_norm.partial_mixed_handler = (
lambda x, mean, variance, scale=None, offset=None, **kwargs: (
x.ndim > 1
and mean.ndim == 1
and variance.ndim == 1
and (scale is None or scale.ndim == 1)
and (offset is None or offset.ndim == 1)
)
)
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, backend_version)
def instance_norm(
x: torch.Tensor,
mean: torch.Tensor,
variance: torch.Tensor,
/,
*,
scale: Optional[torch.Tensor] = None,
offset: Optional[torch.Tensor] = None,
training: Optional[bool] = False,
    eps: Optional[float] = 1e-5,
momentum: Optional[float] = 1e-1,
data_format: Optional[str] = "NSC",
out: Optional[Tuple[torch.Tensor, torch.Tensor, torch.Tensor]] = None,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
runningmean = mean.clone()
runningvariance = variance.clone()
# reshape from N, *S, C to N, C, *S
xdims = x.ndim
if data_format == "NSC":
x = torch.permute(x, dims=(0, xdims - 1, *range(1, xdims - 1)))
xnormalized = torch.nn.functional.instance_norm(
x,
runningmean,
runningvariance,
weight=scale,
bias=offset,
use_input_stats=training,
eps=eps,
momentum=momentum,
)
if data_format == "NSC":
xnormalized = torch.permute(xnormalized, dims=(0, *range(2, xdims), 1))
return xnormalized, runningmean, runningvariance
instance_norm.partial_mixed_handler = (
lambda x, mean, variance, scale=None, offset=None, **kwargs: (
x.ndim > 1
and mean.ndim == 1
and variance.ndim == 1
and (scale is None or scale.ndim == 1)
and (offset is None or offset.ndim == 1)
)
)
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, backend_version)
def group_norm(
x: torch.Tensor,
num_groups: int = 1,
/,
*,
offset: Optional[torch.Tensor] = None,
scale: Optional[torch.Tensor] = None,
eps: Optional[float] = 1e-5,
data_format: Optional[str] = "NSC",
out: Optional[torch.Tensor] = None,
):
xdims = x.ndim
if data_format == "NSC":
x = torch.permute(x, dims=(0, xdims - 1, *range(1, xdims - 1)))
xnormalized = torch.nn.functional.group_norm(
x, num_groups, weight=scale, bias=offset, eps=eps
)
if data_format == "NSC":
xnormalized = torch.permute(xnormalized, dims=(0, *range(2, xdims), 1))
return xnormalized
@with_unsupported_dtypes({"2.2 and below": ("float16",)}, backend_version)
def lp_normalize(
x: torch.Tensor,
/,
*,
p: float = 2,
axis: Optional[int] = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
return torch.nn.functional.normalize(x, p=p, dim=axis, out=out)
lp_normalize.support_native_out = True
| ivy/ivy/functional/backends/torch/experimental/norms.py/0 | {
"file_path": "ivy/ivy/functional/backends/torch/experimental/norms.py",
"repo_id": "ivy",
"token_count": 2395
} | 28 |
from numbers import Number
from typing import Optional, Tuple, Union
import torch
import torch.nn.functional as tnf
import ivy
from ivy.func_wrapper import with_unsupported_dtypes
from . import backend_version
# Array API Standard #
# ------------------ #
@with_unsupported_dtypes(
{
"2.2 and below": (
"complex",
"bool",
)
},
backend_version,
)
def argmax(
x: torch.Tensor,
/,
*,
axis: Optional[int] = None,
keepdims: bool = False,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
select_last_index: bool = False,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if select_last_index:
if axis is None:
            x = torch.flip(x, dims=list(range(x.ndim)))
ret = torch.argmax(x, dim=axis, keepdim=keepdims)
ret = x.numel() - ret - 1
else:
x = torch.flip(x, dims=(axis,))
ret = torch.argmax(x, dim=axis, keepdim=keepdims)
ret = x.shape[axis] - ret - 1
else:
ret = torch.argmax(x, dim=axis, keepdim=keepdims)
if dtype:
dtype = ivy.as_native_dtype(dtype)
return ret.to(dtype=dtype)
return ret
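# With ``select_last_index=True`` the index of the *last* maximal element is
# returned:
# >>> argmax(torch.tensor([1, 3, 3]))
# tensor(1)
# >>> argmax(torch.tensor([1, 3, 3]), select_last_index=True)
# tensor(2)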
@with_unsupported_dtypes(
{
"2.2 and below": (
"complex",
"bool",
)
},
backend_version,
)
def argmin(
x: torch.Tensor,
/,
*,
axis: Optional[int] = None,
keepdims: bool = False,
dtype: Optional[torch.dtype] = None,
select_last_index: bool = False,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if select_last_index:
if axis is None:
            x = torch.flip(x, dims=list(range(x.ndim)))
ret = torch.argmin(x, dim=axis, keepdim=keepdims)
ret = x.numel() - ret - 1
else:
x = torch.flip(x, dims=(axis,))
ret = torch.argmin(x, dim=axis, keepdim=keepdims)
ret = x.shape[axis] - ret - 1
else:
ret = torch.argmin(x, dim=axis, keepdim=keepdims)
if dtype:
dtype = ivy.as_native_dtype(dtype)
return ret.to(dtype=dtype)
return ret
def nonzero(
x: torch.Tensor,
/,
*,
as_tuple: bool = True,
size: Optional[int] = None,
fill_value: Number = 0,
) -> Union[torch.Tensor, Tuple[torch.Tensor]]:
res = torch.stack(torch.nonzero(x, as_tuple=True))
if size is not None:
if isinstance(fill_value, float):
res = res.to(dtype=torch.float64)
diff = size - res[0].shape[0]
if diff > 0:
res = tnf.pad(res, (0, diff), value=fill_value)
elif diff < 0:
res = res[:, :size]
res = tuple(res)
if as_tuple:
return res
return torch.stack(res, dim=1)
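# Illustrative padding behaviour when ``size`` exceeds the number of hits:
# >>> nonzero(torch.tensor([0, 1, 2]), size=4, fill_value=-1)
# (tensor([ 1,  2, -1, -1]),)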
def where(
condition: torch.Tensor,
x1: Union[float, int, torch.Tensor],
x2: Union[float, int, torch.Tensor],
/,
*,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
if condition.dtype is not torch.bool:
condition = condition.to(bool)
return ivy.astype(torch.where(condition, x1, x2), x1.dtype, copy=False)
# Extra #
# ----- #
def argwhere(
x: torch.Tensor,
/,
*,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
return torch.argwhere(x)
| ivy/ivy/functional/backends/torch/searching.py/0 | {
"file_path": "ivy/ivy/functional/backends/torch/searching.py",
"repo_id": "ivy",
"token_count": 1643
} | 29 |
# global
import ivy
import ivy.functional.frontends.jax as jax_frontend
from ivy.functional.frontends.jax.func_wrapper import to_ivy_arrays_and_back
class _IndexUpdateHelper:
__slots__ = ("array",)
def __init__(self, array):
self.array = array
def __getitem__(self, index):
return _IndexUpdateRef(self.array, index)
    def __setitem__(self, index, value):
        # plain index assignment is not supported for this functional helper;
        # mirror jax's guidance and point users at ``.at[index].set(value)``
        raise TypeError(
            "Updates via index assignment are not supported; use "
            "arr.at[index].set(value) instead."
        )
def __repr__(self):
return f"_IndexUpdateHelper({repr(self.array)})"
class _IndexUpdateRef:
__slots__ = ("array", "index")
def __init__(self, array, index):
self.array = array
self.index = index
def __repr__(self):
return f"_IndexUpdateRef({repr(self.array)}, {repr(self.index)})"
def get(
self, indices_are_sorted=False, unique_indices=False, mode=None, fill_value=None
):
return _rewriting_take(
self.array,
self.index,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices,
mode=mode,
fill_value=fill_value,
)
def set(self, values, indices_are_sorted=False, unique_indices=False, mode=None):
ret = ivy.copy_array(self.array) # break inplace op
if hasattr(values, "ivy_array"):
ret[self.index] = values.ivy_array
else:
ret[self.index] = values
return jax_frontend.Array(ret)
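# Together these back a functional ``.at``-style update API, e.g. (sketch,
# assuming the frontend ``Array`` exposes an ``at`` property built on
# ``_IndexUpdateHelper``):
#
# >>> arr.at[0].set(9)   # returns a new Array; ``arr`` itself is unchanged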
# --- Helpers --- #
# --------------- #
@to_ivy_arrays_and_back
def _rewriting_take(
arr, idx, indices_are_sorted=False, unique_indices=False, mode=None, fill_value=None
):
return ivy.get_item(arr, idx)
| ivy/ivy/functional/frontends/jax/_src/numpy/lax_numpy.py/0 | {
"file_path": "ivy/ivy/functional/frontends/jax/_src/numpy/lax_numpy.py",
"repo_id": "ivy",
"token_count": 744
} | 30 |
# local
import ivy
from ivy.functional.frontends.jax.func_wrapper import to_ivy_arrays_and_back
from ivy.functional.frontends.jax.numpy import (
jax_numpy_casting_table,
promote_types_jax,
)
from ivy.functional.frontends.numpy import dtype as np_dtype
from ivy import with_supported_dtypes
@to_ivy_arrays_and_back
def can_cast(from_, to, casting="safe"):
ivy.utils.assertions.check_elem_in_list(
casting,
["no", "equiv", "safe", "same_kind", "unsafe"],
message="casting must be one of [no, equiv, safe, same_kind, unsafe]",
)
if ivy.is_array(from_):
from_ = ivy.as_ivy_dtype(from_.dtype)
elif isinstance(from_, (str, type)):
from_ = ivy.as_ivy_dtype(from_)
elif isinstance(from_, np_dtype):
from_ = from_._ivy_dtype
else:
raise ivy.utils.exceptions.IvyException(
"from_ must be one of dtype, dtype specifier, scalar type, or array, "
)
if isinstance(to, (str, type)):
to = ivy.as_ivy_dtype(to)
elif isinstance(to, np_dtype):
to = to._ivy_dtype
else:
raise ivy.utils.exceptions.IvyException(
"to must be one of dtype, or dtype specifier"
)
if casting in ["no", "equiv"]:
return from_ == to
if casting == "safe":
return to in jax_numpy_casting_table[from_]
if casting == "same_kind":
if from_ == to or "bool" in from_:
return True
elif ivy.is_int_dtype(from_) and ("float" in to or "complex" in to):
return True
elif ivy.is_float_dtype(from_) and ("float" in to or "complex" in to):
if "bfloat" in from_ and "float16" in to:
return False
return True
elif ivy.is_uint_dtype(from_) and (
"int" in to or "float" in to or "complex" in to
):
return True
elif (
ivy.is_int_dtype(from_)
and ivy.is_int_dtype(to)
and not ivy.is_uint_dtype(to)
):
return True
elif "complex" in from_ and "bfloat16" in to:
return True
else:
return to in jax_numpy_casting_table[from_]
if casting == "unsafe":
return True
return False
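# Illustrative behaviour (assuming the jax casting table mirrors numpy's):
# >>> can_cast("int8", "int32")
# True
# >>> can_cast("float64", "float32")
# False
# >>> can_cast("float64", "float32", casting="same_kind")
# True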
@with_supported_dtypes(
{"2.15.0 and below": ("float16", "float32", "float64")},
"jax",
)
@to_ivy_arrays_and_back
def finfo(dtype):
return ivy.finfo(dtype)
@with_supported_dtypes(
{"2.15.0 and below": ("integer",)},
"jax",
)
@to_ivy_arrays_and_back
def iinfo(int_type):
return ivy.iinfo(int_type)
def promote_types(type1, type2, /):
if isinstance(type1, np_dtype):
type1 = type1._ivy_dtype
if isinstance(type2, np_dtype):
type2 = type2._ivy_dtype
return np_dtype(promote_types_jax(type1, type2))
@to_ivy_arrays_and_back
def result_type(*args):
return ivy.result_type(*args)
| ivy/ivy/functional/frontends/jax/numpy/dtype.py/0 | {
"file_path": "ivy/ivy/functional/frontends/jax/numpy/dtype.py",
"repo_id": "ivy",
"token_count": 1407
} | 31 |
from . import function
from .function import *
| ivy/ivy/functional/frontends/mindspore/ops/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/mindspore/ops/__init__.py",
"repo_id": "ivy",
"token_count": 11
} | 32 |
from . import methods
from .methods import *
| ivy/ivy/functional/frontends/numpy/broadcast/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/broadcast/__init__.py",
"repo_id": "ivy",
"token_count": 12
} | 33 |
# global
import functools
from typing import Callable, Any
import inspect
import platform
# local
import ivy
import ivy.functional.frontends.numpy as np_frontend
# --- Helpers --- #
# --------------- #
# general casting
def _assert_array(args, dtype, scalar_check=False, casting="safe"):
if args and dtype:
if not scalar_check:
ivy.utils.assertions.check_all_or_any_fn(
*args,
fn=lambda x: np_frontend.can_cast(
x, ivy.as_ivy_dtype(dtype), casting=casting
),
type="all",
message=f"type of input is incompatible with dtype: {dtype}",
)
else:
assert_fn = None if casting == "safe" else ivy.exists
if ivy.is_bool_dtype(dtype):
assert_fn = ivy.is_bool_dtype
if ivy.is_int_dtype(dtype):
            def assert_fn(x):  # noqa: F811
return not ivy.is_float_dtype(x)
if assert_fn:
ivy.utils.assertions.check_all_or_any_fn(
*args,
fn=lambda x: (
assert_fn(x)
if ivy.shape(x) == ()
else np_frontend.can_cast(
x, ivy.as_ivy_dtype(dtype), casting=casting
)
),
type="all",
message=f"type of input is incompatible with dtype: {dtype}",
)
# no casting
def _assert_no_array(args, dtype, scalar_check=False, none=False):
if args:
first_arg = args[0]
fn_func = ivy.as_ivy_dtype(dtype) if ivy.exists(dtype) else ivy.dtype(first_arg)
def assert_fn(x):
return ivy.dtype(x) == fn_func
if scalar_check:
            def assert_fn(x):  # noqa: F811
return (
ivy.dtype(x) == fn_func
if ivy.shape(x) != ()
else _casting_no_special_case(ivy.dtype(x), fn_func, none)
)
ivy.utils.assertions.check_all_or_any_fn(
*args,
fn=assert_fn,
type="all",
message=f"type of input is incompatible with dtype: {dtype}",
)
def _assert_no_scalar(args, dtype, none=False):
if args:
first_arg = args[0]
ivy.utils.assertions.check_all_or_any_fn(
*args,
fn=lambda x: type(x) == type(first_arg), # noqa: E721
type="all",
message=f"type of input is incompatible with dtype: {dtype}",
)
if dtype:
if ivy.is_int_dtype(dtype):
check_dtype = int
elif ivy.is_float_dtype(dtype):
check_dtype = float
else:
check_dtype = bool
ivy.utils.assertions.check_equal(
type(args[0]),
check_dtype,
message=f"type of input is incompatible with dtype: {dtype}",
as_array=False,
)
if ivy.as_ivy_dtype(dtype) not in ["float64", "int8", "int64", "uint8"]:
if isinstance(args[0], int):
ivy.utils.assertions.check_elem_in_list(
dtype,
["int16", "int32", "uint16", "uint32", "uint64"],
inverse=True,
)
elif isinstance(args[0], float):
ivy.utils.assertions.check_equal(
dtype, "float32", inverse=True, as_array=False
)
def _assert_scalar(args, dtype):
if args and dtype:
assert_fn = None
if ivy.is_int_dtype(dtype):
            def assert_fn(x):  # noqa: F811
return not isinstance(x, float)
elif ivy.is_bool_dtype(dtype):
def assert_fn(x):
return isinstance(x, bool)
if assert_fn:
ivy.utils.assertions.check_all_or_any_fn(
*args,
fn=assert_fn,
type="all",
message=f"type of input is incompatible with dtype: {dtype}",
)
def _casting_no_special_case(dtype1, dtype, none=False):
if dtype == "float16":
allowed_dtypes = ["float32", "float64"]
if not none:
allowed_dtypes += ["float16"]
return dtype1 in allowed_dtypes
if dtype in ["int8", "uint8"]:
if none:
return ivy.is_int_dtype(dtype1) and dtype1 not in ["int8", "uint8"]
return ivy.is_int_dtype(dtype1)
return dtype1 == dtype
def _check_C_order(x):
if isinstance(x, ivy.Array):
return True
elif isinstance(x, np_frontend.ndarray):
if x._f_contiguous:
return False
else:
return True
else:
return None
def _count_operands(subscript):
if "->" in subscript:
input_subscript, output_index = subscript.split("->")
else:
input_subscript = subscript
return len(input_subscript.split(","))
def _ivy_to_numpy(x: Any) -> Any:
if isinstance(x, ivy.Array) or ivy.is_native_array(x):
a = np_frontend.ndarray(x, _init_overload=True)
return a
else:
return x
def _ivy_to_numpy_order_F(x: Any) -> Any:
if isinstance(x, ivy.Array) or ivy.is_native_array(x):
a = np_frontend.ndarray(
0, order="F"
) # TODO Find better initialisation workaround
a.ivy_array = x
return a
else:
return x
def _native_to_ivy_array(x):
if isinstance(x, ivy.NativeArray):
return ivy.array(x)
return x
def _numpy_frontend_to_ivy(x: Any) -> Any:
if hasattr(x, "ivy_array"):
return x.ivy_array
else:
return x
def _set_order(args, order):
ivy.utils.assertions.check_elem_in_list(
order,
["C", "F", "A", "K", None],
message="order must be one of 'C', 'F', 'A', or 'K'",
)
if order in ["K", "A", None]:
check_order = ivy.nested_map(
_check_C_order, args, include_derived={"tuple": True}, shallow=False
)
if all(v is None for v in check_order) or any(
ivy.multi_index_nest(check_order, ivy.all_nested_indices(check_order))
):
order = "C"
else:
order = "F"
return order
def _to_ivy_array(x):
return _numpy_frontend_to_ivy(_native_to_ivy_array(x))
# --- Main --- #
# ------------ #
def from_zero_dim_arrays_to_scalar(fn: Callable) -> Callable:
@functools.wraps(fn)
def _from_zero_dim_arrays_to_scalar(*args, **kwargs):
"""Call the function, and then convert all 0 dimensional array
instances in the function to float numbers if out argument is not
provided.
Parameters
----------
args
The arguments to be passed to the function.
kwargs
The keyword arguments to be passed to the function.
Returns
-------
The return of the function, with 0 dimensional arrays as float numbers.
"""
# call unmodified function
ret = fn(*args, **kwargs)
if ("out" in kwargs and kwargs["out"] is None) or "out" not in kwargs:
if isinstance(ret, tuple):
# converting every scalar element of the tuple to float
data = tuple(ivy.native_array(i) for i in ret)
data = ivy.copy_nest(data, to_mutable=True)
ret_idx = ivy.nested_argwhere(data, lambda x: x.shape == ())
try:
ivy.map_nest_at_indices(
data,
ret_idx,
lambda x: np_frontend.numpy_dtype_to_scalar[ivy.dtype(x)](x),
)
except KeyError as e:
raise ivy.utils.exceptions.IvyException(
"Casting to specified type is unsupported"
) from e
return tuple(data)
else:
# converting the scalar to float
data = ivy.native_array(ret)
if data.shape == ():
try:
return np_frontend.numpy_dtype_to_scalar[ivy.dtype(data)](data)
except KeyError as e:
raise ivy.utils.exceptions.IvyException(
f"Casting to {ivy.dtype(data)} is unsupported"
) from e
return ret
_from_zero_dim_arrays_to_scalar.from_zero_dim_arrays_to_scalar = True
return _from_zero_dim_arrays_to_scalar
def handle_numpy_casting(fn: Callable) -> Callable:
@functools.wraps(fn)
def _handle_numpy_casting(*args, casting="same_kind", dtype=None, **kwargs):
"""Check numpy casting type.
Parameters
----------
args
The arguments to be passed to the function.
kwargs
The keyword arguments to be passed to the function.
Returns
-------
The return of the function, or raise IvyException if error is thrown.
"""
ivy.utils.assertions.check_elem_in_list(
casting,
["no", "equiv", "safe", "same_kind", "unsafe"],
message="casting must be one of [no, equiv, safe, same_kind, unsafe]",
)
args = list(args)
args_scalar_idxs = ivy.nested_argwhere(
args, lambda x: isinstance(x, (int, float, bool))
)
args_scalar_to_check = ivy.multi_index_nest(args, args_scalar_idxs)
args_idxs = ivy.nested_argwhere(args, ivy.is_array)
args_to_check = ivy.multi_index_nest(args, args_idxs)
if casting in ["no", "equiv"]:
none = not dtype
if none:
dtype = args_to_check[0].dtype if args_to_check else None
_assert_no_array(
args_to_check,
dtype,
scalar_check=(args_to_check and args_scalar_to_check),
none=none,
)
_assert_no_scalar(args_scalar_to_check, dtype, none=none)
elif casting in ["same_kind", "safe"]:
_assert_array(
args_to_check,
dtype,
scalar_check=(args_to_check and args_scalar_to_check),
casting=casting,
)
_assert_scalar(args_scalar_to_check, dtype)
if ivy.exists(dtype):
ivy.map_nest_at_indices(
args, args_idxs, lambda x: ivy.astype(x, ivy.as_ivy_dtype(dtype))
)
return fn(*args, **kwargs)
_handle_numpy_casting.handle_numpy_casting = True
return _handle_numpy_casting
def handle_numpy_casting_special(fn: Callable) -> Callable:
@functools.wraps(fn)
def _handle_numpy_casting_special(*args, casting="same_kind", dtype=None, **kwargs):
"""Check numpy casting type for special cases where output must be type
bool.
Parameters
----------
args
The arguments to be passed to the function.
kwargs
The keyword arguments to be passed to the function.
Returns
-------
The return of the function, or raise IvyException if error is thrown.
"""
ivy.utils.assertions.check_elem_in_list(
casting,
["no", "equiv", "safe", "same_kind", "unsafe"],
message="casting must be one of [no, equiv, safe, same_kind, unsafe]",
)
if ivy.exists(dtype):
ivy.utils.assertions.check_equal(
ivy.as_ivy_dtype(dtype),
"bool",
message="output is compatible with bool only",
as_array=False,
)
return fn(*args, **kwargs)
_handle_numpy_casting_special.handle_numpy_casting_special = True
return _handle_numpy_casting_special
def handle_numpy_dtype(fn: Callable) -> Callable:
@functools.wraps(fn)
def _handle_numpy_dtype(*args, dtype=None, **kwargs):
if len(args) > (dtype_pos + 1):
dtype = args[dtype_pos]
kwargs = {
**dict(
zip(
list(inspect.signature(fn).parameters.keys())[
dtype_pos + 1 : len(args)
],
args[dtype_pos + 1 :],
)
),
**kwargs,
}
args = args[:dtype_pos]
elif len(args) == (dtype_pos + 1):
dtype = args[dtype_pos]
args = args[:-1]
return fn(*args, dtype=np_frontend.to_ivy_dtype(dtype), **kwargs)
dtype_pos = list(inspect.signature(fn).parameters).index("dtype")
_handle_numpy_dtype.handle_numpy_dtype = True
return _handle_numpy_dtype
def handle_numpy_out(fn: Callable) -> Callable:
@functools.wraps(fn)
def _handle_numpy_out(*args, **kwargs):
if "out" not in kwargs:
keys = list(inspect.signature(fn).parameters.keys())
if fn.__name__ == "einsum":
out_pos = 1 + _count_operands(args[0])
else:
out_pos = keys.index("out")
kwargs = {
**dict(
zip(
keys[keys.index("out") :],
args[out_pos:],
)
),
**kwargs,
}
args = args[:out_pos]
return fn(*args, **kwargs)
_handle_numpy_out.handle_numpy_out = True
return _handle_numpy_out
def inputs_to_ivy_arrays(fn: Callable) -> Callable:
@functools.wraps(fn)
def _inputs_to_ivy_arrays_np(*args, **kwargs):
"""Convert all `ndarray` instances in both the positional and keyword
arguments into `ivy.Array` instances, and then call the function with
the updated arguments.
Parameters
----------
args
The arguments to be passed to the function.
kwargs
The keyword arguments to be passed to the function.
Returns
-------
The return of the function, with ivy arrays passed in the arguments.
"""
# convert all arrays in the inputs to ivy.Array instances
ivy_args = ivy.nested_map(_to_ivy_array, args, include_derived={"tuple": True})
ivy_kwargs = ivy.nested_map(
_to_ivy_array, kwargs, include_derived={"tuple": True}
)
return fn(*ivy_args, **ivy_kwargs)
_inputs_to_ivy_arrays_np.inputs_to_ivy_arrays_numpy = True
return _inputs_to_ivy_arrays_np
def outputs_to_frontend_arrays(fn: Callable) -> Callable:
@functools.wraps(fn)
def _outputs_to_frontend_arrays(*args, order="K", **kwargs):
"""Call the function, and then convert all `ivy.Array` instances
returned by the function into `ndarray` instances.
Returns
-------
The return of the function, with ivy arrays as numpy arrays.
"""
# handle order and call unmodified function
# ToDo: Remove this default dtype setting
# once frontend specific backend setting is added
set_default_dtype = False
if not ("dtype" in kwargs and ivy.exists(kwargs["dtype"])) and any(
not (ivy.is_array(i) or hasattr(i, "ivy_array")) for i in args
):
if ivy.current_backend_str() == "jax":
import jax
jax.config.update("jax_enable_x64", True)
(
ivy.set_default_int_dtype("int64")
if platform.system() != "Windows"
else ivy.set_default_int_dtype("int32")
)
ivy.set_default_float_dtype("float64")
set_default_dtype = True
if contains_order:
if len(args) >= (order_pos + 1):
order = args[order_pos]
args = args[:-1]
order = _set_order(args, order)
try:
ret = fn(*args, order=order, **kwargs)
finally:
if set_default_dtype:
ivy.unset_default_int_dtype()
ivy.unset_default_float_dtype()
else:
try:
ret = fn(*args, **kwargs)
finally:
if set_default_dtype:
ivy.unset_default_int_dtype()
ivy.unset_default_float_dtype()
if not ivy.array_mode:
return ret
# convert all returned arrays to `ndarray` instances
if order == "F":
return ivy.nested_map(
_ivy_to_numpy_order_F, ret, include_derived={"tuple": True}
)
else:
return ivy.nested_map(_ivy_to_numpy, ret, include_derived={"tuple": True})
if "order" in list(inspect.signature(fn).parameters.keys()):
contains_order = True
order_pos = list(inspect.signature(fn).parameters).index("order")
else:
contains_order = False
_outputs_to_frontend_arrays.outputs_to_frontend_arrays_numpy = True
return _outputs_to_frontend_arrays
def to_ivy_arrays_and_back(fn: Callable) -> Callable:
"""Wrap `fn` so it receives and returns `ivy.Array` instances.
Wrap `fn` so that input arrays are all converted to `ivy.Array` instances and
return arrays are all converted to `ndarray` instances.
"""
return outputs_to_frontend_arrays(inputs_to_ivy_arrays(fn))
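# A hedged sketch of how these wrappers typically stack on a frontend
# function (outer decorators run first, so ``out``/``dtype`` handling happens
# before array conversion and casting checks):
#
# @handle_numpy_out
# @handle_numpy_dtype
# @to_ivy_arrays_and_back
# @handle_numpy_casting
# @from_zero_dim_arrays_to_scalar
# def some_ufunc(x, /, out=None, *, where=True, casting="same_kind",
#                order="K", dtype=None, subok=True):
#     ...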
| ivy/ivy/functional/frontends/numpy/func_wrapper.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/func_wrapper.py",
"repo_id": "ivy",
"token_count": 9256
} | 34 |
# global
import ivy
from ivy.functional.frontends.numpy.func_wrapper import (
to_ivy_arrays_and_back,
handle_numpy_casting_special,
handle_numpy_dtype,
from_zero_dim_arrays_to_scalar,
handle_numpy_out,
)
# --- Helpers --- #
# --------------- #
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting_special
@from_zero_dim_arrays_to_scalar
def _isfinite(
x,
/,
out=None,
*,
where=True,
casting="same_kind",
order="K",
dtype=None,
subok=True,
):
ret = ivy.isfinite(x, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting_special
@from_zero_dim_arrays_to_scalar
def _isinf(
x,
/,
out=None,
*,
where=True,
casting="same_kind",
order="K",
dtype=None,
subok=True,
):
ret = ivy.isinf(x, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting_special
@from_zero_dim_arrays_to_scalar
def _isnan(
x,
/,
out=None,
*,
where=True,
casting="same_kind",
order="K",
dtype=None,
subok=True,
):
ret = ivy.isnan(x, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
| ivy/ivy/functional/frontends/numpy/logic/array_type_testing.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/logic/array_type_testing.py",
"repo_id": "ivy",
"token_count": 750
} | 35 |
# global
import ivy
from ivy.functional.frontends.numpy.func_wrapper import to_ivy_arrays_and_back
# atleast_1d
@to_ivy_arrays_and_back
def atleast_1d(
*arys,
):
return ivy.atleast_1d(*arys)
@to_ivy_arrays_and_back
def atleast_2d(*arys):
return ivy.atleast_2d(*arys)
@to_ivy_arrays_and_back
def atleast_3d(*arys):
return ivy.atleast_3d(*arys)
# broadcast_arrays
@to_ivy_arrays_and_back
def broadcast_arrays(*args):
return ivy.broadcast_arrays(*args)
# expand_dims
@to_ivy_arrays_and_back
def expand_dims(
a,
axis,
):
return ivy.expand_dims(a, axis=axis)
# squeeze
@to_ivy_arrays_and_back
def squeeze(
a,
axis=None,
):
return ivy.squeeze(a, axis=axis)
| ivy/ivy/functional/frontends/numpy/manipulation_routines/changing_number_of_dimensions.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/manipulation_routines/changing_number_of_dimensions.py",
"repo_id": "ivy",
"token_count": 343
} | 36 |