import jax.numpy as jnp
from ivy.functional.backends.jax import JaxArray
# local
from ivy.func_wrapper import (
with_supported_device_and_dtypes,
)
from . import backend_version
def huber_loss(
input: JaxArray, target: JaxArray, /, *, delta: float = 1.0, reduction: str = "mean"
) -> JaxArray:
residual = jnp.abs(input - target)
quadratic_loss = 0.5 * (residual**2)
linear_loss = delta * residual - 0.5 * (delta**2)
loss = jnp.where(residual < delta, quadratic_loss, linear_loss)
if reduction == "mean":
loss = jnp.mean(loss)
elif reduction == "sum":
loss = jnp.sum(loss)
return loss
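# Hedged usage sketch (values are illustrative, not from the test suite):
# residuals below `delta` are penalised quadratically, larger ones linearly.
# >>> pred = jnp.array([0.0, 0.0, 0.0])
# >>> true = jnp.array([0.5, 1.0, 3.0])
# >>> huber_loss(pred, true, delta=1.0, reduction="none")
# Array([0.125, 0.5  , 2.5  ], dtype=float32)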
def smooth_l1_loss(
input: JaxArray,
target: JaxArray,
/,
*,
beta: float = 1.0,
reduction: str = "mean",
) -> JaxArray:
if beta < 1e-5:
loss = jnp.abs(input - target)
else:
diff = jnp.abs(input - target)
loss = jnp.where(diff < beta, 0.5 * diff**2 / beta, diff - 0.5 * beta)
if reduction == "mean":
return jnp.mean(loss)
elif reduction == "sum":
return jnp.sum(loss)
else:
return loss
def soft_margin_loss(
input: JaxArray,
target: JaxArray,
/,
*,
reduction: str = "mean",
) -> JaxArray:
    loss = jnp.log1p(jnp.exp(-input * target))
if reduction == "mean":
return jnp.mean(loss)
elif reduction == "sum":
return jnp.sum(loss)
else:
return loss
def _apply_loss_reduction(loss: JaxArray, reduction: str) -> JaxArray:
if reduction == "sum":
return jnp.sum(loss)
elif reduction == "mean":
return jnp.mean(loss)
else: # reduction == "none"
return loss
def _validate_poisson_nll_params(
input,
label,
epsilon,
reduction,
allowed_dtypes=["float16", "float32", "float64"],
):
# Validate dtypes
for parameter, name in zip([input, label], ["input", "label"]):
if parameter.dtype not in allowed_dtypes:
raise TypeError(
f"The dtype of '{name}' in poisson_nll_loss should be one of"
f" {allowed_dtypes}, but received {parameter.dtype}."
)
# Validate epsilon
if epsilon <= 0:
raise ValueError(
"The value of `epsilon` in poisson_nll_loss should be positive, but"
f" received {epsilon}, which is not allowed."
)
# Validate reduction
if reduction not in ["sum", "mean", "none"]:
raise ValueError(
"The value of 'reduction' in poisson_nll_loss should be 'sum', 'mean' or"
f" 'none', but received {reduction}, which is not allowed."
)
# Validate shape
if input.shape != label.shape:
raise ValueError(
f"The shape of 'input' ({input.shape}) must be the same as the shape of"
f" 'label' ({label.shape})."
)
return True
@with_supported_device_and_dtypes(
{
"0.4.18 and below": {
"cpu": ("float16", "float32", "float64"),
}
},
backend_version,
)
def poisson_nll_loss(
input: JaxArray,
target: JaxArray,
*,
log_input: bool = True,
full: bool = False,
eps: float = 1e-8,
reduction: str = "mean",
) -> JaxArray:
input_arr = jnp.asarray(input, dtype=input.dtype)
target_arr = jnp.asarray(target, dtype=input.dtype)
# check params
_validate_poisson_nll_params(input_arr, target_arr, eps, reduction)
if log_input:
loss = jnp.exp(input_arr) - target_arr * input_arr
else:
loss = input_arr - target_arr * jnp.log(input_arr + eps)
if full:
point_five = jnp.array(0.5, dtype=target_arr.dtype)
two_pi = jnp.array(2 * jnp.pi, dtype=target_arr.dtype)
        stirling_approx_term = (
(target_arr * jnp.log(target_arr))
- target_arr
+ (point_five * jnp.log(two_pi * target_arr))
)
zeroes = jnp.zeros_like(target_arr, dtype=target_arr.dtype)
ones = jnp.ones_like(target_arr, dtype=target_arr.dtype)
cond = jnp.logical_and(target_arr >= zeroes, target_arr <= ones)
        loss = loss + jnp.where(cond, zeroes, stirling_approx_term)
return _apply_loss_reduction(loss, reduction)
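# Illustrative sketch of the two branches (hypothetical values): with
# log_input=True the elementwise loss is exp(input) - target * input, so
# poisson_nll_loss(jnp.array([0.0, 1.0]), jnp.array([1.0, 2.0]),
# reduction="none") gives [exp(0) - 0, exp(1) - 2] ~= [1.0, 0.718]; the
# Stirling correction is only added for targets strictly greater than 1
# when full=True.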
@with_supported_device_and_dtypes(
{
"0.4.18 and below": {
"cpu": ("float32", "float64"),
}
},
backend_version,
)
def hinge_embedding_loss(
input: JaxArray,
target: JaxArray,
*,
margin: float = 1.0,
reduction: str = "mean",
) -> JaxArray:
zero_ = jnp.zeros([1], dtype=input.dtype)
relu_part = jnp.maximum(margin - input, 0)
loss = jnp.where(target == 1.0, input, zero_) + jnp.where(
target == -1.0, relu_part, zero_
)
return _apply_loss_reduction(loss, reduction)
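# Hedged example: targets are expected in {-1, 1}; +1 entries contribute the
# raw input, -1 entries contribute max(0, margin - input). Illustrative:
# >>> inp = jnp.array([0.5, 2.0])
# >>> tgt = jnp.array([1.0, -1.0])
# >>> hinge_embedding_loss(inp, tgt, margin=1.0, reduction="none")
# Array([0.5, 0. ], dtype=float32)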
| ivy/ivy/functional/backends/jax/experimental/losses.py/0 | {
"file_path": "ivy/ivy/functional/backends/jax/experimental/losses.py",
"repo_id": "ivy",
"token_count": 2211
} | 16 |
"""Collection of Jax random functions, wrapped to fit Ivy syntax and
signature."""
# global
import jax
import jax.numpy as jnp
import jaxlib.xla_extension
from typing import Optional, Union, Sequence
# local
import ivy
from ivy.functional.ivy.random import (
_check_bounds_and_get_shape,
_randint_check_dtype_and_bound,
_check_valid_scale,
)
from ivy.functional.backends.jax import JaxArray
from ivy.func_wrapper import with_unsupported_dtypes
from . import backend_version
# Extra #
# ------#
class RNGWrapper:
def __init__(self):
self.key = jax.random.PRNGKey(0)
RNG = RNGWrapper()
def _setRNG(key):
global RNG
RNG.key = key
def _getRNG():
global RNG
return RNG.key
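# A minimal sketch of how the wrapper is consumed below (assumption: callers
# always split the stored key and write one half back, so repeated calls
# without an explicit seed advance the global stream):
# >>> before = _getRNG()
# >>> new_state, consumed = jax.random.split(before)
# >>> _setRNG(new_state)
# >>> bool(jnp.all(_getRNG() == before))
# False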
def random_uniform(
*,
low: Union[float, JaxArray] = 0.0,
high: Union[float, JaxArray] = 1.0,
shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
device: jaxlib.xla_extension.Device = None,
dtype: jnp.dtype,
seed: Optional[int] = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
shape = _check_bounds_and_get_shape(low, high, shape).shape
    if seed is not None:
rng_input = jax.random.PRNGKey(seed)
else:
RNG_, rng_input = jax.random.split(_getRNG())
_setRNG(RNG_)
return jax.random.uniform(
rng_input, shape, minval=low, maxval=high, dtype=jnp.float32
).astype(dtype)
def random_normal(
*,
mean: Union[float, JaxArray] = 0.0,
std: Union[float, JaxArray] = 1.0,
shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
device: jaxlib.xla_extension.Device = None,
dtype: jnp.dtype,
seed: Optional[int] = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
_check_valid_scale(std)
shape = _check_bounds_and_get_shape(mean, std, shape).shape
    if seed is not None:
rng_input = jax.random.PRNGKey(seed)
else:
RNG_, rng_input = jax.random.split(_getRNG())
_setRNG(RNG_)
return jax.random.normal(rng_input, shape, dtype=dtype) * std + mean
@with_unsupported_dtypes({"0.4.24 and below": ("bfloat16",)}, backend_version)
def multinomial(
population_size: int,
num_samples: int,
/,
*,
batch_size: int = 1,
probs: Optional[JaxArray] = None,
replace: bool = True,
device: jaxlib.xla_extension.Device = None,
seed: Optional[int] = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
    if seed is not None:
        rng_input = jax.random.PRNGKey(seed)
    else:
        RNG_, rng_input = jax.random.split(_getRNG())
        _setRNG(RNG_)
if probs is None:
probs = (
jnp.ones(
(
batch_size,
population_size,
)
)
/ population_size
)
orig_probs_shape = list(probs.shape)
num_classes = orig_probs_shape[-1]
probs_flat = jnp.reshape(probs, (-1, orig_probs_shape[-1]))
probs_flat = probs_flat / jnp.sum(probs_flat, -1, keepdims=True, dtype="float64")
probs_stack = jnp.split(probs_flat, probs_flat.shape[0])
samples_stack = [
jax.random.choice(rng_input, num_classes, (num_samples,), replace, p=prob[0])
for prob in probs_stack
]
samples_flat = jnp.stack(samples_stack)
return jnp.reshape(samples_flat, orig_probs_shape[:-1] + [num_samples])
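# Illustrative call (shapes only, drawn values are random): with probs=None a
# uniform distribution over the population is used, and the result has shape
# (batch_size, num_samples).
# >>> multinomial(10, 4, batch_size=2, seed=42).shape
# (2, 4)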
def randint(
low: Union[int, JaxArray],
high: Union[int, JaxArray],
/,
*,
shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
device: jaxlib.xla_extension.Device = None,
dtype: Optional[Union[jnp.dtype, ivy.Dtype]] = None,
seed: Optional[int] = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
if not dtype:
dtype = ivy.default_int_dtype()
dtype = ivy.as_native_dtype(dtype)
_randint_check_dtype_and_bound(low, high, dtype)
shape = _check_bounds_and_get_shape(low, high, shape).shape
    if seed is not None:
rng_input = jax.random.PRNGKey(seed)
else:
RNG_, rng_input = jax.random.split(_getRNG())
_setRNG(RNG_)
return jax.random.randint(rng_input, shape, low, high, dtype)
def seed(*, seed_value: int = 0) -> None:
_setRNG(jax.random.PRNGKey(seed_value))
return
def shuffle(
x: JaxArray,
axis: Optional[int] = 0,
/,
*,
seed: Optional[int] = None,
out: Optional[JaxArray] = None,
) -> JaxArray:
if x.shape == ():
return x
    if seed is not None:
rng_input = jax.random.PRNGKey(seed)
else:
RNG_, rng_input = jax.random.split(_getRNG())
_setRNG(RNG_)
# jax.random.shuffle is deprecated; identical behaviour reproduced with
# jax.random.permutation
return jax.random.permutation(key=rng_input, x=x, axis=axis, independent=True)
| ivy/ivy/functional/backends/jax/random.py/0 | {
"file_path": "ivy/ivy/functional/backends/jax/random.py",
"repo_id": "ivy",
"token_count": 2216
} | 17 |
from typing import Union, Optional, Tuple
import mxnet as mx
import numpy as np
from ivy.utils.exceptions import IvyNotImplementedException
def kaiser_window(
window_length: int,
periodic: bool = True,
beta: float = 12.0,
*,
dtype: Optional[None] = None,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def kaiser_bessel_derived_window(
window_length: int,
periodic: bool = True,
beta: float = 12.0,
*,
dtype: Optional[None] = None,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def vorbis_window(
window_length: Union[(None, mx.ndarray.NDArray)],
*,
dtype: None = np.float32,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def hann_window(
size: int,
/,
*,
periodic: bool = True,
dtype: Optional[None] = None,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def tril_indices(
n_rows: int, n_cols: Optional[int] = None, k: int = 0, /, *, device: str
) -> Tuple[(Union[(None, mx.ndarray.NDArray)], ...)]:
raise IvyNotImplementedException()
def blackman_window(
size: int,
/,
*,
periodic: bool = True,
dtype: Optional[None] = None,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
| ivy/ivy/functional/backends/mxnet/experimental/creation.py/0 | {
"file_path": "ivy/ivy/functional/backends/mxnet/experimental/creation.py",
"repo_id": "ivy",
"token_count": 683
} | 18 |
def if_else(cond, body_fn, orelse_fn, vars):
# back-compatibility
if isinstance(cond, bool):
v = cond
def cond(*_):
return v
cond = cond(**vars)
if cond:
return body_fn(**vars)
else:
return orelse_fn(**vars)
def while_loop(test_fn, body_fn, vars):
result = vars
if isinstance(vars, dict):
result = list(vars.values())
while test_fn(*result):
result = body_fn(*result)
if not isinstance(result, tuple):
result = (result,)
return result
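# Minimal sketch of the calling convention (hypothetical state): `vars` seeds
# the loop state, test/body receive the unpacked state, and the final state
# comes back as a tuple.
# >>> def test_fn(i, acc):
# ...     return i < 3
# >>> def body_fn(i, acc):
# ...     return i + 1, acc + i
# >>> while_loop(test_fn, body_fn, {"i": 0, "acc": 0})
# (3, 3)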
| ivy/ivy/functional/backends/numpy/control_flow_ops.py/0 | {
"file_path": "ivy/ivy/functional/backends/numpy/control_flow_ops.py",
"repo_id": "ivy",
"token_count": 264
} | 19 |
import numpy as np
from ivy.functional.backends.numpy.helpers import _scalar_output_to_0d_array
from ivy.func_wrapper import (
with_unsupported_dtypes,
with_supported_device_and_dtypes,
)
from . import backend_version
@with_unsupported_dtypes({"1.26.3 and below": ("bool",)}, backend_version)
@_scalar_output_to_0d_array
def huber_loss(
input: np.ndarray,
target: np.ndarray,
/,
*,
delta: float = 1.0,
reduction: str = "mean",
) -> np.ndarray:
abs_diff = np.abs(input - target)
quadratic_loss = 0.5 * (abs_diff**2)
linear_loss = delta * (abs_diff - 0.5 * delta)
loss = np.where(abs_diff <= delta, quadratic_loss, linear_loss)
if reduction == "sum":
return np.sum(loss)
elif reduction == "mean":
return np.mean(loss)
else:
return loss
@with_unsupported_dtypes({"1.26.3 and below": ("bool",)}, backend_version)
@_scalar_output_to_0d_array
def smooth_l1_loss(
input: np.ndarray,
target: np.ndarray,
/,
*,
beta: float = 1.0,
reduction: str = "mean",
) -> np.ndarray:
if beta < 1e-5:
loss = np.abs(input - target)
else:
diff = np.abs(input - target)
loss = np.where(diff < beta, 0.5 * diff**2 / beta, diff - 0.5 * beta)
if reduction == "mean":
return np.mean(loss)
elif reduction == "sum":
return np.sum(loss)
else:
return loss
@with_unsupported_dtypes({"1.26.3 and below": ("bool",)}, backend_version)
@_scalar_output_to_0d_array
def soft_margin_loss(
input: np.ndarray,
target: np.ndarray,
/,
*,
reduction: str = "mean",
) -> np.ndarray:
    loss = np.log1p(np.exp(-input * target))
if reduction == "mean":
return np.mean(loss)
elif reduction == "sum":
return np.sum(loss)
else:
return loss
def _apply_loss_reduction(loss: np.ndarray, reduction: str) -> np.ndarray:
if reduction == "sum":
return np.sum(loss)
elif reduction == "mean":
return np.mean(loss)
else: # reduction == "none"
return loss
def _validate_poisson_nll_params(
input,
label,
epsilon,
reduction,
allowed_dtypes=["float16", "float32", "float64"],
):
# Validate dtypes
for parameter, name in zip([input, label], ["input", "label"]):
if parameter.dtype not in allowed_dtypes:
raise TypeError(
f"The dtype of '{name}' in poisson_nll_loss should be one of"
f" {allowed_dtypes}, but received {parameter.dtype}."
)
# Validate epsilon
if epsilon <= 0:
raise ValueError(
"The value of `epsilon` in poisson_nll_loss should be positive, but"
f" received {epsilon}, which is not allowed."
)
# Validate reduction
if reduction not in ["sum", "mean", "none"]:
raise ValueError(
"The value of 'reduction' in poisson_nll_loss should be 'sum', 'mean' or"
f" 'none', but received {reduction}, which is not allowed."
)
# Validate shape
if input.shape != label.shape:
raise ValueError(
f"The shape of 'input' ({input.shape}) must be the same as the shape of"
f" 'label' ({label.shape})."
)
return True
@with_supported_device_and_dtypes(
{
"1.26.0 and below": {
"cpu": ("float16", "float32", "float64"),
}
},
backend_version,
)
@_scalar_output_to_0d_array
def poisson_nll_loss(
input: np.ndarray,
target: np.ndarray,
*,
log_input: bool = True,
full: bool = False,
eps: float = 1e-8,
reduction: str = "mean",
) -> np.ndarray:
input_arr = np.asarray(input)
target_arr = np.asarray(target, dtype=input.dtype)
_validate_poisson_nll_params(input_arr, target_arr, eps, reduction)
if log_input:
loss = np.exp(input_arr) - target_arr * input_arr
else:
loss = input_arr - target_arr * np.log(input_arr + eps)
if full:
point_five = np.array(0.5, dtype=target_arr.dtype)
two_pi = np.array(2 * np.pi, dtype=target_arr.dtype)
        stirling_approx_term = (
(target_arr * np.log(target_arr))
- target_arr
+ (point_five * np.log(two_pi * target_arr))
)
zeroes = np.zeros_like(target_arr, dtype=target_arr.dtype)
ones = np.ones_like(target_arr, dtype=target_arr.dtype)
cond = np.logical_and(target_arr >= zeroes, target_arr <= ones)
        loss = loss + np.where(cond, zeroes, stirling_approx_term)
return _apply_loss_reduction(loss, reduction)
@with_supported_device_and_dtypes(
{
"1.26.0 and below": {
"cpu": ("float32", "float64"),
}
},
backend_version,
)
def hinge_embedding_loss(
input: np.ndarray,
target: np.ndarray,
*,
margin: float = 1.0,
reduction: str = "mean",
) -> np.ndarray:
zero_ = np.zeros([1], dtype=input.dtype)
relu_part = np.maximum(margin - input, 0)
loss = np.where(target == 1.0, input, zero_) + np.where(
target == -1.0, relu_part, zero_
)
return _apply_loss_reduction(loss, reduction)
| ivy/ivy/functional/backends/numpy/experimental/losses.py/0 | {
"file_path": "ivy/ivy/functional/backends/numpy/experimental/losses.py",
"repo_id": "ivy",
"token_count": 2380
} | 20 |
# global
from typing import Optional, Union, Literal
import paddle
import paddle.nn.functional as F
# local
import ivy.functional.backends.paddle as paddle_backend
from ivy.func_wrapper import (
with_unsupported_device_and_dtypes,
with_supported_dtypes,
with_supported_device_and_dtypes,
)
from . import backend_version
@with_unsupported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("float16", "bfloat16")}}, backend_version
)
def logit(
x: paddle.Tensor,
/,
*,
eps: Optional[float] = None,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out=None,
):
if x.dtype in [paddle.float32, paddle.float64]:
return paddle.logit(x, eps)
if eps is None:
nan = paddle_backend.squeeze(
paddle.to_tensor(float("nan"), dtype=x.dtype), axis=-1
)
x = paddle_backend.where(
paddle_backend.logical_or(
paddle_backend.greater(x, 1), paddle_backend.less(x, 0)
),
nan,
x,
)
else:
x = paddle_backend.minimum(paddle_backend.maximum(x, eps), 1 - eps)
return paddle_backend.log(
paddle_backend.divide(x, paddle_backend.subtract(1, x))
).cast(x.dtype)
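# Hedged sketch: float32/float64 inputs hit the native kernel directly; other
# dtypes go through the compositional branch above, which clamps to
# [eps, 1 - eps] (or emits NaN outside [0, 1] when eps is None) before
# taking log(x / (1 - x)).
# >>> logit(paddle.to_tensor([0.25, 0.75]))   # float32 -> paddle.logit
# Tensor([-1.0986, 1.0986])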
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, backend_version)
def thresholded_relu(
x: paddle.Tensor,
/,
*,
threshold: Optional[Union[int, float]] = 0,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
return F.thresholded_relu(x, threshold=threshold)
@with_supported_dtypes(
{"2.6.0 and below": ("complex", "float32", "float64")}, backend_version
)
def relu6(
x: paddle.Tensor, /, *, complex_mode="jax", out: Optional[paddle.Tensor] = None
) -> paddle.Tensor:
    if paddle.is_complex(x):
        # elementwise ReLU6 on the real part: keep x where 0 < Re(x) <= 6,
        # otherwise zero (Python `and` on tensors would only work for 0-d)
        return paddle_backend.where(
            paddle_backend.logical_and(x.real() > 0, x.real() <= 6),
            x,
            paddle_backend.zeros_like(x),
        ).astype(x.dtype)
return F.relu6(x)
@with_supported_dtypes(
{"2.6.0 and below": ("complex", "float32", "float64")}, backend_version
)
def logsigmoid(
input: paddle.Tensor, /, *, complex_mode="jax", out: Optional[paddle.Tensor] = None
) -> paddle.Tensor:
if paddle.is_complex(input):
return paddle_backend.log(
paddle_backend.divide(
1.0, (paddle_backend.add(1.0, paddle_backend.exp(-input)))
)
)
return F.log_sigmoid(input)
@with_supported_dtypes(
{"2.6.0 and below": ("complex", "float32", "float64")}, backend_version
)
def selu(x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None) -> paddle.Tensor:
if paddle.is_complex(x):
alpha = 1.6732632423543772848170429916717
scale = 1.0507009873554804934193349852946
ret = paddle_backend.multiply(
scale,
paddle_backend.where(
paddle_backend.greater(x, 0),
x,
paddle_backend.multiply(alpha, paddle_backend.expm1(x)),
),
)
return ret
return F.selu(x)
@with_supported_dtypes(
{"2.6.0 and below": ("complex", "float32", "float64")}, backend_version
)
def silu(x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None) -> paddle.Tensor:
if paddle.is_complex(x):
return x * (1.0 / (1.0 + paddle_backend.exp(-x)))
return F.silu(x)
@with_supported_dtypes(
{"2.6.0 and below": ("complex", "float32", "float64")}, backend_version
)
def elu(
x: paddle.Tensor, /, *, alpha: float = 1.0, out: Optional[paddle.Tensor] = None
) -> paddle.Tensor:
if paddle.is_complex(x):
        # no trailing comma here -- a stray comma would wrap the result in a
        # tuple instead of returning the tensor itself
        ret = paddle_backend.where(
            paddle_backend.greater(x, 0),
            x,
            paddle_backend.multiply(alpha, paddle_backend.expm1(x)),
        )
        return ret
return F.elu(x, alpha=alpha)
@with_unsupported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("bfloat16", "float16")}}, backend_version
)
def hardtanh(
x: paddle.Tensor,
/,
*,
max_val: float = 1.0,
min_val: float = -1.0,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if x.dtype in [paddle.float32, paddle.float64]:
return F.hardtanh(x, min=min_val, max=max_val)
if paddle.is_complex(x):
        # as in elu, avoid wrapping the result in a one-element tuple
        ret = paddle_backend.where(
            paddle_backend.greater(x, max_val),
            max_val,
            paddle_backend.where(paddle_backend.less(x, min_val), min_val, x),
        )
        return ret
return F.hardtanh(x.cast("float32"), min=min_val, max=max_val).cast(x.dtype)
@with_unsupported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("bfloat16", "float16")}}, backend_version
)
def tanhshrink(
x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None
) -> paddle.Tensor:
if x.dtype in [paddle.float32, paddle.float64]:
return F.tanhshrink(x)
if paddle.is_complex(x):
return paddle.complex(F.tanhshrink(x.real()), F.tanhshrink(x.imag()))
return F.tanhshrink(x.cast("float32")).cast(x.dtype)
@with_unsupported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("bfloat16", "float16")}}, backend_version
)
def threshold(
x: paddle.Tensor,
/,
*,
threshold: float,
value: float,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if x.dtype in [paddle.float32, paddle.float64]:
return paddle_backend.where(paddle_backend.greater(x, threshold), x, value)
if paddle.is_complex(x):
return paddle_backend.where(paddle_backend.greater(x, threshold), x, value)
x = x.cast("float32")
return paddle_backend.where(paddle_backend.greater(x, threshold), x, value).cast(
x.dtype
)
@with_unsupported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("bfloat16", "float16")}}, backend_version
)
def softshrink(
x: paddle.Tensor, /, *, lambd: float = 0.5, out: Optional[paddle.Tensor] = None
) -> paddle.Tensor:
if x.dtype in [paddle.float32, paddle.float64]:
return F.softshrink(x, threshold=lambd)
if paddle.is_complex(x):
return paddle.complex(
F.softshrink(x.real(), threshold=lambd),
            F.softshrink(x.imag(), threshold=lambd),
)
return F.softshrink(x.cast("float32"), threshold=lambd).cast(x.dtype)
@with_unsupported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("bfloat16", "float16")}}, backend_version
)
def celu(
x: paddle.Tensor,
/,
*,
alpha: float = 1.0,
complex_mode="jax",
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
return F.celu(x, alpha=alpha)
@with_supported_device_and_dtypes(
{
"2.6.0 and below": {
"cpu": ("float32", "float64"),
"gpu": ("uint16", "float16", "float32", "float64"),
}
},
backend_version,
)
def scaled_tanh(
x: paddle.Tensor,
/,
*,
alpha: float = 1.7159,
beta: float = 0.67,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
return paddle.stanh(x, scale_a=beta, scale_b=alpha)
@with_unsupported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("float16", "bfloat16")}},
backend_version,
)
def hardshrink(
x: paddle.Tensor, /, *, lambd: float = 0.5, out: Optional[paddle.Tensor] = None
) -> paddle.Tensor:
if x.dtype in [paddle.float32, paddle.float64]:
return F.hardshrink(x, threshold=lambd)
if paddle.is_complex(x):
return paddle.complex(
F.hardshrink(x.real(), threshold=lambd),
            F.hardshrink(x.imag(), threshold=lambd),
)
return F.hardshrink(x.cast("float32"), threshold=lambd).cast(x.dtype)
@with_supported_dtypes({"2.5.1 and below": ("float32", "float64")}, backend_version)
def hardsilu(
x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None
) -> paddle.Tensor:
return F.hardswish(x)
| ivy/ivy/functional/backends/paddle/experimental/activations.py/0 | {
"file_path": "ivy/ivy/functional/backends/paddle/experimental/activations.py",
"repo_id": "ivy",
"token_count": 3668
} | 21 |
import os
from ivy.utils.backend.sub_backend_handler import find_available_sub_backends
sub_backends_loc = __file__.rpartition(os.path.sep)[0]
available_sub_backends = find_available_sub_backends(sub_backends_loc)
current_sub_backends = []
| ivy/ivy/functional/backends/paddle/sub_backends/__init__.py/0 | {
"file_path": "ivy/ivy/functional/backends/paddle/sub_backends/__init__.py",
"repo_id": "ivy",
"token_count": 88
} | 22 |
# global
import tensorflow as tf
from typing import Callable
# local
import ivy
from ivy.func_wrapper import inputs_to_native_arrays
from ivy.functional.ivy.gradients import _get_required_float_variables
from ivy.functional.ivy.gradients import (
_flatten_containers,
_rebuild_flattened_containers,
)
def bind_custom_gradient_function(func, custom_grad_fn):
@tf.custom_gradient
def custom_module(x):
x, _, _, _, _ = _get_required_float_variables(x, xs_grad_idxs=None)
ret = func(x)
def grad(upstream):
return custom_grad_fn((x, ret), upstream)
return ivy.to_native((ret, grad), nested=True, include_derived=True)
return inputs_to_native_arrays(custom_module)
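# Hedged usage sketch (names below are illustrative, not part of the API):
# wrap `f` so that its backward pass is produced by `my_grad((x, ret),
# upstream)` instead of TF's autodiff.
#
# def f(x):
#     return x ** 2
#
# def my_grad(primals_and_ret, upstream):
#     x, _ = primals_and_ret
#     return upstream * 3 * x  # deliberately not the true gradient 2 * x
#
# f_custom = bind_custom_gradient_function(f, my_grad)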
def vjp(func: Callable, *primals):
flattened_primals, ret_idxs = _flatten_containers(primals)
native_flattened_primals = ivy.to_native(flattened_primals, nested=True)
def grad_fn(*x_in):
return _flatten_containers(
ivy.to_native(
func(
*ivy.to_ivy(
_rebuild_flattened_containers(x_in, ret_idxs), nested=True
)
),
nested=True,
include_derived=True,
)
)
with tf.GradientTape(persistent=True, watch_accessed_variables=False) as tape:
tape.watch(native_flattened_primals)
flat_primals_out, func_ret_idxs = grad_fn(*native_flattened_primals)
primals_out = _rebuild_flattened_containers(flat_primals_out, func_ret_idxs)
def vjpfun(x_in):
grads = tape.gradient(
flat_primals_out,
native_flattened_primals,
output_gradients=ivy.to_native(_flatten_containers(x_in)[0], nested=True),
)
return _rebuild_flattened_containers(
ivy.to_ivy(grads, nested=True, include_derived=True), ret_idxs
)
return (ivy.to_ivy(primals_out, nested=True, include_derived=True), vjpfun)
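# Calling-convention sketch (illustrative values): vjp returns the primal
# output together with a pullback mapping output cotangents to input
# cotangents, e.g.
# >>> out, pullback = vjp(lambda x: x * x, ivy.array([3.0]))
# >>> pullback(ivy.array([1.0]))   # d(x*x)/dx at 3.0 -> 6.0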
def jvp(func: Callable, primals, tangents):
flattened_primals, ret_idxs = _flatten_containers(primals)
flattened_tangents, _ = _flatten_containers(tangents)
def grad_fn(*x_in):
return _flatten_containers(
ivy.to_native(
func(
*ivy.to_ivy(
_rebuild_flattened_containers(x_in, ret_idxs), nested=True
)
),
nested=True,
include_derived=True,
)
)
flattened_primals = ivy.to_native(flattened_primals, nested=True)
flattened_tangents = ivy.to_native(flattened_tangents, nested=True)
with tf.autodiff.ForwardAccumulator(
flattened_primals,
flattened_tangents,
) as acc:
flat_primals_out, func_ret_idxs = grad_fn(*flattened_primals)
tangents_out = acc.jvp(flat_primals_out)
return ivy.to_ivy(
(
_rebuild_flattened_containers(flat_primals_out, func_ret_idxs),
_rebuild_flattened_containers(tangents_out, func_ret_idxs),
),
nested=True,
include_derived=True,
)
| ivy/ivy/functional/backends/tensorflow/experimental/gradients.py/0 | {
"file_path": "ivy/ivy/functional/backends/tensorflow/experimental/gradients.py",
"repo_id": "ivy",
"token_count": 1540
} | 23 |
# global
from typing import Union, Optional, Tuple, Literal, List, NamedTuple, Sequence
from collections import namedtuple
import tensorflow as tf
from tensorflow.python.framework.dtypes import DType
# local
import ivy
from ivy import inf
from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes
from . import backend_version
# Array API Standard #
# -------------------#
@with_unsupported_dtypes({"2.15.0 and below": ("float16", "bfloat16")}, backend_version)
def cholesky(
x: Union[tf.Tensor, tf.Variable],
/,
*,
upper: bool = False,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if not upper:
ret = tf.linalg.cholesky(x)
else:
axes = list(range(len(x.shape) - 2)) + [len(x.shape) - 1, len(x.shape) - 2]
ret = tf.transpose(tf.linalg.cholesky(tf.transpose(x, perm=axes)), perm=axes)
return ret
@with_unsupported_dtypes(
{
"2.15.0 and below": (
"complex",
"float16",
)
},
backend_version,
)
def cross(
x1: Union[tf.Tensor, tf.Variable],
x2: Union[tf.Tensor, tf.Variable],
/,
*,
axisa: int = -1,
axisb: int = -1,
axisc: int = -1,
axis: Optional[int] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
return tf.experimental.numpy.cross(
x1, x2, axisa=axisa, axisb=axisb, axisc=axisc, axis=axis
)
@with_unsupported_dtypes(
{
"2.15.0 and below": (
"float16",
"bfloat16",
)
},
backend_version,
)
def det(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.linalg.det(x)
@with_unsupported_dtypes({"2.15.0 and below": ("complex",)}, backend_version)
def diagonal(
x: Union[tf.Tensor, tf.Variable],
/,
*,
offset: int = 0,
axis1: int = -2,
axis2: int = -1,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.experimental.numpy.diagonal(x, offset, axis1=axis1, axis2=axis2)
@with_unsupported_dtypes({"2.15.0 and below": ("float16", "bfloat16")}, backend_version)
def eig(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Tuple[Union[tf.Tensor, tf.Variable]]:
result_tuple = NamedTuple(
"eig",
[
("eigenvalues", Union[tf.Tensor, tf.Variable]),
("eigenvectors", Union[tf.Tensor, tf.Variable]),
],
)
eigenvalues, eigenvectors = tf.linalg.eig(x)
return result_tuple(eigenvalues, eigenvectors)
@with_unsupported_dtypes({"2.15.0 and below": ("float16", "bfloat16")}, backend_version)
def eigh(
x: Union[tf.Tensor, tf.Variable],
/,
*,
UPLO: str = "L",
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Tuple[Union[tf.Tensor, tf.Variable]]:
if UPLO not in ("L", "U"):
raise ValueError("UPLO argument must be 'L' or 'U'")
result_tuple = NamedTuple(
"eigh",
[
("eigenvalues", Union[tf.Tensor, tf.Variable]),
("eigenvectors", Union[tf.Tensor, tf.Variable]),
],
)
if UPLO == "L":
eigenvalues, eigenvectors = tf.linalg.eigh(x)
elif UPLO == "U":
axes = list(range(len(x.shape) - 2)) + [len(x.shape) - 1, len(x.shape) - 2]
eigenvalues, eigenvectors = tf.linalg.eigh(tf.transpose(x, perm=axes))
return result_tuple(eigenvalues, eigenvectors)
@with_unsupported_dtypes({"2.15.0 and below": ("float16", "bfloat16")}, backend_version)
def eigvalsh(
x: Union[tf.Tensor, tf.Variable],
/,
*,
UPLO: str = "L",
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if UPLO not in ("L", "U"):
raise ValueError("UPLO argument must be 'L' or 'U'")
if UPLO == "L":
return tf.linalg.eigh(x)[0]
elif UPLO == "U":
axes = list(range(len(x.shape) - 2)) + [len(x.shape) - 1, len(x.shape) - 2]
ret = tf.linalg.eigh(tf.transpose(x, perm=axes))[0]
return ret
@with_unsupported_dtypes(
{
"2.15.0 and below": (
"int8",
"uint8",
"int16",
"uint16",
"uint32",
"uint64",
"complex",
)
},
backend_version,
)
# noinspection PyUnusedLocal,PyShadowingBuiltins
def inner(
x1: Union[tf.Tensor, tf.Variable],
x2: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
return tf.experimental.numpy.inner(x1, x2)
@with_unsupported_dtypes(
{
"2.15.0 and below": (
"float16",
"bfloat16",
)
},
backend_version,
)
def inv(
x: Union[tf.Tensor, tf.Variable],
/,
*,
adjoint: bool = False,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.linalg.inv(x, adjoint=adjoint)
@with_unsupported_dtypes(
{"2.15.0 and below": ("float16", "bfloat16", "bool")}, backend_version
)
def matmul(
x1: Union[tf.Tensor, tf.Variable],
x2: Union[tf.Tensor, tf.Variable],
/,
*,
transpose_a: bool = False,
transpose_b: bool = False,
adjoint_a: bool = False,
adjoint_b: bool = False,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
dtype_from = tf.as_dtype(x1.dtype)
if transpose_a:
x1 = tf.linalg.matrix_transpose(x1)
if transpose_b:
x2 = tf.linalg.matrix_transpose(x2)
if adjoint_a:
x1 = tf.linalg.adjoint(x1)
if adjoint_b:
x2 = tf.linalg.adjoint(x2)
if dtype_from.is_unsigned or dtype_from == tf.int8 or dtype_from == tf.int16:
x1 = tf.cast(x1, tf.int64)
x2 = tf.cast(x2, tf.int64)
if x1.dtype != x2.dtype:
x1 = tf.cast(x1, dtype_from)
x2 = tf.cast(x2, dtype_from)
if (
x1.shape == ()
or x2.shape == ()
or (len(x1.shape) == len(x2.shape) == 1 and x1.shape != x2.shape)
or (len(x1.shape) == 1 and len(x2.shape) >= 2 and x1.shape[0] != x2.shape[-2])
or (len(x2.shape) == 1 and len(x1.shape) >= 2 and x2.shape[0] != x1.shape[-1])
or (len(x1.shape) >= 2 and len(x2.shape) >= 2 and x1.shape[-1] != x2.shape[-2])
):
        raise ivy.utils.exceptions.IvyException("Error: shapes not compatible")
x1_padded = False
x1_padded_2 = False
x2_padded = False
if len(x1.shape) == len(x2.shape) == 1:
        if x1.shape[0] == 0:
            ret = tf.constant(0)
        else:
            ret = tf.reduce_sum(tf.math.multiply(x1, x2))
        ret = tf.cast(ret, dtype=dtype_from)
else:
if len(x1.shape) == 1:
if len(x2.shape) == 2:
x1_padded_2 = True
elif len(x2.shape) > 2:
x1_padded = True
x1 = tf.expand_dims(x1, axis=0)
elif len(x2.shape) == 1 and len(x1.shape) >= 2:
x2 = tf.expand_dims(x2, axis=1)
x2_padded = True
ret = tf.matmul(x1, x2)
ret = ivy.to_native(ivy.astype(ret, dtype_from, copy=False))
if x1_padded_2:
ret = ret[0]
elif x1_padded:
ret = tf.squeeze(ret, axis=-2)
elif x2_padded:
ret = tf.squeeze(ret, axis=-1)
return ret
@with_supported_dtypes(
{"2.15.0 and below": ("float32", "float64", "complex")}, backend_version
)
def matrix_norm(
x: Union[tf.Tensor, tf.Variable],
/,
*,
ord: Union[int, float, Literal[inf, -inf, "fro", "nuc"]] = "fro",
axis: Tuple[int, int] = (-2, -1),
keepdims: bool = False,
dtype: Optional[tf.DType] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if dtype is not None:
x = ivy.astype(x, dtype)
if ord == "nuc":
x = tf.experimental.numpy.moveaxis(x, axis, (-2, -1))
ret = tf.reduce_sum(
tf.linalg.svd(x, compute_uv=False),
axis=-1,
)
elif ord == -1:
ret = tf.reduce_min(
tf.reduce_sum(tf.abs(x), axis=axis[0], keepdims=True),
axis=axis,
keepdims=keepdims,
)
elif ord == -2:
x = tf.experimental.numpy.moveaxis(x, axis, (-2, -1))
ret = tf.reduce_min(
tf.linalg.svd(x, compute_uv=False),
axis=-1,
)
elif ord == float("-inf"):
ret = tf.reduce_min(
tf.reduce_sum(tf.abs(x), axis=axis[1], keepdims=True),
axis=axis,
keepdims=keepdims,
)
else:
ret = tf.norm(x, ord=ord, axis=axis, keepdims=keepdims)
ret = tf.cast(ret, ret.dtype.real_dtype)
if keepdims and ord in [-2, "nuc"]:
for dim in axis:
ret = tf.expand_dims(ret, dim % tf.rank(x))
return ret
@with_unsupported_dtypes({"2.15.0 and below": ("float16", "bfloat16")}, backend_version)
def matrix_power(
x: Union[tf.Tensor, tf.Variable],
n: int,
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if n == 0:
return tf.broadcast_to(tf.eye(x.shape[-2], dtype=x.dtype), x.shape)
elif n < 0:
x = tf.linalg.inv(x)
n = abs(n)
if n == 1:
return x
elif n == 2:
return x @ x
elif n == 3:
return (x @ x) @ x
z = result = None
while n > 0:
z = x if z is None else (z @ z)
n, bit = divmod(n, 2)
if bit:
result = z if result is None else (result @ z)
# replace any -0 with 0
result = tf.where(tf.equal(result, -0), tf.zeros_like(result), result)
return result
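# Beyond the small-n shortcuts, the loop above is binary (square-and-multiply)
# exponentiation, so matrix_power costs O(log n) matmuls. Illustrative check:
# >>> a = tf.constant([[2.0, 0.0], [0.0, 3.0]])
# >>> matrix_power(a, 5)           # diag(2**5, 3**5) = diag(32, 243)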
@with_unsupported_dtypes(
{"2.15.0 and below": ("bfloat16", "float16", "complex")},
backend_version,
)
# noinspection PyPep8Naming
def matrix_rank(
x: Union[tf.Tensor, tf.Variable],
/,
*,
atol: Optional[Union[float, Tuple[float]]] = None,
rtol: Optional[Union[float, Tuple[float]]] = None,
hermitian: Optional[bool] = False,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if (tf.rank(x) < 2) or (0 in x.shape):
return tf.convert_to_tensor(0, dtype=tf.int64)
# we don't use the native matrix_rank function because the behaviour of the
# tolerance argument is difficult to unify,
# and the native implementation is compositional
if hermitian:
svd_values = tf.abs(tf.linalg.eigh(x)[0])
else:
svd_values = tf.linalg.svd(x, compute_uv=False)
sigma = tf.reduce_max(svd_values, axis=-1, keepdims=False)
atol = (
atol
if atol is not None
else tf.experimental.numpy.finfo(x.dtype).eps * max(x.shape[-2:]) * sigma
)
rtol = rtol if rtol is not None else 0.0
tol = tf.maximum(atol, rtol * sigma)
# make sure it's broadcastable again with svd_values
tol = tf.expand_dims(tol, axis=-1)
ret = tf.math.count_nonzero(svd_values > tol, axis=-1)
return ret
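# Sketch of the tolerance rule (matching the code above): singular values are
# compared against max(atol, rtol * sigma_max), which with the defaults
# reduces to eps * max(m, n) * sigma_max.
# >>> x = tf.constant([[1.0, 0.0], [0.0, 1e-10]])
# >>> int(matrix_rank(x))          # 1e-10 falls below the default tolerance
# 1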
@with_unsupported_dtypes(
{
"2.15.0 and below": (
"float16",
"int8",
"int16",
"int32",
"int64",
"uint8",
"uint16",
"uint32",
"uint64",
)
},
backend_version,
)
def matrix_transpose(
x: Union[tf.Tensor, tf.Variable],
/,
*,
conjugate: bool = False,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if conjugate:
x = tf.math.conj(x)
return tf.linalg.matrix_transpose(x)
# noinspection PyUnusedLocal,PyShadowingBuiltins
@with_unsupported_dtypes({"2.15.0 and below": ("complex",)}, backend_version)
def outer(
x1: Union[tf.Tensor, tf.Variable],
x2: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
return tf.experimental.numpy.outer(x1, x2)
@with_unsupported_dtypes(
{"2.15.0 and below": ("bfloat16", "float16", "complex")},
backend_version,
)
def pinv(
x: Union[tf.Tensor, tf.Variable],
/,
*,
rtol: Optional[Union[float, Tuple[float]]] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if rtol is None:
ret = tf.linalg.pinv(x)
else:
x, rtol = ivy.promote_types_of_inputs(x, rtol)
ret = tf.linalg.pinv(x, rtol)
return ret
@with_unsupported_dtypes({"2.15.0 and below": ("float16", "bfloat16")}, backend_version)
def qr(
x: Union[tf.Tensor, tf.Variable],
/,
*,
mode: str = "reduced",
out: Optional[
Tuple[Union[tf.Tensor, tf.Variable], Union[tf.Tensor, tf.Variable]]
] = None,
) -> Tuple[Union[tf.Tensor, tf.Variable], Union[tf.Tensor, tf.Variable]]:
res = namedtuple("qr", ["Q", "R"])
if mode == "reduced":
q, r = tf.linalg.qr(x, full_matrices=False)
ret = res(q, r)
elif mode == "complete":
q, r = tf.linalg.qr(x, full_matrices=True)
ret = res(q, r)
else:
raise ivy.utils.exceptions.IvyException(
"Only 'reduced' and 'complete' qr modes are allowed "
"for the tensorflow backend."
)
return ret
@with_unsupported_dtypes({"2.15.0 and below": ("float16", "bfloat16")}, backend_version)
def slogdet(
x: Union[tf.Tensor, tf.Variable],
/,
) -> Tuple[Union[tf.Tensor, tf.Variable], Union[tf.Tensor, tf.Variable]]:
results = NamedTuple("slogdet", [("sign", tf.Tensor), ("logabsdet", tf.Tensor)])
sign, logabsdet = tf.linalg.slogdet(x)
return results(sign, logabsdet)
@with_unsupported_dtypes(
{"2.15.0 and below": ("bfloat16", "float16", "complex")},
backend_version,
)
def solve(
x1: Union[tf.Tensor, tf.Variable],
x2: Union[tf.Tensor, tf.Variable],
/,
*,
adjoint: bool = False,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if adjoint:
x1 = tf.linalg.adjoint(x1)
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
expanded_last = False
if len(x2.shape) <= 1:
if x2.shape[-1] == x1.shape[-1]:
expanded_last = True
x2 = tf.expand_dims(x2, axis=1)
output_shape = tuple(tf.broadcast_static_shape(x1.shape[:-2], x2.shape[:-2]))
# in case any of the input arrays are empty
is_empty_x1 = tf.equal(tf.size(x1), 0)
is_empty_x2 = tf.equal(tf.size(x2), 0)
if is_empty_x1 or is_empty_x2:
for i in range(len(x1.shape) - 2):
x2 = tf.expand_dims(x2, axis=0)
output_shape = list(output_shape)
output_shape.append(x2.shape[-2])
output_shape.append(x2.shape[-1])
ret = tf.constant([])
ret = tf.reshape(ret, output_shape)
else:
x1 = tf.broadcast_to(x1, output_shape + x1.shape[-2:])
x2 = tf.broadcast_to(x2, output_shape + x2.shape[-2:])
ret = tf.linalg.solve(x1, x2)
if expanded_last:
ret = tf.squeeze(ret, axis=-1)
return ret
@with_unsupported_dtypes(
{"2.15.0 and below": ("bfloat16", "float16", "complex")},
backend_version,
)
def svd(
x: Union[tf.Tensor, tf.Variable],
/,
*,
full_matrices: bool = True,
compute_uv: bool = True,
) -> Union[Union[tf.Tensor, tf.Variable], Tuple[Union[tf.Tensor, tf.Variable], ...]]:
if compute_uv:
results = namedtuple("svd", "U S Vh")
batch_shape = tf.shape(x)[:-2]
num_batch_dims = len(batch_shape)
transpose_dims = list(range(num_batch_dims)) + [
num_batch_dims + 1,
num_batch_dims,
]
D, U, V = tf.linalg.svd(x, full_matrices=full_matrices, compute_uv=compute_uv)
VT = tf.transpose(V, transpose_dims)
return results(U, D, VT)
else:
results = namedtuple("svd", "S")
D = tf.linalg.svd(x, full_matrices=full_matrices, compute_uv=compute_uv)
return results(D)
@with_unsupported_dtypes({"2.15.0 and below": ("float16", "bfloat16")}, backend_version)
def svdvals(
x: Union[tf.Tensor, tf.Variable],
/,
*,
driver: Optional[str] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
# TODO: handling the driver argument
ret = tf.linalg.svd(x, compute_uv=False)
return ret
@with_supported_dtypes({"2.15.0 and below": ("float32",)}, backend_version)
def tensordot(
x1: Union[tf.Tensor, tf.Variable],
x2: Union[tf.Tensor, tf.Variable],
/,
*,
axes: Union[int, Tuple[List[int], List[int]]] = 2,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
dtype = ivy.as_native_dtype(ivy.promote_types(x1.dtype, x2.dtype))
ret = tf.cast(tf.tensordot(x1, x2, axes=axes), dtype)
return ret
@with_unsupported_dtypes(
{"2.15.0 and below": ("bfloat16", "float16", "complex")},
backend_version,
)
def trace(
x: Union[tf.Tensor, tf.Variable],
/,
*,
offset: int = 0,
axis1: int = 0,
axis2: int = 1,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if not isinstance(x, tf.Variable):
if len(x) == 0:
return ivy.array([])
return tf.experimental.numpy.trace(x, offset=offset, axis1=axis1, axis2=axis2)
@with_unsupported_dtypes(
{"2.15.0 and below": ("int16", "int8", "bool", "unsigned")}, backend_version
)
def vecdot(
x1: Union[tf.Tensor, tf.Variable],
x2: Union[tf.Tensor, tf.Variable],
/,
*,
axis: int = -1,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
dtype = ivy.as_native_dtype(ivy.promote_types(x1.dtype, x2.dtype))
return tf.cast(tf.tensordot(x1, x2, axes=(axis, axis)), dtype)
@with_unsupported_dtypes(
{
"2.15.0 and below": (
"float16",
"bfloat16",
"integer",
)
},
backend_version,
)
def vector_norm(
x: Union[tf.Tensor, tf.Variable],
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
ord: Union[int, float, Literal[inf, -inf]] = 2,
dtype: Optional[DType] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if dtype and x.dtype != dtype:
x = tf.cast(x, dtype)
abs_x = tf.abs(x)
if ord == 0:
return tf.reduce_sum(tf.cast(x != 0, abs_x.dtype), axis=axis, keepdims=keepdims)
elif ord == inf:
return tf.reduce_max(abs_x, axis=axis, keepdims=keepdims)
elif ord == -inf:
return tf.reduce_min(abs_x, axis=axis, keepdims=keepdims)
else:
return tf.reduce_sum(abs_x**ord, axis=axis, keepdims=keepdims) ** (1.0 / ord)
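# Illustrative behaviour (hypothetical values): ord=0 counts non-zeros,
# +/-inf take the max/min of |x|, and anything else uses the generic p-norm
# sum(|x| ** p) ** (1 / p).
# >>> float(vector_norm(tf.constant([3.0, -4.0])))   # default ord=2
# 5.0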
# Extra #
# ----- #
@with_unsupported_dtypes({"2.15.0 and below": ("complex",)}, backend_version)
def diag(
x: Union[tf.Tensor, tf.Variable],
/,
*,
k: int = 0,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.experimental.numpy.diag(x, k=k)
@with_unsupported_dtypes(
{"2.15.0 and below": ("bfloat16", "float16", "complex", "unsigned")},
backend_version,
)
def vander(
x: Union[tf.Tensor, tf.Variable],
/,
*,
N: Optional[int] = None,
increasing: bool = False,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.experimental.numpy.vander(x, N=N, increasing=increasing)
@with_unsupported_dtypes(
{
"2.15.0 and below": (
"int8",
"int16",
"int32",
"int64",
"uint8",
"uint16",
"uint32",
"uint64",
"float16",
"float64",
"complex",
)
},
backend_version,
)
def vector_to_skew_symmetric_matrix(
vector: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
batch_shape = list(vector.shape[:-1])
# BS x 3 x 1
vector_expanded = tf.expand_dims(vector, -1)
# BS x 1 x 1
a1s = vector_expanded[..., 0:1, :]
a2s = vector_expanded[..., 1:2, :]
a3s = vector_expanded[..., 2:3, :]
# BS x 1 x 1
zs = tf.zeros(batch_shape + [1, 1], dtype=vector.dtype)
# BS x 1 x 3
row1 = tf.concat((zs, -a3s, a2s), -1)
row2 = tf.concat((a3s, zs, -a1s), -1)
row3 = tf.concat((-a2s, a1s, zs), -1)
# BS x 3 x 3
ret = tf.concat((row1, row2, row3), -2)
return ret
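# The returned matrix [v]_x satisfies [v]_x @ w == cross(v, w). Hedged check
# with a standard basis vector:
# >>> vector_to_skew_symmetric_matrix(tf.constant([1.0, 0.0, 0.0]))
# [[ 0.,  0.,  0.],
#  [ 0.,  0., -1.],
#  [ 0.,  1.,  0.]]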
| ivy/ivy/functional/backends/tensorflow/linear_algebra.py/0 | {
"file_path": "ivy/ivy/functional/backends/tensorflow/linear_algebra.py",
"repo_id": "ivy",
"token_count": 10315
} | 24 |
def if_else(cond, body_fn, orelse_fn, vars):
# back-compatibility
if isinstance(cond, bool):
v = cond
def cond(*_):
return v
if callable(cond):
cond = cond(**vars)
else:
cond = bool(cond)
if cond:
return body_fn(**vars)
else:
return orelse_fn(**vars)
def while_loop(test_fn, body_fn, vars):
if isinstance(vars, dict):
result = list(vars.values())
else:
result = list(vars)
    while test_fn(*result):
result = body_fn(*result)
if not isinstance(result, tuple):
result = (result,)
return result
| ivy/ivy/functional/backends/torch/control_flow_ops.py/0 | {
"file_path": "ivy/ivy/functional/backends/torch/control_flow_ops.py",
"repo_id": "ivy",
"token_count": 314
} | 25 |
from typing import Optional
import torch
from ivy.func_wrapper import (
with_unsupported_dtypes,
with_supported_device_and_dtypes,
with_supported_dtypes,
)
from . import backend_version
@with_unsupported_dtypes(
{"2.2 and below": ("unit8", "int8", "int16", "int32", "int64", "bool")},
backend_version,
)
def l1_loss(
input: torch.Tensor,
target: torch.Tensor,
/,
*,
reduction: Optional[str] = "mean",
) -> torch.Tensor:
return torch.nn.functional.l1_loss(
input,
target,
reduction=reduction,
)
@with_unsupported_dtypes(
{
"2.2 and below": (
"complex",
"uint8",
"int8",
"int16",
"int32",
"int64",
"bool",
)
},
backend_version,
)
def smooth_l1_loss(
input: torch.Tensor,
target: torch.Tensor,
/,
*,
beta: Optional[float] = 1.0,
reduction: Optional[str] = "mean",
) -> torch.Tensor:
return torch.nn.functional.smooth_l1_loss(
input,
target,
beta=beta,
reduction=reduction,
)
@with_unsupported_dtypes(
{"2.2 and below": ("uint8", "int8", "int16", "int32", "int64", "bool")},
backend_version,
)
def huber_loss(
input: torch.Tensor,
target: torch.Tensor,
/,
*,
reduction: Optional[str] = "mean",
delta: Optional[float] = 1.0,
) -> torch.Tensor:
return torch.nn.functional.huber_loss(
input, target, reduction=reduction, delta=delta
)
@with_unsupported_dtypes(
{
"2.2 and below": (
"float16",
"uint8",
"int8",
"int16",
"int32",
"int64",
"bool",
)
},
backend_version,
)
def soft_margin_loss(
input: torch.Tensor,
target: torch.Tensor,
/,
*,
reduction: Optional[str] = "mean",
) -> torch.Tensor:
return torch.nn.functional.soft_margin_loss(
input,
target,
reduction=reduction,
)
@with_supported_dtypes(
{"2.2 and below": ("float",)},
backend_version,
)
def kl_div(
input: torch.Tensor,
target: torch.Tensor,
/,
*,
reduction: Optional[str] = "mean",
log_target=False,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
loss = torch.nn.functional.kl_div(
input, target, reduction=reduction, log_target=log_target
)
return loss
@with_supported_device_and_dtypes(
{
"2.15.0 and below": {
"cpu": (
"float32",
"float64",
"int8",
"int16",
"int32",
"int64",
"uint8",
"complex64",
"complex128",
),
}
},
backend_version,
)
def poisson_nll_loss(
input: torch.Tensor,
target: torch.Tensor,
*,
log_input: bool = True,
full: bool = False,
eps: float = 1e-8,
reduction: str = "mean",
) -> torch.Tensor:
return torch.nn.functional.poisson_nll_loss(
input, target, log_input=log_input, full=full, eps=eps, reduction=reduction
)
@with_supported_device_and_dtypes(
{
"2.2 and below": {
"cpu": ("float16", "float32", "float64"),
"gpu": ("float16", "float32", "float64"),
}
},
backend_version,
)
def hinge_embedding_loss(
input: torch.Tensor,
target: torch.Tensor,
*,
margin: float = 1.0,
reduction: str = "mean",
) -> torch.Tensor:
return torch.nn.functional.hinge_embedding_loss(
input, target, margin=margin, reduction=reduction
)
| ivy/ivy/functional/backends/torch/experimental/losses.py/0 | {
"file_path": "ivy/ivy/functional/backends/torch/experimental/losses.py",
"repo_id": "ivy",
"token_count": 1852
} | 26 |
import torch
from typing import Optional, List
from ivy.func_wrapper import with_unsupported_dtypes
from . import backend_version
@with_unsupported_dtypes({"2.2 and below": ("bfloat16", "float16")}, backend_version)
def layer_norm(
x: torch.Tensor,
normalized_idxs: List[int],
/,
*,
scale: Optional[torch.Tensor] = None,
offset: Optional[torch.Tensor] = None,
eps: float = 1e-05,
new_std: float = 1.0,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
normalized_shape = x.shape[normalized_idxs[0] :]
xnormalized = torch.nn.functional.layer_norm(
x, normalized_shape, weight=scale, bias=offset, eps=eps
)
return torch.multiply(xnormalized, new_std)
| ivy/ivy/functional/backends/torch/norms.py/0 | {
"file_path": "ivy/ivy/functional/backends/torch/norms.py",
"repo_id": "ivy",
"token_count": 285
} | 27 |
from . import lax_numpy
from . import array_methods
| ivy/ivy/functional/frontends/jax/_src/numpy/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/jax/_src/numpy/__init__.py",
"repo_id": "ivy",
"token_count": 16
} | 28 |
# global
from numbers import Number
from typing import Union, Tuple, Iterable
# local
import ivy
from ivy.utils.exceptions import handle_exceptions
import ivy.functional.frontends.jax as jax_frontend
# Constructing dtypes is required as ivy.<dtype>
# will change dynamically on the backend and may not be available
_int8 = ivy.IntDtype("int8")
_int16 = ivy.IntDtype("int16")
_int32 = ivy.IntDtype("int32")
_int64 = ivy.IntDtype("int64")
_uint8 = ivy.UintDtype("uint8")
_uint16 = ivy.UintDtype("uint16")
_uint32 = ivy.UintDtype("uint32")
_uint64 = ivy.UintDtype("uint64")
_bfloat16 = ivy.FloatDtype("bfloat16")
_float16 = ivy.FloatDtype("float16")
_float32 = ivy.FloatDtype("float32")
_float64 = ivy.FloatDtype("float64")
_complex64 = ivy.ComplexDtype("complex64")
_complex128 = ivy.ComplexDtype("complex128")
_bool = ivy.Dtype("bool")
# jax-numpy casting table
jax_numpy_casting_table = {
_bool: [
_bool,
_int8,
_int16,
_int32,
_int64,
_uint8,
_uint16,
_uint32,
_uint64,
_float16,
_float32,
_float64,
_complex64,
_complex128,
_bfloat16,
],
_int8: [
_int8,
_int16,
_int32,
_int64,
_float16,
_float32,
_float64,
_complex64,
_complex128,
_bfloat16,
],
_int16: [
_int16,
_int32,
_int64,
_float32,
_float64,
_complex64,
_complex128,
],
_int32: [
_int32,
_int64,
_float64,
_complex128,
],
_int64: [
_int64,
_float64,
_complex128,
],
_uint8: [
_int16,
_int32,
_int64,
_uint8,
_uint16,
_uint32,
_uint64,
_float16,
_float32,
_float64,
_complex64,
_complex128,
_bfloat16,
],
_uint16: [
_int32,
_int64,
_uint16,
_uint32,
_uint64,
_float32,
_float64,
_complex64,
_complex128,
],
_uint32: [
_int64,
_uint32,
_uint64,
_float64,
_complex128,
],
_uint64: [
_uint64,
_float64,
_complex128,
],
_float16: [
_float16,
_float32,
_float64,
_complex64,
_complex128,
],
_float32: [
_float32,
_float64,
_complex64,
_complex128,
],
_float64: [
_float64,
_complex128,
],
    _complex64: [_complex64, _complex128],
_complex128: [_complex128],
_bfloat16: [
_bfloat16,
_float32,
_float64,
_complex64,
_complex128,
],
}
# jax-numpy type promotion table
# data type promotion
jax_promotion_table = {
(_bool, _bool): _bool,
(_bool, _uint8): _uint8,
(_bool, _uint16): _uint16,
(_bool, _uint32): _uint32,
(_bool, _uint64): _uint64,
(_bool, _int8): _int8,
(_bool, _int16): _int16,
(_bool, _int32): _int32,
(_bool, _int64): _int64,
(_bool, _bfloat16): _bfloat16,
(_bool, _float16): _float16,
(_bool, _float32): _float32,
(_bool, _float64): _float64,
(_bool, _complex64): _complex64,
(_bool, _complex128): _complex128,
(_uint8, _bool): _uint8,
(_uint8, _uint8): _uint8,
(_uint8, _uint16): _uint16,
(_uint8, _uint32): _uint32,
(_uint8, _uint64): _uint64,
(_uint8, _int8): _int16,
(_uint8, _int16): _int16,
(_uint8, _int32): _int32,
(_uint8, _int64): _int64,
(_uint8, _bfloat16): _bfloat16,
(_uint8, _float16): _float16,
(_uint8, _float32): _float32,
(_uint8, _float64): _float64,
(_uint8, _complex64): _complex64,
(_uint8, _complex128): _complex128,
(_uint16, _bool): _uint16,
(_uint16, _uint8): _uint16,
(_uint16, _uint16): _uint16,
(_uint16, _uint32): _uint32,
(_uint16, _uint64): _uint64,
(_uint16, _int8): _int32,
(_uint16, _int16): _int32,
(_uint16, _int32): _int32,
(_uint16, _int64): _int64,
(_uint16, _bfloat16): _bfloat16,
(_uint16, _float16): _float16,
(_uint16, _float32): _float32,
(_uint16, _float64): _float64,
(_uint16, _complex64): _complex64,
(_uint16, _complex128): _complex128,
(_uint32, _bool): _uint32,
(_uint32, _uint8): _uint32,
(_uint32, _uint16): _uint32,
(_uint32, _uint32): _uint32,
(_uint32, _uint64): _uint64,
(_uint32, _int8): _int64,
(_uint32, _int16): _int64,
(_uint32, _int32): _int64,
(_uint32, _int64): _int64,
(_uint32, _bfloat16): _bfloat16,
(_uint32, _float16): _float16,
(_uint32, _float32): _float32,
(_uint32, _float64): _float64,
(_uint32, _complex64): _complex64,
(_uint32, _complex128): _complex128,
(_uint64, _bool): _uint64,
(_uint64, _uint8): _uint64,
(_uint64, _uint16): _uint64,
(_uint64, _uint32): _uint64,
(_uint64, _uint64): _uint64,
(_uint64, _int8): _float64,
(_uint64, _int16): _float64,
(_uint64, _int32): _float64,
(_uint64, _int64): _float64,
(_uint64, _bfloat16): _bfloat16,
(_uint64, _float16): _float16,
(_uint64, _float32): _float32,
(_uint64, _float64): _float64,
(_uint64, _complex64): _complex64,
(_uint64, _complex128): _complex128,
(_int8, _bool): _int8,
(_int8, _uint8): _int16,
(_int8, _uint16): _int32,
(_int8, _uint32): _int64,
(_int8, _uint64): _float64,
(_int8, _int8): _int8,
(_int8, _int16): _int16,
(_int8, _int32): _int32,
(_int8, _int64): _int64,
(_int8, _bfloat16): _bfloat16,
(_int8, _float16): _float16,
(_int8, _float32): _float32,
(_int8, _float64): _float64,
(_int8, _complex64): _complex64,
(_int8, _complex128): _complex128,
(_int16, _bool): _int16,
(_int16, _uint8): _int16,
(_int16, _uint16): _int32,
(_int16, _uint32): _int64,
(_int16, _uint64): _float64,
(_int16, _int8): _int16,
(_int16, _int16): _int16,
(_int16, _int32): _int32,
(_int16, _int64): _int64,
(_int16, _bfloat16): _bfloat16,
(_int16, _float16): _float16,
(_int16, _float32): _float32,
(_int16, _float64): _float64,
(_int16, _complex64): _complex64,
(_int16, _complex128): _complex128,
(_int32, _bool): _int32,
(_int32, _uint8): _int32,
(_int32, _uint16): _int32,
(_int32, _uint32): _int64,
(_int32, _uint64): _float64,
(_int32, _int8): _int32,
(_int32, _int16): _int32,
(_int32, _int32): _int32,
(_int32, _int64): _int64,
(_int32, _bfloat16): _bfloat16,
(_int32, _float16): _float16,
(_int32, _float32): _float32,
(_int32, _float64): _float64,
(_int32, _complex64): _complex64,
(_int32, _complex128): _complex128,
(_int64, _bool): _int64,
(_int64, _uint8): _int64,
(_int64, _uint16): _int64,
(_int64, _uint32): _int64,
(_int64, _uint64): _float64,
(_int64, _int8): _int64,
(_int64, _int16): _int64,
(_int64, _int32): _int64,
(_int64, _int64): _int64,
(_int64, _bfloat16): _bfloat16,
(_int64, _float16): _float16,
(_int64, _float32): _float32,
(_int64, _float64): _float64,
(_int64, _complex64): _complex64,
(_int64, _complex128): _complex128,
(_bfloat16, _bool): _bfloat16,
(_bfloat16, _uint8): _bfloat16,
(_bfloat16, _uint16): _bfloat16,
(_bfloat16, _uint32): _bfloat16,
(_bfloat16, _uint64): _bfloat16,
(_bfloat16, _int8): _bfloat16,
(_bfloat16, _int16): _bfloat16,
(_bfloat16, _int32): _bfloat16,
(_bfloat16, _int64): _bfloat16,
(_bfloat16, _bfloat16): _bfloat16,
(_bfloat16, _float16): _float32,
(_bfloat16, _float32): _float32,
(_bfloat16, _float64): _float64,
(_bfloat16, _complex64): _complex64,
(_bfloat16, _complex128): _complex128,
(_float16, _bool): _float16,
(_float16, _uint8): _float16,
(_float16, _uint16): _float16,
(_float16, _uint32): _float16,
(_float16, _uint64): _float16,
(_float16, _int8): _float16,
(_float16, _int16): _float16,
(_float16, _int32): _float16,
(_float16, _int64): _float16,
(_float16, _bfloat16): _float32,
(_float16, _float16): _float16,
(_float16, _float32): _float32,
(_float16, _float64): _float64,
(_float16, _complex64): _complex64,
(_float16, _complex128): _complex128,
(_float32, _bool): _float32,
(_float32, _uint8): _float32,
(_float32, _uint16): _float32,
(_float32, _uint32): _float32,
(_float32, _uint64): _float32,
(_float32, _int8): _float32,
(_float32, _int16): _float32,
(_float32, _int32): _float32,
(_float32, _int64): _float32,
(_float32, _bfloat16): _float32,
(_float32, _float16): _float32,
(_float32, _float32): _float32,
(_float32, _float64): _float64,
(_float32, _complex64): _complex64,
(_float32, _complex128): _complex128,
(_float64, _bool): _float64,
(_float64, _uint8): _float64,
(_float64, _uint16): _float64,
(_float64, _uint32): _float64,
(_float64, _uint64): _float64,
(_float64, _int8): _float64,
(_float64, _int16): _float64,
(_float64, _int32): _float64,
(_float64, _int64): _float64,
(_float64, _bfloat16): _float64,
(_float64, _float16): _float64,
(_float64, _float32): _float64,
(_float64, _float64): _float64,
(_float64, _complex64): _complex128,
(_float64, _complex128): _complex128,
(_complex64, _bool): _complex64,
(_complex64, _int8): _complex64,
(_complex64, _int16): _complex64,
(_complex64, _int32): _complex64,
(_complex64, _int64): _complex64,
(_complex64, _uint8): _complex64,
(_complex64, _uint16): _complex64,
(_complex64, _uint32): _complex64,
(_complex64, _uint64): _complex64,
(_complex64, _float16): _complex64,
(_complex64, _float32): _complex64,
(_complex64, _float64): _complex128,
(_complex64, _bfloat16): _complex64,
(_complex64, _complex64): _complex64,
(_complex64, _complex128): _complex128,
(_complex128, _bool): _complex128,
(_complex128, _int8): _complex128,
(_complex128, _int16): _complex128,
(_complex128, _int32): _complex128,
(_complex128, _int64): _complex128,
(_complex128, _uint8): _complex128,
(_complex128, _uint16): _complex128,
(_complex128, _uint32): _complex128,
(_complex128, _uint64): _complex128,
(_complex128, _float16): _complex128,
(_complex128, _float32): _complex128,
(_complex128, _float64): _complex128,
(_complex128, _bfloat16): _complex128,
(_complex128, _complex64): _complex128,
(_complex128, _complex128): _complex128,
}
dtype_replacement_dict = {
_int64: _int32,
_uint64: _uint32,
_float64: _float32,
_complex128: _complex64,
}
@handle_exceptions
def promote_types_jax(
type1: Union[ivy.Dtype, ivy.NativeDtype],
type2: Union[ivy.Dtype, ivy.NativeDtype],
/,
) -> ivy.Dtype:
"""Promote the datatypes type1 and type2, returning the data type they
promote to.
Parameters
----------
type1
the first of the two types to promote
type2
the second of the two types to promote
Returns
-------
ret
The type that both input types promote to
"""
try:
ret = jax_promotion_table[(ivy.as_ivy_dtype(type1), ivy.as_ivy_dtype(type2))]
except KeyError as e:
raise ivy.utils.exceptions.IvyException(
"these dtypes are not type promotable"
) from e
return ret
def _handle_x64_promotion(d):
if not jax_frontend.config.jax_enable_x64:
d = dtype_replacement_dict[d] if d in dtype_replacement_dict else d
return d
@handle_exceptions
def promote_types_of_jax_inputs(
x1: Union[ivy.Array, Number, Iterable[Number]],
x2: Union[ivy.Array, Number, Iterable[Number]],
/,
) -> Tuple[ivy.Array, ivy.Array]:
"""Promote the dtype of the given native array inputs to a common dtype
based on type promotion rules.
    Note that even when float or integer values, or other non-array inputs,
    are passed to this function, the returned values are array-like objects.
    Outputs from this function should therefore only be passed to functions
    that expect array-like or tensor-like objects; otherwise the results may
    be unexpected.
"""
type1 = ivy.default_dtype(item=x1).strip("u123456789")
type2 = ivy.default_dtype(item=x2).strip("u123456789")
if hasattr(x1, "dtype") and not hasattr(x2, "dtype") and type1 == type2:
x2 = ivy.asarray(
x2, dtype=x1.dtype, device=ivy.default_device(item=x1, as_native=False)
)
elif not hasattr(x1, "dtype") and hasattr(x2, "dtype") and type1 == type2:
x1 = ivy.asarray(
x1, dtype=x2.dtype, device=ivy.default_device(item=x2, as_native=False)
)
else:
x1 = ivy.asarray(x1)
x2 = ivy.asarray(x2)
x1_type, x2_type = x1.dtype, x2.dtype
if x1_type != x2_type:
x1_type = _handle_x64_promotion(x1_type)
x2_type = _handle_x64_promotion(x2_type)
promoted = _handle_x64_promotion(promote_types_jax(x1_type, x2_type))
x1 = ivy.asarray(x1, dtype=promoted)
x2 = ivy.asarray(x2, dtype=promoted)
return x1, x2
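# A minimal usage sketch (illustrative only; the helper below is not part of
# this module's API): a mixed int32/float32 pair promotes to float32 via the
# jax promotion table above.
def _demo_promote_types_of_jax_inputs():
    x1 = ivy.asarray([1, 2, 3], dtype="int32")
    x2 = ivy.asarray([1.0, 2.0, 3.0], dtype="float32")
    y1, y2 = promote_types_of_jax_inputs(x1, x2)
    # (int32, float32) -> float32, so both outputs share that dtype
    assert y1.dtype == y2.dtype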
from . import fft
from . import linalg
from . import creation
from .creation import *
from .dtype import *
from .scalars import *
from . import indexing
from .indexing import *
from . import logic
from .logic import *
from . import manipulations
from .manipulations import *
from . import mathematical_functions
from .mathematical_functions import *
from . import statistical
from .statistical import *
from . import searching_sorting
from .searching_sorting import *
_frontend_array = array
# --- end of ivy/ivy/functional/frontends/jax/numpy/__init__.py ---
import ivy
from ivy.functional.frontends.mxnet.func_wrapper import to_ivy_arrays_and_back
from ivy.functional.frontends.numpy.func_wrapper import handle_numpy_dtype
@handle_numpy_dtype
@to_ivy_arrays_and_back
def softmax(data, length=None, axis=-1, temperature=None, use_length=False, dtype=None):
ret = ivy.softmax(data, axis=axis)
if dtype:
ivy.utils.assertions.check_elem_in_list(
dtype, ["float16", "float32", "float64"]
)
ret = ivy.astype(ret, dtype)
return ret
# --- end of ivy/ivy/functional/frontends/mxnet/numpy_extension/_op.py ---
from . import discrete_fourier_transform
from .discrete_fourier_transform import *
# --- end of ivy/ivy/functional/frontends/numpy/fft/__init__.py ---
from . import array_contents
from .array_contents import *
from . import array_type_testing
from .array_type_testing import *
from . import comparison
from .comparison import *
from . import logical_operations
from .logical_operations import *
from . import truth_value_testing
from .truth_value_testing import *
# --- end of ivy/ivy/functional/frontends/numpy/logic/__init__.py ---
# local
import ivy
from ivy.functional.frontends.numpy.func_wrapper import (
to_ivy_arrays_and_back,
)
@to_ivy_arrays_and_back
def asanyarray(a, dtype=None, order=None, like=None):
return ivy.asarray(a)
@to_ivy_arrays_and_back
def asarray_chkfinite(a, dtype=None, order=None):
a = ivy.asarray(a, dtype=dtype)
if not ivy.all(ivy.isfinite(a)):
raise ValueError("array must not contain infs or NaNs")
return a
@to_ivy_arrays_and_back
def asfarray(a, dtype=ivy.float64):
    if not ivy.is_float_dtype(dtype):
        dtype = ivy.float64
    return ivy.asarray(a, dtype=dtype)
@to_ivy_arrays_and_back
def broadcast_to(array, shape, subok=False):
return ivy.broadcast_to(array, shape)
@to_ivy_arrays_and_back
def moveaxis(a, source, destination):
return ivy.moveaxis(a, source, destination)
@to_ivy_arrays_and_back
def ravel(a, order="C"):
return ivy.reshape(a, shape=(-1,), order=order)
@to_ivy_arrays_and_back
def require(a, dtype=None, requirements=None, *, like=None):
return ivy.asarray(a, dtype=dtype)
@to_ivy_arrays_and_back
def reshape(x, /, newshape, order="C"):
return ivy.reshape(x, shape=newshape, order=order)
@to_ivy_arrays_and_back
def resize(x, newshape, /, refcheck=True):
if isinstance(newshape, int):
newshape = (newshape,)
x_new = ivy.reshape(x, shape=(-1,), order="C")
total_size = 1
for diff_size in newshape:
total_size *= diff_size
if diff_size < 0:
raise ValueError("values must not be negative")
if x_new.size == 0 or total_size == 0:
return ivy.zeros_like(x_new)
repetition = -(-total_size // len(x_new))
conc = (x_new,) * repetition
x_new = ivy.concat(conc)[:total_size]
y = ivy.reshape(x_new, shape=newshape, order="C")
return y
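# A minimal usage sketch (illustrative only; `_demo_resize` is not part of the
# frontend API): unlike `reshape`, `resize` repeats the flattened input as
# needed to fill the requested shape.
def _demo_resize():
    x = ivy.asarray([1, 2, 3])
    y = resize(x, (2, 4))
    # the three elements cycle to fill eight slots: [[1, 2, 3, 1], [2, 3, 1, 2]]
    assert tuple(y.shape) == (2, 4)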
# --- end of ivy/ivy/functional/frontends/numpy/manipulation_routines/changing_array_shape.py ---
# global
import ivy
# local
from ivy.functional.frontends.numpy.func_wrapper import (
to_ivy_arrays_and_back,
handle_numpy_casting,
handle_numpy_dtype,
from_zero_dim_arrays_to_scalar,
handle_numpy_out,
)
from ivy.func_wrapper import with_supported_dtypes
# --- Helpers --- #
# --------------- #
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _absolute(
x,
/,
out=None,
*,
where=True,
casting="same_kind",
order="K",
dtype=None,
subok=True,
):
ret = ivy.abs(x)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _cbrt(
x,
/,
out=None,
*,
where=True,
casting="same_kind",
order="K",
dtype=None,
subok=True,
):
all_positive = ivy.pow(ivy.abs(x), 1.0 / 3.0)
ret = ivy.where(ivy.less(x, 0.0), ivy.negative(all_positive), all_positive)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _clip(
a,
a_min,
a_max,
/,
out=None,
*,
where=True,
casting="same_kind",
order="K",
dtype=None,
subok=True,
):
ivy.utils.assertions.check_all_or_any_fn(
a_min,
a_max,
fn=ivy.exists,
type="any",
limit=[1, 2],
message="at most one of a_min and a_max can be None",
)
if a_min is None:
ret = ivy.minimum(a, a_max, out=out)
elif a_max is None:
ret = ivy.maximum(a, a_min, out=out)
else:
ret = ivy.clip(a, a_min, a_max, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _copysign(
x1,
x2,
/,
out=None,
*,
where=True,
casting="same_kind",
order="k",
dtype=None,
subok=True,
):
ret = ivy.copysign(x1, x2, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _fabs(
x,
/,
out=None,
*,
where=True,
casting="same_kind",
order="K",
dtype=None,
subok=True,
):
ret = ivy.abs(x)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
@with_supported_dtypes(
{"1.26.3 and below": ("int8", "int16", "int32", "int64")}, "numpy"
)
def _gcd(
x1,
x2,
/,
out=None,
*,
where=True,
casting="same_kind",
order="K",
dtype=None,
subok=True,
):
ret = ivy.gcd(x1, x2, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _heaviside(
x1,
x2,
/,
out=None,
*,
where=True,
casting="same_kind",
order="K",
dtype=None,
subok=True,
):
ret = ivy.heaviside(x1, x2, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def _lcm(
x1,
x2,
/,
out=None,
*,
where=True,
casting="same_kind",
order="K",
dtype=None,
subok=True,
):
ret = ivy.lcm(x1, x2, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _reciprocal(
x,
/,
out=None,
*,
where=True,
casting="same_kind",
order="K",
dtype=None,
subok=True,
):
ret = ivy.reciprocal(x)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _sign(
x,
/,
out=None,
*,
where=True,
casting="same_kind",
order="K",
dtype=None,
subok=True,
):
ret = ivy.sign(x, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _sqrt(
x,
/,
out=None,
*,
where=True,
casting="same_kind",
order="K",
dtype=None,
subok=True,
):
ret = ivy.sqrt(x)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@handle_numpy_casting
@from_zero_dim_arrays_to_scalar
def _square(
x,
/,
out=None,
*,
where=True,
casting="same_kind",
order="K",
dtype=None,
subok=True,
):
ret = ivy.square(x)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
# --- Main --- #
# ------------ #
@to_ivy_arrays_and_back
def convolve(a, v, mode="full"):
if a.ndim != 1 or v.ndim != 1:
raise ValueError("convolve() only support 1-dimensional inputs.")
if a.shape[0] < v.shape[0]:
a, v = v, a
v = ivy.flip(v)
out_order = slice(None)
if mode == "valid":
padding = [(0, 0)]
elif mode == "same":
padding = [(v.shape[0] // 2, v.shape[0] - v.shape[0] // 2 - 1)]
elif mode == "full":
padding = [(v.shape[0] - 1, v.shape[0] - 1)]
result = ivy.conv_general_dilated(
a[None, None, :],
v[:, None, None],
(1,),
padding,
dims=1,
data_format="channel_first",
)
return result[0, 0, out_order]
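# A small sketch of the three modes (illustrative only; the helper is not part
# of the frontend API): for len(a) == n and len(v) == m, "full" yields
# n + m - 1 outputs, "same" yields n, and "valid" yields n - m + 1.
def _demo_convolve_modes():
    a = ivy.asarray([1.0, 2.0, 3.0, 4.0])
    v = ivy.asarray([1.0, 1.0])
    assert convolve(a, v, mode="full").shape[0] == 5
    assert convolve(a, v, mode="same").shape[0] == 4
    assert convolve(a, v, mode="valid").shape[0] == 3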
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def interp(x, xp, fp, left=None, right=None, period=None):
return ivy.interp(x, xp, fp, left=left, right=right, period=period)
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):
bounds = ivy.finfo(x.dtype)
if posinf is None:
posinf = bounds.max
if neginf is None:
neginf = bounds.min
pos_where = ivy.isinf(x, detect_negative=False)
neg_where = ivy.isinf(x, detect_positive=False)
nan_where = ivy.isnan(x)
ret = ivy.where(nan_where, nan, x)
ret = ivy.where(pos_where, posinf, ret)
ret = ivy.where(neg_where, neginf, ret)
ret = ret.astype(x.dtype, copy=False)
if not copy:
return ivy.inplace_update(x, ret)
return ret
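# Usage sketch (illustrative only; the helper is not part of the frontend
# API): NaNs map to `nan`, and +/-inf map to the dtype's extremes unless
# explicit posinf/neginf replacements are given.
def _demo_nan_to_num():
    x = ivy.asarray([float("nan"), float("inf"), -float("inf"), 1.0])
    y = nan_to_num(x, nan=0.0, posinf=1e9, neginf=-1e9)
    # expected: [0.0, 1e9, -1e9, 1.0]
    assert tuple(y.shape) == (4,)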
@to_ivy_arrays_and_back
def real_if_close(a, tol=100):
a = ivy.array(a, dtype=a.dtype)
dtype_ = a.dtype
if not ivy.is_complex_dtype(dtype_):
return a
if tol > 1:
f = ivy.finfo(dtype_)
tol = f.eps * tol
if ivy.all(ivy.abs(ivy.imag(a)) < tol):
a = ivy.real(a)
return a
# --- end of ivy/ivy/functional/frontends/numpy/mathematical_functions/miscellaneous.py ---
from . import scalars
from .scalars import *
# --- end of ivy/ivy/functional/frontends/numpy/scalars/__init__.py ---
from ivy.functional.frontends.onnx.proto import NodeProto
from ivy_tests.test_ivy.helpers.testing_helpers import _import_fn
def make_node(
op_type, inputs, outputs, name=None, doc_string=None, domain=None, **kwargs
):
# keep things upper case to follow ONNX naming convention
fn_tree = "ivy.functional.frontends.onnx." + op_type
callable_fn, fn_name, fn_mod = _import_fn(fn_tree)
node = NodeProto()
node._fn = callable_fn
node._fn_mod = fn_mod
node._fn_name = fn_name
node.input = inputs
node.output = outputs
node.name = name
return node
# --- end of ivy/ivy/functional/frontends/onnx/helper.py ---
# local
import ivy
from ivy.func_wrapper import with_supported_dtypes
from ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back
@to_ivy_arrays_and_back
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
def cosine_similarity(x1, x2, *, axis=1, eps=1e-08):
if len(x1.shape) == len(x2.shape) and len(x2.shape) >= 2:
numerator = ivy.sum(x1 * x2, axis=axis)
x1_squared_norm = ivy.sum(ivy.square(x1), axis=axis)
x2_squared_norm = ivy.sum(ivy.square(x2), axis=axis)
else:
numerator = ivy.sum(x1 * x2)
x1_squared_norm = ivy.sum(ivy.square(x1))
x2_squared_norm = ivy.sum(ivy.square(x2))
x1_norm = ivy.sqrt(x1_squared_norm)
x2_norm = ivy.sqrt(x2_squared_norm)
norm_mm = x1_norm * x2_norm
denominator = ivy.maximum(norm_mm, eps)
cosine = numerator / denominator
return cosine
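# Usage sketch (illustrative only; the helper below is not part of the
# frontend API): identical 2-D inputs give a similarity of 1 along axis 1.
def _demo_cosine_similarity():
    x = ivy.asarray([[1.0, 2.0], [3.0, 4.0]])
    sim = cosine_similarity(x, x, axis=1)
    # expected: [1.0, 1.0] (one similarity per row)
    assert tuple(sim.shape) == (2,)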
@to_ivy_arrays_and_back
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
def dropout(x, p=0.5, axis=None, training=True, mode="upscale_in_train", name=None):
    if axis is None or (isinstance(axis, list) and len(axis) == 2):
        mask = get_mask(shape=x.shape, device=ivy.dev(x), prob=p, seed=None)
    elif axis == 0:
        mask = get_mask(shape=(x.shape[0], 1), device=ivy.dev(x), prob=p)
        mask = ivy.broadcast_to(mask, x.shape)
    elif axis == 1:
        mask = get_mask(shape=(1, x.shape[1]), device=ivy.dev(x), prob=p)
        mask = ivy.broadcast_to(mask, x.shape)
    else:
        raise ValueError("Axis value can only be 0 or 1 or None.")
if mode == "upscale_in_train":
if training:
out = ivy.multiply(x, mask)
ret = ivy.multiply(out, 1.0 / (1.0 - p))
else:
ret = x
else:
if training:
ret = ivy.multiply(x, mask)
else:
ret = ivy.multiply(x, (1.0 - p))
return ret
@to_ivy_arrays_and_back
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
def dropout2d(x, *, p=0.5, training=True, data_format="NCHW", name=None):
return ivy.dropout2d(x, p, training=training, data_format=data_format)
@to_ivy_arrays_and_back
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
def dropout3d(x, p=0.5, training=True, data_format="NCDHW", name=None):
return ivy.dropout3d(x, p, training=training, data_format=data_format)
def get_mask(shape, device, prob, seed=None):
mask = ivy.where(
ivy.random_uniform(shape=shape, device=device, seed=seed) < prob,
0.0,
1.0,
)
return mask
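# A minimal sketch of the two modes handled above (illustrative only; the
# helper is not part of the frontend API): in "upscale_in_train" the kept
# activations are rescaled by 1 / (1 - p) during training, and inference is
# the identity.
def _demo_dropout_modes():
    x = ivy.ones((4, 4))
    train_out = dropout(x, p=0.5, training=True, mode="upscale_in_train")
    eval_out = dropout(x, p=0.5, training=False, mode="upscale_in_train")
    # training keeps the shape; inference returns the input unchanged
    assert tuple(train_out.shape) == (4, 4)
    assert tuple(eval_out.shape) == (4, 4)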
@to_ivy_arrays_and_back
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
def interpolate(
x,
size=None,
scale_factor=None,
mode="nearest",
align_corners=False,
align_mode=0,
data_format="NCHW",
name=None,
):
return ivy.interpolate(
x, size, mode=mode, scale_factor=scale_factor, align_corners=align_corners
)
@to_ivy_arrays_and_back
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
def linear(x, weight, bias=None, name=None):
weight = ivy.swapaxes(weight, -1, -2)
return ivy.linear(x, weight, bias=bias)
@to_ivy_arrays_and_back
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
def unfold(x, kernel_sizes, strides=1, paddings=0, dilations=1, name=None):
# Input checking
if isinstance(kernel_sizes, int):
kernel_sizes = [kernel_sizes, kernel_sizes]
elif not isinstance(kernel_sizes, (list, tuple)):
raise ivy.exceptions.IvyError(
"Expected kernel size input as type int, tuple or list but got"
f" {type(kernel_sizes)}"
)
if isinstance(strides, int):
strides = [strides, strides]
elif not isinstance(strides, (list, tuple)):
raise ivy.exceptions.IvyError(
f"Expected strides input as type int, tuple or list but got {type(strides)}"
)
if isinstance(dilations, int):
dilations = [dilations, dilations]
elif not isinstance(dilations, (list, tuple)):
raise ivy.exceptions.IvyError(
"Expected dilations input as type int, tuple or list but got"
f" {type(dilations)}"
)
if isinstance(paddings, int):
paddings = [paddings, paddings]
elif not isinstance(paddings, (list, tuple)):
raise ivy.exceptions.IvyError(
"Expected paddings, input as type int, tuple or list but got"
f" {type(paddings)}"
)
n, c, h, w = x.shape
# Padding
    if paddings[0] >= 0 and paddings[1] >= 0:
padding_tup = [(0, 0) for i in range(2)] + [
(paddings[0], paddings[0]),
(paddings[1], paddings[1]),
]
x = ivy.pad(x, padding_tup, mode="constant", constant_values=0.0)
else:
        raise ivy.exceptions.IvyError(
            "Expected non-negative padding sizes but got"
            f" {paddings[0]}/{paddings[1]}"
        )
# Expected input shape
h_steps = int(
(h + (paddings[0] * 2) - dilations[0] * (kernel_sizes[0] - 1) - 1) / strides[0]
+ 1
)
w_steps = int(
(w + (paddings[1] * 2) - dilations[1] * (kernel_sizes[1] - 1) - 1) / strides[1]
+ 1
)
if h_steps < 1 or w_steps < 1:
        raise ivy.exceptions.IvyError(
            "Expected at least one step for height and width, but got expected"
            f" output shape H:{h_steps} W:{w_steps}"
        )
# sliding windows
folder = []
for i in range(0, h_steps * strides[0], strides[0]):
for j in range(0, w_steps * strides[1], strides[1]):
window = x[
:,
:,
i : i + dilations[0] * (kernel_sizes[0] - 1) + 1 : dilations[0],
j : j + dilations[1] * (kernel_sizes[1] - 1) + 1 : dilations[1],
]
window = ivy.flatten(window, start_dim=1)
folder.append(window)
return ivy.stack(folder, axis=2)
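# Shape sketch (illustrative only; the helper is not part of the frontend
# API): for an NCHW input, unfold returns (N, C * kh * kw, L) with
# L = h_steps * w_steps as computed above.
def _demo_unfold_shape():
    x = ivy.ones((1, 3, 8, 8))
    y = unfold(x, kernel_sizes=2, strides=2, paddings=0, dilations=1)
    # h_steps = w_steps = 4, so L = 16, and the channel dim is 3 * 2 * 2 = 12
    assert tuple(y.shape) == (1, 12, 16)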
@to_ivy_arrays_and_back
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
def zeropad2d(x, padding, data_format="NCHW", name=None):
if ivy.is_array(padding):
padding = padding.to_list()
if isinstance(padding, int):
padding = [padding, padding, padding, padding]
if len(padding) != 4:
raise ValueError("Padding length should be 4.")
if x.ndim != 4:
raise ValueError("Input x must be 4-dimensional.")
if data_format == "NCHW":
padding = ((0, 0), (0, 0), (padding[2], padding[3]), (padding[0], padding[1]))
elif data_format == "NHWC":
padding = ((0, 0), (padding[2], padding[3]), (padding[0], padding[1]), (0, 0))
else:
raise ValueError(f"Unknown data_format: {data_format}")
return ivy.pad(x, padding, mode="constant", constant_values=0.0)
# --- end of ivy/ivy/functional/frontends/paddle/nn/functional/common.py ---
# local
from ..logic import * # noqa: F401
# --- end of ivy/ivy/functional/frontends/paddle/tensor/logic.py ---
from . import vq
from . import hierarchy
# --- end of ivy/ivy/functional/frontends/scipy/cluster/__init__.py ---
# global
import ivy
from ivy.functional.frontends.numpy.func_wrapper import to_ivy_arrays_and_back
# --- Helpers --- #
# --------------- #
def _check_finite(a):
if not ivy.all(ivy.isfinite(a)):
raise ValueError("Array must not contain infs or NaNs")
# --- Main --- #
# ------------ #
# eigh_tridiagonal
@to_ivy_arrays_and_back
def eigh_tridiagonal(
d,
e,
/,
*,
eigvals_only=False,
select="a",
select_range=None,
check_finite=True,
tol=0.0,
):
if check_finite:
_check_finite(d)
_check_finite(e)
return ivy.eigh_tridiagonal(
d,
e,
eigvals_only=eigvals_only,
select=select,
select_range=select_range,
tol=tol,
)
# inv
@to_ivy_arrays_and_back
def inv(a, /, *, overwrite_a=False, check_finite=True):
if check_finite:
_check_finite(a)
if len(a.shape) != 2 or a.shape[0] != a.shape[1]:
raise ValueError("Expected a square matrix")
return ivy.inv(a)
# kron
@to_ivy_arrays_and_back
def kron(a, b):
return ivy.kron(a, b)
# lu_factor
@to_ivy_arrays_and_back
def lu_factor(a, /, *, overwrite_a=False, check_finite=True):
if check_finite:
_check_finite(a)
return ivy.lu_factor(a)
# norm
@to_ivy_arrays_and_back
def norm(a, /, *, ord=None, axis=None, keepdims=False, check_finite=True):
if check_finite:
_check_finite(a)
if axis is None and ord is not None:
if a.ndim not in (1, 2):
raise ValueError("Improper number of dimensions to norm.")
else:
if a.ndim == 1:
ret = ivy.vector_norm(a, axis=axis, keepdims=keepdims, ord=ord)
else:
ret = ivy.matrix_norm(a, keepdims=keepdims, ord=ord)
elif axis is None and ord is None:
a = ivy.flatten(a)
ret = ivy.vector_norm(a, axis=0, keepdims=keepdims, ord=2)
if isinstance(axis, int):
ret = ivy.vector_norm(a, axis=axis, keepdims=keepdims, ord=ord)
elif isinstance(axis, tuple):
ret = ivy.matrix_norm(a, axis=axis, keepdims=keepdims, ord=ord)
return ret
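# Usage sketch (illustrative only; the helper is not part of the frontend
# API): with axis=None and ord=None the input is flattened and the 2-norm
# taken; an int axis gives per-vector norms.
def _demo_norm():
    a = ivy.asarray([[3.0, 0.0], [0.0, 4.0]])
    frobenius = norm(a)  # sqrt(9 + 16) == 5
    rows = norm(a, ord=2, axis=1)  # per-row vector norms: [3, 4]
    assert float(frobenius) == 5.0
    assert tuple(rows.shape) == (2,)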
# pinv
@to_ivy_arrays_and_back
def pinv(
a,
/,
*,
atol=None,
rtol=None,
return_rank=False,
cond=None,
rcond=None,
check_finite=True,
):
if check_finite:
_check_finite(a)
if (rcond or cond) and (atol is None) and (rtol is None):
atol = rcond or cond
rtol = 0.0
inverse = ivy.pinv(a, rtol=rtol)
if return_rank:
rank = ivy.matrix_rank(a)
return inverse, rank
return inverse
# svd
@to_ivy_arrays_and_back
def svd(
a, /, *, full_matrices=True, compute_uv=True, overwrite_a=False, check_finite=True
):
if check_finite:
_check_finite(a)
return ivy.svd(a, full_matrices=full_matrices, compute_uv=compute_uv)
# svdvals
@to_ivy_arrays_and_back
def svdvals(a, /, *, overwrite_a=False, check_finite=True):
if check_finite:
_check_finite(a)
return ivy.svdvals(a)
# Functions #
# --------- #
# tril
@to_ivy_arrays_and_back
def tril(m, /, *, k=0):
return ivy.tril(m, k=k)
# triu
@to_ivy_arrays_and_back
def triu(m, /, *, k=0):
return ivy.triu(m, k=k)
# --- end of ivy/ivy/functional/frontends/scipy/linalg/linalg.py ---
import ivy
from ivy.functional.frontends.numpy.func_wrapper import to_ivy_arrays_and_back
from sklearn.utils.multiclass import type_of_target
@to_ivy_arrays_and_back
def accuracy_score(y_true, y_pred, *, normalize=True, sample_weight=None):
# TODO: implement sample_weight
y_type = type_of_target(y_true)
if y_type.startswith("multilabel"):
diff_labels = ivy.count_nonzero(y_true - y_pred, axis=1)
ret = ivy.equal(diff_labels, 0).astype("int64")
else:
ret = ivy.equal(y_true, y_pred).astype("int64")
ret = ret.sum().astype("int64")
if normalize:
ret = ret / y_true.shape[0]
ret = ret.astype("float64")
return ret
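# Minimal sketch (illustrative only; the helper is not part of the frontend
# API): normalize=True returns the fraction of exact matches, normalize=False
# the raw count.
def _demo_accuracy_score():
    y_true = ivy.asarray([0, 1, 1, 0])
    y_pred = ivy.asarray([0, 1, 0, 0])
    # three of the four predictions match
    assert float(accuracy_score(y_true, y_pred)) == 0.75
    assert int(accuracy_score(y_true, y_pred, normalize=False)) == 3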
# --- end of ivy/ivy/functional/frontends/sklearn/metrics/_classification.py ---
from . import nn
from . import general_functions
from .general_functions import *
# --- end of ivy/ivy/functional/frontends/tensorflow/compat/v1/__init__.py ---
# local
import ivy.functional.frontends.tensorflow.ragged as ragged_tf
from ivy.functional.frontends.tensorflow.func_wrapper import to_ivy_arrays_and_back
import ivy
# --- Helpers --- #
# --------------- #
def _flatten_composite_array(x, expand_composites=False):
if isinstance(x, ragged_tf.RaggedTensor):
if not expand_composites:
return x
new_struc = [x.flat_values]
for row_split in x.nested_row_splits:
new_struc.append(row_split)
return new_struc
elif ivy.is_ivy_sparse_array(x) or ivy.is_native_sparse_array(x):
return ivy.native_sparse_array_to_indices_values_and_shape(x)
def _is_composite_array(x):
if isinstance(x, ragged_tf.RaggedTensor):
return True
if ivy.is_ivy_sparse_array(x) or ivy.is_native_sparse_array(x):
return True
return False
# --- Main --- #
# ------------ #
@to_ivy_arrays_and_back
def flatten(structure, expand_composites=False):
if expand_composites and _is_composite_array(structure):
return _flatten_composite_array(structure, expand_composites=expand_composites)
    elif isinstance(structure, (tuple, list)):
        return [
            x
            for child in structure
            for x in flatten(child, expand_composites=expand_composites)
        ]
    elif isinstance(structure, dict):
        return [
            x
            for key in sorted(structure)
            for x in flatten(structure[key], expand_composites=expand_composites)
        ]
return [structure]
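# Behaviour sketch (illustrative only; the helper is not part of the frontend
# API): nested lists flatten depth-first, and dict values are visited in
# sorted-key order.
def _demo_flatten():
    structure = {"b": [1, 2], "a": 3}
    assert flatten(structure) == [3, 1, 2]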
# --- end of ivy/ivy/functional/frontends/tensorflow/nest.py ---
# global
import ivy
import ivy.functional.frontends.torch as torch_frontend
from ivy.func_wrapper import with_unsupported_dtypes
from ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back
# local
from collections import namedtuple
# --- Helpers --- #
# --------------- #
def _compute_allclose_with_tol(input, other, rtol, atol):
input, other = torch_frontend.promote_types_of_torch_inputs(input, other)
return ivy.all(
ivy.less_equal(
ivy.abs(ivy.subtract(input, other)),
ivy.add(atol, ivy.multiply(rtol, ivy.abs(other))),
)
)
def _compute_isclose_with_tol(input, other, rtol, atol):
input, other = torch_frontend.promote_types_of_torch_inputs(input, other)
return ivy.less_equal(
ivy.abs(ivy.subtract(input, other)),
ivy.add(atol, ivy.multiply(rtol, ivy.abs(other))),
)
# --- Main --- #
# ------------ #
@to_ivy_arrays_and_back
def allclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False):
input, other = torch_frontend.promote_types_of_torch_inputs(input, other)
finite_input = ivy.isfinite(input)
finite_other = ivy.isfinite(other)
if ivy.all(finite_input) and ivy.all(finite_other):
ret = _compute_allclose_with_tol(input, other, rtol, atol)
return ivy.all_equal(True, ret)
else:
finites = ivy.bitwise_and(finite_input, finite_other)
ret = ivy.zeros_like(finites)
ret_ = ret.astype(int)
input = input * ivy.ones_like(ret_)
other = other * ivy.ones_like(ret_)
ret[finites] = _compute_allclose_with_tol(
input[finites], other[finites], rtol, atol
)
nans = ivy.bitwise_invert(finites)
ret[nans] = ivy.equal(input[nans], other[nans])
if equal_nan:
both_nan = ivy.bitwise_and(ivy.isnan(input), ivy.isnan(other))
ret[both_nan] = both_nan[both_nan]
return ivy.all(ret)
@with_unsupported_dtypes({"2.2 and below": ("complex",)}, "torch")
@to_ivy_arrays_and_back
def argsort(input, dim=-1, descending=False):
return ivy.argsort(input, axis=dim, descending=descending)
@to_ivy_arrays_and_back
def eq(input, other, *, out=None):
input, other = torch_frontend.promote_types_of_torch_inputs(input, other)
return ivy.equal(input, other, out=out)
@to_ivy_arrays_and_back
def equal(input, other):
input, other = torch_frontend.promote_types_of_torch_inputs(input, other)
return ivy.all(ivy.equal(input, other))
@to_ivy_arrays_and_back
def fmax(input, other, *, out=None):
input, other = torch_frontend.promote_types_of_torch_inputs(input, other)
return ivy.where(
ivy.bitwise_or(ivy.greater(input, other), ivy.isnan(other)),
input,
other,
out=out,
)
@to_ivy_arrays_and_back
def fmin(input, other, *, out=None):
input, other = torch_frontend.promote_types_of_torch_inputs(input, other)
return ivy.where(
ivy.bitwise_or(ivy.less(input, other), ivy.isnan(other)),
input,
other,
out=out,
)
@with_unsupported_dtypes({"2.2 and below": ("complex64", "complex128")}, "torch")
@to_ivy_arrays_and_back
def greater(input, other, *, out=None):
input, other = torch_frontend.promote_types_of_torch_inputs(input, other)
return ivy.greater(input, other, out=out)
@with_unsupported_dtypes({"2.2 and below": ("complex64", "complex128")}, "torch")
@to_ivy_arrays_and_back
def greater_equal(input, other, *, out=None):
input, other = torch_frontend.promote_types_of_torch_inputs(input, other)
return ivy.greater_equal(input, other, out=out)
@to_ivy_arrays_and_back
def isclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False):
input, other = torch_frontend.promote_types_of_torch_inputs(input, other)
finite_input = ivy.isfinite(input)
finite_other = ivy.isfinite(other)
if ivy.all(finite_input) and ivy.all(finite_other):
return _compute_isclose_with_tol(input, other, rtol, atol)
else:
finites = ivy.bitwise_and(finite_input, finite_other)
ret = ivy.zeros_like(finites)
ret_ = ret.astype(int)
input = input * ivy.ones_like(ret_)
other = other * ivy.ones_like(ret_)
ret[finites] = _compute_isclose_with_tol(
input[finites], other[finites], rtol, atol
)
nans = ivy.bitwise_invert(finites)
ret[nans] = ivy.equal(input[nans], other[nans])
if equal_nan:
both_nan = ivy.bitwise_and(ivy.isnan(input), ivy.isnan(other))
ret[both_nan] = both_nan[both_nan]
return ret
@to_ivy_arrays_and_back
def isfinite(input):
return ivy.isfinite(input)
@with_unsupported_dtypes(
{"2.2 and below": ("float16", "bfloat16", "complex", "bool")}, "torch"
)
@to_ivy_arrays_and_back
def isin(elements, test_elements, *, assume_unique=False, invert=False):
input_elements_copy = ivy.reshape(ivy.to_ivy(elements), (-1,))
test_elements_copy = ivy.reshape(ivy.to_ivy(test_elements), (-1,))
if (
ivy.shape(test_elements_copy)[0]
< 10 * ivy.shape(input_elements_copy)[0] ** 0.145
):
if invert:
            mask = ivy.ones(ivy.shape(input_elements_copy), dtype=bool)
for a in test_elements_copy:
mask &= input_elements_copy != a
else:
            mask = ivy.zeros(ivy.shape(input_elements_copy), dtype=bool)
for a in test_elements_copy:
mask |= input_elements_copy == a
return ivy.reshape(mask, ivy.shape(elements))
if not assume_unique:
input_elements_copy, rev_idx = ivy.unique_inverse(input_elements_copy)
test_elements_copy = ivy.sort(ivy.unique_values(test_elements_copy))
ar = ivy.concat((input_elements_copy, test_elements_copy))
order = ivy.argsort(ar, stable=True)
sar = ar[order]
if invert:
bool_ar = sar[1:] != sar[:-1]
else:
bool_ar = sar[1:] == sar[:-1]
flag = ivy.concat((bool_ar, ivy.array([invert])))
ret = ivy.empty(ivy.shape(ar), dtype=bool)
ret[order] = flag
if assume_unique:
return ivy.reshape(
ret[: ivy.shape(input_elements_copy)[0]], ivy.shape(elements)
)
else:
return ivy.reshape(ret[rev_idx], ivy.shape(elements))
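# Usage sketch (illustrative only; the helper is not part of the frontend
# API): isin tests each element of `elements` for membership in
# `test_elements`, preserving the input's shape.
def _demo_isin():
    elements = ivy.asarray([[1, 2], [3, 4]])
    test_elements = ivy.asarray([2, 3])
    mask = isin(elements, test_elements)
    # expected: [[False, True], [True, False]]
    assert tuple(mask.shape) == (2, 2)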
@to_ivy_arrays_and_back
def isinf(input):
return ivy.isinf(input)
@to_ivy_arrays_and_back
def isnan(input):
return ivy.isnan(input)
@with_unsupported_dtypes({"2.2 and below": ("complex",)}, "torch")
@to_ivy_arrays_and_back
def isneginf(input, *, out=None):
is_inf = ivy.isinf(input)
neg_sign_bit = ivy.less(input, 0)
return ivy.logical_and(is_inf, neg_sign_bit, out=out)
@with_unsupported_dtypes({"2.2 and below": ("complex",)}, "torch")
@to_ivy_arrays_and_back
def isposinf(input, *, out=None):
is_inf = ivy.isinf(input)
pos_sign_bit = ivy.bitwise_invert(ivy.less(input, 0))
return ivy.logical_and(is_inf, pos_sign_bit, out=out)
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
@to_ivy_arrays_and_back
def isreal(input):
return ivy.isreal(input)
@with_unsupported_dtypes(
{"2.2 and below": ("bfloat16", "float16", "bool", "complex")}, "torch"
)
@to_ivy_arrays_and_back
def kthvalue(input, k, dim=-1, keepdim=False, *, out=None):
sorted_input = ivy.sort(input, axis=dim)
sort_indices = ivy.argsort(input, axis=dim)
values = ivy.asarray(
ivy.gather(sorted_input, ivy.array(k - 1), axis=dim), dtype=input.dtype
)
indices = ivy.asarray(
ivy.gather(sort_indices, ivy.array(k - 1), axis=dim), dtype="int64"
)
if keepdim:
values = ivy.expand_dims(values, axis=dim)
indices = ivy.expand_dims(indices, axis=dim)
ret = namedtuple("sort", ["values", "indices"])(values, indices)
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
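# Usage sketch (illustrative only; the helper is not part of the frontend
# API): kthvalue returns the k-th smallest entry along `dim` together with
# its index, as a (values, indices) namedtuple.
def _demo_kthvalue():
    x = ivy.asarray([3.0, 1.0, 2.0])
    result = kthvalue(x, k=2)
    # the 2nd smallest value is 2.0, found at index 2
    assert float(result.values) == 2.0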
@with_unsupported_dtypes({"2.2 and below": ("complex64", "complex128")}, "torch")
@to_ivy_arrays_and_back
def less(input, other, *, out=None):
input, other = torch_frontend.promote_types_of_torch_inputs(input, other)
return ivy.less(input, other, out=out)
@with_unsupported_dtypes({"2.2 and below": ("complex64", "complex128")}, "torch")
@to_ivy_arrays_and_back
def less_equal(input, other, *, out=None):
input, other = torch_frontend.promote_types_of_torch_inputs(input, other)
return ivy.less_equal(input, other, out=out)
@with_unsupported_dtypes({"2.2 and below": ("complex64", "complex128")}, "torch")
@to_ivy_arrays_and_back
def maximum(input, other, *, out=None):
input, other = torch_frontend.promote_types_of_torch_inputs(input, other)
return ivy.maximum(input, other, out=out)
@with_unsupported_dtypes({"2.2 and below": ("complex64", "complex128")}, "torch")
@to_ivy_arrays_and_back
def minimum(input, other, *, out=None):
input, other = torch_frontend.promote_types_of_torch_inputs(input, other)
return ivy.minimum(input, other, out=out)
@with_unsupported_dtypes({"2.2 and below": ("complex",)}, "torch")
@to_ivy_arrays_and_back
def msort(input, *, out=None):
return ivy.sort(input, axis=0, out=out)
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
@to_ivy_arrays_and_back
def not_equal(input, other, *, out=None):
input, other = torch_frontend.promote_types_of_torch_inputs(input, other)
return ivy.not_equal(input, other, out=out)
@with_unsupported_dtypes({"2.2 and below": ("complex",)}, "torch")
@to_ivy_arrays_and_back
# TODO: the original torch.sort places * right before `out`
def sort(input, *, dim=-1, descending=False, stable=False, out=None):
values = ivy.sort(input, axis=dim, descending=descending, stable=stable, out=out)
indices = ivy.argsort(input, axis=dim, descending=descending)
return namedtuple("sort", ["values", "indices"])(values, indices)
@with_unsupported_dtypes({"2.2 and below": ("float16", "complex")}, "torch")
@to_ivy_arrays_and_back
def topk(input, k, dim=None, largest=True, sorted=True, *, out=None):
if dim is None:
dim = -1
return ivy.top_k(input, k, axis=dim, largest=largest, sorted=sorted, out=out)
ge = greater_equal
gt = greater
le = less_equal
lt = less
ne = not_equal
# --- end of ivy/ivy/functional/frontends/torch/comparison_ops.py ---
# global
import ivy
import ivy.functional.frontends.torch as torch_frontend
from ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back
from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes
# --- Helpers --- #
# --------------- #
def _apply_reduction(reduction, size_average, reduce, to_reduce):
if size_average is not None or reduce is not None:
reduction = _get_reduction_string(size_average, reduce)
return _get_reduction_method(reduction, to_reduce)
def _get_reduction(reduction, size_average=None, reduce=None):
if size_average is not None or reduce is not None:
return _get_reduction_func(_get_reduction_string(size_average, reduce))
else:
return _get_reduction_func(reduction)
def _get_reduction_func(reduction):
if reduction == "none":
def ret(x):
return x
elif reduction == "mean":
ret = ivy.mean
elif reduction == "sum":
ret = ivy.sum
else:
raise ivy.utils.exceptions.IvyException(
f"{reduction} is not a valid value for reduction"
)
return ret
def _get_reduction_method(reduction, to_reduce):
if reduction == "none":
ret = to_reduce
elif reduction == "mean":
ret = ivy.mean(to_reduce)
elif reduction == "sum":
ret = ivy.sum(to_reduce)
else:
raise ivy.utils.exceptions.IvyException(
f"{reduction} is not a valid value for reduction"
)
return ret
def _get_reduction_string(size_average, reduce):
if size_average is None:
size_average = True
if reduce is None:
reduce = True
if size_average and reduce:
ret = "mean"
elif reduce:
ret = "sum"
else:
ret = "none"
return ret
# --- Main --- #
# ------------ #
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
def binary_cross_entropy(
input, target, weight=None, size_average=None, reduce=None, reduction="mean"
):
    if size_average is not None or reduce is not None:
        reduction = _get_reduction_string(size_average, reduce)
    result = ivy.binary_cross_entropy(target, input, epsilon=0.0, reduction="none")
    if weight is not None:
        result = ivy.multiply(weight, result)
    return _get_reduction_method(reduction, result)
@to_ivy_arrays_and_back
def binary_cross_entropy_with_logits(
input,
target,
weight=None,
size_average=None,
reduce=None,
reduction="mean",
pos_weight=None,
):
    if size_average is not None or reduce is not None:
        reduction = _get_reduction_string(size_average, reduce)
    result = ivy.binary_cross_entropy(
        target,
        input,
        reduction="none",
        from_logits=True,
        pos_weight=pos_weight,
    )
    if weight is not None:
        result = ivy.multiply(weight, result)
    return _get_reduction_method(reduction, result)
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
def cosine_embedding_loss(
input1, input2, target, margin=0.0, size_average=None, reduce=None, reduction="mean"
):
def norm(input, axis):
return ivy.sqrt(ivy.sum(ivy.square(input), axis=axis))
def cosine_similarity(x1, x2):
axis = None
if len(x1.shape) == len(x2.shape) and len(x2.shape) == 2:
axis = 1
input1_norm = norm(x1, axis=axis)
input2_norm = norm(x2, axis=axis)
norm_mm = input1_norm * input2_norm
norm_mm, eps = torch_frontend.promote_types_of_torch_inputs(norm_mm, 1e-08)
return ivy.sum(x1 * x2, axis=axis) / ivy.maximum(norm_mm, eps)
def calculate_loss(x1, x2, target):
cos = cosine_similarity(x1, x2)
if target == ivy.array(1.0):
loss = 1.0 - cos
elif target == ivy.array(-1.0):
loss = ivy.maximum(ivy.array(0.0), cos - ivy.array(margin))
else:
_, zero = torch_frontend.promote_types_of_torch_inputs(
input1, ivy.array(0.0)
)
return zero
return loss
ivy.utils.assertions.check_true(
target.ndim + 1 == input1.ndim and target.ndim + 1 == input2.ndim,
f"{target.ndim}D target tensor expects {target.ndim + 1}D input tensors, but "
f"found inputs with sizes {list(input1.shape)} and {list(input2.shape)}.",
)
ivy.utils.assertions.check_true(
target.ndim < 2, "0D or 1D target tensor expected, multi-target not supported"
)
ivy.utils.assertions.check_shape(input1, input2)
if target.ndim == 1:
ivy.utils.assertions.check_true(
target.shape[0] == input1.shape[0],
f"The size of target tensor ({target.shape[0]}) must match the size of"
f" input tensor ({input1.shape[0]}) at non-singleton dimension 0 ",
)
if target.ndim == 0:
loss = calculate_loss(input1, input2, target)
else:
loss = ivy.array(
[
calculate_loss(input1[i], input2[i], target[i])
for i in range(input1.shape[0])
]
)
reduction = _get_reduction(reduction, size_average, reduce)
loss = reduction(loss)
return loss
def cosine_similarity(x1, x2):
axis = None
if len(x1.shape) == len(x2.shape) and len(x2.shape) == 2:
axis = 1
input1_norm = norm(x1, axis=axis)
input2_norm = norm(x2, axis=axis)
norm_mm = input1_norm * input2_norm
norm_mm, eps = torch_frontend.promote_types_of_torch_inputs(norm_mm, 1e-08)
return ivy.sum(x1 * x2, axis=axis) / ivy.maximum(norm_mm, eps)
@to_ivy_arrays_and_back
def cross_entropy(
input,
target,
weight=None,
size_average=None,
ignore_index=-100,
reduce=None,
reduction="mean",
label_smoothing=0.0,
):
loss = ivy.cross_entropy(target, input, epsilon=label_smoothing, reduction="none")
if ignore_index != -100:
mask = ivy.not_equal(target, ignore_index)
loss = ivy.where(mask, loss, ivy.zeros_like(loss))
    if weight is not None:
        loss = ivy.multiply(weight, loss)
    reduction = _get_reduction(reduction, size_average, reduce)
    return reduction(loss).astype(target.dtype)
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("bool", "integer")}, "torch")
def gaussian_nll_loss(input, target, var, full=False, eps=1e-6, reduction="mean"):
input, target = torch_frontend.promote_types_of_torch_inputs(input, target)
target, var = torch_frontend.promote_types_of_torch_inputs(target, var)
if var.shape != input.shape:
if input.shape[:-1] == var.shape:
var = torch_frontend.unsqueeze(var, dim=2)
elif input.shape[:-1] == var.shape[:-1] and var.shape[-1] == 1:
pass
else:
raise ivy.utils.exceptions.IvyError("var is of incorrect size")
    if reduction not in ("none", "mean", "sum"):
        raise ivy.utils.exceptions.IvyError(f"{reduction} is not valid")
if ivy.any(var < 0):
raise ivy.utils.exceptions.IvyError("var has negative entry/entries")
var = ivy.maximum(var, eps)
loss = 0.5 * (ivy.log(var) + (input - target) ** 2 / var)
if full:
loss += 0.5 * ivy.log(2 * ivy.pi)
reduction = _get_reduction_func(reduction)
ret = reduction(loss)
return ret.astype(input.dtype)
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
def hinge_embedding_loss(
input,
target,
margin=1.0,
size_average=None,
reduce=None,
reduction="mean",
):
margin = ivy.array(margin)
loss = ivy.where(
ivy.logical_or(target == -1, target == 1),
ivy.where(target == 1, input, ivy.maximum(0, margin - input)),
ivy.maximum(margin, input),
)
reduction = _get_reduction(reduction, size_average, reduce)
ret = reduction(loss)
return ivy.astype(ret, input.dtype)
@to_ivy_arrays_and_back
def huber_loss(
input,
target,
reduction="mean",
delta=1.0,
):
return ivy.huber_loss(target, input, delta=delta, reduction=reduction)
@to_ivy_arrays_and_back
@with_supported_dtypes({"2.2 and below": ("float32", "float64")}, "torch")
def kl_div(
input, target, size_average=None, reduce=None, reduction="mean", log_target=False
):
orig_red = reduction
if size_average is not None or reduce is not None:
reduction = _get_reduction_string(size_average, reduce)
else:
reduction = reduction if reduction != "batchmean" else "sum"
ret = ivy.kl_div(input, target, reduction=reduction, log_target=log_target)
if orig_red == "batchmean" and input.ndim != 0:
ret = ret / input.shape[0]
return ret
@to_ivy_arrays_and_back
@with_supported_dtypes({"2.2 and below": ("float", "complex")}, "torch")
def l1_loss(
input,
target,
size_average=None,
reduce=None,
reduction="mean",
):
if size_average is not None or reduce is not None:
reduction = _get_reduction_string(size_average, reduce)
ret = ivy.l1_loss(input, target, reduction=reduction)
return ret
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
def margin_ranking_loss(
input1,
input2,
target,
margin=0.0,
size_average=None,
reduce=None,
reduction="mean",
):
input1, input2 = torch_frontend.promote_types_of_torch_inputs(input1, input2)
input2, target = torch_frontend.promote_types_of_torch_inputs(input2, target)
loss = -1 * target * (input1 - input2) + margin
loss = ivy.where(loss < 0, 0, loss)
reduction = _get_reduction(reduction, size_average, reduce)
return reduction(loss).astype(input1.dtype)
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("bfloat16",)}, "torch")
def mse_loss(input, target, size_average=None, reduce=None, reduction="mean"):
reduction = _get_reduction(reduction, size_average, reduce)
result = ivy.square(input - target)
result = reduction(result)
return result
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
def multilabel_margin_loss(
input, target, size_average=None, reduce=None, reduction="mean"
):
    ivy.utils.assertions.check_true(
input.shape == target.shape,
lambda: (
"Same shape is expected for both output and target, but instead got :"
f" output {input.shape} and target : {target.shape}"
),
)
input, target = torch_frontend.promote_types_of_torch_inputs(input, target)
pos = input[ivy.astype(target, bool)]
neg = input[ivy.astype(1 - target, bool)]
loss = ivy.maximum(0, 1 - (torch_frontend.unsqueeze(pos, dim=1) - neg))
reduct = _get_reduction(reduction, size_average, reduce)
return reduct(loss)
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
def multilabel_soft_margin_loss(
input,
target,
weight=None,
size_average=None,
reduce=None,
reduction="mean",
):
loss = -(
target * ivy.log(ivy.sigmoid(input))
+ (1 - target) * ivy.log(1 - ivy.sigmoid(input))
)
if weight is not None:
loss = ivy.multiply(weight, loss)
class_dim = ivy.get_num_dims(input) - 1
C = ivy.shape(input)[class_dim]
loss = ivy.sum(loss, axis=class_dim) / C
reduction = _get_reduction(reduction, size_average, reduce)
ret = reduction(loss)
return ret
@to_ivy_arrays_and_back
@with_unsupported_dtypes(
{"2.2 and below": ("float16", "int8", "int16", "int32")}, "torch"
)
def nll_loss(
input,
target,
weight=None,
size_average=None,
ignore_index=-100,
reduce=None,
reduction="mean",
):
out = ivy.zeros_like(target)
if len(input.shape) == 1:
for i in range(len(target)):
out[i] = input[target[i]]
else:
for i in range(len(target)):
out[i] = input[i][target[i]]
loss = -out
if weight is not None:
loss = ivy.multiply(weight, loss)
reduct = _get_reduction(reduction, size_average, reduce)
ret = reduct(loss)
return ret
def norm(input, axis):
return ivy.sqrt(ivy.sum(ivy.square(input), axis=axis))
def pairwise_distance(x1, x2, *, p=2.0, eps=1e-06, keepdim=False):
x1, x2 = torch_frontend.promote_types_of_torch_inputs(x1, x2)
x1_dim = len(x1.shape)
x2_dim = len(x2.shape)
if x1_dim > x2_dim:
output_dim = x1_dim
else:
output_dim = x2_dim
return ivy.vector_norm(x1 - x2 + eps, ord=p, axis=output_dim - 1, keepdims=keepdim)
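# Distance sketch (illustrative only; the helper is not part of the frontend
# API): pairwise_distance reduces over the last axis with the p-norm of
# (x1 - x2 + eps).
def _demo_pairwise_distance():
    x1 = ivy.asarray([[0.0, 0.0], [1.0, 1.0]])
    x2 = ivy.asarray([[3.0, 4.0], [1.0, 1.0]])
    d = pairwise_distance(x1, x2)
    # expected approximately [5.0, 0.0] (up to the eps stabiliser)
    assert tuple(d.shape) == (2,)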
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
def poisson_nll_loss(
input,
target,
log_input=True,
full=False,
size_average=None,
eps=1e-8,
reduce=None,
reduction="mean",
):
input, target = torch_frontend.promote_types_of_torch_inputs(input, target)
if log_input:
loss = ivy.exp(input) - target * input
else:
loss = input - target * ivy.log(input + eps)
if full:
approximation = (
target * ivy.log(target) - target + 0.5 * ivy.log(2 * ivy.pi * target)
)
loss += ivy.where(target > 1, approximation, 0)
reduction = _get_reduction(reduction, size_average, reduce)
return reduction(loss).astype(input.dtype)
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
def smooth_l1_loss(
input,
target,
size_average=None,
reduce=None,
reduction="mean",
beta=1.0,
):
return ivy.smooth_l1_loss(input, target, beta=beta, reduction=reduction)
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
def soft_margin_loss(
input,
target,
size_average=None,
reduce=None,
reduction="mean",
):
return ivy.soft_margin_loss(input, target, reduction=reduction)
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
def triplet_margin_loss(
anchor,
positive,
negative,
margin=1.0,
p=2.0,
eps=1e-06,
swap=False,
size_average=None,
reduce=None,
reduction="mean",
):
def pairwise_distance(x1, x2, *, p=2.0, eps=1e-06, keepdim=False):
x1, x2 = torch_frontend.promote_types_of_torch_inputs(x1, x2)
x1_dim = len(x1.shape)
x2_dim = len(x2.shape)
if x1_dim > x2_dim:
output_dim = x1_dim
else:
output_dim = x2_dim
return ivy.vector_norm(
x1 - x2 + eps, ord=p, axis=output_dim - 1, keepdims=keepdim
)
reduction = _get_reduction(reduction, size_average, reduce)
a_dim = anchor.ndim
p_dim = positive.ndim
n_dim = negative.ndim
    ivy.utils.assertions.check_true(
a_dim == p_dim and p_dim == n_dim,
lambda: (
"The anchor, positive, and negative tensors are expected to have "
f"the same number of dimensions, but got: anchor {a_dim}D, "
f"positive {p_dim}D, and negative {n_dim}D inputs"
),
)
dist_positive = pairwise_distance(anchor, positive, p=p, eps=eps)
dist_negative = pairwise_distance(anchor, negative, p=p, eps=eps)
if swap:
dist_swap = pairwise_distance(positive, negative, p=p, eps=eps)
dist_negative = ivy.minimum(dist_negative, dist_swap)
loss = ivy.maximum(
dist_positive - dist_negative + ivy.array(margin), ivy.array(0.0)
)
loss = reduction(loss).astype(anchor.dtype)
return loss
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
def triplet_margin_with_distance_loss(
anchor,
positive,
negative,
distance_function=None,
margin=1.0,
swap=False,
reduction="mean",
):
reduction = _get_reduction(reduction)
a_dim = anchor.ndim
p_dim = positive.ndim
n_dim = negative.ndim
    ivy.utils.assertions.check_true(
a_dim == p_dim and p_dim == n_dim,
lambda: (
"The anchor, positive, and negative tensors are expected to have "
f"the same number of dimensions, but got: anchor {a_dim}D, "
f"positive {p_dim}D, and negative {n_dim}D inputs"
),
)
if distance_function is None:
distance_function = pairwise_distance
dist_pos = distance_function(anchor, positive)
dist_neg = distance_function(anchor, negative)
if swap:
dist_swap = distance_function(positive, negative)
dist_neg = ivy.minimum(dist_neg, dist_swap)
loss = ivy.maximum(dist_pos - dist_neg + ivy.array(margin), ivy.array(0.0))
return reduction(loss).astype(anchor.dtype)
# --- end of ivy/ivy/functional/frontends/torch/nn/functional/loss_functions.py ---
# local
import ivy
from ivy.func_wrapper import with_supported_dtypes
from ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back
@to_ivy_arrays_and_back
def broadcast_tensors(*tensors):
return ivy.broadcast_arrays(*tensors)
@to_ivy_arrays_and_back
def is_complex(input):
return ivy.is_complex_dtype(input)
@to_ivy_arrays_and_back
def is_floating_point(input):
return ivy.is_float_dtype(input)
@to_ivy_arrays_and_back
def is_nonzero(input):
return ivy.nonzero(input)[0].size != 0
@to_ivy_arrays_and_back
def is_tensor(obj):
return ivy.is_array(obj)
@to_ivy_arrays_and_back
def numel(input):
return ivy.astype(ivy.array(input.size), ivy.int64)
@to_ivy_arrays_and_back
@with_supported_dtypes(
{"2.2 and below": ("float32", "float64", "int32", "int64")}, "torch"
)
def scatter(input, dim, index, src):
return ivy.put_along_axis(input, index, src, dim, mode="replace")
@to_ivy_arrays_and_back
@with_supported_dtypes(
{"2.2 and below": ("float32", "float64", "int32", "int64")}, "torch"
)
def scatter_add(input, dim, index, src):
return ivy.put_along_axis(input, index, src, dim, mode="sum")
@to_ivy_arrays_and_back
@with_supported_dtypes(
{"2.2 and below": ("float32", "float64", "int32", "int64")}, "torch"
)
def scatter_reduce(input, dim, index, src, reduce, *, include_self=True):
mode_mappings = {
"sum": "sum",
"amin": "min",
"amax": "max",
"prod": "mul",
"replace": "replace",
}
reduce = mode_mappings.get(reduce, reduce)
return ivy.put_along_axis(input, index, src, dim, mode=reduce)
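# Usage sketch (illustrative only; the helper is not part of the frontend
# API): torch's reduce names are mapped onto ivy.put_along_axis modes, e.g.
# "amax" -> "max".
def _demo_scatter_reduce():
    base = ivy.asarray([1.0, 2.0, 3.0, 4.0])
    index = ivy.asarray([0, 0])
    src = ivy.asarray([5.0, -1.0])
    out = scatter_reduce(base, 0, index, src, "amax")
    # position 0 ends up with max(1.0, 5.0, -1.0) == 5.0
    assert float(out[0]) == 5.0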
# --- end of ivy/ivy/functional/frontends/torch/tensor_functions.py ---
"""Collection of Ivy activation functions."""
from typing import Union, Optional, Callable, Literal
# local
import ivy
from ivy.utils.backend import current_backend
from ivy.func_wrapper import (
handle_array_function,
handle_out_argument,
to_native_arrays_and_back,
handle_nestable,
handle_array_like_without_promotion,
handle_device,
handle_complex_input,
handle_backend_invalid,
)
from ivy.utils.exceptions import handle_exceptions
def _gelu_jax_like(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
fn_original: Optional[Callable] = None,
approximate: bool = False,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
    # We don't use the exact implementation here
    # because the erf function doesn't support complex numbers
return fn_original(x, approximate=True, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
@handle_complex_input
def gelu(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
approximate: bool = False,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Apply the Gaussian error linear unit (GELU) activation function.
Parameters
----------
x
Input array.
approximate
        Whether to approximate, default is ``False``. An approximation is always used if
the input array is complex.
complex_mode
optional specifier for how to handle complex data types. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
The input array with gelu applied element-wise.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([-1.2, -0.6, 1.5])
>>> y = ivy.gelu(x)
>>> y
ivy.array([-0.138, -0.165, 1.4])
With :class:`ivy.NativeArray` input:
>>> x = ivy.native_array([-1.3, 3.8, 2.1])
>>> y = ivy.gelu(x)
>>> y
ivy.array([-0.126, 3.8, 2.06])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([1., 2.]), b=ivy.array([-0.9, -1.]))
>>> y = ivy.gelu(x)
>>> y
{
a: ivy.array([0.841, 1.95]),
b: ivy.array([-0.166, -0.159])
}
"""
return current_backend(x).gelu(x, approximate=approximate, out=out)
gelu.jax_like = _gelu_jax_like
def _leaky_relu_jax_like(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
fn_original: Optional[Callable] = None,
alpha: float = 0.2,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
return ivy.where(
(
ivy.logical_or(
ivy.real(x) < 0, ivy.logical_and(ivy.real(x) == 0, ivy.imag(x) < 0)
)
),
ivy.astype(x * alpha, x.dtype),
x,
)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
@handle_complex_input
def leaky_relu(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
alpha: float = 0.2,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Apply the leaky rectified linear unit function element-wise.
If the input is complex, then by default each element is scaled by `alpha` if
either its real part is strictly negative or if its real part is zero and its
imaginary part is negative. This behaviour can be changed by specifying a different
`complex_mode`.
Parameters
----------
x
Input array.
alpha
Negative slope for ReLU.
complex_mode
optional specifier for how to handle complex data types. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
The input array with leaky relu applied element-wise.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([0.39, -0.85])
>>> y = ivy.leaky_relu(x)
>>> print(y)
ivy.array([ 0.39, -0.17])
>>> x = ivy.array([1.5, 0.7, -2.4])
>>> y = ivy.zeros(3)
>>> ivy.leaky_relu(x, out=y)
>>> print(y)
ivy.array([ 1.5 , 0.7 , -0.48])
>>> x = ivy.array([[1.1, 2.2, 3.3],
... [-4.4, -5.5, -6.6]])
>>> ivy.leaky_relu(x, out=x)
>>> print(x)
ivy.array([[ 1.1 , 2.2 , 3.3 ],
[-0.88, -1.1 , -1.32]])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0.0, -1.2]), b=ivy.array([0.4, -0.2]))
>>> x = ivy.leaky_relu(x, out=x)
>>> print(x)
{
a: ivy.array([0., -0.24000001]),
b: ivy.array([0.40000001, -0.04])
}
"""
return current_backend(x).leaky_relu(x, alpha=alpha, out=out)
leaky_relu.jax_like = _leaky_relu_jax_like
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
@handle_complex_input
def log_softmax(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
axis: Optional[int] = None,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Apply the log_softmax function element-wise.
Parameters
----------
x
Input array.
axis
        The dimension along which log_softmax is performed. The default is ``None``.
complex_mode
optional specifier for how to handle complex data types. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
The output array with log_softmax applied element-wise to input.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([-1.0, -0.98])
>>> y = ivy.log_softmax(x)
>>> print(y)
ivy.array([-0.703, -0.683])
>>> x = ivy.array([1.0, 2.0, 3.0])
>>> y = ivy.log_softmax(x)
>>> print(y)
ivy.array([-2.41, -1.41, -0.408])
With :class:`ivy.NativeArray` input:
>>> x = ivy.native_array([1.5, 0.5, 1.0])
>>> y = ivy.log_softmax(x)
>>> print(y)
ivy.array([-0.68, -1.68, -1.18])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([1.5, 0.5, 1.0]))
>>> y = ivy.log_softmax(x)
>>> print(y)
{
a: ivy.array([-0.68, -1.68, -1.18])
}
>>> x = ivy.Container(a=ivy.array([1.0, 2.0]), b=ivy.array([0.4, -0.2]))
>>> y = ivy.log_softmax(x)
>>> print(y)
{
a: ivy.array([-1.31, -0.313]),
b: ivy.array([-0.437, -1.04])
}
"""
return current_backend(x).log_softmax(x, axis=axis, out=out)
def _relu_jax_like(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
fn_original=None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
return ivy.where(
(
ivy.logical_or(
ivy.real(x) < 0, ivy.logical_and(ivy.real(x) == 0, ivy.imag(x) < 0)
)
),
ivy.array(0.0, dtype=x.dtype),
x,
)
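# Worked example of the complex rule above (a sketch): for
# x = [-1+2j, 0-1j, 0+1j], the mask selects the first two elements (negative
# real part, or zero real part with negative imaginary part), giving
# [0+0j, 0+0j, 0+1j].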
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
@handle_complex_input
def relu(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Apply the rectified linear unit function element-wise.
If the input is complex, then by default each element is set to zero if
either its real part is strictly negative or if its real part is zero and its
imaginary part is negative. This behaviour can be changed by specifying a different
`complex_mode`.
Parameters
----------
x
input array
complex_mode
optional specifier for how to handle complex data types. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array containing the rectified linear unit activation of each element in
``x``.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([-1., 0., 1.])
>>> y = ivy.relu(x)
>>> print(y)
ivy.array([0., 0., 1.])
>>> x = ivy.array([1.5, 0.7, -2.4])
>>> y = ivy.zeros(3)
>>> ivy.relu(x, out = y)
>>> print(y)
ivy.array([1.5, 0.7, 0.])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([1.0, -1.2]), b=ivy.array([0.4, -0.2]))
>>> x = ivy.relu(x, out=x)
>>> print(x)
{
a: ivy.array([1., 0.]),
b: ivy.array([0.40000001, 0.])
}
"""
return current_backend(x).relu(x, out=out)
relu.jax_like = _relu_jax_like
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
@handle_complex_input
def sigmoid(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Apply the sigmoid function element-wise.
Parameters
----------
x
input array.
complex_mode
optional specifier for how to handle complex data types. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to. Default: ``None``.
Returns
-------
ret
an array containing the sigmoid activation of each element in ``x``.
sigmoid activation of x is defined as 1/(1+exp(-x)).
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([-1.0, 1.0, 2.0])
>>> y = ivy.sigmoid(x)
>>> print(y)
ivy.array([0.2689414 , 0.7310586 , 0.88079703])
>>> x = ivy.array([-1.0, 1.0, 2.0])
>>> y = ivy.zeros(3)
>>> ivy.sigmoid(x, out=y)
>>> print(y)
ivy.array([0.2689414 , 0.7310586 , 0.88079703])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0.]),
... b=ivy.Container(c=ivy.array([1.]),
... d=ivy.array([2.])))
>>> y = ivy.sigmoid(x)
>>> print(y)
{
a: ivy.array([0.5]),
b: {
c: ivy.array([0.7310586]),
d: ivy.array([0.88079703])
}
}
>>> x = ivy.Container(a=ivy.array([0.]),
... b=ivy.Container(c=ivy.array([1.]),
... d=ivy.array([2.])))
>>> y = ivy.Container(a=ivy.array([0.]),
... b=ivy.Container(c=ivy.array([0.]),
... d=ivy.array([0.])))
>>> ivy.sigmoid(x, out=y)
>>> print(y)
{
a: ivy.array([0.5]),
b: {
c: ivy.array([0.7310586]),
d: ivy.array([0.88079703])
}
}
"""
return current_backend(x).sigmoid(x, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
@handle_complex_input
def softmax(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
axis: Optional[int] = None,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Apply the softmax function element-wise.
Parameters
----------
x
Input array.
axis
The dimension softmax would be performed on. The default is ``None``.
complex_mode
optional specifier for how to handle complex data types. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
The input array with softmax applied element-wise.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([1.0, 0, 1.0])
>>> y = ivy.softmax(x)
>>> print(y)
ivy.array([0.422, 0.155, 0.422])
>>> x = ivy.array([[1.1, 2.2, 3.3],
... [4.4, 5.5, 6.6]])
>>> y = ivy.softmax(x, axis = 1)
>>> print(y)
ivy.array([[0.0768, 0.231 , 0.693 ],
[0.0768, 0.231 , 0.693 ]])
"""
return current_backend(x).softmax(x, axis=axis, out=out)
def _wrap_between(y, a):
"""Wrap y between [-a, a]"""
a = ivy.array(a, dtype=y.dtype)
a2 = ivy.array(2 * a, dtype=y.dtype)
zero = ivy.array(0, dtype=y.dtype)
rem = ivy.remainder(ivy.add(y, a), a2)
rem = ivy.where(rem < zero, rem + a2, rem) - a
return rem
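# Worked example (a sketch): with a = pi, y = 3.5 maps to
# ((3.5 + pi) % (2 * pi)) - pi, which is about -2.783, i.e. 3.5 - 2 * pi, so
# the imaginary part of a complex softplus result stays within [-pi, pi).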
def _softplus_jax_like(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
fn_original=None,
beta: Optional[Union[int, float]] = None,
threshold: Optional[Union[int, float]] = None,
out: Optional[ivy.Array] = None,
):
if beta is not None:
x_beta = ivy.multiply(x, ivy.array(beta, dtype=x.dtype))
else:
x_beta = x
amax = ivy.relu(x_beta)
res = ivy.subtract(x_beta, ivy.multiply(amax, ivy.array(2, dtype=x.dtype)))
res = ivy.add(amax, ivy.log(ivy.add(1, ivy.exp(res))))
res = ivy.real(res) + _wrap_between(ivy.imag(res), ivy.pi).astype(
x.dtype
) * ivy.astype(1j, x.dtype)
if beta is not None:
res = ivy.divide(res, ivy.array(beta, dtype=x.dtype))
if threshold is not None:
res = ivy.where(
ivy.real(x_beta) < threshold,
res,
x,
).astype(x.dtype)
return res
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
@handle_complex_input
def softplus(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
beta: Optional[Union[int, float]] = None,
threshold: Optional[Union[int, float]] = None,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Apply the softplus function element-wise.
If the input is complex, then by default we apply the softplus operation
`log(1 + exp(x))` to each element. If `threshold` is set, any element whose
scaled real part `real(input × β)` reaches the threshold is returned
unchanged, so the function reverts to a linear function for large inputs.
Parameters
----------
x
input array.
beta
The beta value for the softplus formulation. Default: ``None``.
threshold
values above this revert to a linear function.
If the input is complex, only its real part is considered. Default: ``None``
complex_mode
optional specifier for how to handle complex data types. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array containing the softplus activation of each element in ``x``.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([-0.3461, -0.6491])
>>> y = ivy.softplus(x)
>>> print(y)
ivy.array([0.535,0.42])
>>> x = ivy.array([-0.3461, -0.6491])
>>> y = ivy.softplus(x, beta=0.5)
>>> print(y)
ivy.array([1.22, 1.09])
>>> x = ivy.array([1., 2., 3.])
>>> y = ivy.softplus(x, threshold=2)
>>> print(y)
ivy.array([1.31, 2.13, 3. ])
"""
return current_backend(x).softplus(x, beta=beta, threshold=threshold, out=out)
softplus.jax_like = _softplus_jax_like
# Softsign
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def softsign(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Apply the softsign function element-wise.
Parameters
----------
x
Input array.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
The input array with softsign applied element-wise.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([1.0, 2.0, 3.0])
>>> y = ivy.softsign(x)
>>> print(y)
ivy.array([0.5, 0.66666667, 0.75])
"""
return current_backend(x).softsign(x, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
@handle_complex_input
def mish(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Apply the mish activation function element-wise.
Parameters
----------
x
input array
complex_mode
optional specifier for how to handle complex data types. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array containing the mish activation of each element in
``x``.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([-1., 0., 1.])
>>> y = ivy.mish(x)
>>> print(y)
ivy.array([-0.30340147, 0. , 0.86509842])
>>> x = ivy.array([1.5, 0.7, -2.4])
>>> y = ivy.zeros(3)
>>> ivy.mish(x, out = y)
>>> print(y)
ivy.array([ 1.40337825, 0.56114835, -0.20788449])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([1.0, -1.2]), b=ivy.array([0.4, -0.2]))
>>> x = ivy.mish(x)
>>> print(x)
{
a: ivy.array([0.86509842, -0.30883577]),
b: ivy.array([0.28903052, -0.10714479])
}
"""
return current_backend(x).mish(x, out=out)
def _hardswish_jax_like(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
fn_original=None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
def hard_sigmoid(x):
return ivy.relu6(x + 3.0) / 6
return ivy.multiply(x, hard_sigmoid(x).astype(x.dtype))
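# Sanity check of the formula (a sketch): hardswish(1.0) = 1.0 * relu6(4.0) / 6
# = 4 / 6, roughly 0.667, and hardswish(x) = x for x >= 3, where relu6(x + 3)
# saturates at 6.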
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_complex_input
def hardswish(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Apply the hardswish activation function element-wise.
Parameters
----------
x
input array
complex_mode
optional specifier for how to handle complex data types. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array containing the hardswish activation of each element in ``x``.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([0., 0., 4.])
>>> y = ivy.hardswish(x)
>>> y
ivy.array([0., 0., 4.])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([-3., 4., 5.]), b=ivy.array([0., 5.]))
>>> x = ivy.hardswish(x, out=x)
>>> x
{
a: ivy.array([-0., 4., 5.]),
b: ivy.array([0., 5.])
}
"""
return current_backend(x).hardswish(x, out=out)
hardswish.jax_like = _hardswish_jax_like
# (end of file: ivy/ivy/functional/ivy/activations.py)
# global
import math
import itertools
from typing import Optional, Union, Tuple, List, Literal, Sequence, Callable
from functools import reduce as _reduce
import builtins
# local
import ivy
from ivy.func_wrapper import (
handle_array_like_without_promotion,
handle_out_argument,
to_native_arrays_and_back,
handle_nestable,
handle_partial_mixed_function,
inputs_to_ivy_arrays,
handle_array_function,
handle_device,
handle_backend_invalid,
)
from ivy.functional.ivy.experimental.general import _correct_ivy_callable
from ivy.utils.exceptions import handle_exceptions
_min = builtins.min
_slice = builtins.slice
_max = builtins.max
@handle_backend_invalid
@handle_nestable
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def max_pool1d(
x: Union[ivy.Array, ivy.NativeArray],
kernel: Union[int, Tuple[int, ...]],
strides: Union[int, Tuple[int, ...]],
padding: Union[str, int, Tuple[int], List[Tuple[int, int]]],
/,
*,
data_format: str = "NWC",
dilation: Union[int, Tuple[int]] = 1,
ceil_mode: bool = False,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute a 1-D max pool given 3-D input x.
Parameters
----------
x
Input image *[batch_size, w, d_in]* if data_format is "NWC".
kernel
Size of the kernel i.e., the sliding window for each
dimension of input. *[w]*.
strides
The stride of the sliding window for each dimension of input.
padding
"SAME" or "VALID" indicating the algorithm; int, or list of tuple
indicating the per-dimension paddings. (e.g. 2, [(1, 0)])
data_format
"NWC" or "NCW". Defaults to "NWC".
dilation
The stride between elements within a sliding window, must be > 0.
ceil_mode
If True, ceil is used instead of floor to compute the output shape.
This ensures that every element in 'x' is covered by a sliding window.
out
optional output array, for writing the result to.
Returns
-------
ret
The result of the pooling operation.
Both the description and the type hints above assume an array input
for simplicity, but this function is *nestable*, and therefore
also accepts :class:`ivy.Container` instances in place of any of
the arguments.
Examples
--------
>>> x = ivy.arange(0, 24.).reshape((2, 3, 4))
>>> print(ivy.max_pool1d(x, 2, 2, 'SAME'))
ivy.array([[[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]],
[[16., 17., 18., 19.],
[20., 21., 22., 23.]]])
>>> x = ivy.arange(0, 24.).reshape((2, 3, 4))
>>> print(ivy.max_pool1d(x, 2, 2, 'VALID'))
ivy.array([[[ 4., 5., 6., 7.]],
[[16., 17., 18., 19.]]])
>>> x = ivy.arange(0, 24.).reshape((2, 3, 4))
>>> print(ivy.max_pool1d(x, 2, 2, [(1,0)], data_format="NCW", dilation=1, ceil_mode=True))
ivy.array([[[ 0., 2., 3.],
[ 4., 6., 7.],
[ 8., 10., 11.]],
[[12., 14., 15.],
[16., 18., 19.],
[20., 22., 23.]]])
""" # noqa: E501
return ivy.current_backend(x).max_pool1d(
x,
kernel,
strides,
padding,
data_format=data_format,
dilation=dilation,
ceil_mode=ceil_mode,
out=out,
)
@handle_backend_invalid
@handle_nestable
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def max_pool2d(
x: Union[ivy.Array, ivy.NativeArray],
kernel: Union[int, Tuple[int, ...]],
strides: Union[int, Tuple[int, ...]],
padding: Union[str, int, Tuple[int], List[Tuple[int, int]]],
/,
*,
data_format: str = "NHWC",
dilation: Union[int, Tuple[int, ...]] = 1,
ceil_mode: bool = False,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute a 2-D max pool given 4-D input x.
Parameters
----------
x
Input image *[batch_size,h,w,d_in]*.
kernel
Size of the kernel i.e., the sliding window for each
dimension of input. *[h,w]*.
strides
The stride of the sliding window for each dimension of input.
padding
SAME" or "VALID" indicating the algorithm, or list
indicating the per-dimension paddings.
data_format
NHWC" or "NCHW". Defaults to "NHWC".
dilation
The stride between elements within a sliding window, must be > 0.
ceil_mode
If True, ceil is used instead of floor to compute the output shape.
This ensures that every element in 'x' is covered by a sliding window.
out
optional output array, for writing the result to.
Returns
-------
ret
The result of the pooling operation.
Both the description and the type hints above assume an array input
for simplicity, but this function is *nestable*, and therefore
also accepts :class:`ivy.Container` instances in place of any of
the arguments.
Examples
--------
>>> x = ivy.arange(12.).reshape((2, 1, 3, 2))
>>> print(ivy.max_pool2d(x, (2, 2), (1, 1), 'SAME'))
ivy.array([[[[ 2., 3.],
[ 4., 5.],
[ 4., 5.]]],
[[[ 8., 9.],
[10., 11.],
[10., 11.]]]])
>>> x = ivy.arange(48.).reshape((2, 4, 3, 2))
>>> print(ivy.max_pool2d(x, 3, 1, 'VALID'))
ivy.array([[[[16., 17.]],
[[22., 23.]]],
[[[40., 41.]],
[[46., 47.]]]])
"""
return ivy.current_backend(x).max_pool2d(
x,
kernel,
strides,
padding,
data_format=data_format,
dilation=dilation,
ceil_mode=ceil_mode,
out=out,
)
@handle_backend_invalid
@handle_nestable
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def max_pool3d(
x: Union[ivy.Array, ivy.NativeArray],
kernel: Union[int, Tuple[int, ...]],
strides: Union[int, Tuple[int, ...]],
padding: Union[str, int, Tuple[int], List[Tuple[int, int]]],
/,
*,
data_format: str = "NDHWC",
dilation: Union[int, Tuple[int, ...]] = 1,
ceil_mode: bool = False,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute a 3-D max pool given 5-D input x.
Parameters
----------
x
Input tensor *[batch_size,d,h,w,d_in]* if data_format is "NDHWC".
kernel
Size of the kernel i.e., the sliding window for each
dimension of input. *[d,h,w]*.
strides
The stride of the sliding window for each dimension of input.
padding
"SAME" or "VALID" indicating the algorithm; int, or list of tuple
indicating the per-dimension paddings. (e.g. 2, [(1, 0), (0, 1), (1, 1)])
data_format
"NDHWC" or "NCDHW". Defaults to "NDHWC".
dilation
The stride between elements within a sliding window, must be > 0.
ceil_mode
If True, ceil is used instead of floor to compute the output shape.
This ensures that every element in 'x' is covered by a sliding window.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
The result of the pooling operation.
Both the description and the type hints above assume an array input
for simplicity, but this function is *nestable*, and therefore
also accepts :class:`ivy.Container` instances in place of any of
the arguments.
Examples
--------
>>> x = ivy.arange(48.).reshape((2, 3, 2, 2, 2))
>>> print(ivy.max_pool3d(x, 2, 2, 'VALID'))
ivy.array([[[[[14., 15.]]]],
[[[[38., 39.]]]]])
>>> print(ivy.max_pool3d(x, 2, 2, 'SAME'))
ivy.array([[[[[14., 15.]]],
[[[22., 23.]]]],
[[[[38., 39.]]],
[[[46., 47.]]]]])
"""
return ivy.current_backend(x).max_pool3d(
x,
kernel,
strides,
padding,
data_format=data_format,
dilation=dilation,
ceil_mode=ceil_mode,
out=out,
)
@handle_backend_invalid
@handle_nestable
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def avg_pool1d(
x: Union[ivy.Array, ivy.NativeArray],
kernel: Union[int, Tuple[int]],
strides: Union[int, Tuple[int]],
padding: Union[str, int, List[Tuple[int, int]]],
/,
*,
data_format: str = "NWC",
count_include_pad: bool = False,
ceil_mode: bool = False,
divisor_override: Optional[int] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute a 1-D avg pool given 3-D input x.
Parameters
----------
x
Input image *[batch_size, w, d_in]*.
kernel
Size of the kernel i.e., the sliding window for each
dimension of input. *[w]*.
strides
The stride of the sliding window for each dimension of input.
padding
SAME" or "VALID" indicating the algorithm, or list
indicating the per-dimension paddings.
data_format
NWC" or "NCW". Defaults to "NWC".
count_include_pad
Whether to include padding in the averaging calculation.
ceil_mode
Whether to use ceil or floor for creating the output shape.
divisor_override
If specified, it will be used as the divisor,
otherwise kernel_size will be used.
out
optional output array, for writing the result to.
Returns
-------
ret
The result of the pooling operation.
Both the description and the type hints above assume an array input
for simplicity, but this function is *nestable*, and therefore
also accepts :class:`ivy.Container` instances in place of any of
the arguments.
Examples
--------
>>> x = ivy.arange(0, 24.).reshape((2, 3, 4))
>>> print(ivy.avg_pool1d(x, 2, 2, 'SAME'))
ivy.array([[[ 2., 3., 4., 5.],
[ 8., 9., 10., 11.]],
[[14., 15., 16., 17.],
[20., 21., 22., 23.]]])
>>> x = ivy.arange(0, 24.).reshape((2, 3, 4))
>>> print(ivy.avg_pool1d(x, 2, 2, 'VALID'))
ivy.array([[[ 2., 3., 4., 5.]],
[[14., 15., 16., 17.]]])
"""
return ivy.current_backend(x).avg_pool1d(
x,
kernel,
strides,
padding,
data_format=data_format,
count_include_pad=count_include_pad,
ceil_mode=ceil_mode,
divisor_override=divisor_override,
out=out,
)
@handle_backend_invalid
@handle_nestable
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def avg_pool2d(
x: Union[ivy.Array, ivy.NativeArray],
kernel: Union[int, Tuple[int], Tuple[int, int]],
strides: Union[int, Tuple[int], Tuple[int, int]],
padding: Union[str, int, List[Tuple[int, int]]],
/,
*,
data_format: str = "NHWC",
count_include_pad: bool = False,
ceil_mode: bool = False,
divisor_override: Optional[int] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute a 2-D average pool given 4-D input x.
Parameters
----------
x
Input image *[batch_size,h,w,d_in]*.
kernel
Size of the kernel i.e., the sliding window for each
dimension of input. *[h,w]*.
strides
The stride of the sliding window for each dimension of input.
padding
SAME" or "VALID" indicating the algorithm, or list
indicating the per-dimension paddings.
data_format
NHWC" or "NCHW". Defaults to "NHWC".
count_include_pad
Whether to include padding in the averaging calculation.
ceil_mode
Whether to use ceil or floor for creating the output shape.
divisor_override
If specified, it will be used as the divisor,
otherwise kernel_size will be used.
out
optional output array, for writing the result to.
Returns
-------
ret
The result of the pooling operation.
Both the description and the type hints above assume an array input
for simplicity, but this function is *nestable*, and therefore
also accepts :class:`ivy.Container` instances in place of any of
the arguments.
Examples
--------
>>> x = ivy.arange(12.).reshape((2, 1, 3, 2))
>>> print(ivy.avg_pool2d(x, (2, 2), (1, 1), 'SAME'))
ivy.array([[[[ 1., 2.],
[ 3., 4.],
[ 4., 5.]]],
[[[ 7., 8.],
[ 9., 10.],
[10., 11.]]]])
>>> x = ivy.arange(48.).reshape((2, 4, 3, 2))
>>> print(ivy.avg_pool2d(x, 3, 1, 'VALID'))
ivy.array([[[[ 8., 9.]],
[[14., 15.]]],
[[[32., 33.]],
[[38., 39.]]]])
"""
return ivy.current_backend(x).avg_pool2d(
x,
kernel,
strides,
padding,
data_format=data_format,
count_include_pad=count_include_pad,
ceil_mode=ceil_mode,
divisor_override=divisor_override,
out=out,
)
@handle_backend_invalid
@handle_nestable
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def avg_pool3d(
x: Union[ivy.Array, ivy.NativeArray],
kernel: Union[int, Tuple[int], Tuple[int, int, int]],
strides: Union[int, Tuple[int], Tuple[int, int, int]],
padding: Union[str, int, List[Tuple[int, int]]],
/,
*,
data_format: str = "NDHWC",
count_include_pad: bool = False,
ceil_mode: bool = False,
divisor_override: Optional[int] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute a 3-D avg pool given 5-D input x.
Parameters
----------
x
Input volume *[batch_size,d,h,w,d_in]*.
kernel
Size of the kernel i.e., the sliding window for each
dimension of input. *[d,h,w]*.
strides
The stride of the sliding window for each dimension of input.
padding
SAME" or "VALID" indicating the algorithm, or list indicating the per-dimension
paddings.
data_format
NDHWC" or "NCDHW". Defaults to "NDHWC".
count_include_pad
Whether to include padding in the averaging calculation.
ceil_mode
Whether to use ceil or floor for creating the output shape.
divisor_override
If specified, it will be used as divisor, otherwise kernel_size will be used.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
The result of the pooling operation.
Both the description and the type hints above assume an array input
for simplicity, but this function is *nestable*, and therefore
also accepts :class:`ivy.Container` instances in place of any of
the arguments.
Examples
--------
>>> x = ivy.arange(48.).reshape((2, 3, 2, 2, 2))
>>> print(ivy.avg_pool3d(x,2,2,'VALID'))
ivy.array([[[[[ 7., 8.]]]],
[[[[31., 32.]]]]])
>>> print(ivy.avg_pool3d(x,2,2,'SAME'))
ivy.array([[[[[ 7., 8.]]],
[[[19., 20.]]]],
[[[[31., 32.]]],
[[[43., 44.]]]]])
"""
return ivy.current_backend(x).avg_pool3d(
x,
kernel,
strides,
padding,
data_format=data_format,
count_include_pad=count_include_pad,
ceil_mode=ceil_mode,
divisor_override=divisor_override,
out=out,
)
@handle_backend_invalid
@handle_nestable
@handle_out_argument
@to_native_arrays_and_back
def pool(
x: Union[ivy.Array, ivy.NativeArray],
window_shape: Union[int, Tuple[int], Tuple[int, int]],
pool_type: str,
/,
*,
strides: Optional[Union[int, Tuple[int], Tuple[int, int]]] = None,
padding: str = "VALID",
data_format: Optional[str] = None,
dilations: Optional[Union[int, Tuple[int], Tuple[int, int]]] = None,
ceil_mode: bool = False,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Perform an N-D pooling operation.
Parameters
----------
x
Input array to pool over.
window_shape
Shape of the pooling window.
pool_type
Type of pooling operation, either 'MAX' or 'AVG'.
strides
Strides of the pooling operation.
padding
Padding type, either 'VALID' or 'SAME'.
data_format
Data format of the input and output data, either 'NCHW' or 'NHWC'.
dilations
Dilation rate of the pooling operation.
ceil_mode
Whether to use ceil or floor for creating the output shape.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
The result of the pooling operation.
Examples
--------
>>> x = ivy.arange(12.).reshape((2, 1, 3, 2))
>>> print(ivy.pool(x, (2, 2), 'MAX', strides=(1, 1), padding='SAME'))
ivy.array([[[[ 1., 2.],
[ 3., 4.],
[ 4., 5.]]],
[[[ 7., 8.],
[ 9., 10.],
[10., 11.]]]])
>>> x = ivy.arange(48.).reshape((2, 4, 3, 2))
>>> print(ivy.pool(x, 3, 'AVG', strides=1, padding='VALID'))
ivy.array([[[[ 8., 9.]],
[[14., 15.]]],
[[[32., 33.]],
[[38., 39.]]]])
"""
return ivy.current_backend(x).pool(
x,
window_shape,
pool_type,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilations,
ceil_mode=ceil_mode,
out=out,
)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def dct(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
type: Literal[1, 2, 3, 4] = 2,
n: Optional[int] = None,
axis: int = -1,
norm: Optional[Literal["ortho"]] = None,
out: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
) -> Union[ivy.Array, ivy.NativeArray]:
"""Compute the 1D Discrete Cosine Transformation of a given signal.
Parameters
----------
x
The input signal.
type
The type of the dct. Must be 1, 2, 3 or 4.
n
The length of the transform. If n is less than the input signal length,
then x is truncated; if n is larger, then x is zero-padded.
axis
The axis to compute the DCT along.
norm
The type of normalization to be applied. Must be either None or "ortho".
out
optional output array, for writing the result to.
Returns
-------
ret
Array containing the transformed input.
Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([8, 16, 24, 32, 40, 48, 56, 64])
>>> y = ivy.dct(x, type=2, n=None, norm='ortho')
>>> print(y)
ivy.array([ 1.01823380e+02, -5.15385818e+01, 1.36371466e-06, -5.38763905e+00,
0.00000000e+00, -1.60722279e+00, -8.80319249e-08, -4.05617893e-01])
>>> x = ivy.array([[[8, 16, 24, 32], [40, 48, 56, 64]],
... [[1, 2, 3, 4], [ 5, 6, 7, 8]]])
>>> y = ivy.dct(x, type=1, n=None, axis=0, norm=None)
>>> print(y)
ivy.array([[[ 9., 18., 27., 36.],
[45., 54., 63., 72.]],
[[ 7., 14., 21., 28.],
[35., 42., 49., 56.]]])
>>> x = ivy.array([[ 8.1, 16.2, 24.3, 32.4],
... [40.5, 48.6, 56.7, 64.8]])
>>> y = ivy.zeros((2, 4), dtype=ivy.float32)
>>> ivy.dct(x, type=1, n=None, norm=None, out=y)
>>> print(y)
ivy.array([[ 1.21500000e+02, -3.24000015e+01, 1.90734863e-06,
-8.10000420e+00],
[ 3.15899994e+02, -3.24000053e+01, 3.81469727e-06,
-8.09999847e+00]])
>>> x = ivy.array([8., 16., 24., 32., 40., 48., 56., 64.])
>>> ivy.dct(x, type=4, n=None, norm=None, out=x)
>>> print(x)
ivy.array([ 279.4135742 , -279.6779785 , 128.3770599 , -114.8719864 ,
83.72109985, -79.52869415, 69.79182434, -68.72489166])
With one :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([8, 16, 24, 32, 40, 48, 56, 64]),
... b=ivy.array([1, 2, 3, 4, 5, 6, 7, 8]))
>>> y = ivy.dct(x, type=3, n=None, norm='ortho')
>>> print(y)
{
a: ivy.array([79.49862671, -70.37691498, 30.00390816, -23.58938599,
13.92713165, -10.078475, 5.19664812, -1.95411837]),
b: ivy.array([9.93732834, -8.79711437, 3.75048852, -2.94867325, 1.74089146,
-1.25980937, 0.64958102, -0.2442648])
}
With multiple :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([8, 16, 24, 32, 40, 48, 56, 64]),
... b=ivy.array([1, 2, 3, 4, 5, 6, 7, 8]))
>>> container_n = ivy.Container(a=9, b=4)
>>> container_type = ivy.Container(a=2, b=1)
>>> container_norm = ivy.Container(a="ortho", b=None)
>>> y = ivy.dct(x, type=container_type, n=container_n, norm=container_norm)
>>> print(y)
{
a: ivy.array([96., -28.1580677, -31.89422607, 22.86190414,
-26.00041008, 19.75149155, -16.97056389, 10.87819386,
-5.89381361]),
b: ivy.array([1.50000000e+01, -4.00000000e+00, -2.22044605e-16,
-1.00000000e+00])
}
"""
return ivy.current_backend(x).dct(x, type=type, n=n, axis=axis, norm=norm, out=out)
@handle_exceptions
@handle_nestable
@handle_out_argument
@to_native_arrays_and_back
def idct(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
type: Literal[1, 2, 3, 4] = 2,
n: Optional[int] = None,
axis: int = -1,
norm: Optional[Literal["ortho"]] = None,
out: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
) -> Union[ivy.Array, ivy.NativeArray]:
"""Compute the 1D Inverse Discrete Cosine Transformation of a given signal.
Parameters
----------
x
The input signal.
type
The type of the idct. Must be 1, 2, 3 or 4.
n
The length of the transform. If n is less than the input signal length,
then x is truncated; if n is larger, then x is zero-padded.
axis
The axis to compute the IDCT along.
norm
The type of normalization to be applied. Must be either None or "ortho".
out
optional output array, for writing the result to.
Returns
-------
ret
Array containing the transformed input.
Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([8, 16, 24, 32, 40, 48, 56, 64])
>>> y = ivy.idct(x, type=2, n=None, norm='ortho')
>>> print(y)
ivy.array([ 79.49862671, -70.37691498, 30.00390816, -23.58938599,
13.92713165, -10.078475 , 5.19664812, -1.95411837])
>>> x = ivy.array([[[8, 16, 24, 32], [40, 48, 56, 64]],
... [[1, 2, 3, 4], [ 5, 6, 7, 8]]])
>>> y = ivy.idct(x, type=1, n=None, axis=0, norm=None)
>>> print(y)
ivy.array([[[ 9., 18., 27., 36.],
[45., 54., 63., 72.]],
[[ 7., 14., 21., 28.],
[35., 42., 49., 56.]]])
>>> x = ivy.array([[ 8.1, 16.2, 24.3, 32.4],
... [40.5, 48.6, 56.7, 64.8]])
>>> y = ivy.zeros((2, 4), dtype=ivy.float32)
>>> ivy.idct(x, type=1, n=None, norm=None, out=y)
>>> print(y)
ivy.array([[ 1.21500000e+02, -3.24000015e+01, 1.90734863e-06,
-8.10000420e+00],
[ 3.15899994e+02, -3.24000053e+01, 3.81469727e-06,
-8.09999847e+00]])
>>> x = ivy.array([8., 16., 24., 32., 40., 48., 56., 64.])
>>> ivy.idct(x, type=4, n=None, norm=None, out=x)
>>> print(x)
ivy.array([279.4135742, -279.6779785, 128.3770599, -114.8719864,
83.72109985, -79.52869415, 69.79182434, -68.72489166])
With one :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([8, 16, 24, 32, 40, 48, 56, 64]),
... b=ivy.array([1, 2, 3, 4, 5, 6, 7, 8]))
>>> y = ivy.idct(x, type=3, n=None, norm='ortho')
>>> print(y)
{
a: ivy.array([1.01823380e+02, -5.15385818e+01, 1.36371466e-06, -5.38763905e+00,
0.00000000e+00, -1.60722279e+00, -8.80319249e-08,
-4.05617893e-01]),
b: ivy.array([1.27279224e+01, -6.44232273e+00, 1.70464332e-07, -6.73454881e-01,
0.00000000e+00, -2.00902849e-01, -1.10039906e-08,
-5.07022366e-02])
}
With multiple :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([8, 16, 24, 32, 40, 48, 56, 64]),
... b=ivy.array([1, 2, 3, 4, 5, 6, 7, 8]))
>>> container_n = ivy.Container(a=9, b=4)
>>> container_type = ivy.Container(a=2, b=1)
>>> container_norm = ivy.Container(a="ortho", b=None)
>>> y = ivy.idct(x, type=container_type, n=container_n, norm=container_norm)
>>> print(y)
{
a: ivy.array([86.29723358, -66.69506073, 9.93914604, 2.88008881,
-16.18951607, 18.06697273, -17.57439613, 11.68861485,
-4.41308832]),
b: ivy.array([1.50000000e+01, -4.00000000e+00, -2.22044605e-16,
-1.00000000e+00])
}
"""
return ivy.current_backend(x).idct(x, type=type, n=n, axis=axis, norm=norm, out=out)
idct.mixed_backend_wrappers = {
"to_add": (
"handle_backend_invalid",
"handle_device",
),
"to_skip": (),
}
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def fft(
x: Union[ivy.Array, ivy.NativeArray],
dim: int,
/,
*,
norm: str = "backward",
n: Optional[Union[int, Tuple[int]]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
r"""Compute the one dimensional discrete Fourier transform given input at
least 1-D input x.
Parameters
----------
x
Input volume *[...,d_in,...]*,
where d_in indicates the dimension that needs FFT.
dim
The dimension along which to take the one dimensional FFT.
norm
Optional argument, "backward", "ortho" or "forward". Defaults to be "backward".
"backward" indicates no normalization.
"ortho" indicates normalization by $\frac{1}{\sqrt{n}}$.
"forward" indicates normalization by $\frac{1}{n}$.
n
Optional argument indicating the sequence length. If given, the input would be
zero-padded or truncated to length n before performing the FFT.
Should be an integer greater than 1.
out
Optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
The result of the FFT operation.
Examples
--------
>>> ivy.fft(np.exp(2j * np.pi * np.arange(8) / 8), 0)
ivy.array([-3.44509285e-16+1.14423775e-17j, 8.00000000e+00-8.11483250e-16j,
2.33486982e-16+1.22464680e-16j, 0.00000000e+00+1.22464680e-16j,
9.95799250e-17+2.33486982e-16j, 0.00000000e+00+7.66951701e-17j,
1.14423775e-17+1.22464680e-16j, 0.00000000e+00+1.22464680e-16j])
>>> ivy.fft(np.exp(2j * np.pi * np.arange(8) / 8), 0, n=16)
ivy.array([-3.44509285e-16+1.14423775e-17j, 1.00000000e+00+5.02733949e+00j,
8.00000000e+00-8.11483250e-16j, 1.00000000e+00-5.02733949e+00j,
2.33486982e-16+1.22464680e-16j, 1.00000000e+00-1.49660576e+00j,
0.00000000e+00+1.22464680e-16j, 1.00000000e+00-6.68178638e-01j,
9.95799250e-17+2.33486982e-16j, 1.00000000e+00-1.98912367e-01j,
0.00000000e+00+7.66951701e-17j, 1.00000000e+00+1.98912367e-01j,
1.14423775e-17+1.22464680e-16j, 1.00000000e+00+6.68178638e-01j,
0.00000000e+00+1.22464680e-16j, 1.00000000e+00+1.49660576e+00j])
>>> ivy.fft(np.exp(2j * np.pi * np.arange(8) / 8), 0, norm="ortho")
ivy.array([-1.21802426e-16+4.04549134e-18j, 2.82842712e+00-2.86902654e-16j,
8.25501143e-17+4.32978028e-17j, 0.00000000e+00+4.32978028e-17j,
3.52068201e-17+8.25501143e-17j, 0.00000000e+00+2.71158374e-17j,
4.04549134e-18+4.32978028e-17j, 0.00000000e+00+4.32978028e-17j])
"""
return ivy.current_backend(x).fft(x, dim, norm=norm, n=n, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def dropout1d(
x: Union[ivy.Array, ivy.NativeArray],
prob: float,
/,
*,
training: bool = True,
data_format: str = "NWC",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Randomly zero out entire channels with probability prob using samples
from a Bernoulli distribution, and scale the remaining channels by
(1 / (1 - prob)). In this case, dropout1d performs a channel-wise dropout but
assumes a channel is a 1D feature map.
Parameters
----------
x
a 2D or 3D input array. Should have a floating-point data type.
prob
probability of a channel to be zeroed.
training
controls whether dropout1d is performed during training or ignored
during testing.
data_format
"NWC" or "NCW". Defaults to "NWC".
out
optional output array, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
an array with some channels zeroed and the rest of the channels
scaled by (1 / (1 - prob)).
Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([1, 1, 1]).reshape([1, 1, 3])
>>> y = ivy.dropout1d(x, 0.5)
>>> print(y)
ivy.array([[[2., 0, 2.]]])
>>> x = ivy.array([1, 1, 1]).reshape([1, 1, 3])
>>> y = ivy.dropout1d(x, 1, training=False, data_format="NCW")
>>> print(y)
ivy.array([[[1, 1, 1]]])
With one :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([100, 200, 300]).reshape([1, 1, 3]),
... b=ivy.array([400, 500, 600]).reshape([1, 1, 3]))
>>> y = ivy.dropout1d(x, 0.5)
>>> print(y)
{
a: ivy.array([[[200., 400., 0.]]]),
b: ivy.array([[[0., 0., 0.]]])
}
"""
return ivy.current_backend(x).dropout1d(
x, prob, training=training, data_format=data_format, out=out
)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def dropout2d(
x: Union[ivy.Array, ivy.NativeArray],
prob: float,
/,
*,
training: bool = True,
data_format: str = "NHWC",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Randomly zero out entire channels with probability prob using samples
from a Bernoulli distribution, and scale the remaining channels by
(1 / (1 - prob)). In this case, dropout2d performs a channel-wise dropout but
assumes a channel is a 2D feature map.
Parameters
----------
x
a 3D or 4D input array. Should have a floating-point data type.
prob
probability of a channel to be zeroed.
training
controls whether dropout2d is performed during training or ignored
during testing.
data_format
"NHWC" or "NCHW". Defaults to "NHWC".
out
optional output array, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
an array with some channels zeroed and the rest of the channels
scaled by (1 / (1 - prob)).
Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([[1, 1, 1]])
>>> y = ivy.dropout2d(x, 0.5)
>>> print(y)
ivy.array([[0., 2., 2.]])
>>> x = ivy.array([[1, 1, 1]])
>>> y = ivy.dropout2d(x, 1, training=False, data_format="NCHW")
>>> print(y)
ivy.array([[1, 1, 1]])
"""
return ivy.current_backend(x).dropout2d(
x, prob, training=training, data_format=data_format, out=out
)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def dropout3d(
x: Union[ivy.Array, ivy.NativeArray],
prob: float,
/,
*,
training: bool = True,
data_format: str = "NDHWC",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Randomly zero out entire channels with probability prob using samples
from a Bernoulli distribution, and scale the remaining channels by
(1 / (1 - prob)). In this case, dropout3d performs a channel-wise dropout but
assumes a channel is a 3D feature map.
Parameters
----------
x
a 4D or 5D input array. Should have a floating-point data type.
prob
probability of a channel to be zeroed.
training
controls whether dropout3d is performed during training or ignored
during testing.
data_format
"NDHWC" or "NCDHW". Defaults to "NDHWC".
out
optional output array, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
an array with some channels zeroed and the rest of the channels
scaled by (1 / (1 - prob)).
Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
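Examples
--------
With :class:`ivy.Array` input (``training=False`` disables the op, as in the
``dropout1d`` example above):
>>> x = ivy.array([1., 1.]).reshape([1, 1, 1, 1, 2])
>>> y = ivy.dropout3d(x, 1, training=False)
>>> print(y)
ivy.array([[[[[1., 1.]]]]])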
"""
return ivy.current_backend(x).dropout3d(
x, prob, training=training, data_format=data_format, out=out
)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def ifft(
x: Union[ivy.Array, ivy.NativeArray],
dim: int,
*,
norm: str = "backward",
n: Optional[Union[int, Tuple[int]]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
r"""Compute the one dimensional discrete Fourier transform given input at
least 1-D input x.
Parameters
----------
x
Input volume *[...,d_in,...]*,
where d_in indicates the dimension that needs IFFT.
dim
The dimension along which to take the one dimensional IFFT.
norm
Optional argument, "backward", "ortho" or "forward". Defaults to be "backward".
"backward" indicates no normalization.
"ortho" indicates normalization by $\frac{1}{\sqrt{n}}$.
"forward" indicates normalization by $\frac{1}{n}$.
n
Optional argument indicating the sequence length. If given, the input would be
zero-padded or truncated to length n before performing the IFFT.
Should be an integer greater than 1.
out
Optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
The result of the IFFT operation.
Examples
--------
>>> ivy.ifft(np.exp(2j * np.pi * np.arange(8) / 8), 0)
ivy.array([-4.30636606e-17+1.43029718e-18j, 0.00000000e+00+1.53080850e-17j,
1.43029718e-18+1.53080850e-17j, 0.00000000e+00+9.58689626e-18j,
1.24474906e-17+2.91858728e-17j, 0.00000000e+00+1.53080850e-17j,
2.91858728e-17+1.53080850e-17j, 1.00000000e+00-1.01435406e-16j])
>>> ivy.ifft(np.exp(2j * np.pi * np.arange(8) / 8), 0, n=16)
ivy.array([-2.15318303e-17+7.15148591e-19j, 6.25000000e-02+9.35378602e-02j,
0.00000000e+00+7.65404249e-18j, 6.25000000e-02+4.17611649e-02j,
7.15148591e-19+7.65404249e-18j, 6.25000000e-02+1.24320230e-02j,
0.00000000e+00+4.79344813e-18j, 6.25000000e-02-1.24320230e-02j,
6.22374531e-18+1.45929364e-17j, 6.25000000e-02-4.17611649e-02j,
0.00000000e+00+7.65404249e-18j, 6.25000000e-02-9.35378602e-02j,
1.45929364e-17+7.65404249e-18j, 6.25000000e-02-3.14208718e-01j,
5.00000000e-01-5.07177031e-17j, 6.25000000e-02+3.14208718e-01j])
>>> ivy.ifft(np.exp(2j * np.pi * np.arange(8) / 8), 0, norm="ortho")
ivy.array([-1.21802426e-16+4.04549134e-18j, 0.00000000e+00+4.32978028e-17j,
4.04549134e-18+4.32978028e-17j, 0.00000000e+00+2.71158374e-17j,
3.52068201e-17+8.25501143e-17j, 0.00000000e+00+4.32978028e-17j,
8.25501143e-17+4.32978028e-17j, 2.82842712e+00-2.86902654e-16j])
"""
return ivy.current_backend(x).ifft(x, dim, norm=norm, n=n, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def embedding(
weights: Union[ivy.Array, ivy.NativeArray],
indices: Union[ivy.Array, ivy.NativeArray],
/,
*,
max_norm: Optional[int] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Embeds a given tensor of indices using a given tensor of weights.
Parameters
----------
weights
The weights tensor.
indices
The indices tensor.
max_norm
The maximum norm of the embeddings.
out
Optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
The result of the embedding operation.
Examples
--------
>>> weights = ivy.array([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]])
>>> indices = ivy.array([0, 2])
>>> print(ivy.embedding(weights, indices, max_norm=5))
ivy.array([[1. , 2. , 3. ],
[2.51285338, 2.87183261, 3.2308116 ]])
"""
ivy.utils.assertions.check_equal(
len(weights.shape), 2, message="weights must be 2-d", as_array=False
)
return ivy.current_backend(indices).embedding(
weights,
indices,
max_norm=max_norm,
out=out,
)
@handle_exceptions
@handle_nestable
@handle_out_argument
@inputs_to_ivy_arrays
def dft(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
axis: int = 1,
inverse: bool = False,
onesided: bool = False,
dft_length: Optional[Union[int, Tuple[int]]] = None,
norm: str = "backward",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute the discrete Fourier transform of input.
Parameters
----------
x
Input volume *[...,d_in,...]*,
where d_in indicates the dimension that needs FFT.
axis
The axis on which to perform the DFT. By default this
value is set to 1, which corresponds to the first dimension
after the batch index.
inverse
Whether to perform the inverse discrete fourier transform.
By default this value is set to False.
onesided
If onesided is True, only the first floor(n_fft/2) + 1 values for w
are returned because the real-to-complex Fourier transform satisfies the
conjugate symmetry, i.e., X[m, w] = X[m, n_fft - w]*. Note that if the
input or window tensors are complex, then onesided output is not possible.
Enabling onesided with real inputs performs a Real-valued fast Fourier
transform (RFFT). When invoked with real or complex valued input, the
default value is False. Values can be True or False.
dft_length
The length of the signal. If greater than the axis dimension,
the signal will be zero-padded up to dft_length. If less than
the axis dimension, only the first dft_length values will be
used as the signal. It's an optional value.
norm
Optional argument, "backward", "ortho" or "forward". Defaults to be
"backward".
"backward" indicates no normalization.
"ortho" indicates normalization by 1/sqrt(n).
"forward" indicates normalization by 1/n.
out
Optional output array, for writing the result to. It must
have a shape that the inputs broadcast to.
Returns
-------
ret
The Fourier Transform of the input vector. If onesided is False,
the following shape is expected: [batch_idx][signal_dim1][signal_dim2]
…[signal_dimN][2]. If axis=0 and onesided is True, the following shape
is expected: [batch_idx][floor(signal_dim1/2)+1][signal_dim2]…[signal_dimN][2].
If axis=1 and onesided is True, the following shape is expected:
[batch_idx][signal_dim1][floor(signal_dim2/2)+1]…[signal_dimN][2].
If axis=N-1 and onesided is True, the following shape is expected:
[batch_idx][signal_dim1][signal_dim2]…[floor(signal_dimN/2)+1][2].
The signal_dim at the specified axis is equal to the dft_length.
"""
if inverse:
res = ivy.ifft(x, axis, norm=norm, n=dft_length, out=out)
else:
res = ivy.fft(x, axis, norm=norm, n=dft_length, out=out)
if onesided:
slices = [slice(0, a) for a in res.shape]
slices[axis] = slice(0, ivy.shape(res)[axis] // 2 + 1)
res = res[tuple(slices)]
return res
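# Worked example of the onesided slicing above (a sketch): for a length-8
# signal along ``axis``, 8 // 2 + 1 == 5, so bins 0..4 are kept and bins 5..7
# are dropped as redundant conjugates of bins 1..3.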
@handle_exceptions
@handle_nestable
@handle_out_argument
@inputs_to_ivy_arrays
def interp(x, xp, fp, left=None, right=None, period=None):
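"""One-dimensional linear interpolation for monotonically increasing
sample points, following the semantics of ``np.interp`` (a best-effort
summary): ``left``/``right`` override the fill values outside the range of
``xp``, and ``period`` enables wrap-around interpolation."""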
x_arr = ivy.array(x)
fix_later = False
if x_arr.shape == ():
x_arr = ivy.array([x])
fix_later = True
x = ivy.astype(x_arr, "float64")
xp = ivy.astype(ivy.array(xp), "float64")
fp = ivy.astype(ivy.array(fp), "float64")
ivy.utils.assertions.check_equal(xp.ndim, 1, as_array=False)
ivy.utils.assertions.check_equal(fp.ndim, 1, as_array=False)
ivy.utils.assertions.check_equal(xp.shape[0], fp.shape[0], as_array=False)
if period is not None:
ivy.utils.assertions.check_equal(period, 0, inverse=True)
period = ivy.abs(period)
x = ivy.remainder(x, period)
xp = ivy.remainder(xp, period)
asort_xp = ivy.argsort(xp)
xp = xp[asort_xp]
fp = fp[asort_xp]
xp = ivy.concat((xp[-1:] - period, xp, xp[0:1] + period))
fp = ivy.concat((fp[-1:], fp, fp[0:1]))
def interp_inner(value):
value = ivy.array(value)
if value < xp[0]:
return left if left is not None else fp[0]
elif value > xp[-1]:
return right if right is not None else fp[-1]
else:
last = None
if xp.shape[0] < 3:
for i in range(xp.shape[0] - 1, -1, -1):
if xp[i] == value:
return fp[i]
elif xp[i] < value:
last = i
else:
first = 0
last = xp.shape[0]
while first < last:
midpoint = (first + last) // 2
if xp[midpoint] == value:
already_exists = ivy.argwhere(xp == value)
if already_exists.shape[0] > 0:
return fp[already_exists[-1][0]]
return fp[midpoint]
else:
if value < xp[midpoint]:
last = midpoint - 1
else:
first = midpoint + 1
dist = (value - xp[last]) / (xp[last + 1] - xp[last])
return (fp[last + 1] - fp[last]) * dist + fp[last]
ret = ivy.map(interp_inner, unique={"value": x})
if fix_later:
return ivy.astype(ivy.array(ret[0]), "float64")
else:
return ivy.astype(ivy.array(ret), "float64")
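# Worked example (a sketch): interp(2.5, [1, 2, 3], [3, 2, 0]) interpolates
# halfway between (2, 2.0) and (3, 0.0), returning 1.0, the same value
# np.interp gives for these inputs.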
def _tf_area_dim_scale(index, starting_index, scale, ending_index):
if index < starting_index:
dim_scale = scale if index + 1 > ending_index else index + 1 - starting_index
else:
dim_scale = ending_index - index if index + 1 > ending_index else 1.0
return dim_scale
def _tf_area_indices(dim_index, scale):
starting_index = dim_index * scale
ending_index = (dim_index + 1) * scale
rounded_indices = (
int(starting_index),
math.ceil(ending_index),
)
return starting_index, ending_index, rounded_indices
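# Worked example (a sketch): _tf_area_indices(1, 1.5) returns
# (1.5, 3.0, (1, 3)), i.e. output cell 1 covers the input span [1.5, 3.0) and
# touches the integer source indices 1 and 2.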
def _tf_area_interpolate(x, size, scale, dims):
ret = ivy.zeros(x.shape[:2] + size)
area = 1.0 / ivy.prod(scale)
for i, ba in enumerate(x):
for j, ch in enumerate(ba):
if dims == 3:
for d_dim in range(size[0]):
for h_dim in range(size[1]):
for w_dim in range(size[2]):
d_in, d_in1, d_index = _tf_area_indices(d_dim, scale[0])
h_in, h_in1, h_index = _tf_area_indices(h_dim, scale[1])
w_in, w_in1, w_index = _tf_area_indices(w_dim, scale[2])
sum_data = ivy.zeros(
(
d_index[1] - d_index[0],
h_index[1] - h_index[0],
w_index[1] - w_index[0],
)
)
for d_ind in range(d_index[0], d_index[1]):
scale_z = _tf_area_dim_scale(
d_ind, d_in, scale[0], d_in1
)
for h_ind in range(h_index[0], h_index[1]):
scale_y = _tf_area_dim_scale(
h_ind, h_in, scale[1], h_in1
)
for w_ind in range(w_index[0], w_index[1]):
scale_x = _tf_area_dim_scale(
w_ind, w_in, scale[2], w_in1
)
sum_data[
d_ind - d_index[0],
h_ind - h_index[0],
w_ind - w_index[0],
] = (
ivy.array(ch[d_ind, h_ind, w_ind])
* scale_x
* scale_y
* scale_z
* area
)
ret[i, j, d_dim, h_dim, w_dim] = ivy.sum(sum_data)
elif dims == 2:
for h_dim in range(size[0]):
for w_dim in range(size[1]):
h_in, h_in1, h_index = _tf_area_indices(h_dim, scale[0])
w_in, w_in1, w_index = _tf_area_indices(w_dim, scale[1])
sum_data = ivy.zeros(
(h_index[1] - h_index[0], w_index[1] - w_index[0])
)
for h_ind in range(h_index[0], h_index[1]):
scale_y = _tf_area_dim_scale(h_ind, h_in, scale[0], h_in1)
for w_ind in range(w_index[0], w_index[1]):
scale_x = _tf_area_dim_scale(
w_ind, w_in, scale[1], w_in1
)
sum_data[h_ind - h_index[0], w_ind - w_index[0]] = (
ivy.array(ch[h_ind, w_ind])
* scale_x
* scale_y
* area
)
ret[i, j, h_dim, w_dim] = ivy.sum(sum_data)
else:
for w_dim in range(size[0]):
w_in, w_in1, w_index = _tf_area_indices(w_dim, scale[0])
sum_data = ivy.zeros((w_index[1] - w_index[0],))
for w_ind in range(w_index[0], w_index[1]):
scale_x = _tf_area_dim_scale(w_ind, w_in, scale[0], w_in1)
sum_data[w_ind - w_index[0]] = (
ivy.array(ch[w_ind]) * scale_x * area
)
ret[i, j, w_dim] = ivy.sum(sum_data)
return ret
def nearest_interpolate(x, dims, size, scale, exact):
off = 0.5 if exact else 0
for d in range(dims):
n = size[d]
offsets = (ivy.arange(n, dtype="float32") + off) * scale[d]
offsets = ivy.astype(ivy.floor(ivy.astype(offsets, "float32")), "int64")
num_dims_to_add = x.ndim - offsets.ndim
if num_dims_to_add > 0:
for _ in range(num_dims_to_add):
offsets = ivy.expand_dims(offsets, axis=0)
x = ivy.gather(x, offsets, axis=d + 2)
return x
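# Worked example (a sketch): resizing a length-8 axis down to size 4 uses
# scale 2.0, so with exact=False the offsets are floor([0, 1, 2, 3] * 2.0) =
# [0, 2, 4, 6], and every second element is gathered along that axis.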
def _triangle_kernel(x):
return ivy.maximum(0, 1 - ivy.abs(x))
def _cubic_kernel(x):
out = ((1.5 * x - 2.5) * x) * x + 1.0
out = ivy.where(x >= 1.0, ((-0.5 * x + 2.5) * x - 4.0) * x + 2.0, out)
return ivy.where(x >= 2.0, 0.0, out)
def _lanczos_kernel(radius, x):
y = radius * ivy.sin(ivy.pi * x) * ivy.sin(ivy.pi * x / radius)
out = ivy.where(x != 0, ivy.divide(y, ivy.pi**2 * x**2), 1)
# the kernel has support |x| < radius, so zero it outside that window
return ivy.where(ivy.abs(x) >= radius, 0.0, out)
def _get_final_scale(input_size, output_size, align_corners, scale_factor):
scale = []
for i, (input, output) in enumerate(zip(input_size, output_size)):
if align_corners:
if output > 1:
scale.append((input - 1) / (output - 1))
else:
scale.append(1)
else:
scale.append(1 / scale_factor[i])
return scale
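# Worked example (a sketch): with align_corners=True, an axis of input size 4
# resized to output size 7 gets scale (4 - 1) / (7 - 1) = 0.5, so the sample
# grid hits both endpoints exactly.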
def _mitchellcubic_kernel(x):
absx = abs(x)
if absx < 1:
return (7 * absx**3 - 12 * absx**2 + 6) / 6
elif absx < 2:
return (-(absx**3) + 6 * absx**2 - 11 * absx + 6) / 6
else:
return 0
def _compute_weight_mat(
input_size,
output_size,
align_corners,
kernel_fn,
dim_scale,
):
if not align_corners:
sample_f = (ivy.arange(output_size) + 0.5) * dim_scale - 0.5
else:
sample_f = ivy.arange(output_size) * dim_scale
x = ivy.abs(
ivy.expand_dims(sample_f) - ivy.expand_dims(ivy.arange(input_size), axis=-1)
)
weights = kernel_fn(x)
total_weight_sum = ivy.sum(weights, axis=0, keepdims=True)
weights = ivy.where(
ivy.abs(total_weight_sum) > 1000.0 * float(ivy.finfo("float32").eps),
ivy.divide(weights, ivy.where(total_weight_sum != 0, total_weight_sum, 1)),
0,
)
input_size_minus_0_5 = input_size if align_corners else input_size - 0.5
return ivy.where(
ivy.expand_dims(
ivy.logical_and(sample_f >= -0.5, sample_f <= input_size_minus_0_5)
),
weights,
0,
)
def _upsample_cubic_convolution1(x, A):
return ((A + 2) * x - (A + 3)) * x * x + 1
def _upsample_cubic_convolution2(x, A):
return ((A * x - 5 * A) * x + 8 * A) * x - 4 * A
def _upsample_get_cubic_coefficients(t):
A = -0.75
return (
_upsample_cubic_convolution2(t + 1.0, A),
_upsample_cubic_convolution1(t, A),
_upsample_cubic_convolution1(1.0 - t, A),
_upsample_cubic_convolution2(2.0 - t, A),
)
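# The four weights above form a partition of unity for t in [0, 1] (a sketch):
# at t = 0 they evaluate to (0, 1, 0, 0), so cubic upsampling reproduces the
# source samples exactly at grid points.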
def _upsample_cubic_interp1d(coeffs, ts):
coeffs2 = _upsample_get_cubic_coefficients(ts)
return _sum_tensors(c1 * c2 for (c1, c2) in zip(coeffs, coeffs2))
def _sum_tensors(ts):
return _reduce(ivy.add, ts)
def _upsample_bicubic2d_default(
a,
output_size,
scale,
align_corners,
):
N, C, iH, iW = a.shape
oH, oW = output_size
def compute_source_index(scale, dst_index, align_corners):
if align_corners:
return scale * dst_index
else:
return scale * (dst_index + 0.5) - 0.5
N_idx = ivy.reshape(ivy.arange(N), (N, 1, 1, 1)).astype(ivy.int64)
C_idx = ivy.reshape(ivy.arange(C), (1, C, 1, 1)).astype(ivy.int64)
out_y = ivy.reshape(ivy.arange(oH), ((1, 1, oH, 1)))
out_x = ivy.reshape(ivy.arange(oW), ((1, 1, 1, oW)))
scale_y, scale_x = scale
real_x = compute_source_index(scale_x, out_x, align_corners)
in_x = ivy.floor(real_x)
t_x = real_x - in_x
ix = ivy.astype(in_x, ivy.int64)
real_y = compute_source_index(scale_y, out_y, align_corners)
in_y = ivy.floor(real_y)
t_y = real_y - in_y
iy = ivy.astype(in_y, ivy.int64)
iys_ofs = (iy - 1, iy, iy + 1, iy + 2)
ixs_ofs = (ix - 1, ix, ix + 1, ix + 2)
def load_bounded(ys, xs):
y_idx = ivy.clip(ys, 0, iH - 1)
x_idx = ivy.clip(xs, 0, iW - 1)
return a[N_idx, C_idx, y_idx, x_idx]
def get_x_interp(y):
coeffs_x = tuple(load_bounded(y, x_ofs) for x_ofs in ixs_ofs)
return _upsample_cubic_interp1d(coeffs_x, t_x)
coeffs_y = tuple(get_x_interp(y_ofs) for y_ofs in iys_ofs)
result = _upsample_cubic_interp1d(coeffs_y, t_y)
return result
def area_interpolate(x, dims, size, scale):
ret = ivy.zeros(x.shape[:2] + size)
for i, ba in enumerate(x):
for j, ch in enumerate(ba):
if dims == 3:
for d_dim in range(size[0]):
for h_dim in range(size[1]):
for w_dim in range(size[2]):
d_index = (
int(d_dim * scale[0]),
math.ceil((d_dim + 1) * scale[0]),
)
h_index = (
int(h_dim * scale[1]),
math.ceil((h_dim + 1) * scale[1]),
)
w_index = (
int(w_dim * scale[2]),
math.ceil((w_dim + 1) * scale[2]),
)
scale_z = d_index[1] - d_index[0]
scale_y = h_index[1] - h_index[0]
scale_x = w_index[1] - w_index[0]
area = scale_z * scale_y * scale_x
ret[i, j, d_dim, h_dim, w_dim] = ivy.sum(
ch[
d_index[0] : d_index[1],
h_index[0] : h_index[1],
w_index[0] : w_index[1],
]
) * (1 / area)
elif dims == 2:
for h_dim in range(size[0]):
for w_dim in range(size[1]):
h_index = (
int(h_dim * scale[0]),
math.ceil((h_dim + 1) * scale[0]),
)
w_index = (
int(w_dim * scale[1]),
math.ceil((w_dim + 1) * scale[1]),
)
scale_y = h_index[1] - h_index[0]
scale_x = w_index[1] - w_index[0]
area = scale_y * scale_x
ret[i, j, h_dim, w_dim] = ivy.sum(
ch[h_index[0] : h_index[1], w_index[0] : w_index[1]]
) * (1 / area)
else:
for w_dim in range(size[0]):
w_index = (
int(w_dim * scale[0]),
math.ceil((w_dim + 1) * scale[0]),
)
scale_x = w_index[1] - w_index[0]
ret[i, j, w_dim] = ivy.sum(ch[w_index[0] : w_index[1]]) * (
1 / scale_x
)
return ret
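# Illustrative sketch: area-downscaling a length-4 signal to size 2 with scale
# (2,) averages non-overlapping windows, e.g.
#   area_interpolate(ivy.array([[[1., 2., 3., 4.]]]), 1, (2,), [2.0])
#   # -> ivy.array([[[1.5, 3.5]]])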
def get_interpolate_kernel(mode):
kernel_func = _triangle_kernel
if mode == "tf_bicubic":
        def kernel_func(inputs):  # noqa: F811
return _cubic_kernel(inputs)
elif mode == "lanczos3":
def kernel_func(inputs):
return _lanczos_kernel(3, inputs)
elif mode == "lanczos5":
def kernel_func(inputs):
return _lanczos_kernel(5, inputs)
return kernel_func
def generate_einsum_equation(dim):
alphabet = "abcdefghijklmnopqrstuvwxyz"
input_indices = alphabet[: dim + 2]
output_indices = [alphabet[2 + i] + alphabet[2 + dim + i] for i in range(dim)]
contraction_indices = ",".join([input_indices, *output_indices])
    output = input_indices[:2] + "".join([oi[-1] for oi in output_indices])
einsum_string = contraction_indices + "->" + output
return einsum_string
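# e.g. generate_einsum_equation(2) -> "abcd,ce,df->abef": each spatial axis of
# the input (c, d) is contracted against its own (input_size, output_size)
# weight matrix.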
def _interpolate_with_kernel(x, dims, size, input_size, align_corners, scale, mode):
equation = generate_einsum_equation(dims)
kernel_func = get_interpolate_kernel(mode)
operands = []
for m, n, s in zip(input_size, size, scale):
w = _compute_weight_mat(m, n, align_corners, kernel_func, s).astype(x.dtype)
operands.append(w)
return ivy.einsum(equation, x, *operands)
@handle_exceptions
@handle_nestable
@handle_partial_mixed_function
@inputs_to_ivy_arrays
@handle_array_function
def interpolate(
x: Union[ivy.Array, ivy.NativeArray],
size: Union[Sequence[int], int],
/,
*,
mode: Literal[
"linear",
"bilinear",
"trilinear",
"nd",
"nearest",
"area",
"nearest_exact",
"tf_area",
"tf_bicubic",
"bicubic",
"mitchellcubic",
"lanczos3",
"lanczos5",
"gaussian",
] = "linear",
scale_factor: Optional[Union[Sequence[int], int]] = None,
recompute_scale_factor: Optional[bool] = None,
align_corners: bool = False,
antialias: bool = False, # ToDo: add support for antialias
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Down/up samples the input to the given size. The algorithm used for
interpolation is determined by mode.
Parameters
----------
x
        Input array. Must have the shape
[batch x channels x [optional depth] x [optional height] x width].
size
Output size.
mode
Interpolation mode. Can be one of the following:
- linear
- bilinear
- trilinear
- nd
- nearest
- nearest-exact
- area
- tf_area
- bicubic
- tf_bicubic
- mitchellcubic
- lanczos3
- lanczos5
- gaussian
scale_factor
        Multiplier for the spatial size that defines the output size
        (mutually exclusive with `size`).
recompute_scale_factor
If True, then scale_factor must be provided and scale_factor is used to
compute the output size. The computed output size will be used to infer new
scales for the interpolation. If recompute_scale_factor is False, then size
or scale_factor will be used directly for interpolation.
align_corners
        If True, the corner pixels of the input and output tensors are aligned,
        thus preserving the values at the corner pixels. If False, the corner
pixels are not aligned, and the interpolation uses edge value padding for
out-of-boundary values.
out
Optional output array, for writing the result to. It must
have a shape that the inputs broadcast to.
Returns
-------
resized array
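    Examples
    --------
    Shape-level sketch (illustrative; the interpolated values depend on the
    chosen mode and backend):
    >>> x = ivy.array([[[1., 2., 3., 4.]]])  # (batch, channels, width)
    >>> y = ivy.interpolate(x, 2, mode="linear")
    >>> tuple(y.shape)
    (1, 1, 2)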
"""
input_size = ivy.shape(x)[2:]
dims = len(input_size)
if ivy.exists(size) and ivy.exists(scale_factor):
raise ivy.utils.exceptions.IvyException(
"only one of size or scale_factor should be defined"
)
elif ivy.exists(size) and not ivy.exists(scale_factor):
if isinstance(size, (list, tuple)):
ivy.utils.assertions.check_equal(
len(size),
dims,
inverse=False,
message=(
"Input and output must have the same number of spatial dimensions,"
f" but got input with {list(input_size)} spatial dimensions and"
f" output size {size}."
),
as_array=False,
)
elif ivy.exists(scale_factor) and not ivy.exists(size):
if isinstance(scale_factor, (list, tuple)):
ivy.utils.assertions.check_equal(
len(scale_factor),
dims,
inverse=False,
message=(
"Input and scale_factor must have the same number of spatial"
f" dimensions, but got input with {list(input_size)} spatial"
f" dimensions and scale_factor {scale_factor}."
),
as_array=False,
)
else:
raise ivy.utils.exceptions.IvyException(
"either size or scale_factor should be defined"
)
if ivy.exists(size) and recompute_scale_factor is not None:
raise ivy.utils.exceptions.IvyException(
"recompute_scale_factor is not meaningful with an explicit size."
)
if ivy.get_num_dims(x) != 4 and mode == "bilinear":
raise ivy.utils.exceptions.IvyException(
f"Got {x.ndim}D input, but bilinear mode needs 4D input"
)
if ivy.get_num_dims(x) != 5 and mode == "trilinear":
raise ivy.utils.exceptions.IvyException(
f"Got {x.ndim}D input, but trilinear mode needs 5D input"
)
if ivy.get_num_dims(x) != 3 and mode == "linear":
raise ivy.utils.exceptions.IvyException(
f"Got {x.ndim}D input, but trilinear mode needs 3D input"
)
size, scale_factor = _get_size(scale_factor, size, dims, input_size)
ivy.utils.assertions.check_true(
all(s > 0 for s in size),
message=f"output sizes should be greater than 0, but got {size}",
)
if all(a == b for a, b in zip(size, input_size)):
ret = x
else:
if recompute_scale_factor:
scale_factor = [ivy.divide(size[i], input_size[i]) for i in range(dims)]
else:
scale_factor = [
1 if input_size[i] == size[i] else scale_factor[i] for i in range(dims)
]
scale = _get_final_scale(input_size, size, align_corners, scale_factor)
if mode in [
"linear",
"bilinear",
"trilinear",
"nd",
"tf_bicubic",
"lanczos3",
"lanczos5",
]:
ret = _interpolate_with_kernel(
x,
dims,
size,
input_size,
align_corners,
scale,
mode,
)
elif mode == "bicubic":
ret = _upsample_bicubic2d_default(x, size, scale, align_corners)
elif mode in ["nearest-exact", "nearest"]:
ret = nearest_interpolate(x, dims, size, scale, mode == "nearest-exact")
elif mode == "area":
ret = area_interpolate(x, dims, size, scale)
elif mode == "mitchellcubic":
batch, channels, in_height, in_width = x.shape
out_height, out_width = size
scale_h, scale_w = scale
ret = ivy.zeros((batch, channels, out_height, out_width))
for i in range(out_height):
for j in range(out_width):
p_i = i * scale_h
p_j = j * scale_w
left = int(math.floor(p_j - 2))
right = int(math.ceil(p_j + 2))
top = int(math.floor(p_i - 2))
bottom = int(math.ceil(p_i + 2))
                kernel_w = ivy.array(
                    [
                        _mitchellcubic_kernel((p_j - k) * scale_w)
                        for k in range(left, right)
                    ]
                )
                kernel_h = ivy.array(
                    [
                        _mitchellcubic_kernel((p_i - k) * scale_h)
                        for k in range(top, bottom)
                    ]
                )
left_pad = max(0, -left)
right_pad = max(0, right - in_width)
top_pad = max(0, -top)
bottom_pad = max(0, bottom - in_height)
pad_width = [(0, 0), (0, 0)] * (len(x.shape) - 3) + [
(top_pad, bottom_pad),
(left_pad, right_pad),
]
padded_x = ivy.pad(x, pad_width, mode="edge")
for b in range(batch):
for c in range(channels):
patch = padded_x[
b,
c,
top + top_pad : bottom + top_pad,
left + left_pad : right + left_pad,
]
ret[b, c, i, j] = ivy.sum(
kernel_h[:, ivy.newaxis]
* patch
* kernel_w[ivy.newaxis, :]
)
elif mode == "gaussian":
ratio_h, ratio_w = scale
sigma = max(ratio_h, ratio_w) * 0.5
kernel_size = 2 * int(math.ceil(3 * sigma)) + 1
kernel_h = ivy.zeros((kernel_size,), dtype=x.dtype)
kernel_w = ivy.zeros((kernel_size,), dtype=x.dtype)
for i in range(kernel_h.size):
kernel_h[i] = ivy.exp(-0.5 * ((i - kernel_h.size // 2) / sigma) ** 2)
kernel_w[i] = ivy.exp(-0.5 * ((i - kernel_w.size // 2) / sigma) ** 2)
kernel_h /= ivy.sum(kernel_h)
kernel_w /= ivy.sum(kernel_w)
pad_width = [(0, 0), (0, 0)] * (len(x.shape) - 3) + [
(int(math.ceil(3 * sigma)), int(math.ceil(3 * sigma))),
(int(math.ceil(3 * sigma)), int(math.ceil(3 * sigma))),
]
padded_x = ivy.pad(x, pad_width, mode="constant")
output_shape = x.shape[:2] + size
ret = ivy.zeros(output_shape, dtype=x.dtype)
for i in range(size[0]):
for j in range(size[1]):
p_i = int(math.floor(i * ratio_h + int(math.ceil(3 * sigma))))
p_j = int(math.floor(j * ratio_w + int(math.ceil(3 * sigma))))
for b in range(x.shape[0]):
for c in range(x.shape[1]):
patch = padded_x[
b,
c,
p_i - kernel_size // 2 : p_i + kernel_size // 2 + 1,
p_j - kernel_size // 2 : p_j + kernel_size // 2 + 1,
]
ret[b, c, i, j] = ivy.sum(
kernel_h[ivy.newaxis, :]
* patch
* kernel_w[:, ivy.newaxis]
)
elif mode == "tf_area":
ret = _tf_area_interpolate(x, size, scale, dims)
ret = ivy.astype(ret, ivy.dtype(x))
if ivy.exists(out):
return ivy.inplace_update(out, ret)
return ret
def _get_size(scale_factor, size, dims, input_shape):
if scale_factor is not None:
if isinstance(scale_factor, (float, int)):
scale_factor = [scale_factor] * dims
elif isinstance(scale_factor, (tuple, list)) and len(scale_factor) != dims:
scale_factor = [scale_factor[0]] * dims
size = tuple(
int(math.floor(input_shape[i] * scale_factor[i])) for i in range(dims)
)
else:
size = (size,) * dims if isinstance(size, int) else tuple(size)
scale_factor = [ivy.divide(size[i], input_shape[i]) for i in range(dims)]
return size, scale_factor
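# e.g. _get_size(2, None, 2, (4, 6)) -> ((8, 12), [2, 2]): an int scale_factor
# is broadcast across the spatial dims and the output size is
# floor(input_size * scale_factor) per dimension.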
def _output_ceil_shape(w, f, p, s):
return math.ceil((w - f + p) / s) + 1
def _padding_ceil_mode(w, f, p, s, return_added_padding=False):
remaining_pixels = (w - f + p[0]) % s
added_padding = 0
if s > 1 and remaining_pixels != 0 and f > 1:
input_size = w + sum(p)
# making sure that the remaining pixels are supposed
# to be covered by the window
# they won't be covered if stride is big enough to skip them
if input_size - remaining_pixels - (f - 1) + s > input_size:
return p
output_shape = _output_ceil_shape(
w,
f,
sum(p),
s,
)
# calculating new padding with ceil_output_shape
new_pad = (output_shape - 1) * s + f - w
# updating pad_list with new padding by adding it to the end
added_padding = new_pad - sum(p)
p = (
p[0],
p[1] + added_padding,
)
if return_added_padding:
return p, added_padding
return p
interpolate.mixed_backend_wrappers = {
"to_add": (
"handle_out_argument",
"inputs_to_native_arrays",
"outputs_to_ivy_arrays",
),
"to_skip": ("inputs_to_ivy_arrays", "handle_partial_mixed_function"),
}
def _compute_idx(in_size, out_size, device):
out_range = ivy.arange(out_size, device=device, dtype=ivy.int64)
i0 = ivy.trunc_divide(out_range * in_size, out_size).astype(ivy.int64)
maxlength = in_size // out_size + 1
in_size_mod = in_size % out_size
# adaptive = True iff there are kernels with different lengths
adaptive = in_size_mod != 0 and out_size % in_size_mod != 0
if adaptive:
maxlength += 1
elif in_size_mod == 0:
maxlength -= 1
range_max = ivy.arange(maxlength, device=device, dtype=ivy.int64)
idx = ivy.expand_dims(i0, axis=-1) + range_max
if adaptive:
maxval = ivy.full_like(idx, fill_value=in_size - 1)
idx = ivy.minimum(idx, maxval)
i1 = ivy.trunc_divide(
(out_range + 1) * in_size + out_size - 1, out_size
).astype(ivy.int64)
length = i1 - i0
else:
length = maxlength
return idx, length, range_max, adaptive
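# e.g. for in_size=5, out_size=3 the pooling windows are [0, 2), [1, 4), [3, 5):
#   i0 = [0, 1, 3], adaptive is True (5 % 3 != 0 and 3 % 2 != 0), and
#   idx = [[0, 1, 2], [1, 2, 3], [3, 4, 4]] (clipped), with length = [2, 3, 2]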
def _expand_to_dim(x, dim):
for _ in range(dim - len(x.shape)):
x = ivy.expand_dims(x, axis=-1)
return x
def _mask(vals, length, range_max, dim, mask_value=0.0):
if isinstance(length, int):
return vals, length
else:
assert dim < 0
mask = ivy.greater_equal(range_max, ivy.expand_dims(length, axis=-1))
if dim == -2:
mask = _expand_to_dim(mask, 4)
vals = ivy.where(mask, ivy.array(mask_value, device=vals.device), vals)
length = _expand_to_dim(length, -dim)
return vals, length
@handle_nestable
@inputs_to_ivy_arrays
def adaptive_max_pool2d(
input: Union[ivy.Array, ivy.NativeArray],
output_size: Union[Sequence[int], int],
):
"""Apply a 2D adaptive maximum pooling over an input signal composed of
several input planes.
Parameters
----------
input
Input array. Must have shape (N, C, H_in, W_in) or (C, H_in, W_in) where N is
the batch dimension, C is the feature dimension, and H_in and W_in are the 2
spatial dimensions.
output_size
Spatial output size.
Returns
-------
The result of the pooling operation. Will have shape (N, C, S_0, S_1) or
(C, S_0, S_1), where S = `output_size`
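    Examples
    --------
    Illustrative sketch (evenly divisible case; values verified by hand):
    >>> x = ivy.reshape(ivy.arange(16, dtype=ivy.float32), (1, 1, 4, 4))
    >>> ivy.adaptive_max_pool2d(x, 2)
    ivy.array([[[[ 5.,  7.],
                 [13., 15.]]]])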
"""
squeeze = False
if input.ndim == 3:
input = ivy.expand_dims(input, axis=0)
squeeze = True
elif input.ndim != 4:
raise ivy.utils.exceptions.IvyException(
f"Got {len(input.shape)}D input, but only 3D and 4D inputs are supported.",
)
if isinstance(output_size, int):
output_size = (output_size, output_size)
if all(i_s % o_s == 0 for i_s, o_s in zip(input.shape[-2:], output_size)):
stride = tuple(i_s // o_s for i_s, o_s in zip(input.shape[-2:], output_size))
        kernel_size = stride  # when evenly divisible, the kernel size equals the stride
pooled_output = ivy.max_pool2d(
input, kernel_size, stride, "VALID", data_format="NCHW"
)
if squeeze:
return ivy.squeeze(pooled_output, axis=0)
return pooled_output
idxh, length_h, range_max_h, adaptive_h = _compute_idx(
input.shape[-2], output_size[-2], input.device
)
idxw, length_w, range_max_w, adaptive_w = _compute_idx(
input.shape[-1], output_size[-1], input.device
)
# to numpy and back in order to bypass a slicing error in tensorflow
vals = ivy.array(
input.to_numpy()[..., _expand_to_dim(idxh, 4), idxw], device=input.device
)
if not adaptive_h and not adaptive_w:
ret = ivy.max(vals, axis=(-3, -1))
ret = ivy.squeeze(ret, axis=0) if squeeze else ret
return ret
vals, length_h = _mask(
vals, length_h, range_max_h, dim=-2, mask_value=float("-inf")
)
vals, length_w = _mask(
vals, length_w, range_max_w, dim=-1, mask_value=float("-inf")
)
ret = None
for i, j in itertools.product(range(vals.shape[-3]), range(vals.shape[-1])):
if ret is None:
ret = vals[..., i, :, j]
else:
ret = ivy.maximum(ret, vals[..., i, :, j])
pooled_output = ret.astype(vals.dtype)
pooled_output = ivy.squeeze(pooled_output, axis=0) if squeeze else pooled_output
return pooled_output
adaptive_max_pool2d.mixed_backend_wrappers = {
"to_add": (
"handle_backend_invalid",
"inputs_to_native_arrays",
"outputs_to_ivy_arrays",
"handle_device",
),
"to_skip": ("inputs_to_ivy_arrays",),
}
@handle_nestable
@inputs_to_ivy_arrays
def adaptive_max_pool3d(
input: Union[ivy.Array, ivy.NativeArray],
output_size: Union[Sequence[int], int],
):
"""Apply a 3D adaptive maximum pooling over an input signal composed of
several input planes.
Parameters
----------
input
Input array. Must have shape (N, C, D_in, H_in, W_in) or (C, D_in, H_in, W_in)
where N is the batch dimension, C is the feature dimension, and D_in, H_in,
and W_in are the 3 spatial dimensions.
output_size
Spatial output size.
Returns
-------
The result of the pooling operation. Will have shape (N, C, D_out, H_out, W_out)
or (C, D_out, H_out, W_out), where D_out, H_out, W_out = `output_size`
"""
squeeze = False
if input.ndim == 4:
input = ivy.expand_dims(input, axis=0)
squeeze = True
elif input.ndim != 5:
raise ivy.utils.exceptions.IvyException(
f"Got {len(input.shape)}D input, but only 4D and 5D inputs are supported.",
)
if isinstance(output_size, int):
output_size = (output_size, output_size, output_size)
if all(i_s % o_s == 0 for i_s, o_s in zip(input.shape[-3:], output_size)):
stride = tuple(i_s // o_s for i_s, o_s in zip(input.shape[-3:], output_size))
kernel_size = stride
pooled_output = ivy.max_pool3d(
input, kernel_size, stride, "VALID", data_format="NCDHW"
)
if squeeze:
return ivy.squeeze(pooled_output, axis=0)
return pooled_output
idxd, length_d, range_max_d, adaptive_d = _compute_idx(
input.shape[-3], output_size[-3], input.device
)
idxh, length_h, range_max_h, adaptive_h = _compute_idx(
input.shape[-2], output_size[-2], input.device
)
idxw, length_w, range_max_w, adaptive_w = _compute_idx(
input.shape[-1], output_size[-1], input.device
)
# to numpy and back in order to bypass a slicing error in tensorflow
vals = ivy.array(
input.to_numpy()[..., _expand_to_dim(idxd, 5), _expand_to_dim(idxh, 4), idxw],
device=input.device,
)
if not (adaptive_d or adaptive_h or adaptive_w):
ret = ivy.max(vals, axis=(-3, -1))
ret = ivy.squeeze(ret, axis=0) if squeeze else ret
return ret
vals, length_d = _mask(
vals, length_d, range_max_d, dim=-3, mask_value=float("-inf")
)
vals, length_h = _mask(
vals, length_h, range_max_h, dim=-2, mask_value=float("-inf")
)
vals, length_w = _mask(
vals, length_w, range_max_w, dim=-1, mask_value=float("-inf")
)
ret = None
for i, j, k in itertools.product(
range(vals.shape[-4]), range(vals.shape[-2]), range(vals.shape[-1])
):
if ret is None:
ret = vals[..., i, :, j, k]
else:
ret = ivy.maximum(ret, vals[..., i, :, j, k])
pooled_output = ret.astype(vals.dtype)
pooled_output = ivy.squeeze(pooled_output, axis=0) if squeeze else pooled_output
return pooled_output
adaptive_max_pool3d.mixed_backend_wrappers = {
"to_add": (
"handle_backend_invalid",
"inputs_to_native_arrays",
"outputs_to_ivy_arrays",
"handle_device",
),
"to_skip": ("inputs_to_ivy_arrays",),
}
@handle_nestable
@inputs_to_ivy_arrays
def adaptive_avg_pool1d(
input: Union[ivy.Array, ivy.NativeArray],
output_size: int,
) -> ivy.Array:
"""Apply a 1D adaptive average pooling over an input signal composed of
several input planes.
Parameters
----------
input
Input array. Must have shape (N, C, L_in) or (C, L_in) where N is
the batch dimension, C is the feature dimension, and L_in is the spatial
dimension.
output_size
Spatial output size.
Returns
-------
The result of the pooling operation. Will have shape (N, C, L_out) or
(C, L_out), where L_out = `output_size`
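    Examples
    --------
    Illustrative sketch (evenly divisible case; values verified by hand):
    >>> x = ivy.array([[[1., 2., 3., 4., 5., 6.]]])
    >>> ivy.adaptive_avg_pool1d(x, 2)
    ivy.array([[[2., 5.]]])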
"""
squeeze = False
if input.ndim == 2:
input = ivy.expand_dims(input, axis=0)
squeeze = True
elif input.ndim != 3:
raise ivy.utils.exceptions.IvyException(
f"Got {len(input.shape)}D input, but only 2D and 3D inputs are supported.",
)
if input.shape[-1] % output_size == 0:
stride = input.shape[-1] // output_size
kernel_size = input.shape[-1] - (output_size - 1) * stride
pooled_output = ivy.avg_pool1d(
input, kernel_size, stride, "VALID", data_format="NCW"
)
if squeeze:
return ivy.squeeze(pooled_output, axis=0)
return pooled_output
idxw, length_w, range_max_w, adaptive_w = _compute_idx(
input.shape[-1], output_size, input.device
)
# to numpy and back in order to bypass a slicing error in tensorflow
vals = ivy.array(input.to_numpy()[..., idxw])
if not adaptive_w:
ret = ivy.mean(vals, axis=-1)
ret = ivy.squeeze(ret, axis=0) if squeeze else ret
return ret
vals, length_w = _mask(vals, length_w, range_max_w, dim=-1)
ret = None
for i in range(vals.shape[-1]):
if ret is None:
ret = vals[..., i]
else:
ret = ret + vals[..., i]
pooled_output = ret / length_w.astype(ret.dtype)
pooled_output = ivy.squeeze(pooled_output, axis=0) if squeeze else pooled_output
return pooled_output
adaptive_avg_pool1d.mixed_backend_wrappers = {
"to_add": (
"handle_backend_invalid",
"inputs_to_native_arrays",
"outputs_to_ivy_arrays",
"handle_device",
),
"to_skip": ("inputs_to_ivy_arrays",),
}
@handle_exceptions
@handle_nestable
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
def adaptive_avg_pool2d(
input: Union[ivy.Array, ivy.NativeArray],
output_size: Union[Sequence[int], int],
/,
*,
data_format: str = "NHWC",
) -> ivy.Array:
"""Apply a 2D adaptive average pooling over an input signal composed of
several input planes.
Parameters
----------
input
A 3D or 4D input array. Should have a floating-point data type.
output_size
Spatial output size.
data_format
"NHWC" or "NCHW". Defaults to "NHWC".
Returns
-------
The result of the pooling operation. Will have shape (N, C, S_0, S_1) or
(C, S_0, S_1), where S = `output_size`
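    Examples
    --------
    Illustrative sketch (note the channels-last default layout):
    >>> x = ivy.reshape(ivy.arange(8, dtype=ivy.float32), (1, 2, 2, 2))  # NHWC
    >>> ivy.adaptive_avg_pool2d(x, 1)
    ivy.array([[[[3., 4.]]]])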
"""
squeeze = False
if input.ndim == 3:
input = ivy.expand_dims(input, axis=0)
squeeze = True
elif input.ndim != 4:
raise ivy.utils.exceptions.IvyException(
f"Got {len(input.shape)}D input, but only 3D and 4D inputs are supported.",
)
permuted_input = False
if data_format == "NHWC":
input = ivy.permute_dims(input, (0, input.ndim - 1, *range(1, input.ndim - 1)))
data_format = "NCHW"
permuted_input = True
if isinstance(output_size, int):
output_size = (output_size, output_size)
if all(i_s % o_s == 0 for i_s, o_s in zip(input.shape[-2:], output_size)):
stride = tuple(i_s // o_s for i_s, o_s in zip(input.shape[-2:], output_size))
        kernel_size = stride  # when evenly divisible, the kernel size equals the stride
pooled_output = ivy.avg_pool2d(
input, kernel_size, stride, "VALID", data_format="NCHW"
)
pooled_output = (
ivy.permute_dims(pooled_output, (0, *range(2, input.ndim), 1))
if permuted_input
else pooled_output
)
if squeeze:
return ivy.squeeze(pooled_output, axis=0)
return pooled_output
idxh, length_h, range_max_h, adaptive_h = _compute_idx(
input.shape[-2], output_size[-2], input.device
)
idxw, length_w, range_max_w, adaptive_w = _compute_idx(
input.shape[-1], output_size[-1], input.device
)
    # gather the pooling windows via fancy indexing over the precomputed indices
vals = input[..., _expand_to_dim(idxh, 4), idxw]
if not adaptive_h and not adaptive_w:
ret = ivy.mean(vals, axis=(-3, -1))
ret = (
ivy.permute_dims(ret, (0, *range(2, input.ndim), 1))
if permuted_input
else ret
)
ret = ivy.squeeze(ret, axis=0) if squeeze else ret
return ret
vals, length_h = _mask(vals, length_h, range_max_h, dim=-2)
vals, length_w = _mask(vals, length_w, range_max_w, dim=-1)
ret = None
for i, j in itertools.product(range(vals.shape[-3]), range(vals.shape[-1])):
if ret is None:
ret = vals[..., i, :, j]
else:
ret = ret + vals[..., i, :, j]
pooled_output = ret / (length_h * length_w).astype(vals.dtype)
pooled_output = (
ivy.permute_dims(pooled_output, (0, *range(2, input.ndim), 1))
if permuted_input
else pooled_output
)
pooled_output = ivy.squeeze(pooled_output, axis=0) if squeeze else pooled_output
return pooled_output
adaptive_avg_pool2d.mixed_backend_wrappers = {
"to_add": (
"handle_backend_invalid",
"inputs_to_native_arrays",
"outputs_to_ivy_arrays",
"handle_device",
),
"to_skip": ("inputs_to_ivy_arrays",),
}
def _conv_view(lhs, rhs_shape, window_strides, pads, pad_value):
def _pad(arr, pads, pad_value):
out = ivy.astype(
ivy.pad(
arr,
ivy.maximum(0, pads).to_list(),
mode="constant",
constant_values=ivy.to_scalar(pad_value),
),
arr.dtype,
)
slices = tuple(
_slice(abs(lo) if lo < 0 else 0, hi % dim if hi < 0 else None)
for (lo, hi), dim in zip(pads, arr.shape)
)
return out[slices]
if (
_min(lhs.ndim, len(rhs_shape)) < 2
or lhs.ndim != len(rhs_shape)
or lhs.shape[1] != rhs_shape[1]
):
raise ValueError("Dimension mismatch")
if len(window_strides) != len(rhs_shape) - 2:
raise ValueError("Wrong number of strides for spatial dimensions")
if len(pads) != len(rhs_shape) - 2:
raise ValueError("Wrong number of pads for spatial dimensions")
lhs = _pad(lhs, [(0, 0)] * 2 + list(pads), pad_value)
in_shape = lhs.shape[2:]
filter_shape = rhs_shape[2:]
dim = len(filter_shape)
out_strides = ivy.multiply(window_strides, lhs.strides[2:]).to_list()
view_strides = lhs.strides[:1] + tuple(out_strides) + lhs.strides[1:]
out_shape = [
(in_shape[i] - filter_shape[i]) // s + 1 for i, s in enumerate(window_strides)
]
view_shape = list(lhs.shape[:1]) + out_shape + rhs_shape[1:]
view = ivy.as_strided(lhs, view_shape, view_strides)
view_axes = list(range(view.ndim))
sum_axes = view_axes[-dim - 1 :]
rhs_axes = [view.ndim] + sum_axes
out_axes = [0, view.ndim] + list(range(1, dim + 1))
return view, view_axes, rhs_axes, out_axes
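# e.g. for lhs of shape (1, 1, 4, 4), rhs_shape [1, 1, 2, 2], strides (2, 2) and
# zero pads, the strided view has shape [1, 2, 2, 1, 2, 2]: batch, the 2x2 grid
# of output positions, then one full filter-sized window per position.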
def _dilate(operand, factors, fill_value):
outspace = list(operand.shape[:2]) + [
shape + (factors[i] - 1) * (shape - 1)
for i, shape in enumerate(operand.shape[2:])
]
out = ivy.full(outspace, fill_value, dtype=fill_value.dtype)
lhs_slices = tuple(_slice(None, None, step) for step in factors)
out[(_slice(None),) * 2 + lhs_slices] = operand
return out
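# e.g. dilating a spatial dim of length 3 by a factor of 2 yields length
# 3 + (2 - 1) * (3 - 1) = 5, with the fill value interleaved between elements:
# [a, b, c] -> [a, fill, b, fill, c]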
def _padtype_to_pads(in_shape, filter_shape, window_strides, padding):
if padding.upper() == "SAME":
out_shape = [
math.ceil(in_size / stride)
for in_size, stride in zip(in_shape, window_strides)
]
pad_sizes = [
_max((out_size - 1) * stride + filter_size - in_size, 0)
for out_size, stride, filter_size, in_size in zip(
out_shape, window_strides, filter_shape, in_shape
)
]
return [(pad_size // 2, pad_size - pad_size // 2) for pad_size in pad_sizes]
else:
return [(0, 0)] * len(in_shape)
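# e.g. _padtype_to_pads((5,), (3,), (2,), "SAME") -> [(1, 1)]: the output size
# is ceil(5 / 2) = 3, so 2 pad pixels in total are split evenly before and after.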
identities = {
"max": -float("inf"),
"min": float("inf"),
"add": 0,
"mul": 1,
"multiply": 1,
"logical_and": True,
"logical_or": False,
}
def _cast_init(init, dtype):
if not ivy.is_bool_dtype(dtype) and ivy.isinf(init):
if ivy.is_float_dtype(dtype):
info = ivy.finfo(dtype)
else:
info = ivy.iinfo(dtype)
if "float64" not in str(dtype):
init = info.max if init > 0 else info.min
return ivy.array(init, dtype=dtype)
def _get_identity(func, dtype, init):
func_name = func.__name__
if func_name in identities:
identity = identities[func_name]
return _cast_init(identity, dtype)
return init
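# e.g. _get_identity(ivy.max, ivy.int32, 0) looks up the "max" identity -inf
# and, since int32 cannot represent infinity, casts it to iinfo(int32).min.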
avg_pool2d.mixed_backend_wrappers = {
"to_add": (
"handle_out_argument",
"inputs_to_native_arrays",
"outputs_to_ivy_arrays",
),
"to_skip": ("inputs_to_ivy_arrays",),
}
@handle_exceptions
@handle_nestable
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
def sliding_window(
input: Union[ivy.Array, ivy.NativeArray],
kernel_size: Union[int, Sequence[int]],
/,
*,
stride: Union[int, Tuple[int, int]] = 1,
dilation: Union[int, Tuple[int, int]] = 1,
padding: Union[str, int, Tuple[int, int]] = "VALID",
) -> ivy.Array:
"""Slide a window of specified dimension over all elements of an array.
Parameters
----------
input
        An array representing the base area over which the window slides.
    kernel_size
Size of the sliding window for each dimension of the input.
stride
The stride of the sliding window for each dimension of input
padding
Either the string ‘SAME’ (padding with zeros evenly), the string ‘VALID’ (no
padding), or a sequence of n (low, high) integer pairs that give the padding to
apply before and after each spatial dimension.
dilation
The stride between elements within a sliding window, must be > 0.
Returns
-------
ret
The result of the sliding window operation.
Examples
--------
>>> x = ivy.array([[1, 2, 3, 4],
>>> [5, 6, 7, 8],
>>> [9, 10, 11, 12]])
>>> ivy.sliding_window(x, (2, 2))
ivy.array([[[ 1, 2, 5, 6],
[ 2, 3, 6, 7],
[ 3, 4, 7, 8]],
[[ 5, 6, 9, 10],
[ 6, 7, 10, 11],
[ 7, 8, 11, 12]]])
"""
    # these backends provide a native sliding_window implementation
    if ivy.current_backend_str() in ("torch", "tensorflow", "paddle"):
        return ivy.current_backend(input).sliding_window(
            input,
            kernel_size,
            stride=stride,
            dilation=dilation,
            padding=padding,
        )
# convert to 2D
n = len(input.shape)
if n > 2:
input = ivy.reshape(input, (input.shape[n - 2 :]))
k_size, stride, padding, dilation = map(
lambda x: tuple([x] * len(input.shape)) if isinstance(x, int) else x,
[kernel_size, stride, padding, dilation],
)
k_size = list(k_size)
if len(input.shape) != len(k_size):
while len(k_size) < len(input.shape):
k_size.append(k_size[-1])
k_size = tuple(k_size)
stride = list(stride)
if len(input.shape) != len(stride):
while len(stride) < len(input.shape):
stride.append(stride[-1])
stride = tuple(stride)
if not isinstance(padding, str):
if padding[0] == 0 and padding[-1] == 0:
padding = "VALID"
else:
padding = "SAME"
pads = _padtype_to_pads(input.shape, k_size, stride, padding)
input = input.reshape((1, 1) + input.shape)
    identity = ivy.array(0)
    if dilation:
        input = _dilate(input, dilation, identity)
    view = _conv_view(input, [1, 1] + list(k_size), stride, pads, identity)[0]
view = ivy.reshape(view, (*view.shape[1 : 1 + len(k_size)], -1))
return view
sliding_window.mixed_backend_wrappers = {
"to_add": (
"handle_backend_invalid",
"inputs_to_native_arrays",
"outputs_to_ivy_arrays",
"handle_device",
),
"to_skip": ("inputs_to_ivy_arrays",),
}
@handle_exceptions
@handle_nestable
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
def reduce_window(
operand: Union[ivy.Array, ivy.NativeArray],
init_value: Union[int, float],
computation: Callable,
window_dimensions: Union[int, Sequence[int]],
/,
*,
window_strides: Union[int, Sequence[int]] = 1,
padding: Union[str, int, Sequence[Tuple[int, int]]] = "VALID",
base_dilation: Union[int, Sequence[int]] = 1,
window_dilation: Union[int, Sequence[int]] = 1,
) -> ivy.Array:
"""Apply a reduction function to all elements in each window of an array.
Parameters
----------
operand
        An array representing the base area over which the window slides.
init_value
The starting value for the reduction.
computation
The reduction function to apply to elements in each window.
window_dimensions
A sequence containing the window dimensions.
window_strides
A sequence containing the window strides.
padding
Either the string ‘SAME’ (padding with zeros evenly), the string ‘VALID’ (no
padding), or a sequence of n (low, high) integer pairs that give the padding to
apply before and after each spatial dimension.
base_dilation
A sequence containing the base dilation values.
window_dilation
A sequence containing the window dilation values.
Returns
-------
ret
The result of the pooling-like operation.
Examples
--------
>>> x = ivy.array([[1, 2, 3, 4],
>>> [5, 6, 7, 8],
>>> [9, 10, 11, 12]])
>>> ivy.reduce_window(x, 0, ivy.add, (2, 2))
ivy.array([[14, 18, 22], [30, 34, 38]])
"""
# ToDo: add support for window_dilation
computation = _correct_ivy_callable(computation)
op = operand
init_value = _cast_init(init_value, op.dtype)
slid_wind_vals = ivy.sliding_window(
operand,
window_dimensions,
stride=window_strides,
dilation=base_dilation,
padding=padding,
)
ret = ivy.reduce(slid_wind_vals, init_value, computation, axes=-1)
return ret.astype(operand.dtype)
reduce_window.mixed_backend_wrappers = {
"to_add": (
"handle_backend_invalid",
"inputs_to_native_arrays",
"outputs_to_ivy_arrays",
"handle_device",
),
"to_skip": ("inputs_to_ivy_arrays",),
}
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
def fft2(
x: Union[ivy.Array, ivy.NativeArray],
*,
s: Optional[Sequence[int]] = None,
dim: Sequence[int] = (-2, -1),
norm: str = "backward",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
r"""Compute the 2-dimensional discrete Fourier Transform.
Parameters
----------
x
Input volume *[...,d_in,...]*,
where d_in indicates the dimension that needs FFT2.
s
sequence of ints, optional
Shape (length of each transformed axis) of the output (s[0] refers to axis 0,
s[1] to axis 1, etc.). This corresponds to n for fft(x, n). Along each axis,
if the given shape is smaller than that of the input, the input is cropped.
        If it is larger, the input is padded with zeros. If s is not given, the shape
of the input along the axes specified by axes is used.
dim
Axes over which to compute the FFT2. If not given, the last two axes are used.
A repeated index in axes means the transform over that axis is performed
multiple times. A one-element sequence means that a one-dimensional FFT is
performed.
norm
        Optional argument, "backward", "ortho" or "forward". Defaults to "backward".
"backward" indicates no normalization.
"ortho" indicates normalization by $\frac{1}{\sqrt{n}}$.
"forward" indicates normalization by $\frac{1}{n}$.
out
Optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
The result of the FFT2 operation.
Examples
--------
>>> x = ivy.array([[0, 0, 0, 0, 0],
... [1, 1, 1, 1, 1],
... [2, 2, 2, 2, 2],
... [3, 3, 3, 3, 3],
... [4, 4, 4, 4, 4]])
>>> y = ivy.fft2(x)
>>> print(y)
ivy.array([[ 50. +0.j , 0. +0.j , 0. +0.j ,
0. +0.j , 0. +0.j ],
[-12.5+17.20477401j, 0. +0.j , 0. +0.j ,
0. +0.j , 0. +0.j ],
[-12.5 +4.0614962j , 0. +0.j , 0. +0.j ,
0. +0.j , 0. +0.j ],
[-12.5 -4.0614962j , 0. +0.j , 0. +0.j ,
0. +0.j , 0. +0.j ],
[-12.5-17.20477401j, 0. +0.j , 0. +0.j ,
0. +0.j , 0. +0.j ]])
"""
return ivy.current_backend(x).fft2(x, s=s, dim=dim, norm=norm, out=out)
fft2.mixed_backend_wrappers = {
"to_add": ("handle_device",),
"to_skip": (),
}
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
def ifftn(
x: Union[ivy.Array, ivy.NativeArray],
s: Optional[Union[int, Tuple[int, ...]]] = None,
axes: Optional[Union[int, Tuple[int, ...]]] = None,
*,
norm: str = "backward",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
r"""Compute the N-dimensional inverse discrete Fourier Transform.
Parameters
----------
x
Input array of complex numbers.
s
Shape (length of transformed axis) of the output (`s[0]` refers to axis 0,
`s[1]` to axis 1, etc.). If given shape is smaller than that of the input,
the input is cropped. If larger, input is padded with zeros. If `s` is not
given, shape of input along axes specified by axes is used.
axes
Axes over which to compute the IFFT. If not given, last `len(s)` axes are
used, or all axes if `s` is also not specified. Repeated indices in axes
means inverse transform over that axis is performed multiple times.
norm
Indicates direction of the forward/backward pair of transforms is scaled
and with what normalization factor. "backward" indicates no normalization.
"ortho" indicates normalization by $\frac{1}{\sqrt{n}}$. "forward"
indicates normalization by $\frac{1}{n}$.
out
Optional output array for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
out
The truncated or zero-padded input, transformed along the axes indicated
        by axes, or by a combination of s and x, as explained in the parameters
section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
If an element of axes is larger than the number of axes of x.
Examples
--------
>>> x = ivy.array([[0.24730653+0.90832391j, 0.49495562+0.9039565j,
... 0.98193269+0.49560517j],
... [0.93280757+0.48075343j, 0.28526384+0.3351205j,
... 0.2343787 +0.83528011j],
... [0.18791352+0.30690572j, 0.82115787+0.96195183j,
... 0.44719226+0.72654048j]])
>>> y = ivy.ifftn(x)
>>> print(y)
ivy.array([[ 0.51476765+0.66160417j, -0.04319742-0.05411636j,
-0.015561 -0.04216015j],
[ 0.06310689+0.05347854j, -0.13392983+0.16052352j,
-0.08371392+0.17252843j],
[-0.0031429 +0.05421245j, -0.10446617-0.17747098j,
0.05344324+0.07972424j]])
>>> x = ivy.array([[0.24730653+0.90832391j, 0.49495562+0.9039565j,
... 0.98193269+0.49560517j],
... [0.93280757+0.48075343j, 0.28526384+0.3351205j,
... 0.2343787 +0.83528011j],
... [0.18791352+0.30690572j, 0.82115787+0.96195183j,
... 0.44719226+0.72654048j]])
>>> b = ivy.ifftn(x, s=[2, 1], axes=[0, 1], norm='ortho')
>>> print(b)
ivy.array([[ 0.8344667 +0.98222595j],
[-0.48472244+0.30233797j]])
"""
return ivy.current_backend(x).ifftn(x, s=s, axes=axes, norm=norm, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@to_native_arrays_and_back
@handle_device
def rfft(
x: Union[ivy.Array, ivy.NativeArray],
/,
*,
n: Optional[int] = None,
axis: int = -1,
norm: Literal["backward", "ortho", "forward"] = "backward",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute the one-dimensional discrete Fourier transform for real-valued
input.
.. note::
Applying the one-dimensional inverse discrete Fourier transform for
real-valued input to the output of this function must return the original
(i.e., non-transformed) input array within numerical accuracy
(i.e., irfft(rfft(x)) == x), provided that the transform and inverse
transform are performed with the same arguments
(axis and normalization mode) and consistent length.
.. note::
        If the input x contains an imaginary part, it is silently discarded.
Parameters
----------
x
input array. Must have a real-valued floating-point data type.
n
length of the transformed axis of the input. If
- n is greater than the length of the input array, the input array
is zero-padded to length n.
- n is less than the length of the input array, the input array is
trimmed to length n.
- n is not provided, the length of the transformed axis of the
output must equal the length of the input along the axis specified
by axis. Default is ``None``.
axis
axis (dimension) over which to compute the Fourier transform.
If not set, the last axis (dimension) is used. Default is ``-1``.
norm
normalization mode. Should be one of the following modes:
- 'backward': no normalization.
- 'ortho': normalize by 1/sqrt(n) (i.e., make the FFT orthonormal).
- 'forward': normalize by 1/n.
Default is ``backward``.
out
Optional output array, for writing the result to. It must
have a shape that the inputs broadcast to.
Returns
-------
ret
an array transformed along the axis (dimension) indicated by axis.
The returned array must have a complex-valued floating-point
data type determined by Type Promotion Rules.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the
`docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.max.html>`_
in the standard.
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With `ivy.Array` input:
>>> x = ivy.array([0,1,2])
>>> y = ivy.rfft(x)
>>> print(y)
ivy.array([ 3. +0.j , -1.5+0.8660254j])
>>> x = ivy.array([2.3,3.14,7.2])
>>> y = ivy.zeros(2)
>>> ivy.rfft(x, out=y)
>>> print(x)
ivy.array([2.29999995, 3.1400001 , 7.19999981])
>>> x = ivy.array([-1.2, 3.4, -5.6])
>>> ivy.rfft(x, n=4, out=x)
>>> print(x)
ivy.array([ -3.3999999+0.j , 4.3999996-3.4j, -10.2 +0.j ],
dtype=complex64)
With `ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0.,1.,2.]),
... b=ivy.array([3.,4.,5.]))
>>> y = ivy.rfft(x)
>>> print(y)
{
a: ivy.array([3.+0.j, -1.5+0.8660254j]),
b: ivy.array([12.+0.j, -1.5+0.8660254j])
}
"""
if axis is None:
axis = -1
if norm is None:
norm = "backward"
return ivy.current_backend().rfft(x, n=n, axis=axis, norm=norm, out=out)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_out_argument
@to_native_arrays_and_back
def rfftn(
x: Union[ivy.Array, ivy.NativeArray],
s: Optional[Sequence[int]] = None,
axes: Optional[Sequence[int]] = None,
*,
norm: Optional[str] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute the N-dimensional discrete Fourier Transform for real input.
Parameters
----------
x : array_like
Input array, taken to be real.
s : sequence of ints, optional
Shape (length along each transformed axis) to use from the input.
(s[0] refers to axis 0, s[1] to axis 1, etc.). The final element of s
corresponds to n for rfft(x, n), while for the remaining axes, it
corresponds to n for fft(x, n). Along any axis, if the given shape is
smaller than that of the input, the input is cropped. If it is larger,
the input is padded with zeros. If s is not given, the shape of the
input along the axes specified by axes is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last len(s) axes
are used, or all axes if s is also not specified.
norm : {"backward", "ortho", "forward"}, optional
Normalization mode. Default is "backward". Indicates which direction of
the forward/backward pair of transforms is scaled and with what
normalization factor.
out : array_like, optional
Optional output array to store the result of the computation. The shape
and dtype of this array must match the expected output.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes indicated
        by axes or by a combination of s and x, as explained in the parameters
section above. The length of the last axis transformed will be
s[-1] // 2 + 1, while the remaining transformed axes will have lengths
according to s, or unchanged from the input.
Raises
------
ValueError
If s and axes have different lengths.
IndexError
        If an element of axes is larger than the number of axes of x.
Examples
--------
>>> x = ivy.array([1, 2, 3, 4], dtype=ivy.float32)
>>> result = ivy.rfftn(x, s=(4,), axes=(0,))
>>> print(result)
ivy.array([10.+0.j, -2.+2.j, -2.+0.j])
>>> x = ivy.array([[1, 2, 3], [4, 5, 6]], dtype=ivy.float32)
>>> result = ivy.rfftn(x, s=(3, 4), axes=(0, 1))
>>> print(result)
ivy.array([[21. +0.j , -4. -7.j ,
7. +0.j ],
[-1.5 -12.99038106j, -5.33012702 +2.23205081j,
-0.5 -4.33012702j],
[-1.5 +12.99038106j, 3.33012702 -1.23205081j,
-0.5 +4.33012702j]])
"""
    if norm is None:
        norm = "backward"
    if axes is None:
        if s is None:
            # numpy-style default: transform over all axes
            axes = list(range(x.ndim))
            s = [x.shape[axis] for axis in axes]
        else:
            axes = list(range(x.ndim - len(s), x.ndim))
    elif s is None:
        s = [x.shape[axis] for axis in axes]
    elif len(s) != len(axes):
        raise ValueError("s and axes must have the same length.")
return ivy.current_backend(x).rfftn(x, s=s, axes=axes, norm=norm, out=out)
# stft
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def stft(
signals: Union[ivy.Array, ivy.NativeArray],
frame_length: int,
frame_step: int,
/,
*,
fft_length: Optional[int] = None,
window_fn: Optional[Callable] = None,
pad_end: bool = False,
name: Optional[str] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Container static method variant of ivy.stft.
This method simply wraps the function, and so the docstring for
ivy.stft also applies to this method with minimal changes.
Parameters
----------
signals
Input Arrays.
frame_length
An integer scalar Tensor. The window length in samples.
frame_step
An integer scalar Tensor. The number of samples to step.
fft_length, optional
An integer scalar Tensor. The size of the FFT to apply.
If not provided, uses the smallest power of 2 enclosing frame_length.
window_fn, optional
A callable that takes a window length and a dtype
keyword argument and returns a [window_length] Tensor of samples
in the provided datatype. If set to None, no windowing is used.
pad_end, optional
Whether to pad the end of signals with zeros when the provided frame length
and step produces a frame that lies partially past its end.
name, optional
An optional name for the operation.
out, optional
Optional output array for writing the result.
Returns
-------
ret
A [..., frames, fft_unique_bins] Tensor of
complex64/complex128 STFT values where fft_unique_bins is
fft_length // 2 + 1 (the unique components of the FFT).
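    Examples
    --------
    Shape-level sketch (assuming TensorFlow-style framing without pad_end):
    >>> signals = ivy.random_normal(shape=(16,))
    >>> y = ivy.stft(signals, 8, 4, fft_length=8)
    >>> tuple(y.shape)  # 1 + (16 - 8) // 4 frames, 8 // 2 + 1 unique bins
    (3, 5)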
"""
return ivy.current_backend(signals).stft(
signals,
frame_length,
frame_step,
fft_length=fft_length,
window_fn=window_fn,
pad_end=pad_end,
name=name,
out=out,
)
def _broadcast_pooling_helper(x, pool_dims: str = "2d", name: str = "padding"):
dims = {"1d": 1, "2d": 2, "3d": 3}
if isinstance(x, int):
return tuple(x for _ in range(dims[pool_dims]))
if len(x) == 1:
return tuple(x[0] for _ in range(dims[pool_dims]))
elif len(x) == dims[pool_dims]:
return tuple(x)
elif len(x) != dims[pool_dims]:
raise ValueError(
f"`{name}` must either be a single int, "
f"or a tuple of {dims[pool_dims]} ints. "
)
def _cal_output_shape(
input_shape,
padding,
kernel_size,
strides,
):
return [
(length - 1) * stride - 2 * pad + ker
for length, stride, pad, ker in zip(input_shape, strides, padding, kernel_size)
]
@handle_backend_invalid
@handle_nestable
@handle_partial_mixed_function
@to_native_arrays_and_back
@inputs_to_ivy_arrays
@handle_device
def max_unpool1d(
input: ivy.Array,
indices: ivy.Array,
kernel_size: Union[Tuple[int], int],
/,
*,
strides: Optional[Union[int, Tuple[int]]] = None,
padding: Union[int, Tuple[int]] = 0,
data_format: Optional[str] = "NCW",
) -> ivy.Array:
"""Compute a 1-D max unpooling given the 1-D pooled input x and its
indices.
Parameters
----------
input
Pooled input image *[batch_size, w, d_in]*.
indices
Indices obtained from the corresponding max pooling operation.
kernel_size
Size of the kernel i.e., the sliding window for each
dimension of input. *[w]*.
strides
The stride of the sliding window for each dimension of input.
    padding
        Padding applied to the pooled input, given as a single int or a
        per-dimension sequence of ints.
    data_format
        "NCW" or "NWC". Defaults to "NCW".
Returns
-------
ret
The result of the unpooling operation.
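    Examples
    --------
    Illustrative sketch (indices as produced by a kernel-2, stride-2 max pool):
    >>> pooled = ivy.array([[[2., 4.]]])
    >>> indices = ivy.array([[[1, 3]]])
    >>> ivy.max_unpool1d(pooled, indices, 2)
    ivy.array([[[0., 2., 0., 4.]]])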
"""
if strides is None:
strides = kernel_size
input_shape = input.shape
if data_format in ["NCW", "NWC"]:
revert = False
if data_format == "NWC":
x_len = (input_shape[1],)
input = input.permute_dims((0, 2, 1))
indices = indices.permute_dims((0, 2, 1))
revert = True
else:
x_len = (input_shape[-1],)
else:
raise ValueError(
f"data_format attr should be NCW or NWC but found {data_format}"
)
input_shape = input.shape
kernel_size = _broadcast_pooling_helper(kernel_size, "1d", name="kernel_size")
padding = _broadcast_pooling_helper(padding, "1d", name="padding")
strides = _broadcast_pooling_helper(strides, "1d", name="strides")
output_len = _cal_output_shape(x_len, padding, kernel_size, strides)
output_shape = list(input_shape[:-1]) + output_len
one_like_mask = ivy.ones_like(indices, dtype="int32")
batch_shape = [input_shape[0], 1, 1]
batch_range = ivy.reshape(
ivy.arange(0, output_shape[0], dtype="int32"), batch_shape
)
b = one_like_mask * batch_range
feature_range = ivy.arange(0, output_shape[1], dtype="int32").reshape((1, -1, 1))
f = one_like_mask * feature_range
indices = ivy.stack([b, f, indices]).reshape((3, -1))
output = ivy.zeros(output_shape, dtype=input.dtype)
indices = tuple(indices)
output[indices] = input.reshape((-1,))
if revert:
output = output.permute_dims([0, 2, 1])
return output
max_unpool1d.mixed_backend_wrappers = {
"to_add": (
"handle_backend_invalid",
"to_native_arrays_and_back",
"handle_device",
),
"to_skip": ("inputs_to_ivy_arrays", "handle_partial_mixed_function"),
}
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@inputs_to_ivy_arrays
@handle_device
def rnn(
step_function: Callable,
inputs: ivy.Array,
initial_states: List[ivy.Array],
/,
*,
go_backwards: bool = False,
mask: Optional[ivy.Array] = None,
constants: Optional[ivy.Array] = None,
unroll: bool = False,
input_length: Optional[int] = None,
time_major: bool = False,
zero_output_for_mask: bool = False,
return_all_outputs: bool = True,
):
"""Iterate over the time dimension of a tensor.
Parameters
----------
step_function
RNN step function.
inputs
Array of temporal data of shape (samples, time, ...).
initial_states
        List of arrays, each of shape (samples, state_size).
go_backwards
If True, do the iteration over the time dimension in reverse order and return
the reversed sequence.
mask
Binary array with shape (samples, time, 1), with a zero for every element that
is masked.
constants
List of constant values passed at each step.
unroll
        If True, unroll the iteration over the time dimension with a Python loop;
        otherwise use ivy.while_loop.
input_length
An integer or 1-D array, depending on whether the time dimension is
fixed-length. In case of variable length input, it is used for masking in case
there is no mask specified.
time_major
If True, the inputs and outputs will be in shape (timesteps, batch, ...) whereas
in the False case, it will be (batch, timesteps, ...).
zero_output_for_mask
        If True, the output for masked timesteps will be zeros, whereas in the False
        case, the output from the previous timestep is returned.
return_all_outputs
If True, return the recurrent outputs for all timesteps in the sequence. If
False, only return the output for the last timestep.
Returns
-------
ret
A tuple of
- the latest output of the rnn of shape (samples, ...)
- the output of the rnn of shape (samples, time, ...) if
return_all_outputs=True else (samples, 1, ...)
        - list of tensors, latest states returned by the step function, of shape
(samples, ...)
    Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
"""
# an ivy implementation of rnn inspired from
# https://github.com/keras-team/keras/blob/v2.14.0/keras/backend.py#L4723-L5202
# Swap the batch and timestep dim for the incoming tensor.
if not time_major:
inputs = ivy.permute_dims(inputs, (1, 0, *range(2, len(inputs.shape))))
time_steps = inputs.shape[0]
batch = inputs.shape[1]
time_steps_t = ivy.asarray(inputs.shape[0])
if mask is not None:
if not ivy.is_bool_dtype(mask):
mask = ivy.astype(mask, ivy.bool)
if len(mask.shape) == 2:
mask = ivy.expand_dims(mask, axis=-1)
if not time_major:
mask = ivy.permute_dims(mask, (1, 0, *range(2, len(mask.shape))))
if constants is None:
constants = []
def _expand_mask(mask_t, input_t, fixed_dim=1):
rank_diff = len(input_t.shape) - len(mask_t.shape)
for _ in range(rank_diff):
mask_t = ivy.expand_dims(mask_t, axis=-1)
multiples = [1] * fixed_dim + list(input_t.shape[fixed_dim:])
return ivy.tile(mask_t, repeats=multiples)
if unroll:
states = tuple(initial_states)
successive_states = []
successive_outputs = []
processed_input = ivy.unstack(inputs)
if go_backwards:
processed_input.reverse()
if mask is not None:
mask_list = ivy.unstack(mask)
if go_backwards:
mask_list.reverse()
for i in range(time_steps):
input_t = processed_input[i]
mask_t = mask_list[i]
output_t, new_states = step_function(
input_t, tuple(states) + tuple(constants)
)
tiled_mask_t = _expand_mask(mask_t, output_t)
if not successive_outputs:
prev_output = ivy.zeros_like(output_t)
else:
prev_output = successive_outputs[-1]
output = ivy.where(tiled_mask_t, output_t, prev_output)
tiled_mask_t = tuple(_expand_mask(mask_t, s) for s in states)
final_states = tuple(
ivy.where(m, s, ps)
for m, s, ps in zip(tiled_mask_t, new_states, states)
)
states = final_states
if return_all_outputs:
successive_outputs.append(output)
successive_states.append(states)
else:
successive_outputs = [output]
successive_states = [states]
last_output = successive_outputs[-1]
new_states = successive_states[-1]
outputs = ivy.stack(successive_outputs)
if zero_output_for_mask:
last_output = ivy.where(
_expand_mask(mask_list[-1], last_output),
last_output,
ivy.zeros_like(last_output),
)
outputs = ivy.where(
_expand_mask(mask, outputs, fixed_dim=2),
outputs,
ivy.zeros_like(outputs),
)
else:
for i in range(time_steps):
input_t = processed_input[i]
output_t, states = step_function(
input_t, tuple(states) + tuple(constants)
)
if return_all_outputs:
successive_outputs.append(output_t)
successive_states.append(states)
else:
successive_outputs = [output_t]
successive_states = [states]
last_output = successive_outputs[-1]
new_states = successive_states[-1]
outputs = ivy.stack(successive_outputs)
else:
states = tuple(initial_states)
input_time_zero = inputs[0]
output_time_zero, _ = step_function(
input_time_zero, tuple(initial_states) + tuple(constants)
)
if return_all_outputs:
if ivy.is_array(time_steps_t):
output_size = time_steps_t.to_scalar()
else:
output_size = time_steps_t
else:
output_size = 1
output_loop = ivy.empty(
(output_size, *output_time_zero.shape), dtype=output_time_zero.dtype
)
if go_backwards:
inputs = ivy.flip(inputs, axis=0)
time = 0
test_fn = lambda time, *_: time < time_steps_t
if mask is not None:
if go_backwards:
mask = ivy.flip(mask, axis=0)
def masking_fn(time):
return mask[time]
def compute_masked_output(mask_t, output, mask):
tiled_mask_t = tuple(
_expand_mask(mask_t, o, fixed_dim=len(mask_t.shape)) for o in output
)
return tuple(
ivy.where(m, o, fm) for m, o, fm in zip(tiled_mask_t, output, mask)
)
elif ivy.is_ivy_array(input_length):
if go_backwards:
max_len = ivy.max(input_length)
rev_input_length = ivy.subtract(max_len - 1, input_length)
def masking_fn(time):
return rev_input_length < time
else:
def masking_fn(time):
return input_length > time
def compute_masked_output(mask_t, output, mask):
return ivy.where(mask_t, output, mask)
else:
masking_fn = None
if masking_fn is not None:
zero_output = ivy.zeros_like(output_time_zero)
def _step(time, output_t, prev_output, *states):
current_input = inputs[time]
mask_t = masking_fn(time)
output, new_states = step_function(
current_input, tuple(states) + tuple(constants)
)
mask_output = zero_output if zero_output_for_mask else prev_output
new_output = compute_masked_output(mask_t, [output], [mask_output])[0]
for state, new_state in zip(states, new_states):
if ivy.is_ivy_array(new_state):
ivy.reshape(new_state, shape=state.shape, out=new_state)
final_states = compute_masked_output(mask_t, new_states, states)
new_states = final_states
output_t[time if return_all_outputs else 0] = new_output
return (time + 1, output_t, new_output) + tuple(new_states)
final_outputs = ivy.while_loop(
test_fn=test_fn,
body_fn=_step,
vars=(time, output_loop, zero_output) + states,
)
new_states = final_outputs[3:]
else:
def _step(time, output_t, *states):
current_input = inputs[time]
output, new_states = step_function(
current_input, tuple(states) + tuple(constants)
)
for state, new_state in zip(states, new_states):
if ivy.is_ivy_array(new_state):
ivy.reshape(new_state, shape=state.shape, out=new_state)
output_t[time if return_all_outputs else 0] = output
return (time + 1, output_t) + tuple(new_states)
final_outputs = ivy.while_loop(
test_fn=test_fn, body_fn=_step, vars=(time, output_loop) + states
)
new_states = final_outputs[2:]
outputs = final_outputs[1]
last_output = outputs[-1]
shape = list(outputs.shape)
if return_all_outputs:
shape[0] = time_steps
else:
shape[0] = 1
shape[1] = batch
outputs = ivy.reshape(outputs, shape)
if not time_major:
outputs = ivy.permute_dims(outputs, (1, 0, *range(2, len(outputs.shape))))
return last_output, outputs, new_states
rnn.mixed_backend_wrappers = {
"to_add": (
"inputs_to_native_arrays",
"outputs_to_ivy_arrays",
),
"to_skip": ("inputs_to_ivy_arrays",),
}
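# Minimal usage sketch (hypothetical stateless step function; shapes only):
#   def step(x_t, states):
#       return x_t, states
#   last, outputs, states = ivy.rnn(step, ivy.ones((2, 5, 3)), [ivy.zeros((2, 3))])
#   # outputs.shape == (2, 5, 3), last.shape == (2, 3)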
| ivy/ivy/functional/ivy/experimental/layers.py/0 | {
"file_path": "ivy/ivy/functional/ivy/experimental/layers.py",
"repo_id": "ivy",
"token_count": 60205
} | 48 |
"""Collection of Ivy neural network layers in functional form."""
# global
from typing import Optional, Tuple, Union, Sequence
# local
import ivy
from ivy.utils.backend import current_backend
from ivy.func_wrapper import (
handle_array_function,
handle_partial_mixed_function,
inputs_to_ivy_arrays,
to_native_arrays_and_back,
inputs_to_native_shapes,
handle_out_argument,
handle_nestable,
handle_array_like_without_promotion,
handle_device,
handle_backend_invalid,
)
from ivy.utils.exceptions import handle_exceptions
# Extra #
# ------#
def _get_embed_dim(
in_proj_weights, q_proj_weights, k_proj_weights, v_proj_weights, query
):
pre_embed_dim = query.shape[-1]
if ivy.exists(in_proj_weights):
embed_dim = in_proj_weights.shape[0] / 3
elif all(ivy.exists(x) for x in [q_proj_weights, k_proj_weights, v_proj_weights]):
embed_dim = q_proj_weights.shape[0]
else:
embed_dim = None
return pre_embed_dim, embed_dim
def _in_projection(
q,
k,
v,
w,
b=None,
):
"""Projects query, key and value efficiently, depending on whether we are
doing self- attention (query is key is value) or cross-attention (key is
value) or an attention where query, key and value are all different.
it is only used in
multi_head_attention layer.
This helper function is a modified version of https://github.com/pytorch/pytorch/b
lob/5293dee9208cc0e1e7db2ebdcbaef64908c087c6/torch/nn/functional.py#L4762.
"""
E = q.shape[-1]
if k is v:
if q is k:
# self-attention
proj = ivy.linear(q, w, bias=b)
proj = proj.split(num_or_size_splits=3, axis=-1)
return proj[0], proj[1], proj[2]
else:
# encoder-decoder attention
w_q, w_kv = w.split(num_or_size_splits=[E, E * 2])
if b is None:
b_q = b_kv = None
else:
b_q, b_kv = b.split([E, E * 2])
q_proj = ivy.linear(q, w_q, bias=b_q)
kv_proj = ivy.linear(k, w_kv, bias=b_kv)
kv_proj = kv_proj.split(num_or_size_splits=2, axis=-1)
return (q_proj, kv_proj[0], kv_proj[1])
else:
w_q, w_k, w_v = w.split(num_or_size_splits=3)
if b is None:
b_q = b_k = b_v = None
else:
b_q, b_k, b_v = b.split(num_or_size_splits=3)
return (
ivy.linear(q, w_q, bias=b_q),
ivy.linear(k, w_k, bias=b_k),
ivy.linear(v, w_v, bias=b_v),
)
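# e.g. for self-attention with embed dim E = 4, w has shape (3 * E, E) = (12, 4)
# and the projected output is split along its last axis into three E-dim chunks;
# a packed bias b, if given, has shape (12,).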
# Linear #
@handle_exceptions
@handle_nestable
@handle_partial_mixed_function
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
def linear(
x: Union[ivy.Array, ivy.NativeArray],
weight: Union[ivy.Array, ivy.NativeArray],
/,
*,
bias: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Apply a linear transformation to the incoming data: y = x * t(weight) + bias.
The operation also supports batching of the weight matrices. This is useful if a
batch of different network parameters are to be represented.
Parameters
----------
x
The input x to compute linear transformation on.
*[outer_batch_shape,inner_batch_shape,in_features]*
weight
The weight matrix. *[outer_batch_shape,out_features,in_features]*
bias
The bias vector, default is ``None``. *[outer_batch_shape,out_features]*
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
Result array of the linear transformation.
*[outer_batch_shape,inner_batch_shape,out_features]*
Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([1., 2., 3.])
>>> w = ivy.array([[1., 0., 0.]])
>>> y = ivy.linear(x, w)
>>> print(y)
ivy.array([1.])
>>> x = ivy.array([[0.666, -0.4269, 1.911]])
>>> w = ivy.array([[1., 0., 0.], [0., 0., 1.]])
>>> y = ivy.zeros((1, 2))
>>> ivy.linear(x, w, out=y)
>>> print(y)
ivy.array([[0.666, 1.91 ]])
>>> x = ivy.array([[1.546, 5.234, 6.487],
... [0.157, 5.753, 4.52],
... [5.165, 3.159, 7.101]])
>>> w = ivy.array([[1.545, 2.547, 3.124],
... [5.852, 8.753, 6.963]])
>>> b = ivy.array([-1., 1.])
>>> y = ivy.zeros((3, 2))
>>> ivy.linear(x, w, bias=b, out=y)
>>> print(y)
ivy.array([[ 34.98495483, 101.0293808 ],
[ 28.0159359 , 83.74752808],
[ 37.20942307, 108.3205719 ]])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([[1., 2., 3.],
... [4., 5., 6.]]),
... b=ivy.array([1.1, 2.2, 3.3]))
>>> w = ivy.Container(a=ivy.array([[1., 2., 3.],
... [-1., 1., 2.]]),
... b=ivy.array([[0., -1., 1.],
... [0., 1., 1.]]))
>>> b = ivy.Container(a=ivy.array([1., -1.]), b=ivy.array([1., 1.]))
>>> y = ivy.linear(x, w, bias=b)
>>> print(y)
{
a: ivy.array([[15., 6.],
[33., 12.]]),
b: ivy.array([2.1, 6.5])
}
With a mix of :class:`ivy.Array` and :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([[1.1, 2.2, 3.3],
... [11., 22., 33.]]),
... b=ivy.array([[1.245, 0.278, 4.105],
... [7., 13., 17.]]))
>>> w = ivy.array([[1., 2., 3.],
... [4., 5., 6.],
... [7., 8., 9.]])
>>> b = ivy.Container(a=ivy.array([1., 0., -1.]),
... b=ivy.array([1., 1., 0.]))
>>> ivy.linear(x, w, bias=b, out=x)
>>> print(x)
{
a: ivy.array([[16.4, 35.2, 54.],
[155., 352., 549.]]),
b: ivy.array([[15.1, 32., 47.9],
[85., 196., 306.]])
}
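With batched weight matrices (a shape-check sketch; the outer batch
dimension of `weight` lines up with the leading dimension of `x`, and the
values are random, so only the shape is shown):
>>> x = ivy.random_normal(mean=0, std=1, shape=[2, 5, 3])
>>> w = ivy.random_normal(mean=0, std=1, shape=[2, 4, 3])
>>> y = ivy.linear(x, w)
>>> print(y.shape)
ivy.Shape(2, 5, 4)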
"""
outer_batch_shape = list(weight.shape[:-2])
num_outer_batch_dims = len(outer_batch_shape)
inner_batch_shape = list(x.shape[num_outer_batch_dims:-1])
num_inner_batch_dims = len(inner_batch_shape)
num_out_feats, num_in_feats = list(weight.shape[-2:])
# OBS x IBS x OF
y = ivy.matmul(
x,
ivy.swapaxes(
ivy.reshape(
weight,
outer_batch_shape
+ [1] * max(num_inner_batch_dims - 1, 0)
+ [num_out_feats, num_in_feats],
),
-1,
-2,
),
)
if ivy.exists(bias):
# OBS x [1]*len(IBS) x OF
bias_broadcast = ivy.reshape(
bias, outer_batch_shape + [1] * num_inner_batch_dims + [num_out_feats]
)
# OBS x IBS x OF
y = y + bias_broadcast
if ivy.exists(out):
return ivy.inplace_update(out, y)
return y
linear.mixed_backend_wrappers = {
"to_add": (
"handle_backend_invalid",
"handle_out_argument",
"inputs_to_native_arrays",
"outputs_to_ivy_arrays",
"handle_device",
),
"to_skip": ("inputs_to_ivy_arrays", "handle_partial_mixed_function"),
}
# Dropout #
@handle_exceptions
@handle_nestable
@handle_partial_mixed_function
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
def dropout(
x: Union[ivy.Array, ivy.NativeArray],
prob: float,
/,
*,
scale: bool = True,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
training: bool = True,
seed: Optional[int] = None,
noise_shape: Optional[Sequence[int]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Randomly setting a fraction of input tensor to zeroes with probability.
`prob` at each update during training time to prevent possible overfitting.
The inputs not set to 0 are scaled up `1 / (1 - prob)` by default, so that
overall sum is unchanged at training time and inference time.
Parameters
----------
x
The input array x to perform dropout on.
prob
The probability of zeroing out each array element, float between 0 and 1.
scale
Whether to scale the output by `1/(1-prob)`. Default is ``True``.
dtype
output array data type. If dtype is None, the output array data type
must be inferred from x. Default is ``None``.
training
Turn on dropout if training, turn off otherwise. Default is ``True``.
seed
Set a default seed for random number generating (for reproducibility). Default
is ``None``.
noise_shape
a sequence representing the shape of the binary dropout mask that will be
multiplied with the input. A shape dimension set to None means that a different
mask value will be applied to each element of the input across that dimension. A
dimension set to 1 means the same mask value will be applied to all elements of
the input across that dimension.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
Result array after dropout is performed.
Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([[1., 2., 3.],
... [4., 5., 6.],
... [7., 8., 9.],
... [10., 11., 12.]])
>>> y = ivy.dropout(x,0.3)
>>> print(y)
ivy.array([[ 1.42857146, 2.85714293, 4.28571415],
[ 0. , 7.14285755, 8.5714283 ],
[10. , 11.4285717 , 0. ],
[14.2857151 , 0. , 17.1428566 ]])
>>> x = ivy.array([[1.5, 2.6],
... [4.9, 6.6],
... [7.2, 8.7]])
>>> y = ivy.dropout(x,0.5)
>>> print(y)
ivy.array([[ 0. , 5.19999981],
[ 0. , 0. ],
[ 0. , 17.39999962]])
>>> x = ivy.array([[1., 2., 3.],
... [4., 5., 6.],
... [7., 8., 9.],
... [10., 11., 12.]])
>>> y = ivy.dropout(x,0.3,scale=False)
>>> print(y)
ivy.array([[ 1., 2., 3.],
[ 4., 5., 0.],
[ 7., 0., 9.],
[10., 11., 0.]])
>>> x = ivy.array([[1.5, 2.6],
... [4.9, 6.6],
... [7.2, 8.7]])
>>> y = ivy.dropout(x,0.5,scale=False)
>>> print(y)
ivy.array([[0., 2.6],
[0., 0. ],
[0., 8.7]])
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([[1., 2., 3.], [4., 5., 6.]]),
... b=ivy.array([7., 8., 9.]))
>>> y = ivy.dropout(x,0.3)
>>> print(y)
{
a: ivy.array([[0., 0., 4.28571415],
[5.71428585, 7.14285755, 0.]]),
b: ivy.array([0., 11.4285717, 12.8571434])
}
>>> x = ivy.Container(a=ivy.array([[1.1, 2.2, 3.3], [11., 22., 33.]]),
... b=ivy.array([[1.245, 0.278, 4.105], [7., 13., 17.]]))
>>> y = ivy.dropout(x,0.5)
>>> print(y)
{
a: ivy.array([[0., 4.4000001, 6.5999999],
[22., 44., 0.]]),
b: ivy.array([[2.49000001, 0.55599999, 8.21000004],
[14., 0., 0.]])
}
>>> x = ivy.Container(a=ivy.array([[1., 2., 3.], [4., 5., 6.]]),
... b=ivy.array([7., 8., 9.]))
>>> y = ivy.dropout(x,0.3)
>>> print(y)
{
a: ivy.array([[0., 0., 3.],
[4., 5., 0.]]),
b: ivy.array([0., 8., 9.])
}
>>> x = ivy.Container(a=ivy.array([[1.1, 2.2, 3.3], [11., 22., 33.]]),
... b=ivy.array([[1.245, 0.278, 4.105], [7., 13., 17.]]))
>>> y = ivy.dropout(x,0.5)
>>> print(y)
{
a: ivy.array([[0., 2.2, 3.3],
[11., 22., 0.]]),
b: ivy.array([[1.245, 0.278, 4.105],
[7., 0., 0.]])
}
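A `noise_shape` sketch (the zeroed entries are random, so only the shape is
checked; here the same mask is shared across the first and last dimensions):
>>> x = ivy.ones((2, 3, 4))
>>> y = ivy.dropout(x, 0.5, noise_shape=[1, 3, 1], seed=0)
>>> print(y.shape)
ivy.Shape(2, 3, 4)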
"""
if prob == 0 or not training:
if dtype is not None and x.dtype != dtype:
x = ivy.astype(x, dtype)
return ivy.inplace_update(out, x) if ivy.exists(out) else x
if noise_shape is None:
noise_shape = x.shape
else:
noise_shape = list(noise_shape)
for i, v in enumerate(noise_shape):
if v is None:
noise_shape[i] = x.shape[i]
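# sample the binary mask: entries are zeroed with probability `prob`, kept otherwise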
mask = ivy.where(
ivy.random_uniform(shape=noise_shape, device=ivy.dev(x), dtype=dtype, seed=seed)
< prob,
0.0,
1.0,
)
x = x * mask
if scale:
x = ivy.multiply(x, 1.0 / (1.0 - prob), out=out)
return ivy.inplace_update(out, x) if ivy.exists(out) else x
dropout.mixed_backend_wrappers = {
"to_add": (
"handle_backend_invalid",
"handle_out_argument",
"inputs_to_native_arrays",
"outputs_to_ivy_arrays",
"handle_device",
),
"to_skip": ("inputs_to_ivy_arrays", "handle_partial_mixed_function"),
}
# Attention #
@handle_exceptions
@handle_nestable
@handle_array_like_without_promotion
@handle_array_function
def scaled_dot_product_attention(
query: Union[ivy.Array, ivy.NativeArray],
key: Union[ivy.Array, ivy.NativeArray],
value: Union[ivy.Array, ivy.NativeArray],
/,
*,
scale: Optional[float] = None,
mask: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
dropout_p: Optional[float] = 0.0,
is_causal: Optional[bool] = False,
training: Optional[bool] = False,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Apply scaled dot product attention to inputs x using optional mask.
Parameters
----------
query
The queries input array. The shape of queries input array should be in
*[batch_shape,num_queries,feat_dim]*. The queries input array should have the
same size as keys and values.
key
The keys input array. The shape of keys input array should be in
*[batch_shape,num_keys,feat_dim]*. The keys input array should have the same
size as queries and values.
value
The values input array. The shape of values input should be in
*[batch_shape,num_keys,feat_dim]*. The values input array should have the same
size as queries and keys.
scale
The scale float value, used to scale the query-key similarities before the
softmax. Defaults to `1 / sqrt(feat_dim)` when not specified.
mask
The mask input array. The mask to apply to the query-key values. Default is
None. The shape of mask input should be in *[batch_shape,num_queries,num_keys]*.
dropout_p
Specifies the dropout probability, if greater than 0.0, dropout is applied
is_causal
If true, assumes causal attention masking; an error is raised if both
`mask` and `is_causal` are set.
training
If True, dropout is used, otherwise dropout is not activated.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
The output following application of scaled dot-product attention.
The output array is the weighted sum produced by the attention score and value.
The shape of output array is *[batch_shape,num_queries,feat_dim]* .
Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> q = ivy.array([[[0.2, 1.], [2.2, 3.],[4.4, 5.6]]])
>>> k = ivy.array([[[0.6, 1.5], [2.4, 3.3],[4.2, 5.1]]])
>>> v = ivy.array([[[0.4, 1.3], [2.2, 3.1],[4.3, 5.3]]])
>>> result = ivy.scaled_dot_product_attention(q,
... k,
... v,
... scale=1,
... dropout_p=0.1,
... is_causal=True,
... training=True)
>>> print(result)
ivy.array([[[0.40000001, 1.29999995],
... [2.19994521, 3.09994531],
... [4.30000019, 5.30000019]]])
>>> q = ivy.array([[[0.2, 1.], [2.2, 3.],[4.4, 5.6]]])
>>> k = ivy.array([[[0.6, 1.5], [2.4, 3.3],[4.2, 5.1]]])
>>> v = ivy.array([[[0.4, 1.3], [2.2, 3.1],[4.3, 5.3]]])
>>> mask = ivy.array([[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0],[0.0, 0.0, 0.0]]])
>>> result = ivy.scaled_dot_product_attention(q,k,v,scale=1,mask=mask)
>>> print(result)
ivy.array([[[2.30000019, 3.23333359],
[2.30000019, 3.23333359],
[2.30000019, 3.23333359]]])
>>> q = ivy.array([[[0.2, 1.], [2.2, 3.], [4.4, 5.6]]])
>>> k = ivy.array([[[0.6, 1.5], [2.4, 3.3], [4.2, 5.1]]])
>>> v = ivy.array([[[0.4, 1.3], [2.2, 3.1], [4.3, 5.3]]])
>>> out = ivy.zeros(shape=(1, 3, 2))
>>> ivy.scaled_dot_product_attention(q,
... k,
... v,
... scale=1,
... dropout_p=0.1,
... is_causal=True,
... training=True,
... out=out)
>>> print(out)
ivy.array([[[0.40000001, 1.29999995],
... [2.19994521, 3.09994531],
... [4.30000019, 5.30000019]]])
>>> q = ivy.native_array([[[0.2, 1.], [2.2, 3.],[4.4, 5.6]]])
>>> k = ivy.native_array([[[0.6, 1.5], [2.4, 3.3],[4.2, 5.1]]])
>>> v = ivy.native_array([[[0.4, 1.3], [2.2, 3.1],[4.3, 5.3]]])
>>> mask = ivy.native_array([[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0],[0.0, 0.0, 0.0]]])
>>> result = ivy.scaled_dot_product_attention(q,k,v,scale=1,mask=mask)
>>> print(result)
ivy.array([[[2.30000019, 3.23333359],
... [2.30000019, 3.23333359],
... [2.30000019, 3.23333359]]])
>>> q = ivy.native_array([[[0.2, 1.], [2.2, 3.], [4.4, 5.6]]])
>>> k = ivy.native_array([[[0.6, 1.5], [2.4, 3.3], [4.2, 5.1]]])
>>> v = ivy.native_array([[[0.4, 1.3], [2.2, 3.1], [4.3, 5.3]]])
>>> out = ivy.zeros(shape=(1, 3, 2))
>>> ivy.scaled_dot_product_attention(q,
... k,
... v,
... scale=1,
... dropout_p=0.1,
... is_causal=True,
... training=True,
... out=out)
>>> print(out)
ivy.array([[[0.40000001, 1.29999995],
... [2.19994521, 3.09994531],
... [4.30000019, 5.30000019]]])
With :class:`ivy.Container` input:
>>> q = ivy.Container(a=ivy.array([[[0.2, 1.], [2.7, 3.], [4.4, 5.6]]]),
... b=ivy.array([[[1.2, 1.], [2.2, 3.], [4.4, 5.6]]]))
>>> k = ivy.Container(a=ivy.array([[[4.2, 1.], [2.2, 3.3], [4.4, 5.6]]]),
... b=ivy.array([[[3.2, 1.], [2.2, 3.6], [4.0, 5.6]]]))
>>> v = ivy.Container(a=ivy.array([[[5.2, 1.], [2.1, 3.], [4.4, 5.6]]]),
... b=ivy.array([[[0.2, 1.], [2.2, 3.], [4.4, 5.6]]]))
>>> result = ivy.scaled_dot_product_attention(q,
... k,
... v,
... scale=1,
... dropout_p=0.1,
... is_causal=True,
... training=True)
>>> print(result)
{
a: ivy.array([[[5.19999981, 1.],
... [2.59249449, 2.68226194],
... [4.4000001, 5.5999999]]]),
b: ivy.array([[[0.2, 1.],
... [2.19603825, 2.9960382],
... [4.4000001, 5.5999999]]])
}
>>> q = ivy.Container(a=ivy.array([[[0.2, 1.], [2.7, 3.], [4.4, 5.6]]]),
... b=ivy.array([[[1.2, 1.], [2.2, 3.], [4.4, 5.6]]]))
>>> k = ivy.Container(a=ivy.array([[[4.2, 1.], [2.2, 3.3], [4.4, 5.6]]]),
... b=ivy.array([[[3.2, 1.], [2.2, 3.6], [4.0, 5.6]]]))
>>> v = ivy.Container(a=ivy.array([[[5.2, 1.], [2.1, 3.], [4.4, 5.6]]]),
... b=ivy.array([[[0.2, 1.], [2.2, 3.], [4.4, 5.6]]]))
>>> mask = ivy.Container(
... a=ivy.array([[[1.0, 1.0, 1.0],[1.0, 1.0, 1.0],[1.0, 1.0, 1.0]]]),
... b=ivy.array([[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0,1.0]]])
... )
>>> result = ivy.scaled_dot_product_attention(q,k,v,scale=1,mask=mask)
>>> print(result)
{
a: ivy.array([[[4.26894283, 5.40236187],
... [4.39999437, 5.59999037],
... [4.4000001, 5.5999999]]]),
b: ivy.array([[[4.35046196, 5.54282808],
... [4.39989519, 5.5998764],
... [4.4000001, 5.5999999]]])
}
With a mix of :class:`ivy.Array` and :class:`ivy.NativeArray` inputs:
>>> q = ivy.array([[[0.2, 1.], [2.2, 3.],[4.4, 5.6]]])
>>> k = ivy.native_array([[[0.6, 1.5], [2.4, 3.3],[4.2, 5.1]]])
>>> v = ivy.native_array([[[0.4, 1.3], [2.2, 3.1],[4.3, 5.3]]])
>>> result = ivy.scaled_dot_product_attention(q,
... k,
... v,
... scale=1,
... dropout_p=0.1,
... is_causal=True,
... training=True)
>>> print(result)
ivy.array([[[0.40000001, 1.29999995],
... [2.19994521, 3.09994531],
... [4.30000019, 5.30000019]]])
>>> q = ivy.array([[[0.2, 1.], [2.2, 3.], [4.4, 5.6]]])
>>> k = ivy.native_array([[[0.6, 1.5], [2.4, 3.3], [4.2, 5.1]]])
>>> v = ivy.native_array([[[0.4, 1.3], [2.2, 3.1], [4.3, 5.3]]])
>>> out = ivy.zeros(shape=(1, 3, 2))
>>> ivy.scaled_dot_product_attention(q,k,v,scale=1,out=out)
>>> print(out)
ivy.array([[[4.03946018, 5.0280633 ],
... [4.29981947, 5.29981089],
... [4.30000019, 5.30000019]]])
With a mix of :class:`ivy.Array` and :class:`ivy.Container` inputs:
>>> q = ivy.array([[[0.2, 1.], [2.2, 3.],[4.4, 5.6]]])
>>> k = ivy.Container(a=ivy.array([[[4.2, 1.], [2.2, 3.3], [4.4, 5.6]]]),
... b=ivy.array([[[3.2, 1.], [2.2, 3.6], [4.0, 5.6]]]))
>>> v = ivy.array([[[0.4, 1.3], [2.2, 3.1], [4.3, 5.3]]])
>>> result = ivy.scaled_dot_product_attention(q,k,v,scale=1,is_causal=True)
>>> print(result)
{
a: ivy.array([[[0.40000001, 1.29999995],
... [2.06345534, 2.9634552],
... [4.30000019, 5.30000019]]]),
b: ivy.array([[[0.40000001, 1.29999995],
... [2.19336844, 3.09336829],
... [4.30000019, 5.30000019]]])
}
With a mix of :class:`ivy.Array` and :class:`ivy.Container` inputs:
>>> q = ivy.array([[[0.2, 1.], [2.2, 3.],[4.4, 5.6]]])
>>> k = ivy.Container(a=ivy.array([[[4.2, 1.], [2.2, 3.3],[4.4, 5.6]]]),
... b=ivy.array([[[3.2, 1.], [2.2, 3.6],[4.0, 5.6]]]))
>>> v = ivy.array([[[0.4, 1.3], [2.2, 3.1],[4.3, 5.3]]])
>>> mask = ivy.native_array([[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]])
>>> result = ivy.scaled_dot_product_attention(q,
... k,
... v,
... scale=1,
... mask=mask,
... dropout_p=0.1,
... training=True)
>>> print(result)
{
a: ivy.array([[[2.30000019, 3.23333359],
... [2.30000019, 3.23333359],
... [2.30000019, 3.23333359]]]),
b: ivy.array([[[2.30000019, 3.23333359],
... [2.30000019, 3.23333359],
... [2.30000019, 3.23333359]]])
}
"""
ivy.assertions.check_all(
(not is_causal) or (is_causal and mask is None),
"is_causal and attn_mask cannot be set at the same time",
)
embed_dim = query.shape[-1]
scale = scale if scale else 1 / (embed_dim**0.5)
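# pairwise query-key similarity scores, shape (..., num_queries, num_keys)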
sim = ivy.einsum("... q f, ... k f -> ... q k", query, key) * scale
sim = ivy.dropout(sim, dropout_p, training=training)
if ivy.exists(mask):
sim = ivy.where(
ivy.logical_not(mask),
-ivy.ones_like(sim) * ivy.finfo(ivy.dtype(sim)).max,
sim,
)
elif is_causal:
L = query.shape[-2]  # Target sequence length
S = key.shape[-2]  # Source sequence length
mask = ivy.tril(ivy.ones((L, S)), k=0)
mask = ivy.astype(mask, ivy.bool)
sim = ivy.where(
ivy.logical_not(mask),
-ivy.ones_like(sim) * ivy.finfo(ivy.dtype(sim)).max,
sim,
)
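# normalize the (masked) scores over the key axis to obtain attention weights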
attn = ivy.softmax(sim, axis=-1)
result = ivy.einsum("... qk, ...kf -> ...qf", attn, value)
return ivy.inplace_update(out, result) if ivy.exists(out) else result
@handle_exceptions
@handle_nestable
@handle_out_argument
@handle_partial_mixed_function
@inputs_to_ivy_arrays
@handle_array_function
def multi_head_attention(
query: Union[ivy.Array, ivy.NativeArray],
/,
*,
key: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
value: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
batch_first: bool = True,
num_heads: int = 8,
scale: Optional[float] = None,
attention_mask: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
in_proj_weights: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
q_proj_weights: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
k_proj_weights: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
v_proj_weights: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
out_proj_weights: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
in_proj_bias: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
out_proj_bias: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
is_causal: bool = False,
key_padding_mask: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
bias_k: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
bias_v: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
static_k: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
static_v: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
add_zero_attn: bool = False,
return_attention_weights: bool = False,
average_attention_weights: bool = True,
dropout: float = 0.0,
training: bool = False,
out: Optional[ivy.Array] = None,
) -> Union[ivy.Array, ivy.NativeArray]:
"""Apply multi-head attention to inputs x. This is an implementation of
multi-headed attention as described in the paper "Attention is all you
Need" (Vaswani et al., 2017). If `query`, `key`, `value` are the same, then
this is self-attention. Each timestep in `query` attends to the
corresponding sequence in `key`, and returns a fixed-width vector. This
layer first projects `query`, `key` and `value`. These are (effectively) a
list of tensors of length `num_attention_heads`, where the corresponding
shapes are `(batch_size, <query dimensions>, key_dim)`, `(batch_size,
<key/value dimensions>, key_dim)`, `(batch_size, <key/value dimensions>,
value_dim)`. Then, the query and key tensors are dot-producted and scaled.
These are softmaxed to obtain attention probabilities. The value tensors
are then interpolated by these probabilities, then concatenated back to a
single tensor. Finally, the result tensor with the last dimension as
value_dim can take a linear projection and return.
Parameters
----------
query
The query embeddings. Shape: `(L, Q)` or `(N, L, Q)`, where L is the number of
queries, N is the batch size, Q is the query embedding dimension.
key
The key embeddings. Shape: `(S, K)` or `(N, S, K)`, where S is the number of
keys, N is the batch size, K is the key embedding dimension.
value
The value embeddings. Shape `(S, V)` or `(N, S, V)`, where S is the number of
keys, N is the batch size, V is the value embedding dimension.
batch_first
If False, `query`, `key` and `value` will have shapes `(L, N, Q)`, `(S, N, K)`
and `(S, N, V)` respectively (if batched).
num_heads
The number of attention heads to use.
scale
The value by which to scale the query-key similarity measure before softmax.
attention_mask
The mask to apply to the query-key values. Shape: `(L, S)` or
`(N*num_heads, L, S)`.
in_proj_weights
The weights used to project query, key and value. Shape: `(3*E, E')`, where E
is the new embedding dimension and E' is the input embedding dimension, i.e.
`E' = Q = K = V`.
q_proj_weights
The weights used to project query if `in_proj_weights` is None. Shape: `(E, Q)`.
k_proj_weights
The weights used to project key if `in_proj_weights` is None. Shape: `(E, K)`.
v_proj_weights
The weights used to project value if `in_proj_weights` is None. Shape: `(E, V)`.
out_proj_weights
The weights used to project the attention output. Shape: `(O, E)`, where O is
the output embedding dimension.
in_proj_bias
The bias used when projecting query, key and value. Shape: `(3*E,)`.
out_proj_bias
The bias used when projecting the output. Shape: `(O,)`.
is_causal
If True, use a causal attention mask and ignore the provided `attention_mask`.
key_padding_mask
A binary mask to apply to the key sequence. Shape: `(S,)` or `(N, S)`.
bias_k
An additional bias added to the key sequence. Shape: `(E,)`.
bias_v
An additional bias added to the value sequence. Shape: `(E,)`.
static_k
A static key to be used in the attention operators.
Shape: `(N*num_heads, S, E//num_heads)`.
static_v
A static value to be used in the attention operators.
Shape: `(N*num_heads, S, E//num_heads)`.
add_zero_attn
A boolean flag indicating whether to add a batch of zeros to key and value.
return_attention_weights
If True, return the attention weights alongside the attention output.
average_attention_weights
If True, the returned attention weights will be averaged across heads.
Otherwise, the attention weights will be provided separately per head.
Note that this flag only has an effect when `return_attention_weights=True`.
dropout
Specifies the dropout probability. Dropout is applied on the attention weights.
training
If True, dropout is used, otherwise dropout is not activated.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
The output following the application of multi-head attention. Either `output`
or `(output, attention_weights)`. `output` will have shape `(L, E)` if the
inputs were unbatched or `(N, L, E)` otherwise, and `attention_weights` will
have shape `(L, S)` or `(N, L, S)` respectively. If `batch_first` is False and
the inputs were batched, the `output` will have shape `(L, N, E)`.
Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
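Examples
--------
A minimal self-attention sketch (shape check only, since the values depend
on the random inputs; no projection weights are passed, so the embedding
dimension is taken directly from `query`):
>>> q = ivy.random_normal(mean=0, std=1, shape=[1, 5, 16])
>>> out = ivy.multi_head_attention(q, num_heads=4)
>>> print(out.shape)
ivy.Shape(1, 5, 16)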
"""
num_dims = query.ndim
ivy.assertions.check_all(
num_dims > 1 and num_dims < 4,
"Number of dimensions should be 2 (for unbatched input) or 3 (for batched"
f" input), got {num_dims}",
)
if key is None and value is None:
key = value = query
if num_dims == 2:
query, key, value = (ivy.expand_dims(x, axis=0) for x in [query, key, value])
elif not batch_first:
query, key, value = (ivy.swapaxes(x, 0, 1) for x in [query, key, value])
# project query, key and value
if ivy.exists(in_proj_weights):
q, k, v = _in_projection(query, key, value, w=in_proj_weights, b=in_proj_bias)
emb_dim = int(in_proj_weights.shape[0] / 3)
elif all(ivy.exists(x) for x in [q_proj_weights, k_proj_weights, v_proj_weights]):
if ivy.exists(in_proj_bias):
b_q, b_k, b_v = ivy.split(in_proj_bias, num_or_size_splits=3)
else:
b_q = b_k = b_v = None
q, k, v = (
ivy.linear(query, q_proj_weights, bias=b_q),
ivy.linear(key, k_proj_weights, bias=b_k),
ivy.linear(value, v_proj_weights, bias=b_v),
)
emb_dim = q_proj_weights.shape[0]
else:
q, k, v = query, key, value
if ivy.exists(out_proj_weights):
emb_dim = out_proj_weights.shape[-1]
else:
emb_dim = q.shape[-1]
num_batches, num_queries = query.shape[:2]
ivy.assertions.check_true(
emb_dim % num_heads == 0, "features must be divisible by number of heads"
)
head_dim = emb_dim // num_heads
# apply extra bias
if bias_k is not None and bias_v is not None:
ivy.assertions.check_true(
not (ivy.exists(static_k) or ivy.exists(static_v)),
"bias cannot be added to static key or value",
)
k = ivy.concat([k, ivy.tile(bias_k, (num_batches, 1, 1))], axis=1)
v = ivy.concat([v, ivy.tile(bias_v, (num_batches, 1, 1))], axis=1)
num_keys = k.shape[1]
# reshape q, k, v for efficient matrix multiplication
q = ivy.swapaxes(q.reshape((num_queries, num_batches * num_heads, head_dim)), 0, 1)
if static_k is None:
k = ivy.swapaxes(k.reshape((num_keys, num_batches * num_heads, head_dim)), 0, 1)
else:
k = static_k
if static_v is None:
v = ivy.swapaxes(v.reshape((num_keys, num_batches * num_heads, head_dim)), 0, 1)
else:
v = static_v
# add extra batch of zeros to k, v
if add_zero_attn:
zero_attn_shape = (num_batches * num_heads, 1, head_dim)
k = ivy.concat([k, ivy.zeros(zero_attn_shape, dtype=k.dtype)], axis=1)
v = ivy.concat([v, ivy.zeros(zero_attn_shape, dtype=v.dtype)], axis=1)
num_keys = k.shape[1]
# get attention scores
attn_scores = ivy.matmul(q, ivy.swapaxes(k, 1, 2))
scale = scale if scale else 1 / (head_dim**0.5)
attn_scores *= scale
# mask the attention scores
if ivy.exists(attention_mask):
assert attention_mask.dtype in [query.dtype, ivy.bool], (
"was expecting attention_mask of type bool or the same as the input's, but"
f" got {attention_mask.dtype}"
)
if is_causal:
mask = ivy.triu(ivy.ones((num_queries, num_keys)), k=1)
attention_mask = ivy.where(mask, float("-inf"), 0)
elif ivy.is_bool_dtype(attention_mask):
attention_mask = ivy.where(attention_mask, float("-inf"), 0)
if attention_mask.ndim == 2:
attention_mask = ivy.tile(attention_mask, (num_batches * num_heads, 1, 1))
if key_padding_mask is not None:
assert ivy.is_bool_dtype(key_padding_mask), (
"was expecting key_padding_mask of type bool, but got"
f" {key_padding_mask.dtype}"
)
key_padding_mask = ivy.where(key_padding_mask, float("-inf"), 0)
if num_dims == 2:
key_padding_mask = ivy.expand_dims(key_padding_mask, axis=0)
key_padding_mask = ivy.tile(
key_padding_mask, (num_batches * num_heads, num_queries, 1)
)
if attention_mask is None:
attention_mask = key_padding_mask
else:
attention_mask += key_padding_mask
if ivy.exists(attention_mask):
if bias_k is not None and bias_v is not None and not is_causal:
attention_mask = ivy.pad(attention_mask, [(0, 0), (0, 0), (0, 1)])
if add_zero_attn and not is_causal:
attention_mask = ivy.pad(attention_mask, [(0, 0), (0, 0), (0, 1)])
attn_scores += attention_mask.astype(query.dtype)
# get attention weights
attn_weights = ivy.softmax(attn_scores, axis=-1)
attn_weights = ivy.dropout(attn_weights, dropout, training=training)
# get attention output
attention_out = ivy.matmul(attn_weights, v)
attention_out = ivy.swapaxes(attention_out, 0, 1).reshape(
(num_batches, num_queries, emb_dim)
)
if ivy.exists(out_proj_weights):
attention_out = ivy.linear(attention_out, out_proj_weights, bias=out_proj_bias)
if num_dims == 2:
attention_out = attention_out.squeeze(axis=0)
elif not batch_first:
attention_out = attention_out.swapaxes(0, 1)
if return_attention_weights:
attn_weights = attn_weights.reshape(
(num_batches, num_heads, num_queries, num_keys)
)
if average_attention_weights:
attn_weights = attn_weights.mean(axis=1)
if num_dims == 2:
attn_weights = attn_weights.squeeze(axis=0)
return attention_out, attn_weights
else:
return attention_out
multi_head_attention.mixed_backend_wrappers = {
"to_add": (
"handle_backend_invalid",
"handle_out_argument",
"inputs_to_native_arrays",
"outputs_to_ivy_arrays",
"handle_device_shifting",
),
"to_skip": ("inputs_to_ivy_arrays", "handle_partial_mixed_function"),
}
# Convolutions #
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def conv1d(
x: Union[ivy.Array, ivy.NativeArray],
filters: Union[ivy.Array, ivy.NativeArray],
strides: Union[int, Tuple[int]],
padding: Union[str, int, Sequence[Tuple[int, int]]],
/,
*,
data_format: str = "NWC",
filter_format: str = "channel_last",
x_dilations: Union[int, Tuple[int]] = 1,
dilations: Union[int, Tuple[int]] = 1,
bias: Optional[ivy.Array] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute a 1-D convolution given 3-D input x and filters arrays.
Parameters
----------
x
Input image *[batch_size,w,d_in]* or *[batch_size,d_in,w]*.
filters
Convolution filters *[fw,d_in,d_out]*.
strides
The stride of the sliding window for each dimension of input.
padding
either the string ‘SAME’ (padding with zeros evenly), the string ‘VALID’ (no
padding), or a sequence of n (low, high) integer pairs that give the padding to
apply before and after each spatial dimension.
data_format
The ordering of the dimensions in the input, one of "NWC" or "NCW". "NWC"
corresponds to input with shape (batch_size, width, channels), while "NCW"
corresponds to input with shape (batch_size, channels, width).
filter_format
Either "channel_first" or "channel_last". "channel_first" corresponds to "OIW",
input data formats, while "channel_last" corresponds to "WIO", "HWIO", "DHWIO".
x_dilations
The dilation factor for each dimension of input. (Default value = 1)
dilations
The dilation factor for each dimension of input. (Default value = 1)
bias
Bias array of shape *[d_out]*.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
The result of the convolution operation.
Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.asarray([[[0.], [3.], [0.]]]) #NWC
>>> filters = ivy.array([[[0.]], [[1.]], [[0.]]]) #WIO
>>> result = ivy.conv1d(x, filters, (1,), 'SAME', data_format='NWC',dilations= (1,))
>>> print(result)
ivy.array([[[0.], [3.], [0.]]])
With :class:`ivy.NativeArray` input:
>>> x = ivy.native_array([[[1., 3.], [2., 4.], [5., 7]]])
>>> filters = ivy.native_array([[[0., 1.], [1., 0.]]])
>>> result = ivy.conv1d(x, filters, (2,),'VALID')
>>> print(result)
ivy.array([[[3., 1.],
... [7., 5.]]])
With a mix of :class:`ivy.Array` and :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([[[1.2, 3.1, 4.8], [5.9, 2.2, 3.3],
... [10.8, 7.6, 4.9], [6.1, 2.2, 9.5]]]),
... b=ivy.array([[[8.8, 7.7, 6.6], [1.1, 2.2, 3.5]]]))
>>> filters = ivy.array([[[1., 0., 1.], [0., 1., 0.], [1., 1., 0.]]])
>>> result = ivy.conv1d(x, filters, 3, 'VALID')
>>> print(result)
{
a: ivy.array([[[6., 7.9, 1.2],
... [15.6, 11.7, 6.1]]]),
b: ivy.array([[[15.4, 14.3, 8.8]]])
}
"""
return current_backend(x).conv1d(
x,
filters,
strides,
padding,
data_format=data_format,
filter_format=filter_format,
x_dilations=x_dilations,
dilations=dilations,
bias=bias,
out=out,
)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@inputs_to_native_shapes
@to_native_arrays_and_back
@handle_array_function
@handle_device
def conv1d_transpose(
x: Union[ivy.Array, ivy.NativeArray],
filters: Union[ivy.Array, ivy.NativeArray],
strides: Union[int, Tuple[int]],
padding: str,
/,
*,
output_shape: Optional[Union[ivy.Shape, ivy.NativeShape]] = None,
filter_format: str = "channel_last",
data_format: str = "NWC",
dilations: Union[int, Tuple[int]] = 1,
bias: Optional[ivy.Array] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute a 1-D transpose convolution given 3-D input x and filters
arrays.
Parameters
----------
x
Input image *[batch_size,w,d_in]* or *[batch_size,d_in,w]*.
filters
Convolution filters *[fw,d_out,d_in]*.
strides
The stride of the sliding window for each dimension of input.
padding
Either ‘SAME’ (padding so that the output's shape is the same as the
input's), or ‘VALID’ (padding so that the output's shape is `output_shape`).
output_shape
Shape of the output (Default value = None)
filter_format
Either "channel_first" or "channel_last". "channel_first" corresponds
to "IOW",input data formats, while "channel_last" corresponds to "WOI".
data_format
The ordering of the dimensions in the input, one of "NWC" or "NCW". "NWC"
corresponds to input with shape (batch_size, width, channels), while "NCW"
corresponds to input with shape (batch_size, channels, width).
dilations
The dilation factor for each dimension of input. (Default value = 1)
bias
Bias array of shape *[d_out]*.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
The result of the transpose convolution operation.
Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.random_normal(mean=0, std=1, shape=[1, 28, 3])
>>> filters = ivy.random_normal(mean=0, std=1, shape=[3, 6, 3])
>>> y = ivy.conv1d_transpose(x, filters, 2, 'SAME')
>>> print(y.shape)
ivy.Shape(1, 56, 6)
>>> x = ivy.random_normal(mean=0, std=1, shape=[1, 128, 64])
>>> filters = ivy.random_normal(mean=0, std=1, shape=[1, 64, 64])
>>> ivy.conv1d_transpose(x, filters, 1, 'VALID', out=x)
>>> print(x.shape)
ivy.Shape(1, 128, 64)
>>> x = ivy.random_normal(mean=0, std=1, shape=[1, 256, 64])
>>> y = ivy.zeros((1, 258, 32))
>>> filters = ivy.random_normal(mean=0, std=1, shape=[3, 32, 64])
>>> ivy.conv1d_transpose(x, filters, 1, 'VALID', out=y)
>>> print(y.shape)
ivy.Shape(1, 258, 32)
With :class:`ivy.NativeArray` input:
>>> x = ivy.native_array(
... ivy.random_normal(mean=0, std=1, shape=[1, 256, 128]))
>>> filters = ivy.native_array(
... ivy.random_normal(mean=0, std=1, shape=[3, 32, 128]))
>>> y = ivy.conv1d_transpose(x, filters, 2, 'SAME')
>>> print(y.shape)
ivy.Shape(1, 512, 32)
With one :class:`ivy.Container` input:
>>> x = ivy.full((1, 6, 1), 2.7)
>>> a = ivy.random_normal(mean=0, std=1, shape=[3, 1, 1])
>>> b = ivy.random_normal(mean=0, std=1, shape=[3, 1, 1])
>>> filters = ivy.Container(a=a, b=b)
>>> y = ivy.conv1d_transpose(x, filters, 1, 'VALID', dilations=2)
>>> print(y.shape)
{
a: ivy.Shape(1, 10, 1),
b: ivy.Shape(1, 10, 1)
}
With multiple :class:`ivy.Container` inputs:
>>> a = ivy.random_normal(mean=0, std=1, shape=[1, 14, 3])
>>> b = ivy.random_normal(mean=0, std=1, shape=[1, 28, 3])
>>> c = ivy.random_normal(mean=0, std=1, shape=[6, 3, 3])
>>> d = ivy.random_normal(mean=0, std=1, shape=[6, 3, 3])
>>> x = ivy.Container(a=a, b=b)
>>> filters = ivy.Container(c=c, d=d)
>>> y = ivy.conv1d_transpose(x, filters, 2, 'SAME')
>>> print(y.shape)
{
a: {
c: ivy.Shape(1, 28, 3),
d: ivy.Shape(1, 28, 3)
},
b: {
c: ivy.Shape(1, 56, 3),
d: ivy.Shape(1, 56, 3)
},
c: {
c: ivy.Shape(6, 6, 3),
d: ivy.Shape(6, 6, 3)
},
d: {
c: ivy.Shape(6, 6, 3),
d: ivy.Shape(6, 6, 3)
}
}
"""
return current_backend(x).conv1d_transpose(
x,
filters,
strides,
padding,
output_shape=output_shape,
filter_format=filter_format,
data_format=data_format,
dilations=dilations,
bias=bias,
out=out,
)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def conv2d(
x: Union[ivy.Array, ivy.NativeArray],
filters: Union[ivy.Array, ivy.NativeArray],
strides: Union[int, Tuple[int, int]],
padding: Union[str, int, Sequence[Tuple[int, int]]],
/,
*,
data_format: str = "NHWC",
filter_format: str = "channel_last",
x_dilations: Union[int, Tuple[int, int]] = 1,
dilations: Union[int, Tuple[int, int]] = 1,
bias: Optional[ivy.Array] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute a 2-D convolution given 4-D input x and filters arrays.
Parameters
----------
x
Input image *[batch_size,h,w,d_in]* or *[batch_size,d_in,h,w]*.
filters
Convolution filters *[fh,fw,d_in,d_out]*.
strides
The stride of the sliding window for each dimension of input.
padding
either the string ‘SAME’ (padding with zeros evenly), the string ‘VALID’ (no
padding), or a sequence of n (low, high) integer pairs that give the padding to
apply before and after each spatial dimension.
data_format
The ordering of the dimensions in the input, one of "NHWC" or "NCHW". "NHWC"
corresponds to inputs with shape (batch_size, height, width, channels), while
"NCHW" corresponds to input with shape (batch_size, channels, height, width).
filter_format
Either "channel_first" or "channel_last". "channel_first" corresponds to "OIHW",
input data formats, while "channel_last" corresponds to "HWIO".
x_dilations
The dilation factor for each dimension of input. (Default value = 1)
dilations
The dilation factor for each dimension of input. (Default value = 1)
bias
Bias array of shape *[d_out]*.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
The result of the convolution operation.
Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([[[[1.], [2.0],[3.]],
... [[1.], [2.0],[3.]],
... [[1.], [2.0],[3.]]]])
>>> filters = ivy.array([[[[0.]],[[1.]],[[0.]]],
... [[[0.]],[[1.]], [[0.]]],
... [[[0.]],[[1.]], [[0.]]]])
>>> result = ivy.conv2d(x, filters, 1, 'SAME', data_format='NHWC', dilations=1)
>>> print(result)
ivy.array([[
[[2.],[4.],[6.]],
[[3.],[6.],[9.]],
[[2.],[4.],[6.]]
]])
With one :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([[[[1.], [2.0],[3.]],
... [[1.], [2.0],[3.]],
... [[1.], [2.0],[3.]]]]))
>>> filters = ivy.eye(3, 3).reshape((3, 3, 1, 1)).astype(ivy.float32)
>>> result = ivy.conv2d(x, filters, 2, 'SAME', data_format='NHWC', dilations= 1)
>>> print(result)
{
a:ivy.array([[[[3.], [3.]], [[1.], [5.]]]])
}
With multiple :class:`ivy.Container` inputs:
>>> x = ivy.Container(a = ivy.eye(3, 3).reshape((1, 3, 3, 1)),
... b = ivy.eye(4, 4).reshape((1, 4, 4, 1)),
... c = ivy.eye(5, 5).reshape((1, 5, 5, 1)))
>>> filters = ivy.array([[1, 1, 1],
... [0, 1, 1],
... [0, 0, 1]], dtype = ivy.float32).reshape((3, 3, 1, 1))
>>> result = ivy.conv2d(x, filters, 2, 'SAME')
>>> print(result)
{
a:ivy.array([[[[2.], [0.]], [[1.], [2.]]]]),
b:ivy.array([[[[3.], [0.]], [[1.], [2.]]]]),
c:ivy.array([[[[2.], [0.], [0.]],
[[1.], [3.], [0.]],
[[0.], [1.], [2.]]
]])
}
With a mix of :class:`ivy.Array` and :class:`ivy.Container` inputs:
>>> x = ivy.Container(a = ivy.eye(3, 3).reshape((1, 3, 3, 1)),
... b = ivy.eye(5, 5).reshape((1, 5, 5, 1)))
>>> filters = ivy.array([[2, 0, 1],
... [1, 3, 1],
... [0, 1, 1]], dtype = ivy.float32).reshape((3, 3, 1, 1))
>>> result = ivy.conv2d(x, filters, 2, 'SAME')
>>> print(result)
{
a:ivy.array([[[[4.],[0.]],[[1.],[5.]]]]),
b:ivy.array([[[[4.],[0.],[0.]],[[1.],[6.],[0.]],[[0.],[1.],[5.]]]])
}
"""
return current_backend(x).conv2d(
x,
filters,
strides,
padding,
data_format=data_format,
filter_format=filter_format,
x_dilations=x_dilations,
dilations=dilations,
bias=bias,
out=out,
)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@inputs_to_native_shapes
@to_native_arrays_and_back
@handle_array_function
@handle_device
def conv2d_transpose(
x: Union[ivy.Array, ivy.NativeArray],
filters: Union[ivy.Array, ivy.NativeArray],
strides: Union[int, Tuple[int, int]],
padding: str,
/,
*,
output_shape: Optional[Union[ivy.Shape, ivy.NativeShape]] = None,
filter_format: str = "channel_last",
data_format: str = "NHWC",
dilations: Union[int, Tuple[int, int]] = 1,
bias: Optional[ivy.Array] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute a 2-D transpose convolution given 4-D input x and filters
arrays.
Parameters
----------
x
Input image *[batch_size,h,w,d_in]* or *[batch_size,d_in,h,w]*.
filters
Convolution filters *[fh,fw,d_out,d_in]*.
strides
The stride of the sliding window for each dimension of input.
padding
Either ‘SAME’ (padding so that the output's shape is the same as the
input's), or ‘VALID’ (padding so that the output's shape is `output_shape`).
output_shape
Shape of the output (Default value = None)
data_format
The ordering of the dimensions in the input, one of "NHWC" or "NCHW". "NHWC"
corresponds to inputs with shape (batch_size, height, width, channels), while
"NCHW" corresponds to input with shape (batch_size, channels, height, width).
filter_format
Either "channel_first" or "channel_last". "channel_first" corresponds
to "IOHW",input data formats, while "channel_last" corresponds to "HWOI".
x_dilations
The dilation factor for each dimension of input. (Default value = 1)
dilations
The dilation factor for each dimension of input. (Default value = 1)
bias
Bias array of shape *[d_out]*.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
The result of the transpose convolution operation.
Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.random_normal(mean=0, std=1, shape=[1, 28, 28, 3])
>>> filters = ivy.random_normal(mean=0, std=1, shape=[3, 3, 6, 3])
>>> y = ivy.conv2d_transpose(x,filters,2,'SAME')
>>> print(y.shape)
ivy.Shape(1, 56, 56, 6)
>>> x = ivy.random_normal(mean=0, std=1, shape=[1, 128, 128, 64])
>>> filters = ivy.random_normal(mean=0, std=1, shape=[1, 1, 64, 64])
>>> ivy.conv2d_transpose(x,filters,1,'VALID',out=x)
>>> print(x.shape)
ivy.Shape(1, 128, 128, 64)
>>> x = ivy.random_normal(mean=0, std=1, shape=[1, 256, 256, 64])
>>> y = ivy.zeros((1, 258, 258, 32))
>>> filters = ivy.random_normal(mean=0, std=1, shape=[3, 3, 32, 64])
>>> ivy.conv2d_transpose(x,filters,[1, 1, 1],'VALID',out=y)
>>> print(y.shape)
ivy.Shape(1, 258, 258, 32)
With one :class:`ivy.Container` input:
>>> x = ivy.full((1, 6, 6, 1), 2.7)
>>> a = ivy.random_normal(mean=0, std=1, shape=[3, 3, 1, 1])
>>> b = ivy.random_normal(mean=0, std=1, shape=[3, 3, 1, 1])
>>> filters = ivy.Container(a=a, b=b)
>>> y = ivy.conv2d_transpose(x,filters,1,'VALID',dilations=2)
>>> print(y.shape)
{
a: ivy.Shape(1, 10, 10, 1),
b: ivy.Shape(1, 10, 10, 1)
}
With multiple :class:`ivy.Container` inputs:
>>> a = ivy.random_normal(mean=0, std=1, shape=[1, 14, 14, 3])
>>> b = ivy.random_normal(mean=0, std=1, shape=[1, 28, 28, 3])
>>> c = ivy.random_normal(mean=0, std=1, shape=[6, 3, 3, 3])
>>> d = ivy.random_normal(mean=0, std=1, shape=[6, 3, 3, 3])
>>> x = ivy.Container(a=a, b=b)
>>> filters = ivy.Container(c=c, d=d)
>>> y = ivy.conv2d_transpose(x,filters,2,'SAME')
>>> print(y.shape)
{
a: {
c: ivy.Shape(1, 28, 28, 3),
d: ivy.Shape(1, 28, 28, 3)
},
b: {
c: ivy.Shape(1, 56, 56, 3),
d: ivy.Shape(1, 56, 56, 3)
},
c: {
c: ivy.Shape(6, 6, 6, 3),
d: ivy.Shape(6, 6, 6, 3)
},
d: {
c: ivy.Shape(6, 6, 6, 3),
d: ivy.Shape(6, 6, 6, 3)
}
}
"""
return current_backend(x).conv2d_transpose(
x,
filters,
strides,
padding,
output_shape=output_shape,
filter_format=filter_format,
data_format=data_format,
dilations=dilations,
bias=bias,
out=out,
)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def depthwise_conv2d(
x: Union[ivy.Array, ivy.NativeArray],
filters: Union[ivy.Array, ivy.NativeArray],
strides: Union[int, Tuple[int, int]],
padding: Union[str, Sequence[Tuple[int, int]]],
/,
*,
data_format: str = "NHWC",
dilations: Union[int, Tuple[int, int]] = 1,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute a 2-D depthwise convolution given 4-D input ``x`` and filters
arrays.
Parameters
----------
x
Input image *[batch_size,h,w,d_in]* or *[batch_size,d_in,h,w]*.
filters
Convolution filters *[fh,fw,d_in]*. (d_in must be the same as d from x)
strides
The stride of the sliding window for each dimension of input.
padding
either the string ‘SAME’ (padding with zeros evenly), the string ‘VALID’ (no
padding), or a sequence of n (low, high) integer pairs that give the padding to
apply before and after each spatial dimension.
data_format
The ordering of the dimensions in the input, one of "NHWC" or "NCHW". "NHWC"
corresponds to inputs with shape (batch_size, height, width, channels), while
"NCHW" corresponds to input with shape (batch_size, channels, height, width).
dilations
The dilation factor for each dimension of input. (Default value = 1)
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
The result of the convolution operation.
Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.random_normal(mean=0, std=1, shape=[1, 28, 28, 3])
>>> filters = ivy.random_normal(mean=0, std=1, shape=[3, 3, 3])
>>> y = ivy.depthwise_conv2d(x, filters, (1, 1), 'VALID')
>>> print(y.shape)
ivy.Shape(1, 26, 26, 3)
>>> x = ivy.random_normal(mean=0, std=1, shape=[1, 32, 32, 3])
>>> y = ivy.zeros((1, 16, 16, 3))
>>> filters = ivy.random_normal(mean=0, std=1, shape=[5, 5, 3])
>>> ivy.depthwise_conv2d(x, filters, [2, 2], 'SAME', out=y)
>>> print(y.shape)
ivy.Shape(1, 16, 16, 3)
>>> x = ivy.random_normal(mean=0, std=1, shape=[1, 64, 64, 32])
>>> y = ivy.zeros((1, 61, 61, 32))
>>> filters = ivy.random_normal(mean=0, std=1, shape=[4, 4, 32])
>>> ivy.depthwise_conv2d(x, filters, [1, 1], 'VALID', out=y)
>>> print(y.shape)
ivy.Shape(1, 61, 61, 32)
With :class:`ivy.NativeArray` input:
>>> x = ivy.native_array(ivy.random_normal(mean=0, std=1, shape=[1, 7, 7, 64]))
>>> filters = ivy.native_array(ivy.random_normal(mean=0, std=1, shape=[3, 3, 64]))
>>> y = ivy.depthwise_conv2d(x, filters, [1, 1], 'SAME')
>>> print(y.shape)
ivy.Shape(1, 7, 7, 64)
With a mix of :class:`ivy.Array` and :class:`ivy.Container` inputs:
>>> x = ivy.eye(6, 6).reshape((1, 6, 6, 1)) #NHWC
>>> a = ivy.array([[1., 1., 1.], [1., -8., 1.], [1., 1., 1.]]).expand_dims(axis=-1)
>>> b = ivy.array([[1., 1., 1.],
... [1., 1., 1.],
... [1., 1., 1.]]).expand_dims(axis=-1) / 9.0
>>> filters = ivy.Container(a = a, b = b)
>>> y = ivy.depthwise_conv2d(x, filters, 1, 'VALID', dilations=2)
>>> print(y)
{
a: ivy.array([[[[-6.],
[0.]],
[[0.],
[-6.]]]]),
b: ivy.array([[[[0.33333334],
[0.]],
[[0.],
[0.33333334]]]])
}
With a mix of :class:`ivy.Array`, :class:`ivy.NativeArray`
and :class:`ivy.Container` inputs:
>>> x = ivy.eye(6, 6).reshape((1, 6, 6, 1)) #NHWC
>>> y = ivy.native_array(ivy.eye(6, 6).reshape((1, 6, 6, 1)))
>>> inp = ivy.Container(x = x, y = y)
>>> filter = ivy.array([[1., 1., 1.],
... [1., -8., 1.],
... [1., 1., 1.]]).expand_dims(axis=-1)
>>> y = ivy.depthwise_conv2d(inp, filter, 1, 'VALID', dilations=2)
>>> print(y)
{
x: ivy.array([[[[-6.],
[0.]],
[[0.],
[-6.]]]]),
y: ivy.array([[[[-6.],
[0.]],
[[0.],
[-6.]]]])
}
"""
return current_backend(x).depthwise_conv2d(
x,
filters,
strides,
padding,
data_format=data_format,
dilations=dilations,
out=out,
)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def conv3d(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
filters: Union[ivy.Array, ivy.NativeArray, ivy.Container],
strides: Union[int, Tuple[int, int, int]],
padding: Union[str, int, Sequence[Tuple[int, int]]],
/,
*,
data_format: str = "NDHWC",
filter_format: str = "channel_last",
x_dilations: Union[int, Tuple[int, int, int]] = 1,
dilations: Union[int, Tuple[int, int, int]] = 1,
bias: Optional[ivy.Array] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute a 3-D convolution given 5-D input x and filters arrays.
Parameters
----------
x
Input volume *[batch_size,d,h,w,d_in]* or *[batch_size,d_in,d,h,w]*.
filters
Convolution filters *[fd,fh,fw,d_in,d_out]*.
strides
The stride of the sliding window for each dimension of input.
padding
either the string ‘SAME’ (padding with zeros evenly), the string ‘VALID’ (no
padding), or a sequence of n (low, high) integer pairs that give the padding to
apply before and after each spatial dimension.
data_format
The ordering of the dimensions in the input, one of "NDHWC" or "NCDHW". "NDHWC"
corresponds to inputs with shape (batch_size, depth, height, width, channels),
while "NCDHW" corresponds to input with shape (batch_size, channels, depth,
height, width).
filter_format
Either "channel_first" or "channel_last". "channel_first" corresponds
to "OIDHW",input data formats, while "channel_last" corresponds to "DHWIO".
x_dilations
The dilation factor for each dimension of input. (Default value = 1)
dilations
The dilation factor for each dimension of input. (Default value = 1)
bias
Bias array of shape *[d_out]*
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
The result of the convolution operation.
Both the description and the type hints above assume an array input for simplicity,
but this function is *nestable*, and therefore also accepts :class:`ivy.Container`
instances in place of any of the arguments.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.array([[[1., 2. ,1.], [1., 2. ,1.], [1., 2. ,1.]],
... [[1., 2. ,1.], [1., 2. ,1.], [1., 2. ,1.]],
... [[1., 2. ,1.], [1., 2. ,1.], [1., 2. ,1.]]]).reshape((1, 3, 3, 3, 1))
>>> filters = ivy.array([[[0.,1.,0.],
... [0.,1.,0.],
... [0.,1.,0.]]]).reshape((1,3,3,1,1))
>>> result = ivy.conv3d(x, filters, 1, 'SAME', data_format='NDHWC', dilations=1)
>>> print(result)
ivy.array([[[[[2.],[4.],[2.]],[[3.],[6.],[3.]],[[2.],[4.],[2.]]],
[[[2.],[4.],[2.]],[[3.],[6.],[3.]],[[2.],[4.],[2.]]],
[[[2.],[4.],[2.]],[[3.],[6.],[3.]],[[2.],[4.],[2.]]]]])
With one :class:`ivy.Container` input:
>>> x = ivy.Container(a = ivy.ones((1, 3, 3, 3, 1)).astype(ivy.float32))
>>> filters = ivy.ones((3, 3, 3, 1, 1)).astype(ivy.float32)
>>> result = ivy.conv3d(x, filters, 2, 'SAME')
>>> print(result)
{
a: ivy.array([[[[[8.],[8.]],[[8.],[8.]]],[[[8.],[8.]],[[8.],[8.]]]]])
}
With multiple :class:`ivy.Container` inputs:
>>> x = ivy.Container( a = ivy.random_normal(mean = 0, std = 1,
... shape = [1, 3, 5, 5, 1]),
... b = ivy.random_normal(mean = 0, std = 1,
... shape = [1, 5, 32 ,32, 1]),
... c = ivy.random_normal(mean = 0, std = 1,
... shape = [1, 32, 32, 32, 1]))
>>> filters = ivy.ones((3, 5, 5, 1, 3)).astype(ivy.float32)
>>> result = ivy.conv3d(x, filters, 1, 'SAME')
>>> print(result.cont_shapes)
{
a: ivy.Shape(1, 3, 5, 5, 3),
b: ivy.Shape(1, 5, 32, 32, 3),
c: ivy.Shape(1, 32, 32, 32, 3)
}
"""
return current_backend(x).conv3d(
x,
filters,
strides,
padding,
data_format=data_format,
filter_format=filter_format,
x_dilations=x_dilations,
dilations=dilations,
bias=bias,
out=out,
)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@inputs_to_native_shapes
@to_native_arrays_and_back
@handle_array_function
@handle_device
def conv3d_transpose(
x: Union[ivy.Array, ivy.NativeArray],
filters: Union[ivy.Array, ivy.NativeArray],
strides: Union[int, Tuple[int, int, int]],
padding: str,
/,
*,
output_shape: Optional[Union[ivy.Shape, ivy.NativeShape]] = None,
filter_format: str = "channel_last",
data_format: str = "NDHWC",
dilations: Union[int, Tuple[int, int, int]] = 1,
bias: Optional[ivy.Array] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute a 3-D transpose convolution given 5-D input x and filters
arrays.
Parameters
----------
x
Input volume *[batch_size,d,h,w,d_in]* or *[batch_size,d_in,d,h,w]*.
filters
Convolution filters *[fd,fh,fw,d_out,d_in]*.
strides
The stride of the sliding window for each dimension of input.
padding
Either ‘SAME’ (padding so that the output's shape is the same as the
input's), or ‘VALID’ (padding so that the output's shape is `output_shape`).
output_shape
Shape of the output (Default value = None)
filter_format
Either "channel_first" or "channel_last". "channel_first" corresponds
to "IODHW",input data formats, while "channel_last" corresponds to "DHWOI".
data_format
The ordering of the dimensions in the input, one of "NDHWC" or "NCDHW". "NDHWC"
corresponds to inputs with shape (batch_size, depth, height, width, channels),
while "NCDHW" corresponds to input with shape (batch_size, channels, depth,
height, width).
dilations
The dilation factor for each dimension of input. (Default value = 1)
bias
Bias array of shape *[d_out]*
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
The result of the transpose convolution operation.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.random_normal(mean=0, std=1, shape=[1, 3, 28, 28, 3])
>>> filters = ivy.random_normal(mean=0, std=1, shape=[3, 3, 3, 6, 3])
>>> y = ivy.conv3d_transpose(x, filters, [2, 2, 2], 'SAME')
>>> print(y.shape)
ivy.Shape(1, 6, 56, 56, 6)
>>> x = ivy.random_normal(mean=0, std=1, shape=[1, 3, 64, 64, 3])
>>> filters = ivy.random_normal(mean=0, std=1, shape=[3, 3, 3, 6, 3])
>>> y = ivy.conv3d_transpose(x, filters, [2, 2, 2], 'VALID', dilations=[1, 1, 1])
>>> print(y.shape)
ivy.Shape(1, 7, 129, 129, 6)
With :class:`ivy.Container` inputs:
>>> a = ivy.random_normal(mean=0, std=1, shape=[1, 3, 14, 14, 3])
>>> b = ivy.random_normal(mean=0, std=1, shape=[1, 3, 28, 28, 3])
>>> c = ivy.random_normal(mean=0, std=1, shape=[6, 3, 3, 3, 3])
>>> d = ivy.random_normal(mean=0, std=1, shape=[6, 3, 3, 3, 3])
>>> x = ivy.Container(a=a, b=b)
>>> filters = ivy.Container(c=c, d=d)
>>> y = ivy.conv3d_transpose(x, filters, [2, 2, 2], 'SAME')
>>> print(y.shape)
{
a: {
c: ivy.Shape(1, 6, 28, 28, 3),
d: ivy.Shape(1, 6, 28, 28, 3)
},
b: {
c: ivy.Shape(1, 6, 56, 56, 3),
d: ivy.Shape(1, 6, 56, 56, 3)
},
c: {
c: ivy.Shape(6, 6, 6, 6, 3),
d: ivy.Shape(6, 6, 6, 6, 3)
},
d: {
c: ivy.Shape(6, 6, 6, 6, 3),
d: ivy.Shape(6, 6, 6, 6, 3)
}
}
With a mix of :class:`ivy.Array` and :class:`ivy.Container` inputs:
>>> x = ivy.full((1, 6, 6, 6, 1), 2.7)
>>> a = ivy.random_normal(mean=0, std=1, shape=[3, 3, 3, 1, 1])
>>> b = ivy.random_normal(mean=0, std=1, shape=[3, 3, 3, 1, 1])
>>> filters = ivy.Container(a=a, b=b)
>>> y = ivy.conv3d_transpose(x, filters, [1, 1, 1], 'VALID', dilations=[1, 1, 1])
>>> print(y.shape)
{
a: ivy.Shape(1, 8, 8, 8, 1),
b: ivy.Shape(1, 8, 8, 8, 1)
}
>>> x = ivy.full((1, 6, 6, 6, 1), 1.23)
>>> a = ivy.array(ivy.random_normal(mean=0, std=1, shape=[3, 3, 3, 1, 1]))
>>> b = ivy.array(ivy.random_normal(mean=0, std=1, shape=[3, 3, 3, 1, 1]))
>>> filters = ivy.Container(a=a, b=b)
>>> y = ivy.conv3d_transpose(x, filters, [1, 1, 1], 'VALID', dilations=[1, 1, 1])
>>> print(y.shape)
{
a: ivy.Shape(1, 8, 8, 8, 1),
b: ivy.Shape(1, 8, 8, 8, 1)
}
"""
return current_backend(x).conv3d_transpose(
x,
filters,
strides,
padding,
output_shape=output_shape,
filter_format=filter_format,
data_format=data_format,
dilations=dilations,
bias=bias,
out=out,
)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@to_native_arrays_and_back
@handle_array_function
@handle_device
def conv_general_dilated(
x: Union[ivy.Array, ivy.NativeArray],
filters: Union[ivy.Array, ivy.NativeArray],
strides: Union[int, Tuple[int], Tuple[int, int], Tuple[int, int, int]],
padding: Union[str, int, Sequence[Tuple[int, int]]],
/,
*,
dims: int = 2,
data_format: str = "channel_last",
filter_format: str = "channel_last",
feature_group_count: int = 1,
x_dilations: Union[int, Tuple[int], Tuple[int, int], Tuple[int, int, int]] = 1,
dilations: Union[int, Tuple[int], Tuple[int, int], Tuple[int, int, int]] = 1,
bias: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute a 1-D, 2-D, and 3-D convolution given 3-D, 4-D and 5-D input x
respectively and filters arrays.
Parameters
----------
x
Input image *[batch_size,d,h,w,d_in]* or *[batch_size,d_in,d,h,w]*.
filters
Convolution filters *[fd,fh,fw,d_in/feature_group_count,d_out]*.
strides
The stride of the sliding window for each dimension of input.
padding
either the string ‘SAME’ (padding with zeros evenly), the string ‘VALID’ (no
padding), or a sequence of n (low, high) integer pairs that give the padding to
apply before and after each spatial dimension.
dims
Either 1, 2, or 3 corresponding to 1-D, 2-D, and 3-D convolution.
data_format
Either "channel_first" or "channel_last". "channel_first" corresponds to "NCW",
"NCHW", "NCDHW" input data formatS for 1-D, 2-D, 3-D convolution respectively,
while "channel_last" corresponds to "NWC", "NHWC", "NDHWC" respectively.
filter_format
Either "channel_first" or "channel_last". "channel_first" corresponds to "OIW",
"OIHW", "OIDHW" input data formats for 1-D, 2-D, 3-D convolution respectively,
while "channel_last" corresponds to "WIO", "HWIO", "DHWIO" respectively.
feature_group_count
split input into groups, d_in should be divisible by the number of groups.
(Default value = 1)
x_dilations
The dilation factor for each dimension of input. (Default value = 1)
dilations
The dilation factor for each dimension of filter. (Default value = 1)
bias
Bias array of shape *[d_out]*.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
        The result of the convolution operation.
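    Examples
    --------
    A minimal sketch (hypothetical values; the printed shape assumes the
    default ``channel_last`` data and filter formats):
    >>> x = ivy.random_normal(mean=0, std=1, shape=[1, 26, 26, 3])
    >>> filters = ivy.random_normal(mean=0, std=1, shape=[3, 3, 3, 32])
    >>> y = ivy.conv_general_dilated(x, filters, 1, 'SAME', dims=2)
    >>> print(y.shape)
    ivy.Shape(1, 26, 26, 32)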
"""
return current_backend(x).conv_general_dilated(
x,
filters,
strides,
padding,
dims=dims,
data_format=data_format,
filter_format=filter_format,
feature_group_count=feature_group_count,
x_dilations=x_dilations,
dilations=dilations,
bias=bias,
out=out,
)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_array_like_without_promotion
@handle_out_argument
@inputs_to_native_shapes
@to_native_arrays_and_back
@handle_array_function
@handle_device
def conv_general_transpose(
x: Union[ivy.Array, ivy.NativeArray],
filters: Union[ivy.Array, ivy.NativeArray],
strides: Union[int, Tuple[int], Tuple[int, int], Tuple[int, int, int]],
padding: str,
/,
*,
dims: int = 2,
output_shape: Optional[Union[ivy.Shape, ivy.NativeShape]] = None,
filter_format: str = "channel_last",
data_format: str = "channel_last",
dilations: Union[int, Tuple[int], Tuple[int, int], Tuple[int, int, int]] = 1,
feature_group_count: int = 1,
bias: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute a 1-D, 2-D, and 3-D transpose convolution given 3-D, 4-D and 5-D
input x respectively and filters arrays.
Parameters
----------
x
Input image *[batch_size,d,h,w,d_in]* or *[batch_size,d_in,d,h,w]*.
filters
Convolution filters *[fd,fh,fw,d_out,d_in]*.
strides
The stride of the sliding window for each dimension of input.
padding
        Either ‘SAME’ (padding so that the output's spatial shape equals the
        input's multiplied by the strides), or ‘VALID’ (no padding).
dims
Either 1, 2, or 3 corresponding to 1-D, 2-D, and 3-D convolution.
output_shape
Shape of the output.
filter_format
Either "channel_first" or "channel_last". "channel_first" corresponds
to "IODHW",input data formats, while "channel_last" corresponds to "DHWOI".
data_format
Either "channel_first" or "channel_last". "channel_first" corresponds to "NCW",
"NCHW", "NCDHW" input data formatS for 1-D, 2-D, 3-D convolution respectively,
while "channel_last" corresponds to "NWC", "NHWC", "NDHWC" respectively.
dilations
The dilation factor for each dimension of input. (Default value = 1)
feature_group_count
split input into groups, d_in should be divisible by the number of groups.
bias
Bias array of shape *[d_out]*.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
The result of the transpose convolution operation.
Examples
--------
With :class:`ivy.Array` input:
>>> x = ivy.random_normal(mean=0, std=1, shape=[1, 3, 28, 28, 3])
>>> filters = ivy.random_normal(mean=0, std=1, shape=[3, 3, 3, 6, 3])
>>> y = ivy.conv3d_transpose(x, filters, [2, 2, 2], 'SAME')
>>> print(y.shape)
ivy.Shape(1, 6, 56, 56, 6)
>>> x = ivy.random_normal(mean=0, std=1, shape=[1, 3, 64, 64, 3])
>>> filters = ivy.random_normal(mean=0, std=1, shape=[3, 3, 3, 6, 3])
>>> y = ivy.conv3d_transpose(x, filters, [2, 2, 2], 'VALID', dilations=[1, 1, 1])
>>> print(y.shape)
ivy.Shape(1, 7, 129, 129, 6)
    With :class:`ivy.Container` inputs:
>>> a = ivy.random_normal(mean=0, std=1, shape=[1, 3, 14, 14, 3])
>>> b = ivy.random_normal(mean=0, std=1, shape=[1, 3, 28, 28, 3])
>>> c = ivy.random_normal(mean=0, std=1, shape=[6, 3, 3, 3, 3])
>>> d = ivy.random_normal(mean=0, std=1, shape=[6, 3, 3, 3, 3])
>>> x = ivy.Container(a=a, b=b)
>>> filters = ivy.Container(c=c, d=d)
>>> y = ivy.conv3d_transpose(x, filters, [2, 2, 2], 'SAME')
>>> print(y.shape)
{
a: {
c: ivy.Shape(1, 6, 28, 28, 3),
d: ivy.Shape(1, 6, 28, 28, 3)
},
b: {
c: ivy.Shape(1, 6, 56, 56, 3),
d: ivy.Shape(1, 6, 56, 56, 3)
},
c: {
c: ivy.Shape(6, 6, 6, 6, 3),
d: ivy.Shape(6, 6, 6, 6, 3)
},
d: {
c: ivy.Shape(6, 6, 6, 6, 3),
d: ivy.Shape(6, 6, 6, 6, 3)
}
}
With a mix of :class:`ivy.Array` and :class:`ivy.Container` inputs:
>>> x = ivy.full((1, 6, 6, 6, 1), 2.7)
>>> a = ivy.random_normal(mean=0, std=1, shape=[3, 3, 3, 1, 1])
>>> b = ivy.random_normal(mean=0, std=1, shape=[3, 3, 3, 1, 1])
>>> filters = ivy.Container(a=a, b=b)
>>> y = ivy.conv3d_transpose(x, filters, [1, 1, 1], 'VALID', dilations=[1, 1, 1])
>>> print(y.shape)
{
a: ivy.Shape(1, 8, 8, 8, 1),
b: ivy.Shape(1, 8, 8, 8, 1)
}
>>> x = ivy.full((1, 6, 6, 6, 1), 1.23)
>>> a = ivy.array(ivy.random_normal(mean=0, std=1, shape=[3, 3, 3, 1, 1]))
>>> b = ivy.array(ivy.random_normal(mean=0, std=1, shape=[3, 3, 3, 1, 1]))
>>> filters = ivy.Container(a=a, b=b)
>>> y = ivy.conv3d_transpose(x, filters, [1, 1, 1], 'VALID', dilations=[1, 1, 1])
>>> print(y.shape)
{
a: ivy.Shape(1, 8, 8, 8, 1),
b: ivy.Shape(1, 8, 8, 8, 1)
}
"""
return current_backend(x).conv_general_transpose(
x,
filters,
strides,
padding,
dims=dims,
output_shape=output_shape,
filter_format=filter_format,
data_format=data_format,
dilations=dilations,
feature_group_count=feature_group_count,
bias=bias,
out=out,
)
@handle_exceptions
@handle_array_like_without_promotion
@handle_out_argument
@inputs_to_native_shapes
@handle_array_function
def conv(
x: Union[ivy.Array, ivy.NativeArray],
filters: Union[ivy.Array, ivy.NativeArray],
strides: Union[int, Tuple[int], Tuple[int, int], Tuple[int, int, int]],
padding: Union[str, Sequence[Tuple[int, int]]],
/,
*,
transpose: bool = False,
dims: int = 2,
output_shape: Optional[Union[ivy.Shape, ivy.NativeShape]] = None,
data_format: str = "channel_last",
filter_format: str = "channel_last",
feature_group_count: int = 1,
x_dilations: Union[int, Tuple[int], Tuple[int, int], Tuple[int, int, int]] = 1,
dilations: Union[int, Tuple[int], Tuple[int, int], Tuple[int, int, int]] = 1,
bias: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Compute a 1-D, 2-D, and 3-D transpose or dilated convolution given 3-D,
4-D and 5-D input x respectively and filters arrays.
Parameters
----------
x
Input image *[batch_size,d,h,w,d_in]* or *[batch_size,d_in,d,h,w]*.
filters
Convolution filters *[fd,fh,fw,d_in/feature_group_count,d_out]*.
strides
The stride of the sliding window for each dimension of input.
padding
either the string ‘SAME’ (padding with zeros evenly), the string ‘VALID’ (no
padding), or a sequence of n (low, high) integer pairs that give the padding to
apply before and after each spatial dimension.
transpose
True for computing transpose convolution, and False for dilated convolution.
When True, `x_dilations` must be 1 (the default).
dims
Either 1, 2, or 3 corresponding to 1-D, 2-D, and 3-D convolution.
output_shape
Shape of the output (Default value = None)
data_format
Either "channel_first" or "channel_last". "channel_first" corresponds to "NCW",
"NCHW", "NCDHW" input data formatS for 1-D, 2-D, 3-D convolution respectively,
while "channel_last" corresponds to "NWC", "NHWC", "NDHWC" respectively.
filter_format
Either "channel_first" or "channel_last". "channel_first" corresponds to "OIW",
"OIHW", "OIDHW" input data formats for 1-D, 2-D, 3-D convolution respectively,
while "channel_last" corresponds to "WIO", "HWIO", "DHWIO" respectively.
feature_group_count
split input into groups, d_in should be divisible by the number of groups.
(Default value = 1)
x_dilations
The dilation factor for each dimension of input. (Default value = 1)
dilations
        The dilation factor for each dimension of filter. (Default value = 1)
bias
Bias array of shape *[d_out]*.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
The result of the transpose or dilated convolution operation.
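    Examples
    --------
    A minimal sketch (hypothetical values; with the default ``transpose=False``
    this simply dispatches to ``conv_general_dilated``):
    >>> x = ivy.random_normal(mean=0, std=1, shape=[1, 28, 28, 3])
    >>> filters = ivy.random_normal(mean=0, std=1, shape=[3, 3, 3, 6])
    >>> y = ivy.conv(x, filters, 1, 'SAME', dims=2)
    >>> print(y.shape)
    ivy.Shape(1, 28, 28, 6)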
"""
if transpose:
return conv_general_transpose(
x,
filters,
strides,
padding,
dims=dims,
output_shape=output_shape,
data_format=data_format,
dilations=dilations,
feature_group_count=feature_group_count,
bias=bias,
out=out,
)
else:
return conv_general_dilated(
x,
filters,
strides,
padding,
dims=dims,
data_format=data_format,
filter_format=filter_format,
feature_group_count=feature_group_count,
x_dilations=x_dilations,
dilations=dilations,
bias=bias,
out=out,
)
# LSTM #
@handle_exceptions
@handle_nestable
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
def lstm_update(
x: Union[ivy.Array, ivy.NativeArray],
init_h: Union[ivy.Array, ivy.NativeArray],
init_c: Union[ivy.Array, ivy.NativeArray],
kernel: Union[ivy.Array, ivy.NativeArray],
recurrent_kernel: Union[ivy.Array, ivy.NativeArray],
/,
*,
bias: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
recurrent_bias: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
time_major: bool = False,
) -> Tuple[ivy.Array, Tuple[ivy.Array, ivy.Array]]:
"""Perform long-short term memory update by unrolling time dimension of
input array.
Parameters
----------
x
input tensor of LSTM layer *[batch_shape, t, in]* if time_major=False,
else *[t, batch_shape, in]*.
init_h
initial state tensor for the cell output *[batch_shape, out]*.
init_c
initial state tensor for the cell hidden state *[batch_shape, out]*.
kernel
weights for cell kernel *[in, 4 x out]*.
recurrent_kernel
weights for cell recurrent kernel *[out, 4 x out]*.
bias
bias for cell kernel *[4 x out]*. (Default value = None)
recurrent_bias
bias for cell recurrent kernel *[4 x out]*. (Default value = None)
time_major
whether or not the input tensor `x` has the time dimension before batch dim.
Returns
-------
ret
hidden state for all timesteps of shape *[batch_shape,t,out]* if time_major
is False, else *[t, batch_shape, out]*, and a tuple containing the final cell
states, both of shape *[batch_shape,out]*.
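    Examples
    --------
    A minimal sketch (hypothetical shapes, with 16 output units):
    >>> x = ivy.random_normal(mean=0, std=1, shape=[2, 5, 8])
    >>> h0 = ivy.zeros([2, 16])
    >>> c0 = ivy.zeros([2, 16])
    >>> kernel = ivy.random_normal(mean=0, std=1, shape=[8, 64])
    >>> rec_kernel = ivy.random_normal(mean=0, std=1, shape=[16, 64])
    >>> out, (h, c) = ivy.lstm_update(x, h0, c0, kernel, rec_kernel)
    >>> print(out.shape)
    ivy.Shape(2, 5, 16)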
"""
# ToDo: test_lstm_update needs to be fixed
if time_major:
x = ivy.swapaxes(x, 0, 1)
# get shapes
x_shape = list(x.shape)
batch_shape = x_shape[:-2]
timesteps = x_shape[-2]
input_channels = x_shape[-1]
x_flat = ivy.reshape(x, (-1, input_channels))
# input kernel
Wi = kernel
Wi_x = ivy.reshape(
ivy.matmul(x_flat, Wi) + (bias if bias is not None else 0),
batch_shape + [timesteps, -1],
)
Wii_x, Wif_x, Wig_x, Wio_x = ivy.split(Wi_x, num_or_size_splits=4, axis=-1)
# recurrent kernel
Wh = recurrent_kernel
# lstm states
ht = init_h
ct = init_c
# lstm outputs
hts_list = []
# unrolled time dimension with lstm steps
for Wii_xt, Wif_xt, Wig_xt, Wio_xt in zip(
ivy.unstack(Wii_x, axis=-2),
ivy.unstack(Wif_x, axis=-2),
ivy.unstack(Wig_x, axis=-2),
ivy.unstack(Wio_x, axis=-2),
):
htm1 = ht
ctm1 = ct
Wh_htm1 = ivy.matmul(htm1, Wh) + (
recurrent_bias if recurrent_bias is not None else 0
)
Whi_htm1, Whf_htm1, Whg_htm1, Who_htm1 = ivy.split(
Wh_htm1, num_or_size_splits=4, axis=-1
)
it = ivy.sigmoid(Wii_xt + Whi_htm1)
ft = ivy.sigmoid(Wif_xt + Whf_htm1)
gt = ivy.tanh(Wig_xt + Whg_htm1)
ot = ivy.sigmoid(Wio_xt + Who_htm1)
ct = ft * ctm1 + it * gt
ht = ot * ivy.tanh(ct)
hts_list.append(ivy.expand_dims(ht, axis=-2))
ret = ivy.concat(hts_list, axis=-2)
if time_major:
ret = ivy.swapaxes(ret, 0, 1)
return ret, (ht, ct)
@handle_exceptions
@handle_nestable
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
def lstm(
input: ivy.Array,
initial_states: Tuple[ivy.Array],
all_weights: Tuple[ivy.Array],
num_layers: int,
dropout: float,
train: bool,
bidirectional: bool,
batch_first: bool = False,
    batch_sizes: Optional[Sequence] = None,
weights_transposed: bool = False,
has_ih_bias: bool = True,
has_hh_bias: bool = True,
):
"""Applies a multi-layer long-short term memory to an input sequence.
Parameters
----------
input
input array of shape (seq_len, batch, input_size) when `batch_first` is False
or (batch, seq_len, input_size) when `batch_first` is True
initial_states
tuple of two arrays (h_0, c_0) where h_0 is the initial hidden state of shape
(num_layers * num_directions, batch, hidden_size) and c_0 is the initial cell
state of shape (num_layers * num_directions, batch, hidden_size)
(num_directions being 2 when `bidirectional`, otherwise 1)
all_weights
tuple of arrays representing the learnable weights of the lstm, with each
layer having up to four arrays (w_ih, w_hh, b_ih, b_hh) representing the weights
and biases (if biases are being used)
w_ih: weight of shape (4 * hidden_size, input_size)
w_hh: weight of shape (4 * hidden_size, hidden_size)
b_ih: bias of shape (4 * hidden_size,)
b_hh: bias of shape (4 * hidden_size,)
num_layers
number of layers for the lstm to use
dropout
dropout rate
train
whether to run the lstm in train mode or eval mode
bidirectional
whether the lstm is bidirectional or unidirectional
batch_first
defines the data format of the input and output arrays
batch_sizes
specifies the batch size at each timestep, when the input is a packed sequence
    weights_transposed
        whether the weights are transposed compared to the format in which they
        are expected, i.e. (input_size, 4 * hidden_size) rather than
        (4 * hidden_size, input_size)
has_ih_bias
        whether the `all_weights` argument includes an input-hidden bias
has_hh_bias
whether the `all_weights` argument includes a hidden-hidden bias
Returns
-------
output
output array of shape (seq_len, batch, num_directions * hidden_size) or
(batch, seq_len, num_directions * hidden_size), depending on `batch_first`
h_outs
final hidden state of shape (num_layers * num_directions, batch, hidden_size)
c_outs
final cell state of shape (num_layers * num_directions, batch, hidden_size)
"""
# TODO: the test for this function needs to be fixed -
# see ivy_tests/test_ivy/test_functional/test_nn/test_layers.py::test_lstm
if weights_transposed:
# transpose the weights if they are in the wrong format
all_weights = [
ivy.swapaxes(weight, 1, 0) if weight.dim() == 2 else weight
for weight in all_weights
]
else:
all_weights = list(all_weights)
if (has_ih_bias and not has_hh_bias) or (has_hh_bias and not has_ih_bias):
# insert zero biases into the weights where one set of biases is not used
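        # e.g. with only input-hidden biases present, each layer/direction holds
        # (w_ih, w_hh, b_ih), so a zero b_hh is inserted at flat index 3 for the
        # first weight group, 7 for the second, and so on (each insertion shifts
        # the groups that follow by one slot)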
shapes = []
for i in range(2, len(all_weights), 3):
shapes.append(tuple(all_weights[i].shape))
for i, shape in enumerate(shapes):
idx = (i + 1) * 4 - (1 if has_ih_bias else 2)
all_weights.insert(idx, ivy.zeros(shape))
has_ih_bias = True
has_hh_bias = True
weights_per_layer = 2
if has_ih_bias:
weights_per_layer += 1
if has_hh_bias:
weights_per_layer += 1
assert len(all_weights) == num_layers * weights_per_layer * (1 + bidirectional)
layer_weights = [
all_weights[i : i + weights_per_layer]
for i in range(0, len(all_weights), weights_per_layer)
]
if batch_sizes is not None:
input, batch_sizes = _pad_packed_sequence(input, batch_sizes)
if batch_first:
input = ivy.swapaxes(input, 0, 1)
if dropout and train:
raise ivy.utils.exceptions.IvyNotImplementedException()
unidirectional = not bidirectional
h0, c0 = initial_states
h_outs, c_outs = [], []
output = input
for i in range(num_layers):
if unidirectional:
if weights_per_layer == 4:
weight_ih, weight_hh, (bias_i, bias_h) = _transform_weights(
layer_weights, i
)
else:
weight_ih, weight_hh = _transform_weights_no_bias(layer_weights, i)
bias_i = bias_h = None
state_indices = i, i + 1
else:
if weights_per_layer == 4:
weight_ih_f, weight_hh_f, (bias_i_f, bias_h_f) = _transform_weights(
layer_weights, 2 * i
)
weight_ih_b, weight_hh_b, (bias_i_b, bias_h_b) = _transform_weights(
layer_weights, 2 * i + 1
)
else:
weight_ih_f, weight_hh_f = _transform_weights_no_bias(
layer_weights, 2 * i
)
weight_ih_b, weight_hh_b = _transform_weights_no_bias(
layer_weights, 2 * i + 1
)
bias_i_f = bias_h_f = bias_i_b = bias_h_b = None
weight_ih = weight_ih_f, weight_ih_b
weight_hh = weight_hh_f, weight_hh_b
bias_i = bias_i_f, bias_i_b
bias_h = bias_h_f, bias_h_b
state_indices = 2 * i, 2 * i + 2
output, (h_out, c_out) = _lstm_layer(
output,
(
_retrieve_state(h0, *state_indices, num_layers),
_retrieve_state(c0, *state_indices, num_layers),
),
(weight_ih, weight_hh),
(bias_i, bias_h),
bidirectional,
batch_first=False,
batch_sizes=batch_sizes,
)
h_outs.append(h_out)
c_outs.append(c_out)
if batch_first:
output = ivy.swapaxes(output, 0, 1)
h_outs = h_out if num_layers == 1 else ivy.concat(h_outs, axis=0)
c_outs = c_out if num_layers == 1 else ivy.concat(c_outs, axis=0)
if batch_sizes is not None:
output = _pack_padded_sequence(output, batch_sizes)[0]
return output[:, -1], output, (h_outs, c_outs)
# Helpers #
def _handle_padding(x, strides, filters, padding):
if isinstance(padding, str) and padding.upper() == "SAME":
if x % strides == 0:
pad = max(filters - strides, 0)
else:
pad = max(filters - (x % strides), 0)
else:
pad = 0
return pad
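# A worked example (hypothetical values): for input size 7, stride 2 and
# kernel 3 with "SAME" padding, 7 % 2 != 0, so pad = max(3 - (7 % 2), 0) = 2,
# which matches ceil(7 / 2) = 4 output positions: (4 - 1) * 2 + 3 - 7 = 2.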
def _validate_max_pool_params(
kernel, strides, padding, dilation, ceil_mode, dims, data_format
):
if isinstance(kernel, int):
kernel = (kernel,) * dims
elif len(kernel) == 1:
kernel = (kernel[0],) * dims
elif len(kernel) not in [dims, dims + 2]:
raise ValueError(
"The kernel should be an integer, or a tuple of length"
f" {list({1, dims, dims + 2})}"
)
if isinstance(strides, int):
strides = (strides,) * dims
elif len(strides) == 1:
strides = (strides[0],) * dims
elif len(strides) not in [dims, dims + 2]:
raise ValueError(
"The stride should be an integer, or a tuple of length"
f" {list({1, dims, dims + 2})}"
)
if isinstance(padding, int):
padding = [(padding,) * 2] * dims
elif isinstance(padding, tuple) and len(padding) == 1:
padding = [(padding[0],) * 2] * dims
elif isinstance(padding, tuple) and len(padding) == dims:
padding = [(padding[i],) * 2 for i in range(dims)]
elif isinstance(padding, list) and len(padding) == dims:
if not all(isinstance(p, tuple) and len(p) == 2 for p in padding):
raise ValueError("Explicit padding must be a list of tuple of two integers")
if isinstance(padding, str) and padding.upper() not in ["VALID", "SAME"]:
raise ValueError(
f"Invalid padding arg {padding}Must be one of: 'VALID' or 'SAME'"
)
if isinstance(dilation, int):
dilation = (dilation,) * dims
elif len(dilation) == 1:
dilation = (dilation[0],) * dims
elif len(dilation) != dims:
raise ValueError(
f"Dilation must be an integer or a tuple of length {list({1, dims})}"
)
if min(dilation) < 1:
raise ValueError("All values of `dilation` must be positive")
# Other errors
if isinstance(padding, str) and (padding.upper() == "VALID") and ceil_mode:
raise ValueError("When 'padding' is 'VALID', 'ceil_mode' must be False")
assert len(kernel) == len(strides), f"len({kernel}) must equal len({strides})"
ret = kernel, strides, padding, dilation
# Account for dilation when padding > kernel/2. Not the case in torch by default.
if len(dilation) < len(kernel):
if data_format[:2] == "NC":
dilation = [1, 1, *dilation]
else:
dilation = [1, *dilation, 1]
elif len(dilation) > len(kernel):
if data_format[:2] == "NC":
kernel = [1, 1, *kernel]
else:
kernel = [1, *kernel, 1]
new_kernel = tuple(dilation[i] * (kernel[i] - 1) + 1 for i in range(1, len(kernel)))
if isinstance(padding, list) and len(padding) == len(new_kernel):
ivy.utils.assertions.check_kernel_padding_size(new_kernel, padding)
return ret
def _depth_max_pooling_helper(
x_shape, kernel, strides, dims, data_format="channel_last"
):
# Determine depth pooling.
# We assume that the kernel and the data have the same data_format.
depth_pooling = False
CHANNEL_LAST = "channel_last"
channel_idx = -1 if data_format == CHANNEL_LAST else 1
if len(kernel) == dims + 2:
spatial_kernel = kernel[1:-1] if data_format == CHANNEL_LAST else kernel[2:]
if kernel[channel_idx] != 1:
depth_pooling = True
if any(i != 1 for i in spatial_kernel):
raise NotImplementedError(
"MaxPooling supports exactly one of pooling across"
" depth or pooling across width/height."
)
if len(strides) != dims + 2 or strides[channel_idx] != kernel[channel_idx]:
raise NotImplementedError(
"Depthwise max pooling requires the depth window to equal the depth"
" stride"
)
if x_shape[channel_idx] % kernel[channel_idx] != 0:
raise NotImplementedError(
"Depthwise max pooling requires the depth window to evenly divide"
" the input depth"
)
kernel = [kernel[channel_idx], *[1] * (dims - 1)]
strides = [strides[channel_idx], *[1] * (dims - 1)]
else:
kernel = spatial_kernel
if len(strides) == dims + 2:
strides = strides[1:-1] if data_format == CHANNEL_LAST else strides[2:]
return kernel, strides, depth_pooling
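# A worked example (hypothetical values): for 2-D NHWC input of shape
# (1, 4, 4, 6), kernel=(1, 1, 1, 2) and strides=(1, 1, 1, 2) select depth
# pooling, and the helper returns kernel=[2, 1], strides=[2, 1] and
# depth_pooling=True.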
def _deconv_length(dim_size, stride_size, kernel_size, padding, dilation=1):
kernel_size = kernel_size + (kernel_size - 1) * (dilation - 1)
if padding == "SAME":
dim_size = dim_size * stride_size
else:
dim_size = dim_size * stride_size + max(kernel_size - stride_size, 0)
return dim_size
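# Worked examples (hypothetical values): _deconv_length(4, 2, 3, "SAME") -> 8,
# _deconv_length(4, 2, 3, "VALID") -> 4 * 2 + max(3 - 2, 0) = 9, and with
# dilation=2 the effective kernel grows to 3 + (3 - 1) * (2 - 1) = 5, giving 11.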
def _get_x_data_format(dims: int = 2, data_format: str = "channel_first"):
if dims == 1:
if data_format == "channel_first":
return "NCW"
else:
return "NWC"
if dims == 2:
if data_format == "channel_first":
return "NCHW"
else:
return "NHWC"
elif dims == 3:
if data_format == "channel_first":
return "NCDHW"
else:
return "NDHWC"
def _get_num_padded_values(i, p, n, k, s):
"""Get number of padded values in a specific window.
Parameters
----------
    i
        window index
    p
        total amount of padding
    n
        input size
    k
        kernel size
    s
        stride
Returns
-------
number of padded values in a particular window represented by i
"""
current_index = s * i
left_padding = p // 2
return max(0, left_padding - current_index) + max(
0, current_index + k - n - left_padding
)
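# A worked example (hypothetical values): with p=2, n=7, k=3, s=2, the first
# window (i=0) contains max(0, 1 - 0) + max(0, 0 + 3 - 7 - 1) = 1 padded value
# (from the left pad), and the last window (i=3) contains 1 (from the right).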
# TODO : integrate logic for adaptive sampling points in ivy.interpolate
def _bilinear_interpolate(
input, # [N, C, H, W]
roi_batch_ind, # [K]
y, # [K, PH, IY]
x, # [K, PW, IX]
ymask, # [K, IY]
xmask, # [K, IX]
):
_, channels, height, width = input.shape
# deal with inverse element out of feature map boundary
y = y.clip(0, None)
x = x.clip(0, None)
y_low = y.astype(ivy.int32)
x_low = x.astype(ivy.int32)
y_high = ivy.where(y_low >= height - 1, height - 1, y_low + 1)
y_low = ivy.where(y_low >= height - 1, height - 1, y_low)
y = ivy.where(y_low >= height - 1, y.astype(input.dtype), y)
x_high = ivy.where(x_low >= width - 1, width - 1, x_low + 1)
x_low = ivy.where(x_low >= width - 1, width - 1, x_low)
x = ivy.where(x_low >= width - 1, x.astype(input.dtype), x)
ly = y - y_low
lx = x - x_low
hy = 1.0 - ly
hx = 1.0 - lx
def masked_index(
y, # [K, PH, IY]
x, # [K, PW, IX]
):
if ymask is not None:
assert xmask is not None
y = ivy.where(ymask[:, None, :], y, 0)
x = ivy.where(xmask[:, None, :], x, 0)
return input[
roi_batch_ind[:, None, None, None, None, None],
ivy.arange(channels, device=input.device)[None, :, None, None, None, None],
y[:, None, :, None, :, None], # prev [K, PH, IY]
x[:, None, None, :, None, :], # prev [K, PW, IX]
] # [K, C, PH, PW, IY, IX]
v1 = masked_index(y_low, x_low)
v2 = masked_index(y_low, x_high)
v3 = masked_index(y_high, x_low)
v4 = masked_index(y_high, x_high)
# all ws preemptively [K, C, PH, PW, IY, IX]
def outer_prod(y, x):
return y[:, None, :, None, :, None] * x[:, None, None, :, None, :]
w1 = outer_prod(hy, hx)
w2 = outer_prod(hy, lx)
w3 = outer_prod(ly, hx)
w4 = outer_prod(ly, lx)
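    # note: since hy = 1 - ly and hx = 1 - lx, the four weights satisfy
    # (hy + ly) * (hx + lx) = 1, so `val` below is a convex combination of the
    # four corner values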
val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
return val
def _convert_boxes_to_roi_format(boxes):
concat_boxes = ivy.concat(boxes, axis=0)
temp = []
for i, b in enumerate(boxes):
temp.append(ivy.full_like(b[:, :1], i))
ids = ivy.concat(temp, axis=0)
rois = ivy.concat([ids, concat_boxes], axis=1)
return rois
def _lstm_cell(
x,
init_h,
init_c,
kernel,
recurrent_kernel,
bias,
recurrent_bias,
batch_first,
batch_sizes=None,
):
init_h = ivy.squeeze(init_h, axis=0)
init_c = ivy.squeeze(init_c, axis=0)
out, states = ivy.lstm_update(
x,
init_h,
init_c,
kernel,
recurrent_kernel,
bias=bias,
recurrent_bias=recurrent_bias,
time_major=not batch_first,
)
h, c = states
h = ivy.expand_dims(h) if len(h.shape) == 2 else h
c = ivy.expand_dims(c) if len(c.shape) == 2 else c
return out, (h, c)
def _lstm_layer(
x, hidden, weights, biases, bidirectional, batch_first, batch_sizes=None
):
if not bidirectional:
result, (h, c) = _lstm_cell(
x,
*hidden,
*weights,
*biases,
batch_first=batch_first,
batch_sizes=batch_sizes,
)
else:
result_fw, (h_fw, c_fw) = _lstm_cell(
x,
hidden[0][:1],
hidden[1][:1],
weights[0][0],
weights[1][0],
biases[0][0],
biases[1][0],
batch_first=batch_first,
batch_sizes=batch_sizes,
)
x_reversed = ivy.flip(x, axis=0)
result_bw, (h_bw, c_bw) = _lstm_cell(
x_reversed,
hidden[0][1:],
hidden[1][1:],
weights[0][1],
weights[1][1],
biases[0][1],
biases[1][1],
batch_first=batch_first,
batch_sizes=batch_sizes,
)
result_bw = ivy.flip(result_bw, axis=0)
result = ivy.concat([result_fw, result_bw], axis=len(result_fw.shape) - 1)
c = ivy.concat([c_fw, c_bw], axis=0)
h = ivy.concat([h_fw, h_bw], axis=0)
return result, (h, c)
def _pack_padded_sequence(input, lengths):
input = ivy.swapaxes(input, 0, 1)
data = []
batch_sizes = []
for i in range(int(max(lengths))):
valid_data_mask = ivy.array(lengths) > i
data.append(input[valid_data_mask, i])
batch_sizes.append(int(sum(valid_data_mask)))
data = ivy.concat(data)
batch_sizes = ivy.array(batch_sizes, dtype=ivy.int64)
return data, batch_sizes
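# A worked example (hypothetical values): for time-major input holding two
# sequences with lengths [3, 1], the returned batch_sizes are [2, 1, 1] and
# `data` stacks the four valid timesteps in time order.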
def _pad_packed_sequence(data, batch_sizes):
padded_data = ivy.full(
(len(batch_sizes), int(max(batch_sizes)), *data.shape[1:]),
0,
dtype=data.dtype,
device=data.device,
)
data_offset = 0
for i, batch_size in enumerate(batch_sizes):
batch_size = int(batch_size)
padded_data[i, :batch_size] = data[data_offset : data_offset + batch_size]
data_offset += batch_size
lengths = ivy.sum(
ivy.arange(1, int(max(batch_sizes)) + 1)[:, ivy.newaxis] <= batch_sizes,
axis=1,
dtype=ivy.int64,
)
return padded_data, lengths
def _retrieve_state(x, start, end, num_layers):
return x if num_layers == 1 else _slice_along_axis(x, start=start, stop=end, axis=0)
def _transform_weights(layer_weights, layer_index):
weights = layer_weights[layer_index]
weight_ih, weight_hh, bias_ih, bias_hh = weights
return (
ivy.swapaxes(weight_ih, 0, 1),
ivy.swapaxes(weight_hh, 0, 1),
(bias_ih, bias_hh),
)
def _transform_weights_no_bias(layer_weights, layer_index):
weights = layer_weights[layer_index]
weight_ih, weight_hh = weights
return ivy.swapaxes(weight_ih, 0, 1), ivy.swapaxes(weight_hh, 0, 1)
def _slice_along_axis(x, start=0, stop=None, stride=1, axis=0):
if axis >= 0:
slices = [slice(None)] * axis + [slice(start, stop, stride)]
else:
slices = [Ellipsis, slice(start, stop, stride)] + [slice(None)] * (-1 - axis)
return x[tuple(slices)]
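# e.g. _slice_along_axis(x, start=1, stop=3, axis=1) is equivalent to x[:, 1:3],
# and _slice_along_axis(x, start=1, stop=3, axis=-1) to x[..., 1:3]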
@handle_exceptions
@handle_nestable
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
def roi_align(
input, boxes, output_size, spatial_scale=1.0, sampling_ratio=-1, aligned=False
):
pooled_height, pooled_width = (
(output_size, output_size) if isinstance(output_size, int) else output_size
)
if not isinstance(boxes, ivy.Array):
boxes = _convert_boxes_to_roi_format(boxes)
orig_dtype = input.dtype
_, _, height, width = input.shape
ph = ivy.arange(pooled_height, device=input.device) # [PH]
pw = ivy.arange(pooled_width, device=input.device) # [PW]
# input: [N, C, H, W]
# boxes: [K, 5]
roi_batch_ind = boxes[:, 0].astype(ivy.int32) # [K]
offset = 0.5 if aligned else 0.0
roi_start_w = boxes[:, 1] * spatial_scale - offset # [K]
roi_start_h = boxes[:, 2] * spatial_scale - offset # [K]
roi_end_w = boxes[:, 3] * spatial_scale - offset # [K]
roi_end_h = boxes[:, 4] * spatial_scale - offset # [K]
roi_width = roi_end_w - roi_start_w # [K]
roi_height = roi_end_h - roi_start_h # [K]
if not aligned:
roi_width = ivy.clip(roi_width, 1.0, None) # [K]
roi_height = ivy.clip(roi_height, 1.0, None) # [K]
bin_size_h = roi_height / pooled_height # [K]
bin_size_w = roi_width / pooled_width # [K]
exact_sampling = sampling_ratio > 0
roi_bin_grid_h = (
sampling_ratio if exact_sampling else ivy.ceil(roi_height / pooled_height)
) # scalar or [K]
roi_bin_grid_w = (
sampling_ratio if exact_sampling else ivy.ceil(roi_width / pooled_width)
) # scalar or [K]
"""Iy, ix = dims(2)"""
if exact_sampling:
count = max(roi_bin_grid_h * roi_bin_grid_w, 1) # scalar
iy = ivy.arange(roi_bin_grid_h, device=input.device) # [IY]
ix = ivy.arange(roi_bin_grid_w, device=input.device) # [IX]
ymask = None
xmask = None
else:
count = ivy.clip(roi_bin_grid_h * roi_bin_grid_w, 1, None) # [K]
iy = ivy.arange(height, device=input.device) # [IY]
ix = ivy.arange(width, device=input.device) # [IX]
ymask = iy[None, :] < roi_bin_grid_h[:, None] # [K, IY]
xmask = ix[None, :] < roi_bin_grid_w[:, None] # [K, IX]
def from_K(t):
return t[:, None, None]
y = (
from_K(roi_start_h)
+ ph[None, :, None] * from_K(bin_size_h)
+ (iy[None, None, :] + 0.5).astype(input.dtype)
* from_K(bin_size_h / roi_bin_grid_h)
) # [K, PH, IY]
x = (
from_K(roi_start_w)
+ pw[None, :, None] * from_K(bin_size_w)
+ (ix[None, None, :] + 0.5).astype(input.dtype)
* from_K(bin_size_w / roi_bin_grid_w)
) # [K, PW, IX]
val = _bilinear_interpolate(
input, roi_batch_ind, y, x, ymask, xmask
) # [K, C, PH, PW, IY, IX]
# Mask out samples that weren't actually adaptively needed
if not exact_sampling:
val = ivy.where(ymask[:, None, None, None, :, None], val, 0)
val = ivy.where(xmask[:, None, None, None, None, :], val, 0)
output = val.sum(axis=(-1, -2)) # remove IY, IX ~> [K, C, PH, PW]
if isinstance(count, ivy.Array):
output /= count[:, None, None, None]
else:
output /= count
output = output.astype(orig_dtype)
return output
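# A hedged usage sketch (illustrative values): pooling one ROI that covers a
# whole 4x4 single-channel feature map down to 2x2 output bins:
#   feat = ivy.reshape(ivy.arange(16, dtype="float32"), (1, 1, 4, 4))
#   rois = ivy.array([[0.0, 0.0, 0.0, 4.0, 4.0]])  # [batch_idx, x1, y1, x2, y2]
#   roi_align(feat, rois, output_size=2).shape  # -> (1, 1, 2, 2)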
# TODO add paddle backend implementation back,
# once paddle.argsort uses a stable algorithm
# https://github.com/PaddlePaddle/Paddle/issues/57508
@handle_exceptions
@handle_nestable
@handle_array_like_without_promotion
@inputs_to_ivy_arrays
@handle_array_function
def nms(
boxes,
scores=None,
iou_threshold=0.5,
max_output_size=None,
score_threshold=float("-inf"),
):
change_id = False
    if score_threshold != float("-inf") and scores is not None:
keep_idx = scores > score_threshold
boxes = boxes[keep_idx]
scores = scores[keep_idx]
change_id = True
nonzero = ivy.nonzero(keep_idx)[0].flatten()
if scores is None:
scores = ivy.ones((boxes.shape[0],), dtype=boxes.dtype)
if len(boxes) < 2:
if len(boxes) == 1:
ret = ivy.array([0], dtype=ivy.int64)
else:
ret = ivy.array([], dtype=ivy.int64)
else:
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
areas = (x2 - x1) * (y2 - y1)
        order = ivy.argsort(
            (-1 * scores), stable=True
        )  # process the highest-scoring boxes first
keep = []
while order.size > 0:
            i = order[0]  # pick the highest-scoring remaining box
keep.append(i)
xx1 = ivy.maximum(x1[i], x1[order[1:]])
yy1 = ivy.maximum(y1[i], y1[order[1:]])
xx2 = ivy.minimum(x2[i], x2[order[1:]])
yy2 = ivy.minimum(y2[i], y2[order[1:]])
            w = ivy.maximum(0.0, xx2 - xx1)  # intersection width
            h = ivy.maximum(0.0, yy2 - yy1)  # intersection height
inter = w * h
ovr = inter / (areas[i] + areas[order[1:]] - inter)
inds = ivy.nonzero(ovr <= iou_threshold)[0]
order = order[inds + 1]
ret = ivy.array(keep)
if len(ret) > 1 and scores is not None:
ret = sorted(
ret.flatten().tolist(), reverse=True, key=lambda x: (scores[x], -x)
)
ret = ivy.array(ret, dtype=ivy.int64).flatten()
if change_id and len(ret) > 0:
ret = ivy.array(nonzero[ret], dtype=ivy.int64).flatten()
return ret.flatten()[:max_output_size]
nms.mixed_backend_wrappers = {
"to_add": (
"handle_backend_invalid",
"inputs_to_native_arrays",
"outputs_to_ivy_arrays",
"handle_device",
),
"to_skip": ("inputs_to_ivy_arrays",),
}
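# A hedged usage sketch (illustrative values): for two heavily-overlapping
# boxes (IoU ~ 0.68 > 0.5), only the higher-scoring one survives:
#   boxes = ivy.array([[0.0, 0.0, 10.0, 10.0], [1.0, 1.0, 11.0, 11.0]])
#   scores = ivy.array([0.9, 0.8])
#   nms(boxes, scores, iou_threshold=0.5)  # -> ivy.array([0])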
| ivy/ivy/functional/ivy/layers.py/0 | {
"file_path": "ivy/ivy/functional/ivy/layers.py",
"repo_id": "ivy",
"token_count": 53861
} | 49 |
"""Base class for helper module methods."""
# global
import functools
import logging
# local
import ivy
from ivy.data_classes.container import Container
from ivy.func_wrapper import _get_first_array
class ModuleHelpers:
def _find_variables(
self,
/,
*,
obj=None,
without_initialisation=False,
_visited=None,
):
"""Find all internal variables in obj. Return empty Container if obj is
None.
Parameters
----------
obj
The submodule whose internal variables are to be returned. Default
is None.
        without_initialisation
            Whether or not to initialize the variables, to avoid initialization
            when the model variables are passed in the input directly.
_visited
Placeholder for tracking the visited nodes, do not set this parameter.
Returns
-------
ret
The internal variables of the submodule passed in the argument.
"""
_visited = ivy.default(_visited, {})
vs = Container()
if id(obj) in _visited:
return vs
_visited[id(obj)] = True
# ToDo: add support for finding local variables, if/when JAX supports
# uniquely flagging variables
if isinstance(obj, ModuleHelpers) and obj is not self:
if not obj.built and without_initialisation:
return lambda: obj._build_and_return_v(
*obj._args, dynamic_backend=self._dynamic_backend, **obj._kwargs
)
return obj._build_and_return_v(
*obj._args, dynamic_backend=obj._dynamic_backend, **obj._kwargs
)
elif isinstance(obj, (list, tuple)):
for i, v in enumerate(obj):
ret = self._find_variables(
obj=v,
without_initialisation=without_initialisation,
_visited=_visited,
)
if ret:
vs[f"v{str(i)}"] = ret
return vs
elif isinstance(obj, dict):
for k, v in obj.items():
ret = self._find_variables(
obj=v,
without_initialisation=without_initialisation,
_visited=_visited,
)
if ret:
vs[k[1:] if k[0] == "_" else k] = ret
return vs
elif not hasattr(obj, "__dict__"):
return vs
for k, v in obj.__dict__.items():
if v is not None and k[0:2] != "__" and k != "_module_dict":
ret = self._find_variables(
obj=v,
without_initialisation=without_initialisation,
_visited=_visited,
)
if ret:
vs[k[1:] if k[0] == "_" else k] = ret
return vs
def _find_buffers(self):
if hasattr(self, "_module_dict"):
for key, sub_module in self._module_dict.items():
if len(sub_module._buffers) > 0:
self._buffers[key] = sub_module._buffers
def _build_and_return_v(self, *args, **kwargs):
self.build(*args, **kwargs)
return self.v
@staticmethod
def _extract_v(v, keychain_mappings: dict, orig_key_chain, /):
"""Extract the variables from the variables container v using the key
orig_key_chain and reinstantiate the duplicate variables that were
removed by _remove_duplicate_variables in their correct locations using
keychain_mappings.
Parameters
----------
v
The variables container
keychain_mappings
            The keychain mappings of duplicate variables
orig_key_chain
keychain of the variables to be extracted
Returns
-------
ret_cont
container with the extracted variables.
"""
if v.cont_has_key_chain(orig_key_chain):
ret_cont = v.cont_at_key_chain(orig_key_chain)
else:
ret_cont = Container()
for old_kc, new_kc in keychain_mappings.items():
if orig_key_chain in old_kc:
# Check if `v` contains `new_kc` before replacing in `ret_cont`
if v.cont_has_key_chain(new_kc):
ret_cont = ret_cont.cont_set_at_key_chain(
"/".join(old_kc.split("/")[1:]), v.cont_at_key_chain(new_kc)
)
else:
continue
return ret_cont
@staticmethod
def _remove_duplicate_variables(vs, created, /):
"""Remove duplicate variables in `vs` referring to `created`.
Parameters
----------
vs
The container that needs to be pruned.
created
The container as the duplication reference.
Returns
-------
vs
The container after removing duplicate variables.
keychain_mappings
            Dict mapping each removed key chain to the key chain it duplicates.
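            For example, if ``vs.a`` and ``vs.b`` reference the same underlying
            array that ``created.a`` holds, ``vs.b`` is pruned and
            ``keychain_mappings["b"] = "a"``.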
"""
created_ids = created.cont_map(lambda x, kc: id(x))
vs_ids = vs.cont_map(lambda x, kc: id(x))
ids = {}
duplicate_keychains = []
keychain_mappings = {}
def unique_callback(x, kc):
ids[x] = kc
def found_dup_callback(x, kc):
if ids[x] == kc:
return
duplicate_keychains.append(kc)
keychain_mappings[kc] = ids[x]
created_ids.cont_map(lambda x, kc: unique_callback(x, kc))
vs_ids.cont_map(
lambda x, kc: (
unique_callback(x, kc) if x not in ids else found_dup_callback(x, kc)
)
)
for dup_kc in duplicate_keychains:
vs = vs.cont_prune_key_chain(dup_kc)
return vs, keychain_mappings
def _wrap_call_methods(
self, keychain_mappings, /, *, key="", obj=None, _visited=None
):
"""Wrap the call methods of the Module object by looping over all the
items within the module, wrapping the __call__ methods of all
submodules using _fn_with_var_arg.
Parameters
----------
keychain_mappings
The keychain mappings of the object
key
The keychain of the object obj, used for recursion.
obj
the object whose __call__ method is to be wrapped
_visited
Placeholder for tracking the visited nodes, do not set this parameter.
Returns
-------
None
"""
_visited = ivy.default(_visited, {})
if id(obj) in _visited or not isinstance(key, str):
return
_visited[id(obj)] = True
if isinstance(obj, ModuleHelpers) and obj is not self:
orig_key_chain = key[1:] if key[0] == "_" else key
obj.__call__ = self._fn_with_var_arg(
obj.__call__, self._extract_v, keychain_mappings, orig_key_chain
)
return
elif isinstance(obj, (list, tuple)):
for i, val in enumerate(obj):
self._wrap_call_methods(
keychain_mappings,
key=f"{key}/v{str(i)}",
obj=val,
_visited=_visited,
)
return
elif isinstance(obj, dict):
for k, val in obj.items():
k = f"{key}/{k}" if key != "" and isinstance(k, str) else k
self._wrap_call_methods(
keychain_mappings, key=k, obj=val, _visited=_visited
)
return
if not hasattr(obj, "__dict__"):
return
for k, val in obj.__dict__.items():
if k[0:2] == "__":
continue
k = f"{key}/{k}" if key != "" else k
if val is not None:
self._wrap_call_methods(
keychain_mappings, key=k, obj=val, _visited=_visited
)
return
def _call(self, *args, v=None, buffers=None, **kwargs):
"""Compute forward pass of the layer, treating layer instance as
callable function.
Parameters
----------
args
Positional arguments to the _build method.
v
Replace `v` of current layer when forwarding. Restore
after the forward finished.
        buffers
            Replace `buffers` of current layer when forwarding. Restore
            after the forward finished.
kwargs
Keyword arguments to the _build method.
Returns
-------
ret
Result of the forward pass of the layer.
"""
if not self._built:
first_arr = _get_first_array(*args, **kwargs)
self.build(
*args,
**kwargs,
from_call=True,
dtype=first_arr.dtype if ivy.exists(first_arr) else ivy.default_dtype(),
)
# If `v` was provided, replace with the module's v
replace_v = False
if v is not None:
v_orig = self.v
self._v = v
replace_v = True
# If `buffers` were provided, replace with the module's buffers
replace_buffers = False
if buffers is not None:
buffers_orig = self.buffers
self._buffers = buffers
replace_buffers = True
if replace_v or replace_buffers:
# Call the forward pass
ret = self._forward(*args, **kwargs)
# Replace v, buffers if needed
self._v = v_orig if replace_v else self._v
self._buffers = buffers_orig if replace_buffers else self._buffers
return ret
elif hasattr(self.__call__, "wrapped"):
return self.__call__(*args, **kwargs)
return self._forward(*args, **kwargs)
def _rebuild(self):
logging.warning(
"Building the module again as a trainable module was modified, "
'please use the "explicit" or "on_call" build_modes instead '
'of "on_init" to avoid repetitive building after each addition'
)
self._v = Container()
self._built = False
self.build(*self._args, **self._kwargs)
def _compute_module_dict(self):
self._module_dict = Container()
for key, value in self.__dict__.items():
if isinstance(value, ivy.Module):
if "stateful" in value.__module__ or hasattr(value, "_frontend_module"):
self._module_dict[key] = value
else:
self._module_dict[key] = value._module_dict
@staticmethod
def _addindent(s_, numSpaces):
s = s_.split("\n")
# don't do anything for single-line stuff
if len(s) == 1:
return s_
first = s.pop(0)
s = [(numSpaces * " ") + line for line in s]
s = "\n".join(s)
s = first + "\n" + s
return s
def _fn_with_var_arg_wrapper(
self, *a, fn, v_fn, keychain_mappings, orig_key_chain, **kw
):
if "v" in kw:
del kw["v"]
v = v_fn(self.v, keychain_mappings, orig_key_chain)
return fn(*a, **kw, v=v)
def _fn_with_var_arg(self, fn, v_fn, /, keychain_mappings, orig_key_chain):
"""Extract variables from `v_fn` and use it as inputs for `fn`.
Use `v_fn` to extract the variables and use the extracted
variables as inputs to the call function fn of the module.
"""
_fn_with_var_arg_wrapper = functools.partial(
self._fn_with_var_arg_wrapper,
fn=fn,
v_fn=v_fn,
keychain_mappings=keychain_mappings,
orig_key_chain=orig_key_chain,
)
_fn_with_var_arg_wrapper.wrapped = True
return _fn_with_var_arg_wrapper
def _convert_tensors_to_numpy(self):
"""Recursively traverses the module_dict attribute of a Module object
and converts every container containing tensors to numpy using the
to_numpy() method.
Returns
-------
Module
The converted Module object.
"""
if self.module_dict:
for module in self.module_dict.values():
module._convert_tensors_to_numpy()
self.v = self.v.to_numpy()
def _convert_numpy_to_tensors(self):
"""Recursively traverses the module_dict attribute of a Module object
and converts every container containing tensors to numpy using the
to_numpy() method.
Returns
-------
Module
The converted Module object.
"""
if self.module_dict:
for module in self.module_dict.values():
module._convert_numpy_to_tensors()
self.v = self.v.to_ivy()
else:
self.v = self.v.to_ivy()
| ivy/ivy/stateful/helpers.py/0 | {
"file_path": "ivy/ivy/stateful/helpers.py",
"repo_id": "ivy",
"token_count": 6492
} | 50 |
# NOQA
import ivy
from importlib import import_module as builtin_import
def import_module(name, package=None):
if ivy.is_local():
with ivy.utils._importlib.LocalIvyImporter():
return ivy.utils._importlib._import_module(name=name, package=package)
return builtin_import(name=name, package=package)
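# A hedged usage note: with no local ivy build active, this behaves like
# importlib.import_module, e.g. import_module("ivy.functional.ivy.layers")
# returns the same module object as the builtin import machinery would.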
| ivy/ivy/utils/dynamic_import.py/0 | {
"file_path": "ivy/ivy/utils/dynamic_import.py",
"repo_id": "ivy",
"token_count": 121
} | 51 |
# global
import os
import redis
from colorama import Fore
from hypothesis import settings, HealthCheck, Phase
from hypothesis.database import (
MultiplexedDatabase,
ReadOnlyDatabase,
DirectoryBasedExampleDatabase,
)
from hypothesis.extra.redis import RedisExampleDatabase
hypothesis_cache = f"{os.getcwd()}/.hypothesis/examples/"
redis_connect_dev = None
redis_connect_master = None
try:
os.makedirs(hypothesis_cache)
except FileExistsError:
pass
def is_db_available(master=False, credentials=None):
global redis_connect_dev, redis_connect_master
redis_connect_local = None
if master:
redis_connect_master = redis.Redis.from_url(
url=credentials[0], password=credentials[1]
)
redis_connect_local = redis_connect_master
else:
redis_connect_dev = redis.Redis.from_url(
url="redis://redis-17011.c259.us-central1-2.gce.cloud.redislabs.com:17011",
username="general_use",
password="Hypothesiscache@123",
max_connections=2,
)
redis_connect_local = redis_connect_dev
try:
redis_connect_local.get("b")
except redis.exceptions.ConnectionError:
print("Fallback to DirectoryBasedExamples")
return False
return True
def pytest_terminal_summary(terminalreporter):
from .test_ivy.conftest import mod_backend
session = terminalreporter._session
if session.testscollected == 0:
return
passed_ratio = 1 - (session.testsfailed / session.testscollected)
text = f" {passed_ratio:.1%} of {session.testscollected} passed "
text = text.center(terminalreporter._screen_width, "=")
terminalreporter.write(content=Fore.GREEN + text)
for key in mod_backend:
if mod_backend[key]:
mod_backend[key][0].terminate()
def pytest_addoption(parser):
parser.addoption(
"-N",
"--num-examples",
action="store",
default=25,
type=int,
help="set max examples generated by Hypothesis",
)
parser.addoption(
"--deadline",
action="store",
default=500000,
type=int,
help="set deadline for testing one example",
)
parser.addoption(
"--ivy-tb",
action="store",
default="full",
type=str,
help="ivy traceback",
)
parser.addoption(
"--reuse-only",
default=False,
action="store_true",
help="Only reuse stored examples from database",
)
parser.addoption(
"-R",
"--robust",
action="store_true",
default=False,
help=(
"Disable Hypothesis Shrinking. Allow all Hypothesis HealthChecks."
"Disabling the HealthChecks will most likely introduce new failures, "
"this mode should be only used during development on the testing pipeline."
),
)
def pytest_configure(config):
profile_settings = {}
getopt = config.getoption
max_examples = getopt("--num-examples")
deadline = getopt("--deadline")
if (
os.getenv("REDIS_URL", default=None)
and os.environ["REDIS_URL"]
and is_db_available(
master=True,
credentials=(os.environ["REDIS_URL"], os.environ["REDIS_PASSWD"]),
)
):
print("Update Database with examples !")
profile_settings["database"] = RedisExampleDatabase(
redis_connect_master, key_prefix=b"hypothesis-example:"
)
elif not os.getenv("REDIS_URL") and is_db_available():
print("Use Database in ReadOnly Mode with local caching !")
shared = RedisExampleDatabase(
redis_connect_dev, key_prefix=b"hypothesis-example:"
)
profile_settings["database"] = MultiplexedDatabase(
DirectoryBasedExampleDatabase(path=hypothesis_cache),
ReadOnlyDatabase(shared),
)
else:
print("Database unavailable, local caching only !")
profile_settings["database"] = DirectoryBasedExampleDatabase(
path=hypothesis_cache
)
if max_examples:
profile_settings["max_examples"] = max_examples
if deadline:
profile_settings["deadline"] = deadline
if getopt("--reuse-only"):
profile_settings["phases"] = [Phase.explicit, Phase.reuse]
settings.register_profile(
"ivy_profile",
**profile_settings,
suppress_health_check=(HealthCheck(3), HealthCheck(2), HealthCheck(1)),
print_blob=True,
)
settings.register_profile(
"robust",
phases=[Phase.explicit, Phase.reuse, Phase.generate, Phase.target],
)
settings.register_profile(
"diff",
database=None,
derandomize=True,
max_examples=100,
deadline=5000,
phases=[Phase.generate],
suppress_health_check=(HealthCheck(3), HealthCheck(2), HealthCheck(1)),
)
if getopt("robust"):
settings.load_profile("robust")
else:
settings.load_profile("ivy_profile")
| ivy/ivy_tests/conftest.py/0 | {
"file_path": "ivy/ivy_tests/conftest.py",
"repo_id": "ivy",
"token_count": 2175
} | 52 |
from dataclasses import dataclass
@dataclass(frozen=True)
class FrontendMethodData:
ivy_init_module: str
framework_init_module: str
init_name: str
method_name: str
| ivy/ivy_tests/test_ivy/helpers/structs.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/helpers/structs.py",
"repo_id": "ivy",
"token_count": 69
} | 53 |
from .base import SupportedDtypes, SupportedDeviecs, FrontendConfig
import numpy as np
import xgboost as xgb
def get_config():
return XGBoostFrontendConfig()
class XGBoostFrontendConfig(FrontendConfig):
Dtype = np.dtype
Device = str
valid_devices = ("cpu", "gpu")
invalid_devices = ("tpu",)
valid_dtypes = [
"int8",
"int16",
"int32",
"int64",
"uint8",
"uint16",
"uint32",
"uint64",
"float16",
"float32",
"float64",
"bool",
]
invalid_dtypes = ["bfloat16", "complex64", "complex128"]
valid_numeric_dtypes = [
"int8",
"int16",
"int32",
"int64",
"uint8",
"uint16",
"uint32",
"uint64",
"float16",
"float32",
"float64",
]
invalid_numeric_dtypes = ["bfloat16", "complex64", "complex128"]
valid_int_dtypes = [
"int8",
"int16",
"int32",
"int64",
"uint8",
"uint16",
"uint32",
"uint64",
]
invalid_int_dtypes = []
valid_uint_dtypes = [
"uint8",
"uint16",
"uint32",
"uint64",
]
invalid_uint_dtypes = []
valid_float_dtypes = [
"float16",
"float32",
"float64",
]
invalid_float_dtypes = ["bfloat16"]
valid_complex_dtypes = []
invalid_complex_dtypes = ["complex64", "complex128"]
@property
def supported_devices(self):
return SupportedDeviecs(
valid_devices=self.valid_devices, invalid_devices=self.invalid_devices
)
@property
def supported_dtypes(self):
return SupportedDtypes(
valid_dtypes=self.valid_dtypes,
invalid_dtypes=self.invalid_dtypes,
valid_numeric_dtypes=self.valid_numeric_dtypes,
invalid_numeric_dtypes=self.invalid_numeric_dtypes,
valid_int_dtypes=self.valid_int_dtypes,
invalid_int_dtypes=self.invalid_int_dtypes,
valid_uint_dtypes=self.valid_uint_dtypes,
invalid_uint_dtypes=self.invalid_uint_dtypes,
valid_float_dtypes=self.valid_float_dtypes,
invalid_float_dtypes=self.invalid_float_dtypes,
valid_complex_dtypes=self.valid_complex_dtypes,
invalid_complex_dtypes=self.invalid_complex_dtypes,
)
def native_array(self, x):
return x
def is_native_array(self, x):
return isinstance(x, xgb.DMatrix)
def to_numpy(self, x):
return x.get_data().toarray()
def as_native_dtype(self, dtype: str):
return np.dtype(dtype)
def as_native_device(self, device: str):
return device
def isscalar(self, x):
return np.isscalar(x)
| ivy/ivy_tests/test_ivy/test_frontends/config/xgboost.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/config/xgboost.py",
"repo_id": "ivy",
"token_count": 1401
} | 54 |
from hypothesis import strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
# --- Helpers --- #
# --------------- #
# one_hot
@st.composite
def _dtype_indices_classes_axis(draw):
classes = draw(helpers.ints(min_value=2, max_value=100))
dtype, indices, shape = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
min_value=0,
max_value=classes - 1,
small_abs_safety_factor=4,
ret_shape=True,
)
)
axis = draw(st.integers(min_value=-1, max_value=len(shape) - 1))
return dtype, indices, classes, axis
# --- Main --- #
# ------------ #
@handle_frontend_test(
fn_tree="jax.nn.celu",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex"),
min_value=-5,
max_value=5,
safety_factor_scale="linear",
),
alpha=helpers.floats(min_value=0.01, max_value=1),
test_with_out=st.just(False),
)
def test_jax_celu(
*,
dtype_and_x,
alpha,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtypes, xs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xs[0],
alpha=alpha,
)
# elu
@handle_frontend_test(
fn_tree="jax.nn.elu",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_integer"),
min_value=-5,
max_value=5,
safety_factor_scale="linear",
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_jax_elu(
*,
dtype_and_x,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtypes, xs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xs[0],
alpha=xs[1],
rtol=1e-03,
atol=1e-03,
)
@handle_frontend_test(
fn_tree="jax.nn.gelu",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex"),
min_value=-1e4,
max_value=1e4,
abs_smallest_val=1e-3,
),
approximate=st.booleans(),
test_with_out=st.just(False),
)
def test_jax_gelu(
*,
dtype_and_x,
approximate,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtype, x = dtype_and_x
    # As the erf function doesn't support complex dtypes
if "complex" in input_dtype[0]:
approximate = True
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-2,
atol=1e-2,
x=x[0],
approximate=approximate,
)
# glu
@handle_frontend_test(
fn_tree="jax.nn.glu",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="linear",
min_value=-2,
min_num_dims=1,
min_dim_size=4,
max_dim_size=4,
),
axis=helpers.ints(min_value=-1, max_value=0),
test_with_out=st.just(False),
)
def test_jax_glu(
*,
dtype_and_x,
axis,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-01,
atol=1e-01,
x=x[0],
axis=axis,
)
@handle_frontend_test(
fn_tree="jax.nn.hard_sigmoid",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_integer"),
large_abs_safety_factor=2,
small_abs_safety_factor=2,
),
test_with_out=st.just(False),
)
def test_jax_hard_sigmoid(
*,
dtype_and_x,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtypes, xs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xs[0],
)
@handle_frontend_test(
fn_tree="jax.nn.hard_silu",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_integer"),
large_abs_safety_factor=2,
small_abs_safety_factor=2,
),
test_with_out=st.just(False),
)
def test_jax_hard_silu(
*,
dtype_and_x,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtypes, xs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xs[0],
)
@handle_frontend_test(
fn_tree="jax.nn.hard_swish",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex"),
min_value=-10,
max_value=10,
safety_factor_scale="linear",
),
test_with_out=st.just(False),
)
def test_jax_hard_swish(
*,
dtype_and_x,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
rtol=1e-02,
atol=1e-02,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
@handle_frontend_test(
fn_tree="jax.nn.hard_tanh",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_integer"),
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="linear",
),
test_with_out=st.just(False),
)
def test_jax_hard_tanh(
*,
dtype_and_x,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
    # TODO: enable this test for all valid dtypes that jax.nn.hard_tanh supports
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
@handle_frontend_test(
fn_tree="jax.nn.leaky_relu",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="linear",
),
negative_slope=helpers.floats(
min_value=0.0, max_value=1.0, small_abs_safety_factor=16
),
test_with_out=st.just(False),
)
def test_jax_leaky_relu(
*,
dtype_and_x,
negative_slope,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
rtol=1e-01,
atol=1e-01,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
negative_slope=negative_slope,
)
@handle_frontend_test(
fn_tree="jax.nn.log_sigmoid",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex"),
min_value=-100,
max_value=100,
large_abs_safety_factor=8,
small_abs_safety_factor=8,
safety_factor_scale="log",
),
test_with_out=st.just(False),
)
def test_jax_log_sigmoid(
*,
dtype_and_x,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
rtol=1e-02,
atol=1e-02,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
@handle_frontend_test(
fn_tree="jax.nn.log_softmax",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex"),
large_abs_safety_factor=4,
small_abs_safety_factor=4,
safety_factor_scale="log",
min_value=-2,
min_num_dims=1,
min_dim_size=2,
),
axis=helpers.ints(min_value=-1, max_value=0),
test_with_out=st.just(False),
)
def test_jax_log_softmax(
*,
dtype_and_x,
axis,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-02,
atol=1e-02,
x=x[0],
axis=axis,
)
@handle_frontend_test(
fn_tree="jax.nn.logsumexp",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_integer"),
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="linear",
num_arrays=2,
shared_dtype=True,
),
axis=st.just(None),
keepdims=st.booleans(),
return_sign=st.booleans(),
test_with_out=st.just(False),
)
def test_jax_logsumexp(
*,
dtype_and_x,
axis,
keepdims,
return_sign,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtypes, xs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=xs[0],
axis=axis,
b=xs[1],
keepdims=keepdims,
return_sign=return_sign,
)
@handle_frontend_test(
fn_tree="jax.nn.normalize",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_integer"),
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="linear",
num_arrays=3,
shared_dtype=True,
),
axis=st.just(-1),
epsilon=helpers.floats(
min_value=0.01,
max_value=1,
),
where=st.none(),
test_with_out=st.just(False),
)
def test_jax_normalize(
*,
dtype_and_x,
axis,
epsilon,
where,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtypes, xs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
rtol=1e-02,
atol=1e-02,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=xs[0],
axis=axis,
mean=xs[1],
variance=xs[2],
epsilon=epsilon,
where=where,
)
@handle_frontend_test(
fn_tree="jax.nn.one_hot",
dtype_indices_classes_axis=_dtype_indices_classes_axis(),
dtype=helpers.get_dtypes("float", full=False),
test_with_out=st.just(False),
)
def test_jax_one_hot(
*,
dtype_indices_classes_axis,
dtype,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtype, indices, num_classes, axis = dtype_indices_classes_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-02,
x=indices[0],
num_classes=num_classes,
dtype=dtype[0],
axis=axis,
)
# TODO: turn on complex dtype activation tests once supported in all backends
@handle_frontend_test(
fn_tree="jax.nn.relu",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
large_abs_safety_factor=3,
small_abs_safety_factor=3,
safety_factor_scale="linear",
),
test_with_out=st.just(False),
)
def test_jax_relu(
*,
dtype_and_x,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
@handle_frontend_test(
fn_tree="jax.nn.relu6",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="linear",
),
test_with_out=st.just(False),
)
def test_jax_relu6(
*,
dtype_and_x,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
@handle_frontend_test(
fn_tree="jax.nn.selu",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_integer"),
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="log",
),
test_with_out=st.just(False),
)
def test_jax_selu(
*,
dtype_and_x,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-02,
atol=1e-02,
x=x[0],
)
@handle_frontend_test(
fn_tree="jax.nn.sigmoid",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex"),
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="linear",
),
test_with_out=st.just(False),
)
def test_jax_sigmoid(
*,
dtype_and_x,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
@handle_frontend_test(
fn_tree="jax.nn.silu",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_integer"),
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="linear",
),
test_with_out=st.just(False),
)
def test_jax_silu(
*,
dtype_and_x,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
@handle_frontend_test(
fn_tree="jax.nn.soft_sign",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_integer"),
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="linear",
),
test_with_out=st.just(False),
)
def test_jax_soft_sign(
*,
dtype_and_x,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
@handle_frontend_test(
fn_tree="jax.nn.softmax",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("float_and_complex"),
min_num_dims=2,
max_axes_size=2,
force_int_axis=True,
valid_axis=True,
),
test_with_out=st.just(False),
)
def test_jax_softmax(
*,
dtype_x_axis,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
x_dtype, x, axis = dtype_x_axis
helpers.test_frontend_function(
input_dtypes=x_dtype,
frontend=frontend,
backend_to_test=backend_fw,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-02,
atol=1e-02,
x=x[0],
axis=axis,
)
@handle_frontend_test(
fn_tree="jax.nn.softplus",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
large_abs_safety_factor=4,
small_abs_safety_factor=4,
safety_factor_scale="log",
),
test_with_out=st.just(False),
)
def test_jax_softplus(
*,
dtype_and_x,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
@handle_frontend_test(
fn_tree="jax.nn.swish",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
large_abs_safety_factor=2,
small_abs_safety_factor=2,
safety_factor_scale="linear",
),
test_with_out=st.just(False),
)
def test_jax_swish(
*,
dtype_and_x,
test_flags,
on_device,
fn_tree,
frontend,
backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x=x[0],
)
# === End of file: ivy/ivy_tests/test_ivy/test_frontends/test_jax/test_nn/test_non_linear_activations.py ===
# global
from hypothesis import strategies as st
import numpy as np
# local
import ivy_tests.test_ivy.helpers as helpers
import ivy_tests.test_ivy.test_frontends.test_numpy.helpers as np_frontend_helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
from ivy_tests.test_ivy.test_functional.test_core.test_linalg import (
_get_first_matrix_and_dtype,
_get_second_matrix_and_dtype,
_get_dtype_value1_value2_axis_for_tensordot,
)
from ivy_tests.test_ivy.test_functional.test_experimental.test_core.test_linalg import (
_generate_multi_dot_dtype_and_arrays,
)
# --- Helpers --- #
# --------------- #
# cross
@st.composite
def dtype_value1_value2_axis(
draw,
available_dtypes,
abs_smallest_val=None,
min_value=None,
max_value=None,
allow_inf=False,
exclude_min=False,
exclude_max=False,
min_num_dims=1,
max_num_dims=10,
min_dim_size=1,
max_dim_size=10,
specific_dim_size=3,
large_abs_safety_factor=4,
small_abs_safety_factor=4,
safety_factor_scale="log",
):
# Taken from functional helpers
# For cross product, a dim with size 3 is required
shape = draw(
helpers.get_shape(
allow_none=False,
min_num_dims=min_num_dims,
max_num_dims=max_num_dims,
min_dim_size=min_dim_size,
max_dim_size=max_dim_size,
)
)
axis = draw(helpers.ints(min_value=0, max_value=len(shape)))
# make sure there is a dim with specific dim size
shape = list(shape)
shape = shape[:axis] + [specific_dim_size] + shape[axis:]
shape = tuple(shape)
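    # e.g. a drawn base shape (2, 4) with axis=1 becomes (2, 3, 4); np.cross
    # needs the dim indexed by `axis` to have size exactly 3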
dtype = draw(st.sampled_from(draw(available_dtypes)))
values = []
for i in range(2):
values.append(
draw(
helpers.array_values(
dtype=dtype,
shape=shape,
abs_smallest_val=abs_smallest_val,
min_value=min_value,
max_value=max_value,
allow_inf=allow_inf,
exclude_min=exclude_min,
exclude_max=exclude_max,
large_abs_safety_factor=large_abs_safety_factor,
small_abs_safety_factor=small_abs_safety_factor,
safety_factor_scale=safety_factor_scale,
)
)
)
value1, value2 = values[0], values[1]
return [dtype], value1, value2, axis
@handle_frontend_test(
fn_tree="numpy.linalg.cross",
gt_fn_tree="numpy.cross",
dtype_x1_x2_axis=dtype_value1_value2_axis(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
max_num_dims=5,
min_dim_size=3,
max_dim_size=3,
min_value=-1e5,
max_value=1e5,
abs_smallest_val=0.01,
safety_factor_scale="log",
),
test_with_out=st.just(True),
)
def test_numpy_cross(
*,
dtype_x1_x2_axis,
frontend,
test_flags,
fn_tree,
gt_fn_tree,
backend_fw,
on_device,
):
dtypes, x1, x2, axis = dtype_x1_x2_axis
helpers.test_frontend_function(
input_dtypes=dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
gt_fn_tree=gt_fn_tree,
on_device=on_device,
rtol=1e-3,
atol=1e-3,
a=x1,
b=x2,
axis=axis,
)
# dot
@handle_frontend_test(
fn_tree="numpy.linalg.matrix_and_vector_products.dot",
gt_fn_tree="numpy.dot",
dtype_a_b=np_frontend_helpers._get_dtype_input_and_vectors(),
)
def test_numpy_dot(
dtype_a_b,
frontend,
backend_fw,
test_flags,
fn_tree,
gt_fn_tree,
on_device,
):
dtype, a, b = dtype_a_b
helpers.test_frontend_function(
input_dtypes=dtype,
frontend=frontend,
backend_to_test=backend_fw,
fn_tree=fn_tree,
gt_fn_tree=gt_fn_tree,
on_device=on_device,
test_flags=test_flags,
rtol=1e-01,
atol=1e-01,
a=a,
b=b,
)
# einsum
@handle_frontend_test(
fn_tree="numpy.linalg.matrix_and_vector_products.einsum",
gt_fn_tree="numpy.einsum",
args=st.sampled_from(
[
(
"ii",
np.arange(25).reshape(5, 5),
),
(
"ii->i",
np.arange(25).reshape(5, 5),
),
("ij,j", np.arange(25).reshape(5, 5), np.arange(5)),
]
),
dtype=helpers.get_dtypes("float", full=False),
)
def test_numpy_einsum(
*,
args,
dtype,
frontend,
test_flags,
fn_tree,
gt_fn_tree,
backend_fw,
on_device,
):
    kw = {f"x{i}": arg for i, arg in enumerate(args)}
    test_flags.num_positional_args = len(args)
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
fn_tree=fn_tree,
gt_fn_tree=gt_fn_tree,
on_device=on_device,
test_flags=test_flags,
**kw,
optimize=False,
order="K",
casting="safe",
)
# inner
@handle_frontend_test(
fn_tree="numpy.linalg.matrix_and_vector_products.inner",
gt_fn_tree="numpy.inner",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_value=-10,
max_value=10,
num_arrays=2,
shared_dtype=True,
),
test_with_out=st.just(False),
)
def test_numpy_inner(
dtype_and_x,
frontend,
test_flags,
fn_tree,
gt_fn_tree,
backend_fw,
on_device,
):
input_dtypes, xs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
gt_fn_tree=gt_fn_tree,
on_device=on_device,
a=xs[0],
b=xs[1],
)
# kron
@handle_frontend_test(
fn_tree="numpy.linalg.matrix_and_vector_products.kron",
gt_fn_tree="numpy.kron",
dtype_and_x=helpers.dtype_and_values(
num_arrays=2,
allow_inf=True,
allow_nan=True,
shared_dtype=True,
),
)
def test_numpy_kron(
*,
dtype_and_x,
frontend,
fn_tree,
gt_fn_tree,
on_device,
test_flags,
backend_fw,
):
dtypes, xs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtypes,
backend_to_test=backend_fw,
frontend=frontend,
fn_tree=fn_tree,
gt_fn_tree=gt_fn_tree,
on_device=on_device,
test_flags=test_flags,
a=xs[0],
b=xs[1],
)
# matmul
@handle_frontend_test(
fn_tree="numpy.matmul",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[_get_first_matrix_and_dtype, _get_second_matrix_and_dtype],
),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="matmul"
),
)
def test_numpy_matmul(
dtypes_values_casting,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtypes, x, casting, dtype = dtypes_values_casting
helpers.test_frontend_function(
input_dtypes=dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x1=x[0],
x2=x[1],
out=None,
casting=casting,
order="K",
dtype=dtype,
# The arguments below are currently unused.
# subok=True,
)
# matrix_power
@handle_frontend_test(
fn_tree="numpy.linalg.matrix_and_vector_products.matrix_power",
gt_fn_tree="numpy.linalg.matrix_power",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=0,
max_value=50,
shape=helpers.ints(min_value=2, max_value=8).map(lambda x: (x, x)),
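        # the .map turns a drawn int n into an (n, n) shape, since
        # matrix_power is defined only for square matrices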
),
n=helpers.ints(min_value=1, max_value=8),
test_with_out=st.just(False),
)
def test_numpy_matrix_power(
dtype_and_x,
n,
frontend,
test_flags,
fn_tree,
gt_fn_tree,
backend_fw,
on_device,
):
dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
gt_fn_tree=gt_fn_tree,
on_device=on_device,
a=x[0],
n=n,
)
# multi_dot
@handle_frontend_test(
fn_tree="numpy.linalg.multi_dot",
dtype_and_x=_generate_multi_dot_dtype_and_arrays(),
)
def test_numpy_multi_dot(
dtype_and_x,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtypes, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtypes,
backend_to_test=backend_fw,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
test_flags=test_flags,
arrays=x,
rtol=1e-3,
atol=1e-3,
)
# outer
@handle_frontend_test(
fn_tree="numpy.linalg.matrix_and_vector_products.outer",
gt_fn_tree="numpy.outer",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
min_value=-10,
max_value=10,
num_arrays=2,
min_num_dims=1,
max_num_dims=1,
shared_dtype=True,
),
)
def test_numpy_outer(
dtype_and_x,
frontend,
test_flags,
fn_tree,
gt_fn_tree,
backend_fw,
on_device,
):
input_dtypes, xs = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
gt_fn_tree=gt_fn_tree,
on_device=on_device,
a=xs[0],
b=xs[1],
)
# tensordot
@handle_frontend_test(
fn_tree="numpy.linalg.matrix_and_vector_products.tensordot",
gt_fn_tree="numpy.tensordot",
dtype_values_and_axes=_get_dtype_value1_value2_axis_for_tensordot(
helpers.get_dtypes(kind="numeric")
),
test_with_out=st.just(False),
)
def test_numpy_tensordot(
dtype_values_and_axes,
frontend,
test_flags,
fn_tree,
gt_fn_tree,
backend_fw,
):
dtype, a, b, axes = dtype_values_and_axes
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
gt_fn_tree=gt_fn_tree,
a=a,
b=b,
axes=axes,
)
# === End of file: ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_linalg/test_matrix_and_vector_products.py ===
# global
import numpy as np
from hypothesis import assume, strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
import ivy_tests.test_ivy.test_frontends.test_numpy.helpers as np_frontend_helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
# amax
@handle_frontend_test(
fn_tree="numpy.amax",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
valid_axis=True,
force_int_axis=True,
large_abs_safety_factor=2,
safety_factor_scale="log",
),
initial=st.one_of(st.floats(min_value=-1000, max_value=1000), st.none()),
keepdims=st.booleans(),
where=np_frontend_helpers.where(),
)
def test_numpy_amax(
dtype_x_axis,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
where,
initial,
keepdims,
):
    # numpy requires an explicit `initial` when `where` masks out elements
    assume(initial is not None or np.all(where))
input_dtypes, x, axis = dtype_x_axis
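    # handle_where_and_array_bools (as used throughout these tests) folds an
    # array-valued `where` mask into the input dtypes and test flags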
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
axis=axis,
keepdims=keepdims,
initial=initial,
where=where,
)
# amin
@handle_frontend_test(
fn_tree="numpy.amin",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
valid_axis=True,
force_int_axis=True,
large_abs_safety_factor=2,
safety_factor_scale="log",
),
initial=st.one_of(st.floats(min_value=-1000, max_value=1000), st.none()),
keepdims=st.booleans(),
where=np_frontend_helpers.where(),
)
def test_numpy_amin(
dtype_x_axis,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
where,
initial,
keepdims,
):
    # numpy requires an explicit `initial` when `where` masks out elements
    assume(initial is not None or np.all(where))
input_dtypes, x, axis = dtype_x_axis
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
axis=axis,
keepdims=keepdims,
initial=initial,
where=where,
)
# fmax
@handle_frontend_test(
fn_tree="numpy.fmax",
dtype_and_inputs=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=2,
min_value=-np.inf,
max_value=np.inf,
shared_dtype=True,
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="fmax"
),
)
def test_numpy_fmax(
dtype_and_inputs,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, xs = dtype_and_inputs
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x1=xs[0],
x2=xs[1],
out=None,
where=where,
)
# fmin
@handle_frontend_test(
fn_tree="numpy.fmin",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="fmin"
),
)
def test_numpy_fmin(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, xs, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x1=xs[0],
x2=xs[1],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
@handle_frontend_test(
fn_tree="numpy.max",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
valid_axis=True,
force_int_axis=True,
large_abs_safety_factor=2,
safety_factor_scale="log",
),
initial=st.one_of(st.floats(min_value=-1000, max_value=1000), st.none()),
keepdims=st.booleans(),
where=np_frontend_helpers.where(),
)
def test_numpy_max(
dtype_x_axis,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
where,
initial,
keepdims,
):
    # numpy requires an explicit `initial` when `where` masks out elements
    assume(initial is not None or np.all(where))
input_dtypes, x, axis = dtype_x_axis
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
axis=axis,
keepdims=keepdims,
initial=initial,
where=where,
)
# maximum
@handle_frontend_test(
fn_tree="numpy.maximum",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="maximum"
),
)
def test_numpy_maximum(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, xs, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x1=xs[0],
x2=xs[1],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
@handle_frontend_test(
fn_tree="numpy.min",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
valid_axis=True,
force_int_axis=True,
large_abs_safety_factor=2,
safety_factor_scale="log",
),
initial=st.one_of(st.floats(min_value=-1000, max_value=1000), st.none()),
keepdims=st.booleans(),
where=np_frontend_helpers.where(),
)
def test_numpy_min(
dtype_x_axis,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
where,
initial,
keepdims,
):
    # numpy requires an explicit `initial` when `where` masks out elements
    assume(initial is not None or np.all(where))
input_dtypes, x, axis = dtype_x_axis
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
axis=axis,
keepdims=keepdims,
initial=initial,
where=where,
)
# minimum
@handle_frontend_test(
fn_tree="numpy.minimum",
dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype(
arr_func=[
lambda: helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=2,
shared_dtype=True,
)
],
),
where=np_frontend_helpers.where(),
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc(
fn_name="minimum"
),
)
def test_numpy_minimum(
dtypes_values_casting,
where,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtypes, xs, casting, dtype = dtypes_values_casting
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
x1=xs[0],
x2=xs[1],
out=None,
where=where,
casting=casting,
order="K",
dtype=dtype,
subok=True,
)
# nanmax
@handle_frontend_test(
fn_tree="numpy.nanmax",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
valid_axis=True,
force_int_axis=True,
large_abs_safety_factor=2,
safety_factor_scale="log",
allow_nan=True,
allow_inf=True,
),
initial=st.one_of(st.floats(min_value=-1000, max_value=1000), st.none()),
keepdims=st.booleans(),
where=np_frontend_helpers.where(),
)
def test_numpy_nanmax(
dtype_x_axis,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
where,
initial,
keepdims,
):
    # numpy requires an explicit `initial` when `where` masks out elements
    assume(initial is not None or np.all(where))
input_dtypes, x, axis = dtype_x_axis
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
axis=axis,
out=None,
keepdims=keepdims,
initial=initial,
where=where,
)
# nanmin
@handle_frontend_test(
fn_tree="numpy.nanmin",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
valid_axis=True,
force_int_axis=True,
large_abs_safety_factor=2,
safety_factor_scale="log",
allow_nan=True,
allow_inf=True,
),
initial=st.one_of(st.floats(min_value=-1000, max_value=1000), st.none()),
keepdims=st.booleans(),
where=np_frontend_helpers.where(),
)
def test_numpy_nanmin(
dtype_x_axis,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
where,
initial,
keepdims,
):
    # numpy requires an explicit `initial` when `where` masks out elements
    assume(initial is not None or np.all(where))
input_dtypes, x, axis = dtype_x_axis
where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools(
where=where,
input_dtype=input_dtypes,
test_flags=test_flags,
)
np_frontend_helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
a=x[0],
axis=axis,
out=None,
keepdims=keepdims,
initial=initial,
where=where,
)
# === End of file: ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_mathematical_functions/test_extrema_finding.py ===
# global
import ivy
from hypothesis import given
import pytest
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy.functional.frontends.onnx import Tensor
@pytest.mark.skip("Testing pipeline not yet implemented")
@given(
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid", prune_function=False)
).filter(lambda x: "bfloat16" not in x[0]),
)
def test_onnx_tensor_property_device(
dtype_x,
):
_, data = dtype_x
x = Tensor(data[0])
x.ivy_array = data[0]
ivy.utils.assertions.check_equal(
x.device, ivy.dev(ivy.array(data[0])), as_array=False
)
@pytest.mark.skip("Testing pipeline not yet implemented")
@given(
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid", prune_function=False)
).filter(lambda x: "bfloat16" not in x[0]),
)
def test_onnx_tensor_property_dtype(
dtype_x,
):
dtype, data = dtype_x
x = Tensor(data[0])
x.ivy_array = data[0]
ivy.utils.assertions.check_equal(x.dtype, dtype[0], as_array=False)
@pytest.mark.skip("Testing pipeline not yet implemented")
@given(
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid", prune_function=False)
).filter(lambda x: "bfloat16" not in x[0]),
)
def test_onnx_tensor_property_ivy_array(
dtype_x,
):
_, data = dtype_x
x = Tensor(data[0])
x.ivy_array = data[0]
ret = helpers.flatten_and_to_np(ret=x.ivy_array.data, backend="torch")
ret_gt = helpers.flatten_and_to_np(ret=data[0], backend="torch")
helpers.value_test(
ret_np_flat=ret,
ret_np_from_gt_flat=ret_gt,
backend="torch",
)
@pytest.mark.skip("Testing pipeline not yet implemented")
@given(
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid", prune_function=False),
ret_shape=True,
).filter(lambda x: "bfloat16" not in x[0]),
)
def test_onnx_tensor_property_ndim(
dtype_x,
):
dtype, data, shape = dtype_x
x = Tensor(data[0])
ivy.utils.assertions.check_equal(x.ndim, data[0].ndim, as_array=False)
@pytest.mark.skip("Testing pipeline not yet implemented")
@given(
dtype_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid", prune_function=False),
ret_shape=True,
).filter(lambda x: "bfloat16" not in x[0]),
)
def test_onnx_tensor_property_shape(dtype_x):
dtype, data, shape = dtype_x
x = Tensor(data[0])
ivy.utils.assertions.check_equal(
x.ivy_array.shape, ivy.Shape(shape), as_array=False
)
# === End of file: ivy/ivy_tests/test_ivy/test_frontends/test_onnx/test_tensor.py ===
# global
from hypothesis import strategies as st
import numpy as np
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
from ivy_tests.test_ivy.test_frontends.test_tensorflow.test_nn import _x_and_filters
# --- Helpers --- #
# --------------- #
@st.composite
def _batch_norm_helper(draw):
num_dims = draw(st.integers(min_value=4, max_value=5))
dtype, x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=num_dims,
max_num_dims=num_dims,
min_value=-1e02,
max_value=1e02,
)
)
epsilon = draw(st.floats(min_value=1e-07, max_value=1e-04))
factor = draw(st.floats(min_value=0.5, max_value=1))
training = draw(st.booleans())
if num_dims == 4:
data_format = draw(st.sampled_from(["NHWC", "NCHW"]))
else:
data_format = draw(st.sampled_from(["NDHWC", "NCDHW"]))
num_channels = x[0].shape[data_format.rfind("C")]
dtypes, vectors = draw(
helpers.dtype_and_values(
available_dtypes=["float32"],
shape=(num_channels,),
num_arrays=4,
min_value=-1e02,
max_value=1e02,
)
)
vectors[3] = np.abs(vectors[3]) # non-negative variance
return dtype + dtypes, x, epsilon, factor, training, data_format, vectors
# --- Main --- #
# ------------ #
@handle_frontend_test(
fn_tree="tensorflow.compat.v1.nn.depthwise_conv2d",
x_f_d_df=_x_and_filters(
dtypes=helpers.get_dtypes("float", full=False),
data_format=st.sampled_from(["NHWC"]),
padding=st.sampled_from(["VALID", "SAME"]),
type="depthwise",
),
test_with_out=st.just(False),
)
def test_tensorflow_depthwise_conv2d(
*,
x_f_d_df,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x, filters, dilation, data_format, stride, padding = x_f_d_df
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x,
filter=filters,
strides=stride,
padding=padding,
rate=dilation,
name=None,
data_format=data_format,
)
@handle_frontend_test(
fn_tree="tensorflow.compat.v1.nn.fused_batch_norm",
dtypes_args=_batch_norm_helper(),
test_with_out=st.just(False),
)
def test_tensorflow_fused_batch_norm(
*,
dtypes_args,
test_flags,
frontend,
backend_fw,
fn_tree,
on_device,
):
dtypes, x, epsilon, factor, training, data_format, vectors = dtypes_args
helpers.test_frontend_function(
input_dtypes=dtypes,
backend_to_test=backend_fw,
test_flags=test_flags,
frontend=frontend,
fn_tree=fn_tree,
on_device=on_device,
atol=1e-02,
x=x[0],
scale=vectors[0],
offset=vectors[1],
mean=vectors[2],
variance=vectors[3],
epsilon=epsilon,
data_format=data_format,
is_training=training,
exponential_avg_factor=factor,
)
# max_pool
@handle_frontend_test(
fn_tree="tensorflow.compat.v1.nn.max_pool",
data_format=st.just("NHWC"),
x_k_s_p=helpers.arrays_for_pooling(min_dims=4, max_dims=4, min_side=1, max_side=4),
test_with_out=st.just(False),
)
def test_tensorflow_max_pool(
*,
x_k_s_p,
data_format,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
    input_dtype, x, ksize, strides, padding = x_k_s_p
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
value=x[0],
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
)
@handle_frontend_test(
fn_tree="tensorflow.compat.v1.nn.separable_conv2d",
x_f_d_df=_x_and_filters(
dtypes=helpers.get_dtypes("float", full=False),
data_format=st.sampled_from(["NHWC"]),
padding=st.sampled_from(["VALID", "SAME"]),
type="separable",
),
test_with_out=st.just(False),
)
def test_tensorflow_separable_conv2d(
*,
x_f_d_df,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
input_dtype, x, filters, dilation, data_format, stride, padding = x_f_d_df
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x,
depthwise_filter=filters[0],
pointwise_filter=filters[1],
strides=stride,
padding=padding,
rate=dilation,
name=None,
data_format=data_format,
)
# === End of file: ivy/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_compat/test_v1/test_nn.py ===
# global
import random
from hypothesis import strategies as st
import math
# local
import ivy
import ivy_tests.test_ivy.helpers as helpers
import ivy_tests.test_ivy.helpers.globals as test_globals
from ivy_tests.test_ivy.helpers import handle_frontend_test
from ivy_tests.test_ivy.test_functional.test_core.test_manipulation import _get_splits
from ivy_tests.test_ivy.helpers.hypothesis_helpers.general_helpers import (
two_broadcastable_shapes,
)
# --- Helpers --- #
# --------------- #
# noinspection DuplicatedCode
@st.composite
def _array_idxes_n_dtype(draw, **kwargs):
num_dims = draw(helpers.ints(min_value=1, max_value=4))
dtype, x = draw(
helpers.dtype_and_values(
**kwargs, min_num_dims=num_dims, max_num_dims=num_dims, shared_dtype=True
)
)
idxes = draw(
st.lists(
helpers.ints(min_value=0, max_value=num_dims - 1),
min_size=num_dims,
max_size=num_dims,
unique=True,
)
)
return x, idxes, dtype
@st.composite
def _arrays_dim_idx_n_dtypes(draw):
num_dims = draw(st.shared(helpers.ints(min_value=1, max_value=4), key="num_dims"))
num_arrays = 2
common_shape = draw(
helpers.lists(
x=helpers.ints(min_value=2, max_value=3),
min_size=num_dims - 1,
max_size=num_dims - 1,
)
)
_dim = draw(helpers.ints(min_value=0, max_value=num_dims - 1))
unique_dims = draw(
helpers.lists(
x=helpers.ints(min_value=2, max_value=3),
min_size=num_arrays,
max_size=num_arrays,
)
)
min_dim = min(unique_dims)
max_dim = max(unique_dims)
_idx = draw(
helpers.array_values(
shape=min_dim,
dtype="int64",
min_value=0,
            max_value=max_dim - 1,  # valid indices into the larger dim
exclude_min=False,
)
)
xs = []
available_input_types = draw(helpers.get_dtypes("numeric"))
    # half-precision summation is unstable in some backends; drop it if present
    available_input_types = [dt for dt in available_input_types if dt != "float16"]
input_dtypes = draw(
helpers.array_dtypes(
available_dtypes=available_input_types,
num_arrays=num_arrays,
shared_dtype=True,
)
)
for ud, dt in zip(unique_dims, input_dtypes):
x = draw(
helpers.array_values(
shape=common_shape[:_dim] + [ud] + common_shape[_dim:],
dtype=dt,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
)
)
xs.append(x)
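    # the two arrays agree on every dim except `_dim`; `_idx` holds indices
    # that are valid for the larger of the two along that dim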
return xs, input_dtypes, _dim, _idx
@st.composite
def _arrays_dim_idx_n_dtypes_extend(
draw, support_dtypes="numeric", unsupport_dtypes=()
):
num_dims = draw(st.shared(helpers.ints(min_value=1, max_value=4), key="num_dims"))
num_arrays = 2
common_shape = draw(
helpers.lists(
x=helpers.ints(min_value=2, max_value=3),
min_size=num_dims - 1,
max_size=num_dims - 1,
)
)
_dim = draw(helpers.ints(min_value=0, max_value=num_dims - 1))
unique_dims = draw(
helpers.lists(
x=helpers.ints(min_value=2, max_value=3),
min_size=num_arrays,
max_size=num_arrays,
)
)
min_dim = min(unique_dims)
max_dim = max(unique_dims)
_idx = draw(
helpers.array_values(
shape=min_dim,
dtype="int64",
min_value=0,
            max_value=max_dim - 1,  # valid indices into the larger dim
exclude_min=False,
)
)
xs = []
available_input_types = draw(helpers.get_dtypes(support_dtypes))
unstabled_dtypes = ["float16"]
available_input_types = [
dtype for dtype in available_input_types if dtype not in unstabled_dtypes
]
available_input_types = [
dtype for dtype in available_input_types if dtype not in unsupport_dtypes
]
input_dtypes = draw(
helpers.array_dtypes(
available_dtypes=available_input_types,
num_arrays=num_arrays,
shared_dtype=True,
)
)
for ud, dt in zip(unique_dims, input_dtypes):
x = draw(
helpers.array_values(
shape=common_shape[:_dim] + [ud] + common_shape[_dim:],
dtype=dt,
large_abs_safety_factor=2.5,
small_abs_safety_factor=2.5,
safety_factor_scale="log",
)
)
xs.append(x)
return xs, input_dtypes, _dim, _idx
# noinspection DuplicatedCode
@st.composite
def _arrays_idx_n_dtypes(draw):
num_dims = draw(st.shared(helpers.ints(min_value=1, max_value=4), key="num_dims"))
num_arrays = draw(
st.shared(helpers.ints(min_value=2, max_value=4), key="num_arrays")
)
common_shape = draw(
helpers.list_of_size(
x=helpers.ints(min_value=2, max_value=3),
size=num_dims - 1,
)
)
unique_idx = draw(helpers.ints(min_value=0, max_value=num_dims - 1))
unique_dims = draw(
helpers.list_of_size(
x=helpers.ints(min_value=2, max_value=3),
size=num_arrays,
)
)
xs = []
input_dtypes = draw(
helpers.array_dtypes(available_dtypes=draw(helpers.get_dtypes("float")))
)
for ud, dt in zip(unique_dims, input_dtypes):
x = draw(
helpers.array_values(
shape=common_shape[:unique_idx] + [ud] + common_shape[unique_idx:],
dtype=dt,
)
)
xs.append(x)
return xs, input_dtypes, unique_idx
@st.composite
def _chunk_helper(draw):
dtype, x, shape = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_num_dims=1,
ret_shape=True,
)
)
axis = draw(helpers.get_axis(shape=shape, force_int=True))
if shape[axis] == 0:
chunks = 0
else:
        factors = [i for i in range(1, shape[axis] + 1) if shape[axis] % i == 0]
chunks = draw(st.sampled_from(factors))
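    # only exact divisors are drawn, so every chunk comes out the same size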
return dtype, x, axis, chunks
# diagonal_scatter
@st.composite
def _diag_x_y_offset_axes(draw):
    # drawn through hypothesis rather than `random` so examples stay reproducible
    currentshape = draw(helpers.ints(min_value=2, max_value=4))
if test_globals.CURRENT_BACKEND == "paddle":
currentshape = 2
offset = draw(
helpers.ints(min_value=-(currentshape - 1), max_value=currentshape - 1)
)
available_input_types = draw(helpers.get_dtypes("float"))
available_input_types = helpers.array_dtypes(available_dtypes=available_input_types)
dtype, x = draw(
helpers.dtype_and_values(
min_num_dims=currentshape,
max_num_dims=currentshape,
min_dim_size=currentshape,
max_dim_size=currentshape,
num_arrays=1,
available_dtypes=available_input_types,
),
)
diagonal_shape = draw(
helpers.get_shape(
min_num_dims=currentshape - 1,
max_num_dims=currentshape - 1,
min_dim_size=currentshape,
max_dim_size=currentshape,
),
)
diagonal_shape = diagonal_shape[:-1] + (diagonal_shape[-1] - abs(offset),)
y = draw(
helpers.array_values(
shape=diagonal_shape,
dtype=available_input_types,
exclude_min=False,
)
)
prohibited_pairs = {(2, -1), (-2, 1), (1, -2), (-1, 2)}
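    # e.g. for a 3-D input each of these pairs wraps around to the same axis
    # (2 and -1 both name the last dim), and dim1 must differ from dim2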
axes = draw(
st.lists(
helpers.ints(min_value=-2, max_value=1), min_size=2, max_size=2, unique=True
).filter(
lambda axes: (axes[0] % 2 != axes[1] % 2)
and tuple(axes) not in prohibited_pairs,
)
)
return dtype, x, y, offset, axes
@st.composite
def _dtype_input_dim_start_length(draw):
_shape = draw(helpers.get_shape(min_num_dims=1, min_dim_size=1))
_dtype, _x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=1,
shape=_shape,
)
)
_dim = draw(
helpers.get_axis(
shape=_shape,
force_int=True,
),
)
_start = draw(helpers.ints(min_value=1, max_value=_shape[_dim]))
_length = draw(helpers.ints(min_value=0, max_value=_shape[_dim] - _start))
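    # bounding `length` by size - start keeps start + length within the dim,
    # as torch.narrow requires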
return _dtype, _x, _dim, _start, _length
@st.composite
def _dtype_input_idx_axis(draw):
dtype_x_axis_shape = draw(
helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
force_int_axis=True,
ret_shape=True,
valid_axis=True,
min_num_dims=2,
)
)
input_dtype, x, axis, shape = dtype_x_axis_shape
max_idx = 0
if shape:
max_idx = shape[axis] - 1
idx = draw(helpers.ints(min_value=0, max_value=max_idx))
x = x[0]
return input_dtype, x, idx, axis
@st.composite
def _dtypes_input_mask(draw):
_shape = draw(helpers.get_shape(min_num_dims=1, min_dim_size=1))
_mask = draw(helpers.array_values(dtype=helpers.get_dtypes("bool"), shape=_shape))
_dtype, _x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
num_arrays=1,
shape=_shape,
)
)
return _dtype, _x, _mask
@st.composite
def _where_helper(draw):
shape_1, shape_2 = draw(two_broadcastable_shapes())
dtype_x1, x1 = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=shape_1,
)
)
dtype_x2, x2 = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=shape_1,
shared_dtype=True,
)
)
_, cond = draw(
helpers.dtype_and_values(
available_dtypes=["bool"],
shape=shape_2,
)
)
return ["bool", *dtype_x1, *dtype_x2], [cond[0], x1[0], x2[0]]
# reshape
@st.composite
def dtypes_x_reshape(draw):
shape = draw(helpers.get_shape(min_num_dims=1))
dtypes, x = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
shape=shape,
)
)
shape = draw(
helpers.get_shape(min_num_dims=1).filter(
lambda s: math.prod(s) == math.prod(shape)
)
)
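    # the filter keeps the element count fixed, e.g. (2, 6) may become (3, 4)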
return dtypes, x, shape
# --- Main --- #
# ------------ #
# adjoint
@handle_frontend_test(
fn_tree="torch.adjoint",
dtype_and_values=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("real_and_complex"),
min_num_dims=2,
min_dim_size=2,
),
)
def test_torch_adjoint(
*,
dtype_and_values,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, value = dtype_and_values
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=value[0],
)
@handle_frontend_test(
fn_tree="torch.argwhere",
dtype_and_values=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_torch_argwhere(
*,
dtype_and_values,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, input = dtype_and_values
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=input[0],
)
# cat
@handle_frontend_test(
fn_tree="torch.cat",
xs_n_input_dtypes_n_unique_idx=_arrays_idx_n_dtypes(),
)
def test_torch_cat(
*,
xs_n_input_dtypes_n_unique_idx,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
xs, input_dtypes, unique_idx = xs_n_input_dtypes_n_unique_idx
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
tensors=xs,
dim=unique_idx,
)
# chunk
@handle_frontend_test(
fn_tree="torch.chunk",
x_dim_chunks=_chunk_helper(),
test_with_out=st.just(False),
)
def test_torch_chunk(
*,
x_dim_chunks,
fn_tree,
on_device,
frontend,
test_flags,
backend_fw,
):
dtype, x, axis, chunks = x_dim_chunks
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
chunks=chunks,
dim=axis,
)
# columnstack
@handle_frontend_test(
fn_tree="torch.column_stack",
dtype_value=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_torch_columnstack(
*,
dtype_value,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, value = dtype_value
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
tensors=value,
)
# concat
@handle_frontend_test(
fn_tree="torch.concat",
xs_n_input_dtypes_n_unique_idx=_arrays_idx_n_dtypes(),
)
def test_torch_concat(
*,
xs_n_input_dtypes_n_unique_idx,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
xs, input_dtypes, unique_idx = xs_n_input_dtypes_n_unique_idx
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
tensors=xs,
dim=unique_idx,
)
@handle_frontend_test(
fn_tree="torch.conj",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float_and_complex"),
),
)
def test_torch_conj(
    *,
    dtype_and_x,
    on_device,
    fn_tree,
    frontend,
    test_flags,
    backend_fw,
):
input_dtype, x = dtype_and_x
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
)
@handle_frontend_test(
fn_tree="torch.diagonal_scatter", dtype_and_values=_diag_x_y_offset_axes()
)
def test_torch_diagonal_scatter(
*,
dtype_and_values,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, value, src, offset, axes = dtype_and_values
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=value[0],
src=src,
offset=offset,
dim1=axes[0],
dim2=axes[1],
)
# dsplit
@handle_frontend_test(
fn_tree="torch.dsplit",
dtype_value=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=st.shared(helpers.get_shape(min_num_dims=3), key="value_shape"),
),
indices_or_sections=_get_splits(
min_num_dims=3,
axis=2,
allow_none=False,
allow_array_indices=False,
is_mod_split=True,
),
)
def test_torch_dsplit(
*,
dtype_value,
indices_or_sections,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, value = dtype_value
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=value[0],
indices_or_sections=indices_or_sections,
)
# dstack
@handle_frontend_test(
fn_tree="torch.dstack",
dtype_value_shape=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_torch_dstack(
*,
dtype_value_shape,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, value = dtype_value_shape
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
tensors=value,
)
# gather
@handle_frontend_test(
fn_tree="torch.gather",
params_indices_others=helpers.array_indices_axis(
array_dtypes=helpers.get_dtypes("valid"),
indices_dtypes=["int64"],
indices_same_dims=True,
),
)
def test_torch_gather(
*,
params_indices_others,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtypes, input, indices, axis, batch_dims = params_indices_others
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=input,
dim=axis,
index=indices,
)
# hsplit
@handle_frontend_test(
fn_tree="torch.hsplit",
dtype_value=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=st.shared(helpers.get_shape(min_num_dims=2), key="value_shape"),
),
indices_or_sections=_get_splits(
min_num_dims=1,
axis=1,
allow_none=False,
allow_array_indices=False,
is_mod_split=True,
),
)
def test_torch_hsplit(
*,
dtype_value,
indices_or_sections,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, value = dtype_value
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=value[0],
indices_or_sections=indices_or_sections,
)
# hstack
@handle_frontend_test(
fn_tree="torch.hstack",
dtype_value_shape=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_torch_hstack(
*,
dtype_value_shape,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, value = dtype_value_shape
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
tensors=value,
)
# index_add
@handle_frontend_test(
fn_tree="torch.index_add",
xs_dtypes_dim_idx=_arrays_dim_idx_n_dtypes(),
alpha=st.integers(min_value=1, max_value=2),
)
def test_torch_index_add(
*,
xs_dtypes_dim_idx,
alpha,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
xs, input_dtypes, axis, indices = xs_dtypes_dim_idx
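    # the array with the larger size along `axis` is the accumulation target;
    # the smaller one becomes the gathered `source`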
if xs[0].shape[axis] < xs[1].shape[axis]:
source, input = xs
else:
input, source = xs
helpers.test_frontend_function(
input_dtypes=[input_dtypes[0], "int64", input_dtypes[1]],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-03,
input=input,
dim=axis,
index=indices,
source=source,
alpha=alpha,
)
# index_copy
@handle_frontend_test(
fn_tree="torch.index_copy",
xs_dtypes_dim_idx=_arrays_dim_idx_n_dtypes(),
)
def test_torch_index_copy(
*,
xs_dtypes_dim_idx,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
xs, input_dtypes, axis, indices = xs_dtypes_dim_idx
if xs[0].shape[axis] < xs[1].shape[axis]:
source, input = xs
else:
input, source = xs
helpers.test_frontend_function(
input_dtypes=[input_dtypes[0], "int64", input_dtypes[1]],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=input,
dim=axis,
index=indices,
source=source,
)
# index_reduce
@handle_frontend_test(
fn_tree="torch.index_reduce",
xs_dtypes_dim_idx=_arrays_dim_idx_n_dtypes_extend(
support_dtypes="numeric",
unsupport_dtypes=ivy.function_unsupported_dtypes(
ivy.functional.frontends.torch.index_reduce
),
),
reduce=st.sampled_from(["prod", "mean", "amin", "amax"]),
)
def test_torch_index_reduce(
*,
xs_dtypes_dim_idx,
reduce,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
xs, input_dtypes, axis, indices = xs_dtypes_dim_idx
if xs[0].shape[axis] < xs[1].shape[axis]:
source, input = xs
else:
input, source = xs
helpers.test_frontend_function(
input_dtypes=[input_dtypes[0], "int64", input_dtypes[1]],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
rtol=1e-03,
input=input,
dim=axis,
index=indices,
source=source,
reduce=reduce,
)
# index_select
@handle_frontend_test(
fn_tree="torch.index_select",
params_indices_others=helpers.array_indices_axis(
array_dtypes=helpers.get_dtypes("valid"),
indices_dtypes=["int64"],
max_num_dims=1,
indices_same_dims=True,
),
)
def test_torch_index_select(
*,
params_indices_others,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtypes, input, indices, axis, batch_dims = params_indices_others
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=input,
dim=axis,
index=indices,
)
@handle_frontend_test(
fn_tree="torch.masked_select",
dtype_input_mask=_dtypes_input_mask(),
)
def test_torch_masked_select(
*,
dtype_input_mask,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
    input_dtype, x, mask = dtype_input_mask
helpers.test_frontend_function(
input_dtypes=input_dtype + ["bool"],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
mask=mask,
)
# moveaxis
@handle_frontend_test(
fn_tree="torch.moveaxis",
dtype_and_input=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=-100,
max_value=100,
shape=st.shared(
helpers.get_shape(
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
),
key="a_s_d",
),
),
source=helpers.get_axis(
allow_none=False,
unique=True,
shape=st.shared(
helpers.get_shape(
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
),
key="a_s_d",
),
min_size=1,
force_int=True,
),
destination=helpers.get_axis(
allow_none=False,
unique=True,
shape=st.shared(
helpers.get_shape(
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
),
key="a_s_d",
),
min_size=1,
force_int=True,
),
test_with_out=st.just(False),
)
def test_torch_moveaxis(
*,
dtype_and_input,
source,
destination,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, value = dtype_and_input
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=value[0],
source=source,
destination=destination,
)
# movedim
@handle_frontend_test(
fn_tree="torch.movedim",
dtype_and_input=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=-100,
max_value=100,
shape=st.shared(
helpers.get_shape(
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
),
key="a_s_d",
),
),
source=helpers.get_axis(
allow_none=False,
unique=True,
shape=st.shared(
helpers.get_shape(
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
),
key="a_s_d",
),
min_size=1,
force_int=True,
),
destination=helpers.get_axis(
allow_none=False,
unique=True,
shape=st.shared(
helpers.get_shape(
min_num_dims=1,
max_num_dims=3,
min_dim_size=1,
max_dim_size=3,
),
key="a_s_d",
),
min_size=1,
force_int=True,
),
test_with_out=st.just(False),
)
def test_torch_movedim(
*,
dtype_and_input,
source,
destination,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, value = dtype_and_input
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=value[0],
source=source,
destination=destination,
)
@handle_frontend_test(
fn_tree="torch.narrow",
dtype_input_dim_start_length=_dtype_input_dim_start_length(),
)
def test_torch_narrow(
*,
dtype_input_dim_start_length,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
    input_dtype, x, dim, start, length = dtype_input_dim_start_length
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
dim=dim,
start=start,
length=length,
)
# nonzero
@handle_frontend_test(
fn_tree="torch.nonzero",
dtype_and_values=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("numeric"),
),
as_tuple=st.booleans(),
)
def test_torch_nonzero(
*,
dtype_and_values,
as_tuple,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, input = dtype_and_values
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=input[0],
as_tuple=as_tuple,
)
# permute
@handle_frontend_test(
fn_tree="torch.permute",
dtype_values_axis=_array_idxes_n_dtype(
available_dtypes=helpers.get_dtypes("valid"),
),
)
def test_torch_permute(
*,
dtype_values_axis,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
x, idxes, dtype = dtype_values_axis
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
dims=tuple(idxes),
)
@handle_frontend_test(
fn_tree="torch.reshape",
dtypes_x_reshape=dtypes_x_reshape(),
)
def test_torch_reshape(
*,
dtypes_x_reshape,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, shape = dtypes_x_reshape
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
shape=shape,
)
# row_stack
@handle_frontend_test(
fn_tree="torch.row_stack",
dtype_value_shape=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=st.integers(1, 5),
),
)
def test_torch_row_stack(
*,
dtype_value_shape,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, value = dtype_value_shape
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
tensors=value,
)
@handle_frontend_test(
fn_tree="torch.select",
dtype_x_idx_axis=_dtype_input_idx_axis(),
)
def test_torch_select(
*,
dtype_x_idx_axis,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, idx, axis = dtype_x_idx_axis
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x,
dim=axis,
index=idx,
)
# split
@handle_frontend_test(
fn_tree="torch.split",
dtype_value=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"),
),
split_size_or_sections=_get_splits(
allow_none=False, min_num_dims=1, allow_array_indices=False
),
dim=st.shared(
helpers.get_axis(
shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"),
force_int=True,
),
key="target_axis",
),
)
def test_torch_split(
*,
dtype_value,
split_size_or_sections,
dim,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, value = dtype_value
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
tensor=value[0],
split_size_or_sections=split_size_or_sections,
dim=dim,
)
# squeeze
@handle_frontend_test(
fn_tree="torch.squeeze",
dtype_and_values=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
shape=st.shared(helpers.get_shape(min_num_dims=1), key="shape"),
),
dim=helpers.get_axis(
shape=st.shared(helpers.get_shape(min_num_dims=1), key="shape"),
max_size=1,
).filter(lambda axis: isinstance(axis, int)),
)
def test_torch_squeeze(
*,
dtype_and_values,
dim,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, value = dtype_and_values
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=value[0],
dim=dim,
)
# stack
@handle_frontend_test(
fn_tree="torch.stack",
dtype_value_shape=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
num_arrays=st.shared(helpers.ints(min_value=2, max_value=4), key="num_arrays"),
shape=st.shared(helpers.get_shape(min_num_dims=1), key="shape"),
),
dim=helpers.get_axis(
shape=st.shared(helpers.get_shape(min_num_dims=1), key="shape"),
).filter(lambda axis: isinstance(axis, int)),
)
def test_torch_stack(
*,
dtype_value_shape,
dim,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, value = dtype_value_shape
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
tensors=value,
dim=dim,
)
# swapaxes
@handle_frontend_test(
fn_tree="torch.swapaxes",
dtype_and_values=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
shape=st.shared(helpers.get_shape(min_num_dims=2), key="shape"),
),
axis0=helpers.get_axis(
shape=st.shared(helpers.get_shape(min_num_dims=2), key="shape"),
force_int=True,
),
axis1=helpers.get_axis(
shape=st.shared(helpers.get_shape(min_num_dims=2), key="shape"),
force_int=True,
),
)
def test_torch_swapaxes(
*,
dtype_and_values,
axis0,
axis1,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, value = dtype_and_values
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=value[0],
axis0=axis0,
axis1=axis1,
)
# swapdims
@handle_frontend_test(
fn_tree="torch.swapdims",
dtype_and_values=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
shape=st.shared(helpers.get_shape(min_num_dims=2), key="shape"),
),
dim0=helpers.get_axis(
shape=st.shared(helpers.get_shape(min_num_dims=2), key="shape"),
force_int=True,
),
dim1=helpers.get_axis(
shape=st.shared(helpers.get_shape(min_num_dims=2), key="shape"),
force_int=True,
),
)
def test_torch_swapdims(
*,
dtype_and_values,
dim0,
dim1,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, value = dtype_and_values
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=value[0],
dim0=dim0,
dim1=dim1,
)
# t
@handle_frontend_test(
fn_tree="torch.t",
dtype_and_values=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
shape=st.shared(helpers.get_shape(max_num_dims=2), key="shape"),
),
)
def test_torch_t(
*,
dtype_and_values,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, value = dtype_and_values
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=value[0],
)
@handle_frontend_test(
fn_tree="torch.take",
dtype_and_x=helpers.array_indices_axis(
        array_dtypes=helpers.get_dtypes("valid"), indices_dtypes=["int64"]
),
)
def test_torch_take(
*,
dtype_and_x,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtypes, xs, indices, _, _ = dtype_and_x
helpers.test_frontend_function(
input_dtypes=dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=xs,
index=indices,
)
# take_along_dim
@handle_frontend_test(
fn_tree="torch.take_along_dim",
dtype_indices_axis=helpers.array_indices_axis(
array_dtypes=helpers.get_dtypes("numeric"),
indices_dtypes=["int64"],
min_num_dims=1,
max_num_dims=5,
min_dim_size=1,
max_dim_size=10,
indices_same_dims=True,
),
)
def test_torch_take_along_dim(
*,
dtype_indices_axis,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtypes, value, indices, axis, _ = dtype_indices_axis
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=value,
indices=indices,
dim=axis,
)
# tensor_split
@handle_frontend_test(
fn_tree="torch.tensor_split",
dtype_value=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("integer"),
shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"),
),
indices_or_sections=_get_splits(
min_num_dims=1, allow_none=False, allow_array_indices=False
),
axis=st.shared(
helpers.get_axis(
shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"),
force_int=True,
),
key="target_axis",
),
number_positional_args=st.just(2),
test_with_out=st.just(False),
)
def test_torch_tensor_split(
*,
dtype_value,
indices_or_sections,
axis,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, value = dtype_value
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=value[0],
indices_or_sections=indices_or_sections,
dim=axis,
)
# tile
@handle_frontend_test(
fn_tree="torch.tile",
dtype_value=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=st.shared(helpers.get_shape(), key="shape"),
),
dim=helpers.get_axis(
shape=st.shared(helpers.get_shape(), key="shape"),
allow_neg=False,
force_tuple=True,
),
)
def test_torch_tile(
*,
dtype_value,
dim,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, value = dtype_value
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=value[0],
dims=dim,
)
# transpose
@handle_frontend_test(
fn_tree="torch.transpose",
dtype_and_values=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
shape=st.shared(helpers.get_shape(min_num_dims=2), key="shape"),
),
dim0=helpers.get_axis(
shape=st.shared(helpers.get_shape(min_num_dims=2), key="shape"),
force_int=True,
),
dim1=helpers.get_axis(
shape=st.shared(helpers.get_shape(min_num_dims=2), key="shape"),
force_int=True,
),
)
def test_torch_transpose(
*,
dtype_and_values,
dim0,
dim1,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, value = dtype_and_values
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=value[0],
dim0=dim0,
dim1=dim1,
)
# unbind
@handle_frontend_test(
fn_tree="torch.unbind",
dtype_value_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
min_num_dims=1,
valid_axis=True,
force_int_axis=True,
),
)
def test_torch_unbind(
*,
dtype_value_axis,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtypes, value, axis = dtype_value_axis
helpers.test_frontend_function(
input_dtypes=input_dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=value[0],
dim=axis,
)
# unsqueeze
@handle_frontend_test(
fn_tree="torch.unsqueeze",
dtype_value=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=st.shared(helpers.get_shape(), key="shape"),
),
dim=helpers.get_axis(
shape=st.shared(helpers.get_shape(), key="shape"),
allow_neg=True,
force_int=True,
),
)
def test_torch_unsqueeze(
*,
dtype_value,
dim,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, value = dtype_value
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=value[0],
dim=dim,
)
# vsplit
@handle_frontend_test(
fn_tree="torch.vsplit",
dtype_value=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("valid"),
shape=st.shared(helpers.get_shape(min_num_dims=2), key="value_shape"),
),
indices_or_sections=_get_splits(
min_num_dims=2,
axis=0,
allow_none=False,
allow_array_indices=False,
is_mod_split=True,
),
)
def test_torch_vsplit(
*,
dtype_value,
indices_or_sections,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, value = dtype_value
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=value[0],
indices_or_sections=indices_or_sections,
)
# vstack
@handle_frontend_test(
fn_tree="torch.vstack",
dtype_value_shape=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
),
)
def test_torch_vstack(
*,
dtype_value_shape,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, value = dtype_value_shape
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
tensors=value,
)
@handle_frontend_test(
fn_tree="torch.where",
broadcastables=_where_helper(),
only_cond=st.booleans(),
)
def test_torch_where(
*,
broadcastables,
only_cond,
frontend,
test_flags,
fn_tree,
backend_fw,
on_device,
):
dtypes, arrays = broadcastables
if only_cond:
helpers.test_frontend_function(
input_dtypes=[dtypes[0]],
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
condition=arrays[0],
)
else:
helpers.test_frontend_function(
input_dtypes=dtypes,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
condition=arrays[0],
input=arrays[1],
other=arrays[2],
)
| ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_indexing_slicing_joining_mutating_ops.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_indexing_slicing_joining_mutating_ops.py",
"repo_id": "ivy",
"token_count": 22817
} | 60 |
from hypothesis import assume, strategies as st
# local
import ivy
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
from ivy_tests.test_ivy.test_functional.test_experimental.test_nn.test_layers import (
_interp_args,
)
# --- Helpers --- #
# --------------- #
@st.composite
def _affine_grid_helper(draw):
align_corners = draw(st.booleans())
dims = draw(st.integers(4, 5))
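    # 4-D (N, C, H, W) inputs pair with a (N, 2, 3) theta;
    # 5-D (N, C, D, H, W) inputs pair with a (N, 3, 4) theta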
if dims == 4:
size = draw(
st.tuples(
st.integers(1, 20),
st.integers(1, 20),
st.integers(2, 20),
st.integers(2, 20),
)
)
theta_dtype, theta = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=0,
max_value=1,
shape=(size[0], 2, 3),
)
)
return theta_dtype, theta[0], size, align_corners
else:
size = draw(
st.tuples(
st.integers(1, 20),
st.integers(1, 20),
st.integers(2, 20),
st.integers(2, 20),
st.integers(2, 20),
)
)
theta_dtype, theta = draw(
helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=0,
max_value=1,
shape=(size[0], 3, 4),
)
)
return theta_dtype, theta[0], size, align_corners
@st.composite
def _pad_generator(draw, shape, mode):
pad = ()
m = max(int((len(shape) + 1) / 2), 1)
    for i in range(m):
        if mode != "constant":
            # batch and channel dims are never padded by the non-constant
            # modes; deeper dims can be padded by at most size - 1
            if i < 2:
                max_pad_value = 0
            else:
                max_pad_value = shape[i] - 1
        else:
            # constant mode accepts any pad size; cap it at size - 1 so that
            # max_pad_value is always defined and examples stay small
            max_pad_value = shape[i] - 1
pad = pad + draw(
st.tuples(
st.integers(min_value=0, max_value=max(0, max_pad_value)),
st.integers(min_value=0, max_value=max(0, max_pad_value)),
)
)
return pad
@st.composite
def _pad_helper(draw):
mode = draw(
st.sampled_from(
[
"constant",
"reflect",
"replicate",
"circular",
]
)
)
min_v = 1
max_v = 5
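    # non-constant torch pad modes expect at least 3-D inputs; reflect is
    # further capped at 4-D here to keep the generated shapes supported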
if mode != "constant":
min_v = 3
if mode == "reflect":
max_v = 4
dtype, input, shape = draw(
helpers.dtype_and_values(
available_dtypes=["float32", "float64"],
ret_shape=True,
min_num_dims=min_v,
max_num_dims=max_v,
min_dim_size=2,
min_value=-1e05,
max_value=1e05,
)
)
padding = draw(_pad_generator(shape, mode))
if mode == "constant":
value = draw(helpers.ints(min_value=0, max_value=4) | st.none())
else:
value = 0.0
return dtype, input[0], padding, value, mode
@st.composite
def grid_sample_helper(draw, dtype, mode, mode_3d, padding_mode):
dtype = draw(dtype)
align_corners = draw(st.booleans())
dims = draw(st.integers(4, 5))
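    # 4-D inputs sample with a (N, H_out, W_out, 2) grid;
    # 5-D inputs sample with a (N, D_out, H_out, W_out, 3) grid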
height = draw(helpers.ints(min_value=5, max_value=10))
width = draw(helpers.ints(min_value=5, max_value=10))
channels = draw(helpers.ints(min_value=1, max_value=3))
grid_h = draw(helpers.ints(min_value=2, max_value=4))
grid_w = draw(helpers.ints(min_value=2, max_value=4))
batch = draw(helpers.ints(min_value=1, max_value=5))
padding_mode = draw(st.sampled_from(padding_mode))
if dims == 4:
mode = draw(st.sampled_from(mode))
x = draw(
helpers.array_values(
dtype=dtype[0],
shape=[batch, channels, height, width],
min_value=-1,
max_value=1,
)
)
grid = draw(
helpers.array_values(
dtype=dtype[0],
shape=[batch, grid_h, grid_w, 2],
min_value=-1,
max_value=1,
)
)
elif dims == 5:
mode = draw(st.sampled_from(mode_3d))
depth = draw(helpers.ints(min_value=10, max_value=15))
grid_d = draw(helpers.ints(min_value=5, max_value=10))
x = draw(
helpers.array_values(
dtype=dtype[0],
shape=[batch, channels, depth, height, width],
min_value=-1,
max_value=1,
)
)
grid = draw(
helpers.array_values(
dtype=dtype[0],
shape=[batch, grid_d, grid_h, grid_w, 3],
min_value=-1,
max_value=1,
)
)
return dtype, x, grid, mode, padding_mode, align_corners
# --- Main --- #
# ------------ #
@handle_frontend_test(
fn_tree="torch.nn.functional.affine_grid",
dtype_and_input_and_other=_affine_grid_helper(),
)
def test_torch_affine_grid(
*,
dtype_and_input_and_other,
on_device,
backend_fw,
fn_tree,
frontend,
test_flags,
):
dtype, theta, size, align_corners = dtype_and_input_and_other
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
theta=theta,
size=size,
align_corners=align_corners,
)
@handle_frontend_test(
fn_tree="torch.nn.functional.grid_sample",
dtype_x_grid_modes=grid_sample_helper(
dtype=helpers.get_dtypes("valid", full=False),
mode=["nearest", "bilinear", "bicubic"],
mode_3d=["nearest", "bilinear"],
padding_mode=["border", "zeros", "reflection"],
),
)
def test_torch_grid_sample(
*,
dtype_x_grid_modes,
on_device,
backend_fw,
fn_tree,
frontend,
test_flags,
):
dtype, x, grid, mode, padding_mode, align_corners = dtype_x_grid_modes
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x,
grid=grid,
mode=mode,
padding_mode=padding_mode,
align_corners=align_corners,
)
@handle_frontend_test(
fn_tree="torch.nn.functional.interpolate",
dtype_and_input_and_other=_interp_args(
mode_list="torch",
),
number_positional_args=st.just(2),
)
def test_torch_interpolate(
*,
dtype_and_input_and_other,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
(
input_dtype,
x,
mode,
size,
align_corners,
scale_factor,
recompute_scale_factor,
) = dtype_and_input_and_other
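    # torch only accepts align_corners for the interpolating modes below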
if mode not in ["linear", "bilinear", "bicubic", "trilinear"]:
align_corners = None
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
atol=1e-03,
input=x[0],
size=size,
scale_factor=scale_factor,
mode=mode,
align_corners=align_corners,
recompute_scale_factor=recompute_scale_factor,
)
@handle_frontend_test(
fn_tree="torch.nn.functional.pad",
dtype_and_input_and_other=_pad_helper(),
)
def test_torch_pad(
*,
dtype_and_input_and_other,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
dtype, input, padding, value, mode = dtype_and_input_and_other
helpers.test_frontend_function(
input_dtypes=dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=input,
pad=padding,
mode=mode,
value=value,
)
# pixel_shuffle
@handle_frontend_test(
fn_tree="torch.nn.functional.pixel_shuffle",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=0,
min_num_dims=4,
max_num_dims=4,
min_dim_size=1,
),
factor=helpers.ints(min_value=1),
)
def test_torch_pixel_shuffle(
*,
dtype_and_x,
factor,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
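    # pixel_shuffle requires the channel dim to be divisible by upscale_factor**2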
assume(ivy.shape(x[0])[1] % (factor**2) == 0)
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
upscale_factor=factor,
)
@handle_frontend_test(
fn_tree="torch.nn.functional.pixel_unshuffle",
dtype_and_x=helpers.dtype_and_values(
available_dtypes=helpers.get_dtypes("float"),
min_value=0,
min_num_dims=4,
max_num_dims=4,
min_dim_size=1,
),
factor=helpers.ints(min_value=1),
)
def test_torch_pixel_unshuffle(
*,
dtype_and_x,
factor,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x = dtype_and_x
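    # pixel_unshuffle requires both spatial dims to be divisible by the factor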
    assume(ivy.shape(x[0])[2] % factor == 0 and ivy.shape(x[0])[3] % factor == 0)
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
downscale_factor=factor,
)
@handle_frontend_test(
fn_tree="torch.nn.functional.upsample",
dtype_and_input_and_other=_interp_args(),
number_positional_args=st.just(2),
)
def test_torch_upsample(
*,
dtype_and_input_and_other,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, mode, size, align_corners = dtype_and_input_and_other
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
size=size,
mode=mode,
align_corners=align_corners,
)
@handle_frontend_test(
fn_tree="torch.nn.functional.upsample_bilinear",
dtype_and_input_and_other=_interp_args(mode="bilinear"),
number_positional_args=st.just(2),
)
def test_torch_upsample_bilinear(
*,
dtype_and_input_and_other,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, _, size, _, scale_factor, _ = dtype_and_input_and_other
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
size=size,
scale_factor=scale_factor,
)
@handle_frontend_test(
fn_tree="torch.nn.functional.upsample_nearest",
dtype_and_input_and_other=_interp_args(mode="nearest"),
number_positional_args=st.just(2),
)
def test_torch_upsample_nearest(
*,
dtype_and_input_and_other,
on_device,
fn_tree,
frontend,
test_flags,
backend_fw,
):
input_dtype, x, _, size, _ = dtype_and_input_and_other
helpers.test_frontend_function(
input_dtypes=input_dtype,
backend_to_test=backend_fw,
frontend=frontend,
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
input=x[0],
size=size,
)
| ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_nn/test_functional/test_vision_functions.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_nn/test_functional/test_vision_functions.py",
"repo_id": "ivy",
"token_count": 6206
} | 61 |
"""Collection of tests for utility functions."""
# global
from hypothesis import strategies as st
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_test
# all
@handle_test(
fn_tree="functional.ivy.all",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid"),
valid_axis=True,
max_axes_size=1,
),
keepdims=st.booleans(),
test_gradients=st.just(False),
)
def test_all(dtype_x_axis, keepdims, test_flags, backend_fw, fn_name, on_device):
input_dtype, x, axis = dtype_x_axis
axis = axis if axis is None or isinstance(axis, int) else axis[0]
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x[0],
axis=axis,
keepdims=keepdims,
)
# any
@handle_test(
fn_tree="functional.ivy.any",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("valid", full=True),
valid_axis=True,
max_axes_size=1,
),
keepdims=st.booleans(),
test_gradients=st.just(False),
)
def test_any(dtype_x_axis, keepdims, test_flags, backend_fw, fn_name, on_device):
input_dtype, x, axis = dtype_x_axis
axis = axis if axis is None or isinstance(axis, int) else axis[0]
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
backend_to_test=backend_fw,
fn_name=fn_name,
on_device=on_device,
x=x[0],
axis=axis,
keepdims=keepdims,
)
| ivy/ivy_tests/test_ivy/test_functional/test_core/test_utility.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_functional/test_core/test_utility.py",
"repo_id": "ivy",
"token_count": 771
} | 62 |
# global
from hypothesis import strategies as st
import numpy as np
# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_test
# --- Helpers --- #
# --------------- #
@st.composite
def _invert_permutation_helper(draw, for_frontend_test=False):
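    # draw a random permutation of length 3-10; frontend tests always get an
    # int64 numpy array, otherwise the container type and int dtype vary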
perm = draw(
st.permutations(list(range(draw(st.integers(min_value=3, max_value=10)))))
)
if for_frontend_test or draw(st.booleans()):
perm = np.array(perm)
dtype = draw(
st.sampled_from(["int32", "int64"] if not for_frontend_test else ["int64"])
)
return dtype, perm
# --- Main --- #
# ------------ #
# invert_permutation
@handle_test(
fn_tree="functional.ivy.experimental.invert_permutation",
dtype_and_perm=_invert_permutation_helper(),
test_instance_method=st.just(False),
test_with_out=st.just(False),
test_gradients=st.just(False),
ground_truth_backend="numpy",
)
def test_invert_permutation(dtype_and_perm, test_flags, backend_fw, fn_name, on_device):
dtype, perm = dtype_and_perm
helpers.test_function(
input_dtypes=[dtype],
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
x=perm,
)
# lexsort
@handle_test(
fn_tree="functional.ivy.experimental.lexsort",
dtype_x_axis=helpers.dtype_values_axis(
available_dtypes=helpers.get_dtypes("numeric"),
min_axis=-1,
max_axis=0,
min_num_dims=1,
force_int_axis=True,
),
ground_truth_backend="numpy",
test_with_out=st.just(False),
test_gradients=st.just(False),
)
def test_lexsort(
dtype_x_axis,
test_flags,
backend_fw,
fn_name,
on_device,
):
input_dtype, x, axis = dtype_x_axis
helpers.test_function(
input_dtypes=input_dtype,
test_flags=test_flags,
on_device=on_device,
backend_to_test=backend_fw,
fn_name=fn_name,
keys=x[0],
axis=axis,
)
| ivy/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_sorting.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_sorting.py",
"repo_id": "ivy",
"token_count": 920
} | 63 |
import sys
import os
import contextlib
import pytest
from ivy.utils.assertions import (
check_all,
check_all_or_any_fn,
check_any,
check_dev_correct_formatting,
check_dimensions,
check_elem_in_list,
check_equal,
check_exists,
check_false,
check_gather_input_valid,
check_gather_nd_input_valid,
check_greater,
check_inplace_sizes_valid,
check_isinstance,
check_kernel_padding_size,
check_less,
check_same_dtype,
check_shape,
check_shapes_broadcastable,
check_true,
check_unsorted_segment_valid_params,
)
from ivy.utils.assertions import _check_jax_x64_flag
import ivy
@pytest.mark.parametrize(
"results",
[
([0, 1, 2]),
([True, False]),
([True, True]),
],
)
def test_check_all(results):
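    # redirect stdout to a file so the assertion message printed in the
    # except branch can be read back and checked; the remaining tests in
    # this module reuse the same pattern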
filename = "except_out.txt"
orig_stdout = sys.stdout
with open(filename, "w") as f:
sys.stdout = f
lines = ""
try:
check_all(results)
except Exception as e:
print(e)
sys.stdout = orig_stdout
with open(filename) as f:
lines += f.read()
if not all(results):
assert "one" in lines.strip()
if all(results):
assert not lines.strip()
with contextlib.suppress(FileNotFoundError):
os.remove(filename)
@pytest.mark.parametrize(
("args", "fn", "type", "limit"),
[
# INVALID CASES
((1, 2, 0), ivy.array, "all", [3]),
((0, 0), ivy.array, "all", [2]),
((1, 1), ivy.array, "any", [3]),
((0, 0, 1), ivy.array, "any", [3]),
((1, 0, 1), ivy.array, "all_any", [3]),
# VALID
((1, 1), ivy.array, "any", [2]),
((0, 1), ivy.array, "any", [1]),
((1, 1, 2), ivy.array, "all", [3]),
],
)
def test_check_all_or_any_fn(args, fn, type, limit):
filename = "except_out.txt"
orig_stdout = sys.stdout
with open(filename, "w") as f:
sys.stdout = f
lines = ""
try:
check_all_or_any_fn(*args, fn=fn, type=type, limit=limit)
local_vars = {**locals()}
except Exception as e:
local_vars = {**locals()}
print(e)
sys.stdout = orig_stdout
with open(filename) as f:
lines += f.read()
if type in ["all", "any"]:
if "e" in local_vars:
assert "args must exist according to" in lines.strip()
else:
assert not lines.strip()
else:
assert "type must be all or any" in lines.strip()
with contextlib.suppress(FileNotFoundError):
os.remove(filename)
@pytest.mark.parametrize(
"results",
[([0, 1, 2]), ([False, False]), ([True, False]), ([0, False])],
)
def test_check_any(results):
filename = "except_out.txt"
orig_stdout = sys.stdout
with open(filename, "w") as f:
sys.stdout = f
lines = ""
try:
check_any(results)
except Exception as e:
print(e)
sys.stdout = orig_stdout
with open(filename) as f:
lines += f.read()
if not any(results):
assert "all" in lines.strip()
if all(results):
assert not lines.strip()
with contextlib.suppress(FileNotFoundError):
os.remove(filename)
@pytest.mark.parametrize(
    ("device", "should_raise"),
    [
        # VALID CASES
        ("cpu", False),
        ("gpu:0", False),
        ("tpu:1", False),
        # INVALID
        ("cuda", True),
        ("gpu;", True),
        ("tpu:abc12", True),
    ],
)
def test_check_dev_correct_formatting(device, should_raise):
    # valid devices must pass silently; invalid ones must trip the assertion
    if should_raise:
        with pytest.raises(AssertionError):
            check_dev_correct_formatting(device)
    else:
        check_dev_correct_formatting(device)
@pytest.mark.parametrize(
"x",
[
# INVALID CASES
(ivy.array([1])),
(ivy.array([])),
# VALID
(ivy.array([1, 2])),
(ivy.array([[1, 2], [2, 3]])),
],
)
def test_check_dimensions(x):
filename = "except_out.txt"
orig_stdout = sys.stdout
with open(filename, "w") as f:
sys.stdout = f
lines = ""
try:
check_dimensions(x)
local_vars = {**locals()}
except Exception as e:
local_vars = {**locals()}
print(e)
sys.stdout = orig_stdout
with open(filename) as f:
lines += f.read()
if "e" in local_vars:
assert "greater than one dimension" in lines.strip()
if "e" not in local_vars:
assert not lines.strip()
with contextlib.suppress(FileNotFoundError):
os.remove(filename)
@pytest.mark.parametrize(
("elem", "list", "inverse"),
[
(1, [1, 2], False),
("a", [1, 2], False),
(1, [2, 3], True),
(0, ["a", "b", "c"], True),
],
)
def test_check_elem_in_list(elem, list, inverse):
filename = "except_out.txt"
orig_stdout = sys.stdout
with open(filename, "w") as f:
sys.stdout = f
lines = ""
try:
check_elem_in_list(elem, list, inverse)
except Exception as e:
print(e)
sys.stdout = orig_stdout
with open(filename) as f:
lines += f.read()
if not inverse:
if elem not in list:
assert "must be one" in lines.strip()
if elem in list:
assert not lines.strip()
if inverse:
if elem not in list:
assert not lines.strip()
if elem in list:
assert "must not be one" in lines.strip()
with contextlib.suppress(FileNotFoundError):
os.remove(filename)
@pytest.mark.parametrize(
("x1", "x2", "inverse"),
[
(5, 10, False),
(10, 10, False),
(5, 5, True),
(10, 5, True),
],
)
def test_check_equal(x1, x2, inverse):
filename = "except_out.txt"
orig_stdout = sys.stdout
with open(filename, "w") as f:
sys.stdout = f
lines = ""
try:
check_equal(x1, x2, inverse)
except Exception as e:
print(e)
sys.stdout = orig_stdout
with open(filename) as f:
lines += f.read()
if inverse:
if x1 == x2:
assert "must not be equal" in lines.strip()
if x1 != x2:
assert not lines.strip()
if not inverse:
if x1 != x2:
assert "must be equal" in lines.strip()
if x1 == x2:
assert not lines.strip()
with contextlib.suppress(FileNotFoundError):
os.remove(filename)
@pytest.mark.parametrize(
("x", "inverse"),
[(None, False), ([], False), (None, True), ("abc", True)],
)
def test_check_exists(x, inverse):
filename = "except_out.txt"
orig_stdout = sys.stdout
with open(filename, "w") as f:
sys.stdout = f
lines = ""
try:
check_exists(x, inverse)
except Exception as e:
print(e)
sys.stdout = orig_stdout
with open(filename) as f:
lines += f.read()
if not inverse:
if x is None:
assert "must not be" in lines.strip()
if x:
assert not lines.strip()
if inverse:
if x is None:
assert not lines.strip()
if x:
assert "must be None" in lines.strip()
with contextlib.suppress(FileNotFoundError):
os.remove(filename)
@pytest.mark.parametrize(
"expression",
[
(True),
"a",
(None),
(False),
],
)
def test_check_false(expression):
filename = "except_out.txt"
orig_stdout = sys.stdout
with open(filename, "w") as f:
sys.stdout = f
lines = ""
try:
check_false(expression)
except Exception as e:
print(e)
sys.stdout = orig_stdout
with open(filename) as f:
lines += f.read()
if not expression:
assert not lines.strip()
if expression:
assert "False" in lines.strip()
with contextlib.suppress(FileNotFoundError):
os.remove(filename)
@pytest.mark.parametrize(
("params", "indices", "axis", "batch_dims"),
[
# INVALID CASES
(ivy.array([1, 2, 3]), ivy.array([1]), 2, 3),
(ivy.array([[1, 2, 3], [4, 5, 6]]), ivy.array([[0], [2]]), 1, 2),
(ivy.array([[1, 2, 3], [4, 5, 6]]), ivy.array([[0, 1], [1, 2], [2, 3]]), 1, 0),
(ivy.array([1, 2, 3]), ivy.array([[1, 2]]), 1, 0),
# VALID
(ivy.array([1, 2, 3]), ivy.array([1]), 0, 1),
(ivy.array([[1, 2, 3], [4, 5, 6]]), ivy.array([0, 2]), -1, 0),
(ivy.array([[1, 2, 3], [4, 5, 6]]), ivy.array([[0, 1], [1, 2]]), -1, 0),
],
)
def test_check_gather_input_valid(params, indices, axis, batch_dims):
filename = "except_out.txt"
orig_stdout = sys.stdout
with open(filename, "w") as f:
sys.stdout = f
lines = ""
try:
check_gather_input_valid(params, indices, axis, batch_dims)
local_vars = {**locals()}
except Exception as e:
local_vars = {**locals()}
print(e)
sys.stdout = orig_stdout
with open(filename) as f:
lines += f.read()
if "e" in local_vars:
assert (
"must be less than or equal" in lines.strip()
or "batch dimensions must match in" in lines.strip()
)
if "e" not in local_vars:
assert not lines.strip()
with contextlib.suppress(FileNotFoundError):
os.remove(filename)
@pytest.mark.parametrize(
("params", "indices", "batch_dims"),
[
# INVALID CASES
(ivy.array([1, 2, 3]), ivy.array([1]), 2),
(ivy.array([[1, 2, 3], [4, 5, 6]]), ivy.array([0, 2]), 1),
(ivy.array([[1, 2, 3], [4, 5, 6]]), ivy.array([[0, 1], [1, 2], [2, 3]]), 1),
(ivy.array([1, 2, 3]), ivy.array([[1, 2]]), 0),
# VALID
(ivy.array([1, 2, 3]), ivy.array([1]), 0),
(ivy.array([[1, 2, 3], [4, 5, 6]]), ivy.array([0, 2]), 0),
(ivy.array([[1, 2, 3], [4, 5, 6]]), ivy.array([[0, 1], [1, 2]]), 1),
],
)
def test_check_gather_nd_input_valid(params, indices, batch_dims):
filename = "except_out.txt"
orig_stdout = sys.stdout
with open(filename, "w") as f:
sys.stdout = f
lines = ""
try:
check_gather_nd_input_valid(params, indices, batch_dims)
local_vars = {**locals()}
except Exception as e:
local_vars = {**locals()}
print(e)
sys.stdout = orig_stdout
with open(filename) as f:
lines += f.read()
if "e" in local_vars:
assert (
"less than rank(`params`)" in lines.strip()
or "less than rank(`indices`)" in lines.strip()
or "dimensions must match in `params` and `indices`" in lines.strip()
or "index innermost dimension length must be <=" in lines.strip()
)
if "e" not in local_vars:
assert not lines.strip()
with contextlib.suppress(FileNotFoundError):
os.remove(filename)
@pytest.mark.parametrize(
("x1", "x2", "allow_equal"),
[
(5, 10, False),
(10, 5, False),
(5, 5, True),
(10, 5, True),
],
)
def test_check_greater(x1, x2, allow_equal):
filename = "except_out.txt"
orig_stdout = sys.stdout
with open(filename, "w") as f:
sys.stdout = f
lines = ""
try:
check_greater(x1, x2, allow_equal)
except Exception as e:
print(e)
sys.stdout = orig_stdout
with open(filename) as f:
lines += f.read()
if x1 < x2 and allow_equal:
assert "greater than or equal" in lines.strip()
if x1 < x2 and not allow_equal:
assert "greater than" in lines.strip()
if x1 > x2:
assert not lines.strip()
with contextlib.suppress(FileNotFoundError):
os.remove(filename)
@pytest.mark.parametrize(
("var", "data"),
[
# INVALID CASES
(ivy.array([1]), ivy.array([1, 2])),
(ivy.array([[1], [1], [2]]), ivy.array([1, 2])),
# VALID
(ivy.array([1, 2]), ivy.array([1])),
(ivy.array([[[1]]]), ivy.array([1, 2])),
],
)
def test_check_inplace_sizes_valid(var, data):
filename = "except_out.txt"
orig_stdout = sys.stdout
with open(filename, "w") as f:
sys.stdout = f
lines = ""
try:
check_inplace_sizes_valid(var, data)
local_vars = {**locals()}
except Exception as e:
local_vars = {**locals()}
print(e)
sys.stdout = orig_stdout
with open(filename) as f:
lines += f.read()
if "e" in local_vars:
assert "Could not output values of shape" in lines.strip()
if "e" not in local_vars:
assert not lines.strip()
with contextlib.suppress(FileNotFoundError):
os.remove(filename)
@pytest.mark.parametrize(
("x", "allowed_types"),
[(5.0, float), (ivy.array(5), type(ivy.array(8))), (5, float), ([5, 10], tuple)],
)
def test_check_isinstance(x, allowed_types):
filename = "except_out.txt"
orig_stdout = sys.stdout
with open(filename, "w") as f:
sys.stdout = f
lines = ""
try:
check_isinstance(x, allowed_types)
except Exception as e:
print(e)
sys.stdout = orig_stdout
with open(filename) as f:
lines += f.read()
if not isinstance(x, allowed_types):
assert "must be one of the" in lines.strip()
if isinstance(x, allowed_types):
assert not lines.strip()
with contextlib.suppress(FileNotFoundError):
os.remove(filename)
@pytest.mark.parametrize(
"dtype",
[
        # INVALID CASES
        "float64",
        "int64",
        "uint64",
        "complex128",
        # VALID
        "float16",
        "float32",
        "int32",
        "int16",
        "complex64",
],
)
def test_check_jax_x64_flag(dtype):
filename = "except_out.txt"
orig_stdout = sys.stdout
with open(filename, "w") as f:
sys.stdout = f
lines = ""
try:
_check_jax_x64_flag(dtype)
local_vars = {**locals()}
except Exception as e:
local_vars = {**locals()}
print(e)
sys.stdout = orig_stdout
with open(filename) as f:
lines += f.read()
if "e" in local_vars:
assert "output not supported while jax_enable_x64" in lines.strip()
if "e" not in local_vars:
assert not lines.strip()
with contextlib.suppress(FileNotFoundError):
os.remove(filename)
@pytest.mark.parametrize(
("kernel_size", "padding_size"),
[
# INVALID CASES
(((2, 2), ((2, 2), (1, 1)))),
(((3, 3), ((2, 2), (1, 1)))),
# VALID
(((5, 5), ((1, 1), (2, 2)))),
(((3, 3), ((1, 1), (0, 0)))),
],
)
def test_check_kernel_padding_size(kernel_size, padding_size):
filename = "except_out.txt"
orig_stdout = sys.stdout
with open(filename, "w") as f:
sys.stdout = f
lines = ""
try:
check_kernel_padding_size(kernel_size, padding_size)
local_vars = {**locals()}
except Exception as e:
local_vars = {**locals()}
print(e)
sys.stdout = orig_stdout
with open(filename) as f:
lines += f.read()
if "e" in local_vars:
assert "less than or equal to half" in lines.strip()
if "e" not in local_vars:
assert not lines.strip()
with contextlib.suppress(FileNotFoundError):
os.remove(filename)
@pytest.mark.parametrize(
("x1", "x2", "allow_equal"),
[
(5, 10, False),
(10, 5, False),
(5, 5, True),
(10, 5, True),
],
)
def test_check_less(x1, x2, allow_equal):
filename = "except_out.txt"
orig_stdout = sys.stdout
with open(filename, "w") as f:
sys.stdout = f
lines = ""
try:
check_less(x1, x2, allow_equal)
except Exception as e:
print(e)
sys.stdout = orig_stdout
with open(filename) as f:
lines += f.read()
if x1 > x2 and allow_equal:
assert "lesser than or equal" in lines.strip()
if x1 > x2 and not allow_equal:
assert "lesser than" in lines.strip()
if x1 < x2:
assert not lines.strip()
with contextlib.suppress(FileNotFoundError):
os.remove(filename)
@pytest.mark.parametrize(
("x1", "x2"),
[
(ivy.array([1, 2, 3]), ivy.array([4, 5, 6])),
(ivy.array([1.0, 2.0, 3.0]), ivy.array([4, 5, 6])),
(ivy.array([1, 2, 3]), ivy.array([4j, 5 + 1j, 6])),
(ivy.array([1j]), ivy.array([2, 3 + 4j])),
],
)
def test_check_same_dtype(x1, x2):
filename = "except_out.txt"
orig_stdout = sys.stdout
with open(filename, "w") as f:
sys.stdout = f
lines = ""
try:
check_same_dtype(x1, x2)
local_vars = {**locals()}
except Exception as e:
local_vars = {**locals()}
print(e)
sys.stdout = orig_stdout
with open(filename) as f:
lines += f.read()
if "e" in local_vars:
assert "same dtype" in lines.strip()
if "e" not in local_vars:
assert not lines.strip()
with contextlib.suppress(FileNotFoundError):
os.remove(filename)
@pytest.mark.parametrize(
("x1", "x2"),
[
(ivy.array([1, 2, 3]), ivy.array([[4, 5, 6], [2, 3, 1]])),
(ivy.array([[1.0, 2.0], [3.0, 4.0]]), ivy.array([4, 5, 6])),
(ivy.array([1, 2]), ivy.array([3, 4, 5])),
(ivy.array([1]), ivy.array([2])),
(ivy.array([1, 2]), ivy.array([2, 3])),
],
)
def test_check_shape(x1, x2):
filename = "except_out.txt"
orig_stdout = sys.stdout
with open(filename, "w") as f:
sys.stdout = f
lines = ""
try:
check_shape(x1, x2)
local_vars = {**locals()}
except Exception as e:
local_vars = {**locals()}
print(e)
sys.stdout = orig_stdout
with open(filename) as f:
lines += f.read()
if "e" in local_vars:
assert "same shape" in lines.strip()
if "e" not in local_vars:
assert not lines.strip()
with contextlib.suppress(FileNotFoundError):
os.remove(filename)
@pytest.mark.parametrize(
("var", "data"),
[
# INVALID CASES
((2, 1), (1, 2, 1)),
((2, 1), (3, 1)),
# VALID
((1, 2), (1, 2)),
((1, 2), (1, 1, 1)),
],
)
def test_check_shapes_broadcastable(var, data):
filename = "except_out.txt"
orig_stdout = sys.stdout
with open(filename, "w") as f:
sys.stdout = f
lines = ""
try:
check_shapes_broadcastable(var, data)
local_vars = {**locals()}
except Exception as e:
local_vars = {**locals()}
print(e)
sys.stdout = orig_stdout
with open(filename) as f:
lines += f.read()
if "e" in local_vars:
assert "Could not broadcast shape" in lines.strip()
if "e" not in local_vars:
assert not lines.strip()
with contextlib.suppress(FileNotFoundError):
os.remove(filename)
@pytest.mark.parametrize(
"expression",
[
(True),
"a",
(None),
(False),
],
)
def test_check_true(expression):
filename = "except_out.txt"
orig_stdout = sys.stdout
with open(filename, "w") as f:
sys.stdout = f
lines = ""
try:
check_true(expression)
except Exception as e:
print(e)
sys.stdout = orig_stdout
with open(filename) as f:
lines += f.read()
if not expression:
assert "True" in lines.strip()
if expression:
assert not lines.strip()
with contextlib.suppress(FileNotFoundError):
os.remove(filename)
@pytest.mark.parametrize(
("data", "segment_ids", "num_segments"),
[
# INVALID CASES
(ivy.array([1, 2, 3]), ivy.array([0, 1, 0], dtype=ivy.int32), 2.0),
(ivy.array([1, 2, 3]), ivy.array([0, 1, 0], dtype=ivy.int32), 0),
(ivy.array([1, 2, 3]), ivy.array([0, 1, 0], dtype=ivy.int32), -2),
(ivy.array([1, 2, 3]), ivy.array([0.0, 1.0, 0.0], dtype=ivy.float16), 0),
(ivy.array([1, 2]), ivy.array([0, 1, 0], dtype=ivy.int32), 0),
(ivy.array([1, 2, 3]), ivy.array([0, 1], dtype=ivy.int32), 0),
(ivy.array([1, 2, 3]), ivy.array([0, 1, 2], dtype=ivy.int32), 2),
# VALID
(
ivy.array([1, 2, 3]),
ivy.array([0, 1, 0], dtype=ivy.int32),
2,
),
(ivy.array([1, 2, 3]), ivy.array([0, 1, 0], dtype=ivy.int32), ivy.array([2])),
],
)
def test_check_unsorted_segment_valid_params(data, segment_ids, num_segments):
filename = "except_out.txt"
orig_stdout = sys.stdout
with open(filename, "w") as f:
sys.stdout = f
lines = ""
try:
check_unsorted_segment_valid_params(data, segment_ids, num_segments)
local_vars = {**locals()}
except Exception as e:
local_vars = {**locals()}
print(e)
sys.stdout = orig_stdout
with open(filename) as f:
lines += f.read()
if "e" in local_vars:
assert (
"num_segments must be of integer type" in lines.strip()
or "segment_ids must have an integer dtype" in lines.strip()
or "segment_ids should be equal to data.shape[0]" in lines.strip()
or "is out of range" in lines.strip()
or "num_segments must be positive" in lines.strip()
)
if "e" not in local_vars:
assert not lines.strip()
with contextlib.suppress(FileNotFoundError):
os.remove(filename)
| ivy/ivy_tests/test_ivy/test_misc/test_assertions.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_misc/test_assertions.py",
"repo_id": "ivy",
"token_count": 10970
} | 64 |
import pytest
import logging
import ivy
def test_invalid_logging_mode():
with pytest.raises(AssertionError):
ivy.set_logging_mode("INVALID")
def test_set_logging_mode():
ivy.set_logging_mode("DEBUG")
assert logging.getLogger().level == logging.DEBUG
ivy.set_logging_mode("INFO")
assert logging.getLogger().level == logging.INFO
ivy.set_logging_mode("WARNING")
assert logging.getLogger().level == logging.WARNING
ivy.set_logging_mode("ERROR")
assert logging.getLogger().level == logging.ERROR
def test_unset_logging_mode():
ivy.set_logging_mode("DEBUG")
ivy.set_logging_mode("INFO")
ivy.unset_logging_mode()
assert logging.getLogger().level == logging.DEBUG
| ivy/ivy_tests/test_ivy/test_misc/test_logging.py/0 | {
"file_path": "ivy/ivy_tests/test_ivy/test_misc/test_logging.py",
"repo_id": "ivy",
"token_count": 278
} | 65 |
total_jobs = 40
job_prefix = "run_tests_"
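# Emit a GitHub Actions workflow to stdout: one aggregation job that collects
# artifacts from `total_jobs` parallel test-runner jobs, printed as YAML.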
print("name: intelligent-tests-pr")
print("on:")
print(" workflow_dispatch:")
print(" pull_request:")
print(" types: [opened, synchronize, reopened, review_requested ]")
print()
print("permissions:")
print(" actions: read")
print("jobs:")
print(" display_test_results:")
print(" if: ${{ always() }}")
print(" runs-on: ubuntu-latest")
print(" needs:")
for i in range(1, total_jobs + 1):
print(f" - {job_prefix}{i}")
print()
print(" steps:")
print(" - name: Download all test results")
print(" uses: actions/download-artifact@v3")
print()
print(" - name: Combined Test Results")
print(" run: |")
print(
' find . -name "test_results_*.txt" -exec cat {} + >'
" combined_test_results.txt"
)
print(' echo "Test results summary:"')
print(" cat combined_test_results.txt")
print()
print(" - name: New Failures Introduced")
print(" run: |")
print(
' find . -name "new_failures_*.txt" -exec cat {} + > combined_failures.txt'
)
print(" if [ -s combined_failures.txt ]")
print(" then")
print(' echo "This PR introduces the following new failing tests:"')
print(" cat combined_failures.txt")
print(" else")
print(' echo "This PR does not introduce any new test failures! Yippee!"')
print(" fi")
print()
for i in range(1, total_jobs + 1):
print(f" {job_prefix}{i}:")
print(" runs-on: ubuntu-latest")
print(" steps:")
print(" - name: Checkout Ivy 🛎")
print(" uses: actions/checkout@v2")
print(" with:")
print(" path: ivy")
print(" persist-credentials: false")
print(' submodules: "recursive"')
print(" fetch-depth: 100")
print()
print(" - name: Determine and Run Tests")
print(" id: tests")
print(" run: |")
print(
f" git clone -b master{i} https://github.com/unifyai/Mapping.git"
" --depth 1"
)
print(" pip install pydriller")
print(" cp Mapping/tests.pbz2 ivy/")
print(" cd ivy")
print(" mkdir .ivy")
print(" touch .ivy/key.pem")
print(" echo -n ${{ secrets.USER_API_KEY }} > .ivy/key.pem")
if i == 1:
print(" python scripts/determine_tests/determine_tests.py extra")
else:
print(" python scripts/determine_tests/determine_tests.py")
print(" set -o pipefail")
print(
f" python scripts/run_tests/run_tests_pr.py new_failures_{i}.txt | tee"
f" test_results_{i}.txt"
)
print(" continue-on-error: true")
print()
print(" - name: Upload test results")
print(" uses: actions/upload-artifact@v3")
print(" with:")
print(f" name: test_results_{i}")
print(f" path: ivy/test_results_{i}.txt")
print()
print(" - name: Upload New Failures")
print(" uses: actions/upload-artifact@v3")
print(" with:")
print(f" name: new_failures_{i}")
print(f" path: ivy/new_failures_{i}.txt")
print()
print(" - name: Check on failures")
print(" if: steps.tests.outcome != 'success'")
print(" run: exit 1")
print()
| ivy/scripts/generate_intelligent_tests_workflow.py/0 | {
"file_path": "ivy/scripts/generate_intelligent_tests_workflow.py",
"repo_id": "ivy",
"token_count": 1600
} | 66 |
import sys
run = int(sys.argv[1])
backends = ["numpy", "jax", "tensorflow", "torch"]
submodules = [
"activations",
"layers",
"losses",
"norms",
]
N = len(backends)
M = len(submodules)
num_tests = N * M
run = run % num_tests
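# decompose the flattened run index over the N x M (backend, submodule) grid:
# integer division selects the backend, the remainder selects the submodule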
i = run // M
j = run % M
backend = backends[i]
submodule = submodules[j]
with open("./fwsubmod.txt", "w") as outfile:
outfile.write(f"{backend}-{submodule}")
with open("./backend.txt", "w") as f:
f.write(f"{backend}")
with open("./submodule.txt", "w") as f:
f.write(f"test_{submodule}")
| ivy/scripts/setup_tests/run_ivy_nn_test.py/0 | {
"file_path": "ivy/scripts/setup_tests/run_ivy_nn_test.py",
"repo_id": "ivy",
"token_count": 250
} | 67 |
<component name="InspectionProjectProfileManager">
<profile version="1.0">
<option name="myName" value="Project Default" />
<inspection_tool class="DuplicatedCode" enabled="true" level="WEAK WARNING" enabled_by_default="true">
<Languages>
<language minSize="5420" name="Python" />
</Languages>
</inspection_tool>
<inspection_tool class="DuplicatedInspection" enabled="false" level="WARNING" enabled_by_default="false" />
<inspection_tool class="InconsistentLineSeparators" enabled="true" level="WARNING" enabled_by_default="true" />
<inspection_tool class="ProblematicWhitespace" enabled="true" level="WARNING" enabled_by_default="true" />
<inspection_tool class="PyAbstractClassInspection" enabled="false" level="WEAK WARNING" enabled_by_default="false" />
<inspection_tool class="PyAttributeOutsideInitInspection" enabled="false" level="WEAK WARNING" enabled_by_default="false" />
<inspection_tool class="PyBehaveInspection" enabled="true" level="WARNING" enabled_by_default="true" />
<inspection_tool class="PyClassicStyleClassInspection" enabled="true" level="WARNING" enabled_by_default="true" />
<inspection_tool class="PyMandatoryEncodingInspection" enabled="true" level="WARNING" enabled_by_default="true" />
<inspection_tool class="PyMissingTypeHintsInspection" enabled="true" level="WEAK WARNING" enabled_by_default="true" />
<inspection_tool class="PyNoneFunctionAssignmentInspection" enabled="false" level="WEAK WARNING" enabled_by_default="false" />
<inspection_tool class="PyPackageRequirementsInspection" enabled="true" level="WARNING" enabled_by_default="true">
<option name="ignoredPackages">
<value>
<list size="62">
<item index="0" class="java.lang.String" itemvalue="tensorflow" />
<item index="1" class="java.lang.String" itemvalue="psutil" />
<item index="2" class="java.lang.String" itemvalue="pandas" />
<item index="3" class="java.lang.String" itemvalue="setproctitle" />
<item index="4" class="java.lang.String" itemvalue="pystrict" />
<item index="5" class="java.lang.String" itemvalue="numpy" />
<item index="6" class="java.lang.String" itemvalue="pytest" />
<item index="7" class="java.lang.String" itemvalue="moviepy" />
<item index="8" class="java.lang.String" itemvalue="git" />
<item index="9" class="java.lang.String" itemvalue="tensorflow_probability" />
<item index="10" class="java.lang.String" itemvalue="tensorflow-addons" />
<item index="11" class="java.lang.String" itemvalue="numpy-stl" />
<item index="12" class="java.lang.String" itemvalue="scipy" />
<item index="13" class="java.lang.String" itemvalue="stl" />
<item index="14" class="java.lang.String" itemvalue="tensorflow_addons" />
<item index="15" class="java.lang.String" itemvalue="tensorflow_io" />
<item index="16" class="java.lang.String" itemvalue="pyrealsense2" />
<item index="17" class="java.lang.String" itemvalue="opencv-python" />
<item index="18" class="java.lang.String" itemvalue="google_images_download" />
<item index="19" class="java.lang.String" itemvalue="pathos" />
<item index="20" class="java.lang.String" itemvalue="torch" />
<item index="21" class="java.lang.String" itemvalue="mxnet" />
<item index="22" class="java.lang.String" itemvalue="cv2" />
<item index="23" class="java.lang.String" itemvalue="lxml" />
<item index="24" class="java.lang.String" itemvalue="multipledispatch" />
<item index="25" class="java.lang.String" itemvalue="ivy" />
<item index="26" class="java.lang.String" itemvalue="matplotlib" />
<item index="27" class="java.lang.String" itemvalue="open3d" />
<item index="28" class="java.lang.String" itemvalue="pyrep" />
<item index="29" class="java.lang.String" itemvalue="gym" />
<item index="30" class="java.lang.String" itemvalue="tonic" />
<item index="31" class="java.lang.String" itemvalue="box2d-py" />
<item index="32" class="java.lang.String" itemvalue="ray" />
<item index="33" class="java.lang.String" itemvalue="hyperopt" />
<item index="34" class="java.lang.String" itemvalue="dm-tree" />
<item index="35" class="java.lang.String" itemvalue="tabulate" />
<item index="36" class="java.lang.String" itemvalue="GPUtil" />
<item index="37" class="java.lang.String" itemvalue="tensorboardX" />
<item index="38" class="java.lang.String" itemvalue="cffi" />
<item index="39" class="java.lang.String" itemvalue="tf-agents" />
<item index="40" class="java.lang.String" itemvalue="gitpython" />
<item index="41" class="java.lang.String" itemvalue="h5py" />
<item index="42" class="java.lang.String" itemvalue="cupy" />
<item index="43" class="java.lang.String" itemvalue="PIL" />
<item index="44" class="java.lang.String" itemvalue="jax" />
<item index="45" class="java.lang.String" itemvalue="mxboard" />
<item index="46" class="java.lang.String" itemvalue="tensorflow-probability" />
<item index="47" class="java.lang.String" itemvalue="lz4" />
<item index="48" class="java.lang.String" itemvalue="tensorflow-io-nightly" />
<item index="49" class="java.lang.String" itemvalue="imageio" />
<item index="50" class="java.lang.String" itemvalue="flax" />
<item index="51" class="java.lang.String" itemvalue="cupy-cuda102" />
<item index="52" class="java.lang.String" itemvalue="torchvision" />
<item index="53" class="java.lang.String" itemvalue="absl" />
<item index="54" class="java.lang.String" itemvalue="GLU" />
<item index="55" class="java.lang.String" itemvalue="pyglet" />
<item index="56" class="java.lang.String" itemvalue="Pillow" />
<item index="57" class="java.lang.String" itemvalue="pyquaternion" />
<item index="58" class="java.lang.String" itemvalue="ivy" />
<item index="59" class="java.lang.String" itemvalue="ivy-vision" />
<item index="60" class="java.lang.String" itemvalue="ivy-mech" />
<item index="61" class="java.lang.String" itemvalue="tensorflow-io" />
</list>
</value>
</option>
</inspection_tool>
<inspection_tool class="PyPep8Inspection" enabled="true" level="WEAK WARNING" enabled_by_default="true">
<option name="ignoredErrors">
<list>
<option value="E127" />
<option value="E741" />
<option value="E402" />
<option value="E501" />
<option value="W605" />
<option value="E722" />
<option value="E731" />
</list>
</option>
</inspection_tool>
<inspection_tool class="PyPep8NamingInspection" enabled="false" level="WEAK WARNING" enabled_by_default="false">
<option name="ignoredErrors">
<list>
<option value="N806" />
<option value="N803" />
<option value="N802" />
<option value="N801" />
</list>
</option>
</inspection_tool>
<inspection_tool class="PyShadowingBuiltinsInspection" enabled="true" level="WEAK WARNING" enabled_by_default="true">
<option name="ignoredNames">
<list>
<option value="copyright" />
<option value="round" />
<option value="abs" />
</list>
</option>
</inspection_tool>
<inspection_tool class="PyTypeCheckerInspection" enabled="false" level="WARNING" enabled_by_default="false" />
<inspection_tool class="PyUnresolvedReferencesInspection" enabled="true" level="WARNING" enabled_by_default="true">
<option name="ignoredIdentifiers">
<list>
<option value="builtins.abs.*" />
<option value="object.__add__" />
<option value="array.shape" />
<option value="mxnet.ndarray.shape" />
<option value="mxnet.symbol.shape" />
<option value="list.asnumpy" />
<option value="bool.*" />
<option value="demos.create_doc_images.Simulator._default_vision_sensor" />
</list>
</option>
</inspection_tool>
<inspection_tool class="PyUnusedLocalInspection" enabled="false" level="WEAK WARNING" enabled_by_default="false" />
<inspection_tool class="RestRoleInspection" enabled="true" level="WARNING" enabled_by_default="true">
<option name="ignoredRoles">
<value>
<list size="0" />
</value>
</option>
</inspection_tool>
<inspection_tool class="SSBasedInspection" enabled="true" level="WARNING" enabled_by_default="true" />
<inspection_tool class="ShellCheck" enabled="true" level="ERROR" enabled_by_default="true">
<shellcheck_settings value="SC2046,SC2124,SC2145" />
</inspection_tool>
<inspection_tool class="SmkPyUnboundLocalVariableInspection" enabled="false" level="WARNING" enabled_by_default="false" />
<inspection_tool class="SpellCheckingInspection" enabled="false" level="TYPO" enabled_by_default="false">
<option name="processCode" value="true" />
<option name="processLiterals" value="true" />
<option name="processComments" value="true" />
</inspection_tool>
</profile>
</component>
| ivy/.idea/inspectionProfiles/Project_Default.xml/0 | {
"file_path": "ivy/.idea/inspectionProfiles/Project_Default.xml",
"repo_id": "ivy",
"token_count": 3940
} | 0 |
include requirements/requirements.txt
include ivy/compiler/utils/*.so
include ivy/compiler/*.so
include ivy/compiler/*.py
include binaries.json
include available_configs.json
include wrappers.json
| ivy/MANIFEST.in/0 | {
"file_path": "ivy/MANIFEST.in",
"repo_id": "ivy",
"token_count": 59
} | 1 |
# flake8: noqa
import os
import subprocess
import sys
import json
def directory_generator(req, base="/opt/fw/"):
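    # each entry is either "pkg/version" (installed to base/pkg/version) or a
    # bare package name (installed to base/<name> at the latest version)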
for versions in req:
if "/" in versions:
pkg, ver = versions.split("/")
path = base + pkg + "/" + ver
if not os.path.exists(path):
install_pkg(path, pkg + "==" + ver)
else:
install_pkg(base + versions, versions)
def install_pkg(path, pkg, base="fw/"):
    if (pkg.split("==")[0] if "==" in pkg else pkg) == "torch":
subprocess.run(
f"pip3 install --upgrade {pkg} --target {path} --default-timeout=100"
" --extra-index-url https://download.pytorch.org/whl/cpu "
" --no-cache-dir",
shell=True,
)
elif pkg.split("==")[0] == "jax":
subprocess.run(
f"pip install --upgrade {pkg} --target {path} -f"
" https://storage.googleapis.com/jax-releases/jax_releases.html "
" --no-cache-dir",
shell=True,
)
else:
subprocess.run(
f"pip3 install --upgrade {pkg} --target {path} --default-timeout=100 "
" --no-cache-dir",
shell=True,
)
def install_deps(pkgs, path_to_json, base="/opt/fw/"):
for fw in pkgs:
fw, ver = fw.split("/")
path = base + fw + "/" + ver
# check to see if this pkg has specific version dependencies
with open(path_to_json, "r") as file:
json_data = json.load(file)
for keys in json_data[fw]:
# check if key is dict
if isinstance(keys, dict):
                    # the dict has a single key: the dependency name
                    dep = list(keys.keys())[0]
                    # check whether this framework version pins a specific
                    # version of the dependency
                    if ver in keys[dep].keys():
subprocess.run(
"pip3 install --upgrade"
f" {dep}=={keys[dep][ver]} --target"
f" {path} --default-timeout=100 --upgrade --no-cache-dir",
shell=True,
)
else:
subprocess.run(
f"pip3 install {dep} --target"
f" {path} --default-timeout=100 --no-cache-dir",
shell=True,
)
else:
subprocess.run(
"pip3 install "
f" {keys} {f'-f https://data.pyg.org/whl/torch-{ver}%2Bcpu.html' if keys=='torch-scatter' else ''} --target"
f" {path} --default-timeout=100 --no-cache-dir",
shell=True,
)
if __name__ == "__main__":
arg_lis = sys.argv
json_path = os.path.join( # path to the json file storing version specific deps
os.path.dirname(os.path.realpath(sys.argv[0])),
"requirement_mappings_multiversion.json",
)
directory_generator(arg_lis[1:])
install_deps(arg_lis[1:], json_path)
| ivy/docker/multiversion_framework_directory.py/0 | {
"file_path": "ivy/docker/multiversion_framework_directory.py",
"repo_id": "ivy",
"token_count": 1763
} | 2 |
Contributing
============
.. _`issues`: https://github.com/unifyai/ivy/issues
.. _`pull-requests`: https://github.com/unifyai/ivy/pulls
We **strongly** welcome contributions and/or any form of engagement from absolutely anyone in the community, regardless of skill level!
Whether you're a veteran developer 🥷 or a total beginner 🤷, everyone is welcome to create `issues`_ and create `pull-requests`_.
If you're new to any aspects of open-source development, we'll guide you through the process.
We want our ML unification journey to be as inclusive as possible, this is all only possible with a big team effort, and all are totally welcome on board for our journey! 🙂
The contributor guide is split into the sections below, it's best to go from start to finish, but you can also dive in at any stage! We're excited for you to get involved! 🦾
| (a) `Setting Up <contributing/setting_up.rst>`_
| Building the right environment 🏛️
|
| (b) `The Basics <contributing/the_basics.rst>`_
| Managing your fork 🍴, creating issues ⭕, and creating pull-requests ⬆️
|
| (c) `Open Tasks <contributing/open_tasks.rst>`_
| See where you can help us out! 🙋
|
| (d) `Contributor Rewards <contributing/contributor_rewards.rst>`_
| Check out our contributor badges and achievements! 🏅
|
| (e) `Contributor Program <contributing/volunteer_program.rst>`_
| How to climb up the ranks in our Contributor program 🧗
|
| (f) `Building the Docs <contributing/building_the_docs.rst>`_
| How to build the documentation locally 🏗️
|
| (g) `Deep Dive <deep_dive.rst>`_
| Take a deep dive into the codebase 🤿
|
| (h) `Helpful Resources <contributing/helpful_resources.rst>`_
| Resources you would find useful when learning Ivy 📖
|
| (i) `Error Handling <contributing/error_handling.rst>`_
| Common errors you will be facing contributing to Ivy ❌
.. toctree::
:hidden:
:maxdepth: -1
:caption: Contributing
contributing/setting_up.rst
contributing/the_basics.rst
contributing/building_the_docs.rst
Deep Dive <https://unify.ai/docs/ivy/overview/deep_dive.html>
contributing/open_tasks.rst
contributing/volunteer_program.rst
contributing/contributor_rewards.rst
contributing/helpful_resources.rst
contributing/error_handling.rst
**Video**
.. raw:: html
<iframe width="420" height="315" allow="fullscreen;"
src="https://www.youtube.com/embed/HF-ZLF23g38" class="video" allowfullscreen="true">
</iframe>
| ivy/docs/overview/contributing.rst |
Data Types
==========
.. _`Array API Standard`: https://data-apis.org/array-api/latest/
.. _`backend setting`: https://github.com/unifyai/ivy/blob/1eb841cdf595e2bb269fce084bd50fb79ce01a69/ivy/backend_handler.py#L204
.. _`infer_dtype`: https://github.com/unifyai/ivy/blob/1eb841cdf595e2bb269fce084bd50fb79ce01a69/ivy/func_wrapper.py#L249
.. _`import time`: https://github.com/unifyai/ivy/blob/9c2eb725387152d721040d8638c8f898541a9da4/ivy/__init__.py#L225
.. _`ivy.Dtype`: https://github.com/unifyai/ivy/blob/48c70bce7ff703d817e130a17f63f02209be08ec/ivy/__init__.py#L65
.. _`empty class`: https://github.com/unifyai/ivy/blob/9c2eb725387152d721040d8638c8f898541a9da4/ivy/__init__.py#L38
.. _`also specified`: https://github.com/unifyai/ivy/blob/9c2eb725387152d721040d8638c8f898541a9da4/ivy/__init__.py#L241
.. _`tuples`: https://github.com/unifyai/ivy/blob/9c2eb725387152d721040d8638c8f898541a9da4/ivy/__init__.py#L256
.. _`valid tuples`: https://github.com/unifyai/ivy/blob/9c2eb725387152d721040d8638c8f898541a9da4/ivy/__init__.py#L303
.. _`invalid tuples`: https://github.com/unifyai/ivy/blob/9c2eb725387152d721040d8638c8f898541a9da4/ivy/__init__.py#L309
.. _`data type class`: https://github.com/unifyai/ivy/blob/a594075390532d2796a6b649785b93532aee5c9a/ivy/functional/backends/torch/__init__.py#L14
.. _`true native data types`: https://github.com/unifyai/ivy/blob/a594075390532d2796a6b649785b93532aee5c9a/ivy/functional/backends/torch/__init__.py#L16
.. _`valid data types`: https://github.com/unifyai/ivy/blob/a594075390532d2796a6b649785b93532aee5c9a/ivy/functional/backends/torch/__init__.py#L29
.. _`invalid data types`: https://github.com/unifyai/ivy/blob/a594075390532d2796a6b649785b93532aee5c9a/ivy/functional/backends/torch/__init__.py#L56
.. _`original definition`: https://github.com/unifyai/ivy/blob/a594075390532d2796a6b649785b93532aee5c9a/ivy/__init__.py#L225
.. _`new definition`: https://github.com/unifyai/ivy/blob/a594075390532d2796a6b649785b93532aee5c9a/ivy/functional/backends/torch/__init__.py#L16
.. _`handled`: https://github.com/unifyai/ivy/blob/a594075390532d2796a6b649785b93532aee5c9a/ivy/backend_handler.py#L194
.. _`data_type.py`: https://github.com/unifyai/ivy/blob/8482eb3fcadd0721f339a1a55c3f3b9f5c86d8ba/ivy/functional/ivy/data_type.py
.. _`ivy.can_cast`: https://github.com/unifyai/ivy/blob/8482eb3fcadd0721f339a1a55c3f3b9f5c86d8ba/ivy/functional/ivy/data_type.py#L246
.. _`ivy.default_dtype`: https://github.com/unifyai/ivy/blob/8482eb3fcadd0721f339a1a55c3f3b9f5c86d8ba/ivy/functional/ivy/data_type.py#L879
.. _`ivy.set_default_dtype`: https://github.com/unifyai/ivy/blob/8482eb3fcadd0721f339a1a55c3f3b9f5c86d8ba/ivy/functional/ivy/data_type.py#L1555
.. _`repo`: https://github.com/unifyai/ivy
.. _`discord`: https://discord.gg/sXyFF8tDtm
.. _`data type thread`: https://discord.com/channels/799879767196958751/1190234670806351892
The data types supported by Ivy are as follows:
* int8
* int16
* int32
* int64
* uint8
* uint16
* uint32
* uint64
* bfloat16
* float16
* float32
* float64
* bool
* complex64
* complex128
The supported data types are all defined at `import time`_, with each of these set as an `ivy.Dtype`_ instance.
The :class:`ivy.Dtype` class derives from :class:`str`, and has simple logic in the constructor to verify that the string formatting is correct.
All data types can be queried as attributes of the :mod:`ivy` namespace, such as ``ivy.float32`` etc.
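A minimal sketch of what such a ``str``-derived class might look like (purely illustrative, with the valid strings hard-coded from the list above rather than the real import-time machinery):

.. code-block:: python

    _valid_dtype_strings = (
        "int8", "int16", "int32", "int64",
        "uint8", "uint16", "uint32", "uint64",
        "bfloat16", "float16", "float32", "float64",
        "bool", "complex64", "complex128",
    )

    class Dtype(str):
        def __new__(cls, dtype_str):
            # simple constructor-time check that the string formatting is correct
            if dtype_str not in _valid_dtype_strings:
                raise ValueError(f"{dtype_str!r} is not a valid dtype string")
            return str.__new__(cls, dtype_str)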
In addition, *native* data types are `also specified`_ at import time.
Likewise, these are all *initially* set as `ivy.Dtype`_ instances.
There is also an :class:`ivy.NativeDtype` class defined, but this is initially set as an `empty class`_.
The following `tuples`_ are also defined: ``all_dtypes``, ``all_numeric_dtypes``, ``all_int_dtypes``, ``all_float_dtypes``.
These each contain all possible data types which fall into the corresponding category.
Each of these tuples is also replicated in a new set of four `valid tuples`_ and a set of four `invalid tuples`_.
When no backend is set, all data types are assumed to be valid, and so the invalid tuples are all empty, and the valid tuples are set as equal to the original four *"all"* tuples.
However, when a backend is set, then some of these are updated.
Firstly, the :class:`ivy.NativeDtype` is replaced with the backend-specific `data type class`_.
Secondly, each of the native data types are replaced with the `true native data types`_.
Thirdly, the `valid data types`_ are updated.
Finally, the `invalid data types`_ are updated.
This leaves each of the data types unmodified, for example ``ivy.float32`` will still reference the `original definition`_ in :mod:`ivy/ivy/__init__.py`,
whereas ``ivy.native_float32`` will now reference the `new definition`_ in :mod:`/ivy/functional/backends/backend/__init__.py`.
The tuples ``all_dtypes``, ``all_numeric_dtypes``, ``all_int_dtypes`` and ``all_float_dtypes`` are also left unmodified.
Importantly, we must ensure that unsupported data types are removed from the :mod:`ivy` namespace.
For example, torch supports ``uint8``, but does not support ``uint16``, ``uint32`` or ``uint64``.
Therefore, after setting a torch backend via :code:`ivy.set_backend('torch')`, we should no longer be able to access ``ivy.uint16``.
This is `handled`_ in :func:`ivy.set_backend`.
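For instance, with a torch backend set, the valid and invalid tuples reflect this (assuming the torch dtype support described above):

.. code-block:: python

    import ivy

    ivy.set_backend('torch')
    print(ivy.valid_dtypes)    # no longer includes uint16, uint32 or uint64
    print(ivy.invalid_dtypes)  # now includes the torch-unsupported dtypes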
Data Type Module
----------------
The `data_type.py`_ module provides a variety of functions for working with data types.
A few examples include :func:`ivy.astype` which copies an array to a specified data type, :func:`ivy.broadcast_to` which broadcasts an array to a specified shape, and :func:`ivy.result_type` which returns the dtype that results from applying the type promotion rules to the arguments.
Many functions in the :mod:`data_type.py` module are *convenience* functions, which means that they do not directly modify arrays, as explained in the `Function Types <function_types.rst>`_ section.
For example, the following are all convenience functions:
`ivy.can_cast`_, which determines if one data type can be cast to another data type according to type-promotion rules, `ivy.dtype <https://github.com/unifyai/ivy/blob/8482eb3fcadd0721f339a1a55c3f3b9f5c86d8ba/ivy/functional/ivy/data_type.py#L1096>`__, which gets the data type for the input array, `ivy.set_default_dtype`_, which sets the global default data dtype, and `ivy.default_dtype`_, which returns the correct data type to use.
`ivy.default_dtype`_ is arguably the most important function.
Any function in the functional API that receives a ``dtype`` argument will make use of this function, as explained below.
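As a rough illustration of these convenience functions (the exact return values depend on the globally set defaults):

.. code-block:: python

    import ivy

    ivy.set_backend('numpy')
    print(ivy.can_cast(ivy.int32, ivy.float64))  # whether the cast is allowed
    print(ivy.default_dtype())                   # the current global default dtype
    ivy.set_default_dtype(ivy.float64)
    print(ivy.default_dtype())                   # now float64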
Data Type Promotion
-------------------
In order to ensure that the same data type is always returned when operations are performed on arrays with different data types, regardless of which backend framework is set, Ivy has its own set of data type promotion rules and corresponding functions.
These rules build directly on top of the `rules <https://data-apis.org/array-api/latest/API_specification/type_promotion.html>`_ outlined in the `Array API Standard`_.
The rules are simple: all data type promotions in Ivy should adhere to a promotion table that extends the Array API Standard `promotion table <https://github.com/unifyai/ivy/blob/7a048c1ad7193bc3033a68c1c80f0dfd5d4e74df/ivy/__init__.py#L1245-L1285>`_ using this `promotion table <https://github.com/unifyai/ivy/blob/7a048c1ad7193bc3033a68c1c80f0dfd5d4e74df/ivy/__init__.py#L1290-L1354>`_, and one of two extra `promotion tables <https://github.com/unifyai/ivy/blob/7a048c1ad7193bc3033a68c1c80f0dfd5d4e74df/ivy/__init__.py#L1356-L1400>`_ depending on the precision mode, which is explained in the following section.
In order to ensure adherence to this promotion table, many backend functions make use of the functions `ivy.promote_types <https://github.com/unifyai/ivy/blob/db96e50860802b2944ed9dabacd8198608699c7c/ivy/functional/ivy/data_type.py#L1804>`_, `ivy.type_promote_arrays <https://github.com/unifyai/ivy/blob/db96e50860802b2944ed9dabacd8198608699c7c/ivy/functional/ivy/data_type.py#L1940>`_, or `ivy.promote_types_of_inputs <https://github.com/unifyai/ivy/blob/db96e50860802b2944ed9dabacd8198608699c7c/ivy/functional/ivy/data_type.py#L2085>`_.
These functions respectively: promote the data types of the inputs and return the new data types; promote the data types of the input arrays and return new arrays; and promote the data types of the numeric or array inputs and return the new type-promoted values.
For an example of how some of these functions are used, the implementations for :func:`ivy.add` in each backend framework are as follows:
JAX:
.. code-block:: python
def add(
x1: Union[float, JaxArray],
x2: Union[float, JaxArray],
/,
*,
out: Optional[JaxArray] = None,
) -> JaxArray:
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
return jnp.add(x1, x2)
NumPy:
.. code-block:: python
@_handle_0_dim_output
def add(
x1: Union[float, np.ndarray],
x2: Union[float, np.ndarray],
/,
*,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
return np.add(x1, x2, out=out)
TensorFlow:
.. code-block:: python
def add(
x1: Union[float, tf.Tensor, tf.Variable],
x2: Union[float, tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
return tf.experimental.numpy.add(x1, x2)
PyTorch:
.. code-block:: python
def add(
x1: Union[float, torch.Tensor],
x2: Union[float, torch.Tensor],
/,
*,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
return torch.add(x1, x2, out=out)
It's important to always make use of the Ivy promotion functions as opposed to backend-specific promotion functions such as :func:`jax.numpy.promote_types`, :func:`numpy.promote_types`, :func:`tf.experimental.numpy.promote_types` and :func:`torch.promote_types`, as these will generally have promotion rules which will subtly differ from one another and from Ivy's unified promotion rules.
On the other hand, each frontend framework has its own set of rules for how data types should be promoted, and their own type promoting functions :func:`promote_types_frontend_name` and :func:`promote_types_of_frontend_name_inputs` in :mod:`ivy/functional/frontends/frontend_name/__init__.py`.
We should always use these functions in any frontend implementation, to ensure we follow exactly the same promotion rules as the frontend framework uses.
It should be noted that data type promotion is only used for unifying data types of inputs to a common one for performing various mathematical operations.
Examples shown above demonstrate the usage of the ``add`` operation.
As different data types cannot be simply summed, they are promoted to the least common type, according to the presented promotion table.
This ensures that functions always return specific and expected values, independently of the specified backend.
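To illustrate, adding arrays of different dtypes yields the same promoted result dtype on every backend (the exact dtype depends on the promotion mode described in the next section):

.. code-block:: python

    import ivy

    ivy.set_backend('torch')
    x = ivy.array([1, 2], dtype='int32')
    y = ivy.array([1.5, 2.5], dtype='float32')
    print(ivy.add(x, y).dtype)  # the same promoted dtype on any backend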
However, data type promotion is never used to increase the accuracy or precision of computations.
This is a required condition for all operations, even where upcasting could help to avoid numerical instabilities caused by underflow or overflow.
Assume that an algorithm is required to compute the inverse of a nearly singular matrix defined with the ``float32`` data type.
This operation is likely to produce numerical instabilities and generate ``inf`` or ``nan`` values.
Temporarily upcasting the input matrix to ``float64`` for computing the inverse and then downcasting back to ``float32`` may help to produce a stable result.
However, such temporary upcasting and subsequent downcasting cannot be performed, as this is not what the user expects.
Whenever the user defines data with a specific data type, they expect a certain memory footprint.
The user expects specific behaviour and memory constraints whenever they specify and use concrete data types, and those decisions should be respected.
Therefore, Ivy does not upcast specific values to improve the stability or precision of the computation.
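To make this concrete, a small illustration of the behaviour just described (:func:`ivy.inv` is used purely as an example):

.. code-block:: python

    import ivy

    ivy.set_backend('numpy')
    x = ivy.array([[1.0, 1.0], [1.0, 1.0 + 1e-7]], dtype='float32')
    # the inverse is computed entirely in float32; Ivy will not silently
    # upcast to float64 to stabilise it, so the result keeps the float32
    # memory footprint (and may therefore be inaccurate, inf or nan)
    print(ivy.inv(x).dtype)  # float32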
Precise Mode
~~~~~~~~~~~~~~~
There are cases that arise in mixed promotion (integer and float, complex and float) which aren't covered by the Array API Standard promotion table, and the appropriate mixed promotion rules differ between use cases, as observed across frameworks.
For example, TensorFlow leaves integer/floating mixed promotion undefined to make behaviour utterly predictable (at some cost to user convenience), while NumPy avoids precision loss at all costs, even if that means casting arrays to wider-than-necessary dtypes.
Precise Promotion Table
"""""""""""""""""""""""""
This table focuses on numerical accuracy at the cost of a higher memory footprint. A 16-bit signed or unsigned integer cannot be represented at full precision by a 16-bit float, which has only 10 bits of mantissa. Therefore, it might make sense to promote integers to floats represented by twice the number of bits. There are two disadvantages of this approach:
#. It still leaves ``int64`` and ``uint64`` promotion undefined, because there is no standard floating point type with enough bits of mantissa to represent their full range of values. We could relax the precision constraint and use ``float64`` as the upper bound for this case.
#. Some operations result in types that are much wider than necessary; for example mixed operations between ``uint16`` and ``float16`` would promote all the way to ``float64``, which is not ideal.
.. code-block:: python
with ivy.PreciseMode(True):
print(ivy.promote_types("float32","int32"))
# float64
Non-Precise Promotion Table
"""""""""""""""""""""""""""""""""
The advantage of this approach is that, outside of unsigned ints, it avoids all wider-than-necessary promotions: you can never get a ``float64`` output without a 64-bit input, and you can never get a ``float32`` output without a 32-bit input. This results in convenient semantics for working on accelerators while avoiding unwanted 64-bit values. This feature of giving primacy to floating point types resembles the type promotion behaviour of PyTorch.
The disadvantage of this approach is that mixed float/integer promotion is very prone to precision loss: for example, ``int64`` (with a maximum value of 9.2*10^18) can be promoted to ``float16`` (with a maximum value of 6.5*10^4), meaning most representable values will become ``inf``. However, we are fine accepting potential loss of precision (but not loss of magnitude) in mixed type promotion, as this satisfies most use cases in deep learning scenarios.
.. code-block:: python
with ivy.PreciseMode(False):
print(ivy.promote_types("float32","int32"))
# float32
Arguments in other Functions
----------------------------
All ``dtype`` arguments are keyword-only.
All creation functions include the ``dtype`` argument, for specifying the data type of the created array.
Some other non-creation functions also support the ``dtype`` argument, such as :func:`ivy.prod` and :func:`ivy.sum`, but most functions do not include it.
The non-creation functions which do support it are generally functions that involve a compounding reduction across the array, which could result in overflows, and so an explicit ``dtype`` argument is useful for handling such cases.
The ``dtype`` argument is handled in the `infer_dtype`_ wrapper, for all functions which have the decorator :code:`@infer_dtype`.
This function calls `ivy.default_dtype`_ in order to determine the correct data type.
As discussed in the `Function Wrapping <function_wrapping.rst>`_ section, this is applied to all applicable functions dynamically during `backend setting`_.
Overall, `ivy.default_dtype`_ infers the data type as follows (a short sketch follows this list):
#. if the ``dtype`` argument is provided, use this directly
#. otherwise, if an array is present in the arguments, set ``arr`` to this array.
This will then be used to infer the data type by calling :func:`ivy.dtype` on the array
#. otherwise, if a *relevant* scalar is present in the arguments, set ``arr`` to this scalar and derive the data type from this by calling either :func:`ivy.default_int_dtype` or :func:`ivy.default_float_dtype` depending on whether the scalar is an int or float.
This will either return the globally set default int data type or globally set default float data type (settable via :func:`ivy.set_default_int_dtype` and :func:`ivy.set_default_float_dtype` respectively).
An example of a *relevant* scalar is ``start`` in the function :func:`ivy.arange`, which is used to set the starting value of the returned array.
Examples of *irrelevant* scalars which should **not** be used for determining the data type are ``axis``, ``axes``, ``dims`` etc. which must be integers, and control other configurations of the function being called, with no bearing at all on the data types used by that function.
#. otherwise, if no arrays or relevant scalars are present in the arguments, then use the global default data type, which can either be an int or float data type.
This is settable via :func:`ivy.set_default_dtype`.
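Putting these steps together, a sketch of the behaviour (assuming a numpy backend, and the keyword signature used in the :func:`ivy.full` example further below):

.. code-block:: python

    import ivy

    ivy.set_backend('numpy')
    ivy.default_dtype(dtype=ivy.float16)       # 1. the explicit dtype is used directly
    ivy.default_dtype(item=ivy.array([1, 2]))  # 2. inferred from the array
    ivy.default_dtype(item=3.0)                # 3. the default float dtype, as the scalar is a float
    ivy.default_dtype()                        # 4. the global default dtype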
For the majority of functions which defer to `infer_dtype`_ for handling the data type, these steps will have been followed and the ``dtype`` argument will be populated with the correct value before the backend-specific implementation is even entered into.
Therefore, whereas the ``dtype`` argument is listed as optional in the ivy API at :mod:`ivy/functional/ivy/category_name.py`, the argument is listed as required in the backend-specific implementations at :mod:`ivy/functional/backends/backend_name/category_name.py`.
Let's take a look at the function :func:`ivy.zeros` as an example.
The implementation in :mod:`ivy/functional/ivy/creation.py` has the following signature:
.. code-block:: python
@outputs_to_ivy_arrays
@handle_out_argument
@infer_dtype
@infer_device
def zeros(
shape: Union[int, Sequence[int]],
*,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
) -> ivy.Array:
Whereas the backend-specific implementations in :mod:`ivy/functional/backends/backend_name/creation.py`
all list ``dtype`` as required.
Jax:
.. code-block:: python
def zeros(
shape: Union[int, Sequence[int]],
*,
dtype: jnp.dtype,
device: jaxlib.xla_extension.Device,
) -> JaxArray:
NumPy:
.. code-block:: python
def zeros(
shape: Union[int, Sequence[int]],
*,
dtype: np.dtype,
device: str,
) -> np.ndarray:
TensorFlow:
.. code-block:: python
def zeros(
shape: Union[int, Sequence[int]],
*,
dtype: tf.DType,
device: str,
) -> Union[tf.Tensor, tf.Variable]:
PyTorch:
.. code-block:: python
def zeros(
shape: Union[int, Sequence[int]],
*,
dtype: torch.dtype,
device: torch.device,
) -> torch.Tensor:
This makes it clear that these backend-specific functions are only entered into once the correct ``dtype`` has been determined.
However, the ``dtype`` argument for functions which don't have the :code:`@infer_dtype` decorator are **not** handled by `infer_dtype`_, and so these defaults must be handled by the backend-specific implementations themselves.
One reason for not adding :code:`@infer_dtype` to a function is because it includes *relevant* scalar arguments for inferring the data type from.
`infer_dtype`_ is not able to correctly handle such cases, and so the dtype handling is delegated to the backend-specific implementations.
For example :func:`ivy.full` doesn't have the :code:`@infer_dtype` decorator even though it has a ``dtype`` argument because of the *relevant* ``fill_value`` which cannot be correctly handled by `infer_dtype`_.
The PyTorch-specific implementation is as follows:
.. code-block:: python
def full(
shape: Union[int, Sequence[int]],
fill_value: Union[int, float],
*,
dtype: Optional[Union[ivy.Dtype, torch.dtype]] = None,
device: torch.device,
) -> Tensor:
return torch.full(
shape_to_tuple(shape),
fill_value,
dtype=ivy.default_dtype(dtype=dtype, item=fill_value, as_native=True),
device=device,
)
The implementations for all other backends follow a similar pattern to this PyTorch implementation, where the ``dtype`` argument is optional and :func:`ivy.default_dtype` is called inside the backend-specific implementation.
Supported and Unsupported Data Types
------------------------------------
Some backend functions (implemented in :mod:`ivy/functional/backends/<some_backend>`) make use of the decorators :attr:`@with_supported_dtypes` or :attr:`@with_unsupported_dtypes`, which flag the data types which this particular function does and does not support respectively for the associated backend.
Only one of these decorators can be specified for any given function.
In the case of :attr:`@with_supported_dtypes` it is assumed that all unmentioned data types are unsupported, and in the case of :attr:`@with_unsupported_dtypes` it is assumed that all unmentioned data types are supported.
The decorators take two arguments: a dictionary mapping versions of the backend framework to the corresponding unsupported dtypes, and the current version of the backend framework on the user's system.
Based on that, the version-specific unsupported dtypes and devices are set for the given function every time the function is called.
For Backend Functions:
.. code-block:: python
@with_unsupported_dtypes({"2.0.1 and below": ("float16",)}, backend_version)
def expm1(x: torch.Tensor, /, *, out: Optional[torch.Tensor] = None) -> torch.Tensor:
x = _cast_for_unary_op(x)
return torch.expm1(x, out=out)
and for frontend functions we add the corresponding framework string as the second argument instead of the version.
For Frontend Functions:
.. code-block:: python
@with_unsupported_dtypes({"2.0.1 and below": ("float16", "bfloat16")}, "torch")
def trace(input):
if "int" in input.dtype:
input = input.astype("int64")
target_type = "int64" if "int" in input.dtype else input.dtype
return ivy.astype(ivy.trace(input), target_type)
For compositional functions, the supported and unsupported data types can then be inferred automatically using the helper functions `function_supported_dtypes <https://github.com/unifyai/ivy/blob/9e71fc2b589bf8f6b7a0762602723ac084bb5d9e/ivy/functional/ivy/data_type.py#L1370>`_ and `function_unsupported_dtypes <https://github.com/unifyai/ivy/blob/9e71fc2b589bf8f6b7a0762602723ac084bb5d9e/ivy/functional/ivy/data_type.py#L1407>`_ respectively, which traverse the abstract syntax tree of the compositional function and evaluate the relevant attributes for each primary function in the composition.
The same approach applies for most stateful methods, which are themselves compositional.
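For example (using :func:`ivy.expm1` purely for illustration; the output depends on the backend and its version):

.. code-block:: python

    import ivy

    ivy.set_backend('torch')
    # query the dtypes this function does and does not support for the backend
    print(ivy.function_supported_dtypes(ivy.expm1))
    print(ivy.function_unsupported_dtypes(ivy.expm1))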
It is also possible to add supported and unsupported dtypes as a combination of both class and individual dtypes. The allowed dtype classes are: ``valid``, ``numeric``, ``float``, ``integer``, and ``unsigned``.
For example, using the decorator:
.. code-block:: python
@with_unsupported_dtypes({"2.0.1 and below": ("unsigned", "bfloat16", "float16")}, backend_version)
would consider all the unsigned integer dtypes (``uint8``, ``uint16``, ``uint32``, ``uint64``), ``bfloat16`` and ``float16`` as unsupported for the function.
In order to get the supported and unsupported devices and dtypes for a function, the corresponding documentation of that function for that specific framework can be referred to.
However, new unsupported dtypes are sometimes also discovered during testing, so it is suggested to explore both avenues.
It should be noted that :attr:`unsupported_dtypes` is different from ``ivy.invalid_dtypes`` which consists of all the data types that every function of that particular backend does not support, and so if a certain ``dtype`` is already present in the ``ivy.invalid_dtypes`` then we should not add it to the :attr:`@with_unsupported_dtypes` decorator.
Sometimes, it might be possible to support a natively unsupported data type by either
casting to a supported data type and then casting back, or explicitly handling these
data types without deferring to a backend function at all.
An example of the former is :func:`ivy.logical_not` with a tensorflow backend:
.. code-block:: python
def logical_not(
x: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
return tf.logical_not(tf.cast(x, tf.bool))
An example of the latter is :func:`ivy.abs` with a tensorflow backend:
.. code-block:: python
def abs(
x: Union[float, tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if "uint" in ivy.dtype(x):
return x
else:
return tf.abs(x)
The :code:`with_[un]supported_device_and_dtypes` decorators can be used for more specific cases where a certain
set of dtypes is not supported by a certain device.
.. code-block:: python
@with_unsupported_device_and_dtypes({"2.6.0 and below": {"cpu": ("int8", "int16", "uint8")}}, backend_version)
def gcd(
x1: Union[paddle.Tensor, int, list, tuple],
x2: Union[paddle.Tensor, float, list, tuple],
/,
*,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
x1, x2 = promote_types_of_inputs(x1, x2)
return paddle.gcd(x1, x2)
These decorators can also be used as context managers and be applied to a block of code at once or even a module, so that the decorator is applied to all the functions within that context. For example:
.. code-block:: python
# we define this function each time we use this context manager
# so that context managers can access the globals in the
# module they are being used
def globals_getter_func(x=None):
if not x:
return globals()
else:
globals()[x[0]] = x[1]
with with_unsupported_dtypes({"0.4.11 and below": ("complex",)}, backend_version):
def f1(*args,**kwargs):
pass
def f2(*args,**kwargs):
pass
from . import activations
from . import operations
In some cases, the lack of support for a particular data type by the backend function might be more difficult to handle correctly.
For example, in many cases casting to another data type will result in a loss of precision, input range, or both.
In such cases, the best solution is to simply add the data type to the :attr:`@with_unsupported_dtypes` decorator, rather than trying to implement a long and complex patch to achieve the desired behaviour.
Some cases where a data type is not supported are very subtle.
For example, ``uint8`` is not supported for :func:`ivy.prod` with a torch backend, despite :func:`torch.prod` handling ``torch.uint8`` types in the input totally fine.
The reason for this is that the `Array API Standard`_ mandates that :func:`prod` upcasts the unsigned integer return to have the same number of bits as the default integer data type.
By default, the default integer data type in Ivy is ``int32``, and so we should return an array of type ``uint32`` despite the input arrays being of type ``uint8``.
However, torch does not support ``uint32``, and so we cannot fully adhere to the requirements of the standard for ``uint8`` inputs.
Rather than breaking this rule and returning arrays of type ``uint8`` only with a torch backend, we instead opt to remove official support entirely for this combination of data type, function, and backend framework.
This will avoid all of the potential confusion that could arise if we were to have inconsistent and unexpected outputs when using officially supported data types in Ivy.
Another important point to note concerns cases where an entire dtype series is supported or unsupported. For example, if ``float16``, ``float32`` and ``float64`` are all supported or all unsupported by a framework (which could be a backend or frontend framework),
then we identify this by simply replacing the individual float dtypes with the string ``float``. The same logic is applied to other dtype series, such as ``complex``, where we simply replace all of the individual dtypes with the string ``complex``.
An example is :func:`ivy.fmin` with a tensorflow backend:
.. code-block:: python
@with_supported_dtypes({"2.13.0 and below": ("float",)}, backend_version)
def fmin(
x1: Union[tf.Tensor, tf.Variable],
x2: Union[tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
x1, x2 = promote_types_of_inputs(x1, x2)
x1 = tf.where(tf.math.is_nan(x1), x2, x1)
x2 = tf.where(tf.math.is_nan(x2), x1, x2)
ret = tf.experimental.numpy.minimum(x1, x2)
return ret
As seen in the above code, we simply use the string ``float`` instead of writing out all of the supported float dtypes.
Another example is :func:`ivy.floor_divide` with a tensorflow backend:
.. code-block:: python
@with_unsupported_dtypes({"2.13.0 and below": ("complex",)}, backend_version)
def floor_divide(
x1: Union[float, tf.Tensor, tf.Variable],
x2: Union[float, tf.Tensor, tf.Variable],
/,
*,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
return tf.experimental.numpy.floor_divide(x1, x2)
As seen in the above code, we simply use the string ``complex`` instead of writing out all of the unsupported complex dtypes.
Supported and Unsupported Data Types Attributes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In addition to the unsupported / supported data type decorators, we also have the :attr:`unsupported_dtypes` and :attr:`supported_dtypes` attributes. These attributes operate in a manner similar to the :attr:`@with_unsupported_dtypes` and :attr:`@with_supported_dtypes` decorators.
Special Case
""""""""""""
However, the major difference between the attributes and the decorators is that the attributes are set and assigned on the ivy function itself, in :mod:`ivy/functional/ivy/<ivy_functional_API>`,
while the decorators are used within the frontend (:mod:`ivy/functional/frontends/<some_frontend>`) and backend (:mod:`ivy/functional/backends/<some_backend>`) to identify the supported or unsupported data types, depending on the use case.
The attributes are set on functions that don't have a specific backend implementation for each backend; the backend is provided as a key in the attribute's dictionary on the framework-agnostic function (all ivy functions are framework-agnostic), which allows the supported or unsupported dtypes to be identified for each backend.
An example of an ivy function which does not have a specific backend implementation for each backend is the :attr:`einops_reduce` function. `This function <https://github.com/unifyai/ivy/blob/8516d3f12a8dfc4ec5f819789937d196c7e28566/ivy/functional/ivy/general.py#L1964>`_ , makes use of a third-party library :attr:`einops` which has its own backend-agnostic implementations.
The :attr:`unsupported_dtypes` and :attr:`supported_dtypes` attributes are assigned a dictionary mapping each backend framework to the relevant dtypes. Based on that, the backend-specific unsupported dtypes are set for the given function every time the function is called.
For example, we use the :attr:`unsupported_dtypes` attribute for the :attr:`einops_reduce` function within the ivy functional API as shown below:
.. code-block:: python
einops_reduce.unsupported_dtypes = {
"torch": ("float16",),
"tensorflow": ("complex",),
"paddle": ("complex", "uint8", "int8", "int16", "float16"),
}
With the above approach, we ensure that anytime the backend is set to torch, the :attr:`einops_reduce` function does not support ``float16``; likewise, complex dtypes are not supported with a tensorflow backend, and ``complex``, ``uint8``, ``int8``, ``int16`` and ``float16`` are not supported with a paddle backend.
Backend Data Type Bugs
----------------------
In some cases, the lack of support might just be a bug which will likely be resolved in a future release of the framework.
In these cases, as well as adding to the :attr:`unsupported_dtypes` attribute, we should also add a :code:`#ToDo` comment in the implementation, explaining that the support of the data type will be added as soon as the bug is fixed, with a link to an associated open issue in the framework repos included in the comment.
For example, the following code throws an error when ``dtype`` is ``torch.int32`` but not when it is ``torch.int64``.
This is tested with torch version ``1.12.1``.
This is a `known bug <https://github.com/pytorch/pytorch/issues/84530>`_:
.. code-block:: python
dtype = torch.int32 # or torch.int64
x = torch.randint(1, 10, ([1, 2, 3]), dtype=dtype)
torch.tensordot(x, x, dims=([0], [0]))
Despite ``torch.int32`` working correctly with :func:`torch.tensordot` in the vast majority of cases, our solution is to still add :code:`"int32"` into the :attr:`unsupported_dtypes` attribute, which will prevent the unit tests from failing in the CI.
We also add the following comment above the :attr:`unsupported_dtypes` attribute:
.. code-block:: python
# ToDo: re-add int32 support once
# (https://github.com/pytorch/pytorch/issues/84530) is fixed
@with_unsupported_dtypes({"2.0.1 and below": ("int32",)}, backend_version)
Similarly, the following code throws an error for torch version ``1.11.0``
but not ``1.12.1``.
.. code-block:: python
x = torch.tensor([0], dtype=torch.float32)
torch.cumsum(x, axis=0, dtype=torch.bfloat16)
Writing short-lived patches for these temporary issues would add unwarranted complexity to the backend implementations, and introduce the risk of forgetting about the patch, needlessly bloating the codebase with redundant code.
In such cases, we can explicitly flag which versions support which data types like so:
.. code-block:: python
@with_unsupported_dtypes(
{"2.0.1 and below": ("uint8", "bfloat16", "float16"), "1.12.1": ()}, backend_version
)
def cumsum(
x: torch.Tensor,
axis: int = 0,
exclusive: bool = False,
reverse: bool = False,
*,
dtype: Optional[torch.dtype] = None,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
In the above example the :code:`torch.cumsum` function undergoes changes in the unsupported dtypes from one version to another.
Starting from version :code:`1.12.1` it doesn't have any unsupported dtypes.
The decorator assigns the version specific unsupported dtypes to the function and if the current version is not found in the dictionary, then it defaults to the behaviour of the last known version.
The same workflow has been implemented for :code:`supported_dtypes`, :code:`unsupported_devices` and :code:`supported_devices`.
The slight downside of this approach is that there is less data type coverage for each version of each backend, but taking responsibility for patching this support for all versions would substantially inflate the implementational requirements for ivy, and so we have decided to opt out of this responsibility!
Data Type Casting Modes
-----------------------
As discussed earlier, many backend functions have a set of unsupported dtypes which are otherwise supported by the
backend itself. This raises the question of whether we should support these dtypes by casting them to some other, close dtype. For the most part we avoid manually casting unsupported dtypes,
as this could be seen as undesirable behaviour by some users. This is where the various dtype casting modes come in, giving users the option to automatically cast unsupported dtype operations to a supported and closely matching dtype.
There are currently four modes that accomplish this.
1. :code:`upcast_data_types`
2. :code:`downcast_data_types`
3. :code:`crosscast_data_types`
4. :code:`cast_data_types`
The :code:`upcast_data_types` mode casts an unsupported dtype to the next highest supported dtype in the same
dtype group, i.e., if the unsupported dtype encountered is :code:`uint8`, then this mode will try to upcast it to the next available supported :code:`uint` dtype. If no
higher :code:`uint` dtype is available, then no upcasting is performed. You can set this mode by calling :code:`ivy.upcast_data_types()` with an optional :code:`val` keyword argument that defaults to :code:`True`.
Similarly, :code:`downcast_data_types` tries to downcast to the next lower supported dtype in the same dtype group. No casting is performed if no lower dtype is found in the same group.
It can also be set by calling :code:`ivy.downcast_data_types()` with the optional :code:`val` keyword that defaults to boolean value :code:`True`.
:code:`crosscast_data_types` is for cases when a function doesn't support :code:`int` dtypes, but supports :code:`float` and vice-versa. In such cases,
we cast to the default supported :code:`float` dtype if it's the unsupported integer case or we cast to the default supported :code:`int` dtype if it's the unsupported :code:`float` case.
The :code:`cast_data_types` mode is the combination of the three modes discussed above. It works its way from crosscasting to upcasting and finally to downcasting, to provide support
for any unsupported dtype that a function encounters.
The decorator below shows the unsupported dtypes for :code:`expm1`; it doesn't support :code:`float16`. We will see how we can
still pass :code:`float16` arrays and watch the call succeed under the different modes.
Example of Upcasting mode:
.. code-block:: python
@with_unsupported_dtypes({"2.0.1 and below": ("float16", "complex")}, backend_version)
@handle_numpy_arrays_in_specific_backend
def expm1(x: torch.Tensor, /, *, out: Optional[torch.Tensor] = None) -> torch.Tensor:
x = _cast_for_unary_op(x)
return torch.expm1(x, out=out)
The function :code:`expm1` has :code:`float16` as one of its unsupported dtypes for version :code:`2.0.1`, which
is the version in use at the time of writing. We will see how the casting modes handle this.
.. code-block:: python
import ivy
ivy.set_backend('torch')
ret = ivy.expm1(ivy.array([1], dtype='float16')) # raises exception
ivy.upcast_data_types()
ret = ivy.expm1(ivy.array([1], dtype='float16')) # doesn't raise exception
Example of Downcasting mode:
.. code-block:: python
import ivy
ivy.set_backend('torch')
ret = ivy.expm1(ivy.array([1], dtype='float16'))  # raises exception
ivy.downcast_data_types()
ret = ivy.expm1(ivy.array([1], dtype='float16'))  # doesn't raise exception
Example of Mixed casting mode:
.. code-block:: python
import ivy
ivy.set_backend('torch')
ret = ivy.expm1(ivy.array([1], dtype='float16')) # raises exception
ivy.cast_data_types()
ret = ivy.expm1(ivy.array([1], dtype='float16')) # doesn't raise exception
Example of Cross casting mode:
.. code-block:: python
@with_unsupported_dtypes({"2.0.1 and below": ("float",)}, backend_version)
@handle_numpy_arrays_in_specific_backend
def lcm(
x1: torch.Tensor,
x2: torch.Tensor,
/,
*,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
x1, x2 = promote_types_of_inputs(x1, x2)
return torch.lcm(x1, x2, out=out)
This function doesn't support any of the :code:`float` dtypes, so we will see how cross casting mode can
enable :code:`float` dtypes to be passed here too.
.. code-block:: python
import ivy
ivy.set_backend('torch')
ret = ivy.lcm(ivy.array([1], dtype='float16'), ivy.array([1], dtype='float16'))  # raises exception
ivy.crosscast_data_types()
ret = ivy.lcm(ivy.array([1], dtype='float16'), ivy.array([1], dtype='float16'))  # doesn't raise exception
Since none of the :code:`float` dtypes are supported by the :code:`lcm` function in :code:`torch`, the inputs are
cast to the default integer dtype, i.e., :code:`int32`.
While casting modes can handle a lot of cases, they don't guarantee 100% support for the unsupported dtypes.
In cases where there is no other supported dtype available to cast to, the casting mode won't work and the function
will throw the usual error. Since casting modes simply try to cast an array or dtype to a different one that the
given function supports, they are not intended to provide optimal performance or precision, and should be avoided
if these are the prime concerns of the user.
Together, these modes provide some flexibility to users when they encounter functions that don't support a dtype which is otherwise supported by the backend. However, it should
be well understood that this may lead to loss of precision and/or an increase in memory consumption.
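Each mode setter accepts the optional :code:`val` keyword argument mentioned above, so a mode can be switched off again once it is no longer needed:

.. code-block:: python

    import ivy

    ivy.upcast_data_types()           # enable upcasting (val defaults to True)
    ivy.upcast_data_types(val=False)  # disable it again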
Superset Data Type Support
--------------------------
As explained in the superset section of the Deep Dive, we generally go for the superset of behaviour for all Ivy functions, and data type support is no exception.
Some backends like tensorflow do not support integer array inputs for certain functions.
For example :func:`tensorflow.cos` only supports non-integer values.
However, backends like torch and JAX support integer arrays as inputs.
To ensure that integer types are supported in Ivy when a tensorflow backend is set, we simply promote any integer array passed to the function to the default float dtype.
As with all superset design decisions, this behavior makes it much easier to support all frameworks in our frontends, without the need for lots of extra logic for handling integer array inputs for the frameworks which support it natively.
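A sketch of this pattern for a tensorflow backend follows (illustrative only, not the exact implementation):

.. code-block:: python

    import tensorflow as tf
    import ivy

    def cos(x, /, *, out=None):
        if "int" in ivy.dtype(x):
            # tf.cos only supports non-integer values, so promote integer
            # arrays to the default float dtype before calling it
            x = ivy.astype(x, ivy.default_float_dtype(as_native=True))
        return tf.cos(x)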
**Round Up**
This should have hopefully given you a good feel for data types, and how these are handled in Ivy.
If you have any questions, please feel free to reach out on `discord`_ in the `data type thread`_!
**Video**
.. raw:: html
<iframe width="420" height="315" allow="fullscreen;"
src="https://www.youtube.com/embed/2qOBzQdLXn4" class="video">
</iframe>
| ivy/docs/overview/deep_dive/data_types.rst |
Navigating the Code
===================
.. _`Array API Standard`: https://data-apis.org/array-api/latest/
.. _`repo`: https://github.com/unifyai/ivy
.. _`discord`: https://discord.gg/sXyFF8tDtm
.. _`navigating the code thread`: https://discord.com/channels/799879767196958751/1189905165373935616
.. _`Array API Standard convention`: https://data-apis.org/array-api/2021.12/API_specification/array_object.html#api-specification-array-object--page-root
.. _`flake8`: https://flake8.pycqa.org/en/latest/index.html
Categorization
--------------
Ivy uses the following categories taken from the `Array API Standard`_:
* `constants <https://github.com/unifyai/ivy/blob/40836963a8edfe23f00a375b63bbb5c878bfbaac/ivy/functional/ivy/constants.py>`_
* `creation <https://github.com/unifyai/ivy/blob/40836963a8edfe23f00a375b63bbb5c878bfbaac/ivy/functional/ivy/creation.py>`_
* `data_type <https://github.com/unifyai/ivy/blob/40836963a8edfe23f00a375b63bbb5c878bfbaac/ivy/functional/ivy/data_type.py>`_
* `elementwise <https://github.com/unifyai/ivy/blob/40836963a8edfe23f00a375b63bbb5c878bfbaac/ivy/functional/ivy/elementwise.py>`_
* `linear_algebra <https://github.com/unifyai/ivy/blob/40836963a8edfe23f00a375b63bbb5c878bfbaac/ivy/functional/ivy/linear_algebra.py>`_
* `manipulation <https://github.com/unifyai/ivy/blob/40836963a8edfe23f00a375b63bbb5c878bfbaac/ivy/functional/ivy/manipulation.py>`_
* `searching <https://github.com/unifyai/ivy/blob/40836963a8edfe23f00a375b63bbb5c878bfbaac/ivy/functional/ivy/searching.py>`_
* `set <https://github.com/unifyai/ivy/blob/40836963a8edfe23f00a375b63bbb5c878bfbaac/ivy/functional/ivy/set.py>`_
* `sorting <https://github.com/unifyai/ivy/blob/40836963a8edfe23f00a375b63bbb5c878bfbaac/ivy/functional/ivy/sorting.py>`_
* `statistical <https://github.com/unifyai/ivy/blob/40836963a8edfe23f00a375b63bbb5c878bfbaac/ivy/functional/ivy/statistical.py>`_
* `utility <https://github.com/unifyai/ivy/blob/40836963a8edfe23f00a375b63bbb5c878bfbaac/ivy/functional/ivy/utility.py>`_
In addition to these, we also add the following categories, used for additional functions in Ivy that are not in the `Array API Standard`_:
* `activations <https://github.com/unifyai/ivy/blob/40836963a8edfe23f00a375b63bbb5c878bfbaac/ivy/functional/ivy/activations.py>`_
* `compilation <https://github.com/unifyai/ivy/blob/40836963a8edfe23f00a375b63bbb5c878bfbaac/ivy/functional/ivy/compilation.py>`_
* `device <https://github.com/unifyai/ivy/blob/40836963a8edfe23f00a375b63bbb5c878bfbaac/ivy/functional/ivy/device.py>`_
* `general <https://github.com/unifyai/ivy/blob/40836963a8edfe23f00a375b63bbb5c878bfbaac/ivy/functional/ivy/general.py>`_
* `gradients <https://github.com/unifyai/ivy/blob/40836963a8edfe23f00a375b63bbb5c878bfbaac/ivy/functional/ivy/gradients.py>`_
* `layers <https://github.com/unifyai/ivy/blob/40836963a8edfe23f00a375b63bbb5c878bfbaac/ivy/functional/ivy/layers.py>`_
* `losses <https://github.com/unifyai/ivy/blob/40836963a8edfe23f00a375b63bbb5c878bfbaac/ivy/functional/ivy/losses.py>`_
* `meta <https://github.com/unifyai/ivy/blob/40836963a8edfe23f00a375b63bbb5c878bfbaac/ivy/functional/ivy/meta.py>`_
* `nest <https://github.com/unifyai/ivy/blob/40836963a8edfe23f00a375b63bbb5c878bfbaac/ivy/functional/ivy/nest.py>`_
* `norms <https://github.com/unifyai/ivy/blob/40836963a8edfe23f00a375b63bbb5c878bfbaac/ivy/functional/ivy/norms.py>`_
* `random <https://github.com/unifyai/ivy/blob/40836963a8edfe23f00a375b63bbb5c878bfbaac/ivy/functional/ivy/random.py>`_
Some functions that you're considering adding might overlap several of these categorizations, and in such cases you should look at the other functions included in each file, and use your best judgement for which categorization is most suitable.
We can always suggest a more suitable location when reviewing your pull request if needed 🙂
Submodule Design
----------------
Ivy is designed so that all methods are called directly from the :mod:`ivy` namespace, such as :func:`ivy.matmul`, and not :func:`ivy.some_namespace.matmul`.
Therefore, inside any of the folders :mod:`ivy.functional.ivy`, :mod:`ivy.functional.backends.some_backend`, :mod:`ivy.functional.backends.another_backend` the functions can be moved to different files or folders without breaking anything at all.
This makes it very simple to refactor and re-organize parts of the code structure in an ongoing manner.
The :code:`__init__.py` files inside each of the subfolders are very similar, importing each function via :code:`from .file_name import *` and also importing each file as a submodule via :code:`from . import file_name`.
For example, an extract from `ivy/ivy/functional/ivy/__init__.py <https://github.com/unifyai/ivy/blob/40836963a8edfe23f00a375b63bbb5c878bfbaac/ivy/functional/ivy/__init__.py>`_ is given below:
.. code-block:: python
from . import elementwise
from .elementwise import *
from . import general
from .general import *
# etc.
Ivy API
-------
All function signatures for the Ivy API are defined in the :mod:`ivy.functional.ivy` submodule.
Functions written here look something like the following, (explained in much more detail in the following sections):
.. code-block:: python
def my_func(
x: Union[ivy.Array, ivy.NativeArray],
/,
axes: Union[int, Sequence[int]],
*,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
out: Optional[ivy.Array] = None
) -> ivy.Array:
"""
Explanation of the function.
.. note::
This is an important note.
**Special Cases**
For this particular case,
- If ``x`` is ``NaN``, do something
- If ``y`` is ``-0``, do something else
- etc.
Parameters
----------
x
input array. Should have a numeric data type.
axes
the axes along which to perform the op.
dtype
array data type.
device
the device on which to place the new array.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
an array.
Examples
--------
Some examples go here
"""
return ivy.current_backend(x).my_func(x, axes, dtype=dtype, device=device, out=out)
We follow the `Array API Standard convention`_ about positional and keyword arguments.
* Positional parameters must be positional-only parameters.
Positional-only parameters have no externally-usable name.
When a method accepting positional-only parameters is called, positional arguments are mapped to these parameters based solely on their order.
* Optional parameters must be keyword-only arguments.
This convention makes it easier for us to modify functions in the future.
Keyword-only parameters will mandate the use of argument names when calling functions, and this will increase our flexibility for extending function behaviour in future releases without breaking forward compatibility.
Similar arguments can be kept together in the argument list, rather than us needing to add these at the very end to ensure positional argument behaviour remains the same.
The :code:`dtype`, :code:`device` and :code:`out` arguments are always keyword-only.
Arrays always have a type hint :code:`Union[ivy.Array, ivy.NativeArray]` in the input and :class:`ivy.Array` in the output.
All functions which produce a single array include the :code:`out` argument.
The reasons for each of these features are explained in the following sections.
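In practice, this convention means that calls look as follows (using :func:`ivy.add` and :func:`ivy.zeros` purely for illustration):

.. code-block:: python

    import ivy

    # positional-only: the array arguments cannot be passed by name
    ivy.add(ivy.array([1.0]), ivy.array([2.0]))
    # keyword-only: dtype (and device, out) must be passed by name
    ivy.zeros((2, 3), dtype=ivy.float32)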
Backend API
-----------
Code in the backend submodules such as :mod:`ivy.functional.backends.torch` should then look something like:
.. code-block:: python
def my_func(
x: torch.Tensor,
/,
axes: Union[int, Sequence[int]],
*,
dtype: torch.dtype,
device: torch.device,
out: Optional[torch.Tensor] = None
) -> torch.Tensor:
return torch.function_name(x, axes, dtype, device, out)
The :code:`dtype`, :code:`device` and :code:`out` arguments are again all keyword-only, but :code:`dtype` and :code:`device` are now required arguments, rather than optional as they were in the Ivy API.
All arrays also now have the same type hint :class:`torch.Tensor`, rather than :code:`Union[ivy.Array, ivy.NativeArray]` in the input and :class:`ivy.Array` in the output.
The backend methods also should not add a docstring.
Again, the reasons for these features are explained in the following sections.
Submodule Helper Functions
--------------------------
At times, helper functions specific to the submodule are required to:
* keep the code clean and readable
* be imported in their respective backend implementations
To have a better idea on this, let's look at an example!
**Helper in Ivy**
.. code-block:: python
# in ivy/utils/assertions.py
def check_fill_value_and_dtype_are_compatible(fill_value, dtype):
if (
not (
(ivy.is_int_dtype(dtype) or ivy.is_uint_dtype(dtype))
and isinstance(fill_value, int)
)
and not (
ivy.is_complex_dtype(dtype) and isinstance(fill_value, (float, complex))
)
and not (
ivy.is_float_dtype(dtype)
and isinstance(fill_value, (float, np.float32))
or isinstance(fill_value, bool)
)
):
raise ivy.utils.exceptions.IvyException(
f"the fill_value: {fill_value} and data type: {dtype} are not compatible"
)
In the :func:`full_like` function in :mod:`creation.py`, the types of :code:`fill_value` and :code:`dtype` have to be verified to avoid errors.
This check has to be applied to all backends, which means the related code is common and identical.
In this case, we can extract the code to be a helper function on its own, placed in its related submodule (:mod:`creation.py` here).
In this example, the helper function is named as :func:`check_fill_value_and_dtype_are_compatible`.
Then, we import this submodule-specific helper function to the respective backends, where examples for each backend is shown below.
**Jax**
.. code-block:: python
# in ivy/functional/backends/jax/creation.py
def full_like(
x: JaxArray,
/,
fill_value: Number,
*,
dtype: jnp.dtype,
device: jaxlib.xla_extension.Device,
out: Optional[JaxArray] = None,
) -> JaxArray:
ivy.utils.assertions.check_fill_value_and_dtype_are_compatible(fill_value, dtype)
return _to_device(
jnp.full_like(x, fill_value, dtype=dtype),
device=device,
)
**NumPy**
.. code-block:: python
# in ivy/functional/backends/numpy/creation.py
def full_like(
x: np.ndarray,
/,
fill_value: Number,
*,
dtype: np.dtype,
device: str,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
ivy.utils.assertions.check_fill_value_and_dtype_are_compatible(fill_value, dtype)
return _to_device(np.full_like(x, fill_value, dtype=dtype), device=device)
**TensorFlow**
.. code-block:: python
# in ivy/functional/backends/tensorflow/creation.py
def full_like(
x: Union[tf.Tensor, tf.Variable],
/,
fill_value: Number,
*,
dtype: tf.DType,
device: str,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
ivy.utils.assertions.check_fill_value_and_dtype_are_compatible(fill_value, dtype)
with tf.device(device):
return tf.experimental.numpy.full_like(x, fill_value, dtype=dtype)
.. note::
We shouldn't be enabling numpy behaviour in tensorflow as it leads to issues with the bfloat16 datatype in tensorflow implementations
**Torch**
.. code-block:: python
# in ivy/functional/backends/torch/creation.py
def full_like(
x: torch.Tensor,
/,
fill_value: Number,
*,
dtype: torch.dtype,
device: torch.device,
out: Optional[torch.Tensor] = None,
) -> torch.Tensor:
ivy.utils.assertions.check_fill_value_and_dtype_are_compatible(fill_value, dtype)
return torch.full_like(x, fill_value, dtype=dtype, device=device)
Version Unpinning
-----------------
At any point in time, Ivy's development will be predominantly focused around the latest pypi version (and all prior versions) for each of the backend frameworks.
Earlier we had our versions pinned for each framework to provide stability, but we later concluded that unpinning the versions would allow us to account for the latest breaking changes, if any, and support the latest version of each framework.
Compatibility with any prior version is tested by our multiversion testing pipeline, thus keeping us abreast of the latest changes.
This prevents our work from stagnating around a fixed version while strides are being made in the said frameworks. Multiversion testing ensures the backward compatibility of the code, while this approach ensures we support the latest changes too.
**Round Up**
This should have hopefully given you a good feel for how to navigate the Ivy codebase.
If you have any questions, please feel free to reach out on `discord`_ in the `navigating the code thread`_!
**Video**
.. raw:: html
<iframe width="420" height="315" allow="fullscreen;"
src="https://www.youtube.com/embed/67UYuLcAKbY" class="video">
</iframe>
| ivy/docs/overview/deep_dive/navigating_the_code.rst |
.. _`RWorks What does Ivy Add?`:
What does Ivy Add?
==================
.. _`Array API Standard`: https://data-apis.org/array-api
.. _`EagerPy`: https://eagerpy.jonasrauber.de/
.. _`TensorLy`: http://tensorly.org/
.. _`Thinc`: https://thinc.ai/
.. _`NeuroPod`: https://neuropod.ai/
.. _`Keras`: https://keras.io/
.. _`TensorFlow`: https://www.tensorflow.org/
.. _`torch.fx`: https://pytorch.org/docs/stable/fx.html
.. _`ONNX`: https://onnx.ai/
.. _`PyTorch`: https://pytorch.org/
.. _`JAX`: https://jax.readthedocs.io/
.. _`MLIR`: https://mlir.llvm.org/
.. _`Quansight`: https://quansight.com/
.. _`OctoML`: https://octoml.ai/
.. _`Modular`: https://www.modular.com/
.. _`Apache TVM`: https://tvm.apache.org/
.. _`discord`: https://discord.gg/sXyFF8tDtm
.. _`Flux`: https://fluxml.ai/
.. _`Julia`: https://julialang.org/
API Standards
-------------
Ivy fully adheres to the `Array API Standard`_, and we are strongly aligned with their unification vision.
Ivy is entirely complementary to the standard, implementing all of the functions defined by the standard, and populating each of them with implementations supporting a variety of frameworks.
Further into the future, we have quite lofty ambitions for Ivy to extend beyond the Python realm, and to help build the bedrock of a more general, language-agnostic, purely mathematical standardized Array API.
Given that the function signatures at the Array API level are very much mathematically bound, it is certainly possible to define this purely mathematical, language-agnostic Array API, using the Array API Standard as a starting point.
In this light, the current release of Ivy would simply be the "Python backend" of Ivy, which itself of course would contain the same framework-specific backends itself, but just pushed one level further down the backend hierarchy.
In the future, we hope that it will be possible to use this mathematical API as an intermediate representation which would enable transpilations between frameworks in *different* languages.
For example, transpiling from `PyTorch`_ to the `Flux`_ framework written in `Julia`_ would be a great feature.
However, for the time being, we are focusing exclusively on Python, in order to mitigate the risk of "biting off more than we can chew"!
Wrapper Frameworks
------------------
Ivy is itself a Python Wrapper Framework.
The biggest difference between Ivy and all others listed in the `Wrapper Frameworks <wrapper_frameworks.rst>`_ section is that Ivy supports transpilations between frameworks, while all other frameworks only enable the creation of entirely new code which itself is framework-agnostic.
There are also other more subtle differences.
For example, Ivy includes both a low level fully functional API and a high level stateful API, offering both low level control and high level convenience.
In contrast, `EagerPy`_ and `TensorLy`_ both only include functional APIs, `Thinc`_ only includes a high level stateful API, and `NeuroPod`_ only supports an even higher level wrapper for deployment.
Similar to Ivy, `Keras`_ also used to support both functional and stateful APIs, but since version 2.4 it only supports `TensorFlow`_ as a backend.
Frameworks
----------
Ivy wraps the standalone ML frameworks in Python, and enables transpilations between the various frameworks and framework versions.
It therefore extends what is possible in any of the specific individual frameworks in isolation.
Graph Tracers
-------------
Ivy’s `Tracer <../one_liners/trace>`_ exhibits similar properties to many of the framework-specific graph tracers.
Ivy’s tracer employs function tracing for computing the graph, and uses this graph as an intermediate representation during the transpilation process.
Of all the graph tracers, Ivy’s tracer is most similar to `torch.fx`_.
This is because :code:`torch.fx` also operates entirely in Python, without deferring to lower level languages for tracing and extracting the computation graph or the intermediate representation.
The main difference is that Ivy’s tracer is fully framework-agnostic; Ivy’s tracer is able to trace graphs from any framework, while framework-specific tracers are of course bound to their particular framework.
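As a rough illustration, the tracer can be invoked directly on framework-specific code. The snippet below is a minimal sketch assuming the :code:`ivy.trace_graph` entry point described in the tracer docs; exact names and signatures may differ between Ivy versions:

.. code-block:: python

    import ivy
    import torch

    # a function defined purely in terms of torch calls
    def fn(x):
        return torch.mean(torch.nn.functional.relu(x))

    x = torch.rand(4, 4)

    # trace the function into a framework-agnostic graph representation
    graph = ivy.trace_graph(fn, args=(x,))

    # the traced graph is then called just like the original function
    y = graph(x)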
Exchange Formats
----------------
The neural network exchange formats have particular relevance to Ivy, given their utility for sharing networks between frameworks.
For example, models can be exported to the `ONNX`_ format from `TensorFlow`_, and then the ONNX model can be loaded into `PyTorch`_.
This could be seen as a form of transpilation, which is one of the central goals of Ivy.
However, there are some important differences between Ivy’s approach and that of exchange formats.
Firstly, Ivy requires no third-party "buy in" whatsoever.
We take the initiative of wrapping the functional APIs of each framework and each framework version ourselves, such that Ivy can transpile between any framework and version without any need for the existing frameworks to put their own time and effort into supporting Ivy.
Additionally, Ivy also enables transpilations for training, not only for deployment.
For example, Ivy enables users to fine-tune or retrain `JAX`_ models using their `PyTorch`_ training pipeline, something which the exchange formats do not enable.
Finally, Ivy exhaustively supports the full range of array processing functions available in all frameworks, which again the exchange formats do not do.
This makes it much more broadly applicable to a wider range of applications, spanning from cutting edge deep learning to more conventional machine learning, general numerical computing, and data analytics.
Compiler Infrastructure
-----------------------
Compiler infrastructure is essential in order to enable arbitrary frameworks to support arbitrary hardware targets.
`MLIR`_, for example, has hugely simplified `TensorFlow`_'s workflow for supporting various hardware vendors in a scalable manner, with minimal code duplication.
However, while infrastructure such as MLIR at the compiler level is essential for framework developers, in its current form it cannot easily be used to guide the creation of tools that enable code transpilations between the user facing functions higher up the ML stack.
The intermediate representations used by the compiler infrastructure sit further down the stack, closer to the compilers and to the hardware.
Transpilation between frameworks requires an IR that sits between the functional APIs of the frameworks themselves, in the way that Ivy does, and this is not really the purpose of these compiler infrastructure IRs.
Ivy's unification goals are therefore complementary to the unifying goals of the various compiler infrastructure efforts.
Multi-Vendor Compiler Frameworks
--------------------------------
Multi-vendor compiler frameworks sit a bit further down the stack still, and can optionally make use of the compiler infrastructure as scaffolding.
Likewise, these greatly simplify the complex task of enabling models from any framework to be deployed on any hardware, but they do nothing to address the challenge of running code from one framework inside another framework at training time, which is the central problem Ivy addresses.
Therefore, again these efforts are complementary to Ivy's high-level unification goals.
Vendor-Specific APIs
--------------------
Likewise, vendor-specific APIs sit even further down the stack.
These enable custom operations to be defined for execution on the specified hardware, and they form an essential part of the stack.
However, again they do nothing to address the challenge of running code from one framework inside another framework at training time, which is the central problem Ivy addresses.
Vendor-Specific Compilers
-------------------------
Finally, vendor-specific compilers sit at the very bottom of the stack as far as our diagram is concerned (ignoring assembly languages, byte code etc.).
These are essential for converting models into instructions which the specific hardware can actually understand, and they also of course form a critical part of the stack.
However, again they do nothing to address the challenge of running code from one framework inside another framework at training time, which is the central problem Ivy addresses.
ML-Unifying Companies
---------------------
The ML-unifying companies `Quansight`_, `OctoML`_ and `Modular`_ are/were directly involved with the `Array API Standard`_, `Apache TVM`_ and `MLIR`_ respectively, as explained in the `ML-Unifying Companies <ml_unifying_companies.rst>`_ section.
For the same reasons that Ivy as a framework is complementary to these three frameworks, Ivy as a company is also complementary to these three companies.
Firstly, we are adhering to the `Array API Standard`_ defined by Quansight.
In essence, they have written the standard and we have implemented it, which is pretty much as complementary as it gets.
Similarly, OctoML makes it easy for anyone to *deploy* their model anywhere, while Ivy makes it easy for anyone to mix and match any code from any frameworks and versions to *train* their model anywhere.
Again very complementary objectives.
Finally, Modular will perhaps make it possible for developers to make changes at various levels of the stack when creating ML models using their infrastructure, and this would also be a great addition to the field.
Compared to Modular which focuses on the lower levels of the stack, Ivy instead unifies the ML frameworks at the functional API level, enabling code conversions to and from the user-facing APIs themselves, without diving into any of the lower level details.
All of these features are entirely complementary, and together would form a powerful suite of unifying tools for ML practitioners.
--- end of file: ivy/docs/overview/related_work/what_does_ivy_add.rst ---
# global
import abc
from numbers import Number
from typing import Optional, Union, List
# local
import ivy
# Array API Standard #
# -------------------#
class _ArrayWithCreation(abc.ABC):
def asarray(
self: ivy.Array,
/,
*,
copy: Optional[bool] = None,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.asarray. This method simply
wraps the function, and so the docstring for ivy.asarray also applies
to this method with minimal changes.
Parameters
----------
self
input data, in any form that can be converted to an array. This includes
lists, lists of tuples, tuples, tuples of tuples, tuples of lists and
ndarrays.
copy
boolean indicating whether or not to copy the input array.
If True, the function must always copy.
If False, the function must never copy and must
raise a ValueError in case a copy would be necessary.
If None, the function must reuse existing memory buffer if possible
and copy otherwise. Default: ``None``.
dtype
            datatype, optional. If not given, the datatype is inferred from the
            input data. Default: ``None``.
device
device on which to place the created array. Default: ``None``.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
An array interpretation of ``self``.
Examples
--------
With list of lists as input:
>>> ivy.asarray([[1,2],[3,4]])
ivy.array([[1, 2],
[3, 4]])
With tuple of lists as input:
>>> ivy.asarray(([1.4,5.6,5.5],[3.1,9.1,7.5]))
ivy.array([[1.39999998, 5.5999999 , 5.5 ],
[3.0999999 , 9.10000038, 7.5 ]])
With ndarray as input:
        >>> import numpy as np
        >>> x = np.ndarray(shape=(2,2), order='C')
>>> x
array([[6.90786433e-310, 6.90786433e-310],
[6.90786433e-310, 6.90786433e-310]])
>>> ivy.asarray(x)
ivy.array([[6.90786433e-310, 6.90786433e-310],
[6.90786433e-310, 6.90786433e-310]])
"""
return ivy.asarray(self._data, copy=copy, dtype=dtype, device=device, out=out)
def full_like(
self: ivy.Array,
/,
fill_value: float,
*,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.full_like. This method
simply wraps the function, and so the docstring for ivy.full_like also
applies to this method with minimal changes.
Parameters
----------
self
input array from which to derive the output array shape.
fill_value
Scalar fill value
dtype
output array data type. If ``dtype`` is `None`, the output array data type
must be inferred from ``self``. Default: ``None``.
device
device on which to place the created array. If ``device`` is ``None``, the
output array device must be inferred from ``self``. Default: ``None``.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array having the same shape as ``self`` and where every element is equal
to ``fill_value``.
Examples
--------
With :code:`int` datatype:
>>> x = ivy.array([1,2,3])
>>> fill_value = 0
>>> x.full_like(fill_value)
ivy.array([0, 0, 0])
With float datatype:
>>> fill_value = 0.000123
>>> x = ivy.array(ivy.ones(5))
>>> y = x.full_like(fill_value)
>>> print(y)
ivy.array([0.000123, 0.000123, 0.000123, 0.000123, 0.000123])
With :class:`ivy.Array` input:
>>> x = ivy.array([1, 2, 3, 4, 5, 6])
>>> fill_value = 1
>>> y = x.full_like(fill_value)
>>> print(y)
ivy.array([1, 1, 1, 1, 1, 1])
"""
return ivy.full_like(
self._data, fill_value=fill_value, dtype=dtype, device=device, out=out
)
def ones_like(
self: ivy.Array,
/,
*,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.ones_like. This method
simply wraps the function, and so the docstring for ivy.ones_like also
applies to this method with minimal changes.
Parameters
----------
self
input array from which to derive the output array shape.
dtype
output array data type. If ``dtype`` is ``None``, the output array data type
must be inferred from ``self``. Default ``None``.
device
device on which to place the created array. If device is ``None``, the
output array device must be inferred from ``self``. Default: ``None``.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array having the same shape as ``self`` and filled with ones.
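        Examples
        --------
        >>> x = ivy.array([1, 2, 3])
        >>> y = x.ones_like()
        >>> print(y)
        ivy.array([1, 1, 1])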
"""
return ivy.ones_like(self._data, dtype=dtype, device=device, out=out)
def zeros_like(
self: ivy.Array,
/,
*,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.zeros_like. This method
simply wraps the function, and so the docstring for ivy.zeros_like also
applies to this method with minimal changes.
Parameters
----------
self
input array from which to derive the output array shape.
dtype
output array data type. If ``dtype`` is ``None``, the output array data type
must be inferred from ``self``. Default: ``None``.
device
device on which to place the created array. If ``device`` is ``None``, the
output array device must be inferred from ``self``. Default: ``None``.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array having the same shape as ``self`` and filled with ``zeros``.
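        Examples
        --------
        >>> x = ivy.array([1, 2, 3])
        >>> y = x.zeros_like()
        >>> print(y)
        ivy.array([0, 0, 0])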
"""
return ivy.zeros_like(self._data, dtype=dtype, device=device, out=out)
def tril(
self: ivy.Array, /, *, k: int = 0, out: Optional[ivy.Array] = None
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.tril. This method simply
wraps the function, and so the docstring for ivy.tril also applies to
this method with minimal changes.
Parameters
----------
self
input array having shape (..., M, N) and whose innermost two dimensions form
MxN matrices.
k
diagonal above which to zero elements. If k = 0, the diagonal is the main
diagonal. If k < 0, the diagonal is below the main diagonal. If k > 0, the
diagonal is above the main diagonal. Default: ``0``.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the lower triangular part(s). The returned array must
have the same shape and data type as ``self``. All elements above the
specified diagonal k must be zeroed. The returned array should be allocated
on the same device as ``self``.
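        Examples
        --------
        >>> x = ivy.array([[1, 2, 3],
        ...                [4, 5, 6],
        ...                [7, 8, 9]])
        >>> y = x.tril()
        >>> print(y)
        ivy.array([[1, 0, 0],
                   [4, 5, 0],
                   [7, 8, 9]])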
"""
return ivy.tril(self._data, k=k, out=out)
def triu(
self: ivy.Array, /, *, k: int = 0, out: Optional[ivy.Array] = None
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.triu. This method simply
wraps the function, and so the docstring for ivy.triu also applies to
this method with minimal changes.
Parameters
----------
self
            input array having shape (..., M, N) and whose innermost two dimensions form
            MxN matrices.
k
diagonal below which to zero elements. If k = 0, the diagonal is the main
diagonal. If k < 0, the diagonal is below the main diagonal. If k > 0, the
diagonal is above the main diagonal. Default: ``0``.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the upper triangular part(s). The returned array must
have the same shape and data type as ``self``. All elements below the
specified diagonal k must be zeroed. The returned array should be allocated
on the same device as ``self``.
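        Examples
        --------
        >>> x = ivy.array([[1, 2, 3],
        ...                [4, 5, 6],
        ...                [7, 8, 9]])
        >>> y = x.triu()
        >>> print(y)
        ivy.array([[1, 2, 3],
                   [0, 5, 6],
                   [0, 0, 9]])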
"""
return ivy.triu(self._data, k=k, out=out)
def empty_like(
self: ivy.Array,
/,
*,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.empty_like. This method
simply wraps the function, and so the docstring for ivy.empty_like also
applies to this method with minimal changes.
Parameters
----------
self
input array from which to derive the output array shape.
dtype
output array data type. If dtype is None, the output array data type must be
inferred from ``self``. Default: ``None``.
device
device on which to place the created array. If device is None, the output
array device must be inferred from ``self``. Default: ``None``.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array having the same shape as ``self`` and containing uninitialized
data.
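        Examples
        --------
        >>> x = ivy.array([[1., 2.], [3., 4.]])
        >>> y = x.empty_like()  # the values of y are uninitialized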
"""
return ivy.empty_like(self._data, dtype=dtype, device=device, out=out)
def meshgrid(
self: ivy.Array,
/,
*arrays: Union[ivy.Array, ivy.NativeArray],
sparse: bool = False,
indexing: str = "xy",
) -> List[ivy.Array]:
"""ivy.Array instance method variant of ivy.meshgrid. This method
simply wraps the function, and so the docstring for ivy.meshgrid also
applies to this method with minimal changes.
Parameters
----------
self
one-dimensional input array.
arrays
an arbitrary number of one-dimensional arrays representing grid coordinates.
Each array should have the same numeric data type.
sparse
if True, a sparse grid is returned in order to conserve memory. Default:
``False``.
indexing
Cartesian ``'xy'`` or matrix ``'ij'`` indexing of output. If provided zero
or one one-dimensional vector(s) (i.e., the zero- and one-dimensional cases,
respectively), the ``indexing`` keyword has no effect and should be ignored.
Default: ``'xy'``.
Returns
-------
ret
            list of N arrays, where ``N`` is the number of provided one-dimensional
            input arrays. Each returned array must have rank ``N``. For ``N``
            one-dimensional arrays having lengths ``Ni = len(xi)``, each returned
            array must have shape ``(N1, N2, ..., Nn)`` if ``indexing='ij'``, or
            ``(N2, N1, N3, ..., Nn)`` if ``indexing='xy'``.
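        Examples
        --------
        >>> x = ivy.array([1, 2])
        >>> y = ivy.array([3, 4])
        >>> xv, yv = x.meshgrid(y, indexing='ij')
        >>> print(xv)
        ivy.array([[1, 1],
                   [2, 2]])
        >>> print(yv)
        ivy.array([[3, 4],
                   [3, 4]])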
"""
return ivy.meshgrid(self, *arrays, sparse=sparse, indexing=indexing)
def from_dlpack(
self: ivy.Array,
/,
*,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.from_dlpack. This method
simply wraps the function, and so the docstring for ivy.from_dlpack
also applies to this method with minimal changes.
Parameters
----------
self
input array.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
an array containing the data in ``self``.
"""
return ivy.from_dlpack(self._data, out=out)
# Extra #
# ----- #
def copy_array(
self: ivy.Array,
/,
*,
to_ivy_array: bool = True,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.copy_array. This method
simply wraps the function, and so the docstring for ivy.copy_array also
applies to this method with minimal changes.
Parameters
----------
self
input array
to_ivy_array
boolean, if True the returned array will be an ivy.Array object otherwise
returns an ivy.NativeArray object (i.e. a torch.tensor, np.array, etc.,
depending on the backend), defaults to True.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
            a copy of the input array ``self``.
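        Examples
        --------
        >>> x = ivy.array([1, 0, 1, 1])
        >>> y = x.copy_array()
        >>> print(y)
        ivy.array([1, 0, 1, 1])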
"""
return ivy.copy_array(self, to_ivy_array=to_ivy_array, out=out)
def native_array(
self: ivy.Array,
/,
*,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
) -> ivy.NativeArray:
"""ivy.Array instance method variant of ivy.native_array. This method
simply wraps the function, and so the docstring for ivy.native_array
also applies to this method with minimal changes.
Parameters
----------
self
input array.
dtype
            datatype, optional. If not given, the datatype is inferred from the
            input data. Default: ``None``.
device
device on which to place the created array. Default: ``None``.
Returns
-------
ret
A native array interpretation of ``self``.
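        Examples
        --------
        With a NumPy backend:
        >>> x = ivy.array([1., 2., 3.])
        >>> y = x.native_array()
        >>> print(y)
        [1. 2. 3.]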
"""
return ivy.native_array(self._data, dtype=dtype, device=device)
def one_hot(
self: ivy.Array,
depth: int,
/,
*,
on_value: Optional[Number] = None,
off_value: Optional[Number] = None,
axis: Optional[int] = None,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.one_hot. This method simply
wraps the function, and so the docstring for ivy.one_hot also applies
to this method with minimal changes.
Parameters
----------
self
input array containing the indices for which the ones should be scattered
depth
Scalar defining the depth of the one-hot dimension.
on_value
Value to fill in output when ``indices[j] == i``. Default 1.
off_value
Value to fill in output when ``indices[j] != i``. Default 0.
axis
The axis to scatter on. The default is ``-1`` which is the last axis.
dtype
The data type of the output array. If None, the data type of the on_value is
used, or if that is None, the data type of the off_value is used. Default
float32.
device
device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc.
Same as x if None.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
            Tensor containing the one-hot encodings of the indices in ``self``,
            with ``on_value`` and ``off_value`` placed along ``axis``.
Examples
--------
With :class:`ivy.Array` inputs:
>>> x = ivy.array([3, 1])
>>> y = 5
>>> z = x.one_hot(5)
>>> print(z)
ivy.array([[0., 0., 0., 1., 0.],
... [0., 1., 0., 0., 0.]])
>>> x = ivy.array([0])
>>> y = 5
>>> ivy.one_hot(x, y)
ivy.array([[1., 0., 0., 0., 0.]])
        >>> x = ivy.array([0])
        >>> z = ivy.zeros((1, 5))
        >>> ivy.one_hot(x, 5, out=z)
ivy.array([[1., 0., 0., 0., 0.]])
>>> print(z)
ivy.array([[1., 0., 0., 0., 0.]])
"""
return ivy.one_hot(
self,
depth,
on_value=on_value,
off_value=off_value,
axis=axis,
dtype=dtype,
device=device,
out=out,
)
def linspace(
self: ivy.Array,
stop: Union[ivy.Array, ivy.NativeArray, float],
/,
num: int,
*,
axis: Optional[int] = None,
endpoint: bool = True,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
        out: Optional[ivy.Array] = None,
) -> ivy.Array:
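        """ivy.Array instance method variant of ivy.linspace. This method
        simply wraps the function, and so the docstring for ivy.linspace also
        applies to this method with minimal changes.
        Parameters
        ----------
        self
            First entry in the range of the sequence.
        stop
            Final entry in the range of the sequence.
        num
            Number of values to generate.
        axis
            Axis along which the operation is performed. Relevant only if start
            or stop are array-like. Default is ``None``.
        endpoint
            If True, stop is the last sample. Otherwise, it is not included.
            Default is ``True``.
        dtype
            output array data type.
        device
            device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc.
        out
            optional output array, for writing the result to. It must have a shape
            that the inputs broadcast to.
        Returns
        -------
        ret
            Tensor of evenly-spaced values.
        Examples
        --------
        >>> x = ivy.array(0.)
        >>> y = x.linspace(1., 5)
        >>> print(y)
        ivy.array([0., 0.25, 0.5, 0.75, 1.])
        """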
return ivy.linspace(
self,
stop,
num=num,
axis=axis,
endpoint=endpoint,
dtype=dtype,
device=device,
out=out,
)
def logspace(
self: ivy.Array,
stop: Union[ivy.Array, ivy.NativeArray, float],
/,
num: int,
*,
base: float = 10.0,
axis: int = 0,
endpoint: bool = True,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.logspace. This method
simply wraps the function, and so the docstring for ivy.logspace also
applies to this method with minimal changes.
Parameters
----------
self
First value in the range in log space. base ** start is the starting value
in the sequence. Can be an array or a float.
stop
Last value in the range in log space. base ** stop is the final value in the
sequence. Can be an array or a float.
num
Number of values to generate.
base
The base of the log space. Default is 10.0
axis
Axis along which the operation is performed. Relevant only if start or stop
are array-like. Default is 0.
endpoint
If True, stop is the last sample. Otherwise, it is not included. Default is
True.
dtype
            The data type of the output tensor. If None, the data type is
            inferred from the input arrays. Default is None.
device
device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc. Default
is None.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to. Default is None.
Returns
-------
ret
Tensor of evenly-spaced values in log space.
            Both the description and the type hints above assume an array input
            for simplicity, but this function is *nestable*, and therefore also
            accepts :class:`ivy.Container` instances in place of any of the
            arguments.
Examples
--------
With float input:
>>> x = ivy.array([1, 2])
>>> y = ivy.array([4, 5])
>>> x.logspace(y, 4)
        ivy.array([[1.e+01, 1.e+02],
                   [1.e+02, 1.e+03],
                   [1.e+03, 1.e+04],
                   [1.e+04, 1.e+05]])
>>> x.logspace(y, 4, axis = 1)
ivy.array([[[1.e+01, 1.e+02, 1.e+03, 1.e+04],
[1.e+02, 1.e+03, 1.e+04, 1.e+05]]])
>>> x = ivy.array([1, 2])
>>> y = ivy.array([4]) # Broadcasting example
>>> x.logspace(y, 4)
        ivy.array([[10., 100.],
                   [100., 464.15888336],
                   [1000., 2154.43469003],
                   [10000., 10000.]])
"""
return ivy.logspace(
self,
stop,
num=num,
base=base,
axis=axis,
endpoint=endpoint,
dtype=dtype,
device=device,
out=out,
)
--- end of file: ivy/ivy/data_classes/array/creation.py ---
# global
import abc
from typing import Optional, Union
# local
import ivy
class _ArrayWithLossesExperimental(abc.ABC):
def l1_loss(
self: Union[ivy.Array, ivy.NativeArray],
target: Union[ivy.Array, ivy.NativeArray],
/,
*,
reduction: Optional[str] = "mean",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.l1_loss. This method simply
wraps the function, and so the docstring for ivy.l1_loss also applies
to this method with minimal changes.
Parameters
----------
self
input array containing true labels.
target
input array containing targeted labels.
reduction
``'mean'``: The output will be averaged.
``'sum'``: The output will be summed.
``'none'``: No reduction will be applied to the output. Default: ``'mean'``.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
            The L1 loss between the input array and the target values.
Examples
--------
>>> x = ivy.array([1.0, 2.0, 3.0])
>>> y = ivy.array([0.7, 1.8, 2.9])
>>> z = x.l1_loss(y)
>>> print(z)
ivy.array(0.20000000000000004)
"""
return ivy.l1_loss(self._data, target, reduction=reduction, out=out)
def log_poisson_loss(
self: Union[ivy.Array, ivy.NativeArray],
target: Union[ivy.Array, ivy.NativeArray],
/,
*,
compute_full_loss: bool = False,
axis: int = -1,
reduction: str = "none",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.log_poisson_loss. This
        method simply wraps the function, and so the docstring for
        ivy.log_poisson_loss also applies to this method with minimal changes.
Parameters
----------
self
input array containing true labels.
target
input array containing targeted labels.
compute_full_loss
whether to compute the full loss. If false, a constant term is dropped
in favor of more efficient optimization. Default: ``False``.
axis
the axis along which to compute the log-likelihood loss. If axis is ``-1``,
the log-likelihood loss will be computed along the last dimension.
Default: ``-1``.
reduction
``'none'``: No reduction will be applied to the output.
``'mean'``: The output will be averaged.
``'sum'``: The output will be summed. Default: ``'none'``.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
The binary log-likelihood loss between the given distributions.
Examples
--------
>>> x = ivy.array([0, 0, 1, 0])
>>> y = ivy.array([0.25, 0.25, 0.25, 0.25])
>>> loss = x.log_poisson_loss(y)
>>> print(loss)
ivy.array([1.28402555, 1.28402555, 1.03402555, 1.28402555])
>>> z = ivy.array([0.1, 0.1, 0.7, 0.1])
>>> loss = x.log_poisson_loss(z, reduction='mean')
>>> print(loss)
ivy.array(1.1573164)
"""
return ivy.log_poisson_loss(
self._data,
target,
compute_full_loss=compute_full_loss,
axis=axis,
reduction=reduction,
out=out,
)
def huber_loss(
self: ivy.Array,
target: Union[ivy.Array, ivy.NativeArray],
/,
*,
reduction: Optional[str] = "mean",
delta: Optional[float] = 1.0,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of huber_loss. This method simply
wraps the function, and so the docstring for huber_loss also applies to
this method with minimal changes.
Parameters
----------
self
input array containing true labels.
target
input array containing targeted labels.
reduction : str, optional
The type of reduction to apply to the loss.
Possible values are "mean" (default)
and "sum".
delta
The threshold parameter that determines the point where the loss transitions
from squared error to absolute error. Default is 1.0.
out
Optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
The Huber loss between the true and predicted values.
Examples
--------
>>> true = ivy.array([2, 4, 7, 1])
>>> pred = ivy.array([2.5, 3.5, 8, 0.8])
        >>> loss = true.huber_loss(pred, delta=1.0, reduction='none')
        >>> print(loss)
        ivy.array([0.125, 0.125, 0.5, 0.02])
"""
return ivy.huber_loss(
self._data, target, reduction=reduction, delta=delta, out=out
)
def smooth_l1_loss(
self: ivy.Array,
target: Union[ivy.Array, ivy.NativeArray],
/,
*,
beta: Optional[float] = 1.0,
reduction: Optional[str] = "mean",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy. smooth_l1_loss. This
method simply wraps the function, and so the docstring for
ivy.smooth_l1_loss also applies to this method with minimal changes.
Parameters
----------
self
input array containing true labels.
target
input array containing targeted labels.
beta
A float specifying the beta value for
the smooth L1 loss. Default: 1.0.
reduction
Reduction method for the loss.
Options are 'none', 'mean', or 'sum'.
Default: 'mean'.
out
Optional output array, for writing the result to.
It must have a shape
that the inputs broadcast to.
Returns
-------
ret
The smooth L1 loss between the given labels.
Examples
--------
>>> x = ivy.array([1, 2, 3, 4])
>>> y = ivy.array([2, 2, 2, 2])
>>> z = x.smooth_l1_loss(y, beta=0.5)
>>> print(z)
ivy.array(0.8125)
"""
return ivy.smooth_l1_loss(
self._data, target, beta=beta, reduction=reduction, out=out
)
def soft_margin_loss(
self: ivy.Array,
target: Union[ivy.Array, ivy.NativeArray],
/,
*,
reduction: Optional[str] = "mean",
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.soft_margin_loss. This
method simply wraps the function, and so the docstring for
ivy.soft_margin_loss also applies to this method with minimal changes.
Parameters
----------
self
input array containing true labels.
target
input array containing targeted labels.
reduction
``'none'``: No reduction will be applied to the output.
``'mean'``: The output will be averaged.
            ``'sum'``: The output will be summed. Default: ``'mean'``.
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
            The soft margin loss between the true and target labels.
Examples
--------
>>> x = ivy.array([1, 1, 0])
>>> y = ivy.array([0.7, 0.8, 0.2])
        >>> z = x.soft_margin_loss(y)
        >>> print(z)
        ivy.array(0.48914463)
"""
return ivy.soft_margin_loss(self._data, target, reduction=reduction, out=out)
def kl_div(
self: ivy.Array,
target: Union[ivy.Array, ivy.NativeArray],
/,
*,
reduction: Optional[str] = "mean",
log_target=False,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.kl_div. This method simply
wraps the function, and so the docstring for ivy.kl_div also applies to
this method with minimal changes.
Parameters
----------
self
Array containing input probability distribution.
target
            Array containing target probability distribution.
reduction
'none': No reduction will be applied to the output.
'mean': The output will be averaged.
'batchmean': The output will be divided by batch size.
'sum': The output will be summed.
Default: 'mean'.
out
Optional output array, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
The Kullback-Leibler divergence loss between the two input arrays.
Examples
--------
        >>> input = ivy.array([[0.2, 0.8], [0.5, 0.5]])
        >>> target = ivy.array([[0.6, 0.4], [0.3, 0.7]])
>>> output_array = input.kl_div(target)
>>> print(output_array)
ivy.array(0.0916)
"""
return ivy.kl_div(
self._data, target, reduction=reduction, log_target=log_target, out=out
)
def poisson_nll_loss(
self: Union[ivy.Array, ivy.NativeArray],
target: Union[ivy.Array, ivy.NativeArray],
*,
log_input: bool = True,
full: bool = False,
eps: float = 1e-8,
reduction: str = "mean",
) -> ivy.Array:
r"""Compute the Poisson Negative Log Likelihood Loss.
This function calculates the negative log likelihood loss
        between the `input` and `target` under the assumption that
the target follows a Poisson distribution. By default, the loss
is not the exact loss, but the loss minus a constant term [log(z!)].
This omission does not affect optimization but can be significant for
        relative loss comparisons. Stirling's approximation is used to
        approximate the log factorial term when `full` is set to True.
Parameters
----------
input
Expectation of the underlying Poisson distribution.
target
Random sample from the Poisson distribution described by the input.
log_input
If `True`, the loss is computed as
:math:`exp(input) - target * input`. If `False`, the loss is computed as
:math:`input - target * log(input + eps)`. Default is `True`.
full
Whether to compute the full loss, i.e.,
to add the Stirling approximation term
:math:`target * log(target) - target + 0.5 * log(2 * pi * target)`.
Default is `False`.
eps
Small value to prevent evaluation of `log(0)` when `log_input` is `False`.
Default is 1e-8.
reduction
Specifies the reduction applied to the output.
Options are 'none', 'mean', or 'sum'.
'none': no reduction will be applied.
'mean': the output will be averaged.
'sum': the output will be summed.
Default is 'mean'.
Returns
-------
ret
An array of the same shape as `input` representing the
Poisson Negative Log Likelihood Loss.
Raises
------
ValueError
If the `input` and `target` tensors do not have the same shape.
Examples
--------
>>> input_tensor = ivy.array([1, 2, 3, 4], dtype=ivy.float64)
>>> target_tensor = ivy.array([2, 2, 2, 2], dtype=ivy.float64)
>>> loss = input_tensor.poisson_nll_loss(target_tensor, log_input=True)
>>> print(loss)
ivy.array(16.1977562)
"""
return ivy.poisson_nll_loss(
self._data,
target,
log_input=log_input,
full=full,
eps=eps,
reduction=reduction,
)
def hinge_embedding_loss(
self: Union[ivy.Array, ivy.NativeArray],
target: Union[ivy.Array, ivy.NativeArray],
*,
margin: float = 1.0,
reduction: str = "mean",
) -> ivy.Array:
r"""Measures loss from input `x` and label `y` with values 1 or -1. It
evaluates if two inputs are similar or not, often used for embedding or
semi-supervised learning.
Loss for the `n`-th sample:
.. math::
l_n = \begin{cases}
x_n, & \text{if}\; y_n = 1,\\
\max \{0, margin - x_n\}, & \text{if}\; y_n = -1,
\end{cases}
Total loss:
.. math::
\ell(x, y) = \begin{cases}
\operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
\operatorname{sum}(L), & \text{if reduction} = \text{`sum'.}
\end{cases}
where :math:`L = \{l_1,\dots,l_N\}^\top`
Parameters
----------
input
Input tensor with dtype float.
The shape is [N, \*], where N is batch size and `\*` represents
any number of additional dimensions.
        target
            Target tensor containing 1 or -1 with dtype float32 or float64.
            Its shape matches that of the input.
        margin
            Sets the hyperparameter margin. When the target is -1, inputs smaller
            than the margin contribute ``margin - x`` to the loss, while inputs
            greater than or equal to the margin contribute zero.
            Default is 1.0.
reduction
Specifies how to aggregate the loss across the batch. Options are:
- ``'none'``: Returns the unreduced loss.
- ``'mean'``: Returns the mean loss.
- ``'sum'``: Returns the summed loss.
Default is ``'mean'``.
Shape
-----
- Input: :math:`(*)` where :math:`*` means, any number of dimensions. \
The sum operation operates over all the elements.
- Target: :math:`(*)`, same shape as the input
- Output: scalar. If :attr:`reduction` is ``'none'``,
then same shape as the input
Returns
-------
ret
Hinge embedding loss calculated from the input and label,
shaped based on the reduction method.
Examples
--------
>>> input_tensor = ivy.array([1, 2, 3, 4], dtype=ivy.float64)
>>> target_tensor = ivy.array([1, 1, 1, 1], dtype=ivy.float64)
>>> input_tensor.hinge_embedding_loss(target_tensor,reduction="sum")
ivy.array(10.)
>>> input_tensor = ivy.array([1, 2, 3], dtype=ivy.float64)
>>> target_tensor = ivy.array([1, -1, -1], dtype=ivy.float64)
>>> input_tensor.hinge_embedding_loss(target_tensor, margin=2.0)
ivy.array(0.33333333)
"""
return ivy.hinge_embedding_loss(
self._data,
target,
margin=margin,
reduction=reduction,
)
--- end of file: ivy/ivy/data_classes/array/experimental/losses.py ---
# global
from typing import Optional, List, Union
import abc
# local
import ivy
# ToDo: implement all methods here as public instance methods
class _ArrayWithNorms(abc.ABC):
def layer_norm(
self: ivy.Array,
normalized_idxs: List[int],
/,
*,
scale: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
offset: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
eps: float = 1e-05,
new_std: float = 1.0,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""ivy.Array instance method variant of ivy.layer_norm. This method
simply wraps the function, and so the docstring for ivy.layer_norm also
applies to this method with minimal changes.
Parameters
----------
self
Input array
normalized_idxs
Indices to apply the normalization to.
scale
Learnable gamma variables for elementwise post-multiplication,
default is ``None``.
offset
Learnable beta variables for elementwise post-addition, default is ``None``.
eps
small constant to add to the denominator. Default is ``1e-05``.
new_std
The standard deviation of the new normalized values. Default is 1.
out
optional output array, for writing the result to. It must have a shape that
the inputs broadcast to.
Returns
-------
ret
The layer after applying layer normalization.
Examples
--------
>>> x = ivy.array([[0.0976, -0.3452, 1.2740],
... [0.1047, 0.5886, 1.2732],
... [0.7696, -1.7024, -2.2518]])
>>> norm = x.layer_norm([0, 1], eps=0.001,
... new_std=1.5, scale=0.5, offset=[0.5, 0.02, 0.1])
>>> print(norm)
ivy.array([[ 0.826, -0.178, 0.981 ],
[ 0.831, 0.421, 0.981 ],
[ 1.26 , -1.05 , -1.28 ]])
"""
return ivy.layer_norm(
self,
normalized_idxs,
scale=scale,
offset=offset,
eps=eps,
new_std=new_std,
out=out,
)
--- end of file: ivy/ivy/data_classes/array/norms.py ---
# global
from typing import Optional, Union, List, Dict, Literal
# local
import ivy
from ivy.data_classes.container.base import ContainerBase
class _ContainerWithElementwise(ContainerBase):
@staticmethod
def _static_abs(
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container: # noqa
"""ivy.Container static method variant of ivy.abs. This method simply
wraps the function, and so the docstring for ivy.abs also applies to
this method with minimal changes.
Parameters
----------
x
input container. Should have a numeric data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the absolute value of each element in ``x``. The
returned container must have the same data type as ``x``.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0., 2.6, -3.5]),
... b=ivy.array([4.5, -5.3, -0, -2.3]))
>>> y = ivy.Container.static_abs(x)
>>> print(y)
{
a: ivy.array([0., 2.6, 3.5]),
b: ivy.array([4.5, 5.3, 0, 2.3])
}
"""
return ContainerBase.cont_multi_map_in_function(
"abs",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def abs(
self: ivy.Container,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.abs. This method simply
wraps the function, and so the docstring for ivy.abs also applies to
this method with minimal changes.
Parameters
----------
self
input container. Should have a numeric data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the absolute value of each element in ``self``. The
returned container must have the same data type as ``self``.
Examples
--------
>>> x = ivy.Container(a=ivy.array([-1.6, 2.6, -3.5]),
... b=ivy.array([4.5, -5.3, -2.3]))
>>> y = x.abs()
>>> print(y)
{
a: ivy.array([1.6, 2.6, 3.5]),
b: ivy.array([4.5, 5.3, 2.3])
}
"""
return self._static_abs(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_acosh(
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.cosh. This method simply
wraps the function, and so the docstring for ivy.cosh also applies to
this method with minimal changes.
Parameters
----------
x
input container whose elements each represent the area of a hyperbolic
sector. Should have a floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the inverse hyperbolic cosine of each element
in ``x``. The returned container must have a floating-point data
type determined by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1., 2., 3, 4]),
... b=ivy.array([1., 3., 10.0, 6]))
>>> y = ivy.Container.static_acosh(x)
>>> print(y)
{
a: ivy.array([0., 1.32, 1.76, 2.06]),
b: ivy.array([0., 1.76, 2.99, 2.48])
}
"""
return ContainerBase.cont_multi_map_in_function(
"acosh",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def acosh(
self: ivy.Container,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.acosh. This method
simply wraps the function, and so the docstring for ivy.acosh also
applies to this method with minimal changes.
Parameters
----------
self
input container whose elements each represent the area of a hyperbolic
sector. Should have a floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the inverse hyperbolic cosine of each element in
``self``. The returned container must have a floating-point data
type determined by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1., 2., 3, 4]),
... b=ivy.array([1., 3., 10.0, 6]))
>>> y = x.acosh()
>>> print(y)
{
a: ivy.array([0., 1.32, 1.76, 2.06]),
b: ivy.array([0., 1.76, 2.99, 2.48])
}
"""
return self._static_acosh(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_acos(
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.acos. This method simply
wraps the function, and so the docstring for ivy.acos also applies to
this method with minimal changes.
Parameters
----------
x
input container. Should have a real-valued floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the inverse cosine of each element in ``x``.
The returned container must have a floating-point data type
determined by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0., -1, 1]), b=ivy.array([1., 0., -1.]))
>>> y = ivy.Container.static_acos(x)
>>> print(y)
{
a: ivy.array([1.57, 3.14, 0.]),
b: ivy.array([0., 1.57, 3.14])
}
"""
return ContainerBase.cont_multi_map_in_function(
"acos",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_add(
x1: Union[ivy.Array, ivy.NativeArray, ivy.Container],
x2: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
alpha: Optional[Union[int, float, ivy.Container]] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.add. This method simply
wraps the function, and so the docstring for ivy.add also applies to
this method with minimal changes.
Parameters
----------
x1
first input array or container. Should have a numeric data type.
x2
second input array or container. Must be compatible with ``x1``
(see :ref:`broadcasting`). Should have a numeric data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
alpha
scalar multiplier for ``x2``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise sums.
The returned container must have a data type determined
by :ref:`type-promotion`.
Examples
--------
With one :class:`ivy.Container` input:
>>> x = ivy.array([[1.1, 2.3, -3.6]])
>>> y = ivy.Container(a=ivy.array([[4.], [5.], [6.]]),
... b=ivy.array([[5.], [6.], [7.]]))
>>> z = ivy.Container.static_add(x, y)
>>> print(z)
{
a: ivy.array([[5.1, 6.3, 0.4],
[6.1, 7.3, 1.4],
[7.1, 8.3, 2.4]]),
b: ivy.array([[6.1, 7.3, 1.4],
[7.1, 8.3, 2.4],
[8.1, 9.3, 3.4]])
}
With multiple :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([1, 2, 3]),
... b=ivy.array([2, 3, 4]))
>>> y = ivy.Container(a=ivy.array([4, 5, 6]),
... b=ivy.array([5, 6, 7]))
>>> z = ivy.Container.static_add(x, y)
>>> print(z)
{
a: ivy.array([5, 7, 9]),
b: ivy.array([7, 9, 11])
}
>>> x = ivy.Container(a=ivy.array([1, 2, 3]),
... b=ivy.array([2, 3, 4]))
>>> y = ivy.Container(a=ivy.array([4, 5, 6]),
... b=ivy.array([5, 6, 7]))
>>> z = ivy.Container.static_add(x, y, alpha=2)
>>> print(z)
{
a: ivy.array([9, 12, 15]),
b: ivy.array([12, 15, 18])
}
"""
return ContainerBase.cont_multi_map_in_function(
"add",
x1,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
alpha=alpha,
out=out,
)
def acos(
self: ivy.Container,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.acos. This method
simply wraps the function, and so the docstring for ivy.acos also
applies to this method with minimal changes.
Parameters
----------
self
input container. Should have a real-valued floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the inverse cosine of each element in ``self``.
The returned container must have a floating-point data type
determined by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0., -1, 1]), b=ivy.array([1., 0., -1.]))
>>> y = x.acos()
>>> print(y)
{
a: ivy.array([1.57, 3.14, 0.]),
b: ivy.array([0., 1.57, 3.14])
}
"""
return self._static_acos(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def add(
self: ivy.Container,
x2: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
alpha: Optional[Union[int, float, ivy.Container]] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.add. This method simply
wraps the function, and so the docstring for ivy.add also applies to
this method with minimal changes.
Parameters
----------
self
first input container. Should have a numeric data type.
x2
second input array or container. Must be compatible with ``self``
(see :ref:`broadcasting`). Should have a numeric data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
alpha
scalar multiplier for ``x2``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise sums.
The returned container must have a data type determined
by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1, 2, 3]),
... b=ivy.array([2, 3, 4]))
>>> y = ivy.Container(a=ivy.array([4, 5, 6]),
... b=ivy.array([5, 6, 7]))
>>> z = x.add(y)
>>> print(z)
{
a: ivy.array([5, 7, 9]),
b: ivy.array([7, 9, 11])
}
>>> z = x.add(y, alpha=3)
>>> print(z)
{
a: ivy.array([13, 17, 21]),
b: ivy.array([17, 21, 25])
}
"""
return self._static_add(
self,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
alpha=alpha,
out=out,
)
@staticmethod
def _static_asin(
x: ivy.Container,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.asin. This method simply
wraps the function, and so the docstring for ivy.asin also applies to
this method with minimal changes.
Parameters
----------
x
input container. Should have a real-valued floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the inverse sine of each element in ``x``.
The returned container must have a floating-point data
type determined by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0., -0.5, -1.]),
... b=ivy.array([0.1, 0.8, 2.]))
        >>> y = ivy.Container.static_asin(x)
>>> print(y)
{
a: ivy.array([0., -0.524, -1.57]),
b: ivy.array([0.1, 0.927, nan])
}
        >>> x = ivy.Container(a=ivy.array([0.4, 0.9, -0.9]),
        ...                   b=ivy.array([4, -3, -0.2]))
        >>> y = ivy.Container(a=ivy.zeros(3), b=ivy.zeros(3))
        >>> ivy.Container.static_asin(x, out=y)
>>> print(y)
{
a: ivy.array([0.412, 1.12, -1.12]),
b: ivy.array([nan, nan, -0.201])
}
"""
return ContainerBase.cont_multi_map_in_function(
"asin",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def asin(
self: ivy.Container,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.asin. This method
simply wraps the function, and so the docstring for ivy.asin also
applies to this method with minimal changes.
Parameters
----------
self
input container. Should have a real-valued floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the inverse sine of each element in ``self``.
The returned container must have a floating-point
data type determined by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0., 0.5, 1.]),
... b=ivy.array([-4., 0.8, 2.]))
>>> y = x.asin()
>>> print(y)
{
a: ivy.array([0., 0.524, 1.57]),
b: ivy.array([nan, 0.927, nan])
}
>>> x = ivy.Container(a=ivy.array([12., 1.5, 0.]),
... b=ivy.array([-0.85, 0.6, 0.3]))
>>> y = ivy.Container(a=ivy.zeros(3), b=ivy.zeros(3))
>>> x.asin(out=y)
>>> print(y)
{
a: ivy.array([nan, nan, 0.]),
b: ivy.array([-1.02, 0.644, 0.305])
}
"""
return self._static_asin(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_asinh(
x: ivy.Container,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.asinh. This method simply
wraps the function, and so the docstring for ivy.asinh also applies to
this method with minimal changes.
Parameters
----------
x
input container whose elements each represent the area of a hyperbolic
sector. Should have a floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the inverse hyperbolic sine of each element
in ``x``. The returned container must have a floating-point data
type determined by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1.5, 0., -3.5]),
... b=ivy.array([3.4, -5.3, -0, -2.8]))
>>> y = ivy.Container.static_asinh(x)
>>> print(y)
{
a: ivy.array([1.19, 0., -1.97]),
b: ivy.array([1.94, -2.37, 0., -1.75])
}
"""
return ContainerBase.cont_multi_map_in_function(
"asinh",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def asinh(
self: ivy.Container,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.asinh. This method
simply wraps the function, and so the docstring for ivy.asinh also
applies to this method with minimal changes.
Parameters
----------
self
input container whose elements each represent the area of a hyperbolic
sector. Should have a floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the inverse hyperbolic sine of each element in
``self``. The returned container must have a floating-point
data type determined by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.Container(a=ivy.array([-1, 3.7, -5.1]),
... b=ivy.array([4.5, -2.4, -1.5]))
>>> y = x.asinh()
>>> print(y)
{
a: ivy.array([-0.881, 2.02, -2.33]),
b: ivy.array([2.21, -1.61, -1.19])
}
"""
return self._static_asinh(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_atan(
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.atan. This method simply
wraps the function, and so the docstring for ivy.atan also applies to
this method with minimal changes.
Parameters
----------
x
input container. Should have a real-valued floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the inverse tangent of each element in ``x``.
The returned container must have a floating-point data type
determined by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0., -1, 1]), b=ivy.array([1., 0., -6]))
>>> y = ivy.Container.static_atan(x)
>>> print(y)
{
a: ivy.array([0., -0.785, 0.785]),
b: ivy.array([0.785, 0., -1.41])
}
"""
return ContainerBase.cont_multi_map_in_function(
"atan",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def atan(
self: ivy.Container,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.atan. This method
simply wraps the function, and so the docstring for ivy.atan also
applies to this method with minimal changes.
Parameters
----------
self
input container. Should have a real-valued floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the inverse tangent of each element in ``self``.
The returned container must have a floating-point data
type determined by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0., -1, 1]), b=ivy.array([1., 0., -6]))
>>> y = x.atan()
>>> print(y)
{
a: ivy.array([0., -0.785, 0.785]),
b: ivy.array([0.785, 0., -1.41])
}
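A minimal sketch of selective application via ``key_chains`` (the output
shape is assumed from the ``prune_unapplied`` default of ``False``, which
keeps unapplied leaves unchanged):
>>> x = ivy.Container(a=ivy.array([0., 1.]), b=ivy.array([1., 0.]))
>>> y = x.atan(key_chains=['a'])
>>> print(y)
{
    a: ivy.array([0., 0.785]),
    b: ivy.array([1., 0.])
}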
"""
return self._static_atan(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_atan2(
x1: Union[ivy.Array, ivy.NativeArray, ivy.Container],
x2: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.atan2. This method simply
wraps the function, and so the docstring for ivy.atan2 also applies to
this method with minimal changes.
Parameters
----------
x1
first input array or container corresponding to the y-coordinates.
Should have a real-valued floating-point data type.
x2
second input array or container corresponding to the x-coordinates.
Must be compatible with ``x1``
(see :ref:`broadcasting`). Should have a real-valued
floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the inverse tangent of the quotient ``x1/x2``.
The returned array must have a real-valued floating-point data type
determined by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0., 2.6, -3.5]),
... b=ivy.array([4.5, -5.3, -0]))
>>> y = ivy.array([3.0, 2.0, 1.0])
>>> ivy.Container.static_atan2(x, y)
{
a: ivy.array([0., 0.915, -1.29]),
b: ivy.array([0.983, -1.21, 0.])
}
>>> x = ivy.Container(a=ivy.array([0., 2.6, -3.5]),
... b=ivy.array([4.5, -5.3, -0, -2.3]))
>>> y = ivy.Container(a=ivy.array([-2.5, 1.75, 3.5]),
... b=ivy.array([2.45, 6.35, 0, 1.5]))
>>> z = ivy.Container.static_atan2(x, y)
>>> print(z)
{
a: ivy.array([3.14, 0.978, -0.785]),
b: ivy.array([1.07, -0.696, 0., -0.993])
}
"""
return ContainerBase.cont_multi_map_in_function(
"atan2",
x1,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def atan2(
self: ivy.Container,
x2: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.atan2. This method
simply wraps the function, and so the docstring for ivy.atan2 also
applies to this method with minimal changes.
Parameters
----------
self
first input array or container corresponding to the y-coordinates.
Should have a real-valued floating-point data type.
x2
second input array or container corresponding to the x-coordinates.
Must be compatible with ``self`` (see :ref:`broadcasting`).
Should have a real-valued floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the inverse tangent of the quotient ``self/x2``.
The returned array must have a real-valued floating-point data
type determined by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0., 2.6, -3.5]),
... b=ivy.array([4.5, -5.3, -0]))
>>> y = ivy.array([3.0, 2.0, 1.0])
>>> x.atan2(y)
{
a: ivy.array([0., 0.915, -1.29]),
b: ivy.array([0.983, -1.21, 0.])
}
>>> x = ivy.Container(a=ivy.array([0., 2.6, -3.5]),
... b=ivy.array([4.5, -5.3, -0, -2.3]))
>>> y = ivy.Container(a=ivy.array([-2.5, 1.75, 3.5]),
... b=ivy.array([2.45, 6.35, 0, 1.5]))
>>> z = x.atan2(y)
>>> print(z)
{
a: ivy.array([3.14, 0.978, -0.785]),
b: ivy.array([1.07, -0.696, 0., -0.993])
}
"""
return self._static_atan2(
self,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_atanh(
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.atanh. This method simply
wraps the function, and so the docstring for ivy.atanh also applies to
this method with minimal changes.
Parameters
----------
x
input container whose elements each represent the area of a hyperbolic
sector. Should have a floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the inverse hyperbolic tangent of each
element in ``x``. The returned container must have a floating-point data
type determined by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0, 0.5, -0.5]), b=ivy.array([0., 0.2, 0.9]))
>>> y = ivy.Container.static_atanh(x)
>>> print(y)
{
a: ivy.array([0., 0.549, -0.549]),
b: ivy.array([0., 0.203, 1.47])
}
"""
return ContainerBase.cont_multi_map_in_function(
"atanh",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def atanh(
self: ivy.Container,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.atanh. This method
simply wraps the function, and so the docstring for ivy.atanh also
applies to this method with minimal changes.
Parameters
----------
self
input container whose elements each represent the area of a
hyperbolic sector. Should have a floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the inverse hyperbolic tangent of each element
in ``self``. The returned container must have a floating-point
data type determined by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0, 0.5, -0.5]), b=ivy.array([0., 0.2, 0.9]))
>>> y = x.atanh()
>>> print(y)
{
a: ivy.array([0., 0.549, -0.549]),
b: ivy.array([0., 0.203, 1.47])
}
"""
return self._static_atanh(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_bitwise_and(
x1: Union[ivy.Array, ivy.NativeArray, ivy.Container],
x2: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.bitwise_and. This method
simply wraps the function, and so the docstring for ivy.bitwise_and
also applies to this method with minimal changes.
Parameters
----------
x1
first input array or container. Should have an integer or boolean
data type.
x2
second input array or container. Must be compatible with ``x1``
(see :ref:`broadcasting`).
Should have an integer or boolean data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise results.
The returned container must have a data type determined
by :ref:`type-promotion`.
Examples
--------
With one :class:`ivy.Container` input:
>>> x = ivy.array([1, 2, 3])
>>> y = ivy.Container(a=ivy.array([4, 5, 6]),
... b=ivy.array([5, 6, 7]))
>>> z = ivy.Container.static_bitwise_and(x, y)
>>> print(z)
{
a: ivy.array([0, 0, 2]),
b: ivy.array([1, 2, 3])
}
With multiple :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([1, 2, 3]),
... b=ivy.array([2, 3, 4]))
>>> y = ivy.Container(a=ivy.array([4, 5, 6]),
... b=ivy.array([5, 6, 7]))
>>> z = ivy.Container.static_bitwise_and(x, y)
>>> print(z)
{
a: ivy.array([0, 0, 2]),
b: ivy.array([0, 2, 4])
}
"""
return ContainerBase.cont_multi_map_in_function(
"bitwise_and",
x1,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def bitwise_and(
self: ivy.Container,
x2: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.bitwise_and. This
method simply wraps the function, and so the docstring for
ivy.bitwise_and also applies to this method with minimal changes.
Parameters
----------
self
first input array or container. Should have an integer or boolean
data type.
x2
second input array or container. Must be compatible with ``self``
(see :ref:`broadcasting`).
Should have an integer or boolean data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise results.
The returned container must have a data type determined
by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.Container(a=ivy.array([True, True]), b=ivy.array([False, True]))
>>> y = ivy.Container(a=ivy.array([False, True]), b=ivy.array([False, True]))
>>> x.bitwise_and(y, out=y)
>>> print(y)
{
a: ivy.array([False, True]),
b: ivy.array([False, True])
}
"""
return self._static_bitwise_and(
self,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_bitwise_left_shift(
x1: Union[ivy.Array, ivy.NativeArray, ivy.Container],
x2: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.bitwise_left_shift. This
method simply wraps the function, and so the docstring for
ivy.bitwise_left_shift also applies to this method with minimal
changes.
Parameters
----------
x1
first input array or container. Should have an integer or boolean
data type.
x2
second input array or container. Must be compatible with ``x1``
(see :ref:`broadcasting`).
Should have an integer or boolean data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise results.
The returned container must have a data type determined by
:ref:`type-promotion`.
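Examples
--------
A minimal sketch, assuming integer-typed containers (the values below
are illustrative and not taken from the upstream docs):
>>> x = ivy.Container(a=ivy.array([1, 2, 3]), b=ivy.array([4, 5, 6]))
>>> y = ivy.array([1, 2, 3])
>>> z = ivy.Container.static_bitwise_left_shift(x, y)
>>> print(z)
{
    a: ivy.array([2, 8, 24]),
    b: ivy.array([8, 20, 48])
}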
"""
return ContainerBase.cont_multi_map_in_function(
"bitwise_left_shift",
x1,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def bitwise_left_shift(
self: ivy.Container,
x2: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.bitwise_left_shift.
This method simply wraps the function, and so the docstring for
ivy.bitwise_left_shift also applies to this method with minimal
changes.
Parameters
----------
self
first input array or container. Should have an integer or boolean
data type.
x2
second input array or container. Must be compatible with ``self``
(see :ref:`broadcasting`).
Should have an integer or boolean data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise results. The returned container
must have a data type determined by :ref:`type-promotion`.
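Examples
--------
A minimal sketch, assuming integer-typed containers (illustrative values):
>>> x = ivy.Container(a=ivy.array([1, 2, 3]), b=ivy.array([4, 5, 6]))
>>> y = x.bitwise_left_shift(ivy.array([1]))
>>> print(y)
{
    a: ivy.array([2, 4, 6]),
    b: ivy.array([8, 10, 12])
}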
"""
return self._static_bitwise_left_shift(
self,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_bitwise_invert(
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.bitwise_invert. This
method simply wraps the function, and so the docstring for
ivy.bitwise_invert also applies to this method with minimal changes.
Parameters
----------
x
input container. Should have an integer or boolean data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise results.
The returned array must have the same data type as ``x``.
Examples
--------
>>> x = ivy.Container(a=ivy.array([False, True, False]),
... b=ivy.array([True, True, False]))
>>> y = ivy.Container.static_bitwise_invert(x)
>>> print(y)
{
a: ivy.array([True, False, True]),
b: ivy.array([False, False, True])
}
>>> x = ivy.Container(a=ivy.array([1, 2, 3]), b=ivy.array([4, 5, 6]))
>>> y = ivy.Container.static_bitwise_invert(x)
>>> print(y)
{
a: ivy.array([-2, -3, -4]),
b: ivy.array([-5, -6, -7])
}
"""
return ContainerBase.cont_multi_map_in_function(
"bitwise_invert",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def bitwise_invert(
self: ivy.Container,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.bitwise_invert. This
method simply wraps the function, and so the docstring for
ivy.bitwise_invert also applies to this method with minimal changes.
Parameters
----------
self
input container. Should have an integer or boolean data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise results.
The returned array must have the same data type as ``self``.
Examples
--------
>>> x = ivy.Container(a = ivy.array([False, True, False]),
... b = ivy.array([True, True, False]))
>>> y = x.bitwise_invert()
>>> print(y)
{
a: ivy.array([True, False, True]),
b: ivy.array([False, False, True])
}
>>> x = ivy.Container(a = ivy.array([1, 2, 3]),
... b = ivy.array([4, 5, 6]))
>>> y = x.bitwise_invert()
>>> print(y)
{
a: ivy.array([-2, -3, -4]),
b: ivy.array([-5, -6, -7])
}
"""
return self._static_bitwise_invert(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_cos(
x: ivy.Container,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.cos. This method simply
wraps the function, and so the docstring for ivy.cos also applies to
this method with minimal changes.
Parameters
----------
x
input container whose elements are each expressed in radians.
Should have a floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the cosine of each element in ``x``. The returned
container must have a floating-point data type determined by
:ref:`type-promotion`.
Examples
--------
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0., -1, 1]), b=ivy.array([1., 0., -6]))
>>> y = ivy.Container.static_cos(x)
>>> print(y)
{
a: ivy.array([1., 0.54, 0.54]),
b: ivy.array([0.54, 1., 0.96])
}
"""
return ContainerBase.cont_multi_map_in_function(
"cos",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def cos(
self: ivy.Container,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.cos. This method simply
wraps the function, and so the docstring for ivy.cos also applies to
this method with minimal changes.
Parameters
----------
self
input container whose elements are each expressed in radians.
Should have a floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the cosine of each element in ``self``.
The returned container must have a floating-point data type
determined by :ref:`type-promotion`.
Examples
--------
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0., -1, 1]), b=ivy.array([1., 0., -6]))
>>> y = x.cos()
>>> print(y)
{
a: ivy.array([1., 0.54, 0.54]),
b: ivy.array([0.54, 1., 0.96])
}
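A minimal sketch of ``map_sequences`` (behaviour assumed from the
parameter docs; list leaves are mapped over instead of rejected):
>>> x = ivy.Container(a=[ivy.array([0., -1., 1.])])
>>> y = x.cos(map_sequences=True)  # maps cos over the list leaf (assumed)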
"""
return self._static_cos(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_bitwise_or(
x1: Union[ivy.Array, ivy.NativeArray, ivy.Container],
x2: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.bitwise_or. This method
simply wraps the function, and so the docstring for ivy.bitwise_or also
applies to this method with minimal changes.
Parameters
----------
x1
first input array or container. Should have an integer or boolean
data type.
x2
second input array or container. Must be compatible with ``x1``
(see :ref:`broadcasting`). Should have an integer or boolean data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise results.
The returned container must have a data type determined
by :ref:`type-promotion`.
Examples
--------
With one :class:`ivy.Container` input:
>>> y = ivy.array([1, 2, 3])
>>> x = ivy.Container(a=ivy.array([4, 5, 6]))
>>> z = ivy.Container.static_bitwise_or(x, y)
>>> print(z)
{
a: ivy.array([5, 7, 7])
}
With multiple :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([1, 2, 3]),
... b=ivy.array([2, 3, 4]))
>>> y = ivy.Container(a=ivy.array([4, 5, 6]),
... b=ivy.array([5, 6, 7]))
>>> z = ivy.Container.static_bitwise_or(x, y)
>>> print(z)
{
a: ivy.array([5, 7, 7]),
b: ivy.array([7, 7, 7])
}
"""
return ContainerBase.cont_multi_map_in_function(
"bitwise_or",
x1,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def bitwise_or(
self: ivy.Container,
x2: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.bitwise_or. This method
simply wraps the function, and so the docstring for ivy.bitwise_or also
applies to this method with minimal changes.
Parameters
----------
self
first input array or container. Should have an integer or boolean
data type.
x2
second input array or container. Must be compatible with ``self``
(see :ref:`broadcasting`). Should have an integer or boolean data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise results.
The returned container must have a data type determined
by :ref:`type-promotion`.
Examples
--------
Using :class:`ivy.Container` instance method:
>>> x = ivy.Container(a=ivy.array([1, 2, 3]),
... b=ivy.array([2, 3, 4]))
>>> y = ivy.Container(a=ivy.array([4, 5, 6]),
... b=ivy.array([5, 6, 7]))
>>> z = x.bitwise_or(y)
>>> print(z)
{
a: ivy.array([5, 7, 7]),
b: ivy.array([7, 7, 7])
}
"""
return self._static_bitwise_or(
self,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_bitwise_right_shift(
x1: Union[ivy.Array, ivy.NativeArray, ivy.Container],
x2: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.bitwise_right_shift. This
method simply wraps the function, and so the docstring for
ivy.bitwise_right_shift also applies to this method with minimal
changes.
Parameters
----------
x1
first input array or container. Should have an integer or boolean data type.
x2
second input array or container. Must be compatible with ``x1``
(see :ref:`broadcasting`).
Should have an integer or boolean data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise results.
The returned container must have a data type determined
by :ref:`type-promotion`.
Examples
--------
With one :class:`ivy.Container` input:
>>> a = ivy.Container(a = ivy.array([2, 3, 4]), b = ivy.array([5, 10, 64]))
>>> b = ivy.array([0, 1, 2])
>>> y = ivy.Container.static_bitwise_right_shift(a, b)
>>> print(y)
{
a: ivy.array([2, 1, 1]),
b: ivy.array([5, 5, 16])
}
With multiple :class:`ivy.Container` inputs:
>>> a = ivy.Container(a = ivy.array([2, 3, 4]), b = ivy.array([5, 10, 64]))
>>> b = ivy.Container(a = ivy.array([0, 1, 2]), b = ivy.array([2]))
>>> y = ivy.Container.static_bitwise_right_shift(a, b)
>>> print(y)
{
a: ivy.array([2, 1, 1]),
b: ivy.array([1, 2, 16])
}
"""
return ContainerBase.cont_multi_map_in_function(
"bitwise_right_shift",
x1,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def bitwise_right_shift(
self: ivy.Container,
x2: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.bitwise_right_shift.
This method simply wraps the function, and so the docstring for
ivy.bitwise_right_shift also applies to this method with minimal
changes.
Parameters
----------
self
first input array or container. Should have an integer or boolean data type.
x2
second input array or container. Must be compatible with ``self``
(see :ref:`broadcasting`).
Should have an integer or boolean data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise results. The returned container
must have a data type determined by :ref:`type-promotion`.
Examples
--------
>>> a = ivy.Container(a = ivy.array([2, 3, 4]), b = ivy.array([5, 10, 64]))
>>> b = ivy.Container(a = ivy.array([0, 1, 2]), b = ivy.array([2]))
>>> y = a.bitwise_right_shift(b)
>>> print(y)
{
a: ivy.array([2, 1, 1]),
b: ivy.array([1, 2, 16])
}
"""
return self._static_bitwise_right_shift(
self,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_bitwise_xor(
x1: Union[ivy.Array, ivy.NativeArray, ivy.Container],
x2: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.bitwise_xor. This method
simply wraps the function, and so the docstring for ivy.bitwise_xor
also applies to this method with minimal changes.
Parameters
----------
x1
first input array or container. Should have an integer or boolean
data type.
x2
second input array or container. Must be compatible with ``x1``
(see :ref:`broadcasting`).
Should have an integer or boolean data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise results.
The returned container must have a data type determined by
:ref:`type-promotion`.
Examples
--------
>>> x = ivy.Container(a = ivy.array([89]), b = ivy.array([3]))
>>> y = ivy.Container(a = ivy.array([12]), b = ivy.array([5]))
>>> z = ivy.Container.static_bitwise_xor(x, y)
>>> print(z)
{
a: ivy.array([85]),
b: ivy.array([6])
}
"""
return ContainerBase.cont_multi_map_in_function(
"bitwise_xor",
x1,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def bitwise_xor(
self: ivy.Container,
x2: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.bitwise_xor. This
method simply wraps the function, and so the docstring for
ivy.bitwise_xor also applies to this method with minimal changes.
Parameters
----------
self
first input array or container. Should have an integer or
boolean data type.
x2
second input array or container. Must be compatible with ``self``
(see :ref:`broadcasting`).
Should have an integer or boolean data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise results.
The returned container must have a data type determined
by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.Container(a = ivy.array([89]), b = ivy.array([3]))
>>> y = ivy.Container(a = ivy.array([12]), b = ivy.array([5]))
>>> z = x.bitwise_xor(y)
>>> print(z)
{
a: ivy.array([85]),
b: ivy.array([6])
}
"""
return self._static_bitwise_xor(
self,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_ceil(
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.ceil. This method simply
wraps the function, and so the docstring for ivy.ceil also applies to
this method with minimal changes.
Parameters
----------
x
input container. Should have a numeric data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the rounded result for each element in ``x``.
The returned array must have the same data type as ``x``.
Examples
--------
>>> x = ivy.Container(a=ivy.array([2.5, 0.5, -1.4]),
... b=ivy.array([5.4, -3.2, 5.2]))
>>> y = ivy.Container.static_ceil(x)
>>> print(y)
{
a: ivy.array([3., 1., -1.]),
b: ivy.array([6., -3., 6.])
}
"""
return ContainerBase.cont_multi_map_in_function(
"ceil",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def ceil(
self: ivy.Container,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.ceil. This method
simply wraps the function, and so the docstring for ivy.ceil also
applies to this method with minimal changes.
Parameters
----------
self
input container. Should have a numeric data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the rounded result for each element in ``self``.
The returned container must have the same data type as ``self``.
Examples
--------
>>> x = ivy.Container(a=ivy.array([2.5, 0.5, -1.4]),
... b=ivy.array([5.4, -3.2, 5.2]))
>>> y = x.ceil()
>>> print(y)
{
a: ivy.array([3., 1., -1.]),
b: ivy.array([6., -3., 6.])
}
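A minimal sketch of ``prune_unapplied`` (behaviour assumed from the
parameter docs: unapplied key-chains are dropped from the result):
>>> y = x.ceil(key_chains=['b'], prune_unapplied=True)
>>> print(y)
{
    b: ivy.array([6., -3., 6.])
}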
"""
return self._static_ceil(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_cosh(
x: ivy.Container,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.cosh. This method simply
wraps the function, and so the docstring for ivy.cosh also applies to
this method with minimal changes.
Parameters
----------
x
input container whose elements each represent a hyperbolic angle. Should
have a floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the hyperbolic cosine of each element in ``x``. The
returned container must have a floating-point data type determined by
:ref:`type-promotion`.
Examples
--------
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([-1, 0.23, 1.12]), b=ivy.array([1, -2, 0.76]))
>>> y = ivy.Container.static_cosh(x)
>>> print(y)
{
a: ivy.array([1.54, 1.03, 1.7]),
b: ivy.array([1.54, 3.76, 1.3])
}
>>> x = ivy.Container(a=ivy.array([-3, 0.34, 2.]),
... b=ivy.array([0.67, -0.98, -3]))
>>> y = ivy.Container(a=ivy.zeros(3), b=ivy.zeros(3))
>>> ivy.Container.static_cosh(x, out=y)
>>> print(y)
{
a: ivy.array([10.1, 1.06, 3.76]),
b: ivy.array([1.23, 1.52, 10.1])
}
"""
return ContainerBase.cont_multi_map_in_function(
"cosh",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def cosh(
self: ivy.Container,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.cosh. This method
simply wraps the function, and so the docstring for ivy.cosh also
applies to this method with minimal changes.
Parameters
----------
self
input container whose elements each represent a hyperbolic angle. Should
have a floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the hyperbolic cosine of each element in ``self``.
The returned container must have a floating-point data type determined by
:ref:`type-promotion`.
Examples
--------
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([-1, 0.23, 1.12]), b=ivy.array([1, -2, 0.76]))
>>> y = x.cosh()
>>> print(y)
{
a: ivy.array([1.54, 1.03, 1.7]),
b: ivy.array([1.54, 3.76, 1.3])
}
>>> x = ivy.Container(a=ivy.array([-3, 0.34, 2.]),
... b=ivy.array([0.67, -0.98, -3]))
>>> y = ivy.Container(a=ivy.zeros(3), b=ivy.zeros(3))
>>> x.cosh(out=y)
>>> print(y)
{
a: ivy.array([10.1, 1.06, 3.76]),
b: ivy.array([1.23, 1.52, 10.1])
}
"""
return self._static_cosh(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_divide(
x1: Union[ivy.Array, ivy.NativeArray, ivy.Container],
x2: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.divide. This method
simply wraps the function, and so the docstring for ivy.divide also
applies to this method with minimal changes.
Parameters
----------
x1
dividend input array or container. Should have a real-valued data type.
x2
divisor input array or container. Must be compatible with ``x1``
(see :ref:`broadcasting`).
Should have a real-valued data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise results.
The returned container must have a data type determined
by :ref:`type-promotion`.
Examples
--------
With :class:`ivy.Container` inputs:
>>> x1 = ivy.Container(a=ivy.array([12., 3.5, 6.3]), b=ivy.array([3., 1., 0.9]))
>>> x2 = ivy.Container(a=ivy.array([1., 2.3, 3]), b=ivy.array([2.4, 3., 2.]))
>>> y = ivy.Container.static_divide(x1, x2)
>>> print(y)
{
a: ivy.array([12., 1.52, 2.1]),
b: ivy.array([1.25, 0.333, 0.45])
}
"""
return ContainerBase.cont_multi_map_in_function(
"divide",
x1,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def divide(
self: ivy.Container,
x2: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.divide. This method
simply wraps the function, and so the docstring for ivy.divide also
applies to this method with minimal changes.
Parameters
----------
self
dividend input array or container. Should have a real-valued
data type.
x2
divisor input array or container. Must be compatible with ``self``
(see :ref:`broadcasting`).
Should have a real-valued data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise results.
The returned container must have a data type determined
by :ref:`type-promotion`.
Examples
--------
With :class:`ivy.Container` inputs:
>>> x1 = ivy.Container(a=ivy.array([12., 3.5, 6.3]), b=ivy.array([3., 1., 0.9]))
>>> x2 = ivy.Container(a=ivy.array([1., 2.3, 3]), b=ivy.array([2.4, 3., 2.]))
>>> y = x1.divide(x2)
>>> print(y)
{
a: ivy.array([12., 1.52, 2.1]),
b: ivy.array([1.25, 0.333, 0.45])
}
With :code:`Number` instances at the leaves:
>>> x = ivy.Container(a=1, b=2)
>>> y = ivy.Container(a=5, b=4)
>>> z = x.divide(y)
>>> print(z)
{
a: 0.2,
b: 0.5
}
"""
return self._static_divide(
self,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_equal(
x1: Union[ivy.Array, ivy.NativeArray, ivy.Container],
x2: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.equal. This method simply
wraps the function, and so the docstring for ivy.equal also applies to
this method with minimal changes.
Parameters
----------
x1
input array or container. May have any data type.
x2
input array or container. Must be compatible with ``x1``
(see :ref:`broadcasting`).
May have any data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise results. The returned container
must have a data type of ``bool``.
Examples
--------
With :class:`ivy.Container` inputs:
>>> x1 = ivy.Container(a=ivy.array([12, 3.5, 6.3]), b=ivy.array([3., 1., 0.9]))
>>> x2 = ivy.Container(a=ivy.array([12, 2.3, 3]), b=ivy.array([2.4, 3., 2.]))
>>> y = ivy.Container.static_equal(x1, x2)
>>> print(y)
{
a: ivy.array([True, False, False]),
b: ivy.array([False, False, False])
}
"""
return ContainerBase.cont_multi_map_in_function(
"equal",
x1,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def equal(
self: ivy.Container,
x2: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.equal. This method
simply wraps the function, and so the docstring for ivy.equal also
applies to this method with minimal changes.
Parameters
----------
self
input array or container. May have any data type.
x2
input array or container. Must be compatible with ``self``
(see :ref:`broadcasting`).
May have any data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise results. The returned container
must have a data type of ``bool``.
Examples
--------
With :class:`ivy.Container` inputs:
>>> x1 = ivy.Container(a=ivy.array([12, 3.5, 6.3]), b=ivy.array([3., 1., 0.9]))
>>> x2 = ivy.Container(a=ivy.array([12, 2.3, 3]), b=ivy.array([2.4, 3., 2.]))
>>> y = x1.equal(x2)
>>> print(y)
{
a: ivy.array([True, False, False]),
b: ivy.array([False, False, False])
}
With mixed :class:`ivy.Container` and :class:`ivy.Array` inputs:
>>> x1 = ivy.Container(a=ivy.array([12., 3.5, 6.3]), b=ivy.array([3., 1., 0.9]))
>>> x2 = ivy.array([3., 1., 0.9])
>>> y = x1.equal(x2)
>>> print(y)
{
a: ivy.array([False, False, False]),
b: ivy.array([True, True, True])
}
"""
return self._static_equal(
self,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def static_nan_to_num(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
copy: Union[bool, ivy.Container] = True,
nan: Union[float, int, ivy.Container] = 0.0,
posinf: Optional[Union[float, int, ivy.Container]] = None,
neginf: Optional[Union[float, int, ivy.Container]] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.nan_to_num. This method
simply wraps the function, and so the docstring for ivy.nan_to_num also
applies to this method with minimal changes.
Parameters
----------
x
Input container with array items.
copy
Whether to create a copy of x (True) or to replace values in-place (False).
The in-place operation only occurs if casting to an array does not require
a copy. Default is True.
nan
Value to be used to fill NaN values. If no value is passed then NaN values
will be replaced with 0.0.
posinf
Value to be used to fill positive infinity values. If no value is passed
then positive infinity values will be replaced with a very large number.
neginf
Value to be used to fill negative infinity values.
If no value is passed then negative infinity values
will be replaced with a very small (or negative) number.
out
optional output container, for writing the result to.
Returns
-------
ret
Container including arrays with replaced non-finite elements.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1., 2., 3., ivy.nan]),\
                      b=ivy.array([1., 2., 3., ivy.inf]))
>>> ivy.Container.static_nan_to_num(x, posinf=5e+100)
{
    a: ivy.array([1., 2., 3., 0.]),
    b: ivy.array([1., 2., 3., 5e+100])
}
"""
return ContainerBase.cont_multi_map_in_function(
"nan_to_num",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
copy=copy,
nan=nan,
posinf=posinf,
neginf=neginf,
out=out,
)
def nan_to_num(
self: ivy.Container,
/,
*,
copy: Union[bool, ivy.Container] = True,
nan: Union[float, int, ivy.Container] = 0.0,
posinf: Optional[Union[float, int, ivy.Container]] = None,
neginf: Optional[Union[float, int, ivy.Container]] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.nan_to_num. This method
simply wraps the function, and so the docstring for ivy.nan_to_num also
applies to this method with minimal changes.
Parameters
----------
self
Input container with array items.
copy
Whether to create a copy of x (True) or to replace values in-place (False).
The in-place operation only occurs if casting to an array does not require
a copy. Default is True.
nan
Value to be used to fill NaN values. If no value is passed then NaN values
will be replaced with 0.0.
posinf
Value to be used to fill positive infinity values. If no value is passed
then positive infinity values will be replaced with a very large number.
neginf
Value to be used to fill negative infinity values.
If no value is passed then negative infinity values
will be replaced with a very small (or negative) number.
out
optional output container, for writing the result to.
Returns
-------
ret
Container including arrays with replaced non-finite elements.
Examples
--------
>>> a = ivy.array([1., 2, 3, ivy.nan], dtype="float64")
>>> b = ivy.array([1., 2, 3, ivy.inf], dtype="float64")
>>> x = ivy.Container(a=a, b=b)
>>> ret = x.nan_to_num(posinf=5e+100)
>>> print(ret)
{
a: ivy.array([1., 2., 3., 0.]),
b: ivy.array([1.e+000, 2.e+000, 3.e+000, 5.e+100])
}
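A further sketch overriding the replacement values via ``nan`` and
``posinf`` (the output shown assumes the documented numpy-style
replacement semantics):
>>> x = ivy.Container(a=ivy.array([1., ivy.nan]), b=ivy.array([ivy.inf, 2.]))
>>> print(x.nan_to_num(nan=-1.0, posinf=10.0))
{
    a: ivy.array([1., -1.]),
    b: ivy.array([10., 2.])
}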
"""
return self.static_nan_to_num(
self, copy=copy, nan=nan, posinf=posinf, neginf=neginf, out=out
)
@staticmethod
def static_imag(
val: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.imag. This method simply
wraps the function, and so the docstring for ivy.imag also applies to
this method with minimal changes.
Parameters
----------
val
Array-like input.
out
optional output array, for writing the result to.
Returns
-------
ret
Returns a container including arrays with the imaginary part
of complex numbers.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1+2j, 3+4j, 5+6j]),
                      b=ivy.array([-2.25 + 4.75j, 3.25 + 5.75j]))
>>> x
{
a: ivy.array([1.+2.j, 3.+4.j, 5.+6.j]),
b: ivy.array([-2.25+4.75j, 3.25+5.75j])
}
>>> ivy.Container.static_imag(x)
{
a: ivy.array([2., 4., 6.]),
b: ivy.array([4.75, 5.75])
}
"""
return ContainerBase.cont_multi_map_in_function(
"imag",
val,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def imag(
self: ivy.Container,
/,
*,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.imag. This method
simply wraps the function, and so the docstring for ivy.imag also
applies to this method with minimal changes.
Parameters
----------
self
Array-like input.
out
optional output array, for writing the result to.
Returns
-------
ret
Returns a container including arrays with the imaginary part
of complex numbers.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1+2j, 3+4j, 5+6j]),
                      b=ivy.array([-2.25 + 4.75j, 3.25 + 5.75j]))
>>> x
{
a: ivy.array([1.+2.j, 3.+4.j, 5.+6.j]),
b: ivy.array([-2.25+4.75j, 3.25+5.75j])
}
>>> x.imag()
{
a: ivy.array([2., 4., 6.]),
b: ivy.array([4.75, 5.75])
}
"""
return self.static_imag(self, out=out)
@staticmethod
def static_angle(
z: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
deg: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.angle. This method simply
wraps the function, and so the docstring for ivy.angle also applies to
this method with minimal changes.
Parameters
----------
z
Array-like input.
deg
optional bool; if ``True``, the angle is returned in degrees
instead of radians. Default is ``False``.
out
optional output array, for writing the result to.
Returns
-------
ret
Returns a container of angles for each complex number in the input.
If ``deg`` is ``False`` (the default), angles are calculated in
radians; if ``deg`` is ``True``, they are calculated in degrees.
Examples
--------
>>> ivy.set_backend('tensorflow')
>>> x = ivy.Container(a=ivy.array([-2.25 + 4.75j, 3.25 + 5.75j]),
b=ivy.array([-2.25 + 4.75j, 3.25 + 5.75j]))
>>> x
{
a: ivy.array([-2.25+4.75j, 3.25+5.75j]),
b: ivy.array([-2.25+4.75j, 3.25+5.75j])
}
>>> ivy.Container.static_angle(x)
{
a: ivy.array([2.01317055, 1.05634501]),
b: ivy.array([2.01317055, 1.05634501])
}
>>> ivy.set_backend('numpy')
>>> ivy.Container.static_angle(x, deg=True)
{
a: ivy.array([115.3461759, 60.524111]),
b: ivy.array([115.3461759, 60.524111])
}
"""
return ContainerBase.cont_multi_map_in_function(
"angle",
z,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
deg=deg,
out=out,
)
def angle(
self: ivy.Container,
/,
*,
deg: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.angle. This method
simply wraps the function, and so the docstring for ivy.angle also
applies to this method with minimal changes.
Parameters
----------
self
Array-like input.
deg
optional bool; if ``True``, the angle is returned in degrees
instead of radians. Default is ``False``.
out
optional output array, for writing the result to.
Returns
-------
ret
Returns a container of angles for each complex number in the input.
If ``deg`` is ``False`` (the default), angles are calculated in
radians; if ``deg`` is ``True``, they are calculated in degrees.
Examples
--------
>>> ivy.set_backend('tensorflow')
>>> x = ivy.Container(a=ivy.array([-2.25 + 4.75j, 3.25 + 5.75j]),
b=ivy.array([-2.25 + 4.75j, 3.25 + 5.75j]))
>>> x
{
a: ivy.array([-2.25+4.75j, 3.25+5.75j]),
b: ivy.array([-2.25+4.75j, 3.25+5.75j])
}
>>> x.angle()
{
a: ivy.array([2.01317055, 1.05634501]),
b: ivy.array([2.01317055, 1.05634501])
}
>>> ivy.set_backend('numpy')
>>> x.angle(deg=True)
{
a: ivy.array([115.3461759, 60.524111]),
b: ivy.array([115.3461759, 60.524111])
}
"""
return self.static_angle(self, deg=deg, out=out)
@staticmethod
def static_gcd(
x1: Union[ivy.Array, ivy.NativeArray, ivy.Container, int, list, tuple],
x2: Union[ivy.Array, ivy.NativeArray, ivy.Container, int, list, tuple],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.gcd. This method simply
wraps the function, and so the docstring for ivy.gcd also applies to
this method with minimal changes.
Parameters
----------
x1
first input container with array-like items.
x2
second input container with array-like items.
out
optional output container, for writing the result to.
Returns
-------
ret
Container including arrays with element-wise gcd of input arrays.
Examples
--------
>>> x1 = ivy.Container(a=ivy.array([1, 2, 3]),\
b=ivy.array([1, 2, 3]))
>>> x2 = ivy.Container(a=ivy.array([5, 6, 7]),\
b=10)
>>> ivy.Container.static_gcd(x1, x2)
{
    a: ivy.array([1., 2., 1.]),
    b: ivy.array([1., 2., 1.])
}
"""
return ContainerBase.cont_multi_map_in_function(
"gcd",
x1,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def gcd(
self: ivy.Container,
x2: ivy.Container,
/,
*,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.gcd. This method simply
wraps the function, and so the docstring for ivy.gcd also applies to
this method with minimal changes.
Parameters
----------
self
first input container with array-like items.
x2
second input container with array-like items.
out
optional output container, for writing the result to.
Returns
-------
ret
Container including arrays with element-wise gcd of input arrays.
Examples
--------
>>> x1 = ivy.Container(a=ivy.array([1, 2, 3]),\
b=ivy.array([1, 2, 3]))
>>> x2 = ivy.Container(a=ivy.array([5, 6, 7]),\
b=10)
>>> x1.gcd(x2)
{
    a: ivy.array([1., 2., 1.]),
    b: ivy.array([1., 2., 1.])
}
"""
return self.static_gcd(self, x2, out=out)
@staticmethod
def static_exp2(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container, float, list, tuple],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.exp2. This method simply
wraps the function, and so the docstring for ivy.exp2 also applies to
this method with minimal changes.
Parameters
----------
x
container with the base input arrays.
out
optional output container, for writing the result to.
Returns
-------
ret
Container including arrays with element-wise 2 to the power
of input arrays elements.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1, 2, 3]),\
b=[5, 6, 7])
>>> ivy.Container.static_exp2(x)
{
a: ivy.array([2., 4., 8.]),
b: ivy.array([32., 64., 128.])
}
"""
return ContainerBase.cont_multi_map_in_function(
"exp2",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def exp2(
self: ivy.Container,
/,
*,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.exp2. This method
simply wraps the function, and so the docstring for ivy.exp2 also
applies to this method with minimal changes.
Parameters
----------
self
container with the base input arrays.
out
optional output container, for writing the result to.
Returns
-------
ret
Container including arrays with element-wise 2 to the power
of input array elements.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1, 2, 3]),\
b=[5, 6, 7])
>>> x.exp2()
{
a: ivy.array([2., 4., 8.]),
b: ivy.array([32., 64., 128.])
}
"""
return self.static_exp2(self, out=out)
@staticmethod
def _static_exp(
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.exp. This method simply
wraps the function, and so the docstring for ivy.exp also applies to
this method with minimal changes.
Parameters
----------
x
input container. Should have a floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the evaluated result for each element in ``x``.
The returned array must have a real-valued floating-point data type
determined by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1., 2.,]), b=ivy.array([4., 5.]))
>>> y = ivy.Container.static_exp(x)
>>> print(y)
{
a: ivy.array([2.71828198, 7.38905573]),
b: ivy.array([54.59814835, 148.4131622])
}
"""
return ContainerBase.cont_multi_map_in_function(
"exp",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def exp(
self: ivy.Container,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.exp. This method simply
wraps the function, and so the docstring for ivy.exp also applies to
this method with minimal changes.
Parameters
----------
self
input container. Should have a floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the evaluated result for each element in ``self``.
The returned array must have a real-valued floating-point data type
determined by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1., 2., 3.]), b=ivy.array([4., 5., 6.]))
>>> y = x.exp()
>>> print(y)
{
a: ivy.array([2.71828198, 7.38905573, 20.08553696]),
b: ivy.array([54.59814835, 148.4131622, 403.428772])
}
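A sketch of in-place use via the ``out`` argument, mirroring the
pattern shown for ``expm1`` below (printed values are rounded):
>>> x = ivy.Container(a=ivy.array([0., 1.]))
>>> y = ivy.Container(a=ivy.array([0., 0.]))
>>> x.exp(out=y)
>>> print(y)
{
    a: ivy.array([1., 2.71828198])
}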
"""
return self._static_exp(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_expm1(
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.expm1. This method simply
wraps the function, and so the docstring for ivy.expm1 also applies to
this method with minimal changes.
Parameters
----------
x
input container. Should have a floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the evaluated result for each element in ``x``.
The returned array must have a real-valued floating-point data type
determined by :ref:`type-promotion`.
Examples
--------
With :class:`ivy.Container` static method:
>>> x = ivy.Container(a=ivy.array([1, 2]), b=ivy.array([3, 4]))
>>> print(ivy.Container.static_expm1(x))
{
a: ivy.array([1.71828175, 6.38905621]),
b: ivy.array([19.08553696, 53.59815216])
}
"""
return ContainerBase.cont_multi_map_in_function(
"expm1",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def expm1(
self: ivy.Container,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.expm1. This method
simply wraps the function, and so the docstring for ivy.expm1 also
applies to this method with minimal changes.
Parameters
----------
self
input container. Should have a floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the evaluated result for each element in ``self``.
The returned array must have a real-valued floating-point data type
determined by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.Container(a=ivy.array([2.5, 0.5]),
... b=ivy.array([5.4, -3.2]))
>>> y = x.expm1()
>>> print(y)
{
a: ivy.array([11.2, 0.649]),
b: ivy.array([220., -0.959])
}
>>> y = ivy.Container(a=ivy.array([0., 0.]))
>>> x = ivy.Container(a=ivy.array([4., -2.]))
>>> x.expm1(out=y)
>>> print(y)
{
a: ivy.array([53.6, -0.865])
}
"""
return self._static_expm1(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_floor(
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.floor. This method simply
wraps the function, and so the docstring for ivy.floor also applies to
this method with minimal changes.
Parameters
----------
x
input container. Should have a numeric data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the rounded result for each element in ``x``. The
returned array must have the same data type as ``x``.
Examples
--------
>>> x = ivy.Container(a=ivy.array([2.5, 0.5, -1.4]),
... b=ivy.array([5.4, -3.2, 5.2]))
>>> y = ivy.Container.static_floor(x)
>>> print(y)
{
a: ivy.array([2., 0., -2.]),
b: ivy.array([5., -4., 5.])
}
"""
return ContainerBase.cont_multi_map_in_function(
"floor",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def floor(
self: ivy.Container,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.floor. This method
simply wraps the function, and so the docstring for ivy.floor also
applies to this method with minimal changes.
Parameters
----------
self
input container. Should have a numeric data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the rounded result for each element in ``self``.
The returned array must have the same data type as ``self``.
Examples
--------
>>> x = ivy.Container(a=ivy.array([2.5, 0.5, -1.4]),
... b=ivy.array([5.4, -3.2, 5.2]))
>>> y = x.floor()
>>> print(y)
{
a: ivy.array([2., 0., -2.]),
b: ivy.array([5., -4., 5.])
}
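A sketch of restricting application with ``key_chains`` (this assumes
leaves outside the selected chains are passed through unchanged while
``prune_unapplied`` is ``False``):
>>> x = ivy.Container(a=ivy.array([2.5, 0.5, -1.4]),
...                   b=ivy.array([5.4, -3.2, 5.2]))
>>> y = x.floor(key_chains=['a'])
>>> print(y)
{
    a: ivy.array([2., 0., -2.]),
    b: ivy.array([5.4, -3.2, 5.2])
}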
"""
return self._static_floor(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_floor_divide(
x1: Union[ivy.Array, ivy.NativeArray, ivy.Container],
x2: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.floor_divide. This method
simply wraps the function, and so the docstring for ivy.floor_divide
also applies to this method with minimal changes.
Parameters
----------
x1
dividend input array or container. Should have a real-valued data type.
x2
divisor input array or container. Must be compatible with ``x1``
(see :ref:`broadcasting`).
Should have a real-valued data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise results.
The returned container must have a data type determined
by :ref:`type-promotion`.
Examples
--------
With :class:`ivy.Container` inputs:
>>> x1 = ivy.Container(a=ivy.array([4., 5., 6.]), b=ivy.array([7., 8., 9.]))
>>> x2 = ivy.Container(a=ivy.array([5., 4., 2.5]), b=ivy.array([2.3, 3.7, 5]))
>>> y = ivy.Container.static_floor_divide(x1, x2)
>>> print(y)
{
a: ivy.array([0., 1., 2.]),
b: ivy.array([3., 2., 1.])
}
With mixed :class:`ivy.Container` and :class:`ivy.Array` inputs:
>>> x1 = ivy.Container(a=ivy.array([4., 5., 6.]), b=ivy.array([7., 8., 9.]))
>>> x2 = ivy.array([2, 3, 4])
>>> y = ivy.Container.static_floor_divide(x1, x2)
>>> print(y)
{
a: ivy.array([2., 1., 1.]),
b: ivy.array([3., 2., 2.])
}
"""
return ContainerBase.cont_multi_map_in_function(
"floor_divide",
x1,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def floor_divide(
self: ivy.Container,
x2: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.floor_divide. This
method simply wraps the function, and so the docstring for
ivy.floor_divide also applies to this method with minimal changes.
Parameters
----------
self
dividend input array or container. Should have a real-valued
data type.
x2
divisor input array or container. Must be compatible with ``self``
(see :ref:`broadcasting`).
Should have a real-valued data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise results.
The returned container must have a data type determined
by :ref:`type-promotion`.
Examples
--------
With :class:`ivy.Container` inputs:
>>> x1 = ivy.Container(a=ivy.array([4., 5., 6.]), b=ivy.array([7., 8., 9.]))
>>> x2 = ivy.Container(a=ivy.array([5., 4., 2.5]), b=ivy.array([2.3, 3.7, 5]))
>>> y = x1.floor_divide(x2)
>>> print(y)
{
a: ivy.array([0., 1., 2.]),
b: ivy.array([3., 2., 1.])
}
With mixed :class:`ivy.Container` and :class:`ivy.Array` inputs:
>>> x1 = ivy.Container(a=ivy.array([4., 5., 6.]), b=ivy.array([7., 8., 9.]))
>>> x2 = ivy.array([2, 3, 4])
>>> y = x1.floor_divide(x2)
>>> print(y)
{
a: ivy.array([2., 1., 1.]),
b: ivy.array([3., 2., 2.])
}
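Note that rounding is toward negative infinity, as a quick sketch with
a negative dividend shows:
>>> x1 = ivy.Container(a=ivy.array([-7., 7.]))
>>> x2 = ivy.array([2., 2.])
>>> y = x1.floor_divide(x2)
>>> print(y)
{
    a: ivy.array([-4., 3.])
}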
"""
return self._static_floor_divide(
self,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def static_fmin(
x1: Union[ivy.Array, ivy.NativeArray, ivy.Container],
x2: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.fmin. This method simply
wraps the function, and so the docstring for ivy.fmin also applies to
this method with minimal changes.
Parameters
----------
x1
container with the first input arrays.
x2
container with the second input arrays.
out
optional output container, for writing the result to.
Returns
-------
ret
Container including arrays with element-wise minimums.
Examples
--------
>>> x1 = ivy.Container(a=ivy.array([2, 3, 4]),\
b=ivy.array([ivy.nan, 0, ivy.nan]))
>>> x2 = ivy.Container(a=ivy.array([1, 5, 2]),\
b=ivy.array([0, ivy.nan, ivy.nan]))
>>> ivy.Container.static_fmin(x1, x2)
{
a: ivy.array([1, 3, 2]),
b: ivy.array([0., 0., nan])
}
"""
return ContainerBase.cont_multi_map_in_function(
"fmin",
x1,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def fmin(
self: ivy.Container,
x2: ivy.Container,
/,
*,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.fmin. This method
simply wraps the function, and so the docstring for ivy.fmin also
applies to this method with minimal changes.
Parameters
----------
self
container with the first input arrays.
x2
container with the second input arrays.
out
optional output container, for writing the result to.
Returns
-------
ret
Container including arrays with element-wise minimums.
Examples
--------
>>> x1 = ivy.Container(a=ivy.array([2, 3, 4]),\
b=ivy.array([ivy.nan, 0, ivy.nan]))
>>> x2 = ivy.Container(a=ivy.array([1, 5, 2]),\
b=ivy.array([0, ivy.nan, ivy.nan]))
>>> x1.fmin(x2)
{
a: ivy.array([1, 3, 2]),
b: ivy.array([0., 0., nan])
}
"""
return self.static_fmin(self, x2, out=out)
@staticmethod
def _static_greater(
x1: Union[ivy.Array, ivy.NativeArray, ivy.Container],
x2: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.greater. This method
simply wraps the function, and so the docstring for ivy.greater also
applies to this method with minimal changes.
Parameters
----------
x1
input array or container. Should have a real-valued data type.
x2
divisor input array or container. Must be compatible with ``x1``
(see :ref:`broadcasting`).
Should have a real-valued data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise results. The returned array must
have a data type of ``bool``.
Examples
--------
>>> x = ivy.Container(a=ivy.array([4, 5, 6]),
... b=ivy.array([2, 3, 4]))
>>> y = ivy.Container(a=ivy.array([1, 2, 3]),
... b=ivy.array([5, 6, 7]))
>>> z = ivy.Container.static_greater(y,x)
>>> print(z)
{
a: ivy.array([False, False, False]),
b: ivy.array([True, True, True])
}
"""
return ContainerBase.cont_multi_map_in_function(
"greater",
x1,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def greater(
self: ivy.Container,
x2: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.greater. This method
simply wraps the function, and so the docstring for ivy.greater also
applies to this method with minimal changes.
Parameters
----------
self
input array or container. Should have a real-valued data type.
x2
divisor input array or container. Must be compatible with ``self``
(see :ref:`broadcasting`).
Should have a real-valued data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise results. The returned array must
have a data type of ``bool``.
Examples
--------
>>> x = ivy.Container(a=ivy.array([4, 5, 6]),
... b=ivy.array([2, 3, 4]))
>>> y = ivy.Container(a=ivy.array([1, 2, 3]),
... b=ivy.array([5, 6, 7]))
>>> z = x.greater(y)
>>> print(z)
{
a: ivy.array([True, True, True]),
b: ivy.array([False, False, False])
}
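A sketch with a zero-dimensional array on the right-hand side (this
assumes it broadcasts against every leaf):
>>> x = ivy.Container(a=ivy.array([1, 2, 3]))
>>> z = x.greater(ivy.array(2))
>>> print(z)
{
    a: ivy.array([False, False, True])
}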
"""
return self._static_greater(
self,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_greater_equal(
x1: Union[ivy.Array, ivy.NativeArray, ivy.Container],
x2: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.greater_equal. This
method simply wraps the function, and so the docstring for
ivy.greater_equal also applies to this method with minimal changes.
Parameters
----------
x1
input array or container. Should have a real-valued data type.
x2
input array or container. Must be compatible with ``x1``
(see :ref:`broadcasting`).
Should have a real-valued data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise results. The returned container
must have a data type of ``bool``.
Examples
--------
>>> x = ivy.Container(a=ivy.array([4, 5, 6]),
... b=ivy.array([2, 3, 4]))
>>> y = ivy.Container(a=ivy.array([1, 2, 3]),
... b=ivy.array([5, 6, 7]))
>>> z = ivy.Container.static_greater_equal(x, y)
>>> print(z)
{
    a: ivy.array([True, True, True]),
    b: ivy.array([False, False, False])
}
"""
return ContainerBase.cont_multi_map_in_function(
"greater_equal",
x1,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def greater_equal(
self: ivy.Container,
x2: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.greater_equal. This
method simply wraps the function, and so the docstring for
ivy.greater_equal also applies to this method with minimal changes.
Parameters
----------
self
input array or container. Should have a real-valued data type.
x2
input array or container. Must be compatible with ``self``
(see :ref:`broadcasting`).
Should have a real-valued data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise results. The returned container
must have a data type of ``bool``.
Examples
--------
>>> x = ivy.Container(a=ivy.array([4, 5, 6]),
... b=ivy.array([2, 3, 4]))
>>> y = ivy.Container(a=ivy.array([1, 2, 3]),
... b=ivy.array([5, 6, 7]))
>>> z = x.greater_equal(y)
>>> print(z)
{
    a: ivy.array([True, True, True]),
    b: ivy.array([False, False, False])
}
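With mixed :class:`ivy.Container` and :class:`ivy.Array` inputs (a
sketch assuming standard broadcasting):
>>> x = ivy.Container(a=ivy.array([4, 5, 6]))
>>> y = ivy.array([4, 6, 5])
>>> z = x.greater_equal(y)
>>> print(z)
{
    a: ivy.array([True, False, True])
}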
"""
return self._static_greater_equal(
self,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_isfinite(
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.isfinite. This method
simply wraps the function, and so the docstring for ivy.isfinite also
applies to this method with minimal changes.
Parameters
----------
x
input container. Should have a real-valued data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the test result. An element ``out_i`` is ``True``
if ``x_i`` is finite and ``False`` otherwise.
The returned array must have a data type of ``bool``.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0., 999999999999]),
... b=ivy.array([float('-0'), ivy.nan]))
>>> y = ivy.Container.static_isfinite(x)
>>> print(y)
{
a: ivy.array([True, True]),
b: ivy.array([True, False])
}
"""
return ContainerBase.cont_multi_map_in_function(
"isfinite",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def isfinite(
self: ivy.Container,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.isfinite. This method
simply wraps the function, and so the docstring for ivy.isfinite also
applies to this method with minimal changes.
Parameters
----------
self
input container. Should have a real-valued data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the test result. An element ``out_i`` is ``True``
if ``self_i`` is finite and ``False`` otherwise.
The returned array must have a data type of ``bool``.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0., 999999999999]),
... b=ivy.array([float('-0'), ivy.nan]))
>>> y = x.isfinite()
>>> print(y)
{
a: ivy.array([True, True]),
b: ivy.array([True, False])
}
"""
return self._static_isfinite(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_isinf(
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
detect_positive: Union[bool, ivy.Container] = True,
detect_negative: Union[bool, ivy.Container] = True,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.isinf. This method simply
wraps the function, and so the docstring for ivy.isinf also applies to
this method with minimal changes.
Parameters
----------
x
input container. Should have a real-valued data type.
detect_positive
if ``True``, positive infinity is detected.
detect_negative
if ``True``, negative infinity is detected.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the test result. An element ``out_i`` is ``True``
if ``x_i`` is either positive or negative infinity and ``False``
otherwise. The returned array must have a data type of ``bool``.
Examples
--------
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([-1, -float('inf'), 1.23]),
... b=ivy.array([float('inf'), 3.3, -4.2]))
>>> z = ivy.Container.static_isinf(x)
>>> print(z)
{
a: ivy.array([False, True, False]),
b: ivy.array([True, False, False])
}
"""
return ContainerBase.cont_multi_map_in_function(
"isinf",
x,
detect_positive=detect_positive,
detect_negative=detect_negative,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def isinf(
self: ivy.Container,
*,
detect_positive: Union[bool, ivy.Container] = True,
detect_negative: Union[bool, ivy.Container] = True,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.isinf. This method
simply wraps the function, and so the docstring for ivy.isinf also
applies to this method with minimal changes.
Parameters
----------
self
input container. Should have a real-valued data type.
detect_positive
if ``True``, positive infinity is detected.
detect_negative
if ``True``, negative infinity is detected.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the test result. An element ``out_i`` is ``True``
if ``self_i`` is either positive or negative infinity and ``False``
otherwise. The returned array must have a data type of ``bool``.
Examples
--------
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([-1, -float('inf'), 1.23]),
... b=ivy.array([float('inf'), 3.3, -4.2]))
>>> z = x.isinf()
>>> print(z)
{
a: ivy.array([False, True, False]),
b: ivy.array([True, False, False])
}
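A sketch of the ``detect_negative`` flag; with it disabled, only
positive infinity should be reported:
>>> z = x.isinf(detect_negative=False)
>>> print(z)
{
    a: ivy.array([False, False, False]),
    b: ivy.array([True, False, False])
}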
"""
return self._static_isinf(
self,
detect_positive=detect_positive,
detect_negative=detect_negative,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_isnan(
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.isnan. This method simply
wraps the function, and so the docstring for ivy.isnan also applies to
this method with minimal changes.
Parameters
----------
x
input container. Should have a real-valued data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the test result. An element ``out_i`` is ``True``
if ``x_i`` is ``NaN`` and ``False`` otherwise.
The returned array should have a data type of ``bool``.
Examples
--------
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([-1, -float('nan'), 1.23]),
... b=ivy.array([float('nan'), 3.3, -4.2]))
>>> z = ivy.Container.static_isnan(x)
>>> print(z)
{
a: ivy.array([False, True, False]),
b: ivy.array([True, False, False])
}
"""
return ContainerBase.cont_multi_map_in_function(
"isnan",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def isnan(
self: ivy.Container,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.isnan. This method
simply wraps the function, and so the docstring for ivy.isnan also
applies to this method with minimal changes.
Parameters
----------
self
input container. Should have a real-valued data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the test result. An element ``out_i`` is ``True``
if ``self_i`` is ``NaN`` and ``False`` otherwise.
The returned array should have a data type of ``bool``.
Examples
--------
>>> x = ivy.Container(a=ivy.array([-1, -float('nan'), 1.23]),
... b=ivy.array([float('nan'), 3.3, -4.2]))
>>> y = x.isnan()
>>> print(y)
{
a: ivy.array([False, True, False]),
b: ivy.array([True, False, False])
}
"""
return self._static_isnan(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_less(
x1: Union[ivy.Array, ivy.NativeArray, ivy.Container],
x2: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.less. This method simply
wraps the function, and so the docstring for ivy.less also applies to
this method with minimal changes.
Parameters
----------
x1
input array or container. Should have a real-valued data type.
x2
input array or container. Must be compatible with ``x1``
(see :ref:`broadcasting`).
Should have a real-valued data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise results. The returned container
must have a data type of ``bool``.
Examples
--------
>>> x = ivy.Container(a=ivy.array([4, 5, 6]),
... b=ivy.array([2, 3, 4]))
>>> y = ivy.Container(a=ivy.array([1, 2, 3]),
... b=ivy.array([5, 6, 7]))
>>> z = ivy.Container.static_less(y,x)
>>> print(z)
{
a: ivy.array([True, True, True]),
b: ivy.array([False, False, False])
}
"""
return ContainerBase.cont_multi_map_in_function(
"less",
x1,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def less(
self: ivy.Container,
x2: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.less. This method
simply wraps the function, and so the docstring for ivy.less also
applies to this method with minimal changes.
Parameters
----------
self
input array or container. Should have a real-valued data type.
x2
input array or container. Must be compatible with ``self``
(see :ref:`broadcasting`).
Should have a real-valued data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise results. The returned container
must have a data type of ``bool``.
Examples
--------
>>> x = ivy.Container(a=ivy.array([4, 5, 6]),
... b=ivy.array([2, 3, 4]))
>>> y = ivy.Container(a=ivy.array([1, 2, 3]),
... b=ivy.array([5, 6, 7]))
>>> z = x.less(y)
>>> print(z)
{
a: ivy.array([False, False, False]),
b: ivy.array([True, True, True])
}
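With mixed :class:`ivy.Container` and :class:`ivy.Array` inputs (a
sketch assuming standard broadcasting):
>>> x = ivy.Container(a=ivy.array([4, 5, 6]), b=ivy.array([2, 3, 4]))
>>> z = x.less(ivy.array([5, 5, 5]))
>>> print(z)
{
    a: ivy.array([True, False, False]),
    b: ivy.array([True, True, True])
}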
"""
return self._static_less(
self,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_less_equal(
x1: Union[ivy.Array, ivy.NativeArray, ivy.Container],
x2: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.less_equal. This method
simply wraps the function, and so the docstring for ivy.less_equal also
applies to this method with minimal changes.
Parameters
----------
x1
input array or container. Should have a real-valued data type.
x2
input array or container. Must be compatible with ``x1``
(see :ref:`broadcasting`).
Should have a real-valued data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise results. The returned container
must have a data type of ``bool``.
Examples
--------
With :class:`ivy.Container` inputs:
>>> x1 = ivy.Container(a=ivy.array([12, 3.5, 9.2]), b=ivy.array([2., 1.1, 5.5]))
>>> x2 = ivy.Container(a=ivy.array([12, 2.2, 4.1]), b=ivy.array([1, 0.7, 3.8]))
>>> y = ivy.Container.static_less_equal(x1, x2)
>>> print(y)
{
a: ivy.array([True, False, False]),
b: ivy.array([False, False, False])
}
"""
return ContainerBase.cont_multi_map_in_function(
"less_equal",
x1,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def less_equal(
self: ivy.Container,
x2: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.less_equal. This method
simply wraps the function, and so the docstring for ivy.less_equal also
applies to this method with minimal changes.
Parameters
----------
self
input array or container. Should have a real-valued data type.
x2
input array or container. Must be compatible with ``self``
(see :ref:`broadcasting`).
Should have a real-valued data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise results. The returned container
must have a data type of ``bool``.
Examples
--------
With :class:`ivy.Container` inputs:
>>> x1 = ivy.Container(a=ivy.array([12, 3.5, 9.2]), b=ivy.array([2., 1.1, 5.5]))
>>> x2 = ivy.Container(a=ivy.array([12, 2.2, 4.1]), b=ivy.array([1, 0.7, 3.8]))
>>> y = x1.less_equal(x2)
>>> print(y)
{
a: ivy.array([True, False, False]),
b: ivy.array([False, False, False])
}
With mixed :class:`ivy.Container` and :class:`ivy.Array` inputs:
>>> x1 = ivy.Container(a=ivy.array([12., 3.5, 9.2]), b=ivy.array([2., 1., 5.5]))
>>> x2 = ivy.array([2., 1.1, 5.5])
>>> y = x1.less_equal(x2)
>>> print(y)
{
a: ivy.array([False, False, False]),
b: ivy.array([True, True, True])
}
"""
return self._static_less_equal(
self,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_log(
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.log. This method simply
wraps the function, and so the docstring for ivy.log also applies to
this method with minimal changes.
Parameters
----------
x
input container. Should have a real-valued floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the log for each element in ``x``.
The returned array must have a real-valued floating-point data type
determined by :ref:`type-promotion`.
Examples
--------
Using :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0.0, float('nan')]),
... b=ivy.array([-0., -3.9, float('+inf')]),
... c=ivy.array([7.9, 1.1, 1.]))
>>> y = ivy.Container.static_log(x)
>>> print(y)
{
a: ivy.array([-inf, nan]),
b: ivy.array([-inf, nan, inf]),
c: ivy.array([2.07, 0.0953, 0.])
}
"""
return ContainerBase.cont_multi_map_in_function(
"log",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def log(
self: ivy.Container,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.log. This method simply
wraps the function, and so the docstring for ivy.log also applies to
this method with minimal changes.
Parameters
----------
self
input container. Should have a real-valued floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the log for each element in ``self``.
The returned array must have a real-valued floating-point data type
determined by :ref:`type-promotion`.
Examples
--------
Using :class:`ivy.Container` instance method:
>>> x = ivy.Container(a=ivy.array([0.0, float('nan')]),
... b=ivy.array([-0., -3.9, float('+inf')]),
... c=ivy.array([7.9, 1.1, 1.]))
>>> y = x.log()
>>> print(y)
{
a: ivy.array([-inf, nan]),
b: ivy.array([-inf, nan, inf]),
c: ivy.array([2.07, 0.0953, 0.])
}
"""
return self._static_log(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_log1p(
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.log1p. This method simply
wraps the function, and so the docstring for ivy.log1p also applies to
this method with minimal changes.
Parameters
----------
x
input container. Should have a real-valued floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the evaluated result for each element in ``x``.
The returned array must have a real-valued floating-point data type
determined by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]), b=ivy.array([3., 4., 5.1]))
>>> y = ivy.Container.static_log1p(x)
>>> print(y)
{
a: ivy.array([0., 0.693, 1.1]),
b: ivy.array([1.39, 1.61, 1.81])
}
>>> x = ivy.Container(a=ivy.array([0., 2.]), b=ivy.array([4., 5.1]))
>>> ivy.Container.static_log1p(x, out=x)
>>> print(x)
{
    a: ivy.array([0., 1.1]),
    b: ivy.array([1.61, 1.81])
}
"""
return ContainerBase.cont_multi_map_in_function(
"log1p",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def log1p(
self: ivy.Container,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.log1p. This method
simply wraps the function, and so the docstring for ivy.log1p also
applies to this method with minimal changes.
Parameters
----------
self
input container. Should have a real-valued floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the evaluated result for each element in ``self``.
The returned array must have a real-valued floating-point data type
determined by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1.6, 2.6, 3.5]),
... b=ivy.array([4.5, 5.3, 2.3]))
>>> y = x.log1p()
>>> print(y)
{
a: ivy.array([0.956, 1.28, 1.5]),
b: ivy.array([1.7, 1.84, 1.19])
}
"""
return self._static_log1p(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_log2(
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.log2. This method simply
wraps the function, and so the docstring for ivy.log2 also applies to
this method with minimal changes.
Parameters
----------
x
input container. Should have a real-valued floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the evaluated base ``2`` logarithm for
each element in ``x``. The returned array must have a real-valued
floating-point data type determined by :ref:`type-promotion`.
Examples
--------
Using :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0.0, float('nan')]),
...                   b=ivy.array([-0., -4.9, float('+inf')]),
...                   c=ivy.array([8.9, 2.1, 1.]))
>>> y = ivy.Container.static_log2(x)
>>> print(y)
{
a: ivy.array([-inf, nan]),
b: ivy.array([-inf, nan, inf]),
c: ivy.array([3.15, 1.07, 0.])
}
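As an illustrative sketch, exact powers of two make the results easy to
verify by hand:
>>> x = ivy.Container(a=ivy.array([1., 2., 4.]), b=ivy.array([8., 16., 32.]))
>>> y = ivy.Container.static_log2(x)
>>> print(y)
{
    a: ivy.array([0., 1., 2.]),
    b: ivy.array([3., 4., 5.])
}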
"""
return ContainerBase.cont_multi_map_in_function(
"log2",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def log2(
self: ivy.Container,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.log2. This method
simply wraps the function, and so the docstring for ivy.log2 also
applies to this method with minimal changes.
Parameters
----------
self
input container. Should have a real-valued floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the evaluated base ``2`` logarithm for each
element in ``self``. The returned array must have a real-valued
floating-point data type determined by :ref:`type-promotion`.
Examples
--------
Using :class:`ivy.Container` instance method:
>>> x = ivy.Container(a=ivy.array([0.0, float('nan')]),
... b=ivy.array([-0., -5.9, float('+inf')]),
... c=ivy.array([8.9, 2.1, 1.]))
>>> y = x.log2()
>>> print(y)
{
a: ivy.array([-inf, nan]),
b: ivy.array([-inf, nan, inf]),
c: ivy.array([3.15, 1.07, 0.])
}
"""
return self._static_log2(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_log10(
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.log10. This method simply
wraps the function, and so the docstring for ivy.log10 also applies to
this method with minimal changes.
Parameters
----------
x
input container. Should have a real-valued floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the evaluated base ``10`` logarithm for each
element in ``x``. The returned array must have a real-valued
floating-point data type determined by :ref:`type-promotion`.
Examples
--------
Using :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0.0, float('nan')]),
... b=ivy.array([-0., -3.9, float('+inf')]),
... c=ivy.array([7.9, 1.1, 1.]))
>>> y = ivy.Container.static_log10(x)
>>> print(y)
{
a: ivy.array([-inf, nan]),
b: ivy.array([-inf, nan, inf]),
c: ivy.array([0.898, 0.0414, 0.])
}
"""
return ContainerBase.cont_multi_map_in_function(
"log10",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def log10(
self: ivy.Container,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.log10. This method
simply wraps the function, and so the docstring for ivy.log10 also
applies to this method with minimal changes.
Parameters
----------
self
input container. Should have a real-valued floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the evaluated base ``10`` logarithm for
each element in ``self``. The returned array must have a real-valued
floating-point data type determined by :ref:`type-promotion`.
Examples
--------
Using :class:`ivy.Container` instance method:
>>> x = ivy.Container(a=ivy.array([0.0, float('nan')]),
... b=ivy.array([-0., -3.9, float('+inf')]),
... c=ivy.array([7.9, 1.1, 1.]))
>>> y = x.log10()
>>> print(y)
{
a: ivy.array([-inf, nan]),
b: ivy.array([-inf, nan, inf]),
c: ivy.array([0.898, 0.0414, 0.])
}
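As an illustrative sketch, exact powers of ten make the results easy to
verify by hand:
>>> x = ivy.Container(a=ivy.array([1., 10., 100.]),
...                   b=ivy.array([1000., 10000.]))
>>> y = x.log10()
>>> print(y)
{
    a: ivy.array([0., 1., 2.]),
    b: ivy.array([3., 4.])
}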
"""
return self._static_log10(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_logaddexp(
x1: Union[ivy.Array, ivy.NativeArray, ivy.Container],
x2: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.greater_equal. This
method simply wraps the function, and so the docstring for
ivy.greater_equal also applies to this method with minimal changes.
Parameters
----------
x1
input array or container. Should have a real-valued data type.
x2
input array or container. Must be compatible with ``x1``
(see :ref:`broadcasting`).
Should have a real-valued data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise results. The returned container
must have a real-valued floating-point data type determined
by :ref:`type-promotion`.
Examples
--------
Using :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([4., 5., 6.]),
... b=ivy.array([2., 3., 4.]))
>>> y = ivy.Container(a=ivy.array([1., 2., 3.]),
... b=ivy.array([5., 6., 7.]))
>>> z = ivy.Container.static_logaddexp(y,x)
>>> print(z)
{
a: ivy.array([4.05, 5.05, 6.05]),
b: ivy.array([5.05, 6.05, 7.05])
}
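Since ``logaddexp(x, x) = x + log(2)``, passing the same container twice
gives an easy sanity check (an illustrative sketch; printed values assume
ivy's default rounding):
>>> x = ivy.Container(a=ivy.array([0., 1.]), b=ivy.array([2., 3.]))
>>> z = ivy.Container.static_logaddexp(x, x)
>>> print(z)
{
    a: ivy.array([0.693, 1.69]),
    b: ivy.array([2.69, 3.69])
}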
"""
return ContainerBase.cont_multi_map_in_function(
"logaddexp",
x1,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def logaddexp(
self: ivy.Container,
x2: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.greater_equal. This
method simply wraps the function, and so the docstring for
ivy.greater_equal also applies to this method with minimal changes.
Parameters
----------
self
input array or container. Should have a real-valued data type.
x2
input array or container. Must be compatible with ``self``
(see :ref:`broadcasting`).
Should have a real-valued data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise results. The returned container
must have a real-valued floating-point data type determined
by :ref:`type-promotion`.
Examples
--------
Using :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([4., 5., 6.]),
... b=ivy.array([2., 3., 4.]))
>>> y = ivy.Container(a=ivy.array([1., 2., 3.]),
... b=ivy.array([5., 6., 7.]))
>>> z = y.logaddexp(x)
>>> print(z)
{
a: ivy.array([4.05, 5.05, 6.05]),
b: ivy.array([5.05, 6.05, 7.05])
}
"""
return self._static_logaddexp(
self,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def static_logaddexp2(
x1: Union[ivy.Array, ivy.NativeArray, ivy.Container, float, list, tuple],
x2: Union[ivy.Array, ivy.NativeArray, ivy.Container, float, list, tuple],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.logaddexp2. This method
simply wraps the function, and so the docstring for ivy.logaddexp2 also
applies to this method with minimal changes.
Parameters
----------
x1
first input container with array-like items.
x2
second input container with array-like items.
out
optional output container, for writing the result to.
Returns
-------
ret
Container including arrays with element-wise logaddexp2 of input arrays.
Examples
--------
>>> x1 = ivy.Container(a=ivy.array([1, 2, 3]),
...                    b=ivy.array([1, 2, 3]))
>>> x2 = ivy.Container(a=ivy.array([4, 5, 6]),
...                    b=5)
>>> ivy.Container.static_logaddexp2(x1, x2)
{
    a: ivy.array([4.169925, 5.169925, 6.169925]),
    b: ivy.array([5.08746284, 5.169925, 5.32192809])
}
"""
return ContainerBase.cont_multi_map_in_function(
"logaddexp2",
x1,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def logaddexp2(
self: ivy.Container,
x2: ivy.Container,
/,
*,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.logaddexp2. This method
simply wraps the function, and so the docstring for ivy.logaddexp2 also
applies to this method with minimal changes.
Parameters
----------
self
first input container with array-like items.
x2
second input container with array-like items.
out
optional output container, for writing the result to.
Returns
-------
ret
Container including arrays with element-wise logaddexp2 of input arrays.
Examples
--------
>>> x1 = ivy.Container(a=ivy.array([1, 2, 3]),
...                    b=ivy.array([1, 2, 3]))
>>> x2 = ivy.Container(a=ivy.array([4, 5, 6]),
...                    b=5)
>>> x1.logaddexp2(x2)
{
    a: ivy.array([4.169925, 5.169925, 6.169925]),
    b: ivy.array([5.08746284, 5.169925, 5.32192809])
}
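Since ``logaddexp2(x, x) = x + 1``, passing the same container twice gives
an easy sanity check (an illustrative sketch with exact printed values):
>>> x = ivy.Container(a=ivy.array([3., 4., 5.]))
>>> x.logaddexp2(x)
{
    a: ivy.array([4., 5., 6.])
}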
"""
return self.static_logaddexp2(self, x2, out=out)
@staticmethod
def _static_logical_and(
x1: Union[ivy.Array, ivy.NativeArray, ivy.Container],
x2: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.logical_and. This method
simply wraps the function, and so the docstring for ivy.logical_and
also applies to this method with minimal changes.
Parameters
----------
x1
input array or container. Should have a boolean data type.
x2
input array or container. Must be compatible with ``x1``
(see :ref:`broadcasting`).
Should have a boolean data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise results. The returned container
must have a data type of ``bool``.
Examples
--------
Using :class:`ivy.Container` input:
>>> a = ivy.Container(a=ivy.array([True, False, True, False]))
>>> b = ivy.Container(a=ivy.array([True, True, False, False]))
>>> w = ivy.Container.static_logical_and(a, b)
>>> print(w)
{
a: ivy.array([True, False, False, False])
}
>>> j = ivy.Container(a=ivy.array([True, True, False, False]))
>>> m = ivy.array([False, True, False, True])
>>> x = ivy.Container.static_logical_and(j, m)
>>> print(x)
{
a: ivy.array([False, True, False, False])
}
>>> k = ivy.Container(a=ivy.array([True, False, True]),
... b=ivy.array([True, False, False]))
>>> l = ivy.Container(a=ivy.array([True, True, True]),
... b=ivy.array([False, False, False]))
>>> z = ivy.Container.static_logical_and(k, l)
>>> print(z)
{
a: ivy.array([True, False, True]),
b: ivy.array([False, False, False])
}
"""
return ContainerBase.cont_multi_map_in_function(
"logical_and",
x1,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def logical_and(
self: ivy.Container,
x2: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.logical_and. This
method simply wraps the function, and so the docstring for
ivy.logical_and also applies to this method with minimal changes.
Parameters
----------
self
input array or container. Should have a boolean data type.
x2
input array or container. Must be compatible with ``self``
(see :ref:`broadcasting`).
Should have a boolean data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise results. The returned container
must have a data type of ``bool``.
Examples
--------
Using :class:`ivy.Container` instance method:
>>> a = ivy.Container(a=ivy.array([True, False, True, False]))
>>> b = ivy.Container(a=ivy.array([True, True, False, False]))
>>> w = a.logical_and(b)
>>> print(w)
{
a: ivy.array([True, False, False, False])
}
>>> j = ivy.Container(a=ivy.array([True, True, False, False]))
>>> m = ivy.array([False, True, False, True])
>>> x = j.logical_and(m)
>>> print(x)
{
a: ivy.array([False, True, False, False])
}
>>> k = ivy.Container(a=ivy.array([True, False, True]),
... b=ivy.array([True, False, False]))
>>> l = ivy.Container(a=ivy.array([True, True, True]),
... b=ivy.array([False, False, False]))
>>> z = k.logical_and(l)
>>> print(z)
{
a: ivy.array([True, False, True]),
b: ivy.array([False, False, False])
}
"""
return self._static_logical_and(
self,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_logical_not(
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.logical_not. This method
simply wraps the function, and so the docstring for ivy.logical_not
also applies to this method with minimal changes.
Parameters
----------
x
input container. Should have a boolean data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the evaluated result for each element in ``x``.
The returned container must have a data type of ``bool``.
Examples
--------
Using :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([1, 0, 0, 1]), b=ivy.array([3, 1, 7, 0]))
>>> ivy.Container.static_logical_not(x)
{
a: ivy.array([False, True, True, False]),
b: ivy.array([False, False, False, True])
}
"""
return ContainerBase.cont_multi_map_in_function(
"logical_not",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def logical_not(
self: ivy.Container,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.logical_not. This
method simply wraps the function, and so the docstring for
ivy.logical_not also applies to this method with minimal changes.
Parameters
----------
self
input container. Should have a boolean data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the evaluated result for each element in ``self``.
The returned container must have a data type of ``bool``.
Examples
--------
Using :class:`ivy.Container` instance method:
>>> x = ivy.Container(a=ivy.array([1, 0, 0, 1]), b=ivy.array([3, 1, 7, 0]))
>>> y = x.logical_not()
>>> print(y)
{
a: ivy.array([False, True, True, False]),
b: ivy.array([False, False, False, True])
}
>>> x = ivy.Container(a=ivy.array([1, 0, 1, 0]), b=ivy.native_array([5, 2, 0, 3]))
>>> y = x.logical_not()
>>> print(y)
{
a: ivy.array([False, True, False, True]),
b: ivy.array([False, False, True, False])
}
"""
return self._static_logical_not(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_logical_or(
x1: Union[ivy.Array, ivy.NativeArray, ivy.Container],
x2: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.logical_or. This method
simply wraps the function, and so the docstring for ivy.logical_or also
applies to this method with minimal changes.
Parameters
----------
x1
input array or container. Should have a boolean data type.
x2
input array or container. Must be compatible with ``x1``
(see :ref:`broadcasting`).
Should have a boolean data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise results. The returned container
must have a data type of ``bool``.
Examples
--------
>>> x = ivy.Container(a=ivy.array([False, False, True]),
... b=ivy.array([True, False, True]))
>>> y = ivy.Container(a=ivy.array([False, True, False]),
... b=ivy.array([True, True, False]))
>>> z = ivy.Container.static_logical_or(x, y)
>>> print(z)
{
a: ivy.array([False, True, True]),
b: ivy.array([True, True, True])
}
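With one :class:`ivy.Container` input (a sketch; the plain array is
broadcast against every leaf of the container):
>>> x = ivy.Container(a=ivy.array([False, True, False]),
...                   b=ivy.array([False, False, False]))
>>> y = ivy.array([True, False, False])
>>> z = ivy.Container.static_logical_or(x, y)
>>> print(z)
{
    a: ivy.array([True, True, False]),
    b: ivy.array([True, False, False])
}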
"""
return ContainerBase.cont_multi_map_in_function(
"logical_or",
x1,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def logical_or(
self: ivy.Container,
x2: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.logical_or. This method
simply wraps the function, and so the docstring for ivy.logical_or also
applies to this method with minimal changes.
Parameters
----------
self
input array or container. Should have a boolean data type.
x2
input array or container. Must be compatible with ``self``
(see :ref:`broadcasting`).
Should have a boolean data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise results. The returned container
must have a data type of ``bool``.
This function conforms to the `Array API Standard
<https://data-apis.org/array-api/latest/>`_. This docstring is an extension of
the `docstring <https://data-apis.org/array-api/latest/
API_specification/generated/array_api.logical_or.html>`_
in the standard.
Both the description and the type hints above assume an array input for
simplicity, but this function is *nestable*, and therefore also
accepts :class:`ivy.Container` instances in place of any of the arguments.
Examples
--------
Using :class:`ivy.Container` instance method:
>>> x = ivy.Container(a=ivy.array([False,True,True]),
... b=ivy.array([3.14, 2.718, 1.618]))
>>> y = ivy.Container(a=ivy.array([0, 5.2, 0.8]), b=ivy.array([0.2, 0, 0.9]))
>>> z = x.logical_or(y)
>>> print(z)
{
a: ivy.array([False, True, True]),
b: ivy.array([True, True, True])
}
"""
return self._static_logical_or(
self,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_logical_xor(
x1: Union[ivy.Array, ivy.NativeArray, ivy.Container],
x2: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.logical_xor. This method
simply wraps the function, and so the docstring for ivy.logical_xor
also applies to this method with minimal changes.
Parameters
----------
x1
input array or container. Should have a boolean data type.
x2
input array or container. Must be compatible with ``x1``
(see :ref:`broadcasting`).
Should have a boolean data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise results. The returned container
must have a data type of ``bool``.
Examples
--------
With one :class:`ivy.Container` input:
>>> x = ivy.array([0,0,1,1,0])
>>> y = ivy.Container(a=ivy.array([1,0,0,1,0]), b=ivy.array([1,0,1,0,0]))
>>> z = ivy.Container.static_logical_xor(x, y)
>>> print(z)
{
a: ivy.array([True, False, True, False, False]),
b: ivy.array([True, False, False, True, False])
}
With multiple :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([1,0,0,1,0]), b=ivy.array([1,0,1,0,0]))
>>> y = ivy.Container(a=ivy.array([0,0,1,1,0]), b=ivy.array([1,0,1,1,0]))
>>> z = ivy.Container.static_logical_xor(x, y)
>>> print(z)
{
a: ivy.array([True, False, True, False, False]),
b: ivy.array([False, False, False, True, False])
}
"""
return ContainerBase.cont_multi_map_in_function(
"logical_xor",
x1,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def logical_xor(
self: ivy.Container,
x2: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.logical_xor. This
method simply wraps the function, and so the docstring for
ivy.logical_xor also applies to this method with minimal changes.
Parameters
----------
self
input array or container. Should have a boolean data type.
x2
input array or container. Must be compatible with ``self``
(see :ref:`broadcasting`).
Should have a boolean data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise results. The returned container
must have a data type of ``bool``.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1,0,0,1,0]), b=ivy.array([1,0,1,0,0]))
>>> y = ivy.Container(a=ivy.array([0,0,1,1,0]), b=ivy.array([1,0,1,1,0]))
>>> z = x.logical_xor(y)
>>> print(z)
{
a: ivy.array([True, False, True, False, False]),
b: ivy.array([False, False, False, True, False])
}
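With mixed :class:`ivy.Container` and :class:`ivy.Array` inputs (a sketch;
the plain array is broadcast against every leaf of the container):
>>> x = ivy.Container(a=ivy.array([True, False, True]),
...                   b=ivy.array([False, False, True]))
>>> y = ivy.array([True, True, False])
>>> z = x.logical_xor(y)
>>> print(z)
{
    a: ivy.array([False, True, True]),
    b: ivy.array([True, True, True])
}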
"""
return self._static_logical_xor(
self,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_multiply(
x1: Union[ivy.Array, ivy.NativeArray, ivy.Container],
x2: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.multiply. This method
simply wraps the function, and so the docstring for ivy.multiply also
applies to this method with minimal changes.
Parameters
----------
x1
input array or container. Should have a real-valued data type.
x2
input array or container. Must be compatible with ``x1``
(see :ref:`broadcasting`).
Should have a real-valued data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise results.
The returned container must have a data type determined
by :ref:`type-promotion`.
Examples
--------
With :class:`ivy.Container` inputs:
>>> x1 = ivy.Container(a=ivy.array([15., 4.5, 6.5]),
...                    b=ivy.array([3.2, 5., 7.5]))
>>> x2 = ivy.Container(a=ivy.array([1.7, 2.8, 3.]),
...                    b=ivy.array([5.6, 1.2, 4.2]))
>>> y = ivy.Container.static_multiply(x1, x2)
>>> print(y)
{
a: ivy.array([25.5, 12.6, 19.5]),
b: ivy.array([17.9, 6., 31.5])
}
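With mixed :class:`ivy.Container` and :class:`ivy.Array` inputs (a sketch;
the array is broadcast against every leaf of the container, and the chosen
values keep the products exact):
>>> x1 = ivy.Container(a=ivy.array([1.5, 2., 3.]), b=ivy.array([4., 5., 6.]))
>>> x2 = ivy.array([2., 10., 0.5])
>>> y = ivy.Container.static_multiply(x1, x2)
>>> print(y)
{
    a: ivy.array([3., 20., 1.5]),
    b: ivy.array([8., 50., 3.])
}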
"""
return ContainerBase.cont_multi_map_in_function(
"multiply",
x1,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def multiply(
self: ivy.Container,
x2: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.multiply. This method
simply wraps the function, and so the docstring for ivy.multiply also
applies to this method with minimal changes.
Parameters
----------
self
first input array or container. Should have a numeric data type.
x2
second input array or container. Must be compatible with ``self``
(see :ref:`broadcasting`).
Should have a numeric data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise products.
The returned container must have a data type determined
by :ref:`type-promotion`.
Examples
--------
With :class:`ivy.Container` inputs:
>>> x1 = ivy.Container(a=ivy.array([15., 4.5, 6.5]),
...                    b=ivy.array([3.2, 5., 7.5]))
>>> x2 = ivy.Container(a=ivy.array([1.7, 2.8, 3.]),
...                    b=ivy.array([5.6, 1.2, 4.2]))
>>> y = x1.multiply(x2)
>>> print(y)
{
a: ivy.array([25.5, 12.6, 19.5]),
b: ivy.array([17.9, 6., 31.5])
}
With mixed :class:`ivy.Container` and :class:`ivy.Array` inputs:
>>> x1 = ivy.Container(a=ivy.array([6.2, 4.8, 2.3]),
...                    b=ivy.array([5., 1.7, 0.1]))
>>> x2 = ivy.array([8.3, 3.2, 6.5])
>>> y = x1.multiply(x2)
>>> print(y)
{
a: ivy.array([51.5, 15.4, 14.9]),
b: ivy.array([41.5, 5.44, 0.65])
}
"""
return self._static_multiply(
self,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_negative(
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.negative. This method
simply wraps the function, and so the docstring for ivy.negative also
applies to this method with minimal changes.
Parameters
----------
x
input container. Should have a numeric data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the evaluated result for each element in ``x``.
The returned container must have the same data type as ``x``.
Examples
--------
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]),
... b=ivy.array([3., 4., -5.]))
>>> y = ivy.Container.static_negative(x)
>>> print(y)
{
a: ivy.array([-0., -1., -2.]),
b: ivy.array([-3., -4., 5.])
}
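Integer containers keep their integer data type (an illustrative sketch):
>>> x = ivy.Container(a=ivy.array([-1, 0, 5]))
>>> y = ivy.Container.static_negative(x)
>>> print(y)
{
    a: ivy.array([1, 0, -5])
}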
"""
return ContainerBase.cont_multi_map_in_function(
"negative",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def negative(
self: ivy.Container,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.negative. This method
simply wraps the function, and so the docstring for ivy.negative also
applies to this method with minimal changes.
Parameters
----------
self
input container. Should have a numeric data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the evaluated result for each element in ``self``.
The returned container must have the same data type as ``self``.
Examples
--------
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]),
... b=ivy.array([3., 4., -5.]))
>>> y = x.negative()
>>> print(y)
{
a: ivy.array([-0., -1., -2.]),
b: ivy.array([-3., -4., 5.])
}
"""
return self._static_negative(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_not_equal(
x1: Union[ivy.Array, ivy.NativeArray, ivy.Container],
x2: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.not_equal. This method
simply wraps the function, and so the docstring for ivy.not_equal also
applies to this method with minimal changes.
Parameters
----------
x1
input array or container. May have any data type.
x2
input array or container. Must be compatible with ``x1``
(see :ref:`broadcasting`).
May have any data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise results. The returned container
must have a data type of ``bool``.
Examples
--------
With :class:`ivy.Container` inputs:
>>> x1 = ivy.Container(a=ivy.array([12, 3.5, 6.3]), b=ivy.array([3., 1., 0.9]))
>>> x2 = ivy.Container(a=ivy.array([12, 2.3, 3]), b=ivy.array([2.4, 3., 2.]))
>>> y = ivy.Container.static_not_equal(x1, x2)
>>> print(y)
{
a: ivy.array([False, True, True]),
b: ivy.array([True, True, True])
}
"""
return ContainerBase.cont_multi_map_in_function(
"not_equal",
x1,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def not_equal(
self: ivy.Container,
x2: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.not_equal. This method
simply wraps the function, and so the docstring for ivy.not_equal also
applies to this method with minimal changes.
Parameters
----------
self
input array or container. May have any data type.
x2
input array or container. Must be compatible with ``self``
(see :ref:`broadcasting`).
May have any data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise results. The returned container
must have a data type of ``bool``.
Examples
--------
With :class:`ivy.Container` inputs:
>>> x1 = ivy.Container(a=ivy.array([12, 3.5, 6.3]), b=ivy.array([3., 1., 0.9]))
>>> x2 = ivy.Container(a=ivy.array([12, 2.3, 3]), b=ivy.array([2.4, 3., 2.]))
>>> y = x1.not_equal(x2)
>>> print(y)
{
a: ivy.array([False, True, True]),
b: ivy.array([True, True, True])
}
With mixed :class:`ivy.Container` and :class:`ivy.Array` inputs:
>>> x1 = ivy.Container(a=ivy.array([12., 3.5, 6.3]), b=ivy.array([3., 1., 0.9]))
>>> x2 = ivy.array([3., 1., 0.9])
>>> y = x1.not_equal(x2)
>>> print(y)
{
a: ivy.array([True, True, True]),
b: ivy.array([False, False, False])
}
"""
return self._static_not_equal(
self,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_positive(
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.positive. This method
simply wraps the function, and so the docstring for ivy.positive also
applies to this method with minimal changes.
Parameters
----------
x
input container. Should have a numeric data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the evaluated result for each element in ``x``.
The returned container must have the same data type as ``x``.
Examples
--------
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]),
... b=ivy.array([3., 4., -5.]))
>>> y = ivy.Container.static_positive(x)
>>> print(y)
{
a: ivy.array([0., 1., 2.]),
b: ivy.array([3., 4., -5.])
}
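Note that ``positive`` returns a new container with unchanged values, so
integer inputs pass through as-is (an illustrative sketch):
>>> x = ivy.Container(a=ivy.array([-1, 0, 5]))
>>> y = ivy.Container.static_positive(x)
>>> print(y)
{
    a: ivy.array([-1, 0, 5])
}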
"""
return ContainerBase.cont_multi_map_in_function(
"positive",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def positive(
self: ivy.Container,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.positive. This method
simply wraps the function, and so the docstring for ivy.positive also
applies to this method with minimal changes.
Parameters
----------
self
input container. Should have a numeric data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the evaluated result for each element in ``self``.
The returned container must have the same data type as ``self``.
Examples
--------
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]),
... b=ivy.array([3., 4., -5.]))
>>> y = x.positive()
>>> print(y)
{
a: ivy.array([0., 1., 2.]),
b: ivy.array([3., 4., -5.])
}
"""
return self._static_positive(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_pow(
x1: Union[ivy.Array, ivy.NativeArray, ivy.Container],
x2: Union[int, float, ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.pow. This method simply
wraps the function, and so the docstring for ivy.pow also applies to
this method with minimal changes.
Parameters
----------
x1
input array or container. Should have a real-valued data type.
x2
input array or container. Must be compatible with ``x1``
(see :ref:`broadcasting`).
Should have a real-valued data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise results. The returned container
must have a data type determined by :ref:`type-promotion`.
Examples
--------
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0, 1]), b=ivy.array([2, 3]))
>>> y = ivy.Container.static_pow(x, 2)
>>> print(y)
{
a: ivy.array([0, 1]),
b: ivy.array([4, 9])
}
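Fractional exponents are also supported; using perfect squares keeps the
printed values exact (an illustrative sketch):
>>> x = ivy.Container(a=ivy.array([4., 9.]), b=ivy.array([16., 25.]))
>>> y = ivy.Container.static_pow(x, 0.5)
>>> print(y)
{
    a: ivy.array([2., 3.]),
    b: ivy.array([4., 5.])
}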
"""
return ContainerBase.cont_multi_map_in_function(
"pow",
x1,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def pow(
self: ivy.Container,
x2: Union[int, float, ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.pow. This method simply
wraps the function, and so the docstring for ivy.pow also applies to
this method with minimal changes.
Parameters
----------
self
input array or container. Should have a real-valued data type.
x2
input array or container. Must be compatible with ``self``
(see :ref:`broadcasting`).
Should have a real-valued data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise results. The returned container
must have a data type determined by :ref:`type-promotion`.
Examples
--------
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0, 1]), b=ivy.array([2, 3]))
>>> y = x.pow(3)
>>> print(y)
{
a: ivy.array([0, 1]),
b: ivy.array([8, 27])
}
"""
return self._static_pow(
self,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def static_real(
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.real. This method simply
wraps the function, and so the docstring for ivy.real also applies to
this method with minimal changes.
Parameters
----------
x
input container. Should have a real-valued floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the real part of each element in ``x``. If an
element ``x_i`` is real-valued, ``out_i`` equals ``x_i``; if ``x_i`` is
complex, ``out_i`` is its real part.
The returned container should have a floating-point data type.
Examples
--------
>>> x = ivy.Container(a=ivy.array([-1+5j, 0-0j, 1.23j]),
... b=ivy.array([7.9, 0.31+3.3j, -4.2-5.9j]))
>>> z = ivy.Container.static_real(x)
>>> print(z)
{
a: ivy.array([-1., 0., 0.]),
b: ivy.array([7.9, 0.31, -4.2])
}
"""
return ContainerBase.cont_multi_map_in_function(
"real",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def real(
self: ivy.Container,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.real. This method
simply wraps the function, and so the docstring for ivy.real also
applies to this method with minimal changes.
Parameters
----------
self
input container. Should have a real-valued floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the real part of each element in ``self``. If an
element ``self_i`` is real-valued, ``out_i`` equals ``self_i``; if
``self_i`` is complex, ``out_i`` is its real part.
The returned container should have a floating-point data type.
Examples
--------
>>> x = ivy.Container(a=ivy.array([-1j, 0.335+2.345j, 1.23+7j]),
...                   b=ivy.array([0.0, 1.2+3.3j, 1+0j]))
>>> x.real()
{
a: ivy.array([0., 0.335, 1.23]),
b: ivy.array([0., 1.2, 1.])
}
"""
return self.static_real(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_remainder(
x1: Union[ivy.Array, ivy.NativeArray, ivy.Container],
x2: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
modulus: Union[bool, ivy.Container] = True,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.remainder. This method
simply wraps the function, and so the docstring for ivy.remainder also
applies to this method with minimal changes.
Parameters
----------
x1
input array or container. Should have a real-valued data type.
x2
input array or container. Must be compatible with ``x1``
(see :ref:`broadcasting`).
Should have a real-valued data type.
modulus
whether to compute the modulus instead of the remainder.
Default is ``True``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise results. The returned container
must have the same sign as the respective element ``x2_i``.
Examples
--------
With :class:`ivy.Container` inputs:
>>> x1 = ivy.Container(a=ivy.array([2., 3., 5.]), b=ivy.array([2., 2., 4.]))
>>> x2 = ivy.Container(a=ivy.array([1., 3., 4.]), b=ivy.array([1., 3., 3.]))
>>> y = ivy.Container.static_remainder(x1, x2)
>>> print(y)
{
a: ivy.array([0., 0., 1.]),
b: ivy.array([0., 2., 1.])
}
With mixed :class:`ivy.Container` and `ivy.Array` inputs:
>>> x1 = ivy.Container(a=ivy.array([2., 3., 5.]), b=ivy.array([2., 2., 4.]))
>>> x2 = ivy.array([1., 2., 3.])
>>> y = ivy.Container.static_remainder(x1, x2)
>>> print(y)
{
a: ivy.array([0., 1., 2.]),
b: ivy.array([0., 0., 1.])
}
With mixed :class:`ivy.Container` and `ivy.NativeArray` inputs:
>>> x1 = ivy.Container(a=ivy.array([2., 3., 5.]), b=ivy.array([2., 2., 4.]))
>>> x2 = ivy.native_array([1., 2., 3.])
>>> y = ivy.Container.static_remainder(x1, x2)
>>> print(y)
{
a: ivy.array([0., 1., 2.]),
b: ivy.array([0., 0., 1.])
}
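Setting ``modulus=False`` yields the truncated-division remainder, whose
sign follows the dividend; an illustrative sketch:
>>> x1 = ivy.Container(a=ivy.array([-3., 4., 5.]))
>>> x2 = ivy.array([2., 2., 2.])
>>> y = ivy.Container.static_remainder(x1, x2, modulus=False)
>>> print(y)
{
a: ivy.array([-1., 0., 1.])
}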
"""
return ContainerBase.cont_multi_map_in_function(
"remainder",
x1,
x2,
modulus=modulus,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def remainder(
self: ivy.Container,
x2: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
modulus: Union[bool, ivy.Container] = True,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.remainder. This method
simply wraps the function, and so the docstring for ivy.remainder also
applies to this method with minimal changes.
Parameters
----------
self
input array or container. Should have a real-valued data type.
x2
input array or container. Must be compatible with ``self``
(see :ref:`broadcasting`).
Should have a real-valued data type.
modulus
whether to compute the modulus instead of the remainder.
Default is ``True``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise results. The returned container
must have the same sign as the respective element ``x2_i``.
Examples
--------
With :class:`ivy.Container` inputs:
>>> x1 = ivy.Container(a=ivy.array([2., 3., 5.]), b=ivy.array([2., 2., 4.]))
>>> x2 = ivy.Container(a=ivy.array([1., 3., 4.]), b=ivy.array([1., 3., 3.]))
>>> y = x1.remainder(x2)
>>> print(y)
{
a: ivy.array([0., 0., 1.]),
b: ivy.array([0., 2., 1.])
}
With mixed :class:`ivy.Container` and `ivy.Array` inputs:
>>> x1 = ivy.Container(a=ivy.array([2., 3., 5.]), b=ivy.array([2., 2., 4.]))
>>> x2 = ivy.array([1., 2., 3.])
>>> y = x1.remainder(x2)
>>> print(y)
{
a: ivy.array([0., 1., 2.]),
b: ivy.array([0., 0., 1.])
}
With mixed :class:`ivy.Container` and `ivy.NativeArray` inputs:
>>> x1 = ivy.Container(a=ivy.array([2., 3., 5.]), b=ivy.array([2., 2., 4.]))
>>> x2 = ivy.native_array([1., 2., 3.])
>>> y = x1.remainder(x2)
>>> print(y)
{
a: ivy.array([0., 1., 2.]),
b: ivy.array([0., 0., 1.])
}
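With ``modulus=False`` the result takes the sign of ``self`` instead; a
minimal sketch assuming truncated-division semantics:
>>> x1 = ivy.Container(a=ivy.array([-7., 7.]))
>>> x2 = ivy.array([3., -3.])
>>> y = x1.remainder(x2, modulus=False)
>>> print(y)
{
a: ivy.array([-1., 1.])
}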
"""
return self._static_remainder(
self,
x2,
modulus=modulus,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_round(
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
decimals: Union[int, ivy.Container] = 0,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.round. This method simply
wraps the function, and so the docstring for ivy.round also applies to
this method with minimal changes.
Parameters
----------
x
input container. Should have a numeric data type.
decimals
number of decimal places to round to. Default is ``0``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the rounded result for each element in ``x``.
The returned container must have the same data type as ``x``.
Examples
--------
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([4.20, 8.6, 6.90, 0.0]),
... b=ivy.array([-300.9, -527.3, 4.5]))
>>> y = ivy.Container.static_round(x)
>>> print(y)
{
a: ivy.array([4., 9., 7., 0.]),
b: ivy.array([-301., -527., 4.])
}
"""
return ContainerBase.cont_multi_map_in_function(
"round",
x,
decimals=decimals,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def round(
self: ivy.Container,
*,
decimals: Union[int, ivy.Container] = 0,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.round. This method
simply wraps the function, and so the docstring for ivy.round also
applies to this method with minimal changes.
Parameters
----------
self
input container. Should have a numeric data type.
decimals
number of decimal places to round to. Default is ``0``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the rounded result for each element in ``self``.
The returned container must have the same data type as ``self``.
Examples
--------
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([4.20, 8.6, 6.90, 0.0]),
... b=ivy.array([-300.9, -527.3, 4.5]))
>>> y = x.round()
>>> print(y)
{
a: ivy.array([4., 9., 7., 0.]),
b: ivy.array([-301., -527., 4.])
}
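The ``decimals`` argument rounds to a given number of decimal places; an
illustrative sketch (printed values assume the default print precision):
>>> x = ivy.Container(a=ivy.array([5.1234, 8.567]))
>>> y = x.round(decimals=2)
>>> print(y)
{
a: ivy.array([5.12, 8.57])
}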
"""
return self._static_round(
self,
decimals=decimals,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_sign(
x: Union[float, ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
np_variant: Optional[Union[bool, ivy.Container]] = True,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.sign. This method simply
wraps the function, and so the docstring for ivy.sign also applies to
this method with minimal changes.
Parameters
----------
x
input container. Should have a numeric data type.
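np_variant
whether to compute the sign in the NumPy-compatible way for complex
inputs (this description is inferred from the parameter name and its
default). Default is ``True``.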
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the evaluated result for each element in ``x``.
The returned container must have the same data type as ``x``.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0, -1., 6.6]),
... b=ivy.array([-14.2, 8.3, 0.1, -0]))
>>> y = ivy.Container.static_sign(x)
>>> print(y)
{
a: ivy.array([0., -1., 1.]),
b: ivy.array([-1., 1., 1., 0.])
}
"""
return ContainerBase.cont_multi_map_in_function(
"sign",
x,
np_variant=np_variant,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def sign(
self: ivy.Container,
*,
np_variant: Optional[Union[bool, ivy.Container]] = True,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.sign. This method
simply wraps the function, and so the docstring for ivy.sign also
applies to this method with minimal changes.
Parameters
----------
self
input container. Should have a numeric data type.
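np_variant
whether to compute the sign in the NumPy-compatible way for complex
inputs (this description is inferred from the parameter name and its
default). Default is ``True``.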
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the evaluated result for each element in ``self``.
The returned container must have the same data type as ``self``.
Examples
--------
>>> x = ivy.Container(a=ivy.array([-6.7, 2.4, -8.5]),
... b=ivy.array([1.5, -0.3, 0]),
... c=ivy.array([-4.7, -5.4, 7.5]))
>>> y = x.sign()
>>> print(y)
{
a: ivy.array([-1., 1., -1.]),
b: ivy.array([1., -1., 0.]),
c: ivy.array([-1., -1., 1.])
}
"""
return self._static_sign(
self,
np_variant=np_variant,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_sin(
x: ivy.Container,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.sin. This method simply
wraps the function, and so the docstring for ivy.sin also applies to
this method with minimal changes.
Parameters
----------
x
input container whose elements are each expressed in radians.
Should have a floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the sine of each element in ``x``. The returned
container must have a floating-point data type determined by
:ref:`type-promotion`.
Examples
--------
>>> x = ivy.Container(a=ivy.array([-1., -2., -3.]),
... b=ivy.array([4., 5., 6.]))
>>> y = ivy.Container.static_sin(x)
>>> print(y)
{
a: ivy.array([-0.841, -0.909, -0.141]),
b: ivy.array([-0.757, -0.959, -0.279])
}
"""
return ContainerBase.cont_multi_map_in_function(
"sin",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def sin(
self: ivy.Container,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.sin. This method simply
wraps the function, and so the docstring for ivy.sin also applies to
this method with minimal changes.
Parameters
----------
self
input container whose elements are each expressed in radians.
Should have a floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the sine of each element in ``self``.
The returned container must have a floating-point data type
determined by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1., 2., 3.]),
... b=ivy.array([-4., -5., -6.]))
>>> y = x.sin()
>>> print(y)
{
a: ivy.array([0.841, 0.909, 0.141]),
b: ivy.array([0.757, 0.959, 0.279])
}
"""
return self._static_sin(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_sinh(
x: ivy.Container,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.sinh. This method simply
wraps the function, and so the docstring for ivy.sinh also applies to
this method with minimal changes.
Parameters
----------
x
input container whose elements each represent a hyperbolic angle.
Should have a floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the hyperbolic sine of each element in ``x``.
The returned container must have a floating-point data type determined
by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.Container(a=ivy.array([-1, 0.23, 1.12]), b=ivy.array([1, -2, 0.76]))
>>> y = ivy.Container.static_sinh(x)
>>> print(y)
{
a: ivy.array([-1.18, 0.232, 1.37]),
b: ivy.array([1.18, -3.63, 0.835])
}
>>> x = ivy.Container(a=ivy.array([-3, 0.34, 2.]),
... b=ivy.array([0.67, -0.98, -3]))
>>> y = ivy.Container(a=ivy.zeros(3), b=ivy.zeros(3))
>>> ivy.Container.static_sinh(x, out=y)
>>> print(y)
{
a: ivy.array([-10., 0.347, 3.63]),
b: ivy.array([0.721, -1.14, -10.])
}
"""
return ContainerBase.cont_multi_map_in_function(
"sinh",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def sinh(
self: ivy.Container,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.sinh. This method
simply wraps the function, and so the docstring for ivy.sinh also
applies to this method with minimal changes.
Parameters
----------
self
input container whose elements each represent a hyperbolic angle.
Should have a floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the hyperbolic sine of each element in ``self``.
The returned container must have a floating-point data type determined
by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.Container(a=ivy.array([-1, 0.23, 1.12]), b=ivy.array([1, -2, 0.76]))
>>> y = x.sinh()
>>> print(y)
{
a: ivy.array([-1.18, 0.232, 1.37]),
b: ivy.array([1.18, -3.63, 0.835])
}
>>> x = ivy.Container(a=ivy.array([-3, 0.34, 2.]),
... b=ivy.array([0.67, -0.98, -3]))
>>> y = ivy.Container(a=ivy.zeros(3), b=ivy.zeros(3))
>>> x.sinh(out=y)
>>> print(y)
{
a: ivy.array([-10., 0.347, 3.63]),
b: ivy.array([0.721, -1.14, -10.])
}
"""
return self._static_sinh(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_square(
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.square. This method
simply wraps the function, and so the docstring for ivy.square also
applies to this method with minimal changes.
Parameters
----------
x
input container. Should have a real-valued floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the square of each element in ``x``.
The returned container must have a real-valued floating-point
data type determined by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0, 1]), b=ivy.array([2, 3]))
>>> y = ivy.Container.static_square(x)
>>> print(y)
{
a: ivy.array([0, 1]),
b: ivy.array([4, 9])
}
"""
return ContainerBase.cont_multi_map_in_function(
"square",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def square(
self: ivy.Container,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.square. This method
simply wraps the function, and so the docstring for ivy.square also
applies to this method with minimal changes.
Parameters
----------
self
input container. Should have a real-valued floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the square of each element in ``self``.
The returned container must have a real-valued floating-point
data type determined by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0, 1]), b=ivy.array([2, 3]))
>>> y = x.square()
>>> print(y)
{
a: ivy.array([0, 1]),
b: ivy.array([4, 9])
}
"""
return self._static_square(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_sqrt(
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.sqrt. This method simply
wraps the function, and so the docstring for ivy.sqrt also applies to
this method with minimal changes.
Parameters
----------
x
input container. Should have a real-valued floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the square root of each element in ``x``.
The returned container must have a real-valued floating-point
data type determined by :ref:`type-promotion`.
Examples
--------
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0., 100., 27.]),
... b=ivy.native_array([93., 54., 25.]))
>>> y = ivy.Container.static_sqrt(x)
>>> print(y)
{
a: ivy.array([0., 10., 5.2]),
b: ivy.array([9.64, 7.35, 5.])
}
"""
return ContainerBase.cont_multi_map_in_function(
"sqrt",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def sqrt(
self: ivy.Container,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.sqrt. This method
simply wraps the function, and so the docstring for ivy.sqrt also
applies to this method with minimal changes.
Parameters
----------
self
input container. Should have a real-valued floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the square root of each element in
``self``. The returned container must have a real-valued
floating-point data type determined by :ref:`type-promotion`.
Examples
--------
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([0., 100., 27.]),
... b=ivy.native_array([93., 54., 25.]))
>>> y = x.sqrt()
>>> print(y)
{
a: ivy.array([0., 10., 5.2]),
b: ivy.array([9.64, 7.35, 5.])
}
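The ``key_chains`` and ``prune_unapplied`` flags restrict where the
method is applied; an illustrative sketch (assuming unapplied leaves are
pruned from the result):
>>> y = x.sqrt(key_chains=['a'], prune_unapplied=True)
>>> print(y)
{
a: ivy.array([0., 10., 5.2])
}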
"""
return self._static_sqrt(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_subtract(
x1: Union[ivy.Array, ivy.NativeArray, ivy.Container],
x2: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
alpha: Optional[Union[int, float, ivy.Container]] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.subtract. This method
simply wraps the function, and so the docstring for ivy.subtract also
applies to this method with minimal changes.
Parameters
----------
x1
first input array or container. Should have a numeric data type.
x2
second input array or container. Must be compatible with ``x1``
(see :ref:`broadcasting`). Should have a numeric data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
alpha
optional scalar multiplier for ``x2``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise differences.
The returned container must have a data type determined
by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1, 2, 3]),
... b=ivy.array([2, 3, 4]))
>>> y = ivy.Container(a=ivy.array([4, 1, 3]),
... b=ivy.array([1, -1, 0]))
>>> z = ivy.Container.static_subtract(x, y)
>>> print(z)
{
a: ivy.array([-3, 1, 0]),
b: ivy.array([1, 4, 4])
}
>>> z = ivy.Container.static_subtract(x, y, alpha=3)
>>> print(z)
{
a: ivy.array([-11, -1, -6]),
b: ivy.array([-1, 6, 4])
}
"""
return ContainerBase.cont_multi_map_in_function(
"subtract",
x1,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
alpha=alpha,
out=out,
)
def subtract(
self: ivy.Container,
x2: Union[ivy.Container, ivy.Array, ivy.NativeArray],
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
alpha: Optional[Union[int, float, ivy.Container]] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.subtract. This method
simply wraps the function, and so the docstring for ivy.subtract also
applies to this method with minimal changes.
Parameters
----------
self
first input array or container. Should have a numeric data type.
x2
second input array or container. Must be compatible with ``self``
(see :ref:`broadcasting`). Should have a numeric data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
alpha
optional scalar multiplier for ``x2``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise differences.
The returned container must have a data type determined
by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1, 2, 3]),
... b=ivy.array([2, 3, 4]))
>>> y = ivy.Container(a=ivy.array([4, 1, 3]),
... b=ivy.array([1, -1, 0]))
>>> z = x.subtract(y)
>>> print(z)
{
a: ivy.array([-3, 1, 0]),
b: ivy.array([1, 4, 4])
}
>>> z = x.subtract(y, alpha=3)
>>> print(z)
{
a: ivy.array([-11, -1, -6]),
b: ivy.array([-1, 6, 4])
}
"""
return self._static_subtract(
self,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
alpha=alpha,
out=out,
)
@staticmethod
def _static_tan(
x: ivy.Container,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.tan. This method simply
wraps the function, and so the docstring for ivy.tan also applies to
this method with minimal changes.
Parameters
----------
x
input container whose elements are expressed in radians. Should have a
floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
a container containing the tangent of each element in ``x``.
The return must have a floating-point data type determined
by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]), b=ivy.array([3., 4., 5.]))
>>> y = ivy.Container.static_tan(x)
>>> print(y)
{
a: ivy.array([0., 1.56, -2.19]),
b: ivy.array([-0.143, 1.16, -3.38])
}
"""
return ContainerBase.cont_multi_map_in_function(
"tan",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def tan(
self: ivy.Container,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.tan. This method simply
wraps the function, and so the docstring for ivy.tan also applies to
this method with minimal changes.
Parameters
----------
self
input container whose elements are expressed in radians. Should have a
floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
a container containing the tangent of each element in ``self``.
The return must have a floating-point data type determined
by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]), b=ivy.array([3., 4., 5.]))
>>> y = x.tan()
>>> print(y)
{
a: ivy.array([0., 1.56, -2.19]),
b: ivy.array([-0.143, 1.16, -3.38])
}
"""
return self._static_tan(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_tanh(
x: ivy.Container,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.tanh. This method simply
wraps the function, and so the docstring for ivy.tanh also applies to
this method with minimal changes.
Parameters
----------
x
input container whose elements each represent a hyperbolic angle.
Should have a real-valued floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
complex_mode
optional specifier for how to handle complex data types. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the hyperbolic tangent of each element in ``x``.
The returned array must have a real-valued floating-point data type
determined by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]), b=ivy.array([3., 4., 5.]))
>>> y = ivy.Container.static_tanh(x)
>>> print(y)
{
a: ivy.array([0., 0.76, 0.96]),
b: ivy.array([0.995, 0.999, 0.9999])
}
"""
return ContainerBase.cont_multi_map_in_function(
"tanh",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
complex_mode=complex_mode,
out=out,
)
def tanh(
self: ivy.Container,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
complex_mode: Literal["split", "magnitude", "jax"] = "jax",
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.tanh. This method
simply wraps the function, and so the docstring for ivy.tanh also
applies to this method with minimal changes.
Parameters
----------
self
input container whose elements each represent a hyperbolic angle.
Should have a real-valued floating-point data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
complex_mode
optional specifier for how to handle complex data types. See
``ivy.func_wrapper.handle_complex_input`` for more detail.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the hyperbolic tangent of each element in
``self``. The returned container must have a real-valued floating-point
data type determined by :ref:`type-promotion`.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0., 1., 2.]),
... b=ivy.array([3., 4., 5.]))
>>> y = x.tanh()
>>> print(y)
{
a: ivy.array([0., 0.762, 0.964]),
b: ivy.array([0.995, 0.999, 1.])
}
"""
return self._static_tanh(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
complex_mode=complex_mode,
out=out,
)
@staticmethod
def _static_trunc(
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.trunc. This method simply
wraps the function, and so the docstring for ivy.trunc also applies to
this method with minimal changes.
Parameters
----------
x
input container. Should have a real-valued data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the rounded result for each element in ``x``.
The returned container must have the same data type as ``x``.
Examples
--------
With :class:`ivy.Container` input:
>>> x = ivy.Container(a=ivy.array([-0.25, 4, 1.3]),
... b=ivy.array([12, -3.5, 1.234]))
>>> y = ivy.Container.static_trunc(x)
>>> print(y)
{
a: ivy.array([-0., 4., 1.]),
b: ivy.array([12., -3., 1.])
}
"""
return ContainerBase.cont_multi_map_in_function(
"trunc",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def trunc(
self: ivy.Container,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.trunc. This method
simply wraps the function, and so the docstring for ivy.trunc also
applies to this method with minimal changes.
Parameters
----------
self
input container. Should have a real-valued data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the rounded result for each element in ``self``.
The returned container must have the same data type as ``self``.
Examples
--------
>>> x = ivy.Container(a=ivy.array([-0.25, 4, 1.3]),
... b=ivy.array([12, -3.5, 1.234]))
>>> y = x.trunc()
>>> print(y)
{
a: ivy.array([-0., 4., 1.]),
b: ivy.array([12., -3., 1.])
}
"""
return self._static_trunc(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_erf(
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.erf. This method simply
wraps the function, and so the docstring for ivy.erf also applies to
this method with minimal changes.
Parameters
----------
x
input container to compute the Gauss error function for.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the Gauss error of ``x``.
Examples
--------
>>> x = ivy.Container(a=ivy.array([-0.25, 4, 1.3]),
... b=ivy.array([12, -3.5, 1.234]))
>>> y = ivy.Container.static_erf(x)
>>> print(y)
{
a: ivy.array([-0.27632612, 1., 0.934008]),
b: ivy.array([1., -0.99999928, 0.91903949])
}
"""
return ContainerBase.cont_multi_map_in_function(
"erf",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def erf(
self: ivy.Container,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.erf. This method simply
wraps the function, and so the docstring for ivy.erf also applies to
this method with minimal changes.
Parameters
----------
self
input container to compute the Gauss error function for.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the Gauss error of ``self``.
Examples
--------
>>> x = ivy.Container(a=ivy.array([-0.25, 4, 1.3]),
... b=ivy.array([12, -3.5, 1.234]))
>>> y = x.erf()
>>> print(y)
{
a: ivy.array([-0.27632612, 1., 0.934008]),
b: ivy.array([1., -0.99999928, 0.91903949])
}
"""
return self._static_erf(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_minimum(
x1: Union[ivy.Container, ivy.Array, ivy.NativeArray],
x2: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
use_where: Union[bool, ivy.Container] = True,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.minimum. This method
simply wraps the function, and so the docstring for ivy.minimum also
applies to this method with minimal changes.
Parameters
----------
x1
Input array or container containing elements to compare against ``x2``.
x2
The other container or number to compute the minimum against.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
use_where
Whether to use :func:`where` to calculate the minimum. If ``False``, the
minimum is calculated using the ``(x + y - |x - y|)/2`` formula. Default is
``True``.
out
optional output container, for writing the result to. It must have a
shape that the inputs broadcast to.
Returns
-------
ret
Container object with all sub-arrays having the minimum values computed.
Examples
--------
With multiple :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([1, 3, 1]),
... b=ivy.array([2, 8, 5]))
>>> y = ivy.Container(a=ivy.array([1, 5, 6]),
... b=ivy.array([5, 9, 7]))
>>> z = ivy.Container.static_minimum(x, y)
>>> print(z)
{
a: ivy.array([1, 3, 1]),
b: ivy.array([2, 8, 5])
}
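With ``use_where=False`` the same values are obtained via the
``(x + y - |x - y|)/2`` formula; an illustrative sketch:
>>> z = ivy.Container.static_minimum(x, y, use_where=False)
>>> print(z)
{
a: ivy.array([1, 3, 1]),
b: ivy.array([2, 8, 5])
}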
"""
return ContainerBase.cont_multi_map_in_function(
"minimum",
x1,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
use_where=use_where,
out=out,
)
def minimum(
self: Union[ivy.Container, ivy.Array, ivy.NativeArray],
x2: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
use_where: Union[bool, ivy.Container] = True,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.minimum. This method
simply wraps the function, and so the docstring for ivy.minimum also
applies to this method with minimal changes.
Parameters
----------
self
Input array or container containing elements to compare against ``x2``.
x2
The other container or number to compute the minimum against.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
use_where
Whether to use :func:`where` to calculate the minimum. If ``False``, the
minimum is calculated using the ``(x + y - |x - y|)/2`` formula. Default is
``True``.
out
optional output container, for writing the result to. It must have a
shape that the inputs broadcast to.
Returns
-------
ret
Container object with all sub-arrays having the minimum values computed.
Examples
--------
With multiple :class:`ivy.Container` inputs:
>>> x = ivy.Container(a=ivy.array([1, 3, 1]),
... b=ivy.array([2, 8, 5]))
>>> y = ivy.Container(a=ivy.array([1, 5, 6]),
... b=ivy.array([5, 9, 7]))
>>> z = x.minimum(y)
>>> print(z)
{
a: ivy.array([1, 3, 1]),
b: ivy.array([2, 8, 5])
}
"""
return self._static_minimum(
self,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
use_where=use_where,
out=out,
)
@staticmethod
def _static_maximum(
x1: Union[ivy.Container, ivy.Array, ivy.NativeArray],
x2: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
use_where: Union[bool, ivy.Container] = True,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.maximum. This method
simply wraps the function, and so the docstring for ivy.maximum also
applies to this method with minimal changes.
Parameters
----------
x1
Input array containing elements to maximum threshold.
x2
Array or container containing the maximum values; must be broadcastable
to ``x1``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
use_where
Whether to use :func:`where` to calculate the maximum. If ``False``, the
maximum is calculated using the ``(x + y + |x - y|)/2`` formula. Default is
``True``.
out
optional output container, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
A container with the elements of ``x1``, but clipped to not be lower
than the ``x2`` values.
Examples
--------
With one :class:`ivy.Container` input:
>>> x = ivy.array([[1, 3], [2, 4], [3, 7]])
>>> y = ivy.Container(a=ivy.array([1, 0,]),
... b=ivy.array([-5, 9]))
>>> z = ivy.Container.static_maximum(x, y)
>>> print(z)
{
a: ivy.array([[1, 3],
[2, 4],
[3, 7]]),
b: ivy.array([[1, 9],
[2, 9],
[3, 9]])
}
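Likewise, ``use_where=False`` computes the maximum through the
``(x + y + |x - y|)/2`` formula; a minimal sketch with simple containers:
>>> x = ivy.Container(a=ivy.array([1, 5]))
>>> y = ivy.Container(a=ivy.array([3, 2]))
>>> z = ivy.Container.static_maximum(x, y, use_where=False)
>>> print(z)
{
a: ivy.array([3, 5])
}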
"""
return ContainerBase.cont_multi_map_in_function(
"maximum",
x1,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
use_where=use_where,
out=out,
)
def maximum(
self: Union[ivy.Container, ivy.Array, ivy.NativeArray],
x2: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
use_where: Union[bool, ivy.Container] = True,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.maximum. This method
simply wraps the function, and so the docstring for ivy.maximum also
applies to this method with minimal changes.
Parameters
----------
self
Input array containing elements to maximum threshold.
x2
Array or container containing the maximum values; must be broadcastable
to ``self``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
use_where
Whether to use :func:`where` to calculate the maximum. If ``False``, the
maximum is calculated using the ``(x + y + |x - y|)/2`` formula. Default is
``True``.
out
optional output array, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
A container with the elements of ``self``, but clipped to not be
lower than the ``x2`` values.
Examples
--------
With one :class:`ivy.Container` input:
>>> x = ivy.array([[1, 3], [2, 4], [3, 7]])
>>> y = ivy.Container(a=ivy.array([1, 0,]),
... b=ivy.array([-5, 9]))
>>> z = x.maximum(y)
>>> print(z)
{
a: ivy.array([[1, 3],
[2, 4],
[3, 7]]),
b: ivy.array([[1, 9],
[2, 9],
[3, 9]])
}
"""
return self._static_maximum(
self,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
use_where=use_where,
out=out,
)
@staticmethod
def _static_reciprocal(
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.reciprocal. This method
simply wraps the function, and so the docstring for ivy.reciprocal also
applies to this method with minimal changes.
Parameters
----------
x
input container.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container with the element-wise reciprocal of ``x``.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1, 2]), b=ivy.array([4, 5]))
>>> y = ivy.Container.static_reciprocal(x)
>>> print(y)
{
a: ivy.array([1, 0.5]),
b: ivy.array([0.25, 0.2])
}
"""
return ContainerBase.cont_multi_map_in_function(
"reciprocal",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def reciprocal(
self: ivy.Container,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.reciprocal. This method
simply wraps the function, and so the docstring for ivy.reciprocal also
applies to this method with minimal changes.
Parameters
----------
self
input container to compute the element-wise reciprocal for.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container with the element-wise reciprocal of ``self``.
Examples
--------
>>> x = ivy.Container(a=ivy.array([1, 2]), b=ivy.array([4, 5]))
>>> y = x.reciprocal()
>>> print(y)
{
a: ivy.array([1, 0.5]),
b: ivy.array([0.25, 0.2])
}
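Writing the result back into the input container via ``out`` (a sketch):
>>> x = ivy.Container(a=ivy.array([1., 2.]), b=ivy.array([4., 5.]))
>>> _ = x.reciprocal(out=x)  # x now holds the element-wise reciprocals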
"""
return self._static_reciprocal(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_deg2rad(
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.deg2rad. This method
simply wraps the function, and so the docstring for ivy.deg2rad also
applies to this method with minimal changes.
Parameters
----------
x
input container, to be converted from degrees to radians.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container with each element in ``x`` converted from degrees to radians.
Examples
--------
>>> x=ivy.Container(a=ivy.array([0,90,180,270,360]),
... b=ivy.native_array([0,-1.5,-50,ivy.nan]))
>>> y=ivy.Container.static_deg2rad(x)
>>> print(y)
{
a: ivy.array([0., 1.57, 3.14, 4.71, 6.28]),
b: ivy.array([0., -0.0262, -0.873, nan])
}
"""
return ContainerBase.cont_multi_map_in_function(
"deg2rad",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def deg2rad(
self: ivy.Container,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.deg2rad. This method
simply wraps the function, and so the docstring for ivy.deg2rad also
applies to this method with minimal changes.
Parameters
----------
self
input container, to be converted from degrees to radians.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container with each element in ``x`` converted from degrees to radians.
Examples
--------
With :class:`ivy.Container` input:
>>> x=ivy.Container(a=ivy.array([0., 0.351, -0.881, ivy.nan]),
... b=ivy.native_array([0,-1.5,-50,ivy.nan]))
>>> y=x.deg2rad()
>>> print(y)
{
a: ivy.array([0., 0.00613, -0.0154, nan]),
b: ivy.array([0., -0.0262, -0.873, nan])
}
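A round trip through :meth:`rad2deg` recovers the original values up to
rounding (a sketch; no output shown):
>>> roundtrip = x.deg2rad().rad2deg()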
"""
return self._static_deg2rad(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_rad2deg(
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.rad2deg. This method
simply wraps the function, and so the docstring for ivy.rad2deg also
applies to this method with minimal changes.
Parameters
----------
x
input container, to be converted from radians to degrees.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container with each element in ``x`` converted from radians to degrees.
Examples
--------
>>> x=ivy.Container(a=ivy.array([0,90,180,270,360]),
... b=ivy.native_array([0,-1.5,-50,ivy.nan]))
>>> y=ivy.Container.static_rad2deg(x)
>>> print(y)
{
a: ivy.array([0., 5160., 10300., 15500., 20600.]),
b: ivy.array([0., -85.9, -2860., nan])
}
"""
return ContainerBase.cont_multi_map_in_function(
"rad2deg",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def rad2deg(
self: ivy.Container,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.rad2deg. This method
simply wraps the function, and so the docstring for ivy.rad2deg also
applies to this method with minimal changes.
Parameters
----------
self
input container, to be converted from radians to degrees.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container with each element in ``x`` converted from radians to degrees.
Examples
--------
With :class:`ivy.Container` input:
>>> x=ivy.Container(a=ivy.array([0., 0.351, -0.881, ivy.nan]),
... b=ivy.native_array([0,-1.5,-50,7.2]))
>>> y=x.rad2deg()
>>> print(y)
{
a: ivy.array([0., 20.1, -50.5, nan]),
b: ivy.array([0., -85.9, -2860., 413.])
}
"""
return self._static_rad2deg(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_trunc_divide(
x1: Union[ivy.Array, ivy.NativeArray, ivy.Container],
x2: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.trunc_divide. This method
simply wraps the function, and so the docstring for ivy.trunc_divide
also applies to this method with minimal changes.
Parameters
----------
x1
dividend input array or container. Should have a real-valued data type.
x2
divisor input array or container. Must be compatible with ``x1``
(see :ref:`broadcasting`).
Should have a real-valued data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise results.
The returned container must have a data type determined
by :ref:`type-promotion`.
Examples
--------
With :class:`ivy.Container` inputs:
>>> x1 = ivy.Container(a=ivy.array([12., 3.5, 6.3]), b=ivy.array([3., 1., 9.]))
>>> x2 = ivy.Container(a=ivy.array([1., 2.3, -3]), b=ivy.array([2.4, 3., -2.]))
>>> y = ivy.Container.static_trunc_divide(x1, x2)
>>> print(y)
{
a: ivy.array([12., 1., -2.]),
b: ivy.array([1., 0., -4.])
}
"""
return ContainerBase.cont_multi_map_in_function(
"trunc_divide",
x1,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def trunc_divide(
self: ivy.Container,
x2: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.trunc_divide. This
method simply wraps the function, and so the docstring for
ivy.trunc_divide also applies to this method with minimal changes.
Parameters
----------
self
dividend input array or container. Should have a real-valued
data type.
x2
divisor input array or container. Must be compatible with ``self``
(see :ref:`broadcasting`).
Should have a real-valued data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the element-wise results.
The returned container must have a data type determined
by :ref:`type-promotion`.
Examples
--------
With :class:`ivy.Container` inputs:
>>> x1 = ivy.Container(a=ivy.array([12., 3.5, 6.3]), b=ivy.array([3., 1., 9.]))
>>> x2 = ivy.Container(a=ivy.array([1., 2.3, -3]), b=ivy.array([2.4, 3., -2.]))
>>> y = x1.trunc_divide(x2)
>>> print(y)
{
a: ivy.array([12., 1., -2.]),
b: ivy.array([1., 0., -4.])
}
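``trunc_divide`` matches a true division followed by :func:`ivy.trunc`
(a sketch using the same inputs, assuming containers support the ``/``
operator):
>>> print(ivy.trunc(x1 / x2))
{
a: ivy.array([12., 1., -2.]),
b: ivy.array([1., 0., -4.])
}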
"""
return self._static_trunc_divide(
self,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_isreal(
x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.isreal. This method
simply wraps the function, and so the docstring for ivy.isreal also
applies to this method with minimal changes.
Parameters
----------
x
input container. Should have a real-valued data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the test result. An element ``out_i`` is ``True``
if ``x_i`` is a real number and ``False`` otherwise.
The returned array should have a data type of ``bool``.
Examples
--------
>>> x = ivy.Container(a=ivy.array([-1+5j, 0-0j, 1.23j]),
... b=ivy.array([7.9, 3.3j, -4.2-5.9j]))
>>> z = ivy.Container.static_isreal(x)
>>> print(z)
{
a: ivy.array([False, True, False]),
b: ivy.array([True, False, False])
}
"""
return ContainerBase.cont_multi_map_in_function(
"isreal",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def isreal(
self: ivy.Container,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.isreal. This method
simply wraps the function, and so the docstring for ivy.isreal also
applies to this method with minimal changes.
Parameters
----------
self
input container. Should have a real-valued data type.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
a container containing the test result. An element ``out_i`` is ``True``
if ``self_i`` is a real number and ``False`` otherwise.
The returned array should have a data type of ``bool``.
Examples
--------
>>> x = ivy.Container(a=ivy.array([-1j, -np.inf, 1.23+7j]),\
b=ivy.array([0.0, 3.3j, 1+0j]))
>>> y = x.isreal()
>>> print(y)
{
a: ivy.array([False, True, False]),
b: ivy.array([True, False, True])
}
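Purely real-valued inputs yield all ``True`` (a small sketch):
>>> x = ivy.Container(a=ivy.array([1., 2., 3.]))
>>> print(x.isreal())
{
a: ivy.array([True, True, True])
}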
"""
return self._static_isreal(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_trapz(
y: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
x: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
dx: Union[float, ivy.Container] = 1.0,
axis: Union[int, ivy.Container] = -1,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.trapz. This method simply
wraps the function, and so the docstring for ivy.trapz also applies to
this method with minimal changes.
Parameters
----------
y
The container whose arrays should be integrated.
x
The sample points corresponding to the input array values.
If x is None, the sample points are assumed to be evenly spaced
dx apart. The default is None.
dx
The spacing between sample points when x is None. The default is 1.
axis
The axis along which to integrate.
out
optional output container, for writing the result to.
Returns
-------
ret
container including definite integrals of n-dimensional arrays
as approximated along a single axis by the trapezoidal rule.
Examples
--------
With one :class:`ivy.Container` input:
>>> y = ivy.Container(a=ivy.array((1, 2, 3)), b=ivy.array((1, 5, 10)))
>>> ivy.Container.static_trapz(y)
{
a: 4.0
b: 10.5
}
"""
return ContainerBase.cont_multi_map_in_function(
"trapz",
y,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
x=x,
dx=dx,
axis=axis,
out=out,
)
def trapz(
self: ivy.Container,
/,
*,
x: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None,
dx: Union[float, ivy.Container] = 1.0,
axis: Union[int, ivy.Container] = -1,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.trapz. This method
simply wraps the function, and so the docstring for ivy.trapz also
applies to this method with minimal changes.
Parameters
----------
self
The container whose arrays should be integrated.
x
The sample points corresponding to the input array values.
If x is None, the sample points are assumed to be evenly spaced
dx apart. The default is None.
dx
The spacing between sample points when x is None. The default is 1.
axis
The axis along which to integrate.
out
optional output container, for writing the result to.
Returns
-------
ret
container including definite integrals of n-dimensional arrays
as approximated along a single axis by the trapezoidal rule.
Examples
--------
With one :class:`ivy.Container` input:
>>> y = ivy.Container(a=ivy.array((1, 2, 3)), b=ivy.array((1, 5, 10)))
>>> y.trapz()
{
a: 4.0
b: 10.5
}
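Integrating with explicit sample points via ``x`` (a sketch; the points
are spaced 2 apart, so each trapezoid is twice as wide):
>>> pts = ivy.array([0., 2., 4.])
>>> y.trapz(x=pts)
{
a: 8.0
b: 21.0
}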
"""
return self._static_trapz(self, x=x, dx=dx, axis=axis, out=out)
@staticmethod
def _static_lcm(
x1: Union[ivy.Container, ivy.Array, ivy.NativeArray],
x2: Union[ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.lcm. This method simply
wraps the function, and so the docstring for ivy.lcm also applies to
this method with minimal changes.
Parameters
----------
x1
first input container.
x2
second input container.
out
optional output container, for writing the result to.
Returns
-------
ret
a container containing the element-wise least common multiples
of the arrays contained in x1 and x2.
Examples
--------
>>> x1=ivy.Container(a=ivy.array([2, 3, 4]),
... b=ivy.array([6, 54, 62, 10]))
>>> x2=ivy.Container(a=ivy.array([5, 8, 15]),
... b=ivy.array([32, 40, 25, 13]))
>>> ivy.Container.static_lcm(x1, x2)
{
a: ivy.array([10, 24, 60]),
b: ivy.array([96, 1080, 1550, 130])
}
"""
return ContainerBase.cont_multi_map_in_function(
"lcm",
x1,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def lcm(
self: ivy.Container,
x2: Union[ivy.Container, ivy.Array, ivy.NativeArray],
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.lcm. This method simply
wraps the function, and so the docstring for ivy.lcm also applies to
this method with minimal changes.
Parameters
----------
self
first input container.
x2
second input container.
out
optional output container, for writing the result to.
Returns
-------
ret
a container containing the element-wise least common multiples
of the arrays contained in x1 and x2.
Examples
--------
>>> x1=ivy.Container(a=ivy.array([2, 3, 4]),
... b=ivy.array([6, 54, 62, 10]))
>>> x2=ivy.Container(a=ivy.array([5, 8, 15]),
... b=ivy.array([32, 40, 25, 13]))
>>> x1.lcm(x2)
{
a: ivy.array([10, 24, 60]),
b: ivy.array([96, 1080, 1550, 130])
}
"""
return self._static_lcm(
self,
x2,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
| ivy/ivy/data_classes/container/elementwise.py/0 | {
"file_path": "ivy/ivy/data_classes/container/elementwise.py",
"repo_id": "ivy",
"token_count": 168986
} | 10 |
# global
from typing import Optional, Union, List, Dict
# local
import ivy
from ivy.data_classes.container.base import ContainerBase
class _ContainerWithRandomExperimental(ContainerBase):
# dirichlet
@staticmethod
def static_dirichlet(
alpha: ivy.Container,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
size: Optional[Union[ivy.Shape, ivy.NativeShape, ivy.Container]] = None,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype, ivy.Container]] = None,
seed: Optional[Union[int, ivy.Container]] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.dirichlet. This method
simply wraps the function, and so the docstring for ivy.dirichlet also
applies to this method with minimal changes.
Parameters
----------
alpha
Sequence of floats of length k
size
optional container including ints or tuple of ints,
Output shape for the arrays in the input container.
dtype
output container array data type. If ``dtype`` is ``None``, the output data
type will be the default floating-point data type. Default ``None``
seed
A python integer. Used to create a random seed distribution
out
optional output container, for writing the result to.
Returns
-------
ret
container including the drawn samples.
Examples
--------
>>> alpha = ivy.Container(a=ivy.array([7,6,5]), \
b=ivy.array([8,9,4]))
>>> size = ivy.Container(a=3, b=5)
>>> ivy.Container.static_dirichlet(alpha, size=size)
{
a: ivy.array(
[[0.43643127, 0.32325703, 0.24031169],
[0.34251311, 0.31692529, 0.3405616 ],
[0.5319725 , 0.22458365, 0.24344385]]
),
b: ivy.array(
[[0.26588406, 0.61075421, 0.12336174],
[0.51142915, 0.25041268, 0.23815817],
[0.64042903, 0.25763214, 0.10193883],
[0.31624692, 0.46567987, 0.21807321],
[0.37677699, 0.39914594, 0.22407707]]
)
}
"""
return ContainerBase.cont_multi_map_in_function(
"dirichlet",
alpha,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
size=size,
dtype=dtype,
seed=seed,
out=out,
)
def dirichlet(
self: ivy.Container,
/,
*,
size: Optional[Union[ivy.Shape, ivy.NativeShape, ivy.Container]] = None,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype, ivy.Container]] = None,
seed: Optional[Union[int, ivy.Container]] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.dirichlet. This method
simply wraps the function, and so the docstring for ivy.dirichlet also
applies to this method with minimal changes.
Parameters
----------
self
Sequence of floats of length k
size
optional container including ints or tuple of ints,
Output shape for the arrays in the input container.
dtype
output container array data type. If ``dtype`` is ``None``, the output data
type will be the default floating-point data type. Default ``None``
seed
A python integer. Used to create a random seed distribution
out
optional output container, for writing the result to.
Returns
-------
ret
container including the drawn samples.
Examples
--------
>>> alpha = ivy.Container(a=ivy.array([7,6,5]), \
b=ivy.array([8,9,4]))
>>> size = ivy.Container(a=3, b=5)
>>> alpha.dirichlet(size=size)
{
a: ivy.array(
[[0.43643127, 0.32325703, 0.24031169],
[0.34251311, 0.31692529, 0.3405616 ],
[0.5319725 , 0.22458365, 0.24344385]]
),
b: ivy.array(
[[0.26588406, 0.61075421, 0.12336174],
[0.51142915, 0.25041268, 0.23815817],
[0.64042903, 0.25763214, 0.10193883],
[0.31624692, 0.46567987, 0.21807321],
[0.37677699, 0.39914594, 0.22407707]]
)
}
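Drawing with a fixed ``seed`` for reproducibility (a sketch; sampled
values are backend-dependent, so no output is shown):
>>> samples = alpha.dirichlet(size=size, seed=42)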
"""
return self.static_dirichlet(
self,
size=size,
dtype=dtype,
seed=seed,
out=out,
)
@staticmethod
def static_beta(
alpha: ivy.Container,
beta: Union[int, float, ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
shape: Optional[Union[ivy.Shape, ivy.NativeShape, ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
device: Optional[Union[str, ivy.Container]] = None,
dtype: Optional[Union[str, ivy.Container]] = None,
seed: Optional[Union[int, ivy.Container]] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.beta. This method simply
wraps the function, and so the docstring for ivy.beta also applies to
this method with minimal changes.
Parameters
----------
alpha
The alpha parameter of the distribution. Should have a numeric data type.
beta
The beta parameter of the distribution.
shape
The shape of the output array. Default is ``None``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
device
The device to place the output array on. Default is ``None``.
dtype
The data type of the output array. Default is ``None``.
seed
A python integer. Used to create a random seed distribution
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
A container object, with values drawn from the beta distribution.
"""
return ContainerBase.cont_multi_map_in_function(
"beta",
alpha,
beta,
shape=shape,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
device=device,
dtype=dtype,
seed=seed,
out=out,
)
def beta(
self: ivy.Container,
beta: Union[int, float, ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
shape: Optional[Union[ivy.Shape, ivy.NativeShape, ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
device: Optional[Union[str, ivy.Container]] = None,
dtype: Optional[Union[str, ivy.Container]] = None,
seed: Optional[Union[int, ivy.Container]] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.beta. This method
simply wraps the function, and so the docstring for ivy.beta also
applies to this method with minimal changes.
Parameters
----------
self
Input container, representing the alpha parameter of the distribution.
Should have a numeric data type.
beta
The beta parameter of the distribution.
shape
The shape of the output array. Default is ``None``.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
device
The device to place the output array on. Default is ``None``.
dtype
The data type of the output array. Default is ``None``.
seed
A python integer. Used to create a random seed distribution
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
A container object, with values drawn from the beta distribution.
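Examples
--------
A minimal usage sketch (sampled values are random, so no output is shown):
>>> alpha = ivy.Container(a=ivy.array([1.0, 2.0]), b=ivy.array([3.0, 4.0]))
>>> samples = alpha.beta(2.0, seed=0)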
"""
return self.static_beta(
self,
beta,
shape=shape,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
device=device,
dtype=dtype,
seed=seed,
out=out,
)
@staticmethod
def static_poisson(
lam: ivy.Container,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
shape: Optional[Union[ivy.Shape, ivy.NativeShape, ivy.Container]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice, ivy.Container]] = None,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype, ivy.Container]] = None,
seed: Optional[Union[int, ivy.Container]] = None,
fill_value: Optional[Union[float, int, ivy.Container]] = 0,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.poisson. This method
simply wraps the function, and so the docstring for ivy.poisson also
applies to this method with minimal changes.
Parameters
----------
lam
Input container with rate parameter(s) describing the poisson
distribution(s) to sample.
shape
optional container including ints or tuple of ints,
Output shape for the arrays in the input container.
device
device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc.
(Default value = None).
dtype
output container array data type. If ``dtype`` is ``None``, the output data
type will be the default floating-point data type. Default ``None``
seed
A python integer. Used to create a random seed distribution.
fill_value
if lam is negative, fill the output array with this value
on that specific dimension.
out
optional output container, for writing the result to.
Returns
-------
ret
container including the drawn samples.
Examples
--------
>>> lam = ivy.Container(a=ivy.array([7,6,5]), \
b=ivy.array([8,9,4]))
>>> shape = ivy.Container(a=(2,3), b=(1,1,3))
>>> ivy.Container.static_poisson(lam, shape=shape)
{
a: ivy.array([[5, 4, 6],
[12, 4, 5]]),
b: ivy.array([[[8, 13, 3]]])
}
"""
return ContainerBase.cont_multi_map_in_function(
"poisson",
lam,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
shape=shape,
device=device,
dtype=dtype,
seed=seed,
fill_value=fill_value,
out=out,
)
def poisson(
self: ivy.Container,
/,
*,
shape: Optional[Union[ivy.Shape, ivy.NativeShape, ivy.Container]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice, ivy.Container]] = None,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype, ivy.Container]] = None,
seed: Optional[Union[int, ivy.Container]] = None,
fill_value: Optional[Union[float, int, ivy.Container]] = 0,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.poisson. This method
simply wraps the function, and so the docstring for ivy.poisson also
applies to this method with minimal changes.
Parameters
----------
self
Input container with rate parameter(s) describing the poisson
distribution(s) to sample.
shape
optional container including ints or tuple of ints,
Output shape for the arrays in the input container.
device
device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc.
(Default value = None).
dtype
output container array data type. If ``dtype`` is ``None``, the output data
type will be the default floating-point data type. Default ``None``
seed
A python integer. Used to create a random seed distribution.
fill_value
if lam is negative, fill the output array with this value
on that specific dimension.
out
optional output container, for writing the result to.
Returns
-------
ret
container including the drawn samples.
Examples
--------
>>> lam = ivy.Container(a=ivy.array([7,6,5]), \
b=ivy.array([8,9,4]))
>>> shape = ivy.Container(a=(2,3), b=(1,1,3))
>>> lam.poisson(shape=shape)
{
a: ivy.array([[5, 4, 6],
[12, 4, 5]]),
b: ivy.array([[[8, 13, 3]]])
}
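Entries with a negative rate are filled with ``fill_value`` (a sketch;
drawn counts are random, so no output is shown):
>>> lam2 = ivy.Container(a=ivy.array([-2., 4.]))
>>> samples = lam2.poisson(fill_value=-1, seed=0)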
"""
return self.static_poisson(
self,
shape=shape,
device=device,
dtype=dtype,
seed=seed,
fill_value=fill_value,
out=out,
)
@staticmethod
def static_bernoulli(
probs: ivy.Container,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
logits: Optional[
Union[float, ivy.Array, ivy.NativeArray, ivy.Container]
] = None,
shape: Optional[Union[ivy.Shape, ivy.NativeShape, ivy.Container]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice, ivy.Container]] = None,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype, ivy.Container]] = None,
seed: Optional[Union[int, ivy.Container]] = None,
out: Optional[Union[ivy.Array, ivy.Container]] = None,
) -> ivy.Container:
"""
Parameters
----------
probs
An N-D Array representing the probability of a 1 event.
Each entry in the Array parameterizes an independent Bernoulli
distribution. Only one of logits or probs should be passed in
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
logits
An N-D Array representing the log-odds of a 1 event.
Each entry in the Array parameterizes an independent Bernoulli
distribution where the probability of an event is sigmoid
(logits). Only one of logits or probs should be passed in.
shape
If the given shape is, e.g. '(m, n, k)', then 'm * n * k' samples are drawn.
(Default value = 'None', where 'ivy.shape(logits)' samples are drawn)
device
The device to place the output array on. Default is ``None``.
dtype
The data type of the output array. Default is ``None``.
seed
A python integer. Used to create a random seed distribution
out
optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
Drawn samples from the Bernoulli distribution
"""
return ContainerBase.cont_multi_map_in_function(
"bernoulli",
probs,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
logits=logits,
shape=shape,
device=device,
dtype=dtype,
seed=seed,
out=out,
)
def bernoulli(
self: ivy.Container,
/,
*,
logits: Optional[
Union[float, ivy.Array, ivy.NativeArray, ivy.Container]
] = None,
shape: Optional[Union[ivy.Shape, ivy.NativeShape, ivy.Container]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice, ivy.Container]] = None,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype, ivy.Container]] = None,
seed: Optional[Union[int, ivy.Container]] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""
Parameters
----------
self
An N-D Array representing the probability of a 1 event.
Each entry in the Array parameterizes an independent
Bernoulli distribution. Only one of logits or probs should
be passed in.
logits
An N-D Array representing the log-odds of a 1 event.
Each entry in the Array parameterizes an independent Bernoulli
distribution where the probability of an event is
sigmoid(logits). Only one of logits or probs should be passed in.
shape
If the given shape is, e.g. '(m, n, k)', then 'm * n * k' samples are drawn.
(Default value = 'None', where 'ivy.shape(logits)' samples are drawn)
device
device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc.
(Default value = None).
dtype
output array data type. If ``dtype`` is ``None``, the output array data
type will be the default floating-point data type. Default ``None``
seed
A python integer. Used to create a random seed distribution
out
optional output array, for writing the result to.
It must have a shape that the inputs broadcast to.
Returns
-------
ret
Drawn samples from the Bernoulli distribution
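Examples
--------
A minimal usage sketch (drawn samples are random, so no output is shown):
>>> probs = ivy.Container(a=ivy.array([0.1, 0.9]), b=ivy.array([0.5, 0.5]))
>>> samples = probs.bernoulli(seed=0)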
"""
return self.static_bernoulli(
self,
logits=logits,
shape=shape,
device=device,
dtype=dtype,
seed=seed,
out=out,
)
@staticmethod
def static_gamma(
alpha: ivy.Container,
beta: Union[int, float, ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
shape: Optional[Union[ivy.Shape, ivy.NativeShape, ivy.Container]] = None,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
device: Optional[Union[str, ivy.Container]] = None,
dtype: Optional[Union[str, ivy.Container]] = None,
seed: Optional[Union[int, ivy.Container]] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.gamma. This method simply
wraps the function, and so the docstring for ivy.gamma also applies to
this method with minimal changes.
Parameters
----------
alpha
First parameter of the distribution.
beta
Second parameter of the distribution.
shape
If the given shape is, e.g. '(m, n, k)', then 'm * n * k' samples are drawn.
(Default value = 'None', in which case the broadcast shape of 'alpha' and
'beta' is used)
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
device
device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc.
(Default value = None).
dtype
output array data type. If ``dtype`` is ``None``, the output array data
type will be the default floating-point data type. Default ``None``
seed
A python integer. Used to create a random seed distribution
out
Optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
Drawn samples from the parameterized gamma distribution with the shape of
the input Container.
"""
return ContainerBase.cont_multi_map_in_function(
"gamma",
alpha,
beta,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
shape=shape,
device=device,
dtype=dtype,
seed=seed,
out=out,
)
def gamma(
self: ivy.Container,
beta: Union[int, float, ivy.Container, ivy.Array, ivy.NativeArray],
/,
*,
shape: Optional[Union[ivy.Shape, ivy.NativeShape, ivy.Container]] = None,
device: Optional[Union[str, ivy.Container]] = None,
dtype: Optional[Union[str, ivy.Container]] = None,
seed: Optional[Union[int, ivy.Container]] = None,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container method variant of ivy.gamma. This method simply wraps
the function, and so the docstring for ivy.gamma also applies to this
method with minimal changes.
Parameters
----------
self
First parameter of the distribution.
beta
Second parameter of the distribution.
shape
If the given shape is, e.g. '(m, n, k)', then 'm * n * k' samples are drawn.
(Default value = 'None', in which case the broadcast shape of 'self' and
'beta' is used)
device
device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc.
(Default value = None).
dtype
output array data type. If ``dtype`` is ``None``, the output array data
type will be the default floating-point data type. Default ``None``
seed
A python integer. Used to create a random seed distribution
out
Optional output container, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
Drawn samples from the parameterized gamma distribution with the shape of
the input Container.
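Examples
--------
A minimal usage sketch (sampled values are random, so no output is shown):
>>> alpha = ivy.Container(a=ivy.array([2.0, 3.0]), b=ivy.array([1.0, 2.0]))
>>> samples = alpha.gamma(2.0, seed=0)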
"""
return self.static_gamma(
self,
beta,
shape=shape,
device=device,
dtype=dtype,
seed=seed,
out=out,
)
| ivy/ivy/data_classes/container/experimental/random.py/0 | {
"file_path": "ivy/ivy/data_classes/container/experimental/random.py",
"repo_id": "ivy",
"token_count": 11928
} | 11 |
# global
from typing import Dict, List, Optional, Union
# local
from ivy.data_classes.container.base import ContainerBase
import ivy
class _ContainerWithSet(ContainerBase):
@staticmethod
def _static_unique_all(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
axis: Optional[Union[int, ivy.Container]] = None,
by_value: Union[bool, ivy.Container] = True,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.unique_all. This method
simply wraps the function, and so the docstring for ivy.unique_all also
applies to this method with minimal changes.
Parameters
----------
x
input container.
axis
the axis to apply unique on. If None, the unique elements of the flattened
``x`` are returned.
by_value
If False, the unique elements will be sorted in the same order that they
occur in ''x''. Otherwise, they will be sorted by value.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
Returns
-------
ret
A container of namedtuples ``(values, indices, inverse_indices,
counts)``. The details can be found in the docstring
for ivy.unique_all.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0., 1., 3. , 2. , 1. , 0.]),
... b=ivy.array([1,2,1,3,4,1,3]))
>>> y = ivy.Container.static_unique_all(x)
>>> print(y)
{
a: [
values = ivy.array([0., 1., 2., 3.]),
indices = ivy.array([0, 1, 3, 2]),
inverse_indices = ivy.array([0, 1, 3, 2, 1, 0]),
counts = ivy.array([2, 2, 1, 1])
],
b: [
values = ivy.array([1, 2, 3, 4]),
indices = ivy.array([0, 1, 3, 4]),
inverse_indices = ivy.array([0, 1, 0, 2, 3, 0, 2]),
counts = ivy.array([3, 1, 2, 1])
]
}
"""
return ContainerBase.cont_multi_map_in_function(
"unique_all",
x,
axis=axis,
by_value=by_value,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
def unique_all(
self: ivy.Container,
/,
*,
axis: Optional[Union[int, ivy.Container]] = None,
by_value: Union[bool, ivy.Container] = True,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.unique_all. This method
simply wraps the function, and so the docstring for ivy.unique_all also
applies to this method with minimal changes.
Parameters
----------
self
input container.
axis
the axis to apply unique on. If None, the unique elements of the flattened
``x`` are returned.
by_value
If False, the unique elements will be sorted in the same order that they
occur in ''x''. Otherwise, they will be sorted by value.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
Returns
-------
ret
A container of namedtuples ``(values, indices, inverse_indices,
counts)``. The details of each entry can be found in the docstring
for ivy.unique_all.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0., 1., 3. , 2. , 1. , 0.]),
... b=ivy.array([1,2,1,3,4,1,3]))
>>> y = x.unique_all()
>>> print(y)
[{
a: ivy.array([0., 1., 2., 3.]),
b: ivy.array([1, 2, 3, 4])
}, {
a: ivy.array([0, 1, 3, 2]),
b: ivy.array([0, 1, 3, 4])
}, {
a: ivy.array([0, 1, 3, 2, 1, 0]),
b: ivy.array([0, 1, 0, 2, 3, 0, 2])
}, {
a: ivy.array([2, 2, 1, 1]),
b: ivy.array([3, 1, 2, 1])
}]
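The original leaves can be rebuilt by gathering ``values`` at
``inverse_indices`` (a sketch, assuming :func:`ivy.gather` maps over
containers; no output shown):
>>> values, indices, inverse_indices, counts = x.unique_all()
>>> rebuilt = ivy.gather(values, inverse_indices)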
"""
return self._static_unique_all(
self,
axis=axis,
by_value=by_value,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
@staticmethod
def _static_unique_counts(
x: ivy.Container,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.unique_counts. This
method simply wraps the function, and so the docstring for
ivy.unique_counts also applies to this method with minimal changes.
Parameters
----------
x
input container. If ``x`` has more than one dimension, the function must
flatten ``x`` and return the unique elements of the flattened array.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
Returns
-------
ret
a namedtuple ``(values, counts)`` whose
- first element must have the field name ``values`` and must be an
array containing the unique elements of ``x``.
The array must have the same data type as ``x``.
- second element must have the field name ``counts`` and must be an array
containing the number of times each unique element occurs in ``x``.
The returned array must have same shape as ``values`` and must
have the default array index data type.
Examples
--------
>>> x = ivy.Container(a=ivy.array([0., 1., 3. , 2. , 1. , 0.]),
... b=ivy.array([1,2,1,3,4,1,3]))
>>> y = ivy.Container.static_unique_counts(x)
>>> print(y)
{
a:[values=ivy.array([0.,1.,2.,3.]),counts=ivy.array([2,2,1,1])],
b:[values=ivy.array([1,2,3,4]),counts=ivy.array([3,1,2,1])]
}
"""
return ContainerBase.cont_multi_map_in_function(
"unique_counts",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
def unique_counts(
self: ivy.Container,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.unique_counts. This
method simply wraps the function, and so the docstring for
ivy.unique_counts also applies to this method with minimal changes.
Parameters
----------
self
input container. If ``x`` has more than one dimension, the function must
flatten ``x`` and return the unique elements of the flattened array.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
Returns
-------
ret
a namedtuple ``(values, counts)`` whose
- first element must have the field name ``values`` and must be an
array containing the unique elements of ``x``.
The array must have the same data type as ``x``.
- second element must have the field name ``counts`` and must be an array
containing the number of times each unique element occurs in ``x``.
The returned array must have same shape as ``values`` and must
have the default array index data type.
Examples
--------
With :class:`ivy.Container` instance method:
>>> x = ivy.Container(a=ivy.array([0., 1., 3. , 2. , 1. , 0.]),
... b=ivy.array([1,2,1,3,4,1,3]))
>>> y = x.unique_counts()
>>> print(y)
[{
a: ivy.array([0., 1., 2., 3.]),
b: ivy.array([1, 2, 3, 4])
}, {
a: ivy.array([2, 2, 1, 1]),
b: ivy.array([3, 1, 2, 1])
}]
"""
return self._static_unique_counts(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
@staticmethod
def _static_unique_values(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
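"""ivy.Container static method variant of ivy.unique_values. This method
simply wraps the function, and so the docstring for ivy.unique_values
also applies to this method with minimal changes."""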
return ContainerBase.cont_multi_map_in_function(
"unique_values",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
def unique_values(
self: ivy.Container,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
out: Optional[ivy.Container] = None,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.unique_values. This
method simply wraps the function and applies it on the container.
Parameters
----------
self : ivy.Container
input container
key_chains : list or dict, optional
The key-chains to apply or not apply the method to. Default is `None`.
to_apply : bool, optional
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is `True`.
prune_unapplied : bool, optional
Whether to prune key_chains for which the function was not applied.
Default is `False`.
map_sequences : bool, optional
Whether to also map method to sequences (lists, tuples).
Default is `False`.
out : ivy.Container, optional
The container to return the results in. Default is `None`.
Returns
-------
ivy.Container
The result container with the unique values for each input key-chain.
Raises
------
TypeError
If the input container is not an instance of ivy.Container.
ValueError
If the key_chains parameter is not None, and it is not a
list or a dictionary.
Example
-------
>>> x = ivy.Container(a=[1, 2, 3], b=[2, 2, 3], c=[4, 4, 4])
>>> y = x.unique_values()
>>> print(y)
{
a: ivy.array([1, 2, 3]),
b: ivy.array([2, 3]),
c: ivy.array([4])
}
>>> x = ivy.Container(a=[1, 2, 3], b=[2, 2, 3], c=[4, 4, 4])
>>> y = x.unique_values(key_chains=["a", "b"])
>>> print(y)
{
a: ivy.array([1, 2, 3]),
b: ivy.array([2, 3]),
c: [
4,
4,
4
]
}
"""
return self._static_unique_values(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
out=out,
)
@staticmethod
def _static_unique_inverse(
x: Union[ivy.Array, ivy.NativeArray, ivy.Container],
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> ivy.Container:
"""ivy.Container static method variant of ivy.unique_inverse. This
method simply wraps the function, and so the docstring for
ivy.unique_inverse also applies to this method with minimal changes.
Parameters
----------
x
input container. If ``x`` has more than one dimension, the function must
flatten ``x`` and return the unique elements of the flattened array.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
Returns
-------
ret
a namedtuple ``(values, inverse_indices)`` whose
- first element must have the field name ``values`` and must be an array
containing the unique elements of ``x``. The array must have the same data
type as ``x``.
- second element must have the field name ``inverse_indices`` and
must be an array containing the indices of ``values`` that
reconstruct ``x``. The array must have the same shape as ``x`` and
must have the default array index data type.
Examples
--------
>>> x = ivy.Container(a=ivy.array([4.,8.,3.,5.,9.,4.]),
... b=ivy.array([7,6,4,5,6,3,2]))
>>> y = ivy.Container.static_unique_inverse(x)
>>> print(y)
{
a:[values=ivy.array([3.,4.,5.,8.,9.]),inverse_indices=ivy.array([1,3,0,2,4,1])],
b:[values=ivy.array([2,3,4,5,6,7]),inverse_indices=ivy.array([5,4,2,3,4,1,0])]
}
"""
return ContainerBase.cont_multi_map_in_function(
"unique_inverse",
x,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
def unique_inverse(
self: ivy.Container,
/,
*,
key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
to_apply: Union[bool, ivy.Container] = True,
prune_unapplied: Union[bool, ivy.Container] = False,
map_sequences: Union[bool, ivy.Container] = False,
) -> ivy.Container:
"""ivy.Container instance method variant of ivy.unique_inverse. This
method simply wraps the function, and so the docstring for
ivy.unique_inverse also applies to this method with minimal changes.
Parameters
----------
self
input container. If ``x`` has more than one dimension, the function must
flatten ``x`` and return the unique elements of the flattened array.
key_chains
The key-chains to apply or not apply the method to. Default is ``None``.
to_apply
If True, the method will be applied to key_chains, otherwise key_chains
will be skipped. Default is ``True``.
prune_unapplied
Whether to prune key_chains for which the function was not applied.
Default is ``False``.
map_sequences
Whether to also map method to sequences (lists, tuples).
Default is ``False``.
Returns
-------
ret
a namedtuple ``(values, inverse_indices)`` whose
- first element must have the field name ``values`` and must be an array
containing the unique elements of ``x``. The array must have the same data
type as ``x``.
- second element must have the field name ``inverse_indices`` and
must be an array containing the indices of ``values`` that
reconstruct ``x``. The array must have the same shape as ``x`` and
must have the default array index data type.
Examples
--------
>>> x = ivy.Container(a=ivy.array([4.,8.,3.,5.,9.,4.]),
... b=ivy.array([7,6,4,5,6,3,2]))
>>> y = x.unique_inverse()
>>> print(y)
[{
a: ivy.array([3., 4., 5., 8., 9.]),
b: ivy.array([2, 3, 4, 5, 6, 7])
}, {
a: ivy.array([1, 3, 0, 2, 4, 1]),
b: ivy.array([5, 4, 2, 3, 4, 1, 0])
}]
>>> x = ivy.Container(a=ivy.array([1., 4., 3. , 5. , 3. , 7.]),
... b=ivy.array([3, 2, 6, 3, 7, 4, 9]))
>>> y = x.unique_inverse()
>>> print(y)
[{
a: ivy.array([1., 3., 4., 5., 7.]),
b: ivy.array([2, 3, 4, 6, 7, 9])
}, {
a: ivy.array([0, 2, 1, 3, 1, 4]),
b: ivy.array([1, 0, 3, 1, 4, 2, 5])
}]
"""
return self._static_unique_inverse(
self,
key_chains=key_chains,
to_apply=to_apply,
prune_unapplied=prune_unapplied,
map_sequences=map_sequences,
)
| ivy/ivy/data_classes/container/set.py |
from .rust_api.python_frontend.xla_core import *
from .rust_api.python_frontend.layers import *
from .rust_api.python_frontend.manipulation import *
from .rust_api.python_frontend.activations import *
from .rust_api.python_frontend.norms import *
from .rust_api.python_frontend.stateful_layers import *
# from .rust_api.python_frontend.sequential_handler import *
from .rust_api.python_frontend.general import *
from .rust_api.python_frontend.creation import *
from .rust_api.python_frontend.linear_algebra import *
from .rust_api.python_frontend.elementwise import *
from .rust_api.python_frontend.statistical import *
| ivy/ivy/engines/XLA/__init__.py |
#include "xla_rs.h"
#define ASSIGN_OR_RETURN_STATUS(lhs, rexpr) \
ASSIGN_OR_RETURN_STATUS_IMPL( \
TF_STATUS_MACROS_CONCAT_NAME(_statusor, __COUNTER__), lhs, rexpr)
#define ASSIGN_OR_RETURN_STATUS_IMPL(statusor, lhs, rexpr) \
auto statusor = (rexpr); \
if (!statusor.ok()) \
return new Status(statusor.status()); \
auto lhs = std::move(statusor.value());
#define MAYBE_RETURN_STATUS(rexpr) \
MAYBE_RETURN_STATUS_IMPL(TF_STATUS_MACROS_CONCAT_NAME(_status, __COUNTER__), \
rexpr)
#define MAYBE_RETURN_STATUS_IMPL(statusor, rexpr) \
auto statusor = (rexpr); \
if (!statusor.ok()) \
return new Status(statusor);
#define BEGIN_PROTECT_OP try {
#define END_PROTECT_OP_B(builder)                                              \
  }                                                                            \
  catch (const std::exception &e) {                                            \
    return new XlaOp(builder->ReportError(tsl::errors::Internal(e.what())));   \
  }
#define END_PROTECT_OP(arg)                                                    \
  }                                                                            \
  catch (const std::exception &e) {                                            \
    return new XlaOp(                                                          \
        arg->builder()->ReportError(tsl::errors::Internal(e.what())));         \
  }
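// BEGIN_PROTECT_OP / END_PROTECT_OP wrap each op-building function so that a
// C++ exception never crosses the C ABI: the exception is converted into an
// error recorded on the XlaBuilder, which callers can later inspect through
// first_error() / get_current_status() below.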
status pjrt_cpu_client_create(pjrt_client *output) {
ASSIGN_OR_RETURN_STATUS(client, xla::GetTfrtCpuClient(false));
*output = new std::shared_ptr(std::move(client));
return nullptr;
}
status pjrt_gpu_client_create(pjrt_client *output, double memory_fraction,
bool preallocate) {
xla::GpuAllocatorConfig allocator = {.memory_fraction = memory_fraction,
.preallocate = preallocate};
ASSIGN_OR_RETURN_STATUS(
client, xla::GetStreamExecutorGpuClient(false, allocator, nullptr, 0));
*output = new std::shared_ptr(std::move(client));
return nullptr;
}
status pjrt_tpu_client_create(pjrt_client *output,
int max_inflight_computations) {
ASSIGN_OR_RETURN_STATUS(client, xla::GetTpuClient(max_inflight_computations));
*output = new std::shared_ptr(std::move(client));
return nullptr;
}
int pjrt_client_device_count(pjrt_client c) { return (*c)->device_count(); }
int pjrt_client_addressable_device_count(pjrt_client c) {
return (*c)->addressable_device_count();
}
void pjrt_client_devices(pjrt_client c, pjrt_device *outputs) {
size_t index = 0;
for (auto device : (*c)->devices()) {
outputs[index++] = device;
}
}
void pjrt_client_addressable_devices(pjrt_client c, pjrt_device *outputs) {
size_t index = 0;
for (auto device : (*c)->addressable_devices()) {
outputs[index++] = device;
}
}
char *pjrt_client_platform_name(pjrt_client c) {
// TODO: Avoid the double allocation when converting string views.
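  // A possible fix (illustrative sketch): copy straight from the string_view
  // instead of going through std::string, e.g.
  //   absl::string_view v = (*c)->platform_name();
  //   return strndup(v.data(), v.size());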
return strdup(std::string((*c)->platform_name()).c_str());
}
char *pjrt_client_platform_version(pjrt_client c) {
return strdup(std::string((*c)->platform_version()).c_str());
}
void pjrt_client_free(pjrt_client b) { delete b; }
void pjrt_loaded_executable_free(pjrt_loaded_executable b) { delete b; }
status pjrt_buffer_from_host_buffer(const pjrt_client client,
const pjrt_device device, const void *d,
int pr_type, int dsize, const int64_t *ds,
pjrt_buffer *output) {
PjRtDevice *device_ = device == nullptr ? (*client)->devices()[0] : device;
ASSIGN_OR_RETURN_STATUS(
buffer,
(*client)->BufferFromHostBuffer(
d, (PrimitiveType)pr_type, absl::Span<const int64_t>(ds, dsize), {},
PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall, []() {},
device_));
*output = buffer.release();
return nullptr;
}
status pjrt_buffer_from_host_literal(const pjrt_client client,
const pjrt_device device, const literal l,
pjrt_buffer *output) {
PjRtDevice *d = device == nullptr ? (*client)->devices()[0] : device;
ASSIGN_OR_RETURN_STATUS(buffer, (*client)->BufferFromHostLiteral(*l, d));
*output = buffer.release();
return nullptr;
}
status pjrt_buffer_to_literal_sync(pjrt_buffer b, literal *output) {
ASSIGN_OR_RETURN_STATUS(literal, b->ToLiteralSync());
*output = new Literal();
**output = std::move(*literal);
return nullptr;
}
shape pjrt_buffer_on_device_shape(pjrt_buffer b) {
return new Shape(b->on_device_shape());
}
status pjrt_buffer_copy_to_device(pjrt_buffer b, pjrt_device device,
pjrt_buffer *output) {
ASSIGN_OR_RETURN_STATUS(copied_b, b->CopyToDevice(device));
*output = copied_b.release();
return nullptr;
}
status pjrt_buffer_copy_raw_to_host_sync(pjrt_buffer b, void *dst,
size_t offset, size_t transfer_size) {
MAYBE_RETURN_STATUS(b->CopyRawToHost(dst, offset, transfer_size).Await());
return nullptr;
}
void pjrt_buffer_free(pjrt_buffer b) { delete b; }
int pjrt_device_id(pjrt_device d) { return d->id(); }
int pjrt_device_process_index(pjrt_device d) { return d->process_index(); }
int pjrt_device_local_hardware_id(pjrt_device d) {
return d->local_hardware_id();
}
status pjrt_device_transfer_to_infeed(pjrt_device d, const literal l) {
MAYBE_RETURN_STATUS(d->TransferToInfeed(*l));
return nullptr;
}
status pjrt_device_transfer_from_outfeed(pjrt_device d, literal l) {
MAYBE_RETURN_STATUS(d->TransferFromOutfeed(l));
return nullptr;
}
char *pjrt_device_kind(pjrt_device d) {
return strdup(std::string(d->device_kind()).c_str());
}
char *pjrt_device_debug_string(pjrt_device d) {
return strdup(std::string(d->DebugString()).c_str());
}
char *pjrt_device_to_string(pjrt_device d) {
return strdup(std::string(d->ToString()).c_str());
}
xla_builder xla_builder_create(const char *name) {
return new XlaBuilder(name);
}
void xla_builder_free(xla_builder b) { delete b; }
xla_op constant_literal(const xla_builder b, const literal l) {
BEGIN_PROTECT_OP
return new XlaOp(ConstantLiteral(b, *l));
END_PROTECT_OP_B(b)
}
#define CONST_OP_R01(native_type, primitive_type) \
xla_op constant_r0_##native_type(const xla_builder b, native_type f) { \
return new XlaOp(ConstantR0<native_type>(b, f)); \
} \
xla_op constant_r1c_##native_type(const xla_builder b, native_type f, \
size_t len) { \
return new XlaOp(ConstantR1<native_type>(b, len, f)); \
} \
xla_op constant_r1_##native_type(const xla_builder b, const native_type *f, \
size_t len) { \
return new XlaOp( \
ConstantR1<native_type>(b, absl::Span<const native_type>(f, len))); \
} \
literal create_r0_##native_type(native_type f) { \
return new Literal(LiteralUtil::CreateR0<native_type>(f)); \
} \
literal create_r1_##native_type(const native_type *f, size_t nel) { \
return new Literal(LiteralUtil::CreateR1<native_type>( \
absl::Span<const native_type>(f, nel))); \
} \
native_type literal_get_first_element_##native_type(const literal l) { \
return l->GetFirstElement<native_type>(); \
}
FOR_EACH_NATIVE_TYPE(CONST_OP_R01)
#undef CONST_OP_R01
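// FOR_EACH_NATIVE_TYPE is an X-macro: it is assumed to invoke CONST_OP_R01
// once per supported native type (e.g. float, double, int32_t, ...),
// stamping out the constant/literal helpers above for each of them.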
Shape make_shape_internal(int pr_type, int dsize, const int64_t *ds) {
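  // A negative entry in `ds` encodes a dynamic dimension whose upper bound
  // is the absolute value; such shapes get per-dimension dynamic flags below.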
bool has_negative_dim = false;
for (int i = 0; i < dsize; ++i) {
if (ds[i] < 0) {
has_negative_dim = true;
break;
}
}
Shape shape;
if (has_negative_dim) {
std::vector<bool> dynamic;
std::vector<int64_t> bounds;
for (int i = 0; i < dsize; ++i) {
if (ds[i] < 0) {
bounds.push_back(-ds[i]);
dynamic.push_back(true);
} else {
bounds.push_back(ds[i]);
dynamic.push_back(false);
}
}
shape = ShapeUtil::MakeShape(
(PrimitiveType)pr_type,
absl::Span<const int64_t>(bounds.data(), bounds.size()), dynamic);
} else {
shape = ShapeUtil::MakeShape((PrimitiveType)pr_type,
absl::Span<const int64_t>(ds, dsize));
}
return shape;
}
shape make_shape_array(int pr_type, size_t dsize, const int64_t *ds) {
return new Shape(make_shape_internal(pr_type, dsize, ds));
}
shape make_shape_tuple(size_t dsize, const shape *ds) {
std::vector<Shape> elts;
for (size_t i = 0; i < dsize; ++i) {
elts.push_back(*ds[i]);
}
return new Shape(ShapeUtil::MakeTupleShape(elts));
}
xla_op parameter(const xla_builder b, int64_t id, int pr_type, int dsize,
const int64_t *ds, const char *name) {
BEGIN_PROTECT_OP
Shape shape = make_shape_internal(pr_type, dsize, ds);
return new XlaOp(Parameter(b, id, shape, std::string(name)));
END_PROTECT_OP_B(b)
}
xla_op parameter_s(const xla_builder b, int64_t id, const shape s,
const char *name) {
BEGIN_PROTECT_OP
return new XlaOp(Parameter(b, id, *s, std::string(name)));
END_PROTECT_OP_B(b)
}
xla_op infeed(const xla_builder b, int pr_type, int dsize, const int64_t *ds,
const char *config) {
BEGIN_PROTECT_OP
Shape shape = make_shape_internal(pr_type, dsize, ds);
return new XlaOp(Infeed(b, shape, std::string(config)));
END_PROTECT_OP_B(b)
}
void outfeed(const xla_op op, int pr_type, int dsize, const int64_t *ds,
const char *outfeed_config) {
Shape shape = make_shape_internal(pr_type, dsize, ds);
Outfeed(*op, shape, std::string(outfeed_config));
}
xla_op op_add(const xla_op lhs, const xla_op rhs) {
BEGIN_PROTECT_OP
return new XlaOp(Add(*lhs, *rhs));
END_PROTECT_OP(lhs)
}
xla_op op_sub(const xla_op lhs, const xla_op rhs) {
BEGIN_PROTECT_OP
return new XlaOp(Sub(*lhs, *rhs));
END_PROTECT_OP(lhs)
}
xla_op op_mul(const xla_op lhs, const xla_op rhs) {
BEGIN_PROTECT_OP
return new XlaOp(Mul(*lhs, *rhs));
END_PROTECT_OP(lhs)
}
xla_op op_div(const xla_op lhs, const xla_op rhs) {
BEGIN_PROTECT_OP
return new XlaOp(Div(*lhs, *rhs));
END_PROTECT_OP(lhs)
}
xla_op op_rem(const xla_op lhs, const xla_op rhs) {
BEGIN_PROTECT_OP
return new XlaOp(Rem(*lhs, *rhs));
END_PROTECT_OP(lhs)
}
xla_op op_max(const xla_op lhs, const xla_op rhs) {
BEGIN_PROTECT_OP
return new XlaOp(Max(*lhs, *rhs));
END_PROTECT_OP(lhs)
}
xla_op op_min(const xla_op lhs, const xla_op rhs) {
BEGIN_PROTECT_OP
return new XlaOp(Min(*lhs, *rhs));
END_PROTECT_OP(lhs)
}
xla_op op_and(const xla_op lhs, const xla_op rhs) {
BEGIN_PROTECT_OP
return new XlaOp(And(*lhs, *rhs));
END_PROTECT_OP(lhs)
}
xla_op op_or(const xla_op lhs, const xla_op rhs) {
BEGIN_PROTECT_OP
return new XlaOp(Or(*lhs, *rhs));
END_PROTECT_OP(lhs)
}
xla_op op_xor(const xla_op lhs, const xla_op rhs) {
BEGIN_PROTECT_OP
return new XlaOp(Xor(*lhs, *rhs));
END_PROTECT_OP(lhs)
}
xla_op op_atan2(const xla_op lhs, const xla_op rhs) {
BEGIN_PROTECT_OP
return new XlaOp(Atan2(*lhs, *rhs));
END_PROTECT_OP(lhs)
}
xla_op op_pow(const xla_op lhs, const xla_op rhs) {
BEGIN_PROTECT_OP
return new XlaOp(Pow(*lhs, *rhs));
END_PROTECT_OP(lhs)
}
xla_op op_dot(const xla_op lhs, const xla_op rhs) {
BEGIN_PROTECT_OP
return new XlaOp(Dot(*lhs, *rhs));
END_PROTECT_OP(lhs)
}
xla_op op_dot_general(const xla_op lhs, const xla_op rhs, const int64_t *lhs_c,
size_t nlhs_c, const int64_t *rhs_c, size_t nrhs_c,
const int64_t *lhs_b, size_t nlhs_b, const int64_t *rhs_b,
size_t nrhs_b) {
BEGIN_PROTECT_OP
DotDimensionNumbers dnums;
for (size_t i = 0; i < nlhs_c; ++i)
dnums.add_lhs_contracting_dimensions(lhs_c[i]);
for (size_t i = 0; i < nrhs_c; ++i)
dnums.add_rhs_contracting_dimensions(rhs_c[i]);
for (size_t i = 0; i < nlhs_b; ++i)
dnums.add_lhs_batch_dimensions(lhs_b[i]);
for (size_t i = 0; i < nrhs_b; ++i)
dnums.add_rhs_batch_dimensions(rhs_b[i]);
return new XlaOp(DotGeneral(*lhs, *rhs, dnums));
END_PROTECT_OP(lhs)
}
xla_op op_eq(const xla_op lhs, const xla_op rhs) {
BEGIN_PROTECT_OP
return new XlaOp(Eq(*lhs, *rhs));
END_PROTECT_OP(lhs)
}
xla_op op_ne(const xla_op lhs, const xla_op rhs) {
BEGIN_PROTECT_OP
return new XlaOp(Ne(*lhs, *rhs));
END_PROTECT_OP(lhs)
}
xla_op op_ge(const xla_op lhs, const xla_op rhs) {
BEGIN_PROTECT_OP
return new XlaOp(Ge(*lhs, *rhs));
END_PROTECT_OP(lhs)
}
xla_op op_gt(const xla_op lhs, const xla_op rhs) {
BEGIN_PROTECT_OP
return new XlaOp(Gt(*lhs, *rhs));
END_PROTECT_OP(lhs)
}
xla_op op_le(const xla_op lhs, const xla_op rhs) {
BEGIN_PROTECT_OP
return new XlaOp(Le(*lhs, *rhs));
END_PROTECT_OP(lhs)
}
xla_op op_lt(const xla_op lhs, const xla_op rhs) {
BEGIN_PROTECT_OP
return new XlaOp(Lt(*lhs, *rhs));
END_PROTECT_OP(lhs)
}
xla_op op_shift_left(const xla_op lhs, const xla_op rhs) {
BEGIN_PROTECT_OP
return new XlaOp(ShiftLeft(*lhs, *rhs));
END_PROTECT_OP(lhs)
}
xla_op op_shift_right_arith(const xla_op lhs, const xla_op rhs) {
BEGIN_PROTECT_OP
return new XlaOp(ShiftRightArithmetic(*lhs, *rhs));
END_PROTECT_OP(lhs)
}
xla_op op_shift_right_logic(const xla_op lhs, const xla_op rhs) {
BEGIN_PROTECT_OP
return new XlaOp(ShiftRightLogical(*lhs, *rhs));
END_PROTECT_OP(lhs)
}
xla_op op_population_count(const xla_op arg) {
BEGIN_PROTECT_OP
return new XlaOp(PopulationCount(*arg));
END_PROTECT_OP(arg)
}
xla_op op_not(const xla_op arg) {
BEGIN_PROTECT_OP
return new XlaOp(Not(*arg));
END_PROTECT_OP(arg)
}
xla_op op_abs(const xla_op arg) {
BEGIN_PROTECT_OP
return new XlaOp(Abs(*arg));
END_PROTECT_OP(arg)
}
xla_op op_exp(const xla_op arg) {
BEGIN_PROTECT_OP
return new XlaOp(Exp(*arg));
END_PROTECT_OP(arg)
}
xla_op op_expm1(const xla_op arg) {
BEGIN_PROTECT_OP
return new XlaOp(Expm1(*arg));
END_PROTECT_OP(arg)
}
xla_op op_floor(const xla_op arg) {
BEGIN_PROTECT_OP
return new XlaOp(Floor(*arg));
END_PROTECT_OP(arg)
}
xla_op op_ceil(const xla_op arg) {
BEGIN_PROTECT_OP
return new XlaOp(Ceil(*arg));
END_PROTECT_OP(arg)
}
xla_op op_round(const xla_op arg) {
BEGIN_PROTECT_OP
return new XlaOp(Round(*arg));
END_PROTECT_OP(arg)
}
xla_op op_round_nearest_even(const xla_op arg) {
BEGIN_PROTECT_OP
return new XlaOp(RoundNearestEven(*arg));
END_PROTECT_OP(arg)
}
xla_op op_log(const xla_op arg) {
BEGIN_PROTECT_OP
return new XlaOp(Log(*arg));
END_PROTECT_OP(arg)
}
xla_op op_log1p(const xla_op arg) {
BEGIN_PROTECT_OP
return new XlaOp(Log1p(*arg));
END_PROTECT_OP(arg)
}
xla_op op_logistic(const xla_op arg) {
BEGIN_PROTECT_OP
return new XlaOp(Logistic(*arg));
END_PROTECT_OP(arg)
}
xla_op op_sign(const xla_op arg) {
BEGIN_PROTECT_OP
return new XlaOp(Sign(*arg));
END_PROTECT_OP(arg)
}
xla_op op_clz(const xla_op arg) {
BEGIN_PROTECT_OP
return new XlaOp(Clz(*arg));
END_PROTECT_OP(arg)
}
xla_op op_cos(const xla_op arg) {
BEGIN_PROTECT_OP
return new XlaOp(Cos(*arg));
END_PROTECT_OP(arg)
}
xla_op op_sin(const xla_op arg) {
BEGIN_PROTECT_OP
return new XlaOp(Sin(*arg));
END_PROTECT_OP(arg)
}
xla_op op_tanh(const xla_op arg) {
BEGIN_PROTECT_OP
return new XlaOp(Tanh(*arg));
END_PROTECT_OP(arg)
}
xla_op op_real(const xla_op arg) {
BEGIN_PROTECT_OP
return new XlaOp(Real(*arg));
END_PROTECT_OP(arg)
}
xla_op op_imag(const xla_op arg) {
BEGIN_PROTECT_OP
return new XlaOp(Imag(*arg));
END_PROTECT_OP(arg)
}
xla_op op_conj(const xla_op arg) {
BEGIN_PROTECT_OP
return new XlaOp(Conj(*arg));
END_PROTECT_OP(arg)
}
xla_op op_square(const xla_op arg) {
BEGIN_PROTECT_OP
return new XlaOp(Square(*arg));
END_PROTECT_OP(arg)
}
xla_op op_sqrt(const xla_op arg) {
BEGIN_PROTECT_OP
return new XlaOp(Sqrt(*arg));
END_PROTECT_OP(arg)
}
xla_op op_rsqrt(const xla_op arg) {
BEGIN_PROTECT_OP
return new XlaOp(Rsqrt(*arg));
END_PROTECT_OP(arg)
}
xla_op op_cbrt(const xla_op arg) {
BEGIN_PROTECT_OP
return new XlaOp(Cbrt(*arg));
END_PROTECT_OP(arg)
}
xla_op op_is_finite(const xla_op arg) {
BEGIN_PROTECT_OP
return new XlaOp(IsFinite(*arg));
END_PROTECT_OP(arg)
}
xla_op op_neg(const xla_op arg) {
BEGIN_PROTECT_OP
return new XlaOp(Neg(*arg));
END_PROTECT_OP(arg)
}
xla_op op_lower_triangle(const xla_op arg) {
BEGIN_PROTECT_OP
return new XlaOp(LowerTriangle(*arg));
END_PROTECT_OP(arg)
}
xla_op op_upper_triangle(const xla_op arg) {
BEGIN_PROTECT_OP
return new XlaOp(UpperTriangle(*arg));
END_PROTECT_OP(arg)
}
xla_op op_erf(const xla_op arg) {
BEGIN_PROTECT_OP
return new XlaOp(Erf(*arg));
END_PROTECT_OP(arg)
}
xla_op op_einsum1(const xla_op arg, const char *config) {
BEGIN_PROTECT_OP
return new XlaOp(Einsum(*arg, config));
END_PROTECT_OP(arg)
}
xla_op op_einsum2(const xla_op arg1, const xla_op arg2, const char *config) {
BEGIN_PROTECT_OP
return new XlaOp(Einsum(*arg1, *arg2, config));
END_PROTECT_OP(arg1)
}
xla_op op_copy(const xla_op arg) {
BEGIN_PROTECT_OP
return new XlaOp(Copy(*arg));
END_PROTECT_OP(arg)
}
xla_op op_clone(const xla_op arg) {
BEGIN_PROTECT_OP
return new XlaOp(*arg);
END_PROTECT_OP(arg)
}
xla_op op_zeros_like(const xla_op arg) {
BEGIN_PROTECT_OP
return new XlaOp(ZerosLike(*arg));
END_PROTECT_OP(arg)
}
xla_op op_zero_like(const xla_op arg) {
BEGIN_PROTECT_OP
const Shape *shape = arg->builder()->GetShapePtr(*arg).value();
return new XlaOp(Zero(arg->builder(), shape->element_type()));
END_PROTECT_OP(arg)
}
xla_op op_reshape(const xla_op arg, size_t dsize, const int64_t *ds) {
BEGIN_PROTECT_OP
return new XlaOp(Reshape(*arg, absl::Span<const int64_t>(ds, dsize)));
END_PROTECT_OP(arg)
}
xla_op op_dynamic_reshape(const xla_op arg, size_t n_ops, const xla_op *ds,
size_t n_new_size_bounds, const int64_t *new_size_bounds,
const bool *dims_are_dynamic) {
BEGIN_PROTECT_OP
std::vector<XlaOp> vec_dim_sizes;
for (size_t i = 0; i < n_ops; ++i) {
vec_dim_sizes.push_back(*ds[i]);
}
std::vector<bool> vec_dims_are_dynamic;
for (size_t i = 0; i < n_ops; ++i) {
vec_dims_are_dynamic.push_back(dims_are_dynamic[i]);
}
return new XlaOp(
DynamicReshape(*arg,
absl::Span<const XlaOp>(vec_dim_sizes),
absl::Span<const int64_t>(new_size_bounds, n_new_size_bounds),
vec_dims_are_dynamic));
END_PROTECT_OP(arg)
}
xla_op op_broadcast(const xla_op arg, size_t dsize, const int64_t *ds) {
BEGIN_PROTECT_OP
return new XlaOp(Broadcast(*arg, absl::Span<const int64_t>(ds, dsize)));
END_PROTECT_OP(arg)
}
xla_op op_broadcast_in_dim(const xla_op arg, size_t out_dsize,
const int64_t *out_ds, size_t broadcast_dsize,
const int64_t *broadcast_ds) {
BEGIN_PROTECT_OP
return new XlaOp(
BroadcastInDim(*arg, absl::Span<const int64_t>(out_ds, out_dsize),
absl::Span<const int64_t>(broadcast_ds, broadcast_dsize)));
END_PROTECT_OP(arg)
}
xla_op op_collapse(const xla_op arg, size_t dsize, const int64_t *ds) {
BEGIN_PROTECT_OP
return new XlaOp(Collapse(*arg, absl::Span<const int64_t>(ds, dsize)));
END_PROTECT_OP(arg)
}
xla_op op_transpose(const xla_op arg, size_t dsize, const int64_t *ds) {
BEGIN_PROTECT_OP
return new XlaOp(Transpose(*arg, absl::Span<const int64_t>(ds, dsize)));
END_PROTECT_OP(arg)
}
xla_op op_clamp(const xla_op arg1, const xla_op arg2, const xla_op arg3) {
BEGIN_PROTECT_OP
return new XlaOp(Clamp(*arg1, *arg2, *arg3));
END_PROTECT_OP(arg1)
}
xla_op op_select(const xla_op arg1, const xla_op arg2, const xla_op arg3) {
BEGIN_PROTECT_OP
return new XlaOp(Select(*arg1, *arg2, *arg3));
END_PROTECT_OP(arg1)
}
xla_op op_call(const xla_builder b, const xla_computation f, size_t n_ops, const xla_op *args) {
BEGIN_PROTECT_OP
std::vector<XlaOp> args_;
for (size_t i = 0; i < n_ops; ++i) {
args_.push_back(*args[i]);
}
return new XlaOp(Call(b, *f, absl::Span<const XlaOp>(args_)));
END_PROTECT_OP_B(b)
}
xla_op op_map(const xla_builder b, size_t n_ops, const xla_op *ops, const xla_computation f,
size_t n_dims, const int64_t *dims, size_t n_static_ops, const xla_op *static_ops) {
BEGIN_PROTECT_OP
std::vector<XlaOp> ops_;
for (size_t i = 0; i < n_ops; ++i) {
ops_.push_back(*ops[i]);
}
std::vector<XlaOp> static_ops_;
for (size_t i = 0; i < n_static_ops; ++i) {
static_ops_.push_back(*static_ops[i]);
}
return new XlaOp(Map(b, absl::Span<const XlaOp>(ops_), *f,
absl::Span<const int64_t>(dims, n_dims),
absl::Span<const XlaOp>(static_ops_)));
END_PROTECT_OP_B(b)
}
xla_op op_rng_uniform(const xla_op arg1, const xla_op arg2, int pr_type,
int dsize, const int64_t *ds) {
BEGIN_PROTECT_OP
auto shape = ShapeUtil::MakeShape((PrimitiveType)pr_type,
absl::Span<const int64_t>(ds, dsize));
return new XlaOp(RngUniform(*arg1, *arg2, shape));
END_PROTECT_OP(arg1)
}
xla_op op_rng_normal(const xla_op arg1, const xla_op arg2, int pr_type,
int dsize, const int64_t *ds) {
BEGIN_PROTECT_OP
auto shape = ShapeUtil::MakeShape((PrimitiveType)pr_type,
absl::Span<const int64_t>(ds, dsize));
return new XlaOp(RngNormal(*arg1, *arg2, shape));
END_PROTECT_OP(arg1)
}
xla_op op_pad(const xla_op arg,
const xla_op padding_value,
size_t n_dims,
const int64_t *edge_low,
const int64_t *edge_high,
const int64_t *interior) {
BEGIN_PROTECT_OP
PaddingConfig config;
for (size_t i = 0; i < n_dims; ++i) {
auto dim = config.add_dimensions();
dim->set_edge_padding_low(edge_low[i]);
dim->set_edge_padding_high(edge_high[i]);
dim->set_interior_padding(interior[i]);
}
return new XlaOp(Pad(*arg, *padding_value, config));
END_PROTECT_OP(arg)
}
xla_op op_pad_in_dim(const xla_op arg, const xla_op padding_value,
                     int64_t dimno, int64_t pad_lo, int64_t pad_hi) {
  BEGIN_PROTECT_OP
  return new XlaOp(PadInDim(*arg, *padding_value, dimno, pad_lo, pad_hi));
END_PROTECT_OP(arg)
}
xla_op op_slice(const xla_op arg, size_t start_dsize, const int64_t *start_ds,
size_t limit_dsize, const int64_t *limit_ds,
size_t stride_dsize, const int64_t *stride_ds) {
BEGIN_PROTECT_OP
return new XlaOp(Slice(*arg, absl::Span<const int64_t>(start_ds, start_dsize),
absl::Span<const int64_t>(limit_ds, limit_dsize),
absl::Span<const int64_t>(stride_ds, stride_dsize)));
END_PROTECT_OP(arg)
}
xla_op op_slice_in_dim(const xla_op arg, int64_t start, int64_t stop,
int64_t stride, int64_t dim) {
BEGIN_PROTECT_OP
return new XlaOp(SliceInDim(*arg, start, stop, stride, dim));
END_PROTECT_OP(arg)
}
xla_op op_dynamic_slice(const xla_op arg, size_t n_ops,
const xla_op *start_indices,
size_t slice_dsize, const int64_t *slice_ds) {
BEGIN_PROTECT_OP
std::vector<XlaOp> indices;
for (size_t i = 0; i < n_ops; ++i) {
indices.push_back(*start_indices[i]);
}
return new XlaOp(
DynamicSlice(*arg, absl::Span<const XlaOp>(indices),
absl::Span<const int64_t>(slice_ds, slice_dsize)));
END_PROTECT_OP(arg)
}
xla_op op_dynamic_update_slice(const xla_op arg, const xla_op update,
size_t n_ops, const xla_op *start_indices) {
BEGIN_PROTECT_OP
std::vector<XlaOp> indices;
for (size_t i = 0; i < n_ops; ++i) {
indices.push_back(*start_indices[i]);
}
return new XlaOp(DynamicUpdateSlice(*arg, *update, absl::Span<const XlaOp>(indices)));
END_PROTECT_OP(arg)
}
xla_op op_concat_in_dim(const xla_op arg, const xla_op *args, size_t nargs,
int64_t dim) {
BEGIN_PROTECT_OP
std::vector<XlaOp> args_ = {*arg};
for (size_t i = 0; i < nargs; ++i) {
args_.push_back(*args[i]);
}
return new XlaOp(
ConcatInDim(arg->builder(), absl::Span<const XlaOp>(args_), dim));
END_PROTECT_OP(arg)
}
xla_op op_tuple(const xla_builder b, const xla_op *args, size_t nargs) {
BEGIN_PROTECT_OP
std::vector<XlaOp> args_;
for (size_t i = 0; i < nargs; ++i) {
args_.push_back(*args[i]);
}
return new XlaOp(Tuple(b, absl::Span<const XlaOp>(args_)));
END_PROTECT_OP_B(b)
}
xla_op op_get_tuple_element(const xla_op arg, int64_t index) {
BEGIN_PROTECT_OP
return new XlaOp(GetTupleElement(*arg, index));
END_PROTECT_OP(arg)
}
xla_op op_gather(const xla_op arg1, const xla_op arg2,
const int64_t *offset_dims, size_t noffset_dims,
const int64_t *collapsed_slice_dims,
size_t ncollapsed_slice_dims, const int64_t *start_index_map,
size_t nstart_index_map, const int64_t *set_index_vector_dim,
const int64_t *slice_sizes, size_t nslice_sizes) {
BEGIN_PROTECT_OP
GatherDimensionNumbers dnums;
for (size_t i = 0; i < noffset_dims; ++i) {
dnums.add_offset_dims(offset_dims[i]);
}
for (size_t i = 0; i < ncollapsed_slice_dims; ++i) {
dnums.add_collapsed_slice_dims(collapsed_slice_dims[i]);
}
for (size_t i = 0; i < nstart_index_map; ++i) {
dnums.add_start_index_map(start_index_map[i]);
}
if (set_index_vector_dim) {
dnums.set_index_vector_dim(*set_index_vector_dim);
}
auto ss = absl::Span<const int64_t>(slice_sizes, nslice_sizes);
return new XlaOp(Gather(*arg1, *arg2, dnums, ss));
END_PROTECT_OP(arg1)
}
xla_op op_scatter(size_t n_ops,
const xla_op *operands,
const xla_op scatter_indices,
size_t n_updates,
const xla_op *updates,
const xla_computation comp,
size_t n_update_window_dims,
const int64_t *update_window_dims,
size_t n_inserted_window_dims,
const int64_t *inserted_window_dims,
size_t n_scatter_dims_to_operand_dims,
const int64_t *scatter_dims_to_operand_dims,
int64_t index_vector_dim
) {
BEGIN_PROTECT_OP
std::vector<XlaOp> operands_;
for (size_t i = 0; i < n_ops; ++i) {
operands_.push_back(*operands[i]);
}
std::vector<XlaOp> updates_;
for (size_t i = 0; i < n_updates; ++i) {
updates_.push_back(*updates[i]);
}
ScatterDimensionNumbers dnums;
for (size_t i = 0; i < n_update_window_dims; ++i) {
dnums.add_update_window_dims(update_window_dims[i]);
}
for (size_t i = 0; i < n_inserted_window_dims; ++i) {
dnums.add_inserted_window_dims(inserted_window_dims[i]);
}
for (size_t i = 0; i < n_scatter_dims_to_operand_dims; ++i) {
dnums.add_scatter_dims_to_operand_dims(scatter_dims_to_operand_dims[i]);
}
dnums.set_index_vector_dim(index_vector_dim);
return new XlaOp(Scatter(operands_, *scatter_indices, updates_, *comp, dnums));
END_PROTECT_OP(scatter_indices)
}
xla_op op_convert_element_type(const xla_op arg, int pr_type) {
BEGIN_PROTECT_OP
return new XlaOp(ConvertElementType(*arg, (PrimitiveType)pr_type));
END_PROTECT_OP(arg)
}
xla_op op_dimensions_size(const xla_op arg, int64_t dim) {
BEGIN_PROTECT_OP
return new XlaOp(GetDimensionSize(*arg, dim));
END_PROTECT_OP(arg)
}
xla_op op_reduce(const xla_op arg, const xla_op init,
const xla_computation comp, const int64_t *dims,
size_t ndims) {
BEGIN_PROTECT_OP
return new XlaOp(
Reduce(*arg, *init, *comp, absl::Span<const int64_t>(dims, ndims)));
END_PROTECT_OP(arg)
}
xla_op op_internal_error(const xla_builder b, const char *error) {
BEGIN_PROTECT_OP
return new XlaOp(b->ReportError(tsl::errors::Internal(error)));
END_PROTECT_OP_B(b)
}
xla_op op_unknown_error(const xla_builder b, const char *error) {
BEGIN_PROTECT_OP
return new XlaOp(b->ReportError(tsl::errors::Unknown(error)));
END_PROTECT_OP_B(b)
}
xla_op op_invalid_argument_error(const xla_builder b, const char *error) {
BEGIN_PROTECT_OP
return new XlaOp(b->ReportError(tsl::errors::InvalidArgument(error)));
END_PROTECT_OP_B(b)
}
xla_op op_zero(const xla_builder b, int pr_type) {
BEGIN_PROTECT_OP
return new XlaOp(Zero(b, (PrimitiveType)pr_type));
END_PROTECT_OP_B(b)
}
xla_op op_one(const xla_builder b, int pr_type) {
BEGIN_PROTECT_OP
return new XlaOp(One(b, (PrimitiveType)pr_type));
END_PROTECT_OP_B(b)
}
xla_op op_min_value(const xla_builder b, int pr_type) {
BEGIN_PROTECT_OP
return new XlaOp(MinValue(b, (PrimitiveType)pr_type));
END_PROTECT_OP_B(b)
}
xla_op op_max_value(const xla_builder b, int pr_type) {
BEGIN_PROTECT_OP
return new XlaOp(MaxValue(b, (PrimitiveType)pr_type));
END_PROTECT_OP_B(b)
}
xla_op op_iota1(const xla_builder b, int pr_type, size_t sz) {
BEGIN_PROTECT_OP
return new XlaOp(Iota(b, (PrimitiveType)pr_type, (int64_t)sz));
END_PROTECT_OP_B(b)
}
xla_op op_iota(const xla_builder b, int pr_type, size_t dsize,
const int64_t *ds, int64_t increasing_dim) {
BEGIN_PROTECT_OP
auto shape = ShapeUtil::MakeShape((PrimitiveType)pr_type,
absl::Span<const int64_t>(ds, dsize));
return new XlaOp(Iota(b, shape, increasing_dim));
END_PROTECT_OP_B(b)
}
xla_op op_while(const xla_computation cond, const xla_computation body,
const xla_op init) {
BEGIN_PROTECT_OP
return new XlaOp(While(*cond, *body, *init));
END_PROTECT_OP(init)
}
xla_op op_conditional(const xla_op pred, const xla_op true_op,
const xla_computation true_comp, const xla_op false_op,
const xla_computation false_comp) {
BEGIN_PROTECT_OP
return new XlaOp(
Conditional(*pred, *true_op, *true_comp, *false_op, *false_comp));
END_PROTECT_OP(pred)
}
Padding ParsePadding(const char* padding_config) {
if (std::string(padding_config) == "same") {
return Padding::kSame;
}
if (std::string(padding_config) == "valid") {
return Padding::kValid;
}
throw std::runtime_error("Invalid padding config: " + std::string(padding_config));
}
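// Throwing here is safe: the caller wraps the call in BEGIN_PROTECT_OP /
// END_PROTECT_OP, which converts the exception into a builder error.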
xla_op op_conv(const xla_op lhs,
const xla_op rhs,
size_t n_strides,
const int64_t *window_strides,
const char *padding_config,
int64_t feature_group_count,
int64_t batch_group_count) {
BEGIN_PROTECT_OP
Padding padding = ParsePadding(padding_config);
return new XlaOp(
Conv(*lhs, *rhs, absl::Span<const int64_t>(window_strides, n_strides), padding, feature_group_count, batch_group_count));
END_PROTECT_OP(lhs)
}
xla_op op_conv_general_dilated(const xla_op lhs,
const xla_op rhs,
size_t n_strides,
const int64_t *window_strides,
size_t n_padding_pairs,
const int64_t *padding_values,
size_t n_lhs_dilations,
const int64_t *lhs_dilations,
size_t n_rhs_dilations,
const int64_t *rhs_dilations,
const int64_t *ibdim,
const int64_t *ifdim,
size_t n_isdims,
const int64_t *isdims,
const int64_t *obdim,
const int64_t *ofdim,
size_t n_osdims,
const int64_t *osdims,
const int64_t *kifdim,
const int64_t *kofdim,
size_t n_ksdims,
const int64_t *ksdims,
int64_t feature_group_count,
int64_t batch_group_count) {
BEGIN_PROTECT_OP
std::vector<std::pair<int64_t, int64_t>> padding_pairs;
for (size_t i = 0; i < 2 * n_padding_pairs; i += 2) {
padding_pairs.emplace_back(padding_values[i], padding_values[i + 1]);
}
ConvolutionDimensionNumbers dnums;
dnums.set_input_batch_dimension(*ibdim);
dnums.set_input_feature_dimension(*ifdim);
for (size_t i = 0; i < n_isdims; ++i) {
dnums.add_input_spatial_dimensions(isdims[i]);
}
dnums.set_output_batch_dimension(*obdim);
dnums.set_output_feature_dimension(*ofdim);
for (size_t i = 0; i < n_osdims; ++i) {
dnums.add_output_spatial_dimensions(osdims[i]);
}
dnums.set_kernel_input_feature_dimension(*kifdim);
dnums.set_kernel_output_feature_dimension(*kofdim);
for (size_t i = 0; i < n_ksdims; ++i) {
dnums.add_kernel_spatial_dimensions(ksdims[i]);
}
return new XlaOp(
ConvGeneralDilated(*lhs,
*rhs,
absl::Span<const int64_t>(window_strides, n_strides),
absl::Span<const std::pair<int64_t, int64_t>>(padding_pairs),
absl::Span<const int64_t>(lhs_dilations, n_lhs_dilations),
absl::Span<const int64_t>(rhs_dilations, n_rhs_dilations),
dnums,
feature_group_count,
batch_group_count));
END_PROTECT_OP(lhs)
}
xla_op op_batch_norm_inference(const xla_op operand,
const xla_op scale,
const xla_op offset,
const xla_op mean,
const xla_op variance,
float epsilon,
int64_t feature_index) {
BEGIN_PROTECT_OP
return new XlaOp(BatchNormInference(*operand, *scale, *offset, *mean, *variance, epsilon, feature_index));
END_PROTECT_OP(operand)
}
xla_builder op_builder(const xla_op arg) { return arg->builder(); }
int xla_op_valid(const xla_op op) { return op->valid(); }
void xla_op_free(xla_op o) { delete o; }
size_t shape_tuple_shapes_size(const shape s) { return s->tuple_shapes_size(); }
shape shape_tuple_shapes(const shape s, int i) {
return (shape)&s->tuple_shapes(i);
}
int shape_dimensions_size(const shape s) { return s->dimensions_size(); }
int shape_element_type(const shape s) { return s->element_type(); }
int64_t shape_dimensions(const shape s, int i) { return s->dimensions(i); }
void shape_free(shape s) { delete s; }
status get_shape(const xla_builder b, const xla_op o, shape *out_shape) {
ASSIGN_OR_RETURN_STATUS(shape, b->GetShape(*o));
*out_shape = new Shape(shape);
return nullptr;
}
status get_element_type(const xla_builder b, const xla_op o,
int *out_element_type) {
ASSIGN_OR_RETURN_STATUS(shape, b->GetShapePtr(*o));
*out_element_type = shape->element_type();
return nullptr;
}
status get_dimensions_size(const xla_builder b, const xla_op o, int *out_rank) {
ASSIGN_OR_RETURN_STATUS(shape, b->GetShapePtr(*o));
*out_rank = shape->dimensions_size();
return nullptr;
}
status get_dimensions(const xla_builder b, const xla_op o, size_t *out_dims) {
ASSIGN_OR_RETURN_STATUS(shape, b->GetShapePtr(*o));
size_t dim_size = shape->dimensions_size();
for (size_t i = 0; i < dim_size; ++i) {
out_dims[i] = shape->dimensions(i);
}
return nullptr;
}
status build(const xla_builder b, const xla_op o, xla_computation *output) {
ASSIGN_OR_RETURN_STATUS(computation, b->Build(o));
*output = new XlaComputation();
**output = std::move(computation);
return nullptr;
}
status compile(const pjrt_client client, const xla_computation computation,
pjrt_loaded_executable *output) {
CompileOptions options;
ASSIGN_OR_RETURN_STATUS(executable,
(*client)->Compile(*computation, options));
*output = executable.release();
return nullptr;
}
status first_error(const xla_builder b) {
MAYBE_RETURN_STATUS(b->first_error());
return nullptr;
}
status get_current_status(const xla_builder b) {
MAYBE_RETURN_STATUS(b->GetCurrentStatus());
return nullptr;
}
status execute(const pjrt_loaded_executable exe, const literal *inputs,
int ninputs, pjrt_buffer ***outputs) {
auto client = exe->client();
ExecuteOptions options;
options.strict_shape_checking = false;
std::vector<PjRtBuffer *> input_buffer_ptrs;
PjRtDevice *device = client->devices()[0];
for (int i = 0; i < ninputs; ++i) {
ASSIGN_OR_RETURN_STATUS(buffer,
client->BufferFromHostLiteral(*inputs[i], device));
// Wait for the transfer to have completed to avoid the literal potentially
// getting out of scope before it has been transferred.
MAYBE_RETURN_STATUS(buffer->GetReadyFuture().Await());
input_buffer_ptrs.push_back(buffer.release());
}
ASSIGN_OR_RETURN_STATUS(results, exe->Execute({input_buffer_ptrs}, options));
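  // The outputs use a NULL-terminated convention at both levels: `out` is a
  // NULL-terminated array of per-replica arrays, each of which is itself
  // NULL-terminated, so the C caller can walk the results without extra size
  // parameters.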
pjrt_buffer **out =
(pjrt_buffer **)malloc((results.size() + 1) * sizeof(pjrt_buffer *));
for (size_t i = 0; i < results.size(); ++i) {
auto &replica_results = results[i];
pjrt_buffer *per_replica_outputs = (pjrt_buffer *)malloc(
(replica_results.size() + 1) * sizeof(pjrt_buffer));
for (size_t j = 0; j < replica_results.size(); ++j) {
per_replica_outputs[j] = replica_results[j].release();
}
per_replica_outputs[replica_results.size()] = nullptr;
out[i] = per_replica_outputs;
}
out[results.size()] = nullptr;
*outputs = out;
return nullptr;
}
status execute_b(const pjrt_loaded_executable exe, const pjrt_buffer *inputs,
int ninputs, pjrt_buffer ***outputs) {
auto client = exe->client();
ExecuteOptions options;
options.strict_shape_checking = false;
std::vector<PjRtBuffer *> input_buffer_ptrs(inputs, inputs + ninputs);
ASSIGN_OR_RETURN_STATUS(results, exe->Execute({input_buffer_ptrs}, options));
pjrt_buffer **out =
(pjrt_buffer **)malloc((results.size() + 1) * sizeof(pjrt_buffer *));
for (size_t i = 0; i < results.size(); ++i) {
auto &replica_results = results[i];
pjrt_buffer *per_replica_outputs = (pjrt_buffer *)malloc(
(replica_results.size() + 1) * sizeof(pjrt_buffer));
for (size_t j = 0; j < replica_results.size(); ++j) {
per_replica_outputs[j] = replica_results[j].release();
}
per_replica_outputs[replica_results.size()] = nullptr;
out[i] = per_replica_outputs;
}
out[results.size()] = nullptr;
*outputs = out;
return nullptr;
}
literal literal_create_from_shape(int pr_type, const int64_t *dims,
size_t ndims) {
auto shape = ShapeUtil::MakeShape((PrimitiveType)pr_type,
absl::Span<const int64_t>(dims, ndims));
Literal l = Literal::CreateFromShape(shape);
return new Literal(std::move(l));
}
literal literal_create_from_shape_and_data(int pr_type, const int64_t *dims,
size_t ndims, const void *data,
size_t data_len) {
auto shape = ShapeUtil::MakeShape((PrimitiveType)pr_type,
absl::Span<const int64_t>(dims, ndims));
Literal l = Literal::CreateFromShape(shape);
if (l.size_bytes() != data_len) {
return nullptr;
}
memcpy(l.untyped_data(), data, data_len);
return new Literal(std::move(l));
}
literal literal_clone(const literal l) {
return new Literal(std::move(l->Clone()));
}
status literal_reshape(const literal l, const int64_t *dims, size_t ndims,
literal *output) {
ASSIGN_OR_RETURN_STATUS(literal,
l->Reshape(absl::Span<const int64_t>(dims, ndims)));
*output = new Literal(std::move(literal));
return nullptr;
}
status literal_convert(const literal l, int pr_type, literal *output) {
ASSIGN_OR_RETURN_STATUS(literal, l->Convert((PrimitiveType)pr_type));
*output = new Literal(std::move(literal));
return nullptr;
}
int64_t literal_element_count(const literal l) { return l->element_count(); }
int64_t literal_size_bytes(const literal l) { return l->size_bytes(); }
void literal_shape(const literal l, shape *out_shape) {
*out_shape = new Shape(l->shape());
}
void literal_decompose_tuple(literal l, literal *outputs, size_t noutputs) {
auto tuple = l->DecomposeTuple();
  for (size_t i = 0; i < std::min(noutputs, tuple.size()); ++i) {
outputs[i] = new Literal(std::move(tuple[i]));
}
}
int literal_element_type(const literal l) { return l->shape().element_type(); }
void literal_copy_to(const literal l, void *dst, size_t size_in_bytes) {
std::memcpy(dst, l->untyped_data(), size_in_bytes);
}
void literal_copy_from(literal l, const void *src, size_t size_in_bytes) {
std::memcpy(l->untyped_data(), src, size_in_bytes);
}
literal literal_make_tuple(const literal *l, size_t n) {
Literal out = LiteralUtil::MakeTuple(absl::MakeSpan(l, n));
return new Literal(std::move(out));
}
literal literal_make_tuple_owned(const literal *l, size_t n) {
std::vector<xla::Literal> elems;
for (size_t i = 0; i < n; ++i) {
elems.push_back(std::move(*(l[i])));
}
Literal out = LiteralUtil::MakeTupleOwned(std::move(elems));
return new Literal(std::move(out));
}
void literal_free(literal l) { delete l; }
void status_free(status s) { delete s; }
char *xla_computation_name(xla_computation c) {
return strdup(std::string(c->name()).c_str());
}
void xla_computation_free(xla_computation c) { delete c; }
char *status_error_message(status s) {
return strdup(s->error_message().c_str());
}
status hlo_module_proto_parse_and_return_unverified_module(
const char *data, size_t len, hlo_module_proto *output) {
ASSIGN_OR_RETURN_STATUS(
hmp, ParseAndReturnUnverifiedModule(std::string(data, len)));
*output = new HloModuleProto(hmp->ToProto());
return nullptr;
}
status hlo_module_proto_parse_proto(const char *d, size_t len, bool binary,
hlo_module_proto *output) {
std::string data(d, len);
HloSnapshot proto;
if (binary) {
if (!proto.ParseFromString(data) &&
!proto.mutable_hlo()->ParseFromString(data) &&
!proto.mutable_hlo()->mutable_hlo_module()->ParseFromString(data)) {
return new Status(
InvalidArgument("Failed to parse input as HLO protobuf binary"));
}
} else {
if (!tsl::protobuf::TextFormat::ParseFromString(data, &proto) &&
!tsl::protobuf::TextFormat::ParseFromString(data,
proto.mutable_hlo()) &&
!tsl::protobuf::TextFormat::ParseFromString(
data, proto.mutable_hlo()->mutable_hlo_module())) {
return new Status(
InvalidArgument("Failed to parse input as HLO protobuf text"));
}
}
ASSIGN_OR_RETURN_STATUS(config, HloModule::CreateModuleConfigFromProto(
proto.hlo().hlo_module(), {}));
ASSIGN_OR_RETURN_STATUS(
hmp, HloModule::CreateFromProto(proto.hlo().hlo_module(), config));
*output = new HloModuleProto(hmp->ToProto());
return nullptr;
}
status hlo_module_from_proto(const hlo_module_proto input_proto, hlo_module *output) {
ASSIGN_OR_RETURN_STATUS(
config, HloModule::CreateModuleConfigFromProto(*input_proto, {}));
ASSIGN_OR_RETURN_STATUS(
hm, HloModule::CreateFromProto(*input_proto, config));
*output = hm.release();
return nullptr;
}
hlo_computation hlo_module_entry_computation(const hlo_module module) {
return module->entry_computation();
}
int64_t hlo_module_computation_count(const hlo_module module) {
return module->computation_count();
}
int64_t hlo_module_instruction_count(const hlo_module module) {
return module->instruction_count();
}
char* hlo_module_to_string(const hlo_module module) {
std::string result = module->ToString();
char* output = new char[result.length() + 1];
std::strcpy(output, result.c_str());
return output;
}
xla_computation xla_computation_from_hlo_module_proto(const hlo_module_proto p) {
return new XlaComputation(*p);
}
void hlo_module_proto_free(hlo_module_proto p) { delete p; }
hlo_module_proto xla_computation_proto(const xla_computation c) {
return new HloModuleProto(c->proto());
}
| ivy/ivy/engines/XLA/rust_api/xla_rs/xla_rs.cc |
import logging
import ivy
from ivy.functional.ivy.experimental.sparse_array import (
_is_valid_format,
_verify_bsc_components,
_verify_bsr_components,
_verify_coo_components,
_verify_csc_components,
_verify_csr_components,
)
def is_native_sparse_array(x):
"""Jax does not support sparse arrays natively."""
return False
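# Note: jax.experimental.sparse (e.g. the BCOO format) does exist, but it is
# an experimental API rather than native sparse support, which is why this
# backend reports False unconditionally.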
def native_sparse_array(
data=None,
*,
coo_indices=None,
crow_indices=None,
col_indices=None,
ccol_indices=None,
row_indices=None,
values=None,
dense_shape=None,
format="coo"
):
ivy.utils.assertions.check_exists(
data,
inverse=True,
message="data cannot be specified, Jax does not support sparse array natively",
)
if not _is_valid_format(
coo_indices,
crow_indices,
col_indices,
ccol_indices,
row_indices,
values,
dense_shape,
format,
):
raise ivy.utils.exceptions.IvyException(
"format should be one of the strings coo, csr, csc, bsr, and bsc."
)
format = format.lower()
if format == "coo":
_verify_coo_components(
indices=coo_indices,
values=values,
dense_shape=dense_shape,
)
elif format == "csr":
_verify_csr_components(
crow_indices=crow_indices,
col_indices=col_indices,
values=values,
dense_shape=dense_shape,
)
elif format == "bsr":
_verify_bsr_components(
crow_indices=crow_indices,
col_indices=col_indices,
values=values,
dense_shape=dense_shape,
)
elif format == "csc":
_verify_csc_components(
ccol_indices=ccol_indices,
row_indices=row_indices,
values=values,
dense_shape=dense_shape,
)
else:
_verify_bsc_components(
ccol_indices=ccol_indices,
row_indices=row_indices,
values=values,
dense_shape=dense_shape,
)
logging.warning("Jax does not support sparse array natively, None is returned.")
return None
def native_sparse_array_to_indices_values_and_shape(x):
    logging.warning(
        "JAX does not support sparse arrays natively; None is returned for"
        " indices, values, and shape."
    )
return None, None, None
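# Illustrative usage sketch (assumes the ivy frontend API wrapping this
# backend; not executed here):
#
#   import ivy
#   ivy.set_backend("jax")
#   out = ivy.native_sparse_array(
#       coo_indices=ivy.array([[0, 1, 1], [0, 0, 1]]),
#       values=ivy.array([1.0, 2.0, 3.0]),
#       dense_shape=(2, 2),
#   )
#   # logs a warning and binds out to None on the JAX backend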
| ivy/ivy/functional/backends/jax/experimental/sparse_array.py |
import sys
import numpy as np
import mxnet as mx
import ivy
from ivy.func_wrapper import _dtype_from_version
from ivy.utils.exceptions import IvyNotImplementedException
backend_version = {"version": mx.__version__}
if not ivy.is_local():
_module_in_memory = sys.modules[__name__]
else:
_module_in_memory = sys.modules[ivy.import_module_path].import_cache[__name__]
use = ivy.utils.backend.ContextManager(_module_in_memory)
# wrap dunder methods of native tensors to return NotImplemented to prioritize Ivy array methods.
def dunder_wrapper(func):
def rep_method(*args, **kwargs):
for arg in args:
if ivy.is_ivy_array(arg):
return NotImplemented
return func(*args, **kwargs)
return rep_method
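# Sketch of the protocol this relies on (illustrative): when x.__add__(y)
# returns NotImplemented, Python falls back to y.__radd__(x), so an ivy.Array
# on either side of the operator handles it instead of the raw NDArray:
#
#   nd_tensor + ivy_array
#   # -> NDArray.__add__ returns NotImplemented
#   # -> Python then calls ivy_array.__radd__(nd_tensor)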
# check for previously imported mxnet modules
modules_to_patch = []
tensors_to_patch = []
tmp_globals = dict(globals())
for name, value in tmp_globals.items():
if value == "mxnet.ndarray.ndarray.NDArray":
tensors_to_patch.append(name)
try:
if value.__name__ == "mxnet":
modules_to_patch.append(name)
except AttributeError:
pass
methods_to_patch = [
"__add__",
"__sub__",
"__mul__",
"__div__",
"__truediv__",
"__mod__",
"__lt__",
"__le__",
"__gt__",
"__ge__",
"__ne__",
"__eq__",
"__pow__",
]
for module in modules_to_patch:
    for method in methods_to_patch:
        exec(
            f"{module}.ndarray.NDArray.{method} ="
            f" dunder_wrapper({module}.ndarray.NDArray.{method})"
        )
for tensor in tensors_to_patch:
    for method in methods_to_patch:
        exec(f"{tensor}.{method} = dunder_wrapper({tensor}.{method})")
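# An exec-free alternative (sketch, assuming tmp_globals[module] still holds
# the imported mxnet module object) would patch via getattr/setattr:
#
#   cls = tmp_globals[module].ndarray.NDArray
#   setattr(cls, method, dunder_wrapper(getattr(cls, method)))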
NativeArray = mx.ndarray.NDArray
NativeDevice = str
NativeDtype = np.dtype
NativeShape = tuple
NativeSparseArray = mx.ndarray.sparse.BaseSparseNDArray
valid_devices = ("cpu", "gpu")
invalid_devices = ("tpu",)
# native data types
native_int8 = np.dtype("int8")
native_int32 = np.dtype("int32")
native_int64 = np.dtype("int64")
native_uint8 = np.dtype("uint8")
native_float16 = np.dtype("float16")
native_float32 = np.dtype("float32")
native_float64 = np.dtype("float64")
native_double = native_float64
native_bool = np.dtype("bool")
# update these to add new dtypes
valid_dtypes = {
"1.9.1 and below": (
ivy.int8,
ivy.int32,
ivy.int64,
ivy.uint8,
ivy.bfloat16,
ivy.float16,
ivy.float32,
ivy.float64,
ivy.bool,
)
}
valid_numeric_dtypes = {
"1.9.1 and below": (
ivy.int8,
ivy.int32,
ivy.int64,
ivy.uint8,
ivy.float16,
ivy.float32,
ivy.float64,
)
}
valid_int_dtypes = {"1.9.1 and below": (ivy.int8, ivy.int32, ivy.int64, ivy.uint8)}
valid_float_dtypes = {"1.9.1 and below": (ivy.float16, ivy.float32, ivy.float64)}
valid_uint_dtypes = {"1.9.1 and below": (ivy.uint8,)}
valid_complex_dtypes = {"1.9.1 and below": ()}
# leave these untouched
valid_dtypes = _dtype_from_version(valid_dtypes, backend_version)
valid_numeric_dtypes = _dtype_from_version(valid_numeric_dtypes, backend_version)
valid_int_dtypes = _dtype_from_version(valid_int_dtypes, backend_version)
valid_float_dtypes = _dtype_from_version(valid_float_dtypes, backend_version)
valid_uint_dtypes = _dtype_from_version(valid_uint_dtypes, backend_version)
valid_complex_dtypes = _dtype_from_version(valid_complex_dtypes, backend_version)
# update these to add new dtypes
invalid_dtypes = {"1.9.1 and below": (ivy.int16, ivy.uint32, ivy.uint64, ivy.uint16)}
invalid_numeric_dtypes = {
"1.9.1 and below": (ivy.int16, ivy.uint32, ivy.uint64, ivy.uint16)
}
invalid_int_dtypes = {
"1.9.1 and below": (ivy.int16, ivy.uint16, ivy.uint32, ivy.uint64)
}
invalid_float_dtypes = {"1.9.1 and below": (ivy.bfloat16,)}
invalid_uint_dtypes = {"1.9.1 and below": (ivy.uint16, ivy.uint32, ivy.uint64)}
invalid_complex_dtypes = {"1.9.1 and below": (ivy.complex64, ivy.complex128)}
# leave these untouched
invalid_dtypes = _dtype_from_version(invalid_dtypes, backend_version)
invalid_numeric_dtypes = _dtype_from_version(invalid_numeric_dtypes, backend_version)
invalid_int_dtypes = _dtype_from_version(invalid_int_dtypes, backend_version)
invalid_float_dtypes = _dtype_from_version(invalid_float_dtypes, backend_version)
invalid_uint_dtypes = _dtype_from_version(invalid_uint_dtypes, backend_version)
invalid_complex_dtypes = _dtype_from_version(invalid_complex_dtypes, backend_version)
native_inplace_support = True
supports_gradients = True
def closest_valid_dtype(type=None, /, as_native=False):
raise IvyNotImplementedException()
backend = "mxnet"
from . import activations
from .activations import *
from . import creation
from .creation import *
from . import data_type
from .data_type import *
from . import device
from .device import *
from . import elementwise
from .elementwise import *
from . import general
from .general import *
from . import gradients
from .gradients import *
from . import layers
from .layers import *
from . import linear_algebra as linalg
from .linear_algebra import *
from . import manipulation
from .manipulation import *
from . import random
from .random import *
from . import searching
from .searching import *
from . import set
from .set import *
from . import sorting
from .sorting import *
from . import statistical
from .statistical import *
from . import utility
from .utility import *
from . import experimental
from .experimental import *
from . import control_flow_ops
from .control_flow_ops import *
from . import sub_backends
from .sub_backends import *
from . import module
from .module import *
NativeModule = mx.gluon.nn.Block
| ivy/ivy/functional/backends/mxnet/__init__.py |
from typing import Union, Optional, Tuple, List, Sequence
import mxnet as mx
from ivy.utils.exceptions import IvyNotImplementedException
def eigh_tridiagonal(
alpha: Union[(None, mx.ndarray.NDArray)],
beta: Union[(None, mx.ndarray.NDArray)],
/,
*,
eigvals_only: bool = True,
select: str = "a",
select_range: Optional[
Union[(Tuple[(int, int)], List[int], None, mx.ndarray.NDArray)]
] = None,
tol: Optional[float] = None,
) -> Union[
(
None,
mx.ndarray.NDArray,
Tuple[(Union[(None, mx.ndarray.NDArray)], Union[(None, mx.ndarray.NDArray)])],
)
]:
raise IvyNotImplementedException()
def diagflat(
x: Union[(None, mx.ndarray.NDArray)],
/,
*,
offset: int = 0,
padding_value: float = 0,
align: str = "RIGHT_LEFT",
num_rows: Optional[int] = None,
num_cols: Optional[int] = None,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
):
raise IvyNotImplementedException()
def kron(
a: Union[(None, mx.ndarray.NDArray)],
b: Union[(None, mx.ndarray.NDArray)],
/,
*,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def matrix_exp(
x: Union[(None, mx.ndarray.NDArray)],
/,
*,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def eig(
x: Union[(None, mx.ndarray.NDArray)],
/,
*,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Tuple[None]:
raise IvyNotImplementedException()
def eigvals(
x: Union[(None, mx.ndarray.NDArray)], /
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def adjoint(
x: Union[(None, mx.ndarray.NDArray)],
/,
*,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def solve_triangular(
x1: Union[(None, mx.ndarray.NDArray)],
x2: Union[(None, mx.ndarray.NDArray)],
/,
*,
upper: bool = True,
adjoint: bool = False,
unit_diagonal: bool = False,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
    # Overwrite the diagonal with ones via a mask rather than assigning in
    # place, so gradients do not flow to the diagonal entries.
    if unit_diagonal:
        # MXNet's eye has no batch_shape argument, so broadcast it explicitly.
        w = mx.nd.eye(x1.shape[-2], dtype=x1.dtype)
        w = mx.nd.broadcast_to(w.reshape((1,) * (x1.ndim - 2) + w.shape), x1.shape)
        x1 = w + (1 - w) * x1
# MXNet does not support complex tensors for this operation,
# so adjoint always equals transpose.
return mx.nd.linalg.trsm(x1, x2, lower=not upper, transpose=adjoint)
def multi_dot(
x: Sequence[Union[(None, mx.ndarray.NDArray)]],
/,
*,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> None:
raise IvyNotImplementedException()
def cond(
x: Union[(None, mx.ndarray.NDArray)],
/,
*,
p: Optional[Union[(None, int, str)]] = None,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def dot(
a: mx.ndarray.NDArray,
b: mx.ndarray.NDArray,
/,
*,
out: Optional[mx.ndarray.NDArray] = None,
) -> mx.ndarray.NDArray:
    return mx.nd.dot(a, b, out=out)
dot.support_native_out = True
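# Usage sketch (illustrative):
#   a = mx.nd.array([[1.0, 2.0], [3.0, 4.0]])
#   b = mx.nd.array([[5.0], [6.0]])
#   dot(a, b)  # -> 2x1 NDArray [[17.], [39.]]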
| ivy/ivy/functional/backends/mxnet/experimental/linear_algebra.py |
"""MXNet random functions.
Collection of MXNet random functions, wrapped to fit Ivy syntax and
signature.
"""
import mxnet as mx
from typing import Optional, Union, Sequence
import ivy
from ivy.utils.exceptions import IvyNotImplementedException
def random_uniform(
*,
low: Union[(float, None, mx.ndarray.NDArray)] = 0.0,
high: Union[(float, None, mx.ndarray.NDArray)] = 1.0,
shape: Optional[Union[(ivy.NativeShape, Sequence[int], None)]] = None,
dtype: None,
device: str,
seed: Optional[int] = None,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def random_normal(
*,
mean: Union[(float, None, mx.ndarray.NDArray)] = 0.0,
std: Union[(float, None, mx.ndarray.NDArray)] = 1.0,
shape: Optional[Union[(ivy.NativeShape, Sequence[int])]] = None,
dtype: None,
seed: Optional[int] = None,
device: str,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def multinomial(
population_size: int,
num_samples: int,
/,
*,
batch_size: int = 1,
probs: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
replace: bool = True,
device: str,
seed: Optional[int] = None,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def randint(
low: Union[(float, None, mx.ndarray.NDArray)],
high: Union[(float, None, mx.ndarray.NDArray)],
/,
*,
shape: Optional[Union[(ivy.NativeShape, Sequence[int])]] = None,
device: str,
dtype: Optional[Union[(None, ivy.Dtype)]] = None,
seed: Optional[int] = None,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
def seed(*, seed_value: int = 0) -> None:
raise IvyNotImplementedException()
def shuffle(
x: Union[(None, mx.ndarray.NDArray)],
axis: Optional[int] = 0,
/,
*,
seed: Optional[int] = None,
out: Optional[Union[(None, mx.ndarray.NDArray)]] = None,
) -> Union[(None, mx.ndarray.NDArray)]:
raise IvyNotImplementedException()
| ivy/ivy/functional/backends/mxnet/random.py |
# global
from typing import Optional, Tuple, Sequence, Union
import numpy as np
# local
import ivy
# Array API Standard #
# -------------------#
def vorbis_window(
    window_length: int,
*,
dtype: np.dtype = np.float32,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
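    # Vorbis window: w(n) = sin((pi / 2) * sin(pi * n / (2 * N)) ** 2),
    # evaluated at the odd integers n = 1, 3, ..., 2 * N - 1 (N = window_length).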
result = []
for i in range(1, window_length * 2, 2):
temp = np.sin(ivy.pi / 2 * (np.sin(ivy.pi * i / (window_length * 2)) ** 2))
result.append(round(temp, 8))
return np.array(result, dtype=dtype)
vorbis_window.support_native_out = False
def tril_indices(
n_rows: int,
n_cols: Optional[int] = None,
k: int = 0,
/,
*,
device: Optional[str] = None,
) -> Tuple[np.ndarray, ...]:
return tuple(np.asarray(np.tril_indices(n=n_rows, k=k, m=n_cols)))
def hann_window(
size: int,
/,
*,
periodic: bool = True,
dtype: Optional[np.dtype] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if size < 2:
return np.ones([size], dtype=dtype)
    if periodic:
        count = np.arange(size) / size
    else:
        # Symmetric window: sample the full period inclusively on [0, 1].
        count = np.linspace(start=0, stop=1, num=size)
return (0.5 - 0.5 * np.cos(2 * np.pi * count)).astype(dtype)
hann_window.support_native_out = False
def kaiser_window(
window_length: int,
periodic: bool = True,
beta: float = 12.0,
*,
dtype: Optional[np.dtype] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if window_length < 2:
return np.ones([window_length], dtype=dtype)
if periodic is False:
return np.kaiser(M=window_length, beta=beta).astype(dtype)
else:
return np.kaiser(M=window_length + 1, beta=beta)[:-1].astype(dtype)
kaiser_window.support_native_out = False
def indices(
dimensions: Sequence,
dtype: np.dtype = np.int64,
sparse: bool = False,
) -> Union[np.ndarray, Tuple[np.ndarray, ...]]:
return np.indices(dimensions, dtype=dtype, sparse=sparse)
def unsorted_segment_min(
data: np.ndarray,
segment_ids: np.ndarray,
num_segments: int,
) -> np.ndarray:
ivy.utils.assertions.check_unsorted_segment_valid_params(
data, segment_ids, num_segments
)
if data.dtype in [np.float32, np.float64]:
init_val = np.finfo(data.dtype).max
elif data.dtype in [np.int32, np.int64, np.int8, np.int16, np.uint8]:
init_val = np.iinfo(data.dtype).max
else:
raise TypeError("Unsupported data type")
res = np.full((num_segments,) + data.shape[1:], init_val, dtype=data.dtype)
for i in range(num_segments):
mask_index = segment_ids == i
if np.any(mask_index):
res[i] = np.min(data[mask_index], axis=0)
return res
def blackman_window(
size: int,
/,
*,
periodic: bool = True,
dtype: Optional[np.dtype] = None,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if size < 2:
return np.ones([size], dtype=dtype)
    if periodic:
        count = np.arange(size) / size
    else:
        # Symmetric window: sample the full period inclusively on [0, 1].
        count = np.linspace(start=0, stop=1, num=size)
return (
(0.42 - 0.5 * np.cos(2 * np.pi * count))
+ (0.08 * np.cos(2 * np.pi * 2 * count))
).astype(dtype)
blackman_window.support_native_out = False
def unsorted_segment_sum(
data: np.ndarray,
segment_ids: np.ndarray,
num_segments: int,
) -> np.ndarray:
    # The same validation used for unsorted_segment_min applies here, since
    # the parameter requirements are identical.
ivy.utils.assertions.check_unsorted_segment_valid_params(
data, segment_ids, num_segments
)
res = np.zeros((num_segments,) + data.shape[1:], dtype=data.dtype)
for i in range(num_segments):
mask_index = segment_ids == i
if np.any(mask_index):
res[i] = np.sum(data[mask_index], axis=0)
return res
def trilu(
x: np.ndarray,
/,
*,
k: int = 0,
upper: bool = True,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
if upper:
return np.triu(x, k)
return np.tril(x, k)
def mel_weight_matrix(
num_mel_bins: int,
dft_length: int,
sample_rate: int,
lower_edge_hertz: float = 125.0,
upper_edge_hertz: float = 3000.0,
):
lower_edge_hertz = np.array(lower_edge_hertz)
upper_edge_hertz = np.array(upper_edge_hertz)
zero = np.array(0.0)
def hz_to_mel(f):
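        # HTK mel scale: m = 2595 * log10(1 + f / 700); the inverse mapping
        # is f = 700 * (10 ** (m / 2595) - 1).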
return 2595 * np.log10(1 + f / 700)
nyquist_hz = sample_rate / 2
linear_freqs = np.linspace(0, nyquist_hz, dft_length, dtype=np.float32)[1:]
spec_bin_mels = hz_to_mel(linear_freqs)[..., None]
mel_edges = np.linspace(
hz_to_mel(lower_edge_hertz),
hz_to_mel(upper_edge_hertz),
num_mel_bins + 2,
dtype=np.float32,
)
mel_edges = np.stack([mel_edges[i : i + 3] for i in range(num_mel_bins)])
lower_edge_mel, center_mel, upper_edge_mel = (
t.reshape((1, num_mel_bins)) for t in np.split(mel_edges, 3, axis=1)
)
lower_slopes = (spec_bin_mels - lower_edge_mel) / (center_mel - lower_edge_mel)
upper_slopes = (upper_edge_mel - spec_bin_mels) / (upper_edge_mel - center_mel)
mel_weights = np.maximum(zero, np.minimum(lower_slopes, upper_slopes))
return np.pad(mel_weights, [[1, 0], [0, 0]])
def unsorted_segment_mean(
data: np.ndarray,
segment_ids: np.ndarray,
num_segments: int,
) -> np.ndarray:
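    """Per-segment arithmetic mean; ids outside ``range(num_segments)`` are
    ignored. Illustrative sketch (example values only):

    >>> unsorted_segment_mean(np.array([2.0, 4.0, 6.0]), np.array([0, 0, 1]), 2)
    array([3., 6.])
    """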
ivy.utils.assertions.check_unsorted_segment_valid_params(
data, segment_ids, num_segments
)
if len(segment_ids) == 0:
# If segment_ids is empty, return an empty array of the correct shape
return np.zeros((num_segments,) + data.shape[1:], dtype=data.dtype)
# Initialize an array to store the sum of elements for each segment
res = np.zeros((num_segments,) + data.shape[1:], dtype=data.dtype)
# Initialize an array to keep track of the number of elements in each segment
counts = np.zeros(num_segments, dtype=np.int64)
for i in range(len(segment_ids)):
seg_id = segment_ids[i]
if seg_id < num_segments:
res[seg_id] += data[i]
counts[seg_id] += 1
    # reshape counts so the division broadcasts over any trailing data dims
    # (plain counts[:, np.newaxis] mis-broadcasts for 1-D data)
    return res / counts.reshape((num_segments,) + (1,) * (data.ndim - 1))
def polyval(coeffs: np.ndarray, x: np.ndarray) -> np.ndarray:
with ivy.PreciseMode(True):
promoted_type = ivy.promote_types(ivy.dtype(coeffs[0]), ivy.dtype(x[0]))
result = np.polyval(coeffs, x)
result = np.asarray(result, np.dtype(promoted_type))
return result
| ivy/ivy/functional/backends/numpy/experimental/creation.py/0 | {
"file_path": "ivy/ivy/functional/backends/numpy/experimental/creation.py",
"repo_id": "ivy",
"token_count": 2949
} | 19 |
# global
import numpy as np
from typing import Union, Optional, Sequence
# local
import ivy
def all(
x: np.ndarray,
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
try:
return np.asarray(np.all(x, axis=axis, keepdims=keepdims, out=out))
except np.AxisError as error:
raise ivy.utils.exceptions.IvyIndexError(error) from error
all.support_native_out = True
def any(
x: np.ndarray,
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
out: Optional[np.ndarray] = None,
) -> np.ndarray:
return np.asarray(np.any(x, axis=axis, keepdims=keepdims, out=out))
any.support_native_out = True
| ivy/ivy/functional/backends/numpy/utility.py/0 | {
"file_path": "ivy/ivy/functional/backends/numpy/utility.py",
"repo_id": "ivy",
"token_count": 329
} | 20 |
"""Collection of Paddle network layers, wrapped to fit Ivy syntax and
signature."""
from typing import Optional, Tuple, Union, Sequence
# global
import paddle
import ivy
from ivy.func_wrapper import with_unsupported_device_and_dtypes
from ivy.utils.exceptions import IvyNotImplementedException
from ivy.functional.ivy.layers import (
_handle_padding,
_get_x_data_format,
_deconv_length,
)
import ivy.functional.backends.paddle as paddle_backend
# local
from . import backend_version
def _is_list_or_tuple(inp):
return isinstance(inp, (list, tuple))
def _convert_to_list(value, n, name="padding", _type=int):
if isinstance(value, _type):
return [value] * n
else:
try:
value_list = list(value)
except TypeError as e:
raise TypeError(
f"The input {value}'s type must be list or tuple. Received:"
f" {type(value)}"
) from e
else:
return value_list
def _pad_before_conv(x, filters, strides, padding, dims, dilations, data_format):
dilations = _convert_to_list(dilations, dims, "dilations")
strides = _convert_to_list(strides, dims, "strides")
if isinstance(padding, str):
# Case 1: "VALID", "SAME" etc.
filter_shape = [
filters.shape[i] + (filters.shape[i] - 1) * (dilations[i] - 1)
for i in range(dims)
]
padding_spec = [
_handle_padding(x.shape[1 + i], strides[i], filter_shape[i], padding)
for i in range(dims - 1, -1, -1)
]
padding_top = [padding_spec[i] // 2 for i in range(dims)]
padding_bot = [padding_spec[i] - padding_spec[i] // 2 for i in range(dims)]
padding = [None] * len(padding_top) * 2
padding[::2] = padding_top
padding[1::2] = padding_bot
else:
if isinstance(padding, int):
padding = [(padding, padding)] * dims
if (
_is_list_or_tuple(padding)
and len(padding) == dims
and _is_list_or_tuple(padding[0])
):
# Case 2: [(pad_left, pad_right), (pad_top, pad_bottom)...]
padding = [item for sublist in padding for item in sublist[::-1]][::-1]
else:
raise ValueError(f"Invalid padding format: {padding}")
if not all(p >= 0 for p in padding):
raise ValueError(
"Invalid padding, all values should be larger than"
f"or equal to 0, but received: {padding}."
)
return paddle.nn.functional.pad(
x, pad=padding, data_format=data_format, mode="constant"
)
def _pad_before_conv_tranpose(
x, filters, strides, padding, dims, dilations, output_shape, filter_shape
):
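    """Shared bookkeeping for the transpose convolutions below.

    Per spatial dim this returns: a flag marking that one trailing output
    element must be cropped (odd "SAME" padding), the symmetric padding to
    hand to paddle, and the ``output_padding`` needed to reach the requested
    ``output_shape`` exactly.
    """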
if output_shape is None:
out_shape = [
_deconv_length(
x.shape[i + 2], strides[i], filter_shape[i], padding, dilations[i]
)
for i in range(dims)
]
output_shape = [x.shape[0], *out_shape, filters.shape[1]]
elif len(output_shape) == dims:
output_shape = [x.shape[0]] + output_shape + [filters.shape[1]]
not_valid_pad = [False] * dims
filter_shape = [
filter_shape[i] + (filter_shape[i] - 1) * (dilations[i] - 1)
for i in range(dims)
]
pad_specific = [
_handle_padding(output_shape[i + 1], strides[i], filter_shape[i], padding)
for i in range(dims)
]
if padding == "VALID":
padding_list = [0] * dims
else:
for i in range(dims):
if pad_specific[i] % 2 != 0:
pad_specific[i] -= 1
not_valid_pad[i] = True
padding_list = [pad_specific[i] // 2 for i in range(dims)]
out_shape = [
(x.shape[i + 2] - 1) * strides[i]
- 2 * padding_list[i]
+ dilations[i] * (filters.shape[i + 2] - 1)
+ 1
for i in range(dims)
]
output_padding = [max(output_shape[i + 1] - out_shape[i], 0) for i in range(dims)]
return not_valid_pad, padding_list, output_padding
def _ff_xd_before_conv(x, filters, dims, filter_format, x_dilations):
if filter_format == "channel_first":
filters = paddle.transpose(filters, (*range(2, dims + 2), 1, 0))
# adding dilation in input
x_dilations = [x_dilations] * dims if isinstance(x_dilations, int) else x_dilations
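    # Zeros are interleaved along each spatial axis by right-multiplying
    # with a strided identity: paddle.eye(new_height)[::d] has shape
    # (h, new_height) and row n carries a 1 at column n * d, so the matmul
    # scatters the h samples with d - 1 zeros between neighbours.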
for i in range(dims):
if x_dilations[i] > 1:
h = x.shape[1 + i]
new_height = h + (h - 1) * (x_dilations[i] - 1)
h = paddle.eye(new_height, dtype=x.dtype)[:: x_dilations[i]]
x = paddle_backend.swapaxes(x, 1 + i, -1)
x = paddle.matmul(x, h)
x = paddle_backend.swapaxes(x, -1, 1 + i)
return x, filters
def conv1d(
x: paddle.Tensor,
filters: paddle.Tensor,
strides: Union[int, Tuple[int]],
padding: Union[str, int, Sequence[Tuple[int, int]]],
/,
*,
data_format: str = "NWC",
filter_format: str = "channel_last",
x_dilations: Union[int, Tuple[int]] = 1,
dilations: Union[int, Tuple[int]] = 1,
bias: Optional[paddle.Tensor] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
raise IvyNotImplementedException()
@with_unsupported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("float16", "bfloat16")}},
backend_version,
)
def conv1d_transpose(
x: paddle.Tensor,
filters: paddle.Tensor,
strides: Union[int, Tuple[int]],
padding: Union[str, Sequence[Tuple[int, int]]],
/,
*,
output_shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
filter_format: str = "channel_last",
data_format: str = "NWC",
dilations: Union[int, Tuple[int]] = 1,
bias: Optional[paddle.Tensor] = None,
out: Optional[paddle.Tensor] = None,
):
if data_format == "NWC":
x = x.transpose([0, 2, 1])
strides = [strides] if isinstance(strides, int) else strides
dilations = [dilations] if isinstance(dilations, int) else dilations
if filter_format == "channel_last":
filters = filters.transpose([2, 1, 0])
not_valid_pad, padding_list, output_padding = _pad_before_conv_tranpose(
x, filters, strides, padding, 1, dilations, output_shape, filters.shape[2:]
)
res = paddle.nn.functional.conv1d_transpose(
x,
filters,
stride=strides,
padding=padding_list,
output_padding=output_padding,
dilation=dilations,
data_format="NCL",
)
if not_valid_pad[0]:
res = res[:, :, 0:-1]
if data_format == "NWC":
res = res.transpose([0, 2, 1])
return res
# noinspection PyUnresolvedReferences
def conv2d(
x: paddle.Tensor,
filters: paddle.Tensor,
strides: Union[int, Tuple[int, int]],
padding: Union[str, int, Sequence[Tuple[int, int]]],
/,
*,
data_format: str = "NHWC",
filter_format: str = "channel_last",
x_dilations: Union[int, Tuple[int, int]] = 1,
dilations: Union[int, Tuple[int, int]] = 1,
bias: Optional[paddle.Tensor] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
raise IvyNotImplementedException()
@with_unsupported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("float16", "bfloat16")}},
backend_version,
)
def conv2d_transpose(
x: paddle.Tensor,
filters: paddle.Tensor,
strides: Union[int, Tuple[int, int]],
padding: Union[str, Sequence[Tuple[int, int]]],
/,
*,
output_shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
filter_format: str = "channel_last",
data_format: str = "NHWC",
dilations: Union[int, Tuple[int, int]] = 1,
bias: Optional[paddle.Tensor] = None,
out: Optional[paddle.Tensor] = None,
):
if data_format == "NHWC":
x = x.transpose([0, 3, 1, 2])
strides = [strides] * 2 if isinstance(strides, int) else strides
dilations = [dilations] * 2 if isinstance(dilations, int) else dilations
if filter_format == "channel_last":
filters = filters.transpose([3, 2, 0, 1])
not_valid_pad, padding_list, output_padding = _pad_before_conv_tranpose(
x, filters, strides, padding, 2, dilations, output_shape, filters.shape[2:]
)
res = paddle.nn.functional.conv2d_transpose(
x,
filters,
bias=bias,
stride=strides,
padding=padding_list,
output_padding=output_padding,
dilation=dilations,
data_format="NCHW",
)
if not_valid_pad[0]:
res = res[:, :, 0:-1, :]
if not_valid_pad[1]:
res = res[:, :, :, 0:-1]
if data_format == "NHWC":
res = res.transpose([0, 2, 3, 1])
return res
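# A hedged shape sketch for conv2d_transpose (illustrative values; the filter
# layout (kH, kW, C_out, C_in) is implied by the transpose([3, 2, 0, 1])
# above, and "SAME" with stride 2 doubles each spatial dim):
def _conv2d_transpose_shape_demo():
    x = paddle.randn([1, 4, 4, 3])  # NHWC with C_in = 3
    w = paddle.randn([2, 2, 6, 3])  # (kH, kW, C_out, C_in)
    y = conv2d_transpose(x, w, 2, "SAME")
    assert list(y.shape) == [1, 8, 8, 6]
    return y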
# noinspection PyUnresolvedReferences
def depthwise_conv2d(
x: paddle.Tensor,
filters: paddle.Tensor,
strides: Union[int, Tuple[int, int]],
padding: Union[str, int, Sequence[Tuple[int, int]]],
/,
*,
data_format: str = "NHWC",
dilations: Union[int, Tuple[int, int]] = 1,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
raise IvyNotImplementedException()
@with_unsupported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("float16",)}},
backend_version,
)
def conv3d(
x: paddle.Tensor,
filters: paddle.Tensor,
strides: Union[int, Tuple[int, int, int]],
padding: Union[str, int, Sequence[Tuple[int, int]]],
/,
*,
data_format: str = "NDHWC",
filter_format: str = "channel_last",
x_dilations: Union[int, Tuple[int, int, int]] = 1,
dilations: Union[int, Tuple[int, int, int]] = 1,
bias: Optional[paddle.Tensor] = None,
out: Optional[paddle.Tensor] = None,
):
if data_format == "NCDHW":
x = paddle.transpose(x, perm=(0, 2, 3, 4, 1))
df = "NDHWC"
x, filters = _ff_xd_before_conv(x, filters, 3, filter_format, x_dilations)
x = _pad_before_conv(x, filters, strides, padding, 3, dilations, df)
filters = paddle.transpose(filters, perm=(4, 3, 0, 1, 2))
padding = "VALID"
res = paddle.nn.functional.conv3d(
x,
filters,
bias=bias,
data_format=df,
stride=strides,
padding=padding,
dilation=dilations,
)
if data_format == "NCDHW":
res = paddle.transpose(res, perm=(0, 4, 1, 2, 3))
return res
# noinspection PyUnresolvedReferences
def conv3d_transpose(
x: paddle.Tensor,
filters: paddle.Tensor,
strides: Union[int, Tuple[int, int, int]],
padding: Union[str, Sequence[Tuple[int, int]]],
/,
*,
output_shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
filter_format: str = "channel_last",
data_format: str = "NDHWC",
dilations: Union[int, Tuple[int, int, int]] = 1,
bias: Optional[paddle.Tensor] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
raise IvyNotImplementedException()
@with_unsupported_device_and_dtypes(
{"2.6.0 and below": {"cpu": ("float16",)}},
backend_version,
)
def conv_general_dilated(
x: paddle.Tensor,
filters: paddle.Tensor,
strides: Union[int, Tuple[int], Tuple[int, int], Tuple[int, int, int]],
padding: Union[str, int, Sequence[Tuple[int, int]]],
/,
*,
dims: int = 2,
data_format: str = "channel_last",
filter_format: str = "channel_last",
feature_group_count: int = 1,
x_dilations: Union[int, Tuple[int], Tuple[int, int], Tuple[int, int, int]] = 1,
dilations: Union[int, Tuple[int], Tuple[int, int], Tuple[int, int, int]] = 1,
bias: Optional[paddle.Tensor] = None,
out: Optional[paddle.Tensor] = None,
):
if data_format == "channel_first":
x = paddle.transpose(x, perm=(0, *range(2, dims + 2), 1))
if filter_format == "channel_first":
filters = paddle.transpose(filters, (*range(2, dims + 2), 1, 0))
# adding dilation in input
x_dilations = [x_dilations] * dims if isinstance(x_dilations, int) else x_dilations
for i in range(dims):
if x_dilations[i] > 1:
h = x.shape[1 + i]
new_height = h + (h - 1) * (x_dilations[i] - 1)
h = paddle.eye(new_height, dtype=x.dtype)[:: x_dilations[i]]
x = paddle_backend.swapaxes(x, 1 + i, -1)
x = paddle.matmul(x, h)
x = paddle_backend.swapaxes(x, -1, 1 + i)
df = "NLC" if dims == 1 else _get_x_data_format(dims, data_format="channel_last")
x = _pad_before_conv(x, filters, strides, padding, dims, dilations, df)
filters = paddle.transpose(filters, perm=(dims + 1, dims, *range(dims)))
padding = "VALID"
if dims == 1:
res = paddle.nn.functional.conv1d(
x,
filters,
bias=bias,
data_format=df,
stride=strides,
padding=padding,
dilation=dilations,
groups=feature_group_count,
)
elif dims == 2:
res = paddle.nn.functional.conv2d(
x,
filters,
bias=bias,
data_format=df,
stride=strides,
padding=padding,
dilation=dilations,
groups=feature_group_count,
)
elif dims == 3:
res = paddle.nn.functional.conv3d(
x,
filters,
bias=bias,
data_format=df,
stride=strides,
padding=padding,
dilation=dilations,
groups=feature_group_count,
)
if data_format == "channel_first":
res = paddle.transpose(res, perm=(0, dims + 1, *range(1, dims + 1)))
return res
def conv_general_transpose(
x: paddle.Tensor,
filters: paddle.Tensor,
strides: Union[int, Tuple[int], Tuple[int, int], Tuple[int, int, int]],
padding: Union[str, Sequence[Tuple[int, int]]],
/,
*,
dims: int = 2,
output_shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
filter_format: str = "channel_last",
data_format: str = "NDHWC",
dilations: Union[int, Tuple[int], Tuple[int, int], Tuple[int, int, int]] = 1,
feature_group_count: int = 1,
bias: Optional[paddle.Tensor] = None,
out: Optional[paddle.Tensor] = None,
):
if data_format == "channel_last":
x = x.transpose([0, dims + 1, *range(1, dims + 1)])
if filter_format == "channel_last":
filters = filters.transpose([dims + 1, dims, *range(dims)])
strides = [strides] * dims if isinstance(strides, int) else strides
dilations = [dilations] * dims if isinstance(dilations, int) else dilations
not_valid_pad, padding_list, output_padding = _pad_before_conv_tranpose(
x, filters, strides, padding, dims, dilations, output_shape, filters.shape[2:]
)
if dims == 1:
res = paddle.nn.functional.conv1d_transpose(
x,
filters,
bias=bias,
stride=strides,
padding=padding_list,
output_padding=output_padding,
groups=feature_group_count,
dilation=dilations,
data_format="NCL",
)
if not_valid_pad[0]:
res = res[:, :, 0:-1]
elif dims == 2:
res = paddle.nn.functional.conv2d_transpose(
x,
filters,
bias=bias,
stride=strides,
padding=padding_list,
output_padding=output_padding,
groups=feature_group_count,
dilation=dilations,
data_format="NCHW",
)
if not_valid_pad[0]:
res = res[:, :, 0:-1, :]
if not_valid_pad[1]:
res = res[:, :, :, 0:-1]
else:
res = paddle.nn.functional.conv3d_transpose(
x,
filters,
bias=bias,
stride=strides,
padding=padding_list,
output_padding=output_padding,
groups=feature_group_count,
dilation=dilations,
data_format="NCDHW",
)
if not_valid_pad[0]:
res = res[:, 0:-1, :, :]
if not_valid_pad[1]:
res = res[:, :, 0:-1, :]
if not_valid_pad[2]:
res = res[:, :, :, 0:-1]
if data_format == "channel_last":
res = res.transpose([0, *range(2, dims + 2), 1])
return res
| ivy/ivy/functional/backends/paddle/layers.py/0 | {
"file_path": "ivy/ivy/functional/backends/paddle/layers.py",
"repo_id": "ivy",
"token_count": 7600
} | 21 |
"""Tensorflow device functions.
Collection of TensorFlow general functions, wrapped to fit Ivy syntax
and signature.
"""
# global
_round = round
import tensorflow as tf
from typing import Union, Optional
# local
import ivy
from ivy.functional.ivy.device import Profiler as BaseProfiler
def _same_device(dev_a, dev_b):
if dev_a is None or dev_b is None:
return False
return "/" + ":".join(dev_a[1:].split(":")[-2:]) == "/" + ":".join(
dev_b[1:].split(":")[-2:]
)
def dev(
x: Union[tf.Tensor, tf.Variable, tf.TensorArray],
/,
*,
as_native: bool = False,
) -> Union[ivy.Device, str]:
if isinstance(x, tf.TensorArray):
# Read the underlying tensor being wrapped to get the device.
x = x.stack()
dv = x.device
if as_native:
return dv
dv = dv if dv else ivy.default_device(as_native=False)
return as_ivy_dev(dv)
def to_device(
x: Union[tf.Tensor, tf.Variable],
device: str,
/,
*,
stream: Optional[int] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if device is None:
return x
device = as_native_dev(device)
current_dev = dev(x)
if not _same_device(current_dev, device):
with tf.device(f"/{device.upper()}"):
return tf.identity(x)
return x
def as_ivy_dev(device: str, /):
if isinstance(device, str) and "/" not in device:
return ivy.Device(device)
dev_in_split = device[1:].split(":")[-2:]
if len(dev_in_split) == 1:
return ivy.Device(dev_in_split[0])
dev_type, dev_idx = dev_in_split
dev_type = dev_type.lower()
if dev_type == "cpu":
return ivy.Device(dev_type)
return ivy.Device(f"{dev_type}:{dev_idx}")
def as_native_dev(device: str, /):
if isinstance(device, str) and "/" in device:
return device
ret = f"/{ivy.Device(device).upper()}"
if not ret[-1].isnumeric():
ret += ":0"
return ret
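# Round-trip sketch (illustrative):
#   as_native_dev("gpu:0") -> "/GPU:0";  as_native_dev("cpu") -> "/CPU:0"
#   as_ivy_dev("/job:localhost/replica:0/task:0/device:GPU:0") -> "gpu:0"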
def clear_cached_mem_on_dev(device: str, /):
return None
def num_gpus() -> int:
return len(tf.config.list_physical_devices("GPU"))
def gpu_is_available() -> bool:
return len(tf.config.list_physical_devices("GPU")) > 0
def tpu_is_available() -> bool:
try:
resolver = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
tf.config.list_logical_devices("TPU")
tf.distribute.experimental.TPUStrategy(resolver)
return True
except ValueError:
return False
def handle_soft_device_variable(*args, fn, **kwargs):
with tf.device(ivy.default_device(as_native=True)):
return fn(*args, **kwargs)
class Profiler(BaseProfiler):
def __init__(self, save_dir: str):
super().__init__(save_dir)
self._options = tf.profiler.experimental.ProfilerOptions(
host_tracer_level=3, python_tracer_level=1, device_tracer_level=1
)
def start(self):
tf.profiler.experimental.start(self._save_dir, options=self._options)
def stop(self):
tf.profiler.experimental.stop()
def __enter__(self):
self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
| ivy/ivy/functional/backends/tensorflow/device.py/0 | {
"file_path": "ivy/ivy/functional/backends/tensorflow/device.py",
"repo_id": "ivy",
"token_count": 1434
} | 22 |
# global
from typing import Union, Optional, Tuple
import tensorflow as tf
# local
from ivy.func_wrapper import with_supported_dtypes
from . import backend_version
@with_supported_dtypes(
{
"2.15.0 and below": (
"int32",
"int64",
)
},
backend_version,
)
def unravel_index(
indices: Union[tf.Tensor, tf.Variable],
shape: Tuple[int],
/,
*,
out: Optional[Tuple[Union[tf.Tensor, tf.Variable]]] = None,
) -> Tuple[tf.Tensor]:
temp = indices
output = []
for dim in reversed(shape):
output.append(temp % dim)
temp = temp // dim
output.reverse()
ret = tf.convert_to_tensor(output, dtype=tf.int32)
return tuple(ret)
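# e.g. unravel_index(tf.constant([2, 5]), (3, 4)) gives the row/column
# tensors ([0, 1], [2, 1]), matching np.unravel_index([2, 5], (3, 4)).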
| ivy/ivy/functional/backends/tensorflow/experimental/searching.py/0 | {
"file_path": "ivy/ivy/functional/backends/tensorflow/experimental/searching.py",
"repo_id": "ivy",
"token_count": 315
} | 23 |
# global
import tensorflow as tf
from typing import Union, Optional, Sequence
# local
import ivy
from ivy.func_wrapper import with_unsupported_dtypes
from . import backend_version
from ivy.utils.einsum_parser import legalise_einsum_expr
# Array API Standard #
# -------------------#
@with_unsupported_dtypes(
{"2.15.0 and below": ("complex", "bool", "uint64")}, backend_version
)
def min(
x: Union[tf.Tensor, tf.Variable],
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
initial: Optional[Union[int, float, complex]] = None,
where: Optional[Union[tf.Tensor, tf.Variable]] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
axis = tuple(axis) if isinstance(axis, list) else axis
if where is not None:
max_val = (
ivy.iinfo(x.dtype).max
if ivy.is_int_dtype(x.dtype)
else ivy.finfo(x.dtype).max
)
x = tf.where(where, x, tf.ones_like(x) * max_val)
result = tf.math.reduce_min(x, axis=axis, keepdims=keepdims)
if initial is not None:
result = tf.minimum(result, initial)
return result
@with_unsupported_dtypes({"2.15.0 and below": ("bool",)}, backend_version)
def max(
x: Union[tf.Tensor, tf.Variable],
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if "complex" in str(x.dtype):
real = tf.math.real(x)
img = tf.math.imag(x)
const = tf.constant(1j, dtype=x.dtype)
real_max = tf.reduce_max(real, axis=axis, keepdims=keepdims)
imag = tf.where(
real == real_max,
img,
tf.experimental.numpy.finfo(img.dtype).min,
)
# we consider the number with the biggest real and imag part
img_max = tf.reduce_max(imag, axis=axis, keepdims=keepdims)
img_max = tf.cast(img_max, x.dtype)
return tf.add(tf.cast(real_max, x.dtype), tf.multiply(img_max, const))
axis = tuple(axis) if isinstance(axis, list) else axis
return tf.math.reduce_max(x, axis=axis, keepdims=keepdims)
@with_unsupported_dtypes({"2.15.0 and below": ("bool",)}, backend_version)
def mean(
x: Union[tf.Tensor, tf.Variable],
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
keepdims: bool = False,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
axis = tuple(axis) if isinstance(axis, list) else axis
return tf.math.reduce_mean(x, axis=axis, keepdims=keepdims)
def _infer_dtype(dtype: tf.DType):
default_dtype = ivy.infer_default_dtype(dtype)
if ivy.dtype_bits(dtype) < ivy.dtype_bits(default_dtype):
return default_dtype
return dtype
def prod(
x: Union[tf.Tensor, tf.Variable],
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
dtype: Optional[tf.DType] = None,
keepdims: bool = False,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
dtype = ivy.as_native_dtype(dtype)
if dtype is None:
dtype = _infer_dtype(x.dtype)
axis = tuple(axis) if isinstance(axis, list) else axis
return tf.experimental.numpy.prod(x, axis=axis, dtype=dtype, keepdims=keepdims)
def std(
x: Union[tf.Tensor, tf.Variable],
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
correction: Union[int, float] = 0.0,
keepdims: bool = False,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if axis is None:
axis = tuple(range(len(x.shape)))
axis = (axis,) if isinstance(axis, int) else tuple(axis)
size = 1
for a in axis:
size *= x.shape[a]
if size - correction <= 0:
ret = tf.experimental.numpy.std(x, axis=axis, keepdims=keepdims)
ret = ivy.full(ret.shape, float("nan"), dtype=ret.dtype)
return ret
else:
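        # tf.experimental.numpy.std is the population (ddof=0) estimate;
        # rescale by sqrt(size / (size - correction)) for the requested ddof.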
return tf.cast(
tf.math.multiply(
tf.experimental.numpy.std(x, axis=axis, keepdims=keepdims),
(size / (size - correction)) ** 0.5,
),
x.dtype,
)
def sum(
x: Union[tf.Tensor, tf.Variable],
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
dtype: Optional[tf.DType] = None,
keepdims: Optional[bool] = False,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
dtype = ivy.as_native_dtype(dtype)
if dtype is None and not ivy.is_bool_dtype(x):
dtype = x.dtype
axis = tuple(axis) if isinstance(axis, list) else axis
return tf.experimental.numpy.sum(x, axis, dtype, keepdims)
def var(
x: Union[tf.Tensor, tf.Variable],
/,
*,
axis: Optional[Union[int, Sequence[int]]] = None,
correction: Union[int, float] = 0.0,
keepdims: bool = False,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
if axis is None:
axis = tuple(range(len(x.shape)))
axis = (axis,) if isinstance(axis, int) else tuple(axis)
size = 1
for a in axis:
size *= x.shape[a]
if size - correction <= 0:
ret = tf.math.reduce_variance(x, axis=axis, keepdims=keepdims)
ret = tf.cast(tf.fill(ret.shape, float("nan")), ret.dtype)
return ret
else:
return (
tf.math.reduce_variance(x, axis=axis, keepdims=keepdims)
* size
/ (size - correction)
)
# Extra #
# ------#
@with_unsupported_dtypes({"2.15.0 and below": ("bfloat16", "bool")}, backend_version)
def cumprod(
x: Union[tf.Tensor, tf.Variable],
/,
*,
axis: int = 0,
exclusive: bool = False,
reverse: bool = False,
dtype: Optional[tf.DType] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
dtype = ivy.as_native_dtype(dtype)
if dtype is None:
        if x.dtype is tf.bool:
dtype = ivy.default_int_dtype()
else:
dtype = _infer_dtype(x.dtype)
dtype = ivy.as_native_dtype(dtype)
x = tf.cast(x, dtype)
return tf.math.cumprod(x, axis, exclusive, reverse)
@with_unsupported_dtypes({"2.15.0 and below": "bool"}, backend_version)
def cumsum(
x: Union[tf.Tensor, tf.Variable],
axis: int = 0,
exclusive: bool = False,
reverse: bool = False,
*,
dtype: Optional[tf.DType] = None,
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
dtype = ivy.as_native_dtype(dtype)
if dtype is None:
        if x.dtype is tf.bool:
dtype = ivy.default_int_dtype()
elif ivy.is_int_dtype(x.dtype):
dtype = ivy.promote_types(x.dtype, ivy.default_int_dtype(as_native=True))
else:
dtype = _infer_dtype(x.dtype)
dtype = ivy.as_native_dtype(dtype)
x = tf.cast(x, dtype)
return tf.math.cumsum(x, axis, exclusive, reverse)
@with_unsupported_dtypes(
{"2.15.0 and below": ("unsigned", "int8", "int16")},
backend_version,
)
def einsum(
equation: str,
*operands: Union[tf.Tensor, tf.Variable],
out: Optional[Union[tf.Tensor, tf.Variable]] = None,
) -> Union[tf.Tensor, tf.Variable]:
equation = legalise_einsum_expr(*[equation, *operands])
dtype_list = set(map(lambda x: x.dtype, operands))
dtype = dtype_list.pop()
if len(dtype_list) > 0:
for d in dtype_list:
dtype = ivy.promote_types(dtype, d)
dtype = ivy.as_native_dtype(dtype)
operands = list(
map(lambda x: tf.cast(x, dtype) if x.dtype != dtype else x, operands)
)
return tf.einsum(equation, *operands)
| ivy/ivy/functional/backends/tensorflow/statistical.py/0 | {
"file_path": "ivy/ivy/functional/backends/tensorflow/statistical.py",
"repo_id": "ivy",
"token_count": 3563
} | 24 |
import ivy
from ivy.functional.ivy.experimental.sparse_array import (
_verify_bsr_components,
_verify_bsc_components,
_verify_coo_components,
_verify_csr_components,
_verify_csc_components,
_is_data_not_indices_values_and_shape,
)
import torch
def is_native_sparse_array(x):
return x.layout in [
torch.sparse_coo,
torch.sparse_csr,
torch.sparse_csc,
torch.sparse_bsc,
        torch.sparse_bsr,
]
def native_sparse_array(
data=None,
*,
coo_indices=None,
crow_indices=None,
col_indices=None,
ccol_indices=None,
row_indices=None,
values=None,
dense_shape=None,
format="coo",
):
if _is_data_not_indices_values_and_shape(
data,
coo_indices,
crow_indices,
col_indices,
ccol_indices,
row_indices,
values,
dense_shape,
):
ivy.utils.assertions.check_true(
ivy.is_native_sparse_array(data), message="not a sparse array"
)
return data
format = format.lower()
if format == "coo":
_verify_coo_components(
indices=coo_indices, values=values, dense_shape=dense_shape
)
return torch.sparse_coo_tensor(
indices=coo_indices, values=values, size=dense_shape
)
elif format == "csr":
_verify_csr_components(
crow_indices=crow_indices,
col_indices=col_indices,
values=values,
dense_shape=dense_shape,
)
return torch.sparse_csr_tensor(
crow_indices=crow_indices,
col_indices=col_indices,
values=values,
size=dense_shape,
)
elif format == "csc":
_verify_csc_components(
ccol_indices=ccol_indices,
row_indices=row_indices,
values=values,
dense_shape=dense_shape,
)
return torch.sparse_csc_tensor(
ccol_indices=ccol_indices,
row_indices=row_indices,
values=values,
size=dense_shape,
)
elif format == "bsc":
_verify_bsc_components(
ccol_indices=ccol_indices,
row_indices=row_indices,
values=values,
dense_shape=dense_shape,
)
else:
_verify_bsr_components(
crow_indices=crow_indices,
col_indices=col_indices,
values=values,
dense_shape=dense_shape,
)
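# Illustrative COO round trip (example values only):
#   i = torch.tensor([[0, 1], [1, 0]]); v = torch.tensor([3.0, 4.0])
#   t = native_sparse_array(coo_indices=i, values=v, dense_shape=(2, 2))
#   native_sparse_array_to_indices_values_and_shape(t)
#   -> ({"coo_indices": ...}, values [3., 4.], torch.Size([2, 2]))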
def native_sparse_array_to_indices_values_and_shape(x):
if x.layout == torch.sparse_coo:
x = x.coalesce()
return {"coo_indices": x.indices()}, x.values(), x.size()
elif x.layout in [torch.sparse_csr, torch.sparse_bsr]:
return (
{"crow_indices": x.crow_indices(), "col_indices": x.col_indices()},
x.values(),
x.size(),
)
elif x.layout in [torch.sparse_bsc, torch.sparse_csc]:
return (
{"ccol_indices": x.crow_indices(), "row_indices": x.col_indices()},
x.values(),
x.size(),
)
raise ivy.utils.exceptions.IvyException("not a sparse COO/CSR/CSC/BSC/BSR Tensor")
| ivy/ivy/functional/backends/torch/experimental/sparse_array.py/0 | {
"file_path": "ivy/ivy/functional/backends/torch/experimental/sparse_array.py",
"repo_id": "ivy",
"token_count": 1729
} | 25 |
import torchvision
from . import layers
from .layers import *
name = "torchvision"
incompatible_sub_backends = ()
| ivy/ivy/functional/backends/torch/sub_backends/torchvision/__init__.py/0 | {
"file_path": "ivy/ivy/functional/backends/torch/sub_backends/torchvision/__init__.py",
"repo_id": "ivy",
"token_count": 37
} | 26 |
import ivy
from ivy.functional.frontends.jax.func_wrapper import (
to_ivy_arrays_and_back,
outputs_to_native_arrays,
)
from ivy.func_wrapper import outputs_to_ivy_arrays
@to_ivy_arrays_and_back
def device_get(x):
if ivy.dev(x) != "cpu":
x = ivy.to_device(x, "cpu")
return x
@to_ivy_arrays_and_back
def device_put(x, device=None, *, src=None):
if device is not None:
cur_dev = ivy.dev(x)
device = ivy.as_ivy_dev(device)
if cur_dev != device:
x = ivy.to_device(x, device)
return x
def vmap(
fun, in_axes=0, out_axes=0, axis_name=None, axis_size=None, spmd_axis_name=None
):
fun = outputs_to_native_arrays(fun)
return to_ivy_arrays_and_back(
outputs_to_ivy_arrays(ivy.vmap(fun, in_axes=in_axes, out_axes=out_axes))
)
| ivy/ivy/functional/frontends/jax/general_functions.py/0 | {
"file_path": "ivy/ivy/functional/frontends/jax/general_functions.py",
"repo_id": "ivy",
"token_count": 399
} | 27 |
# local
import ivy
from ivy.functional.frontends.jax.func_wrapper import (
to_ivy_arrays_and_back,
handle_jax_dtype,
)
from ivy.functional.frontends.jax.numpy import promote_types_of_jax_inputs
@to_ivy_arrays_and_back
def append(arr, values, axis=None):
if axis is None:
return ivy.concat((ivy.flatten(arr), ivy.flatten(values)), axis=0)
else:
return ivy.concat((arr, values), axis=axis)
@to_ivy_arrays_and_back
def array_split(ary, indices_or_sections, axis=0):
return ivy.split(
ary, num_or_size_splits=indices_or_sections, axis=axis, with_remainder=True
)
@to_ivy_arrays_and_back
def atleast_1d(*arys):
return ivy.atleast_1d(*arys)
@to_ivy_arrays_and_back
def atleast_2d(*arys):
return ivy.atleast_2d(*arys)
@to_ivy_arrays_and_back
def atleast_3d(*arys):
return ivy.atleast_3d(*arys)
@to_ivy_arrays_and_back
def bartlett(M):
if M < 1:
return ivy.array([])
if M == 1:
return ivy.ones(M, dtype=ivy.float64)
res = ivy.arange(0, M)
res = ivy.where(
ivy.less_equal(res, (M - 1) / 2.0),
2.0 * res / (M - 1),
2.0 - 2.0 * res / (M - 1),
)
return res
@to_ivy_arrays_and_back
def blackman(M):
if M < 1:
return ivy.array([])
if M == 1:
return ivy.ones((1,))
n = ivy.arange(0, M)
alpha = 0.16
a0 = (1 - alpha) / 2
a1 = 1 / 2
a2 = alpha / 2
ret = (
a0
- a1 * ivy.cos(2 * ivy.pi * n / (M - 1))
+ a2 * ivy.cos(4 * ivy.pi * n / (M - 1))
)
return ret
@to_ivy_arrays_and_back
def block(arr):
# TODO: reimplement block
raise ivy.utils.exceptions.IvyNotImplementedError()
@to_ivy_arrays_and_back
def broadcast_arrays(*args):
return ivy.broadcast_arrays(*args)
@to_ivy_arrays_and_back
def broadcast_shapes(*shapes):
return ivy.broadcast_shapes(*shapes)
@to_ivy_arrays_and_back
def broadcast_to(array, shape):
return ivy.broadcast_to(array, shape)
@to_ivy_arrays_and_back
def clip(a, a_min=None, a_max=None, out=None):
ivy.utils.assertions.check_all_or_any_fn(
a_min,
a_max,
fn=ivy.exists,
type="any",
limit=[1, 2],
message="at most one of a_min or a_max can be None",
)
a = ivy.array(a)
if a_min is None:
a, a_max = promote_types_of_jax_inputs(a, a_max)
return ivy.minimum(a, a_max, out=out)
if a_max is None:
a, a_min = promote_types_of_jax_inputs(a, a_min)
return ivy.maximum(a, a_min, out=out)
return ivy.clip(a, a_min, a_max, out=out)
@to_ivy_arrays_and_back
def column_stack(tup):
if len(ivy.shape(tup[0])) == 1:
ys = []
for t in tup:
ys += [ivy.reshape(t, (ivy.shape(t)[0], 1))]
return ivy.concat(ys, axis=1)
return ivy.concat(tup, axis=1)
@handle_jax_dtype
@to_ivy_arrays_and_back
def concatenate(arrays, axis=0, dtype=None):
ret = ivy.concat(arrays, axis=axis)
if dtype:
ret = ivy.array(ret, dtype=dtype)
return ret
@to_ivy_arrays_and_back
def diagflat(v, k=0):
ret = ivy.diagflat(v, offset=k)
while len(ivy.shape(ret)) < 2:
ret = ret.expand_dims(axis=0)
return ret
@to_ivy_arrays_and_back
def dsplit(ary, indices_or_sections):
if isinstance(indices_or_sections, (list, tuple, ivy.Array)):
indices_or_sections = (
ivy.diff(indices_or_sections, prepend=[0], append=[ary.shape[2]])
            .astype(ivy.int64)
.to_list()
)
return ivy.dsplit(ary, indices_or_sections)
@to_ivy_arrays_and_back
def dstack(tup, dtype=None):
return ivy.dstack(tup)
@to_ivy_arrays_and_back
def expand_dims(a, axis):
return ivy.expand_dims(a, axis=axis)
@to_ivy_arrays_and_back
def flip(m, axis=None):
return ivy.flip(m, axis=axis)
@to_ivy_arrays_and_back
def fliplr(m):
return ivy.fliplr(m)
@to_ivy_arrays_and_back
def flipud(m):
return ivy.flipud(m, out=None)
@to_ivy_arrays_and_back
def hamming(M):
if M <= 1:
return ivy.ones([M], dtype=ivy.float64)
n = ivy.arange(M)
ret = 0.54 - 0.46 * ivy.cos(2.0 * ivy.pi * n / (M - 1))
return ret
@to_ivy_arrays_and_back
def hanning(M):
if M <= 1:
return ivy.ones([M], dtype=ivy.float64)
n = ivy.arange(M)
ret = 0.5 * (1 - ivy.cos(2.0 * ivy.pi * n / (M - 1)))
return ret
@to_ivy_arrays_and_back
def hsplit(ary, indices_or_sections):
if isinstance(indices_or_sections, (list, tuple, ivy.Array)):
if ary.ndim == 1:
indices_or_sections = (
ivy.diff(indices_or_sections, prepend=[0], append=[ary.shape[0]])
                .astype(ivy.int64)
.to_list()
)
else:
indices_or_sections = (
ivy.diff(indices_or_sections, prepend=[0], append=[ary.shape[1]])
                .astype(ivy.int64)
.to_list()
)
return ivy.hsplit(ary, indices_or_sections)
@to_ivy_arrays_and_back
def kaiser(M, beta):
if M <= 1:
return ivy.ones([M], dtype=ivy.float64)
n = ivy.arange(M)
alpha = 0.5 * (M - 1)
ret = ivy.i0(beta * ivy.sqrt(1 - ((n - alpha) / alpha) ** 2)) / ivy.i0(beta)
return ret
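# e.g. kaiser(5, 14.0) peaks at 1.0 in the centre while the endpoints fall to
# i0(0) / i0(14.0) = 1 / i0(14.0); kaiser(1, beta) is simply [1.0].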
@to_ivy_arrays_and_back
def moveaxis(a, source, destination):
return ivy.moveaxis(a, source, destination)
@to_ivy_arrays_and_back
def pad(array, pad_width, mode="constant", **kwargs):
return ivy.pad(array, pad_width, mode=mode, **kwargs)
@to_ivy_arrays_and_back
def ravel(a, order="C"):
return ivy.reshape(a, shape=(-1,), order=order)
@to_ivy_arrays_and_back
def repeat(a, repeats, axis=None, *, total_repeat_length=None):
return ivy.repeat(a, repeats, axis=axis)
@to_ivy_arrays_and_back
def reshape(a, newshape, order="C"):
return ivy.reshape(a, shape=newshape, order=order)
@to_ivy_arrays_and_back
def resize(a, new_shape):
a = ivy.array(a)
resized_a = ivy.reshape(a, new_shape)
return resized_a
@to_ivy_arrays_and_back
def roll(a, shift, axis=None):
return ivy.roll(a, shift, axis=axis)
@to_ivy_arrays_and_back
def rot90(m, k=1, axes=(0, 1)):
return ivy.rot90(m, k=k, axes=axes)
@to_ivy_arrays_and_back
def row_stack(tup):
if len(ivy.shape(tup[0])) == 1:
xs = []
for t in tup:
xs += [ivy.reshape(t, (1, ivy.shape(t)[0]))]
return ivy.concat(xs, axis=0)
return ivy.concat(tup, axis=0)
@to_ivy_arrays_and_back
def split(ary, indices_or_sections, axis=0):
if isinstance(indices_or_sections, (list, tuple, ivy.Array)):
indices_or_sections = (
ivy.diff(indices_or_sections, prepend=[0], append=[ary.shape[axis]])
            .astype(ivy.int64)
.to_list()
)
return ivy.split(
ary, num_or_size_splits=indices_or_sections, axis=axis, with_remainder=False
)
@to_ivy_arrays_and_back
def squeeze(a, axis=None):
return ivy.squeeze(a, axis=axis)
@to_ivy_arrays_and_back
def stack(arrays, axis=0, out=None, dtype=None):
if dtype:
return ivy.astype(
ivy.stack(arrays, axis=axis, out=out), ivy.as_ivy_dtype(dtype)
)
return ivy.stack(arrays, axis=axis, out=out)
@to_ivy_arrays_and_back
def swapaxes(a, axis1, axis2):
return ivy.swapaxes(a, axis1, axis2)
@to_ivy_arrays_and_back
def take(
a,
indices,
axis=None,
out=None,
mode=None,
unique_indices=False,
indices_are_sorted=False,
fill_value=None,
):
return ivy.gather(a, indices, axis=axis, out=out)
@to_ivy_arrays_and_back
def tile(A, reps):
return ivy.tile(A, reps)
@to_ivy_arrays_and_back
def transpose(a, axes=None):
if ivy.isscalar(a):
return ivy.array(a)
elif a.ndim == 1:
return a
if not axes:
axes = list(range(len(a.shape)))[::-1]
if isinstance(axes, int):
axes = [axes]
if (len(a.shape) == 0 and not axes) or (len(a.shape) == 1 and axes[0] == 0):
return a
return ivy.permute_dims(a, axes, out=None)
@handle_jax_dtype
@to_ivy_arrays_and_back
def tri(N, M=None, k=0, dtype="float64"):
if M is None:
M = N
ones = ivy.ones((N, M), dtype=dtype)
return ivy.tril(ones, k=k)
@to_ivy_arrays_and_back
def tril(m, k=0):
return ivy.tril(m, k=k)
@to_ivy_arrays_and_back
def trim_zeros(flit, trim="fb"):
start_index = 0
end_index = ivy.shape(flit)[0]
trim = trim.lower()
if "f" in trim:
for item in flit:
if item == 0:
start_index += 1
else:
break
if "b" in trim:
for item in flit[::-1]:
if item == 0:
end_index -= 1
else:
break
return flit[start_index:end_index]
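# e.g. trim_zeros(ivy.array([0, 0, 1, 2, 0, 3, 0])) -> [1, 2, 0, 3];
# trim="f" strips leading zeros only, trim="b" trailing zeros only.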
@to_ivy_arrays_and_back
def vsplit(ary, indices_or_sections):
if isinstance(indices_or_sections, (list, tuple, ivy.Array)):
indices_or_sections = (
ivy.diff(indices_or_sections, prepend=[0], append=[ary.shape[0]])
            .astype(ivy.int64)
.to_list()
)
return ivy.vsplit(ary, indices_or_sections)
| ivy/ivy/functional/frontends/jax/numpy/manipulations.py/0 | {
"file_path": "ivy/ivy/functional/frontends/jax/numpy/manipulations.py",
"repo_id": "ivy",
"token_count": 4637
} | 28 |
import functools
import inspect
from typing import Callable
import ivy
from ivy.functional.frontends.mxnet.numpy.ndarray import ndarray
# --- Helpers --- #
# --------------- #
def _ivy_array_to_mxnet(x):
if isinstance(x, ivy.Array) or ivy.is_native_array(x):
return ndarray(x)
return x
def _mxnet_frontend_array_to_ivy(x):
if hasattr(x, "ivy_array"):
return x.ivy_array
return x
def _native_to_ivy_array(x):
if isinstance(x, ivy.NativeArray):
return ivy.array(x)
return x
def _to_ivy_array(x):
return _mxnet_frontend_array_to_ivy(_native_to_ivy_array(x))
# --- Main --- #
# ------------ #
def handle_mxnet_out(fn: Callable) -> Callable:
@functools.wraps(fn)
def _handle_mxnet_out(*args, **kwargs):
if "out" not in kwargs:
keys = list(inspect.signature(fn).parameters.keys())
out_pos = keys.index("out")
kwargs = {
**dict(
zip(
keys[keys.index("out") :],
args[out_pos:],
)
),
**kwargs,
}
args = args[:out_pos]
return fn(*args, **kwargs)
_handle_mxnet_out.handle_numpy_out = True
return _handle_mxnet_out
def inputs_to_ivy_arrays(fn: Callable) -> Callable:
@functools.wraps(fn)
def _inputs_to_ivy_arrays_mxnet(*args, **kwargs):
"""Convert `ndarray.NDArray` into `ivy.Array` instances.
Convert all `ndarray.NDArray` instances in both the positional
and keyword arguments into `ivy.Array` instances, and then calls
the function with the updated arguments.
"""
# convert all arrays in the inputs to ivy.Array instances
new_args = ivy.nested_map(
_to_ivy_array, args, include_derived={"tuple": True}, shallow=False
)
new_kwargs = ivy.nested_map(
_to_ivy_array, kwargs, include_derived={"tuple": True}, shallow=False
)
return fn(*new_args, **new_kwargs)
_inputs_to_ivy_arrays_mxnet.inputs_to_ivy_arrays = True
return _inputs_to_ivy_arrays_mxnet
def outputs_to_frontend_arrays(fn: Callable) -> Callable:
@functools.wraps(fn)
def _outputs_to_frontend_arrays_mxnet(*args, **kwargs):
"""Convert `ivy.Array` into `ndarray.NDArray` instances.
Call the function, and then converts all `ivy.Array` instances
in the function return into `ndarray.NDArray` instances.
"""
# call unmodified function
ret = fn(*args, **kwargs)
# convert all arrays in the return to `frontend.Tensorflow.tensor` instances
return ivy.nested_map(_ivy_array_to_mxnet, ret, include_derived={"tuple": True})
_outputs_to_frontend_arrays_mxnet.outputs_to_frontend_arrays = True
return _outputs_to_frontend_arrays_mxnet
def to_ivy_arrays_and_back(fn: Callable) -> Callable:
"""Wrap `fn` so it receives and returns `ivy.Array` instances.
Wrap `fn` so that input arrays are all converted to `ivy.Array`
instances and return arrays are all converted to `ndarray.NDArray`
instances.
"""
return outputs_to_frontend_arrays(inputs_to_ivy_arrays(fn))
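# Typical decoration (a sketch): wrapping a frontend function so it accepts
# and returns mxnet frontend ndarrays while running on ivy internally:
#
#   @to_ivy_arrays_and_back
#   def add(x, y):
#       return ivy.add(x, y)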
| ivy/ivy/functional/frontends/mxnet/func_wrapper.py/0 | {
"file_path": "ivy/ivy/functional/frontends/mxnet/func_wrapper.py",
"repo_id": "ivy",
"token_count": 1467
} | 29 |
# local
import ivy
from ivy.functional.frontends.numpy.func_wrapper import (
outputs_to_frontend_arrays,
handle_numpy_dtype,
)
@handle_numpy_dtype
@outputs_to_frontend_arrays
def empty(shape, dtype="float64", order="C", *, like=None):
return ivy.empty(shape=shape, dtype=dtype)
@handle_numpy_dtype
@outputs_to_frontend_arrays
def empty_like(prototype, dtype=None, order="K", subok=True, shape=None):
if shape:
return ivy.empty(shape=shape, dtype=dtype)
return ivy.empty_like(prototype, dtype=dtype)
@handle_numpy_dtype
@outputs_to_frontend_arrays
def eye(N, M=None, k=0, dtype="float64", order="C", *, like=None):
return ivy.eye(N, M, k=k, dtype=dtype)
@handle_numpy_dtype
@outputs_to_frontend_arrays
def fromfunction(function, shape, *, dtype="float64", like=None, **kwargs):
args = ivy.indices(shape, dtype=dtype)
return function(*args, **kwargs)
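# e.g. fromfunction(lambda i, j: i == j, (3, 3), dtype=int) evaluates the
# callable on index grids and returns the 3x3 identity mask, mirroring
# np.fromfunction.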
@handle_numpy_dtype
@outputs_to_frontend_arrays
def full(shape, fill_value, dtype=None, order="C", *, like=None):
return ivy.full(shape, fill_value, dtype=dtype)
@handle_numpy_dtype
@outputs_to_frontend_arrays
def full_like(a, fill_value, dtype=None, order="K", subok=True, shape=None):
if shape:
return ivy.full(shape, fill_value, dtype=dtype)
return ivy.full_like(a, fill_value, dtype=dtype)
@handle_numpy_dtype
@outputs_to_frontend_arrays
def identity(n, dtype=None, *, like=None):
return ivy.eye(n, dtype=dtype)
@handle_numpy_dtype
@outputs_to_frontend_arrays
def ones(shape, dtype=None, order="C", *, like=None):
return ivy.ones(shape, dtype=dtype)
@handle_numpy_dtype
@outputs_to_frontend_arrays
def ones_like(a, dtype=None, order="K", subok=True, shape=None):
if shape:
return ivy.ones(shape, dtype=dtype)
return ivy.ones_like(a, dtype=dtype)
@handle_numpy_dtype
@outputs_to_frontend_arrays
def zeros(shape, dtype=float, order="C", *, like=None):
return ivy.zeros(shape, dtype=dtype)
@handle_numpy_dtype
@outputs_to_frontend_arrays
def zeros_like(a, dtype=None, order="K", subok=True, shape=None):
if shape:
return ivy.zeros(shape, dtype=dtype)
return ivy.zeros_like(a, dtype=dtype)
| ivy/ivy/functional/frontends/numpy/creation_routines/from_shape_or_value.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/creation_routines/from_shape_or_value.py",
"repo_id": "ivy",
"token_count": 928
} | 30 |
from . import stride_tricks
| ivy/ivy/functional/frontends/numpy/indexing_routines/lib/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/indexing_routines/lib/__init__.py",
"repo_id": "ivy",
"token_count": 8
} | 31 |
from . import from_existing_data
from .from_existing_data import *
from . import ones_and_zeros
from .ones_and_zeros import *
from . import inspecting_the_array
from .inspecting_the_array import *
from . import changing_the_shape
from .changing_the_shape import *
from . import modifying_axes
from .modifying_axes import *
from . import changing_the_number_of_dimensions
from .changing_the_number_of_dimensions import *
from . import joining_arrays
from .joining_arrays import *
from . import creating_a_mask
from .creating_a_mask import *
from . import accessing_a_mask
from .accessing_a_mask import *
from . import finding_masked_data
from .finding_masked_data import *
from . import modifying_a_mask
from .modifying_a_mask import *
from . import to_a_masked_array
from .to_a_masked_array import *
from . import to_a_ndarray
from .to_a_ndarray import *
from . import filling_a_masked_array
from .filling_a_masked_array import *
from . import arithmetic
from .arithmetic import *
from . import minimum_maximum
from .minimum_maximum import *
from . import sorting
from .sorting import *
from . import algebra
from .algebra import *
from . import polynomial_fit
from .polynomial_fit import *
from . import clipping_and_rounding
from .clipping_and_rounding import *
from . import miscellanea
from .miscellanea import *
from . import MaskedArray
from .MaskedArray import *
| ivy/ivy/functional/frontends/numpy/ma/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/ma/__init__.py",
"repo_id": "ivy",
"token_count": 420
} | 32 |
# local
import ivy
from ivy.functional.frontends.numpy.func_wrapper import to_ivy_arrays_and_back
@to_ivy_arrays_and_back
def repeat(a, repeats, axis=None):
return ivy.repeat(a, repeats, axis=axis)
@to_ivy_arrays_and_back
def tile(A, reps):
return ivy.tile(A, reps)
| ivy/ivy/functional/frontends/numpy/manipulation_routines/tiling_arrays.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/manipulation_routines/tiling_arrays.py",
"repo_id": "ivy",
"token_count": 115
} | 33 |
# global
import ivy
# local
from ivy.functional.frontends.numpy import (
argmax,
any,
ndarray,
)
class matrix:
def __init__(self, data, dtype=None, copy=True):
self._init_data(data, dtype, copy)
def _init_data(self, data, dtype, copy):
if isinstance(data, str):
self._process_str_data(data, dtype)
elif isinstance(data, (list, ndarray)) or ivy.is_array(data):
if isinstance(data, ndarray):
data = data.ivy_array
if ivy.is_array(data) and dtype is None:
dtype = data.dtype
data = ivy.array(data, dtype=dtype, copy=copy)
self._data = data
elif ivy.isscalar(data):
self._data = ivy.asarray(data, dtype=dtype)
else:
raise ivy.utils.exceptions.IvyException(
"data must be an array, list, or scalar"
)
if self._data.ndim < 2:
self._data = self._data.reshape((1, -1))
elif self._data.ndim > 2:
newshape = tuple(x for x in self._data.shape if x > 1)
ndim = len(newshape)
if ndim == 2:
self._data = self._data.reshape(newshape)
else:
raise ValueError("shape too large to be a matrix.")
self._dtype = self._data.dtype
self._shape = ivy.shape(self._data)
def _process_str_data(self, data, dtype):
is_float = "." in data or "e" in data
is_complex = "j" in data
data = data.replace(",", " ")
data = " ".join(data.split())
data = data.split(";")
for i, row in enumerate(data):
row = row.strip().split(" ")
data[i] = row
for j, elem in enumerate(row):
if is_complex:
data[i][j] = complex(elem)
else:
data[i][j] = float(elem) if is_float else int(elem)
if dtype is None:
if is_complex:
dtype = ivy.complex128
else:
dtype = ivy.float64 if is_float else ivy.int64
self._data = ivy.array(data, dtype=dtype)
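    # e.g. matrix("1 2; 3 4") parses to a 2x2 int64 matrix, while
    # matrix("1.5 2; 3 4") yields float64 and matrix("1+2j 3; 4 5j") yields
    # complex128 (rows split on ';', elements on whitespace or commas).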
# Properties #
# ---------- #
@property
def A(self):
return self._data
@property
def A1(self):
return ivy.reshape(self._data, (self.size,))
# flake8: noqa: E743, E741
@property
def I(self):
if ivy.is_int_dtype(self._data):
return ivy.inv(self._data.astype(ivy.float64))
return ivy.inv(self._data)
@property
def T(self):
return ivy.matrix_transpose(self._data)
@property
def data(self):
return memoryview(ivy.to_numpy(self._data).tobytes())
@property
def dtype(self):
return self._dtype
@property
def ndim(self):
return len(self._shape)
@property
def shape(self):
return self._shape
@property
def size(self):
return self._shape[0] * self._shape[1]
# Setters #
# ------- #
@dtype.setter
def dtype(self, dtype):
self._data = ivy.astype(self._data, dtype)
self._dtype = self._data.dtype
# Built-ins #
# --------- #
def __repr__(self):
return "ivy.matrix(" + str(self._data.to_list()) + ")"
# Instance Methods #
# ---------------- #
def argmax(self, axis=None, out=None):
if ivy.exists(axis):
return argmax(self.A, axis=axis, keepdims=True, out=out)
return argmax(self.A, axis=axis, out=out)
def any(self, axis=None, out=None):
if ivy.exists(axis):
return any(self.A, axis=axis, keepdims=True, out=out)
return any(self.A, axis=axis, out=out)
| ivy/ivy/functional/frontends/numpy/matrix/methods.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/matrix/methods.py",
"repo_id": "ivy",
"token_count": 1870
} | 34 |
# global
import ivy
from ivy.func_wrapper import with_unsupported_dtypes
from ivy.functional.frontends.numpy.func_wrapper import (
to_ivy_arrays_and_back,
handle_numpy_dtype,
from_zero_dim_arrays_to_scalar,
handle_numpy_out,
)
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def average(a, /, *, axis=None, weights=None, returned=False, keepdims=False):
axis = tuple(axis) if isinstance(axis, list) else axis
avg = 0
if keepdims is None:
keepdims_kw = {}
else:
keepdims_kw = {"keepdims": keepdims}
dtype = a.dtype
if weights is None:
        avg = a.mean(axis, **keepdims_kw)
        # count of elements contributing to each output entry
        weights_sum = ivy.full_like(avg, a.size // avg.size)
else:
if a.shape != weights.shape:
            if axis is None:
                raise TypeError(
                    "Axis must be specified when shapes of a and weights differ."
                )
weights = ivy.broadcast_to(weights, (a.ndim - 1) * (1,) + weights.shape)
weights = weights.swapaxes(-1, axis)
weights_sum = weights.sum(axis=axis, **keepdims_kw)
mul = ivy.multiply(a, weights)
avg = ivy.sum(mul, axis=axis, **keepdims_kw) / weights_sum
if returned:
if weights_sum.shape != avg.shape:
weights_sum = ivy.broadcast_to(weights_sum, avg.shape).copy()
return avg.astype(dtype), weights_sum
else:
return avg.astype(dtype)
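# e.g. average(np.array([1.0, 2.0, 3.0]), weights=np.array([3.0, 1.0, 0.0]))
# -> 1.25, since (1*3 + 2*1 + 3*0) / (3 + 1 + 0) = 5 / 4.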
@to_ivy_arrays_and_back
def cov(
m,
y=None,
/,
*,
rowvar=True,
bias=False,
ddof=None,
fweights=None,
aweights=None,
dtype=None,
):
return ivy.cov(
m,
y,
rowVar=rowvar,
bias=bias,
ddof=ddof,
fweights=fweights,
aweights=aweights,
dtype=dtype,
)
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def mean(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
axis = tuple(axis) if isinstance(axis, list) else axis
    dtype = dtype or (a.dtype if not ivy.is_int_dtype(a.dtype) else ivy.float64)
    if where is not True:
        # mask out the excluded entries, then divide the masked sum by the
        # per-position count
        where = ivy.where(where, ivy.ones_like(a), 0)
        a = ivy.where(where, a, 0.0)
        sum = ivy.sum(a, axis=axis, keepdims=keepdims, dtype=dtype)
        cnt = ivy.sum(where, axis=axis, keepdims=keepdims, dtype=int)
        ret = ivy.divide(sum, cnt, out=out)
    else:
        ret = ivy.mean(a.astype(dtype), axis=axis, keepdims=keepdims, out=out)
return ret.astype(dtype)
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def nanmean(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
where = ~ivy.isnan(a) & where
ret = mean(a, axis, dtype, keepdims=keepdims, where=where).ivy_array
if out is not None:
out.data = ret.data
return ret
# nanmedian
@handle_numpy_out
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def nanmedian(
a,
/,
*,
axis=None,
keepdims=False,
out=None,
overwrite_input=False,
):
ret = ivy.nanmedian(
a, axis=axis, keepdims=keepdims, out=out, overwrite_input=overwrite_input
)
return ret
@handle_numpy_out
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def nanstd(
a, /, *, axis=None, dtype=None, out=None, ddof=0, keepdims=False, where=True
):
a = ivy.nan_to_num(a)
axis = tuple(axis) if isinstance(axis, list) else axis
if dtype:
a = ivy.astype(ivy.array(a), ivy.as_ivy_dtype(dtype))
ret = ivy.std(a, axis=axis, correction=ddof, keepdims=keepdims, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.25.0 and below": ("float16", "bfloat16")}, "tensorflow")
def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True):
is_nan = ivy.isnan(a)
axis = tuple(axis) if isinstance(axis, list) else axis
if ivy.any(is_nan):
        # drop the NaNs via boolean masking (flattens `a`; the variance is
        # then computed over the remaining values)
        a = a[~is_nan]
if dtype is None:
dtype = "float" if ivy.is_int_dtype(a) else a.dtype
a = ivy.astype(ivy.array(a), ivy.as_ivy_dtype(dtype))
ret = ivy.var(a, axis=axis, correction=ddof, keepdims=keepdims, out=out)
if ivy.is_array(where):
where = ivy.array(where, dtype=ivy.bool)
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
if ivy.all(ivy.isnan(ret)):
ret = ivy.astype(ret, ivy.array([float("inf")]))
return ret
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def std(
x,
/,
*,
axis=None,
ddof=0.0,
keepdims=False,
out=None,
dtype=None,
where=True,
):
axis = tuple(axis) if isinstance(axis, list) else axis
if dtype is None:
if ivy.is_int_dtype(x.dtype):
dtype = ivy.float64
else:
dtype = x.dtype
ret = ivy.std(x, axis=axis, correction=ddof, keepdims=keepdims, out=out)
if ivy.is_array(where):
ret = ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
return ret.astype(dtype, copy=False)
@handle_numpy_out
@handle_numpy_dtype
@to_ivy_arrays_and_back
@from_zero_dim_arrays_to_scalar
def var(x, /, *, axis=None, ddof=0.0, keepdims=False, out=None, dtype=None, where=True):
axis = tuple(axis) if isinstance(axis, list) else axis
    if dtype is None:
        dtype = ivy.float64 if ivy.is_int_dtype(x.dtype) else x.dtype
ret = ivy.var(x, axis=axis, correction=ddof, keepdims=keepdims, out=out)
ret = (
ivy.where(where, ret, ivy.default(out, ivy.zeros_like(ret)), out=out)
if ivy.is_array(where)
else ret
)
return ret.astype(dtype, copy=False)
| ivy/ivy/functional/frontends/numpy/statistics/averages_and_variances.py/0 | {
"file_path": "ivy/ivy/functional/frontends/numpy/statistics/averages_and_variances.py",
"repo_id": "ivy",
"token_count": 2839
} | 35 |
# global
import ivy
from ivy.func_wrapper import with_supported_dtypes
from ivy.functional.frontends.paddle.func_wrapper import (
to_ivy_arrays_and_back,
)
@with_supported_dtypes(
{"2.6.0 and below": ("complex64", "complex128")},
"paddle",
)
@to_ivy_arrays_and_back
def fft(x, n=None, axis=-1, norm="backward", name=None):
ret = ivy.fft(ivy.astype(x, "complex128"), axis, norm=norm, n=n)
return ivy.astype(ret, x.dtype)
@with_supported_dtypes(
{
"2.6.0 and below": (
"int32",
"int64",
"float32",
"float64",
)
},
"paddle",
)
@to_ivy_arrays_and_back
def fft2(x, s=None, axes=(-2, -1), norm="backward", name=None):
if axes is None:
axes = (-2, -1)
ret = ivy.fft2(x, s=s, dim=axes, norm=norm)
return ret
@with_supported_dtypes(
{
"2.6.0 and below": (
"int32",
"int64",
"float32",
"float64",
)
},
"paddle",
)
@to_ivy_arrays_and_back
def fftfreq(n, d=1.0, dtype=None, name=None):
if d * n == 0:
raise ValueError("d or n should not be 0.")
if dtype is None:
dtype = ivy.default_dtype()
val = 1.0 / (n * d)
pos_max = (n + 1) // 2
neg_max = n // 2
indices = ivy.arange(-neg_max, pos_max, dtype=dtype)
indices = ivy.roll(indices, -neg_max)
return ivy.multiply(indices, val)
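# e.g. fftfreq(4, d=1.0) -> [0.0, 0.25, -0.5, -0.25], matching
# np.fft.fftfreq(4).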
@with_supported_dtypes(
{
"2.6.0 and below": (
"int32",
"int64",
"float32",
"float64",
"complex64",
"complex128",
)
},
"paddle",
)
@to_ivy_arrays_and_back
def fftshift(x, axes=None, name=None):
shape = x.shape
if axes is None:
axes = tuple(range(x.ndim))
shifts = [(dim // 2) for dim in shape]
elif isinstance(axes, int):
shifts = shape[axes] // 2
else:
        shifts = [shape[ax] // 2 for ax in axes]
roll = ivy.roll(x, shifts, axis=axes)
return roll
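# e.g. fftshift(ivy.arange(4)) rolls by n // 2 to [2, 3, 0, 1], centring the
# zero-frequency bin; ifftshift below applies the inverse roll.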
@with_supported_dtypes(
{"2.6.0 and below": ("complex64", "complex128")},
"paddle",
)
@to_ivy_arrays_and_back
def hfft(x, n=None, axes=-1, norm="backward", name=None):
"""Compute the FFT of a signal that has Hermitian symmetry, resulting in a
real spectrum."""
# Determine the input shape and axis length
input_shape = x.shape
input_len = input_shape[axes]
# Calculate n if not provided
if n is None:
n = 2 * (input_len - 1)
# Perform the FFT along the specified axis
result = ivy.fft(x, axes, n=n, norm=norm)
return ivy.real(result)
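# e.g. a half-spectrum of length 5 produces a real signal of length
# n = 2 * (5 - 1) = 8 when n is left as None.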
@with_supported_dtypes(
{"2.6.0 and below": "complex64"},
"paddle",
)
@to_ivy_arrays_and_back
def hfft2(x, s=None, axis=(-2, -1), norm="backward"):
# check if the input tensor x is a hermitian complex
if not ivy.allclose(ivy.conj(ivy.matrix_transpose(x)), x):
raise ValueError("Input tensor x must be Hermitian complex.")
fft_result = ivy.fft2(x, s=s, dim=axis, norm=norm)
# Depending on the norm, apply scaling and normalization
if norm == "forward":
fft_result /= ivy.sqrt(ivy.prod(ivy.shape(fft_result)))
elif norm == "ortho":
fft_result /= ivy.sqrt(ivy.prod(ivy.shape(x)))
return ivy.real(fft_result) # Return the real part of the result
@with_supported_dtypes(
{"2.6.0 and below": ("complex64", "complex128")},
"paddle",
)
@to_ivy_arrays_and_back
def ifft(x, n=None, axis=-1, norm="backward", name=None):
ret = ivy.ifft(ivy.astype(x, "complex128"), axis, norm=norm, n=n)
return ivy.astype(ret, x.dtype)
@with_supported_dtypes(
{"2.6.0 and below": ("complex64", "complex128")},
"paddle",
)
@to_ivy_arrays_and_back
def ifftn(x, s=None, axes=None, norm="backward", name=None):
ret = ivy.ifftn(ivy.astype(x, "complex128"), s=s, axes=axes, norm=norm)
return ivy.astype(ret, x.dtype)
@with_supported_dtypes(
{
"2.6.0 and below": (
"int32",
"int64",
"float32",
"float64",
)
},
"paddle",
)
@to_ivy_arrays_and_back
def ifftshift(x, axes=None, name=None):
shape = x.shape
if axes is None:
axes = tuple(range(x.ndim))
shifts = [-(dim // 2) for dim in shape]
elif isinstance(axes, int):
shifts = -(shape[axes] // 2)
else:
        shifts = [-(shape[ax] // 2) for ax in axes]
roll = ivy.roll(x, shifts, axis=axes)
return roll
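# Illustration only (hypothetical helper, not a paddle API): ifftshift
# undoes fftshift, including for odd lengths, because it rolls by
# -(dim // 2) rather than dim // 2.
def _ifftshift_roundtrip_example():
    x = ivy.array([0.0, 1.0, 2.0, 3.0, 4.0])
    return ifftshift(fftshift(x))  # -> [0., 1., 2., 3., 4.]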
@with_supported_dtypes(
{
"2.6.0 and below": (
"int32",
"int64",
"float32",
"float64",
)
},
"paddle",
)
@to_ivy_arrays_and_back
def ihfft2(x, s=None, axes=(-2, -1), norm="backward", name=None):
# check if the input array is two-dimensional and real
if len(ivy.array(x).shape) != 2 or ivy.is_complex_dtype(x):
raise ValueError("input must be a two-dimensional real array")
# cast the input to the same float64 type so that there are no backend issues
x_ = ivy.astype(x, ivy.float64)
ihfft2_result = 0
# Compute the complex conjugate of the 2-dimensional discrete Fourier Transform
if norm == "backward":
ihfft2_result = ivy.conj(ivy.rfftn(x_, s=s, axes=axes, norm="forward"))
if norm == "forward":
ihfft2_result = ivy.conj(ivy.rfftn(x_, s=s, axes=axes, norm="backward"))
if norm == "ortho":
ihfft2_result = ivy.conj(ivy.rfftn(x_, s=s, axes=axes, norm="ortho"))
if x.dtype in [ivy.float32, ivy.int32, ivy.int64]:
return ivy.astype(ihfft2_result, ivy.complex64)
if x.dtype == ivy.float64:
return ivy.astype(ihfft2_result, ivy.complex128)
@to_ivy_arrays_and_back
def ihfftn(x, s=None, axes=None, norm="backward", name=None):
# cast the input to the same float64 type so that there are no backend issues
x_ = ivy.astype(x, ivy.float64)
ihfftn_result = 0
# Compute the complex conjugate of the 2-dimensional discrete Fourier Transform
if norm == "backward":
ihfftn_result = ivy.conj(ivy.rfftn(x_, s=s, axes=axes, norm="forward"))
if norm == "forward":
ihfftn_result = ivy.conj(ivy.rfftn(x_, s=s, axes=axes, norm="backward"))
if norm == "ortho":
ihfftn_result = ivy.conj(ivy.rfftn(x_, s=s, axes=axes, norm="ortho"))
if x.dtype in [ivy.float32, ivy.int32, ivy.int64]:
return ivy.astype(ihfftn_result, ivy.complex64)
if x.dtype == ivy.float64:
return ivy.astype(ihfftn_result, ivy.complex128)
@with_supported_dtypes(
{"2.6.0 and below": ("complex64", "complex128")},
"paddle",
)
@to_ivy_arrays_and_back
def irfft(x, n=None, axis=-1, norm="backward", name=None):
if n is None:
n = 2 * (x.shape[axis] - 1)
    pos_freq_terms = ivy.take_along_axis(x, ivy.arange(n // 2 + 1), axis)
neg_freq_terms = ivy.conj(pos_freq_terms[1:-1][::-1])
combined_freq_terms = ivy.concat((pos_freq_terms, neg_freq_terms), axis=axis)
time_domain = ivy.ifft(combined_freq_terms, axis, norm=norm, n=n)
    if ivy.all(ivy.isreal(x)):
time_domain = ivy.real(time_domain)
return time_domain
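# Sketch (illustration only, hypothetical helper): irfft reconstructs
# n = 2 * (m - 1) real samples from a one-sided spectrum of length m by
# default, so a 5-bin spectrum yields 8 samples.
def _irfft_length_example():
    spectrum = ivy.astype(ivy.arange(5), "complex64")
    return irfft(spectrum).shape  # -> (8,)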
@with_supported_dtypes(
{
"2.6.0 and below": (
"int32",
"int64",
"float16",
"float32",
"float64",
"complex64",
"complex128",
)
},
"paddle",
)
@to_ivy_arrays_and_back
def irfft2(x, s=None, axes=(-2, -1), norm="backward"):
# Handle values if None
if s is None:
s = x.shape
if axes is None:
axes = (-2, -1)
# Calculate the normalization factor 'n' based on the shape 's'
n = ivy.prod(ivy.array(s))
    result = ivy.ifftn(x, axes=axes, norm=norm)
# Normalize the result based on the 'norm' parameter
if norm == "backward":
result /= n
elif norm == "forward":
result *= n
elif norm == "ortho":
result /= ivy.sqrt(n)
return result
@with_supported_dtypes(
{"2.6.0 and below": ("complex64", "complex128")},
"paddle",
)
@to_ivy_arrays_and_back
def irfftn(x, s=None, axes=None, norm="backward", name=None):
x = ivy.array(x)
if axes is None:
axes = list(range(len(x.shape)))
include_last_axis = len(x.shape) - 1 in axes
if s is None:
s = [
x.shape[axis] if axis != (len(x.shape) - 1) else 2 * (x.shape[axis] - 1)
for axis in axes
]
real_result = x
remaining_axes = [axis for axis in axes if axis != (len(x.shape) - 1)]
if remaining_axes:
real_result = ivy.ifftn(
x,
s=[s[axes.index(axis)] for axis in remaining_axes],
axes=remaining_axes,
norm=norm,
)
if include_last_axis:
axis = len(x.shape) - 1
size = s[axes.index(axis)]
freq_domain = ivy.moveaxis(real_result, axis, -1)
slices = [slice(None)] * ivy.get_num_dims(freq_domain)
slices[-1] = slice(0, size // 2 + 1)
pos_freq_terms = freq_domain[tuple(slices)]
slices[-1] = slice(1, -1)
neg_freq_terms = ivy.conj(pos_freq_terms[tuple(slices)][..., ::-1])
combined_freq_terms = ivy.concat((pos_freq_terms, neg_freq_terms), axis=-1)
real_result = ivy.ifftn(combined_freq_terms, s=[size], axes=[-1], norm=norm)
real_result = ivy.moveaxis(real_result, -1, axis)
if ivy.is_complex_dtype(x.dtype):
output_dtype = "float32" if x.dtype == "complex64" else "float64"
else:
output_dtype = "float32"
result_t = ivy.astype(real_result, output_dtype)
return result_t
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
def rfft(x, n=None, axis=-1, norm="backward", name=None):
return ivy.dft(x, axis=axis, inverse=False, onesided=True, dft_length=n, norm=norm)
@to_ivy_arrays_and_back
def rfftfreq(n, d=1.0, dtype=None, name=None):
    if dtype is None:
        dtype = ivy.default_dtype()
val = 1.0 / (n * d)
pos_max = n // 2 + 1
indices = ivy.arange(0, pos_max, dtype=dtype)
return indices * val
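# Illustration only (hypothetical helper, not a paddle API): rfftfreq(8)
# returns the five non-negative frequency bins
# [0.0, 0.125, 0.25, 0.375, 0.5].
def _rfftfreq_example():
    return rfftfreq(8)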
| ivy/ivy/functional/frontends/paddle/fft.py/0 | {
"file_path": "ivy/ivy/functional/frontends/paddle/fft.py",
"repo_id": "ivy",
"token_count": 4898
} | 36 |
# local
import ivy
from ivy.func_wrapper import with_supported_dtypes
from ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back
from ivy.functional.ivy.experimental.layers import _broadcast_pooling_helper
from ivy.func_wrapper import with_unsupported_dtypes
@to_ivy_arrays_and_back
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
def adaptive_avg_pool1d(x, output_size, name=None):
return ivy.adaptive_avg_pool1d(x, output_size)
@to_ivy_arrays_and_back
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
def adaptive_avg_pool2d(x, output_size, data_format="NCHW", name=None):
return ivy.adaptive_avg_pool2d(x, output_size, data_format=data_format)
@to_ivy_arrays_and_back
@with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle")
def adaptive_avg_pool3d(x, output_size, data_format="NCHW", name=None):
return ivy.adaptive_avg_pool3d(x, output_size)
@to_ivy_arrays_and_back
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
def adaptive_max_pool2d(x, output_size, return_mask=None, name=None):
return ivy.adaptive_max_pool2d(x, output_size)
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle")
def avg_pool1d(
x, kernel_size, stride=None, padding=0, exclusive=True, ceil_mode=False, name=None
):
data_format = "NCW"
    count_include_pad = not exclusive
if stride is None:
stride = kernel_size
kernel_size = _broadcast_pooling_helper(kernel_size, "1d", name="kernel_size")
padding = _broadcast_pooling_helper(padding, "1d", name="padding")
# Figure out padding string
if all(
pad == ivy.ceil((kernel - 1) / 2) for kernel, pad in zip(kernel_size, padding)
):
padding = "SAME"
else:
padding = "VALID"
return ivy.avg_pool1d(
x,
kernel_size,
stride,
padding,
        count_include_pad=count_include_pad,
ceil_mode=ceil_mode,
data_format=data_format,
)
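# Illustration only (hypothetical helper, not part of the frontend): the
# padding-string heuristic above maps a symmetric pad of
# ceil((kernel - 1) / 2) to "SAME" and anything else to "VALID", e.g.
# kernel_size=3 with padding=1 gives "SAME", padding=0 gives "VALID".
def _padding_string_example(kernel_size, padding):
    import math
    return "SAME" if padding == math.ceil((kernel_size - 1) / 2) else "VALID"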
@to_ivy_arrays_and_back
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
def avg_pool2d(
x,
kernel_size,
stride=None,
padding=0,
ceil_mode=False,
exclusive=True,
divisor_override=None,
data_format="NCHW",
name=None,
):
if stride is None:
stride = kernel_size
kernel_size = _broadcast_pooling_helper(kernel_size, "2d", name="kernel_size")
padding = _broadcast_pooling_helper(padding, "2d", name="padding")
# Figure out padding string
if all(
pad == ivy.ceil((kernel - 1) / 2) for kernel, pad in zip(kernel_size, padding)
):
padding = "SAME"
else:
padding = "VALID"
count_include_pad = not exclusive
return ivy.avg_pool2d(
x,
kernel_size,
stride,
padding,
data_format=data_format,
count_include_pad=count_include_pad,
ceil_mode=ceil_mode,
divisor_override=divisor_override,
)
@to_ivy_arrays_and_back
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
def max_pool2d(
x,
kernel_size,
stride=None,
padding=0,
return_mask=False,
ceil_mode=False,
data_format="NCHW",
name=None,
):
if stride is None:
stride = kernel_size
kernel_size = _broadcast_pooling_helper(kernel_size, "2d", name="kernel_size")
padding = _broadcast_pooling_helper(padding, "2d", name="padding")
if data_format not in ["NCHW", "NHWC"]:
raise ValueError(
"Attr(data_format) should be 'NCHW' or 'NHWC'. Received "
f"Attr(data_format): {data_format}."
)
if data_format == "NHWC" and return_mask:
raise ValueError(
"When setting return_mask to true, data_format must be set to NCHW in"
" API:max_pool2d"
)
return ivy.max_pool2d(
x, kernel_size, stride, padding, data_format=data_format, ceil_mode=ceil_mode
)
@to_ivy_arrays_and_back
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
def max_unpool1d(
x,
indices,
kernel_size,
stride=None,
padding=0,
data_format="NCL",
output_size=None,
name=None,
):
return ivy.max_unpool1d(
x,
indices,
kernel_size,
strides=stride,
padding=padding,
data_format=data_format,
)
| ivy/ivy/functional/frontends/paddle/nn/functional/pooling.py/0 | {
"file_path": "ivy/ivy/functional/frontends/paddle/nn/functional/pooling.py",
"repo_id": "ivy",
"token_count": 1998
} | 37 |
from . import transforms
from .transforms import *
| ivy/ivy/functional/frontends/paddle/vision/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/paddle/vision/__init__.py",
"repo_id": "ivy",
"token_count": 12
} | 38 |
# This submodule is now considered legacy; new code should use scipy.fft.
from ..fft import *
from .fftpack import *
| ivy/ivy/functional/frontends/scipy/fftpack/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/scipy/fftpack/__init__.py",
"repo_id": "ivy",
"token_count": 36
} | 39 |
from . import windows
from .signal import *
| ivy/ivy/functional/frontends/scipy/signal/__init__.py/0 | {
"file_path": "ivy/ivy/functional/frontends/scipy/signal/__init__.py",
"repo_id": "ivy",
"token_count": 12
} | 40 |
from abc import ABC, abstractmethod
import ivy
class Criterion(ABC):
@abstractmethod
def reset(self):
raise NotImplementedError
@abstractmethod
def reverse_reset(self):
raise NotImplementedError
@abstractmethod
def update(self, new_pos):
raise NotImplementedError
def proxy_impurity_improvement(self):
impurity_left = 0.0
impurity_right = 0.0
impurity_left, impurity_right = self.children_impurity(
impurity_left, impurity_right
)
return (
-self.weighted_n_right * impurity_right
- self.weighted_n_left * impurity_left
)
def impurity_improvement(
self, impurity_parent: float, impurity_left: float, impurity_right: float
):
return (self.weighted_n_node_samples / self.weighted_n_samples) * (
impurity_parent
- (self.weighted_n_right / self.weighted_n_node_samples * impurity_right)
- (self.weighted_n_left / self.weighted_n_node_samples * impurity_left)
)
def node_value(self, dest, node_id):
return dest
class ClassificationCriterion(Criterion):
def __init__(self, n_outputs: int, n_classes: ivy.Array):
self.start = 0
self.pos = 0
self.end = 0
self.missing_go_to_left = 0
self.n_outputs = n_outputs
self.n_samples = 0
self.n_node_samples = 0
self.weighted_n_node_samples = 0.0
self.weighted_n_left = 0.0
self.weighted_n_right = 0.0
self.weighted_n_missing = 0.0
self.n_classes = ivy.empty(n_outputs, dtype=ivy.int16)
max_n_classes = 0
for k in range(n_outputs):
self.n_classes[k] = n_classes[k]
if n_classes[k] > max_n_classes:
max_n_classes = n_classes[k]
if isinstance(max_n_classes, ivy.Array):
max_n_classes = ivy.to_scalar(max_n_classes)
self.max_n_classes = max_n_classes
self.sum_total = ivy.zeros((n_outputs, max_n_classes), dtype=ivy.float64)
self.sum_left = ivy.zeros((n_outputs, max_n_classes), dtype=ivy.float64)
self.sum_right = ivy.zeros((n_outputs, max_n_classes), dtype=ivy.float64)
def init(
self,
y,
sample_weight,
weighted_n_samples,
sample_indices,
start,
end,
):
self.y = y
self.sample_weight = sample_weight
self.sample_indices = sample_indices
self.start = start
self.end = end
self.n_node_samples = end - start
self.weighted_n_samples = weighted_n_samples
self.weighted_n_node_samples = 0.0
w = 1.0
for k in range(self.n_outputs):
n_cls = ivy.to_scalar(self.n_classes[k])
self.sum_total[k, :n_cls] = 0
for p in range(start, end):
i = sample_indices[p]
if sample_weight is not None:
w = sample_weight[i]
for k in range(self.n_outputs):
c = int(self.y[i, k])
self.sum_total[k, c] += w
self.weighted_n_node_samples += w
self.reset()
return 0
def init_sum_missing(self):
self.sum_missing = ivy.zeros(
(self.n_outputs, self.max_n_classes), dtype=ivy.float64
)
def node_value(self, dest, node_id):
for k in range(self.n_outputs):
n_cls = ivy.to_scalar(self.n_classes[k])
dest[node_id, k, :n_cls] = self.sum_total[k, :n_cls]
return dest
def init_missing(self, n_missing):
w = 1.0
self.n_missing = n_missing
if n_missing == 0:
return
self.sum_missing[0 : self.n_outputs, 0 : self.max_n_classes] = 0
self.weighted_n_missing = 0.0
for p in range(self.end - n_missing, self.end):
i = self.sample_indices[p]
if self.sample_weight is not None:
w = self.sample_weight[i]
for k in range(self.n_outputs):
c = int(self.y[i, k])
self.sum_missing[k, c] += w
self.weighted_n_missing += w
def reset(self):
self.pos = self.start
(
self.weighted_n_left,
self.weighted_n_right,
self.sum_left,
self.sum_right,
) = _move_sums_classification(
self,
self.sum_left,
self.sum_right,
self.weighted_n_left,
self.weighted_n_right,
self.missing_go_to_left,
)
return 0
def reverse_reset(self):
self.pos = self.end
(
self.weighted_n_right,
self.weighted_n_left,
self.sum_right,
self.sum_left,
) = _move_sums_classification(
self,
self.sum_right,
self.sum_left,
self.weighted_n_right,
self.weighted_n_left,
not self.missing_go_to_left,
)
return 0
def update(self, new_pos):
pos = self.pos
end_non_missing = self.end - self.n_missing
sample_indices = self.sample_indices
sample_weight = self.sample_weight
w = 1.0
if (new_pos - pos) <= (end_non_missing - new_pos):
for p in range(pos, new_pos):
i = sample_indices[p]
if sample_weight is not None:
w = sample_weight[i]
for k in range(self.n_outputs):
c = int(self.y[i, k])
self.sum_left[k, c] += w
self.weighted_n_left += w
else:
self.reverse_reset()
for p in range(end_non_missing - 1, new_pos - 1, -1):
i = sample_indices[p]
if sample_weight is not None:
w = sample_weight[i]
for k in range(self.n_outputs):
c = int(self.y[i, k])
self.sum_left[k, c] -= w
self.weighted_n_left -= w
self.weighted_n_right = self.weighted_n_node_samples - self.weighted_n_left
for k in range(self.n_outputs):
for c in range(ivy.to_scalar(self.n_classes[k])):
self.sum_right[k, c] = self.sum_total[k, c] - self.sum_left[k, c]
self.pos = new_pos
return 0
class Gini(ClassificationCriterion):
def node_impurity(self):
gini = 0.0
for k in range(self.n_outputs):
sq_count = 0.0
for c in range(int(self.n_classes[k])):
count_k = self.sum_total[k, c]
sq_count += count_k * count_k
gini += 1.0 - sq_count / (
self.weighted_n_node_samples * self.weighted_n_node_samples
)
return gini / self.n_outputs
def children_impurity(
self,
impurity_left: float,
impurity_right: float,
):
gini_left, gini_right = 0.0, 0.0
for k in range(self.n_outputs):
sq_count_left, sq_count_right = 0.0, 0.0
for c in range(int(self.n_classes[k])):
count_k = self.sum_left[k, c]
sq_count_left += count_k * count_k
count_k = self.sum_right[k, c]
sq_count_right += count_k * count_k
gini_left += 1.0 - sq_count_left / (
self.weighted_n_left * self.weighted_n_left
)
gini_right += 1.0 - sq_count_right / (
self.weighted_n_right * self.weighted_n_right
)
impurity_left = gini_left / self.n_outputs
impurity_right = gini_right / self.n_outputs
return impurity_left, impurity_right
# --- Helpers --- #
# --------------- #
def _move_sums_classification(
criterion, sum_1, sum_2, weighted_n_1, weighted_n_2, put_missing_in_1
):
for k in range(criterion.n_outputs):
n = int(criterion.n_classes[k])
sum_1[k, :n] = 0
sum_2[k, :n] = criterion.sum_total[k, :n]
weighted_n_1 = 0.0
weighted_n_2 = criterion.weighted_n_node_samples
return weighted_n_1, weighted_n_2, sum_1, sum_2
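# Worked example (illustration only, not part of the sklearn frontend):
# with weighted class counts [2, 6] at a node, weighted_n_node_samples
# is 8 and the Gini impurity is 1 - (2**2 + 6**2) / 8**2 = 0.375, which
# is what Gini.node_impurity computes for a single output.
def _gini_example():
    counts = [2.0, 6.0]
    total = sum(counts)
    return 1.0 - sum(c * c for c in counts) / (total * total)  # 0.375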
| ivy/ivy/functional/frontends/sklearn/tree/_criterion.py/0 | {
"file_path": "ivy/ivy/functional/frontends/sklearn/tree/_criterion.py",
"repo_id": "ivy",
"token_count": 4389
} | 41 |
# global
import functools
# local
import ivy
from ivy.functional.frontends.tensorflow.func_wrapper import to_ivy_arrays_and_back
from ivy.func_wrapper import with_supported_dtypes
@with_supported_dtypes({"2.15.0 and below": ("float",)}, "tensorflow")
@to_ivy_arrays_and_back
def extract_patches(images, sizes, strides, rates, padding):
depth = images.shape[-1]
kernel_size = functools.reduce(lambda x, y: x * y, sizes, 1)
kernel_shape = [*sizes[1:-1], depth, kernel_size * depth]
eye = ivy.eye(kernel_size * depth)
filters = ivy.reshape(eye, kernel_shape).astype(images.dtype)
return ivy.conv_general_dilated(
images,
filters,
strides[1:-1],
padding,
dilations=rates[1:-1],
)
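# Sketch (illustration only, hypothetical helper): the identity kernel
# built above turns patch extraction into a convolution; each output
# channel copies one (row, col, depth) element of the receptive field,
# so a depth-1 image with 2x2 patches gives 4 channels per position.
def _extract_patches_example():
    images = ivy.reshape(ivy.arange(16, dtype="float32"), (1, 4, 4, 1))
    return extract_patches(
        images,
        sizes=[1, 2, 2, 1],
        strides=[1, 2, 2, 1],
        rates=[1, 1, 1, 1],
        padding="VALID",
    ).shape  # -> (1, 2, 2, 4)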
@to_ivy_arrays_and_back
def resize(
image, size, method="bilinear", preserve_aspect_ratio=False, antialias=False
):
unsqueezed = False
    if len(image.shape) == 3:
        image = ivy.expand_dims(image, axis=0)
        unsqueezed = True
    if preserve_aspect_ratio:
        height, width = image.shape[2:]
        target_height, target_width = size
        # scale so the resized image fits inside the requested size while
        # keeping the original aspect ratio (as tf.image.resize does)
        scale = min(target_height / height, target_width / width)
        new_height = int(height * scale)
        new_width = int(width * scale)
else:
new_height, new_width = size
if method == "bicubic":
method = "tf_bicubic"
elif method == "area":
method = "tf_area"
image = ivy.interpolate(
image,
(new_height, new_width),
mode=method,
align_corners=False,
antialias=antialias,
)
    if unsqueezed:
        return ivy.squeeze(image, axis=0)
return image
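# Usage sketch (illustration only, hypothetical helper): shapes assume
# the channel-first layout this implementation operates on; an unbatched
# CHW image is batched, resized, then squeezed back.
def _resize_example():
    image = ivy.zeros((3, 16, 16))
    return resize(image, (32, 32)).shape  # -> (3, 32, 32)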
| ivy/ivy/functional/frontends/tensorflow/image/cropping.py/0 | {
"file_path": "ivy/ivy/functional/frontends/tensorflow/image/cropping.py",
"repo_id": "ivy",
"token_count": 842
} | 42 |
# global
import ivy
import ivy.functional.frontends.tensorflow as tf_frontend
from ivy.functional.frontends.tensorflow import check_tensorflow_casting
from ivy.functional.frontends.tensorflow.func_wrapper import (
to_ivy_arrays_and_back,
map_raw_ops_alias,
to_ivy_dtype,
)
from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes
from ivy.utils.exceptions import IvyNotImplementedException
Acos = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.acos))
Acosh = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.acosh))
Add = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.add))
AddN = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.add_n))
AddV2 = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.add))
ArgMax = to_ivy_arrays_and_back(
with_unsupported_dtypes(
{"2.15.0 and below": ("complex",)},
"tensorflow",
)(
map_raw_ops_alias(
tf_frontend.math.argmax, kwargs_to_update={"dimension": "axis"}
)
)
)
ArgMin = to_ivy_arrays_and_back(
with_unsupported_dtypes(
{"2.15.0 and below": ("complex",)},
"tensorflow",
)(
map_raw_ops_alias(
tf_frontend.math.argmin, kwargs_to_update={"dimension": "axis"}
)
)
)
Asin = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.asin))
Atan = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.atan))
Atan2 = to_ivy_arrays_and_back(
with_unsupported_dtypes(
{"2.15.0 and below": "float16"},
"tensorflow",
)(map_raw_ops_alias(tf_frontend.math.atan2))
)
ConcatV2 = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.concat))
Conj = to_ivy_arrays_and_back(
with_supported_dtypes(
{
"2.13.0 and below": ("complex64", "complex128", "variant"),
},
"tensorflow",
)(
map_raw_ops_alias(
tf_frontend.math.conj,
kwargs_to_update={
"input": "x",
},
)
)
)
Cos = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.cos))
Cosh = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.cosh))
Cumprod = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.cumprod))
Cumsum = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.cumsum))
Digamma = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.digamma))
Div = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.divide))
Einsum = to_ivy_arrays_and_back(
with_supported_dtypes(
{
"2.15.0 and below": (
"bfloat16",
"complex128 ",
"complex64",
"float64",
"float32",
"float16",
"int64",
"int32",
),
},
"tensorflow",
)(map_raw_ops_alias(tf_frontend.general_functions.einsum))
)
Identity = to_ivy_arrays_and_back(
map_raw_ops_alias(tf_frontend.general_functions.identity)
)
IdentityN = to_ivy_arrays_and_back(
map_raw_ops_alias(tf_frontend.general_functions.identity_n)
)
Igamma = to_ivy_arrays_and_back(
with_supported_dtypes(
{
"2.15.0 and below": (
"float64",
"float32",
"half",
),
},
"tensorflow",
)(map_raw_ops_alias(tf_frontend.math.igamma))
)
LeakyRelu = to_ivy_arrays_and_back(
with_supported_dtypes(
{
"2.15.0 and below": ("bfloat16", "float16", "float32", "float64"),
},
"tensorflow",
)(
map_raw_ops_alias(
tf_frontend.nn.leaky_relu,
)
)
)
LessEqual = to_ivy_arrays_and_back(
with_unsupported_dtypes(
{
"2.15.0 and below": ("complex",),
},
"tensorflow",
)(map_raw_ops_alias(tf_frontend.math.less_equal))
)
Log1p = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.log1p))
LogSoftmax = to_ivy_arrays_and_back(
with_supported_dtypes(
{
"2.15.0 and below": (
"bfloat16",
"float32",
"float64",
),
},
"tensorflow",
)(map_raw_ops_alias(tf_frontend.math.log_softmax))
)
LogicalOr = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.logical_or))
MatrixDeterminant = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.linalg.det))
Max = to_ivy_arrays_and_back(
with_unsupported_dtypes(
{
"2.15.0 and below": ("complex",),
},
"tensorflow",
)(
map_raw_ops_alias(
tf_frontend.math.reduce_max,
kwargs_to_update={
"input": "input_tensor",
"keep_dims": "keepdims",
},
)
)
)
MaxPool3D = to_ivy_arrays_and_back(
with_supported_dtypes(
{
"2.15.0 and below": ("float32",),
},
"tensorflow",
)(
map_raw_ops_alias(
tf_frontend.nn.max_pool3d,
)
)
)
Maximum = to_ivy_arrays_and_back(
with_unsupported_dtypes(
{
"2.15.0 and below": ("complex",),
},
"tensorflow",
)(map_raw_ops_alias(tf_frontend.math.maximum))
)
Mean = to_ivy_arrays_and_back(
map_raw_ops_alias(
tf_frontend.math.reduce_mean,
kwargs_to_update={
"input": "input_tensor",
"keep_dims": "keepdims",
},
)
)
Min = to_ivy_arrays_and_back(
with_unsupported_dtypes(
{
"2.15.0 and below": ("complex",),
},
"tensorflow",
)(
map_raw_ops_alias(
tf_frontend.math.reduce_min,
kwargs_to_update={
"input": "input_tensor",
"keep_dims": "keepdims",
},
)
)
)
Mod = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.mod))
Mul = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.multiply))
Neg = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.negative))
Pow = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.pow))
RealDiv = to_ivy_arrays_and_back(
with_supported_dtypes(
{
"2.15.0 and below": (
"complex",
"bfloat16",
"float16",
"float64",
"float32",
),
},
"tensorflow",
)(map_raw_ops_alias(tf_frontend.general_functions.realdiv))
)
Reciprocal = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.reciprocal))
Relu = to_ivy_arrays_and_back(
with_unsupported_dtypes(
{
"2.15.0 and below": ("complex", "float16"),
},
"tensorflow",
)(map_raw_ops_alias(tf_frontend.nn.relu))
)
Relu6 = to_ivy_arrays_and_back(
with_unsupported_dtypes(
{
"2.15.0 and below": ("complex", "float16"),
},
"tensorflow",
)(
map_raw_ops_alias(
tf_frontend.nn.relu6,
)
)
)
Reshape = to_ivy_arrays_and_back(
map_raw_ops_alias(tf_frontend.general_functions.reshape)
)
Roll = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.roll))
ShapeN = to_ivy_arrays_and_back(
map_raw_ops_alias(tf_frontend.general_functions.shape_n)
)
Sigmoid = to_ivy_arrays_and_back(
map_raw_ops_alias(tf_frontend.keras.activations.sigmoid)
)
Sin = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.sin))
Size = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.general_functions.size))
Slice = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.slice))
Softmax = to_ivy_arrays_and_back(
with_unsupported_dtypes(
{
"2.15.0 and below": ("float16",),
},
"tensorflow",
)(map_raw_ops_alias(tf_frontend.nn.softmax))
)
Split = to_ivy_arrays_and_back(
map_raw_ops_alias(
tf_frontend.split, kwargs_to_update={"num_split": "num_or_size_splits"}
)
)
SquaredDifference = to_ivy_arrays_and_back(
with_supported_dtypes(
{
"2.15.0 and below": (
"complex",
"bfloat16",
"float16",
"float64",
"float32",
"int32",
"int64",
),
},
"tensorflow",
)(map_raw_ops_alias(tf_frontend.math.squared_difference))
)
Squeeze = to_ivy_arrays_and_back(
map_raw_ops_alias(tf_frontend.general_functions.squeeze)
)
Sub = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.subtract))
Tan = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.tan))
Tanh = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.tanh))
Tile = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.general_functions.tile))
Xlogy = to_ivy_arrays_and_back(map_raw_ops_alias(tf_frontend.math.xlogy))
Zeta = to_ivy_arrays_and_back(
with_supported_dtypes(
{
"2.15.0 and below": ("float32", "float64"),
},
"tensorflow",
)(map_raw_ops_alias(tf_frontend.math.zeta))
)
# --- Helpers --- #
# --------------- #
def _tf_to_ivy_ivy_arguments_for_conv(
    padding, ex_padding, strides, dilations, data_format
):
if data_format.find("C") == 1:
strides = strides[2:]
dilations = dilations[2:]
data_format = "channel_first"
pad_index = [4, 8]
else:
strides = strides[1:-1]
dilations = dilations[1:-1]
data_format = "channel_last"
pad_index = [2, 6]
if padding == "EXPLICIT":
padding = [
(ex_pading[i], ex_pading[i + 1])
for i in range(pad_index[0], pad_index[1], 2)
]
return padding, strides, dilations, data_format
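# Illustration only (hypothetical helper restating the rule above): for
# channel-first formats ("C" at index 1, e.g. NCHW) the leading batch and
# channel entries are stripped, [1, 1, 2, 2] -> [2, 2]; for channel-last
# formats (e.g. NHWC) the first and last entries are stripped instead,
# [1, 2, 2, 1] -> [2, 2].
def _strip_batch_and_channel(values, data_format):
    return values[2:] if data_format.find("C") == 1 else values[1:-1]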
# --- Main --- #
# ------------ #
@to_ivy_arrays_and_back
def AccumulateNV2(inputs, shape, name="AccumulateNV2"):
# TODO
raise IvyNotImplementedException
@to_ivy_arrays_and_back
def Angle(
*,
input,
Tout=ivy.float32,
name="Angle",
):
Tout = ivy.as_ivy_dtype(Tout) if Tout is not None else ivy.float32
return ivy.astype(ivy.angle(input), Tout)
@with_unsupported_dtypes(
{
"2.15.0 and below": (
"float16",
"bool",
"bfloat16",
)
},
"tensorflow",
)
@to_ivy_arrays_and_back
def ApproximateEqual(
*,
x,
y,
tolerance=1e-05,
name="ApproximateEqual",
):
x, y = check_tensorflow_casting(x, y)
return ivy.abs(x - y) < tolerance
@to_ivy_arrays_and_back
def Atanh(*, x, name="Atanh"):
return ivy.atanh(x)
@to_ivy_arrays_and_back
def BandedTriangularSolve(
matrix,
rhs,
lower=True,
adjoint=False,
name="BandedTriangularSolve",
):
# TODO
raise IvyNotImplementedException
@to_ivy_arrays_and_back
def BatchMatMul(x, y, adj_x=False, adj_y=False, name="BatchMatMul"):
# TODO
raise IvyNotImplementedException
@to_ivy_arrays_and_back
def BatchMatMulV2(x, y, adj_x=False, adj_y=False, name="BatchMatMulV2"):
# TODO
raise IvyNotImplementedException
@to_ivy_arrays_and_back
def BatchMatMulV3(x, y, Tout=ivy.Dtype, adj_x=False, adj_y=False, name="BatchMatMulV3"):
# TODO
raise IvyNotImplementedException
@to_ivy_arrays_and_back
def BitwiseAnd(*, x, y, name="BitwiseAnd"):
x, y = check_tensorflow_casting(x, y)
return ivy.bitwise_and(x, y)
@to_ivy_arrays_and_back
def BitwiseOr(*, x, y, name="BitwiseOr"):
x, y = check_tensorflow_casting(x, y)
return ivy.bitwise_or(x, y)
@to_ivy_arrays_and_back
def BitwiseXor(*, x, y, name="BitwiseXor"):
x, y = check_tensorflow_casting(x, y)
return ivy.bitwise_xor(x, y)
@to_ivy_arrays_and_back
def BroadcastTo(*, input, shape, name="BroadcastTo"):
return ivy.broadcast_to(input, shape=shape)
@to_ivy_arrays_and_back
def Ceil(*, x, name=None):
return ivy.ceil(x)
@to_ivy_arrays_and_back
def Cholesky(*, input, name="Cholesky"):
return ivy.astype(ivy.cholesky(input), input.dtype)
@to_ivy_arrays_and_back
def Complex(real, imag, Tout=ivy.complex64, name="Complex"):
# TODO
raise IvyNotImplementedException
@to_ivy_arrays_and_back
def Concat(*, concat_dim, values, name="Concat"):
return ivy.concat(values, axis=concat_dim)
@to_ivy_arrays_and_back
def Conv2D(
*,
input,
filter,
strides,
padding,
use_cudnn_on_gpu,
explicit_paddings,
data_format="NHWC",
dilations=[1, 1, 1, 1],
name="Conv2D",
):
padding, strides, dilations, data_format = _tf_to_ivy_ivy_arguments_for_conv(
padding, explicit_paddings, strides, dilations, data_format
)
return ivy.conv_general_dilated(
input,
filter,
strides,
padding,
data_format=data_format,
dilations=dilations,
dims=2,
)
@to_ivy_arrays_and_back
def Conv3D(
*,
input,
filter,
strides,
padding,
data_format="NDHWC",
dilations=[1, 1, 1, 1, 1],
name="Conv3D",
):
# ivy.backends.tensorflow expects strides and dilations to be
# a single integer value or a list of 3 values whereas the raw op
# expects a list of 5 values
if data_format == "NDHWC":
strides = strides[1:-1]
dilations = dilations[1:-1]
elif data_format == "NCDHW":
strides = strides[2:]
dilations = dilations[2:]
return tf_frontend.nn.conv3d(
input,
filter,
strides,
padding,
data_format=data_format,
dilations=dilations,
name=name,
)
@to_ivy_arrays_and_back
def Cross(*, a, b, name="Cross"):
a, b = check_tensorflow_casting(a, b)
return ivy.cross(a, b)
@to_ivy_arrays_and_back
def CumulativeLogsumexp(
x, axis, exclusive=False, reverse=False, name="CumulativeLogsumexp"
):
# TODO
raise IvyNotImplementedException
@to_ivy_arrays_and_back
def DebugGradientIdentity(input, name="DebugGradientIdentity"):
# TODO
raise IvyNotImplementedException
@to_ivy_arrays_and_back
def Diag(*, diagonal, name="Diag"):
return ivy.astype(ivy.diag(diagonal), diagonal.dtype)
@with_supported_dtypes(
{"2.15.0 and below": ("bfloat16", "float16", "float32", "float64")},
"tensorflow",
)
@to_ivy_arrays_and_back
def Elu(features, name=None):
zeros = ivy.zeros_like(features, dtype=ivy.dtype(features))
ones = ivy.ones_like(features, dtype=ivy.dtype(features))
ret_val = ivy.where(
# if x > 0 => x; else e^x - 1
features > zeros,
features,
ivy.subtract(ivy.exp(features), ones),
)
return ret_val
@to_ivy_arrays_and_back
def Equal(*, x, y, incompatible_shape_error=True, name="Equal"):
x, y = check_tensorflow_casting(x, y)
if incompatible_shape_error:
return ivy.equal(x, y)
try:
return ivy.equal(x, y)
except (ivy.utils.exceptions.IvyError, ivy.utils.exceptions.IvyBackendException):
return ivy.array(False)
@to_ivy_arrays_and_back
def EuclideanNorm(*, input, axis, keep_dims=False, name="EuclideanNorm"):
return ivy.astype(
ivy.vector_norm(input, axis=axis, keepdims=keep_dims), input.dtype
)
@to_ivy_arrays_and_back
def Exp(*, x, name="Exp"):
return ivy.exp(x)
@to_ivy_arrays_and_back
def Expm1(*, x, name="Expm1"):
return ivy.expm1(x)
@to_ivy_arrays_and_back
def FFT(*, input, name="FFT"):
return ivy.astype(ivy.fft(input, -1), input.dtype)
@to_ivy_arrays_and_back
def FFT2D(*, input, name="FFT2D"):
return ivy.astype(ivy.fft2(input, dim=(-2, -1)), input.dtype)
@to_ivy_arrays_and_back
def FFT3D(*, input, name="FFT3D"):
fft_result = ivy.fft(input, -1)
fft_result = ivy.fft(fft_result, -2)
fft_result = ivy.fft(fft_result, -3)
return ivy.astype(fft_result, input.dtype)
@to_ivy_arrays_and_back
def Fill(*, dims, value, name="Full"):
return ivy.full(dims, value)
@to_ivy_arrays_and_back
def Floor(*, x, name="Floor"):
return ivy.floor(x)
@to_ivy_arrays_and_back
def FloorDiv(*, x, y, name="FloorDiv"):
x, y = check_tensorflow_casting(x, y)
return ivy.floor_divide(x, y)
@to_ivy_arrays_and_back
def FloorMod(*, x, y, name="FloorMod"):
x, y = check_tensorflow_casting(x, y)
return ivy.remainder(x, y)
@to_ivy_arrays_and_back
def Gather(*, params, indices, validate_indices=None, name="Gather"):
return ivy.gather(params, indices, axis=0, batch_dims=0)
@with_supported_dtypes(
{"2.15.0 and below": ("int32", "int64", "float32", "float64")},
"tensorflow",
)
@to_ivy_arrays_and_back
def GatherNd(*, params, indices, name=None):
return ivy.gather_nd(params, indices, batch_dims=0)
@to_ivy_arrays_and_back
def Greater(*, x, y, name="Greater"):
x, y = check_tensorflow_casting(x, y)
return ivy.greater(x, y)
@to_ivy_arrays_and_back
def GreaterEqual(*, x, y, name="GreaterEqual"):
x, y = check_tensorflow_casting(x, y)
return ivy.greater_equal(x, y)
@to_ivy_arrays_and_back
def Imag(
*,
input,
Tout=ivy.float32,
name="Imag",
):
Tout = ivy.as_ivy_dtype(Tout) if Tout is not None else ivy.float32
return ivy.astype(ivy.imag(input), Tout)
@to_ivy_arrays_and_back
def Inv(*, x, name="Inv"):
return ivy.astype(ivy.reciprocal(x), x.dtype)
@to_ivy_arrays_and_back
def InvGrad(*, y, dy, name="InvGrad"):
return ivy.multiply(ivy.negative(dy), ivy.multiply(y, y))
@to_ivy_arrays_and_back
def Invert(*, x, name="Invert"):
return ivy.bitwise_invert(x)
@to_ivy_arrays_and_back
def LeftShift(*, x, y, name="LeftShift"):
return ivy.bitwise_left_shift(x, y)
@to_ivy_arrays_and_back
def Less(*, x, y, name="Less"):
x, y = check_tensorflow_casting(x, y)
return ivy.less(x, y)
@to_ivy_arrays_and_back
def LinSpace(*, start, stop, num, name=None):
return ivy.linspace(start, stop, num)
@to_ivy_arrays_and_back
def Log(*, x, name="Log"):
return ivy.log(x)
@to_ivy_arrays_and_back
def LogicalNot(*, x, name="LogicalNot"):
return ivy.logical_not(x)
@to_ivy_arrays_and_back
def MatMul(*, a, b, transpose_a=False, transpose_b=False, name="MatMul"):
a, b = check_tensorflow_casting(a, b)
return ivy.matmul(a, b, transpose_a=transpose_a, transpose_b=transpose_b)
@to_ivy_arrays_and_back
def MatrixInverse(*, input, adjoint=False, name="MatrixInverse"):
return ivy.inv(input, adjoint=adjoint)
@to_ivy_arrays_and_back
def Minimum(*, x, y, name="Minimum"):
return ivy.minimum(x, y)
@to_ivy_arrays_and_back
def NotEqual(*, x, y, incompatible_shape_error=True, name="NotEqual"):
x, y = check_tensorflow_casting(x, y)
if incompatible_shape_error:
return ivy.not_equal(x, y)
try:
return ivy.not_equal(x, y)
except (ivy.utils.exceptions.IvyError, ivy.utils.exceptions.IvyBackendException):
return ivy.array(True)
@to_ivy_arrays_and_back
def NthElement(*, input, n, reverse=False, name="NthElement"):
return ivy.astype(ivy.sort(input, descending=reverse)[..., n], input.dtype)
@to_ivy_arrays_and_back
def OnesLike(*, x, name="OnesLike"):
return ivy.ones_like(x)
@to_ivy_arrays_and_back
def Pack(*, values, axis=0, name="Pack"):
return ivy.stack(values, axis=axis)
@to_ivy_arrays_and_back
def Pad(*, input, paddings, name="Pad"):
return ivy.constant_pad(input, paddings.to_list())
@to_ivy_arrays_and_back
def PadV2(*, input, paddings, constant_values, name="PadV2"):
return ivy.constant_pad(input, paddings.to_list(), value=constant_values)
@to_ivy_arrays_and_back
def Prod(*, input, axis, keep_dims=False, name="Prod"):
return ivy.astype(ivy.prod(input, axis=axis, keepdims=keep_dims), input.dtype)
@to_ivy_arrays_and_back
def Real(input, Tout=ivy.float32, name="Real"):
# TODO
raise IvyNotImplementedException
@to_ivy_arrays_and_back
def Reverse(*, tensor, dims, name="Reverse"):
ret = tensor
    for axis, flip in enumerate(dims):
        if flip:
            ret = ivy.flip(ret, axis=axis)
return ret
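# Usage sketch (illustration only, hypothetical helper): dims is a
# per-axis boolean mask, so only axes whose entry is True are flipped.
def _reverse_example():
    x = ivy.array([[1, 2], [3, 4]])
    return Reverse(tensor=x, dims=[False, True])  # -> [[2, 1], [4, 3]]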
@to_ivy_arrays_and_back
def RightShift(*, x, y, name="RightShift"):
return ivy.bitwise_right_shift(x, y)
@to_ivy_arrays_and_back
def Round(*, x, name="Round"):
return ivy.round(x)
@to_ivy_arrays_and_back
def Rsqrt(*, x, name="Rsqrt"):
return ivy.sqrt(ivy.reciprocal(x))
@to_ivy_arrays_and_back
def Shape(*, input, output_type=ivy.int32, name="Shape"):
output_type = to_ivy_dtype(output_type)
return ivy.astype(ivy.shape(input, as_array=True), output_type, copy=False)
@with_unsupported_dtypes(
{"2.15.0 and below": ("unsigned",)},
"tensorflow",
)
@to_ivy_arrays_and_back
def Sign(*, x, name="Sign"):
return ivy.sign(x, np_variant=False)
@to_ivy_arrays_and_back
def Sinh(*, x, name="Sinh"):
return ivy.sinh(x)
@to_ivy_arrays_and_back
def Softplus(*, features, name="Softplus"):
return ivy.softplus(features)
# Softsign
@to_ivy_arrays_and_back
def Softsign(*, features, name="Softsign"):
return ivy.softsign(features)
@to_ivy_arrays_and_back
def SplitV(*, value, size_splits, axis, num_split, name="SplitV"):
return ivy.split(value, num_or_size_splits=size_splits, axis=axis)
@to_ivy_arrays_and_back
def Sqrt(*, x, name="Sqrt"):
return ivy.sqrt(x)
@to_ivy_arrays_and_back
def Square(*, x, name="Square"):
return ivy.square(x)
@to_ivy_arrays_and_back
def Sum(*, input, axis, keep_dims=False, name="Sum"):
return ivy.astype(ivy.sum(input, axis=axis, keepdims=keep_dims), input.dtype)
@with_supported_dtypes(
{"2.15.0 and below": ("float64", "float128", "halfcomplex64", "complex128")},
"tensorflow",
)
@to_ivy_arrays_and_back
def Svd(*, input, full_matrices=False, compute_uv=True, name=None):
return ivy.svd(input, compute_uv=compute_uv, full_matrices=full_matrices)
@to_ivy_arrays_and_back
def TanhGrad(*, y, dy, name="TanhGrad"):
return ivy.multiply(dy, ivy.subtract(1, ivy.multiply(y, y)))
@to_ivy_arrays_and_back
def Transpose(*, x, perm, name="Transpose"):
ret = ivy.permute_dims(x, axes=perm)
return ret
@to_ivy_arrays_and_back
def TruncateDiv(*, x, y, name="TruncateDiv"):
return ivy.astype(ivy.trunc_divide(x, y), x.dtype)
@with_unsupported_dtypes({"2.15.0 and below": ("float16", "bfloat16")}, "tensorflow")
@to_ivy_arrays_and_back
def Unpack(*, value, num, axis=0, name="Unpack"):
return ivy.unstack(value, axis=axis)[:num]
@with_supported_dtypes(
{
"2.15.0 and below": (
"int8",
"int16",
"int32",
"int64",
"float32",
"float64",
"complex64",
"complex128",
)
},
"tensorflow",
)
@to_ivy_arrays_and_back
def UnsortedSegmentProd(*, data, segment_ids, num_segments, name=None):
data = ivy.array(data)
segment_ids = ivy.array(segment_ids)
ivy.utils.assertions.check_equal(
list(segment_ids.shape), [list(data.shape)[0]], as_array=False
)
ivy.utils.assertions.check_greater(int(num_segments), int(ivy.max(segment_ids)))
shape = list(ivy.shape(data))
shape[0] = int(num_segments)
x = ivy.ones(shape, dtype=data.dtype)
for i in range((segment_ids).shape[0]):
x[segment_ids[i]] = ivy.multiply(x[segment_ids[i]], data[i])
return x
@to_ivy_arrays_and_back
def Xdivy(*, x, y, name="Xdivy"):
if (x == 0).all():
return 0.0
return ivy.divide(x, y)
@with_unsupported_dtypes({"2.15.0 and below": ("bfloat16",)}, "tensorflow")
@to_ivy_arrays_and_back
def Xlog1py(*, x, y, name="Xlog1py"):
    # elementwise x * log1p(y), returning 0 where x == 0
    return ivy.where(x == 0, ivy.zeros_like(x), ivy.multiply(x, ivy.log1p(y)))
@to_ivy_arrays_and_back
def ZerosLike(*, x, name="ZerosLike"):
return ivy.zeros_like(x)
| ivy/ivy/functional/frontends/tensorflow/raw_ops.py/0 | {
"file_path": "ivy/ivy/functional/frontends/tensorflow/raw_ops.py",
"repo_id": "ivy",
"token_count": 11620
} | 43 |
# global
import ivy
from collections import OrderedDict
from typing import Any, Dict, Iterator, List, Optional, Set, Tuple, Callable
# local
from ivy.functional.frontends.torch.nn.parameter import Parameter
from ivy.functional.frontends.torch.tensor import Tensor
class Module(ivy.Module):
_version: int = 1
training: bool
_parameters: Dict[str, Optional[Parameter]]
_modules: Dict[str, Optional["Module"]]
def __init__(self, *args, device=None, devices=None, **kwargs) -> None:
super().__init__(
self,
*args,
device=device,
devices=devices,
training=True,
build_mode="explicit",
dynamic_backend=True,
**kwargs,
)
super().__setattr__("_frontend_module", True)
super().__setattr__(
"_attr_mapping", {"_parameters": "v", "_modules": "module_dict"}
)
def _create_variables(self, device=None, dtype=None):
# Create variables stored in the `__dict__` that were set
# using direct `__setattr__` e.g. self.weight = ...
v = ivy.Container(
OrderedDict(
[
(k.replace(".", "/"), v)
for k, v in self.__dict__.items()
                    if isinstance(v, Parameter) and not k.startswith("_")
]
)
)
        # Create variables that were added using `register_parameter`,
        # since those would appear in `self._v`
v = (
ivy.Container(
OrderedDict(
(
{
_k.replace(".", "/"): _v
for (_k, _v) in self._v.items()
if _k.replace(".", "/") not in v
and not isinstance(_v, ivy.Container)
}
),
**v,
)
)
if self._v
else v
)
return v
def _build(self, *args, **kwargs):
for module in self.__dict__.values():
if isinstance(module, Module) and module is not self:
if not module._built:
module.build(
*module._args,
dynamic_backend=module._dynamic_backend,
**module._kwargs,
)
return True
def _replace_update_v(self, new_v, native=None):
from ivy.functional.ivy.gradients import _is_variable
native = ivy.default(native, self)
for k, v in new_v.items():
if isinstance(v, ivy.Container):
# noinspection PyProtectedMember
native.module_dict[k] = self._replace_update_v(v, native.module_dict[k])
elif isinstance(v, Parameter):
# noinspection PyProtectedMember
native.__setattr__(k, v)
elif _is_variable(v):
native.__setattr__(k, Parameter(v))
elif isinstance(v, Tensor):
# noinspection PyProtectedMember
native.__setattr__(k, Parameter(v, requires_grad=v.requires_grad))
else:
raise ivy.utils.exceptions.IvyException(
f"found item in variable container {v} which was neither a sub"
" ivy.Container nor a variable."
)
return native
_update_v = _replace_update_v
def forward(self, *input: Any) -> None:
raise NotImplementedError(
f'Module [{type(self).__name__}] is missing the required "forward" function'
)
def call(self, inputs, *args, training=None, mask=None, **kwargs):
if isinstance(inputs, (list, tuple)):
try:
return self.forward(*inputs, *args, **kwargs)
except Exception:
return self.forward(inputs, *args, **kwargs)
else:
return self.forward(inputs, *args, **kwargs)
def _forward(self, *a, **kw):
ret = self._call_impl(*a, **kw)
return ret
def add_module(self, name: str, module: Optional["Module"]) -> None:
if not isinstance(module, Module) and module is not None:
raise TypeError(f"{type(module)} is not a Module subclass")
elif not isinstance(name, str):
raise TypeError(f"module name should be a string. Got {type(name)}")
elif hasattr(self, name) and name not in self._modules:
raise KeyError(f"attribute '{name}' already exists")
elif "." in name:
raise KeyError(f'module name can\'t contain ".", got: {name}')
elif name == "":
raise KeyError('module name can\'t be empty string ""')
self._modules[name] = module
super().__setattr__(name, module)
def apply(self, fn: Callable[["Module"], None]):
for module in self.children():
module.apply(fn)
fn(self)
return self
def register_buffer(self, name: str, value: Optional["Tensor"]) -> None:
super().register_buffer(name, value)
def register_parameter(self, name: str, value: Optional["Parameter"]) -> None:
super().register_parameter(name, value)
def register_module(self, name: str, module: Optional["Module"]) -> None:
r"""Alias for :func:`add_module`."""
self.add_module(name, module)
def get_submodule(self, target: str) -> "Module":
if target == "":
return self
atoms: List[str] = target.split(".")
mod: Module = self
for item in atoms:
if not hasattr(mod, item):
raise AttributeError(
mod._get_name() + " has no attribute `" + item + "`"
)
mod = getattr(mod, item)
if not isinstance(mod, Module):
raise TypeError("`" + item + "` is not an nn.Module")
return mod
def get_parameter(self, target: str):
target = target.replace(".", "/")
return self.v[target]
def _named_members(
self, get_members_fn, prefix="", recurse=True, remove_duplicate: bool = True
):
r"""Helper method for yielding various names + members of modules."""
memo = set()
modules = (
self.named_modules(prefix=prefix, remove_duplicate=remove_duplicate)
if recurse
else [(prefix, self)]
)
for module_prefix, module in modules:
members = get_members_fn(module)
for k, v in members:
if v is None or id(v) in memo or not isinstance(v, Parameter):
continue
if remove_duplicate:
memo.add(id(v))
name = module_prefix + ("." if module_prefix else "") + k
yield name, v
def parameters(self, recurse: bool = True) -> Iterator[Parameter]:
for _, param in self.named_parameters(recurse=recurse):
yield param
def named_parameters(
self, prefix: str = "", recurse: bool = True, remove_duplicate: bool = True
) -> Iterator[Tuple[str, Parameter]]:
if not getattr(self, "_built", False):
self.build(
*self._args, dynamic_backend=self._dynamic_backend, **self._kwargs
)
gen = self._named_members(
lambda module: module.v.items(),
prefix=prefix,
recurse=recurse,
remove_duplicate=remove_duplicate,
)
yield from gen
def children(self) -> Iterator["Module"]:
for _, module in self.named_children():
yield module
def named_children(self) -> Iterator[Tuple[str, "Module"]]:
if not getattr(self, "_built", False):
self.build(
*self._args, dynamic_backend=self._dynamic_backend, **self._kwargs
)
memo = set()
for name, module in self._module_dict.items():
if module is not None and id(module) not in memo:
memo.add(id(module))
yield name, module
def modules(self) -> Iterator["Module"]:
for _, module in self.named_modules():
yield module
def named_modules(
self,
memo: Optional[Set["Module"]] = None,
prefix: str = "",
remove_duplicate: bool = True,
):
if not getattr(self, "_built", False):
self.build(
*self._args, dynamic_backend=self._dynamic_backend, **self._kwargs
)
if memo is None:
memo = set()
if id(self) not in memo:
if remove_duplicate:
memo.add(id(self))
yield prefix, self
for name, module in self._module_dict.items():
if module is None:
continue
submodule_prefix = prefix + ("." if prefix else "") + name
yield from module.named_modules(
memo, submodule_prefix, remove_duplicate
)
def requires_grad_(self, requires_grad: bool = True):
for p in self.parameters():
p.requires_grad_(requires_grad)
return self
def _get_name(self):
return self.__class__.__name__
def _extra_repr(self) -> str:
return ""
def _call_impl(self, *args, **kwargs):
return self.call(*args, **kwargs)
def __getattribute__(self, name: str) -> Any:
if name == "__dict__":
return super().__getattribute__(name)
if "_module_dict" in self.__dict__:
modules = self.__dict__["_module_dict"]
if name in modules:
return modules[name]
if "_buffers" in self.__dict__:
buffers = self.__dict__["_buffers"]
if name in buffers:
return buffers[name]
if "_v" in self.__dict__:
v = self.__dict__["_v"]
if name in v:
return v[name]
# Adding this attribute mapping s.t if someone tries
# to retrieve self._modules/self._parameters, we
# can handle that here
if "_attr_mapping" in self.__dict__:
mapping = self.__dict__["_attr_mapping"]
if name in mapping:
return super().__getattribute__(mapping[name])
return super().__getattribute__(name)
def __setattr__(self, name, value) -> None:
def remove_from(*dicts_or_sets):
for d in dicts_or_sets:
if name in d:
if isinstance(d, dict):
del d[name]
else:
d.discard(name)
params = self.__dict__.get("_v")
if params is not None and name in params and isinstance(value, Parameter):
remove_from(self.__dict__, self._buffers, self._module_dict)
self.register_parameter(name, value)
super().__setattr__(name, value)
else:
super().__setattr__(name, value)
def __repr__(self):
# We treat the extra repr like the sub-module, one item per line
extra_lines = []
extra_repr = self._extra_repr()
# empty string will be split into list ['']
if extra_repr:
extra_lines = extra_repr.split("\n")
child_lines = []
for key, module in self._module_dict.items():
mod_str = repr(module)
mod_str = self._addindent(mod_str, 2)
child_lines.append("(" + key + "): " + mod_str)
lines = extra_lines + child_lines
main_str = self._get_name() + "("
if lines:
# simple one-liner info, which most builtin Modules will use
if len(extra_lines) == 1 and not child_lines:
main_str += extra_lines[0]
else:
main_str += "\n " + "\n ".join(lines) + "\n"
main_str += ")"
return main_str
def __dir__(self):
module_attrs = dir(self.__class__)
attrs = list(self.__dict__.keys())
parameters = list(self._v.keys())
modules = list(self._module_dict.keys())
buffers = list(self._buffers.keys())
keys = module_attrs + attrs + parameters + modules + buffers
# Eliminate attrs that are not legal Python variable names
keys = [key for key in keys if not key[0].isdigit()]
return sorted(keys)
def __getstate__(self):
state = self.__dict__.copy()
state.pop("_compiled_call_impl", None)
return state
def __setstate__(self, state):
self.__dict__.update(state)
| ivy/ivy/functional/frontends/torch/nn/modules/module.py/0 | {
"file_path": "ivy/ivy/functional/frontends/torch/nn/modules/module.py",
"repo_id": "ivy",
"token_count": 6280
} | 44 |
import ivy
from ivy.functional.frontends.xgboost.objective.regression_loss import (
LogisticRegression,
)
from ivy.functional.frontends.xgboost.linear.updater_coordinate import (
coordinate_updater,
)
from copy import deepcopy
class GBLinear:
def __init__(self, params=None, compile=False, cache=None):
# we start boosting from zero
self.num_boosted_rounds = 0
# default parameter
        # xgboost provides other options for it, but the way to modify it remains
        # undocumented in the Python API
self.updater = coordinate_updater
# LogisticRegression corresponds to 'binary:logistic' objective in terms of
# calculations
# In xgboost LogisticClassification is used, but it simply subclasses
# LogisticRegression redefining the method which returns the name of objective
self.obj = LogisticRegression()
self.base_score = self.obj.prob_to_margin(params["base_score"])
        # when weights for groups are not provided, this equals the number of
        # instances in the data
        # TODO: add weight sum calculation from provided weights; for now, always
        # assume the default behaviour
self.num_inst = params["num_instances"]
self.sum_instance_weight_ = self.num_inst
self.scale_pos_weight = (
1.0 if not params["scale_pos_weight"] else params["scale_pos_weight"]
)
# set to True, because we're assuming default behaviour for group weights
self.is_null_weights = True
self.is_converged_ = False
self.tolerance = 0.0
self.num_output_group = params["num_output_group"]
self.num_feature = params["num_feature"]
        # xgboost stores weights as a flat vector, but here they are kept as a
        # 2D matrix; this simplifies the calculations while the math stays the same.
        # 1 is added to the first dim because xgboost stores weights and biases
        # jointly
self.weight = ivy.zeros(
(self.num_feature + 1, self.num_output_group), dtype=ivy.float32
)
# used to calculate convergence(comparing max difference of weights to
# tolerance)
self.prev_weight = deepcopy(self.weight)
# if base margin is None, use base_score instead
self.base_margin = (
params["base_margin"] if params["base_margin"] else self.base_score
)
# setup lr and denormalize regularization params for updater
self.learning_rate = params["learning_rate"]
self.reg_lambda_denorm = self.sum_instance_weight_ * params["reg_lambda"]
self.reg_alpha_denorm = self.sum_instance_weight_ * params["reg_alpha"]
# compilation block
self.compile = compile
if self.compile:
# don't enable native compilation for torch, bc it's already fast enough
# and this only increases the compilation time
            backend_compile = ivy.current_backend_str() != "torch"
self._comp_pred = ivy.trace_graph(_pred, backend_compile=backend_compile)
self._comp_get_gradient = ivy.trace_graph(
_get_gradient, backend_compile=backend_compile, static_argnums=(0,)
)
self._comp_updater = ivy.trace_graph(
self.updater, backend_compile=backend_compile
)
# run each function to compile it
# this process doesn't affect the training
pred = self._comp_pred(cache[0], self.weight, self.base_margin)
gpair = self._comp_get_gradient(
self.obj, pred, cache[1], self.scale_pos_weight
)
self._comp_updater(
gpair,
cache[0],
self.learning_rate,
self.weight,
self.num_feature,
0,
self.reg_alpha_denorm,
self.reg_lambda_denorm,
)
def boosted_rounds(self):
return self.num_boosted_rounds
def model_fitted(self):
return self.num_boosted_rounds != 0
def check_convergence(self):
if self.tolerance == 0.0:
return False
elif self.is_converged_:
return True
largest_dw = ivy.max(ivy.abs(self.weight - self.prev_weight))
self.prev_weight = self.weight.copy()
self.is_converged_ = largest_dw <= self.tolerance
return self.is_converged_
# used to obtain raw predictions
def pred(self, data):
args = (data, self.weight, self.base_margin)
if self.compile:
return self._comp_pred(*args)
else:
return _pred(*args)
def get_gradient(self, pred, label):
args = (self.obj, pred, label, self.scale_pos_weight)
if self.compile:
return self._comp_get_gradient(*args)
else:
return _get_gradient(*args)
def do_boost(self, data, gpair, iter):
if not self.check_convergence():
self.num_boosted_rounds += 1
args = (
gpair,
data,
self.learning_rate,
self.weight,
self.num_feature,
iter,
self.reg_alpha_denorm,
self.reg_lambda_denorm,
)
if self.compile:
self.weight = self._comp_updater(*args)
else:
self.weight = self.updater(*args)
# --- Helpers --- #
# --------------- #
def _get_gradient(obj, pred, label, scale_pos_weight):
p = obj.pred_transform(pred)
# because we assume default behaviour for group weights this always equals to 1
# ToDo: add handling for group weights case
w = 1.0
# group weights for positive class are scaled
w_scaled = ivy.where(label == 1.0, w * scale_pos_weight, w)
return ivy.hstack(
[
obj.first_order_gradient(p, label) * w_scaled,
obj.second_order_gradient(p, label) * w_scaled,
]
)
def _pred(dt, w, base):
return ivy.matmul(dt, w[:-1]) + w[-1] + base
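# Shape sketch (illustration only, hypothetical helper): with
# num_feature = f and num_output_group = g, w has shape (f + 1, g);
# w[:-1] holds the feature weights and w[-1] the bias row, so data of
# shape (n, f) produces raw margins of shape (n, g).
def _pred_shape_example():
    dt = ivy.ones((4, 3))
    w = ivy.zeros((3 + 1, 1))
    return _pred(dt, w, 0.0).shape  # -> (4, 1)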
| ivy/ivy/functional/frontends/xgboost/gbm/gbm.py/0 | {
"file_path": "ivy/ivy/functional/frontends/xgboost/gbm/gbm.py",
"repo_id": "ivy",
"token_count": 2723
} | 45 |
from .activations import *
from .constants import *
from .creation import *
from .data_type import *
from .device import *
from .elementwise import *
from .general import *
from .gradients import *
from .layers import *
from .linear_algebra import *
from .losses import *
from .manipulation import *
from .meta import *
from .nest import *
from .norms import *
from .random import *
from .searching import *
from .set import *
from .sorting import *
from .statistical import *
from .sparse_array import *
from .utility import *
import types
__all__ = [
name
for name, thing in globals().items()
if not (
name.startswith("_")
or name == "ivy"
or (callable(thing) and "ivy" not in thing.__module__)
or (isinstance(thing, types.ModuleType) and "ivy" not in thing.__name__)
)
]
del types
| ivy/ivy/functional/ivy/experimental/__init__.py/0 | {
"file_path": "ivy/ivy/functional/ivy/experimental/__init__.py",
"repo_id": "ivy",
"token_count": 292
} | 46 |
# local
from typing import Optional, Union, Sequence
import ivy
from ivy.func_wrapper import (
handle_out_argument,
to_native_arrays_and_back,
inputs_to_native_shapes,
handle_nestable,
infer_dtype,
handle_device,
handle_backend_invalid,
)
from ivy.utils.exceptions import handle_exceptions
# dirichlet
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_out_argument
@to_native_arrays_and_back
@handle_device
def dirichlet(
alpha: Union[ivy.Array, ivy.NativeArray, float, Sequence[float]],
/,
*,
size: Optional[Union[ivy.Shape, ivy.NativeShape]] = None,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
seed: Optional[int] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Draw size samples of dimension k from a Dirichlet distribution. A
Dirichlet- distributed random variable can be seen as a multivariate
generalization of a Beta distribution. The Dirichlet distribution is a
conjugate prior of a multinomial distribution in Bayesian inference.
Parameters
----------
alpha
Sequence of floats of length k
size
optional int or tuple of ints, Output shape. If the given shape is,
e.g., (m, n), then m * n * k samples are drawn. Default is None,
in which case a vector of length k is returned.
dtype
output array data type. If ``dtype`` is ``None``, the output array data
type will be the default floating-point data type. Default ``None``
seed
A python integer. Used to create a random seed distribution
out
optional output array, for writing the result to.
Returns
-------
ret
The drawn samples, of shape (size, k).
Examples
--------
>>> alpha = [1.0, 2.0, 3.0]
>>> ivy.dirichlet(alpha)
ivy.array([0.10598304, 0.21537054, 0.67864642])
>>> alpha = [1.0, 2.0, 3.0]
>>> ivy.dirichlet(alpha, size = (2,3))
ivy.array([[[0.48006698, 0.07472073, 0.44521229],
[0.55479872, 0.05426367, 0.39093761],
[0.19531053, 0.51675832, 0.28793114]],
[[0.12315625, 0.29823365, 0.5786101 ],
[0.15564976, 0.50542368, 0.33892656],
[0.1325352 , 0.44439589, 0.42306891]]])
"""
return ivy.current_backend().dirichlet(
alpha,
size=size,
dtype=dtype,
seed=seed,
out=out,
)
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_out_argument
@inputs_to_native_shapes
@to_native_arrays_and_back
@handle_device
def beta(
a: Union[float, ivy.NativeArray, ivy.Array],
b: Union[float, ivy.NativeArray, ivy.Array],
/,
*,
shape: Optional[Union[ivy.Shape, ivy.NativeShape]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
seed: Optional[int] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return an array filled with random values sampled from a beta
distribution.
Parameters
----------
a
Alpha parameter of the beta distribution.
b
Beta parameter of the beta distribution.
shape
If the given shape is, e.g ``(m, n, k)``, then ``m * n * k`` samples are drawn
Can only be specified when ``mean`` and ``std`` are numeric values, else
exception will be raised.
Default is ``None``, where a single value is returned.
device
device on which to create the array. 'cuda:0',
'cuda:1', 'cpu' etc. (Default value = None).
dtype
output array data type. If ``dtype`` is ``None``, the output array data
type will be the default floating point data type. Default ``None``
seed
A python integer. Used to create a random seed distribution
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
Returns an array with the given shape filled with random values sampled from
a beta distribution.
"""
return ivy.current_backend().beta(
a, b, shape=shape, device=device, dtype=dtype, seed=seed, out=out
)
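# A minimal usage sketch for ``beta`` (not part of the original module). It
# assumes a backend has already been selected, e.g. ``ivy.set_backend("numpy")``;
# the exact sampled values depend on the active backend's RNG.
#
# >>> x = ivy.beta(2.0, 5.0, shape=(3,), seed=0)   # three draws from Beta(2, 5)
# >>> ivy.all(ivy.logical_and(x >= 0, x <= 1))     # beta samples lie in [0, 1]

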
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_out_argument
@inputs_to_native_shapes
@to_native_arrays_and_back
@handle_device
def gamma(
alpha: Union[float, ivy.NativeArray, ivy.Array],
beta: Union[float, ivy.NativeArray, ivy.Array],
/,
*,
shape: Optional[Union[ivy.Shape, ivy.NativeShape]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
seed: Optional[int] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Return an array filled with random values sampled from a gamma
distribution.
Parameters
----------
alpha
Alpha parameter of the gamma distribution.
beta
Beta parameter of the gamma distribution.
shape
Shape parameter of the gamma distribution.
device
device on which to create the array. 'cuda:0',
'cuda:1', 'cpu' etc. (Default value = None).
dtype
output array data type. If ``dtype`` is ``None``, the output array data
type will be the default floating point data type. Default ``None``
seed
A python integer. Used to create a random seed distribution
out
optional output array, for writing the result to. It must have a shape
that the inputs broadcast to.
Returns
-------
ret
Returns an array filled with random values sampled from a gamma distribution.
"""
return ivy.current_backend().gamma(
alpha, beta, shape=shape, device=device, dtype=dtype, seed=seed, out=out
)
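# A minimal usage sketch for ``gamma`` (illustrative only; assumes a backend
# has been set via ``ivy.set_backend``). With scalar ``alpha`` and ``beta``,
# ``shape`` controls how many samples are drawn.
#
# >>> g = ivy.gamma(2.0, 1.0, shape=(4,), seed=0)  # four draws from Gamma(2, 1)
# >>> ivy.all(g > 0)                               # gamma samples are strictly positive

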
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_out_argument
@inputs_to_native_shapes
@to_native_arrays_and_back
@infer_dtype
@handle_device
def poisson(
lam: Union[float, ivy.Array, ivy.NativeArray],
*,
shape: Optional[Union[ivy.Shape, ivy.NativeShape]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
seed: Optional[int] = None,
fill_value: Optional[Union[int, float]] = 0,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Draws samples from a poisson distribution.
Parameters
----------
lam
Rate parameter(s) describing the poisson distribution(s) to sample.
It must have a shape that is broadcastable to the requested shape.
shape
If the given shape is, e.g '(m, n, k)', then 'm * n * k' samples are drawn.
(Default value = 'None', where 'ivy.shape(lam)' samples are drawn)
device
device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc.
(Default value = None).
dtype
output array data type. If ``dtype`` is ``None``, the output array data
type will be the default floating-point data type. Default ``None``
seed
A python integer. Used to create a random seed distribution.
fill_value
if lam is negative, fill the output array with this value
on that specific dimension.
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
Drawn samples from the poisson distribution
Examples
--------
>>> lam = [1.0, 2.0, 3.0]
>>> ivy.poisson(lam)
ivy.array([1., 4., 4.])
>>> lam = [1.0, 2.0, 3.0]
>>> ivy.poisson(lam, shape = (2,3))
ivy.array([[0., 2., 2.],
[1., 2., 3.]])
"""
return ivy.current_backend(lam).poisson(
lam,
shape=shape,
device=device,
dtype=dtype,
seed=seed,
fill_value=fill_value,
out=out,
)
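# A hedged sketch (not in the original module) of the ``fill_value`` behaviour
# documented above, assuming an active backend: entries whose rate is negative
# are not sampled but filled with ``fill_value`` instead.
#
# >>> lam = [1.0, -2.0, 3.0]              # the middle rate is invalid
# >>> ivy.poisson(lam, fill_value=-1.0)   # second entry is filled with -1.0

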
@handle_exceptions
@handle_backend_invalid
@handle_nestable
@handle_out_argument
@inputs_to_native_shapes
@to_native_arrays_and_back
@infer_dtype
@handle_device
def bernoulli(
probs: Union[float, ivy.Array, ivy.NativeArray],
*,
logits: Optional[Union[float, ivy.Array, ivy.NativeArray]] = None,
shape: Optional[Union[ivy.Shape, ivy.NativeShape]] = None,
device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None,
dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None,
seed: Optional[int] = None,
out: Optional[ivy.Array] = None,
) -> ivy.Array:
"""Draws samples from Bernoulli distribution parameterized by probs or
logits (but not both)
Parameters
----------
logits
An N-D Array representing the log-odds of a 1 event.
Each entry in the Array parameterizes an independent Bernoulli
distribution where the probability of an event is sigmoid
(logits). Only one of logits or probs should be passed in.
probs
An N-D Array representing the probability of a 1 event.
Each entry in the Array parameterizes an independent Bernoulli
distribution. Only one of logits or probs should be passed in
shape
If the given shape is, e.g '(m, n, k)', then 'm * n * k' samples are drawn.
(Default value = 'None', where 'ivy.shape(logits)' samples are drawn)
device
device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc.
(Default value = None).
dtype
output array data type. If ``dtype`` is ``None``, the output array data
type will be the default floating-point data type. Default ``None``
seed
A python integer. Used to create a random seed distribution
out
optional output array, for writing the result to. It must have a shape that the
inputs broadcast to.
Returns
-------
ret
Drawn samples from the Bernoulli distribution
"""
return ivy.current_backend(probs).bernoulli(
probs,
logits=logits,
shape=shape,
device=device,
dtype=dtype,
seed=seed,
out=out,
)
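# A minimal usage sketch for ``bernoulli`` (illustrative; assumes a backend is
# set). ``probs`` parameterizes independent 0/1 draws; per the docstring,
# ``logits`` may be supplied instead, but not both.
#
# >>> p = ivy.array([0.2, 0.5, 0.9])
# >>> ivy.bernoulli(p, seed=0)   # each entry is 0.0 or 1.0

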
| ivy/ivy/functional/ivy/experimental/random.py/0 | {
"file_path": "ivy/ivy/functional/ivy/experimental/random.py",
"repo_id": "ivy",
"token_count": 3976
} | 47 |