Columns:
text: string, lengths 17 to 362k
id: string, lengths 13 to 115
metadata: dict
__index_level_0__: int64, values 0 to 75
"""Collection of random Ivy functions.""" # global from typing import Optional, Union # local import ivy from ivy.func_wrapper import ( handle_array_function, infer_dtype, handle_out_argument, to_native_arrays_and_back, inputs_to_native_shapes, handle_nestable, handle_device, handle_backend_invalid, ) from ivy.utils.backend import backend_stack from ivy.utils.exceptions import handle_exceptions # Helpers # # ------- # def _check_bounds_and_get_shape(low, high, shape): if shape is not None: ivy.utils.assertions.check_all_or_any_fn( low, high, fn=lambda x: isinstance(x, (int, float)), type="all", message="low and high bounds must be numerics when shape is specified", ) return ivy.Shape(shape) valid_types = (ivy.Array,) if len(backend_stack) == 0: valid_types += (ivy.current_backend().NativeArray,) else: valid_types += (ivy.NativeArray,) if isinstance(low, valid_types): if isinstance(high, valid_types): ivy.utils.assertions.check_equal( ivy.shape(low), ivy.shape(high), as_array=False ) return ivy.shape(low) if isinstance(high, valid_types): return ivy.shape(high) return ivy.Shape(()) def _randint_check_dtype_and_bound(low, high, dtype): ivy.utils.assertions.check_all_or_any_fn( low, high, dtype, fn=ivy.is_uint_dtype, type="any", limit=[0], message="randint cannot take arguments of type uint", ) ivy.utils.assertions.check_all_or_any_fn( low, high, dtype, fn=ivy.is_float_dtype, type="any", limit=[0], message="randint cannot take arguments of type float", ) ivy.utils.assertions.check_less(low, high) def _check_valid_scale(std): ivy.utils.assertions.check_greater( std, 0, allow_equal=True, message="std must be non-negative" ) def _check_shapes_broadcastable(out, inp): if out is not None: ivy.utils.assertions.check_shapes_broadcastable(out, inp) # Extra # # ------# @handle_exceptions @handle_backend_invalid @handle_nestable @handle_out_argument @inputs_to_native_shapes @to_native_arrays_and_back @handle_array_function @infer_dtype @handle_device def random_uniform( *, low: Union[float, ivy.NativeArray, ivy.Array] = 0.0, high: Union[float, ivy.NativeArray, ivy.Array] = 1.0, shape: Optional[Union[ivy.Array, ivy.Shape, ivy.NativeShape]] = None, device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None, dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None, seed: Optional[int] = None, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Draws samples from a uniform distribution. Samples are uniformly distributed over the half-open interval ``[low, high)`` (includes ``low``, but excludes ``high``). In other words, any value within the given interval is equally likely to be drawn by uniform. Parameters ---------- low Lower boundary of the output interval. All values generated will be greater than or equal to ``low``. If array, must have same shape as ``high``. high Upper boundary of the output interval. All the values generated will be less than ``high``. If array, must have same shape as ``low``. shape If the given shape is, e.g ``(m, n, k)``, then ``m * n * k`` samples are drawn. Can only be specified when ``low`` and ``high`` are numeric values, else exception will be raised. Default is ``None``, where a single value is returned. device device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc. (Default value = None). dtype output array data type. If ``dtype`` is ``None``, the output array data type will be the default floating-point data type. Default ``None`` seed A python integer. Used to create a random seed distribution out optional output array, for writing the result to. 
It must have a shape that the inputs broadcast to. Returns ------- ret Drawn samples from the parameterized uniform distribution. Examples -------- >>> ivy.random_uniform() ivy.array(0.26431865) >>> ivy.random_uniform(shape=3) ivy.array([0.475, 0.878, 0.861]) >>> ivy.random_uniform(shape=(2,3)) ivy.array([[0.929 , 0.545 , 0.789 ], [0.519 , 0.0435, 0.381 ]]) >>> ivy.random_uniform(low=3.0, high=6.0) ivy.array(3.4608004) >>> ivy.random_uniform(low=1.0, high=2.0, shape=(2,1)) ivy.array([[1.85], [1.81]]) >>> z = ivy.zeros(()) >>> ivy.random_uniform(low=1.0, high=2.0, out=z) ivy.array(1.8458502) >>> ivy.random_uniform(low=1.0, high=2.0, shape=(2,2), device='cpu') ivy.array([[1.81, 1.8 ], [1.32, 1.43]]) >>> ivy.random_uniform(low=1.0, high=2.0, shape=(2,2), device='cpu', ... dtype='int32') ivy.array([[1, 1], [1, 1]]) >>> z = ivy.zeros((1,2)) >>> ivy.random_uniform(low=1.0, high=2.0, shape=(1,2), device='cpu', ... dtype='float64', out=z) ivy.array([[1.34, 1.02]]) >>> x = ivy.array([4.8, 5.6]) >>> y = ivy.array([9.8, 7.4]) >>> ivy.random_uniform(low=x, high=y) ivy.array([0.475, 0.878]) >>> z = ivy.zeros((2,)) >>> ivy.random_uniform(low=x, high=y, out=z, seed=42) ivy.array([6.67270088, 7.31128597]) >>> ivy.random_uniform(low=x, high=y, device='cpu') ivy.array([6.88, 6.75]) >>> ivy.random_uniform(low=x, high=y, device='cpu', dtype='float64') ivy.array([8.62, 6.47]) >>> z = ivy.zeros((2,)) >>> ivy.random_uniform(low=x, high=y, device='cpu', dtype='float64', out=z) ivy.array([5. , 7.3]) """ return ivy.current_backend().random_uniform( low=low, high=high, shape=shape, device=device, dtype=dtype, out=out, seed=seed ) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_out_argument @inputs_to_native_shapes @to_native_arrays_and_back @handle_array_function @infer_dtype @handle_device def random_normal( *, mean: Union[float, ivy.NativeArray, ivy.Array] = 0.0, std: Union[float, ivy.NativeArray, ivy.Array] = 1.0, shape: Optional[Union[ivy.Shape, ivy.NativeShape]] = None, dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None, seed: Optional[int] = None, device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Draws samples from a normal distribution. Parameters ---------- mean The mean of the normal distribution to sample from. Default is ``0.0``. std The standard deviation of the normal distribution to sample from. Must be non-negative. Default is ``1.0``. shape If the given shape is, e.g ``(m, n, k)``, then ``m * n * k`` samples are drawn. Can only be specified when ``mean`` and ``std`` are numeric values, else exception will be raised. Default is ``None``, where a single value is returned. dtype output array data type. If ``dtype`` is ``None``, the output array data type will be the default floating-point data type. Default ``None`` seed A python integer. Used to create a random seed distribution device device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc. (Default value = None). out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret Drawn samples from the parameterized normal distribution. 
Examples -------- >>> ivy.random_normal() ivy.array(-0.22346112) >>> ivy.random_normal(shape=3) ivy.array([-0.73 , 0.0922, -0.515 ]) >>> ivy.random_normal(shape=(2, 3), seed=42) ivy.array([[ 0.49671414, -0.1382643 , 0.64768857], [ 1.5230298 , -0.23415337, -0.23413695]]) >>> ivy.random_normal(mean=3.0, std=6.0) ivy.array(4.9213753) >>> ivy.random_normal(mean=1.0, std=2.0, shape=(2,1)) ivy.array([[2.19], [2.78]]) >>> z = ivy.zeros(()) >>> ivy.random_normal(mean=1.0, std=2.0, out=z) ivy.array(0.12818667) >>> ivy.random_normal(mean=1.0, std=2.0, shape=(2,2), device='cpu') ivy.array([[ 2.91 , 1.3 ], [ 3.37 , -0.799]]) >>> ivy.random_normal(mean=1.0, std=2.0, shape=(2,2), device='cpu', ... dtype='int32') ivy.array([[ 0, -1], [ 0, 3]]) >>> z = ivy.zeros((1,2)) >>> ivy.random_normal(mean=1.0, std=2.0, shape=(1,2), device='cpu', ... dtype='float64', out=z) ivy.array([[-2.01, -1.95]]) >>> x = ivy.array([4.8, 5.6]) >>> y = ivy.array([9.8, 7.4]) >>> ivy.random_normal(mean=x, std=y) ivy.array([ 4.43 , -0.469]) >>> z = ivy.zeros((2,)) >>> ivy.random_normal(mean=x, std=y, out=z) ivy.array([0.287, 8.55 ]) >>> ivy.random_normal(mean=x, std=y, device='cpu') ivy.array([18.9, 15.2]) >>> ivy.random_normal(mean=x, std=y, device='cpu', dtype='float64') ivy.array([-4.1 , -0.0366]) >>> z = ivy.zeros((2,)) >>> ivy.random_normal(mean=x, std=y, device='cpu', dtype='float64', out=z) ivy.array([12.4, 11. ]) """ return ivy.current_backend().random_normal( mean=mean, std=std, shape=shape, dtype=dtype, seed=seed, device=device, out=out ) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def multinomial( population_size: int, num_samples: int, /, *, batch_size: int = 1, probs: Optional[Union[ivy.Array, ivy.NativeArray]] = None, replace: bool = True, device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None, seed: Optional[int] = None, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Draws samples from a multinomial distribution. Specifically, returns a tensor where each row contains num_samples indices sampled from the multinomial probability distribution located in the corresponding row of tensor input. Parameters ---------- population_size The size of the population from which to draw samples. num_samples Number of independent samples to draw from the population. batch_size Number of tensors to generate. Default is 1. probs The unnormalized probabilities for all elements in population, default is uniform *[batch_shape, population_size]* replace Whether to replace samples once they've been drawn. Default is ``True``. device device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc. (Default value = None) seed A python integer. Used to create a random seed distribution out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret Drawn samples indices from the multinomial distribution. 
Examples -------- >>> y = ivy.multinomial(10, 5) >>> print(y) ivy.array([[1, 8, 7, 8, 3]]) >>> y = ivy.multinomial(10, 5, batch_size=2, seed=42) >>> print(y) ivy.array([[3, 9, 7, 5, 1], [1, 0, 8, 6, 7]]) >>> y = ivy.multinomial(10, 5, replace=False) >>> print(y) ivy.array([[2, 6, 4, 7, 0]]) With :class:`ivy.Array` input: >>> y = ivy.multinomial(10, 5, probs=ivy.array([1/10]*10)) >>> print(y) ivy.array([5, 2, 7, 6, 9]) >>> y = ivy.multinomial(7, 5, batch_size=2, probs=ivy.array([[1/7]*7, [1/7]*7])) >>> print(y) ivy.array([[0, 4, 3, 4, 5], [1, 1, 0, 3, 2]]) >>> y = ivy.multinomial(7, 5, batch_size=2, probs=ivy.array([[1/7]*7, [1/7]*7]), ... replace=False) >>> print(y) ivy.array([[2, 6, 1, 0, 3], [1, 0, 2, 5, 6]]) With :class:`ivy.NativeArray` input: >>> y = ivy.multinomial(10, 5, probs=ivy.native_array([1/10]*10)) >>> print(y) ivy.array([5, 7, 4, 2, 1]) >>> y = ivy.multinomial(10, 5, batch_size=2, ... probs=ivy.native_array([[1/10]*10, [1/10]*10])) >>> print(y) ivy.array([[8, 0, 4, 1, 7], [2, 3, 4, 9, 3]]) >>> y = ivy.multinomial(10, 5, batch_size=2, ... probs=ivy.native_array([[1/10]*10, [1/10]*10]), ... replace=False) >>> print(y) ivy.array([[0, 2, 6, 9, 1], [6, 7, 2, 4, 3]]) """ return ivy.current_backend().multinomial( population_size, num_samples, batch_size=batch_size, probs=probs, replace=replace, device=device, seed=seed, out=out, ) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_out_argument @inputs_to_native_shapes @to_native_arrays_and_back @handle_array_function @handle_device def randint( low: Union[int, ivy.NativeArray, ivy.Array], high: Union[int, ivy.NativeArray, ivy.Array], /, *, shape: Optional[Union[ivy.Shape, ivy.NativeShape]] = None, device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None, dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None, seed: Optional[int] = None, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Return an array filled with random integers generated uniformly between low (inclusive) and high (exclusive). Parameters ---------- low Lowest integer that can be drawn from the distribution. high One above the highest integer that can be drawn from the distribution. shape If the given shape is, e.g ``(m, n, k)``, then ``m * n * k`` samples are drawn Can only be specified when ``mean`` and ``std`` are numeric values, else exception will be raised. Default is ``None``, where a single value is returned. device device on which to create the array. 'cuda:0', 'cuda:1', 'cpu' etc. (Default value = None). dtype output array data type. If ``dtype`` is ``None``, the output array data type will be the default integer data type. Default ``None`` seed A python integer. Used to create a random seed distribution out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. 
Returns ------- ret Returns an array with the given shape filled with integers from the uniform distribution in the “half-open” interval [low, high) Examples -------- >>> y = ivy.randint(0, 9, shape=(1,1)) >>> print(y) ivy.array([[5]]) >>> y = ivy.randint(2, 20, shape=(2, 2), device='cpu', seed=42) >>> print(y) ivy.array([[ 8, 16], [12, 9]]) >>> x = ivy.array([1, 2, 3]) >>> ivy.randint(0, 10, shape=(3,), out=x) >>> print(x) ivy.array([2, 6, 7]) >>> y = ivy.zeros((3, 3)) >>> ivy.randint(3, 15, shape=(3, 3), device='cpu', out=y) >>> print(y) ivy.array([[ 7, 7, 5], [12, 8, 8], [ 8, 11, 3]]) """ return ivy.current_backend().randint( low, high, shape=shape, device=device, dtype=dtype, seed=seed, out=out ) @handle_exceptions @handle_nestable def seed(*, seed_value: int = 0) -> None: """Set the seed for random number generation. Parameters ---------- seed_value Seed for random number generation, must be a positive integer. (Default value = 0) Examples -------- >>> ivy.seed(seed_value=42) """ return ivy.current_backend().seed(seed_value=seed_value) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def shuffle( x: Union[ivy.Array, ivy.NativeArray], axis: Optional[int] = 0, /, *, seed: Optional[int] = None, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Shuffles the given array along a given axis. Parameters ---------- x Input array. Should have a numeric data type. axis The axis which x is shuffled along. Default is 0. seed A python integer. Used to create a random seed distribution out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret An array object, shuffled along the specified axis. Examples -------- With :class:`ivy.Array` input: >>> x = ivy.array([1, 2, 3, 4, 5]) >>> y = ivy.shuffle(x) >>> print(y) ivy.array([2, 1, 4, 3, 5]) >>> x = ivy.array([1, 3, 5, 7]) >>> y = ivy.shuffle(x, seed=394) >>> print(y) ivy.array([3, 1, 5, 7]) >>> x = ivy.array([1, 0, 5]) >>> y = ivy.array([0, 0, 0]) >>> ivy.shuffle(x, seed=394, out=y) >>> print(y) ivy.array([0, 1, 5]) With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([5, 2, 9]), ... b=ivy.array([7, 1, 6])) >>> y = ivy.shuffle(x) >>> print(y) { a: ivy.array([5, 9, 2]), b: ivy.array([6, 1, 7]) } >>> x = ivy.Container(a=ivy.array([7, 4, 5]), ... b=ivy.array([9, 8, 2])) >>> y = ivy.Container(a=ivy.array([0, 0, 0]), ... b=ivy.array([0, 0, 0])) >>> ivy.shuffle(x, seed=17, out=y) >>> print(y) { a: ivy.array([7, 5, 4]), b: ivy.array([9, 2, 8]) } >>> x = ivy.Container(a=ivy.array([8, 2, 5]), ... b=ivy.array([3, 9, 0])) >>> ivy.shuffle(x, seed=17, out=x) >>> print(x) { a: ivy.array([2, 8, 5]), b: ivy.array([3, 0, 9]) } """ return ivy.current_backend(x).shuffle(x, axis, seed=seed, out=out)
ivy/ivy/functional/ivy/random.py/0
{ "file_path": "ivy/ivy/functional/ivy/random.py", "repo_id": "ivy", "token_count": 8142 }
48
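The row above holds Ivy's functional random API. As a quick orientation, here is a minimal usage sketch based only on the signatures and docstring examples in that file; it assumes a supported backend is installed (numpy is used here), and the printed values will differ from run to run.

```python
import ivy

ivy.set_backend("numpy")  # any supported backend; numpy assumed installed

ivy.seed(seed_value=42)                                   # fix the global RNG seed
u = ivy.random_uniform(low=1.0, high=2.0, shape=(2, 2))   # samples in [1.0, 2.0)
n = ivy.random_normal(mean=0.0, std=1.0, shape=(3,))      # normal(0, 1) samples
r = ivy.randint(0, 10, shape=(2, 3))                      # integers in [0, 10)
m = ivy.multinomial(10, 5)                                 # 5 indices drawn from a population of 10
s = ivy.shuffle(ivy.array([1, 2, 3, 4, 5]))                # permute along axis 0

print(u.shape, n.shape, r.shape, m.shape, s.shape)
```

Note that, per the signatures above, `random_uniform`, `random_normal`, and `seed` take keyword-only arguments, while `randint`, `multinomial`, and `shuffle` take their leading arguments positionally.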
"""Base class for deriving trainable modules.""" # global from typing import Union, Optional # local import ivy from ivy.stateful.module import Module class Sequential(Module): def __init__( self, *sub_modules: Module, device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None, v: Optional[Union[ivy.Array, ivy.NativeArray]] = None, dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype]] = None, ): """Initialize a sequential container. Modules will be added to it in the order they are passed in the constructor. Parameters ---------- submodules Submodules to chain together into a sequence. device device on which to create the layer's variables 'cuda:0', 'cuda:1', 'cpu' etc. v the variables for each submodule in the sequence, constructed internally by default. """ if v is not None: for i, submod in enumerate(sub_modules): try: submod.v = v["submodules"][f"v{str(i)}"] except KeyError as e: if submod.v: raise ivy.utils.exceptions.IvyException( "variables v passed to Sequential class must have key " "chains in the form of " '"submodules/v{}", where {} is an idx' ) from e self._submodules = list(sub_modules) Module.__init__(self, device=device, v=v, dtype=dtype) def __iter__(self): return iter(self._submodules) def _forward(self, inputs): """Perform forward pass of the Sequential container. Parameters ---------- inputs Inputs to process. Returns ------- ret The output after each of the layers in the Sequential has been applied. """ x = inputs for i, submod in enumerate(self._submodules): try: x = submod(x, v=self.v.submodules[f"v{str(i)}"]) except KeyError as e: if submod.v: raise ivy.utils.exceptions.IvyException( "variables v passed to Sequential class must have key chains " "in the form of " '"submodules/v{}", where {} is an idx' ) from e x = submod(x) return x def _extra_repr(self): submods = [] for i, submod in enumerate(self._submodules): submods.append(f"v{i}={submod}") return ", ".join(submods)
ivy/ivy/stateful/sequential.py/0
{ "file_path": "ivy/ivy/stateful/sequential.py", "repo_id": "ivy", "token_count": 1343 }
49
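For context, the `Sequential` container above is normally chained with other stateful modules. The following is a minimal sketch, assuming `ivy.Linear` and `ivy.ReLU` stateful layers are available elsewhere in `ivy.stateful` (they are not defined in this file) and that a backend such as torch is set.

```python
import ivy

ivy.set_backend("torch")  # assumed backend

# ivy.Linear / ivy.ReLU are assumed stateful modules from ivy.stateful;
# Sequential simply calls each submodule in order inside _forward.
model = ivy.Sequential(
    ivy.Linear(4, 8),
    ivy.ReLU(),
    ivy.Linear(8, 2),
)

x = ivy.random_uniform(shape=(3, 4))
y = model(x)       # forward pass through each submodule in turn
print(y.shape)     # expected: (3, 2)
print(model.v)     # variables collected under "submodules/v{i}" key chains
```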
<jupyter_start><jupyter_code>
import torch
from torch import nn
import tensorflow as tf
import numpy as np


class TFModel(tf.keras.Model):
    def __init__(self):
        super().__init__()
        self.lin = tf.keras.layers.Dense(10, activation=tf.nn.relu)

    def call(self, inputs, training=False):
        return self.lin(inputs)


class TorchModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.lin = nn.Linear(10, 10)

    def forward(self, x):
        return self.lin(x)


tf_model = tf.function(TFModel())
inp = np.random.random((10, 10))
x = tf.convert_to_tensor(inp, dtype=tf.float32)
tf_model(x)
tf_model.summary()

torch_model = torch.compile(TorchModel())
inp2 = np.random.random((10, 10)).astype(np.float32)
x2 = torch.from_numpy(inp2)
torch_model, torch_model(x2).shape
<jupyter_output><empty_output>
<jupyter_text>Creating the Profiler Logs TensorFlow
<jupyter_code>
logs = 'logs/' + "tensorflow"
from ivy.utils.profiler import tensorflow_profile_start, tensorflow_profile_stop

tensorflow_profile_start(
    logs,
    host_tracer_level=3,
    python_tracer_level=1,
    device_tracer_level=1,
)
tf_model(x)
tensorflow_profile_stop()

# Launch TensorBoard and navigate to the Profile tab to view performance profile
!tensorboard --logdir='logs/'
<jupyter_output><empty_output>
<jupyter_text>Creating the Profiler Logs Torch
<jupyter_code>
from ivy.utils.profiler import torch_profiler_init, torch_profiler_start, torch_profiler_stop

profiler = torch_profiler_init(
    activities=[
        torch.profiler.ProfilerActivity.CPU,
        torch.profiler.ProfilerActivity.CUDA,
    ],
    on_trace_ready=torch.profiler.tensorboard_trace_handler('./logs/torch'),
    record_shapes=True,
    profile_memory=True,
    with_stack=True,
)
torch_profiler_start(profiler)
torch_model(x2)
torch_profiler_stop(profiler)
<jupyter_output><empty_output>
ivy/ivy/utils/profiler_example.ipynb/0
{ "file_path": "ivy/ivy/utils/profiler_example.ipynb", "repo_id": "ivy", "token_count": 826 }
50
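The notebook above drives both the TensorFlow and Torch profilers through `ivy.utils.profiler`. A condensed, standalone sketch of the Torch path is shown below; it assumes the same helpers imported in the notebook are available and that PyTorch is installed.

```python
import numpy as np
import torch
from ivy.utils.profiler import (
    torch_profiler_init,
    torch_profiler_start,
    torch_profiler_stop,
)

model = torch.nn.Linear(10, 10)
x = torch.from_numpy(np.random.random((10, 10)).astype(np.float32))

# Configure a CPU-only profile that writes a TensorBoard trace to ./logs/torch.
profiler = torch_profiler_init(
    activities=[torch.profiler.ProfilerActivity.CPU],
    on_trace_ready=torch.profiler.tensorboard_trace_handler("./logs/torch"),
    record_shapes=True,
)
torch_profiler_start(profiler)
model(x)  # the workload being profiled
torch_profiler_stop(profiler)
# View the trace with: tensorboard --logdir=./logs
```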
# global import copy import time from typing import Union, List, Optional import numpy as np import types import importlib import inspect from collections import OrderedDict from .globals import mod_backend try: import tensorflow as tf except ImportError: tf = types.SimpleNamespace() tf.TensorShape = None # local from .pipeline_helper import BackendHandler, BackendHandlerMode, get_frontend_config import ivy from ivy_tests.test_ivy.helpers.test_parameter_flags import FunctionTestFlags import ivy_tests.test_ivy.helpers.test_parameter_flags as pf import ivy_tests.test_ivy.helpers.globals as t_globals from ivy.functional.ivy.data_type import _get_function_list, _get_functions_from_string from ivy_tests.test_ivy.test_frontends import NativeClass from ivy_tests.test_ivy.helpers.structs import FrontendMethodData from ivy_tests.test_ivy.helpers.testing_helpers import _create_transpile_report from .assertions import ( value_test, assert_same_type, check_unsupported_dtype, ) # Temporary (.so) configuration def traced_if_required(backend: str, fn, test_trace=False, args=None, kwargs=None): with BackendHandler.update_backend(backend) as ivy_backend: if test_trace: try: if ( t_globals.CURRENT_RUNNING_TEST.fn_name in t_globals.CURRENT_TRACED_DATA and backend not in t_globals.CURRENT_TRACED_DATA[ t_globals.CURRENT_RUNNING_TEST.fn_name ] ): t_globals.CURRENT_TRACED_DATA[ t_globals.CURRENT_RUNNING_TEST.fn_name ][backend] = ivy_backend.trace_graph( fn, args=args, kwargs=kwargs, backend_compile=True ) elif ( t_globals.CURRENT_RUNNING_TEST.fn_name not in t_globals.CURRENT_TRACED_DATA ): t_globals.CURRENT_TRACED_DATA[ t_globals.CURRENT_RUNNING_TEST.fn_name ] = {} t_globals.CURRENT_TRACED_DATA[ t_globals.CURRENT_RUNNING_TEST.fn_name ][backend] = ivy_backend.trace_graph( fn, args=args, kwargs=kwargs, backend_compile=True ) fn = t_globals.CURRENT_TRACED_DATA[ t_globals.CURRENT_RUNNING_TEST.fn_name ][backend] except Exception: import logging logging.warning("API key is invalid, test_trace is skipped.") return fn # Ivy Function testing ########################## # Test Function Helpers ############### def _find_instance_in_args(backend: str, args, array_indices, mask): """Find the first element in the arguments that is considered to be an instance of Array or Container class. 
Parameters ---------- args Arguments to iterate over array_indices Indices of arrays that exists in the args mask Boolean mask for whether the corresponding element in (args) has a generated test_flags.native_array as False or test_flags.container as true Returns ------- First found instance in the arguments and the updates arguments not including the instance """ i = 0 for i, a in enumerate(mask): if a: break instance_idx = array_indices[i] with BackendHandler.update_backend(backend) as ivy_backend: instance = ivy_backend.index_nest(args, instance_idx) new_args = ivy_backend.copy_nest(args, to_mutable=False) ivy_backend.prune_nest_at_index(new_args, instance_idx) return instance, new_args def _get_frontend_submodules(fn_tree: str, gt_fn_tree: str): split_index = fn_tree.rfind(".") frontend_submods, fn_name = fn_tree[:split_index], fn_tree[split_index + 1 :] # if gt_fn_tree and gt_fn_name are different from our frontend structure if gt_fn_tree is not None: split_index = gt_fn_tree.rfind(".") gt_frontend_submods, gt_fn_name = ( gt_fn_tree[:split_index], gt_fn_tree[split_index + 1 :], ) else: gt_frontend_submods, gt_fn_name = fn_tree[25 : fn_tree.rfind(".")], fn_name return frontend_submods, fn_name, gt_frontend_submods, gt_fn_name def test_function_backend_computation( fw, test_flags, all_as_kwargs_np, input_dtypes, on_device, fn_name ): # split the arguments into their positional and keyword components args_np, kwargs_np = kwargs_to_args_n_kwargs( num_positional_args=test_flags.num_positional_args, kwargs=all_as_kwargs_np ) # Extract all arrays from the arguments and keyword arguments arg_np_arrays, arrays_args_indices, n_args_arrays = _get_nested_np_arrays(args_np) kwarg_np_arrays, arrays_kwargs_indices, n_kwargs_arrays = _get_nested_np_arrays( kwargs_np ) # Make all array-specific test flags and dtypes equal in length total_num_arrays = n_args_arrays + n_kwargs_arrays if len(input_dtypes) < total_num_arrays: input_dtypes = [input_dtypes[0] for _ in range(total_num_arrays)] if len(test_flags.as_variable) < total_num_arrays: test_flags.as_variable = [ test_flags.as_variable[0] for _ in range(total_num_arrays) ] if len(test_flags.native_arrays) < total_num_arrays: test_flags.native_arrays = [ test_flags.native_arrays[0] for _ in range(total_num_arrays) ] if len(test_flags.container) < total_num_arrays: test_flags.container = [ test_flags.container[0] for _ in range(total_num_arrays) ] if test_flags.test_cython_wrapper: ivy.set_cython_wrappers_mode(True) else: ivy.set_cython_wrappers_mode(False) with BackendHandler.update_backend(fw) as ivy_backend: # Update variable flags to be compatible with float dtype and with_out args test_flags.as_variable = [ v if ivy_backend.is_float_dtype(d) and not test_flags.with_out else False for v, d in zip(test_flags.as_variable, input_dtypes) ] # update instance_method flag to only be considered if the # first term is either an ivy.Array or ivy.Container instance_method = test_flags.instance_method and ( not test_flags.native_arrays[0] or test_flags.container[0] ) args, kwargs = create_args_kwargs( backend=fw, args_np=args_np, arg_np_vals=arg_np_arrays, args_idxs=arrays_args_indices, kwargs_np=kwargs_np, kwarg_np_vals=kwarg_np_arrays, kwargs_idxs=arrays_kwargs_indices, input_dtypes=input_dtypes, test_flags=test_flags, on_device=on_device, ) # If function doesn't have an out argument but an out argument is given # or a test with out flag is True if ("out" in kwargs or test_flags.with_out) and "out" not in inspect.signature( getattr(ivy, fn_name) ).parameters: 
raise RuntimeError(f"Function {fn_name} does not have an out parameter") # Run either as an instance method or from the API directly with BackendHandler.update_backend(fw) as ivy_backend: instance = None if instance_method: array_or_container_mask = [ (not native_flag) or container_flag for native_flag, container_flag in zip( test_flags.native_arrays, test_flags.container ) ] # Boolean mask for args and kwargs True if an entry's # test Array flag is True or test Container flag is true args_instance_mask = array_or_container_mask[ : test_flags.num_positional_args ] kwargs_instance_mask = array_or_container_mask[ test_flags.num_positional_args : ] if any(args_instance_mask): instance, args = _find_instance_in_args( fw, args, arrays_args_indices, args_instance_mask ) else: instance, kwargs = _find_instance_in_args( fw, kwargs, arrays_kwargs_indices, kwargs_instance_mask ) if test_flags.test_trace: def target_fn(instance, *args, **kwargs): return instance.__getattribute__(fn_name)(*args, **kwargs) args = [instance, *args] else: target_fn = instance.__getattribute__(fn_name) else: target_fn = ivy_backend.__dict__[fn_name] # Make copy of arguments for functions that might use inplace update by default copy_kwargs = copy.deepcopy(kwargs) copy_args = copy.deepcopy(args) ret_from_target, ret_np_flat_from_target = get_ret_and_flattened_np_array( fw, target_fn, *copy_args, test_trace=test_flags.test_trace, precision_mode=test_flags.precision_mode, **copy_kwargs, ) assert ivy_backend.nested_map( lambda x: ivy_backend.is_ivy_array(x) if ivy_backend.is_array(x) else True, ret_from_target, ), f"Ivy function returned non-ivy arrays: {ret_from_target}" # Assert indices of return if the indices of the out array provided if test_flags.with_out and not test_flags.test_trace: test_ret = ( ret_from_target[getattr(ivy_backend.__dict__[fn_name], "out_index")] if hasattr(ivy_backend.__dict__[fn_name], "out_index") else ret_from_target ) out = ivy_backend.nested_map( ivy_backend.zeros_like, test_ret, to_mutable=True, include_derived=True ) if instance_method: ( ret_from_target, ret_np_flat_from_target, ) = get_ret_and_flattened_np_array( fw, instance.__getattribute__(fn_name), *args, **kwargs, out=out, precision_mode=test_flags.precision_mode, ) else: ( ret_from_target, ret_np_flat_from_target, ) = get_ret_and_flattened_np_array( fw, ivy_backend.__dict__[fn_name], *args, **kwargs, out=out, precision_mode=test_flags.precision_mode, ) test_ret = ( ret_from_target[getattr(ivy_backend.__dict__[fn_name], "out_index")] if hasattr(ivy_backend.__dict__[fn_name], "out_index") else ret_from_target ) assert not ivy_backend.nested_any( ivy_backend.nested_multi_map( lambda x, _: x[0] is x[1], [test_ret, out] ), lambda x: not x, ), "the array in out argument does not contain same value as the returned" if not max(test_flags.container) and ivy_backend.native_inplace_support: # these backends do not always support native inplace updates assert not ivy_backend.nested_any( ivy_backend.nested_multi_map( lambda x, _: x[0].data is x[1].data, [test_ret, out] ), lambda x: not x, ), ( "the array in out argument does not contain same value as the" " returned" ) if test_flags.with_copy: array_fn = ivy_backend.is_array if "copy" in list(inspect.signature(target_fn).parameters.keys()): kwargs["copy"] = True if instance_method: first_array = instance else: first_array = ivy_backend.func_wrapper._get_first_array( *args, array_fn=array_fn, **kwargs ) ret_, ret_np_flat_ = get_ret_and_flattened_np_array( fw, target_fn, *args, 
test_trace=test_flags.test_trace, precision_mode=test_flags.precision_mode, **kwargs, ) first_array = ivy_backend.stop_gradient(first_array).to_numpy() ret_ = ivy_backend.stop_gradient(ret_).to_numpy() assert not np.may_share_memory(first_array, ret_) ret_device = None if isinstance(ret_from_target, ivy_backend.Array): # TODO use str for now ret_device = ivy_backend.dev(ret_from_target) return ( ret_from_target, ret_np_flat_from_target, ret_device, args_np, arg_np_arrays, arrays_args_indices, kwargs_np, arrays_kwargs_indices, kwarg_np_arrays, test_flags, input_dtypes, ) def test_function_ground_truth_computation( ground_truth_backend, on_device, args_np, arg_np_arrays, arrays_args_indices, kwargs_np, arrays_kwargs_indices, kwarg_np_arrays, input_dtypes, test_flags, fn_name, ): with BackendHandler.update_backend(ground_truth_backend) as gt_backend: gt_backend.set_default_device(on_device) # TODO remove args, kwargs = create_args_kwargs( backend=test_flags.ground_truth_backend, args_np=args_np, arg_np_vals=arg_np_arrays, args_idxs=arrays_args_indices, kwargs_np=kwargs_np, kwargs_idxs=arrays_kwargs_indices, kwarg_np_vals=kwarg_np_arrays, input_dtypes=input_dtypes, test_flags=test_flags, on_device=on_device, ) ret_from_gt, ret_np_from_gt_flat = get_ret_and_flattened_np_array( test_flags.ground_truth_backend, gt_backend.__dict__[fn_name], *args, test_trace=test_flags.test_trace, precision_mode=test_flags.precision_mode, **kwargs, ) assert gt_backend.nested_map( lambda x: gt_backend.is_ivy_array(x) if gt_backend.is_array(x) else True, ret_from_gt, ), f"Ground-truth function returned non-ivy arrays: {ret_from_gt}" if test_flags.with_out and not test_flags.test_trace: test_ret_from_gt = ( ret_from_gt[getattr(gt_backend.__dict__[fn_name], "out_index")] if hasattr(gt_backend.__dict__[fn_name], "out_index") else ret_from_gt ) out_from_gt = gt_backend.nested_map( gt_backend.zeros_like, test_ret_from_gt, to_mutable=True, include_derived=True, ) ret_from_gt, ret_np_from_gt_flat = get_ret_and_flattened_np_array( test_flags.ground_truth_backend, gt_backend.__dict__[fn_name], *args, test_trace=test_flags.test_trace, precision_mode=test_flags.precision_mode, **kwargs, out=out_from_gt, ) # TODO enable fw_list = gradient_unsupported_dtypes(fn=gt_backend.__dict__[fn_name]) ret_from_gt_device = None if isinstance(ret_from_gt, gt_backend.Array): # TODO use str for now ret_from_gt_device = gt_backend.dev(ret_from_gt) return (ret_from_gt, ret_np_from_gt_flat, ret_from_gt_device, test_flags, fw_list) def test_function( *, input_dtypes: Union[ivy.Dtype, List[ivy.Dtype]], test_flags: FunctionTestFlags, fn_name: str, rtol_: Optional[float] = None, atol_: float = 1e-06, tolerance_dict: Optional[dict] = None, test_values: bool = True, xs_grad_idxs=None, ret_grad_idxs=None, backend_to_test: str, on_device: str, return_flat_np_arrays: bool = False, **all_as_kwargs_np, ): """Test a function that consumes (or returns) arrays for the current backend by comparing the result with numpy. Parameters ---------- input_dtypes data types of the input arguments in order. test_flags FunctionTestFlags object that stores all testing flags, including: num_positional_args, with_out, instance_method, as_variable, native_arrays, container, gradient fw current backend (framework). fn_name name of the function to test. rtol_ relative tolerance value. atol_ absolute tolerance value. test_values if True, test for the correctness of the resulting values. xs_grad_idxs Indices of the input arrays to compute gradients with respect to. 
If None, gradients are returned with respect to all input arrays. (Default value = None) ret_grad_idxs Indices of the returned arrays for which to return computed gradients. If None, gradients are returned for all returned arrays. (Default value = None) on_device The device on which to create arrays return_flat_np_arrays If test_values is False, this flag dictates whether the original returns are returned, or whether the flattened numpy arrays are returned. all_as_kwargs_np input arguments to the function as keyword arguments. Returns ------- ret optional, return value from the function ret_gt optional, return value from the Ground Truth function Examples -------- >>> input_dtypes = 'float64' >>> as_variable_flags = False >>> with_out = False >>> num_positional_args = 0 >>> native_array_flags = False >>> container_flags = False >>> instance_method = False >>> test_flags = FunctionTestFlags(num_positional_args, with_out, instance_method, as_variable, native_arrays, container_flags, none) >>> fw = "torch" >>> fn_name = "abs" >>> x = np.array([-1]) >>> test_function(input_dtypes, test_flags, fw, fn_name, x=x) >>> input_dtypes = ['float64', 'float32'] >>> as_variable_flags = [False, True] >>> with_out = False >>> num_positional_args = 1 >>> native_array_flags = [True, False] >>> container_flags = [False, False] >>> instance_method = False >>> test_flags = FunctionTestFlags(num_positional_args, with_out, instance_method, as_variable, native_arrays, container_flags, none) >>> fw = "numpy" >>> fn_name = "add" >>> x1 = np.array([1, 3, 4]) >>> x2 = np.array([-3, 15, 24]) >>> test_function(input_dtypes, test_flags, fw, fn_name, x1=x1, x2=x2) """ _switch_backend_context(test_flags.test_trace or test_flags.transpile) ground_truth_backend = test_flags.ground_truth_backend if test_flags.container[0]: test_flags.with_copy = False if test_flags.with_copy is True: test_flags.with_out = False if mod_backend[backend_to_test]: # multiprocessing proc, input_queue, output_queue = mod_backend[backend_to_test] input_queue.put( ( "function_backend_computation", backend_to_test, test_flags, all_as_kwargs_np, input_dtypes, on_device, fn_name, ) ) ( ret_from_target, ret_np_flat_from_target, ret_device, args_np, arg_np_arrays, arrays_args_indices, kwargs_np, arrays_kwargs_indices, kwarg_np_arrays, test_flags, input_dtypes, ) = output_queue.get() else: ( ret_from_target, ret_np_flat_from_target, ret_device, args_np, arg_np_arrays, arrays_args_indices, kwargs_np, arrays_kwargs_indices, kwarg_np_arrays, test_flags, input_dtypes, ) = test_function_backend_computation( backend_to_test, test_flags, all_as_kwargs_np, input_dtypes, on_device, fn_name, ) # compute the return with a Ground Truth backend if mod_backend[ground_truth_backend]: proc, input_queue, output_queue = mod_backend[ground_truth_backend] input_queue.put( ( "function_ground_truth_computation", ground_truth_backend, on_device, args_np, arg_np_arrays, arrays_args_indices, kwargs_np, arrays_kwargs_indices, kwarg_np_arrays, input_dtypes, test_flags, fn_name, ) ) ( ret_from_gt, ret_np_from_gt_flat, ret_from_gt_device, test_flags, fw_list, ) = output_queue.get() else: ( ret_from_gt, ret_np_from_gt_flat, ret_from_gt_device, test_flags, fw_list, ) = test_function_ground_truth_computation( ground_truth_backend, on_device, args_np, arg_np_arrays, arrays_args_indices, kwargs_np, arrays_kwargs_indices, kwarg_np_arrays, input_dtypes, test_flags, fn_name, ) if test_flags.transpile: if mod_backend[backend_to_test]: proc, input_queue, output_queue = 
mod_backend[backend_to_test] input_queue.put( ( "transpile_if_required_backend", backend_to_test, fn_name, args_np, kwargs_np, ) ) else: _transpile_if_required_backend( backend_to_test, fn_name, args=args_np, kwargs=kwargs_np ) # Gradient test # TODO enable back , ADD backend_to_test to the call below if ( test_flags.test_gradients and not test_flags.instance_method and "bool" not in input_dtypes and not any(d in ["complex64", "complex128"] for d in input_dtypes) ): if backend_to_test not in fw_list or not ivy.nested_argwhere( all_as_kwargs_np, lambda x: ( x.dtype in fw_list[backend_to_test] if isinstance(x, np.ndarray) else None ), ): gradient_test( fn=fn_name, all_as_kwargs_np=all_as_kwargs_np, args_np=args_np, kwargs_np=kwargs_np, input_dtypes=input_dtypes, test_flags=test_flags, rtol_=rtol_, atol_=atol_, xs_grad_idxs=xs_grad_idxs, ret_grad_idxs=ret_grad_idxs, ground_truth_backend=ground_truth_backend, backend_to_test=backend_to_test, on_device=on_device, ) # assuming value test will be handled manually in the test function if not test_values: if return_flat_np_arrays: return ret_np_flat_from_target, ret_np_from_gt_flat return ret_from_target, ret_from_gt if isinstance(rtol_, dict): rtol_ = _get_framework_rtol(rtol_, backend_to_test) if isinstance(atol_, dict): atol_ = _get_framework_atol(atol_, backend_to_test) # value test value_test( ret_np_flat=ret_np_flat_from_target, ret_np_from_gt_flat=ret_np_from_gt_flat, rtol=rtol_, atol=atol_, specific_tolerance_dict=tolerance_dict, backend=backend_to_test, ground_truth_backend=test_flags.ground_truth_backend, ) if not test_flags.test_trace: assert_same_type( ret_from_target, ret_from_gt, backend_to_test, test_flags.ground_truth_backend, ) assert ret_device == ret_from_gt_device, ( f"ground truth backend ({test_flags.ground_truth_backend}) returned array on" f" device {ret_from_gt_device} but target backend ({backend_to_test})" f" returned array on device {ret_device}" ) if ret_device is not None: assert ret_device == on_device, ( f"device is set to {on_device}, but ground truth produced array on" f" {ret_device}" ) def _assert_frontend_ret(ret, for_fn=True): fn_or_method = "function" if for_fn else "method" if not inspect.isclass(ret): is_ret_tuple = issubclass(ret.__class__, tuple) else: is_ret_tuple = issubclass(ret, tuple) if is_ret_tuple: non_frontend_idxs = ivy.nested_argwhere( ret, lambda _x: not _is_frontend_array(_x) if ivy.is_array(_x) else False ) assert not non_frontend_idxs, ( f"Frontend {fn_or_method} return contains non-frontend arrays at positions" f" {non_frontend_idxs} (zero-based):" f" {ivy.multi_index_nest(ret, non_frontend_idxs)}" ) elif ivy.is_array(ret): assert _is_frontend_array( ret ), f"Frontend {fn_or_method} returned non-frontend array: {ret}" def _transpile_if_required_backend(backend: str, fn_name: str, args=None, kwargs=None): iterations = 1 with BackendHandler.update_backend(backend) as ivy_backend: args, kwargs = ivy_backend.args_to_ivy(*args, **kwargs) backend_fn = ivy.__dict__[fn_name] backend_traced_fn = traced_if_required( backend, backend_fn, test_trace=True, args=args, kwargs=kwargs ) func_timings = [] for i in range(0, iterations): # timing the traced_fn start = time.time() backend_traced_fn(*args, **kwargs) end = time.time() func_timings.append(end - start) func_time = np.mean(func_timings).item() backend_nodes = len(backend_traced_fn._functions) data = { "fn_name": fn_name, "args": str(args), "kwargs": str(kwargs), "time": func_time, "nodes": backend_nodes, } _create_transpile_report(data, backend, 
"report.json", True) def test_frontend_function( *, input_dtypes: Union[ivy.Dtype, List[ivy.Dtype]], test_flags: pf.frontend_function_flags, backend_to_test: str, on_device="cpu", frontend: str, fn_tree: str, gt_fn_tree: Optional[str] = None, rtol: Optional[float] = None, atol: float = 1e-06, tolerance_dict: Optional[dict] = None, test_values: bool = True, **all_as_kwargs_np, ): """Test a frontend function for the current backend by comparing the result with the function in the associated framework. Parameters ---------- input_dtypes data types of the input arguments in order. test_flags FunctionTestFlags object that stores all testing flags, including: num_positional_args, with_out, instance_method, as_variable, native_arrays, container, gradient, precision_mode frontend current frontend (framework). fn_tree Path to function in frontend framework namespace. gt_fn_tree Path to function in ground truth framework namespace. rtol relative tolerance value. atol absolute tolerance value. tolerance_dict dictionary of tolerance values for specific dtypes. test_values if True, test for the correctness of the resulting values. all_as_kwargs_np input arguments to the function as keyword arguments. Returns ------- ret optional, return value from the function ret_np optional, return value from the Numpy function """ # ToDo add with_backend refactor in GC _switch_backend_context(test_flags.test_trace or test_flags.transpile) assert ( not test_flags.with_out or not test_flags.inplace ), "only one of with_out or with_inplace can be set as True" if test_flags.with_copy is True: test_flags.with_out = False test_flags.inplace = False # split the arguments into their positional and keyword components args_np, kwargs_np = kwargs_to_args_n_kwargs( num_positional_args=test_flags.num_positional_args, kwargs=all_as_kwargs_np ) # extract all arrays from the arguments and keyword arguments arg_np_vals, args_idxs, c_arg_vals = _get_nested_np_arrays(args_np) kwarg_np_vals, kwargs_idxs, c_kwarg_vals = _get_nested_np_arrays(kwargs_np) # make all lists equal in length num_arrays = c_arg_vals + c_kwarg_vals if len(input_dtypes) < num_arrays: input_dtypes = [input_dtypes[0] for _ in range(num_arrays)] if len(test_flags.as_variable) < num_arrays: test_flags.as_variable = [test_flags.as_variable[0] for _ in range(num_arrays)] if len(test_flags.native_arrays) < num_arrays: test_flags.native_arrays = [ test_flags.native_arrays[0] for _ in range(num_arrays) ] with BackendHandler.update_backend(backend_to_test) as ivy_backend: # update var flags to be compatible with float dtype and with_out args test_flags.as_variable = [ v if ivy_backend.is_float_dtype(d) and not test_flags.with_out else False for v, d in zip(test_flags.as_variable, input_dtypes) ] local_importer = ivy_backend.utils.dynamic_import # strip the decorator to get an Ivy array # TODO, fix testing for jax frontend for x32 if frontend == "jax": local_importer.import_module("ivy.functional.frontends.jax").config.update( "jax_enable_x64", True ) ( frontend_submods, fn_name, gt_frontend_submods, gt_fn_name, ) = _get_frontend_submodules(fn_tree, gt_fn_tree) function_module = local_importer.import_module(frontend_submods) frontend_fn = getattr(function_module, fn_name) # apply test flags etc. 
args, kwargs = create_args_kwargs( backend=backend_to_test, args_np=args_np, arg_np_vals=arg_np_vals, args_idxs=args_idxs, kwargs_np=kwargs_np, kwarg_np_vals=kwarg_np_vals, kwargs_idxs=kwargs_idxs, input_dtypes=input_dtypes, test_flags=test_flags, on_device=on_device, ) # Make copy for arguments for functions that might use # inplace update by default copy_kwargs = copy.deepcopy(kwargs) copy_args = copy.deepcopy(args) # Frontend array generation create_frontend_array = local_importer.import_module( f"ivy.functional.frontends.{frontend}" )._frontend_array if test_flags.generate_frontend_arrays: args_for_test, kwargs_for_test = args_to_frontend( backend_to_test, *args, frontend_array_fn=create_frontend_array, **kwargs, ) copy_args, copy_kwargs = args_to_frontend( backend_to_test, *args, frontend_array_fn=create_frontend_array, **kwargs, ) else: args_for_test = copy.deepcopy(args) kwargs_for_test = copy.deepcopy(kwargs) ret = get_frontend_ret( backend_to_test, frontend_fn, *args_for_test, test_trace=test_flags.test_trace, frontend_array_function=( create_frontend_array if test_flags.test_trace else None ), precision_mode=test_flags.precision_mode, **kwargs_for_test, ) # test if return is frontend _assert_frontend_ret(ret) if test_flags.with_out and "out" in list( inspect.signature(frontend_fn).parameters.keys() ): if not inspect.isclass(ret): is_ret_tuple = issubclass(ret.__class__, tuple) else: is_ret_tuple = issubclass(ret, tuple) out = ret if is_ret_tuple: flatten_ret = flatten_frontend( ret=ret, backend=backend_to_test, frontend_array_fn=create_frontend_array, ) flatten_out = flatten_frontend( ret=out, backend=backend_to_test, frontend_array_fn=create_frontend_array, ) for ret_array, out_array in zip(flatten_ret, flatten_out): if ivy_backend.native_inplace_support and not any( (ivy_backend.isscalar(ret), ivy_backend.isscalar(out)) ): assert ret_array.ivy_array.data is out_array.ivy_array.data assert ret_array is out_array else: if ivy_backend.native_inplace_support and not any( (ivy_backend.isscalar(ret), ivy_backend.isscalar(out)) ): assert ret.ivy_array.data is out.ivy_array.data assert ret is out elif test_flags.with_copy: assert _is_frontend_array(ret) if "copy" in list(inspect.signature(frontend_fn).parameters.keys()): copy_kwargs["copy"] = True first_array = ivy_backend.func_wrapper._get_first_array( *copy_args, array_fn=( _is_frontend_array if test_flags.generate_frontend_arrays else ivy_backend.is_array ), **copy_kwargs, ) ret_ = get_frontend_ret( backend_to_test, frontend_fn, *copy_args, test_trace=test_flags.test_trace, frontend_array_function=( create_frontend_array if test_flags.test_trace else None ), precision_mode=test_flags.precision_mode, **copy_kwargs, ) if test_flags.generate_frontend_arrays: first_array = first_array.ivy_array ret_ = ret_.ivy_array if "bfloat16" in str(ret_.dtype): ret_ = ivy_backend.astype(ret_, ivy_backend.float64) if "bfloat16" in str(first_array.dtype): first_array = ivy_backend.astype(first_array, ivy_backend.float64) if not ivy_backend.is_native_array(first_array): first_array = first_array.data ret_ = ret_.data if hasattr(first_array, "requires_grad"): first_array = first_array.detach() if hasattr(ret_, "requires_grad"): ret_ = ret_.detach() if backend_to_test == "tensorflow": first_array = first_array.numpy() ret_ = ret_.numpy() assert not np.may_share_memory(first_array, ret_) elif test_flags.inplace: assert _is_frontend_array(ret) if "inplace" in list(inspect.signature(frontend_fn).parameters.keys()): # the function provides optional inplace 
update copy_kwargs["inplace"] = True # else the function provides inplace update by default first_array = ivy_backend.func_wrapper._get_first_array( *copy_args, array_fn=( _is_frontend_array if test_flags.generate_frontend_arrays else ivy_backend.is_array ), **copy_kwargs, ) ret_ = get_frontend_ret( backend_to_test, frontend_fn, *copy_args, test_trace=test_flags.test_trace, frontend_array_function=( create_frontend_array if test_flags.test_trace else None ), precision_mode=test_flags.precision_mode, **copy_kwargs, ) if test_flags.generate_frontend_arrays: assert first_array is ret_ elif ( ivy_backend.is_native_array(first_array) and ivy_backend.inplace_arrays_supported() ): assert first_array is ret_.ivy_array.data elif ivy_backend.is_ivy_array(first_array): assert first_array.data is ret_.ivy_array.data # create NumPy args if test_values: ret_np_flat = flatten_frontend_to_np( ret=ret, backend=backend_to_test, ) if not test_values: ret = ivy_backend.nested_map( _frontend_array_to_ivy, ret, include_derived={"tuple": True} ) # create frontend framework args frontend_config = get_frontend_config(frontend) args_frontend = ivy.nested_map( lambda x: ( frontend_config.native_array(x) if isinstance(x, np.ndarray) else ( frontend_config.as_native_dtype(x) if isinstance(x, frontend_config.Dtype) else x ) ), args_np, shallow=False, ) kwargs_frontend = ivy.nested_map( lambda x: frontend_config.native_array(x) if isinstance(x, np.ndarray) else x, kwargs_np, shallow=False, ) # change ivy dtypes to native dtypes if "dtype" in kwargs_frontend and kwargs_frontend["dtype"] is not None: kwargs_frontend["dtype"] = frontend_config.as_native_dtype( kwargs_frontend["dtype"] ) # change ivy device to native devices if "device" in kwargs_frontend: kwargs_frontend["device"] = frontend_config.as_native_device( kwargs_frontend["device"] ) # compute the return via the frontend framework frontend_fw = importlib.import_module(gt_frontend_submods) frontend_fw_fn = frontend_fw.__dict__[gt_fn_name] frontend_ret = frontend_fw_fn(*args_frontend, **kwargs_frontend) if test_flags.transpile: _get_transpiled_data_if_required( frontend_fn, frontend_fw_fn, frontend, backend_to_test, fn_name=f"{gt_frontend_submods}.{gt_fn_name}", generate_frontend_arrays=test_flags.generate_frontend_arrays, args_for_test=args_for_test, kwargs_for_test=kwargs_for_test, frontend_fw_args=args_frontend, frontend_fw_kwargs=kwargs_frontend, ) if test_values: frontend_ret_np_flat = flatten_frontend_fw_to_np( frontend_ret, frontend_config.isscalar, frontend_config.is_native_array, frontend_config.to_numpy, ) # assuming value test will be handled manually in the test function if not test_values: return ( ret, frontend_ret, ) if isinstance(rtol, dict): rtol = _get_framework_rtol(rtol, t_globals.CURRENT_BACKEND) if isinstance(atol, dict): atol = _get_framework_atol(atol, t_globals.CURRENT_BACKEND) value_test( ret_np_flat=ret_np_flat, ret_np_from_gt_flat=frontend_ret_np_flat, rtol=rtol, atol=atol, specific_tolerance_dict=tolerance_dict, backend=backend_to_test, ground_truth_backend=frontend, ) # Method testing def test_gradient_backend_computation( backend_to_test, args_np, arg_np_vals, args_idxs, kwargs_np, kwarg_np_vals, kwargs_idxs, input_dtypes, test_flags, on_device, fn, test_trace, xs_grad_idxs, ret_grad_idxs, ): args, kwargs = create_args_kwargs( backend=backend_to_test, args_np=args_np, arg_np_vals=arg_np_vals, args_idxs=args_idxs, kwargs_np=kwargs_np, kwarg_np_vals=kwarg_np_vals, kwargs_idxs=kwargs_idxs, input_dtypes=input_dtypes, 
test_flags=test_flags, on_device=on_device, ) with BackendHandler.update_backend(backend_to_test) as ivy_backend: def _grad_fn(all_args): args, kwargs, i = all_args call_fn = ivy_backend.__dict__[fn] if isinstance(fn, str) else fn[i] ret = traced_if_required( backend_to_test, call_fn, test_trace=test_trace, args=args, kwargs=kwargs, )(*args, **kwargs) return ivy_backend.nested_map(ivy_backend.mean, ret, include_derived=True) with ivy_backend.PreciseMode(test_flags.precision_mode): _, grads = ivy_backend.execute_with_gradients( _grad_fn, [args, kwargs, 0], xs_grad_idxs=xs_grad_idxs, ret_grad_idxs=ret_grad_idxs, ) grads_np_flat = flatten_and_to_np(backend=backend_to_test, ret=grads) return grads_np_flat def test_gradient_ground_truth_computation( ground_truth_backend, on_device, fn, input_dtypes, all_as_kwargs_np, args_np, arg_np_vals, args_idxs, kwargs_np, kwarg_np_vals, test_flags, kwargs_idxs, test_trace, xs_grad_idxs, ret_grad_idxs, ): with BackendHandler.update_backend(ground_truth_backend) as gt_backend: gt_backend.set_default_device(on_device) # TODO remove if check_unsupported_dtype( fn=gt_backend.__dict__[fn] if isinstance(fn, str) else fn[1], input_dtypes=input_dtypes, all_as_kwargs_np=all_as_kwargs_np, ): return args, kwargs = create_args_kwargs( backend=ground_truth_backend, args_np=args_np, arg_np_vals=arg_np_vals, args_idxs=args_idxs, kwargs_np=kwargs_np, kwarg_np_vals=kwarg_np_vals, kwargs_idxs=kwargs_idxs, input_dtypes=input_dtypes, test_flags=test_flags, on_device=on_device, ) def _gt_grad_fn(all_args): args, kwargs, i = all_args call_fn = gt_backend.__dict__[fn] if isinstance(fn, str) else fn[i] ret = traced_if_required( ground_truth_backend, call_fn, test_trace=test_trace, args=args, kwargs=kwargs, )(*args, **kwargs) return gt_backend.nested_map(gt_backend.mean, ret, include_derived=True) with gt_backend.PreciseMode(test_flags.precision_mode): _, grads_from_gt = gt_backend.execute_with_gradients( _gt_grad_fn, [args, kwargs, 1], xs_grad_idxs=xs_grad_idxs, ret_grad_idxs=ret_grad_idxs, ) grads_np_from_gt_flat = flatten_and_to_np( backend=ground_truth_backend, ret=grads_from_gt ) return grads_np_from_gt_flat def gradient_test( *, fn, all_as_kwargs_np, args_np, kwargs_np, input_dtypes, test_flags, test_trace: bool = False, rtol_: Optional[float] = None, atol_: float = 1e-06, tolerance_dict=None, xs_grad_idxs=None, ret_grad_idxs=None, backend_to_test: str, ground_truth_backend: str, on_device: str, ): # extract all arrays from the arguments and keyword arguments arg_np_vals, args_idxs, _ = _get_nested_np_arrays(args_np) kwarg_np_vals, kwargs_idxs, _ = _get_nested_np_arrays(kwargs_np) if mod_backend[backend_to_test]: # do this using multiprocessing proc, input_queue, output_queue = mod_backend[backend_to_test] input_queue.put( ( "gradient_backend_computation", backend_to_test, args_np, arg_np_vals, args_idxs, kwargs_np, kwarg_np_vals, kwargs_idxs, input_dtypes, test_flags, on_device, fn, test_trace, xs_grad_idxs, ret_grad_idxs, ) ) grads_np_flat = output_queue.get() else: grads_np_flat = test_gradient_backend_computation( backend_to_test, args_np, arg_np_vals, args_idxs, kwargs_np, kwarg_np_vals, kwargs_idxs, input_dtypes, test_flags, on_device, fn, test_trace, xs_grad_idxs, ret_grad_idxs, ) if mod_backend[ground_truth_backend]: # do this using multiprocessing proc, input_queue, output_queue = mod_backend[ground_truth_backend] input_queue.put( ( "gradient_ground_truth_computation", ground_truth_backend, on_device, fn, input_dtypes, all_as_kwargs_np, args_np, arg_np_vals, 
args_idxs, kwargs_np, kwarg_np_vals, test_flags, kwargs_idxs, test_trace, xs_grad_idxs, ret_grad_idxs, ) ) grads_np_from_gt_flat = output_queue.get() else: grads_np_from_gt_flat = test_gradient_ground_truth_computation( ground_truth_backend, on_device, fn, input_dtypes, all_as_kwargs_np, args_np, arg_np_vals, args_idxs, kwargs_np, kwarg_np_vals, test_flags, kwargs_idxs, test_trace, xs_grad_idxs, ret_grad_idxs, ) assert len(grads_np_flat) == len(grads_np_from_gt_flat), ( f"result length mismatch: {grads_np_flat} ({len(grads_np_flat)}) !=" f" {grads_np_from_gt_flat} ({len(grads_np_from_gt_flat)})" ) value_test( ret_np_flat=grads_np_flat, ret_np_from_gt_flat=grads_np_from_gt_flat, rtol=rtol_, atol=atol_, specific_tolerance_dict=tolerance_dict, backend=backend_to_test, ground_truth_backend=ground_truth_backend, ) def test_method_backend_computation( init_input_dtypes, init_flags, backend_to_test, init_all_as_kwargs_np, on_device, method_input_dtypes, method_flags, method_all_as_kwargs_np, class_name, method_name, init_with_v, test_trace, method_with_v, ): init_input_dtypes = ivy.default(init_input_dtypes, []) # Constructor arguments # init_all_as_kwargs_np = ivy.default(init_all_as_kwargs_np, {}) # split the arguments into their positional and keyword components args_np_constructor, kwargs_np_constructor = kwargs_to_args_n_kwargs( num_positional_args=init_flags.num_positional_args, kwargs=init_all_as_kwargs_np, ) # extract all arrays from the arguments and keyword arguments con_arg_np_vals, con_args_idxs, con_c_arg_vals = _get_nested_np_arrays( args_np_constructor ) con_kwarg_np_vals, con_kwargs_idxs, con_c_kwarg_vals = _get_nested_np_arrays( kwargs_np_constructor ) # make all lists equal in length num_arrays_constructor = con_c_arg_vals + con_c_kwarg_vals if len(init_input_dtypes) < num_arrays_constructor: init_input_dtypes = [ init_input_dtypes[0] for _ in range(num_arrays_constructor) ] if len(init_flags.as_variable) < num_arrays_constructor: init_flags.as_variable = [ init_flags.as_variable[0] for _ in range(num_arrays_constructor) ] if len(init_flags.native_arrays) < num_arrays_constructor: init_flags.native_arrays = [ init_flags.native_arrays[0] for _ in range(num_arrays_constructor) ] # update variable flags to be compatible with float dtype with BackendHandler.update_backend(backend_to_test) as ivy_backend: init_flags.as_variable = [ v if ivy_backend.is_float_dtype(d) else False for v, d in zip(init_flags.as_variable, init_input_dtypes) ] # Save original constructor data for inplace operations constructor_data = OrderedDict( args_np=args_np_constructor, arg_np_vals=con_arg_np_vals, args_idxs=con_args_idxs, kwargs_np=kwargs_np_constructor, kwarg_np_vals=con_kwarg_np_vals, kwargs_idxs=con_kwargs_idxs, input_dtypes=init_input_dtypes, test_flags=init_flags, on_device=on_device, ) org_con_data = copy.deepcopy(constructor_data) # Create Args args_constructor, kwargs_constructor = create_args_kwargs( backend=backend_to_test, **constructor_data ) # end constructor # # method arguments # method_input_dtypes = ivy.default(method_input_dtypes, []) args_np_method, kwargs_np_method = kwargs_to_args_n_kwargs( num_positional_args=method_flags.num_positional_args, kwargs=method_all_as_kwargs_np, ) # extract all arrays from the arguments and keyword arguments met_arg_np_vals, met_args_idxs, met_c_arg_vals = _get_nested_np_arrays( args_np_method ) met_kwarg_np_vals, met_kwargs_idxs, met_c_kwarg_vals = _get_nested_np_arrays( kwargs_np_method ) # make all lists equal in length num_arrays_method = 
met_c_arg_vals + met_c_kwarg_vals if len(method_input_dtypes) < num_arrays_method: method_input_dtypes = [method_input_dtypes[0] for _ in range(num_arrays_method)] if len(method_flags.as_variable) < num_arrays_method: method_flags.as_variable = [ method_flags.as_variable[0] for _ in range(num_arrays_method) ] if len(method_flags.native_arrays) < num_arrays_method: method_flags.native_arrays = [ method_flags.native_arrays[0] for _ in range(num_arrays_method) ] if len(method_flags.container) < num_arrays_method: method_flags.container = [ method_flags.container[0] for _ in range(num_arrays_method) ] with BackendHandler.update_backend(backend_to_test) as ivy_backend: method_flags.as_variable = [ v if ivy_backend.is_float_dtype(d) else False for v, d in zip(method_flags.as_variable, method_input_dtypes) ] # Create Args args_method, kwargs_method = create_args_kwargs( backend=backend_to_test, args_np=args_np_method, arg_np_vals=met_arg_np_vals, args_idxs=met_args_idxs, kwargs_np=kwargs_np_method, kwarg_np_vals=met_kwarg_np_vals, kwargs_idxs=met_kwargs_idxs, input_dtypes=method_input_dtypes, test_flags=method_flags, on_device=on_device, ) # End Method # # Run testing with BackendHandler.update_backend(backend_to_test) as ivy_backend: ins = ivy_backend.__dict__[class_name](*args_constructor, **kwargs_constructor) # TODO remove when the handle_method can properly compute unsupported dtypes if any( dtype in ivy_backend.function_unsupported_dtypes( ins.__getattribute__(method_name) ) for dtype in method_input_dtypes ): return v_np = None if isinstance(ins, ivy_backend.Module): if init_with_v: v = ivy_backend.Container( ins._create_variables( device=on_device, dtype=method_input_dtypes[0] ) ) ins = ivy_backend.__dict__[class_name]( *args_constructor, **kwargs_constructor, v=v ) v = ins.__getattribute__("v") v_np = v.cont_map( lambda x, kc: ivy_backend.to_numpy(x) if ivy_backend.is_array(x) else x ) if method_with_v: kwargs_method = dict(**kwargs_method, v=v) ret, ret_np_flat = get_ret_and_flattened_np_array( backend_to_test, ins.__getattribute__(method_name), *args_method, test_trace=test_trace, precision_mode=method_flags.precision_mode, **kwargs_method, ) if isinstance(ret, ivy_backend.Array): ret_device = ivy_backend.dev(ret) else: ret_device = None fw_list = gradient_unsupported_dtypes(fn=ins.__getattribute__(method_name)) return ( ret, ret_np_flat, ret_device, org_con_data, args_np_method, met_arg_np_vals, met_args_idxs, kwargs_np_method, met_kwarg_np_vals, met_kwargs_idxs, v_np, fw_list, ) def test_method_ground_truth_computation( ground_truth_backend, on_device, org_con_data, args_np_method, met_arg_np_vals, met_args_idxs, kwargs_np_method, met_kwarg_np_vals, met_kwargs_idxs, method_input_dtypes, method_flags, class_name, method_name, test_trace, v_np, ): with BackendHandler.update_backend(ground_truth_backend) as gt_backend: gt_backend.set_default_device(on_device) args_gt_constructor, kwargs_gt_constructor = create_args_kwargs( backend=ground_truth_backend, **org_con_data ) args_gt_method, kwargs_gt_method = create_args_kwargs( backend=ground_truth_backend, args_np=args_np_method, arg_np_vals=met_arg_np_vals, args_idxs=met_args_idxs, kwargs_np=kwargs_np_method, kwarg_np_vals=met_kwarg_np_vals, kwargs_idxs=met_kwargs_idxs, input_dtypes=method_input_dtypes, test_flags=method_flags, on_device=on_device, ) ins_gt = gt_backend.__dict__[class_name]( *args_gt_constructor, **kwargs_gt_constructor ) # TODO this when the handle_method can properly compute unsupported dtypes if any( dtype in 
gt_backend.function_unsupported_dtypes( ins_gt.__getattribute__(method_name) ) for dtype in method_input_dtypes ): return if isinstance(ins_gt, gt_backend.Module): v_gt = v_np.cont_map( lambda x, kc: gt_backend.asarray(x) if isinstance(x, np.ndarray) else x ) kwargs_gt_method = dict(**kwargs_gt_method, v=v_gt) ret_from_gt, ret_np_from_gt_flat = get_ret_and_flattened_np_array( ground_truth_backend, ins_gt.__getattribute__(method_name), *args_gt_method, test_trace=test_trace, precision_mode=method_flags.precision_mode, **kwargs_gt_method, ) assert gt_backend.nested_map( lambda x: gt_backend.is_ivy_array(x) if gt_backend.is_array(x) else True, ret_from_gt, ), f"Ground-truth method returned non-ivy arrays: {ret_from_gt}" fw_list2 = gradient_unsupported_dtypes(fn=ins_gt.__getattribute__(method_name)) # for k, v in fw_list2.items(): # if k not in fw_list: # fw_list[k] = [] # fw_list[k].extend(v) if isinstance(ret_from_gt, gt_backend.Array): ret_from_gt_device = gt_backend.dev(ret_from_gt) else: ret_from_gt_device = None return ret_from_gt, ret_np_from_gt_flat, ret_from_gt_device, fw_list2 def test_method( *, init_input_dtypes: Optional[List[ivy.Dtype]] = None, method_input_dtypes: Optional[List[ivy.Dtype]] = None, init_all_as_kwargs_np: Optional[dict] = None, method_all_as_kwargs_np: Optional[dict] = None, init_flags: pf.MethodTestFlags, method_flags: pf.MethodTestFlags, class_name: str, method_name: str = "__call__", init_with_v: bool = False, method_with_v: bool = False, rtol_: Optional[float] = None, atol_: float = 1e-06, tolerance_dict=None, test_values: Union[bool, str] = True, test_gradients: bool = False, xs_grad_idxs=None, ret_grad_idxs=None, test_trace: bool = False, backend_to_test: str, ground_truth_backend: str, on_device: str, return_flat_np_arrays: bool = False, ): """Test a class-method that consumes (or returns) arrays for the current backend by comparing the result with numpy. Parameters ---------- init_input_dtypes data types of the input arguments to the constructor in order. init_as_variable_flags dictates whether the corresponding input argument passed to the constructor should be treated as an ivy.Array. init_num_positional_args number of input arguments that must be passed as positional arguments to the constructor. init_native_array_flags dictates whether the corresponding input argument passed to the constructor should be treated as a native array. init_all_as_kwargs_np: input arguments to the constructor as keyword arguments. method_input_dtypes data types of the input arguments to the method in order. method_as_variable_flags dictates whether the corresponding input argument passed to the method should be treated as an ivy.Array. method_num_positional_args number of input arguments that must be passed as positional arguments to the method. method_native_array_flags dictates whether the corresponding input argument passed to the method should be treated as a native array. method_container_flags dictates whether the corresponding input argument passed to the method should be treated as an ivy Container. method_all_as_kwargs_np: input arguments to the method as keyword arguments. class_name name of the class to test. method_name name of the method to test. init_with_v if the class being tested is an ivy.Module, then setting this flag as True will call the constructor with the variables v passed explicitly. method_with_v if the class being tested is an ivy.Module, then setting this flag as True will call the method with the variables v passed explicitly. 
rtol_ relative tolerance value. atol_ absolute tolerance value. test_values can be a bool or a string to indicate whether correctness of values should be tested. If the value is `with_v`, shapes are tested but not values. test_gradients if True, test for the correctness of gradients. xs_grad_idxs Indices of the input arrays to compute gradients with respect to. If None, gradients are returned with respect to all input arrays. (Default value = None) ret_grad_idxs Indices of the returned arrays for which to return computed gradients. If None, gradients are returned for all returned arrays. (Default value = None) test_trace If True, test for the correctness of tracing. ground_truth_backend Ground Truth Backend to compare the result-values. device_ The device on which to create arrays. return_flat_np_arrays If test_values is False, this flag dictates whether the original returns are returned, or whether the flattened numpy arrays are returned. Returns ------- ret optional, return value from the function ret_gt optional, return value from the Ground Truth function """ # check to see if multiprocessing is to be used if mod_backend[backend_to_test]: # yep, multiprocessing proc, input_queue, output_queue = mod_backend[backend_to_test] input_queue.put( ( "method_backend_computation", init_input_dtypes, init_flags, backend_to_test, init_all_as_kwargs_np, on_device, method_input_dtypes, method_flags, method_all_as_kwargs_np, class_name, method_name, init_with_v, test_trace, method_with_v, ) ) ( ret, ret_np_flat, ret_device, org_con_data, args_np_method, met_arg_np_vals, met_args_idxs, kwargs_np_method, met_kwarg_np_vals, met_kwargs_idxs, v_np, fw_list, ) = output_queue.get() else: ( ret, ret_np_flat, ret_device, org_con_data, args_np_method, met_arg_np_vals, met_args_idxs, kwargs_np_method, met_kwarg_np_vals, met_kwargs_idxs, v_np, fw_list, ) = test_method_backend_computation( init_input_dtypes, init_flags, backend_to_test, init_all_as_kwargs_np, on_device, method_input_dtypes, method_flags, method_all_as_kwargs_np, class_name, method_name, init_with_v, test_trace, method_with_v, ) # Compute the return with a Ground Truth backend if mod_backend[ground_truth_backend]: # yep, multiprocessing proc, input_queue, output_queue = mod_backend[ground_truth_backend] input_queue.put( ( "method_ground_truth_computation", ground_truth_backend, on_device, org_con_data, args_np_method, met_arg_np_vals, met_args_idxs, kwargs_np_method, met_kwarg_np_vals, met_kwargs_idxs, method_input_dtypes, method_flags, class_name, method_name, test_trace, v_np, ) ) ( ret_from_gt, ret_np_from_gt_flat, ret_from_gt_device, fw_list2, ) = output_queue.get() else: ( ret_from_gt, ret_np_from_gt_flat, ret_from_gt_device, fw_list2, ) = test_method_ground_truth_computation( ground_truth_backend, on_device, org_con_data, args_np_method, met_arg_np_vals, met_args_idxs, kwargs_np_method, met_kwarg_np_vals, met_kwargs_idxs, method_input_dtypes, method_flags, class_name, method_name, test_trace, v_np, ) for k, v in fw_list2.items(): if k not in fw_list: fw_list[k] = [] fw_list[k].extend(v) # gradient test # TODO enable gradient testing # if ( # test_gradients # and not backend_to_test == "numpy" # and "bool" not in method_input_dtypes # and not any(ivy.is_complex_dtype(d) for d in method_input_dtypes) # ): # if fw in fw_list: # if ivy.nested_argwhere( # method_all_as_kwargs_np, # lambda x: x.dtype in fw_list[fw] if isinstance(x, np.ndarray) # else None, # ): # pass # else: # gradient_test( # fn=[ # ins.__getattribute__(method_name), # 
ins_gt.__getattribute__(method_name), # ], # all_as_kwargs_np=method_all_as_kwargs_np, # args_np=args_np_method, # kwargs_np=kwargs_np_method, # input_dtypes=method_input_dtypes, # test_flags=method_flags, # test_trace=test_trace, # rtol_=rtol_, # atol_=atol_, # xs_grad_idxs=xs_grad_idxs, # ret_grad_idxs=ret_grad_idxs, # ground_truth_backend=ground_truth_backend, # on_device=on_device, # ) # else: # gradient_test( # fn=[ # ins.__getattribute__(method_name), # ins_gt.__getattribute__(method_name), # ], # all_as_kwargs_np=method_all_as_kwargs_np, # args_np=args_np_method, # kwargs_np=kwargs_np_method, # input_dtypes=method_input_dtypes, # test_flags=method_flags, # test_trace=test_trace, # rtol_=rtol_, # atol_=atol_, # xs_grad_idxs=xs_grad_idxs, # ret_grad_idxs=ret_grad_idxs, # ground_truth_backend=ground_truth_backend, # on_device=on_device, # ) assert ret_device == ret_from_gt_device, ( f"ground truth backend ({ground_truth_backend}) returned array on" f" device {ret_from_gt_device} but target backend ({backend_to_test})" f" returned array on device {ret_device}" ) if ret_device is not None: assert ret_device == on_device, ( f"device is set to {on_device}, but ground truth produced array on" f" {ret_device}" ) # assuming value test will be handled manually in the test function if not test_values: if return_flat_np_arrays: return ret_np_flat, ret_np_from_gt_flat return ret, ret_from_gt # value test if isinstance(rtol_, dict): rtol_ = _get_framework_rtol(rtol_, backend_to_test) if isinstance(atol_, dict): atol_ = _get_framework_atol(atol_, backend_to_test) value_test( backend=backend_to_test, ground_truth_backend=ground_truth_backend, ret_np_flat=ret_np_flat, ret_np_from_gt_flat=ret_np_from_gt_flat, rtol=rtol_, atol=atol_, specific_tolerance_dict=tolerance_dict, ) def test_frontend_method( *, init_input_dtypes: Union[ivy.Dtype, List[ivy.Dtype]] = None, method_input_dtypes: Union[ivy.Dtype, List[ivy.Dtype]], init_flags, method_flags, init_all_as_kwargs_np: Optional[dict] = None, method_all_as_kwargs_np: dict, frontend: str, frontend_method_data: FrontendMethodData, backend_to_test: str, on_device, rtol_: Optional[float] = None, atol_: float = 1e-06, tolerance_dict: Optional[dict] = None, test_values: Union[bool, str] = True, ): """Test a class-method that consumes (or returns) arrays for the current backend by comparing the result with numpy. Parameters ---------- init_input_dtypes data types of the input arguments to the constructor in order. init_as_variable_flags dictates whether the corresponding input argument passed to the constructor should be treated as an ivy.Variable. init_num_positional_args number of input arguments that must be passed as positional arguments to the constructor. init_native_array_flags dictates whether the corresponding input argument passed to the constructor should be treated as a native array. init_all_as_kwargs_np: input arguments to the constructor as keyword arguments. method_input_dtypes data types of the input arguments to the method in order. method_all_as_kwargs_np: input arguments to the method as keyword arguments. frontend current frontend (framework). rtol_ relative tolerance value. atol_ absolute tolerance value. tolerance_dict dictionary of tolerance values for specific dtypes. test_values can be a bool or a string to indicate whether correctness of values should be tested. If the value is `with_v`, shapes are tested but not values. 
Returns ------- ret optional, return value from the function ret_gt optional, return value from the Ground Truth function """ # ToDo add with_backend refactor in GC _switch_backend_context(method_flags.test_trace) # Constructor arguments # args_np_constructor, kwargs_np_constructor = kwargs_to_args_n_kwargs( num_positional_args=init_flags.num_positional_args, kwargs=init_all_as_kwargs_np, ) # extract all arrays from the arguments and keyword arguments con_arg_np_vals, con_args_idxs, con_c_arg_vals = _get_nested_np_arrays( args_np_constructor ) con_kwarg_np_vals, con_kwargs_idxs, con_c_kwarg_vals = _get_nested_np_arrays( kwargs_np_constructor ) # make all lists equal in length num_arrays_constructor = con_c_arg_vals + con_c_kwarg_vals if len(init_input_dtypes) < num_arrays_constructor: init_input_dtypes = [ init_input_dtypes[0] for _ in range(num_arrays_constructor) ] if len(init_flags.as_variable) < num_arrays_constructor: init_flags.as_variable = [ init_flags.as_variable[0] for _ in range(num_arrays_constructor) ] if len(init_flags.native_arrays) < num_arrays_constructor: init_flags.native_arrays = [ init_flags.native_arrays[0] for _ in range(num_arrays_constructor) ] # update variable flags to be compatible with float dtype with BackendHandler.update_backend(backend_to_test) as ivy_backend: init_flags.as_variable = [ v if ivy_backend.is_float_dtype(d) else False for v, d in zip(init_flags.as_variable, init_input_dtypes) ] # Create Args args_constructor, kwargs_constructor = create_args_kwargs( backend=backend_to_test, args_np=args_np_constructor, arg_np_vals=con_arg_np_vals, args_idxs=con_args_idxs, kwargs_np=kwargs_np_constructor, kwarg_np_vals=con_kwarg_np_vals, kwargs_idxs=con_kwargs_idxs, input_dtypes=init_input_dtypes, test_flags=init_flags, on_device=on_device, ) # End constructor # # Method arguments # args_np_method, kwargs_np_method = kwargs_to_args_n_kwargs( num_positional_args=method_flags.num_positional_args, kwargs=method_all_as_kwargs_np, ) # extract all arrays from the arguments and keyword arguments met_arg_np_vals, met_args_idxs, met_c_arg_vals = _get_nested_np_arrays( args_np_method ) met_kwarg_np_vals, met_kwargs_idxs, met_c_kwarg_vals = _get_nested_np_arrays( kwargs_np_method ) # make all lists equal in length num_arrays_method = met_c_arg_vals + met_c_kwarg_vals if len(method_input_dtypes) < num_arrays_method: method_input_dtypes = [method_input_dtypes[0] for _ in range(num_arrays_method)] if len(method_flags.as_variable) < num_arrays_method: method_flags.as_variable = [ method_flags.as_variable[0] for _ in range(num_arrays_method) ] if len(method_flags.native_arrays) < num_arrays_method: method_flags.native_arrays = [ method_flags.native_arrays[0] for _ in range(num_arrays_method) ] with BackendHandler.update_backend(backend_to_test) as ivy_backend: if frontend == "jax": importlib.import_module("ivy.functional.frontends.jax").config.update( "jax_enable_x64", True ) method_flags.as_variable = [ v if ivy_backend.is_float_dtype(d) else False for v, d in zip(method_flags.as_variable, method_input_dtypes) ] # Create Args args_method, kwargs_method = create_args_kwargs( backend=backend_to_test, args_np=args_np_method, arg_np_vals=met_arg_np_vals, args_idxs=met_args_idxs, kwargs_np=kwargs_np_method, kwarg_np_vals=met_kwarg_np_vals, kwargs_idxs=met_kwargs_idxs, input_dtypes=method_input_dtypes, test_flags=method_flags, on_device=on_device, ) # End Method # local_importer = ivy_backend.utils.dynamic_import create_frontend_array = local_importer.import_module( 
f"ivy.functional.frontends.{frontend}" )._frontend_array args_constructor_ivy, kwargs_constructor_ivy = ivy_backend.args_to_ivy( *args_constructor, **kwargs_constructor ) args_method_ivy, kwargs_method_ivy = ivy_backend.args_to_ivy( *args_method, **kwargs_method ) args_constructor_np = ivy_backend.nested_map( lambda x: ( ivy_backend.to_numpy(x._data) if isinstance(x, ivy_backend.Array) else x ), args_constructor_ivy, shallow=False, ) kwargs_constructor_np = ivy_backend.nested_map( lambda x: ( ivy_backend.to_numpy(x._data) if isinstance(x, ivy_backend.Array) else x ), kwargs_constructor_ivy, shallow=False, ) args_method_np = ivy_backend.nested_map( lambda x: ( ivy_backend.to_numpy(x._data) if isinstance(x, ivy_backend.Array) else x ), args_method_ivy, shallow=False, ) kwargs_method_np = ivy_backend.nested_map( lambda x: ( ivy_backend.to_numpy(x._data) if isinstance(x, ivy_backend.Array) else x ), kwargs_method_ivy, shallow=False, ) frontend_fw_module = ivy_backend.utils.dynamic_import.import_module( frontend_method_data.ivy_init_module ) ivy_frontend_creation_fn = getattr( frontend_fw_module, frontend_method_data.init_name ) ins = ivy_frontend_creation_fn(*args_constructor, **kwargs_constructor) # If we have inplace operations, we need to create the frontend arrays # before running the method, so that after running the method # We can check whether the return is still an instance of the frontend array if method_flags.inplace: # make copies of the args and kwargs copy_args_method = copy.deepcopy(args_method) copy_kwargs_method = copy.deepcopy(kwargs_method) copy_ins = ivy_frontend_creation_fn(*args_constructor, **kwargs_constructor) frontend_ret_ins = copy_ins.__getattribute__( frontend_method_data.method_name )(*copy_args_method, **copy_kwargs_method) assert frontend_ret_ins is copy_ins, ( "Inplace method did not return the same instance of the" f" frontend array, expected {copy_ins}, got {frontend_ret_ins}" ) ret = get_frontend_ret( backend_to_test, ins.__getattribute__(frontend_method_data.method_name), *args_method_ivy, frontend_array_function=( create_frontend_array if method_flags.test_trace else None ), test_trace=method_flags.test_trace, precision_mode=method_flags.precision_mode, **kwargs_method_ivy, ) # test if return is frontend _assert_frontend_ret(ret, for_fn=False) ret_np_flat = flatten_frontend_to_np( ret=ret, backend=backend_to_test, ) # Compute the return with the native frontend framework frontend_config = get_frontend_config(frontend) args_constructor_frontend = ivy.nested_map( lambda x: frontend_config.native_array(x) if isinstance(x, np.ndarray) else x, args_constructor_np, shallow=False, ) kwargs_constructor_frontend = ivy.nested_map( lambda x: frontend_config.native_array(x) if isinstance(x, np.ndarray) else x, kwargs_constructor_np, shallow=False, ) args_method_frontend = ivy.nested_map( lambda x: ( frontend_config.native_array(x) if isinstance(x, np.ndarray) else ( frontend_config.as_native_dtype(x) if isinstance(x, frontend_config.Dtype) else ( frontend_config.as_native_device(x) if isinstance(x, frontend_config.Device) else x ) ) ), args_method_np, shallow=False, ) kwargs_method_frontend = ivy.nested_map( lambda x: frontend_config.native_array(x) if isinstance(x, np.ndarray) else x, kwargs_method_np, shallow=False, ) # change ivy dtypes to native dtypes if "dtype" in kwargs_method_frontend: kwargs_method_frontend["dtype"] = frontend_config.as_native_dtype( kwargs_method_frontend["dtype"] ) # change ivy device to native devices if "device" in kwargs_method_frontend: 
kwargs_method_frontend["device"] = frontend_config.as_native_device( kwargs_method_frontend["device"] ) frontend_creation_fn = getattr( importlib.import_module(frontend_method_data.framework_init_module), frontend_method_data.init_name, ) ins_gt = frontend_creation_fn( *args_constructor_frontend, **kwargs_constructor_frontend ) frontend_ret = ins_gt.__getattribute__(frontend_method_data.method_name)( *args_method_frontend, **kwargs_method_frontend ) if frontend == "tensorflow" and isinstance(frontend_ret, tf.TensorShape): frontend_ret_np_flat = [np.asarray(frontend_ret, dtype=np.int32)] else: frontend_ret_np_flat = flatten_frontend_fw_to_np( frontend_ret, frontend_config.isscalar, frontend_config.is_native_array, frontend_config.to_numpy, ) # assuming value test will be handled manually in the test function if not test_values: return ret, frontend_ret # value test if isinstance(rtol_, dict): rtol_ = _get_framework_rtol(rtol_, backend_to_test) if isinstance(atol_, dict): atol_ = _get_framework_atol(atol_, backend_to_test) value_test( ret_np_flat=ret_np_flat, ret_np_from_gt_flat=frontend_ret_np_flat, rtol=rtol_, atol=atol_, specific_tolerance_dict=tolerance_dict, backend=backend_to_test, ground_truth_backend=frontend, ) # Helpers DEFAULT_RTOL = None DEFAULT_ATOL = 1e-06 def _get_framework_rtol(rtols: dict, current_fw: str): if current_fw in rtols: return rtols[current_fw] return DEFAULT_RTOL def _get_framework_atol(atols: dict, current_fw: str): if current_fw in atols: return atols[current_fw] return DEFAULT_ATOL def _get_nested_np_arrays(nest): """Search for a NumPy arrays in a nest. Parameters ---------- nest nest to search in. Returns ------- Items found, indices, and total number of arrays found """ indices = ivy.nested_argwhere(nest, lambda x: isinstance(x, np.ndarray)) ret = ivy.multi_index_nest(nest, indices) return ret, indices, len(ret) def create_args_kwargs( *, backend: str, args_np, arg_np_vals, args_idxs, kwargs_np, kwarg_np_vals, kwargs_idxs, input_dtypes, test_flags: Union[pf.FunctionTestFlags, pf.MethodTestFlags], on_device, ): """Create arguments and keyword-arguments for the function to test. Parameters ---------- args_np A dictionary of arguments in Numpy. kwargs_np A dictionary of keyword-arguments in Numpy. input_dtypes data-types of the input arguments and keyword-arguments. 
Returns ------- Backend specific arguments, keyword-arguments """ # create args with BackendHandler.update_backend(backend) as ivy_backend: args = ivy_backend.copy_nest(args_np, to_mutable=False) ivy_backend.set_nest_at_indices( args, args_idxs, test_flags.apply_flags( arg_np_vals, input_dtypes, 0, backend=backend, on_device=on_device, ), ) # create kwargs kwargs = ivy_backend.copy_nest(kwargs_np, to_mutable=False) ivy_backend.set_nest_at_indices( kwargs, kwargs_idxs, test_flags.apply_flags( kwarg_np_vals, input_dtypes, len(arg_np_vals), backend=backend, on_device=on_device, ), ) return args, kwargs def convtrue(argument): """Convert NativeClass in argument to true framework counter part.""" if isinstance(argument, NativeClass): return argument._native_class return argument def wrap_frontend_function_args(argument): """Wrap frontend function arguments to return native arrays.""" # TODO pass as an argument and do not rely on global state with BackendHandler.update_backend(t_globals.CURRENT_FRONTEND_STR) as ivy_frontend: if ivy_frontend.nested_any( argument, lambda x: hasattr(x, "__module__") and x.__module__.startswith("ivy.functional.frontends"), ): return ivy_frontend.output_to_native_arrays( ivy_frontend.frontend_outputs_to_ivy_arrays(argument) ) if ivy_frontend.nested_any(argument, lambda x: isinstance(x, ivy_frontend.Shape)): return argument.shape return argument def kwargs_to_args_n_kwargs(*, num_positional_args, kwargs): """Split the kwargs into args and kwargs. The first num_positional_args ported to args. """ args = [v for v in list(kwargs.values())[:num_positional_args]] kwargs = {k: kwargs[k] for k in list(kwargs.keys())[num_positional_args:]} return args, kwargs def flatten(*, backend: str, ret): """Return a flattened numpy version of the arrays in ret.""" if not isinstance(ret, tuple): ret = (ret,) with BackendHandler.update_backend(backend) as ivy_backend: ret_idxs = ivy_backend.nested_argwhere(ret, ivy_backend.is_ivy_array) # no ivy array in the returned values, which means it returned scalar if len(ret_idxs) == 0: ret_idxs = ivy_backend.nested_argwhere(ret, ivy_backend.isscalar) ret_flat = ivy_backend.multi_index_nest(ret, ret_idxs) ret_flat = [ ivy_backend.asarray( x, dtype=ivy_backend.Dtype(str(np.asarray(x).dtype)) ) for x in ret_flat ] else: ret_flat = ivy_backend.multi_index_nest(ret, ret_idxs) return ret_flat def flatten_frontend(*, ret, backend: str, frontend_array_fn=None): """Return a flattened version of the frontend arrays in ret.""" if not isinstance(ret, tuple): ret = (ret,) with BackendHandler.update_backend(backend) as ivy_backend: ret_idxs = ivy_backend.nested_argwhere(ret, _is_frontend_array) if len(ret_idxs) == 0: # handle scalars ret_idxs = ivy_backend.nested_argwhere(ret, ivy_backend.isscalar) ret_flat = ivy_backend.multi_index_nest(ret, ret_idxs) ret_flat = [frontend_array_fn(x) for x in ret_flat] else: ret_flat = ivy_backend.multi_index_nest(ret, ret_idxs) return ret_flat def flatten_frontend_fw_to_np( frontend_ret, isscalar_func, is_native_array_func, to_numpy_func ): if not isinstance(frontend_ret, tuple): frontend_ret = (frontend_ret,) frontend_ret_idxs = ivy.nested_argwhere(frontend_ret, is_native_array_func) if len(frontend_ret_idxs) == 0: # handle scalars frontend_ret_idxs = ivy.nested_argwhere(frontend_ret, isscalar_func) frontend_ret_flat = ivy.multi_index_nest(frontend_ret, frontend_ret_idxs) else: frontend_ret_flat = ivy.multi_index_nest(frontend_ret, frontend_ret_idxs) return [to_numpy_func(x) for x in frontend_ret_flat] def 
flatten_and_to_np(*, backend: str, ret): # flatten the return ret_flat = flatten(backend=backend, ret=ret) with BackendHandler.update_backend(backend) as ivy_backend: ret = [ivy_backend.to_numpy(x) for x in ret_flat] return ret def flatten_frontend_to_np(*, backend: str, ret): # flatten the return if not isinstance(ret, tuple): ret = (ret,) with BackendHandler.update_backend(backend) as ivy_backend: ret_idxs = ivy_backend.nested_argwhere(ret, _is_frontend_array) if len(ret_idxs) == 0: # handle scalars ret_idxs = ivy_backend.nested_argwhere(ret, ivy_backend.isscalar) ret_flat = ivy_backend.multi_index_nest(ret, ret_idxs) return [ivy_backend.to_numpy(x) for x in ret_flat] else: ret_flat = ivy_backend.multi_index_nest(ret, ret_idxs) return [ivy_backend.to_numpy(x.ivy_array) for x in ret_flat] def get_ret_and_flattened_np_array( backend_to_test: str, fn, *args, test_trace=False, precision_mode=False, **kwargs ): """Run func with args and kwargs. Return the result along with its flattened version. """ fn = traced_if_required( backend_to_test, fn, test_trace=test_trace, args=args, kwargs=kwargs ) with BackendHandler.update_backend(backend_to_test) as ivy_backend: with ivy_backend.PreciseMode(precision_mode): ret = fn(*args, **kwargs) def map_fn(x): if _is_frontend_array(x): return x.ivy_array elif ivy_backend.is_native_array(x) or isinstance(x, np.ndarray): return ivy_backend.to_ivy(x) return x ret = ivy_backend.nested_map(map_fn, ret, include_derived={"tuple": True}) return ret, flatten_and_to_np(backend=backend_to_test, ret=ret) def get_frontend_ret( backend, frontend_fn, *args, frontend_array_function=None, precision_mode=False, test_trace: bool = False, **kwargs, ): frontend_fn = traced_if_required( backend, frontend_fn, test_trace=test_trace, args=args, kwargs=kwargs ) with BackendHandler.update_backend(backend) as ivy_backend: if test_trace: args, kwargs = ivy_backend.nested_map( _frontend_array_to_ivy, (args, kwargs), include_derived={"tuple": True} ) with ivy_backend.PreciseMode(precision_mode): ret = frontend_fn(*args, **kwargs) if test_trace: assert frontend_array_function is not None ret = ivy_backend.nested_map( arrays_to_frontend(backend, frontend_array_function), ret, include_derived={"tuple": True}, ) return ret def _get_transpiled_data_if_required( frontend_fn, frontend_fw_fn, frontend, backend, fn_name, generate_frontend_arrays, args_for_test, kwargs_for_test, frontend_fw_args, frontend_fw_kwargs, ): iterations = 1 # for backend transpilation with BackendHandler.update_backend(backend) as ivy_backend: if generate_frontend_arrays: args_for_test, kwargs_for_test = ivy.nested_map( _frontend_array_to_ivy, (args_for_test, kwargs_for_test), include_derived={"tuple": True}, ) else: args_for_test, kwargs_for_test = ivy_backend.args_to_ivy( *args_for_test, **kwargs_for_test ) traced_fn = traced_if_required( backend, frontend_fn, test_trace=True, args=args_for_test, kwargs=kwargs_for_test, ) # running inference to get runtime frontend_timings = [] frontend_fw_timings = [] for i in range(0, iterations): # timing the traced_fn start = time.time() traced_fn(*args_for_test, **kwargs_for_test) end = time.time() frontend_timings.append(end - start) # timing the frontend_fw_fn start = time.time() frontend_fw_fn(*frontend_fw_args, **frontend_fw_kwargs) end = time.time() frontend_fw_timings.append(end - start) # compile to get ivy nodes with BackendHandler.update_backend(backend) as ivy_backend: traced_fn_to_ivy = ivy_backend.trace_graph( frontend_fn, to="ivy", args=args_for_test, 
kwargs=kwargs_for_test ) frontend_time = np.mean(frontend_timings).item() frontend_fw_time = np.mean(frontend_fw_timings).item() backend_nodes = len(traced_fn._functions) ivy_nodes = len(traced_fn_to_ivy._functions) data = { "frontend": frontend, "frontend_func": fn_name, "args": str(args_for_test), "kwargs": str(kwargs_for_test), "time": frontend_time, "fw_time": frontend_fw_time, "nodes": backend_nodes, "ivy_nodes": ivy_nodes, } # creating json object and creating a file _create_transpile_report(data, backend, "report.json") def args_to_container(array_args): array_args_container = ivy.Container({str(k): v for k, v in enumerate(array_args)}) return array_args_container def as_lists(*args): """Change the elements in args to be of type list.""" return (a if isinstance(a, list) else [a] for a in args) def gradient_incompatible_function(*, fn): return ( not ivy.supports_gradients and hasattr(fn, "computes_gradients") and fn.computes_gradients ) def gradient_unsupported_dtypes(*, fn): visited = set() to_visit = [fn] out, res = {}, {} while to_visit: fn = to_visit.pop() if fn in visited: continue visited.add(fn) unsupported_grads = ( fn.unsupported_gradients if hasattr(fn, "unsupported_gradients") else {} ) for k, v in unsupported_grads.items(): if k not in out: out[k] = [] out[k].extend(v) # skip if it's not a function if not (inspect.isfunction(fn) or inspect.ismethod(fn)): continue fl = _get_function_list(fn) res = _get_functions_from_string(fl, __import__(fn.__module__)) to_visit.extend(res) return out def _is_frontend_array(x): return hasattr(x, "ivy_array") def _frontend_array_to_ivy(x): if _is_frontend_array(x): return x.ivy_array else: return x def args_to_frontend( backend: str, *args, frontend_array_fn=None, include_derived=None, **kwargs ): with BackendHandler.update_backend(backend) as ivy_backend: frontend_args = ivy_backend.nested_map( arrays_to_frontend(backend=backend, frontend_array_fn=frontend_array_fn), args, include_derived, shallow=False, ) frontend_kwargs = ivy_backend.nested_map( arrays_to_frontend(backend=backend, frontend_array_fn=frontend_array_fn), kwargs, include_derived, shallow=False, ) return frontend_args, frontend_kwargs def arrays_to_frontend(backend: str, frontend_array_fn): with BackendHandler.update_backend(backend) as ivy_backend: def _new_fn(x): if _is_frontend_array(x): return x elif ivy_backend.is_array(x): if tuple(x.shape) == (): try: ret = frontend_array_fn( x, dtype=ivy_backend.Dtype(str(x.dtype)) ) except ivy_backend.utils.exceptions.IvyException: ret = frontend_array_fn(x, dtype=ivy_backend.array(x).dtype) else: ret = frontend_array_fn(x) return ret return x return _new_fn def _switch_backend_context(trace: bool): if trace: BackendHandler._update_context(BackendHandlerMode.SetBackend) else: ( BackendHandler._update_context(BackendHandlerMode.WithBackend) if BackendHandler._ctx_flag else None )
ivy/ivy_tests/test_ivy/helpers/function_testing.py/0
{ "file_path": "ivy/ivy_tests/test_ivy/helpers/function_testing.py", "repo_id": "ivy", "token_count": 47406 }
51
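A small, self-contained sketch (not part of the helpers module above) showing how two of its simplest helpers behave: kwargs_to_args_n_kwargs promotes the first N keyword values to positional arguments, and _get_nested_np_arrays locates every NumPy array in a nest. The mirror functions below are renamed so nothing in the module is shadowed, and the toy inputs are illustrative only.

import numpy as np
import ivy


def split_kwargs(*, num_positional_args, kwargs):
    # the first num_positional_args values are promoted to positional args
    args = list(kwargs.values())[:num_positional_args]
    rest = {k: kwargs[k] for k in list(kwargs.keys())[num_positional_args:]}
    return args, rest


def nested_np_arrays(nest):
    # indices of every np.ndarray inside a (possibly nested) structure
    indices = ivy.nested_argwhere(nest, lambda x: isinstance(x, np.ndarray))
    return ivy.multi_index_nest(nest, indices), indices, len(indices)


all_as_kwargs_np = {"x": np.ones((2, 2)), "axis": 0, "keepdims": True}
args_np, kwargs_np = split_kwargs(num_positional_args=1, kwargs=all_as_kwargs_np)
# args_np == [np.ones((2, 2))] and kwargs_np == {"axis": 0, "keepdims": True}
vals, idxs, count = nested_np_arrays(args_np)
# count == 1 and idxs == [[0]]: only the "x" array was found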
from .base import FrontendConfigWithBackend def get_config(): return NumpyFrontendConfig() class NumpyFrontendConfig(FrontendConfigWithBackend): backend_str = "numpy"
ivy/ivy_tests/test_ivy/test_frontends/config/numpy.py/0
{ "file_path": "ivy/ivy_tests/test_ivy/test_frontends/config/numpy.py", "repo_id": "ivy", "token_count": 57 }
52
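The config module above only subclasses FrontendConfigWithBackend and sets backend_str, so a config for another framework follows the same shape. A hedged sketch of such a sibling module (the torch example and its placement are illustrative; the actual repository layout may differ):

from .base import FrontendConfigWithBackend


def get_config():
    return TorchFrontendConfig()


class TorchFrontendConfig(FrontendConfigWithBackend):
    # only the backend string differs between per-framework config modules
    backend_str = "torch"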
# global from hypothesis import given # local import ivy import ivy_tests.test_ivy.helpers as helpers from ivy.functional.frontends.jax.func_wrapper import ( inputs_to_ivy_arrays, outputs_to_frontend_arrays, to_ivy_arrays_and_back, ) from ivy.functional.frontends.jax.array import Array import ivy.functional.frontends.jax as jax_frontend # --- Helpers --- # # --------------- # def _fn(x, check_default=False): if check_default and jax_frontend.config.jax_enable_x64: ivy.utils.assertions.check_equal( ivy.default_float_dtype(), "float64", as_array=False ) ivy.utils.assertions.check_equal( ivy.default_int_dtype(), "int64", as_array=False ) return x # --- Main --- # # ------------ # @given( dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid", prune_function=False) ), ) def test_jax_inputs_to_ivy_arrays(dtype_and_x, backend_fw): ivy.set_backend(backend_fw) x_dtype, x = dtype_and_x # check for ivy array input_ivy = ivy.array(x[0], dtype=x_dtype[0]) output = inputs_to_ivy_arrays(_fn)(input_ivy) assert isinstance(output, ivy.Array) assert input_ivy.dtype == output.dtype assert ivy.all(input_ivy == output) # check for native array input_native = ivy.native_array(input_ivy) output = inputs_to_ivy_arrays(_fn)(input_native) assert isinstance(output, ivy.Array) assert ivy.as_ivy_dtype(input_native.dtype) == output.dtype assert ivy.all(ivy.equal(input_native, output.data)) # check for frontend array input_frontend = Array(x[0]) output = inputs_to_ivy_arrays(_fn)(input_frontend) assert isinstance(output, ivy.Array) assert input_frontend.dtype == output.dtype assert ivy.all(input_frontend.ivy_array == output) ivy.previous_backend() @given( dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid", prune_function=False) ), ) def test_jax_outputs_to_frontend_arrays(dtype_and_x, backend_fw): ivy.set_backend(backend_fw) x_dtype, x = dtype_and_x # check for ivy array input_ivy = ivy.array(x[0], dtype=x_dtype[0]) output = outputs_to_frontend_arrays(_fn)(input_ivy, check_default=True) assert isinstance(output, Array) assert input_ivy.dtype == output.dtype assert ivy.all(input_ivy == output.ivy_array) assert ivy.default_float_dtype_stack == ivy.default_int_dtype_stack == [] ivy.previous_backend() @given( dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid", prune_function=False) ), ) def test_jax_to_ivy_arrays_and_back(dtype_and_x, backend_fw): ivy.set_backend(backend_fw) x_dtype, x = dtype_and_x # check for ivy array input_ivy = ivy.array(x[0], dtype=x_dtype[0]) output = to_ivy_arrays_and_back(_fn)(input_ivy, check_default=True) assert isinstance(output, Array) assert input_ivy.dtype == output.dtype assert ivy.all(input_ivy == output.ivy_array) # check for native array input_native = ivy.native_array(input_ivy) output = to_ivy_arrays_and_back(_fn)(input_native, check_default=True) assert isinstance(output, Array) assert ivy.as_ivy_dtype(input_native.dtype) == output.dtype assert ivy.all(ivy.equal(input_native, output.ivy_array.data)) # check for frontend array input_frontend = Array(x[0]) output = to_ivy_arrays_and_back(_fn)(input_frontend, check_default=True) assert isinstance(output, Array) assert str(input_frontend.dtype) == str(output.dtype) assert ivy.all(input_frontend.ivy_array == output.ivy_array) assert ivy.default_float_dtype_stack == ivy.default_int_dtype_stack == [] ivy.previous_backend()
ivy/ivy_tests/test_ivy/test_frontends/test_jax/test_func_wrapper.py/0
{ "file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_jax/test_func_wrapper.py", "repo_id": "ivy", "token_count": 1610 }
53
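The tests above decorate a plain function with the JAX frontend wrappers and then assert on the types and dtypes of what comes back. A condensed standalone sketch of the same round trip, assuming a NumPy backend is installed; the identity function and the backend choice are illustrative only:

import ivy
from ivy.functional.frontends.jax.func_wrapper import to_ivy_arrays_and_back

ivy.set_backend("numpy")  # any installed backend works here


@to_ivy_arrays_and_back
def identity(x):
    # inside the wrapped function the input has been converted to an ivy.Array
    assert isinstance(x, ivy.Array)
    return x


out = identity(ivy.array([1.0, 2.0]))
# the output wrapper turns the returned ivy.Array into a jax frontend Array,
# whose underlying ivy array is reachable through .ivy_array
print(type(out), out.ivy_array)
ivy.previous_backend()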
# global from hypothesis import strategies as st import numpy as np # local import ivy_tests.test_ivy.helpers as helpers from ivy_tests.test_ivy.helpers import handle_frontend_test import ivy_tests.test_ivy.test_frontends.test_numpy.helpers as np_helpers # --- Helpers --- # # --------------- # @st.composite def _func_and_shape_dtype_helper(draw): # here assumption is that the input func will take the len(shape) no of parameters def add_numbers(*args): total = 0 for num in args: total += num return total shape = draw( helpers.get_shape( allow_none=False, min_num_dims=1, max_num_dims=3, min_dim_size=1, max_dim_size=3, ) ) dtype = draw(helpers.get_dtypes("valid")) return add_numbers, shape, dtype[0] # isin @st.composite def _isin_data_generation_helper(draw): dtype_and_x = helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, shared_dtype=True, ) return draw(dtype_and_x) # --- Main --- # # ------------ # # all @handle_frontend_test( fn_tree="jax.numpy.all", # aliases=["jax.numpy.alltrue"], deprecated since 0.4.12. # uncomment with multi-version testing pipeline dtype_and_x=helpers.dtype_and_values( available_dtypes=st.one_of(st.just(("bool",)), helpers.get_dtypes("integer")), ), test_with_out=st.just(False), ) def test_jax_all( *, dtype_and_x, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, a=x[0], ) # allclose @handle_frontend_test( fn_tree="jax.numpy.allclose", dtype_and_input=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, shared_dtype=True, ), equal_nan=st.booleans(), test_with_out=st.just(False), ) def test_jax_allclose( *, dtype_and_input, equal_nan, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, input = dtype_and_input helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, a=input[0], b=input[1], equal_nan=equal_nan, ) # any @handle_frontend_test( fn_tree="jax.numpy.any", # aliases=["jax.numpy.sometrue"], deprecated since 0.4.12. 
# uncomment with multi-version testing pipeline dtype_x_axis=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("valid"), valid_axis=True, max_axes_size=1, force_int_axis=True, ), keepdims=st.booleans(), where=np_helpers.where(), test_with_out=st.just(False), ) def test_jax_any( *, dtype_x_axis, keepdims, where, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtypes, x, axis = dtype_x_axis if isinstance(axis, tuple): axis = axis[0] where, input_dtypes, test_flags = np_helpers.handle_where_and_array_bools( where=where, input_dtype=input_dtypes, test_flags=test_flags, ) np_helpers.test_frontend_function( input_dtypes=input_dtypes, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, a=x[0], axis=axis, out=None, keepdims=keepdims, where=where, ) @handle_frontend_test( fn_tree="jax.numpy.array_equal", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, min_value=-np.inf, max_value=np.inf, shared_dtype=True, ), equal_nan=st.booleans(), test_with_out=st.just(False), ) def test_jax_array_equal( *, dtype_and_x, equal_nan, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, a1=x[0], a2=x[1], equal_nan=equal_nan, ) @handle_frontend_test( fn_tree="jax.numpy.array_equiv", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, shared_dtype=True, ), test_with_out=st.just(False), ) def test_jax_array_equiv( *, dtype_and_x, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, a1=x[0], a2=x[1], ) # bitwise_and # TODO: add testing for other dtypes @handle_frontend_test( fn_tree="jax.numpy.bitwise_and", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("bool"), num_arrays=2 ), test_with_out=st.just(False), ) def test_jax_bitwise_and( *, dtype_and_x, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x1=x[0], x2=x[1], ) # bitwise_not @handle_frontend_test( fn_tree="jax.numpy.bitwise_not", dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("bool")), test_with_out=st.just(False), ) def test_jax_bitwise_not( *, dtype_and_x, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], ) # bitwise_or # TODO: add testing for other dtypes @handle_frontend_test( fn_tree="jax.numpy.bitwise_or", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("bool"), num_arrays=2 ), test_with_out=st.just(False), ) def test_jax_bitwise_or( *, dtype_and_x, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, 
x1=x[0], x2=x[1], ) # bitwise_xor # TODO: add testing for other dtypes @handle_frontend_test( fn_tree="jax.numpy.bitwise_xor", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("bool"), num_arrays=2 ), test_with_out=st.just(False), ) def test_jax_bitwise_xor( *, dtype_and_x, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x1=x[0], x2=x[1], ) # equal @handle_frontend_test( fn_tree="jax.numpy.equal", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, shared_dtype=True, ), test_with_out=st.just(False), ) def test_jax_equal( dtype_and_x, frontend, test_flags, fn_tree, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, x1=x[0], x2=x[1], ) # fromfunction @handle_frontend_test( fn_tree="jax.numpy.fromfunction", input_dtype=helpers.get_dtypes("valid"), function_and_shape_and_dtype=_func_and_shape_dtype_helper(), test_with_out=st.just(False), ) def test_jax_fromfunction( input_dtype, function_and_shape_and_dtype, backend_fw, frontend, on_device, fn_tree, test_flags, ): function, shape, dtype = function_and_shape_and_dtype helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, function=function, shape=shape, dtype=dtype, ) # greater @handle_frontend_test( fn_tree="jax.numpy.greater", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, shared_dtype=True, ), test_with_out=st.just(False), ) def test_jax_greater( dtype_and_x, frontend, test_flags, fn_tree, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, x1=x[0], x2=x[1], ) # greater_equal @handle_frontend_test( fn_tree="jax.numpy.greater_equal", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, shared_dtype=True, ), test_with_out=st.just(False), ) def test_jax_greater_equal( dtype_and_x, frontend, test_flags, fn_tree, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, x1=x[0], x2=x[1], ) # invert @handle_frontend_test( fn_tree="jax.numpy.invert", dtypes_values=helpers.dtype_and_values( available_dtypes=st.one_of(st.just(("bool",)), helpers.get_dtypes("integer")), ), ) def test_jax_invert( dtypes_values, on_device, fn_tree, frontend, test_flags, backend_fw, ): x_dtypes, x = dtypes_values np_helpers.test_frontend_function( input_dtypes=x_dtypes, frontend=frontend, backend_to_test=backend_fw, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], ) # isclose @handle_frontend_test( fn_tree="jax.numpy.isclose", dtype_and_input=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), num_arrays=2, ), equal_nan=st.booleans(), test_with_out=st.just(False), ) def test_jax_isclose( *, dtype_and_input, equal_nan, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, input = dtype_and_input helpers.test_frontend_function( 
input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, a=input[0], b=input[1], equal_nan=equal_nan, ) # iscomplex @handle_frontend_test( fn_tree="jax.numpy.iscomplex", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("real_and_complex"), min_num_dims=1 ), test_with_out=st.just(False), ) def test_jax_iscomplex( dtype_and_x, frontend, on_device, *, fn_tree, test_flags, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], ) # iscomplexobj @handle_frontend_test( fn_tree="jax.numpy.iscomplexobj", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), ), test_with_out=st.just(False), ) def test_jax_iscomplexobj( dtype_and_x, frontend, on_device, *, fn_tree, test_flags, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], ) # isfinite @handle_frontend_test( fn_tree="jax.numpy.isfinite", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), allow_nan=True ), test_with_out=st.just(False), ) def test_jax_isfinite( *, dtype_and_x, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], ) @handle_frontend_test( fn_tree="jax.numpy.isin", assume_unique_and_dtype_and_x=_isin_data_generation_helper(), invert=st.booleans(), test_with_out=st.just(False), ) def test_jax_isin( *, assume_unique_and_dtype_and_x, invert, on_device, fn_tree, frontend, test_flags, backend_fw, ): x_and_dtype = assume_unique_and_dtype_and_x dtypes, values = x_and_dtype elements, test_elements = values helpers.test_frontend_function( input_dtypes=dtypes, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, element=elements, test_elements=test_elements, invert=invert, backend_to_test=backend_fw, ) # isinf @handle_frontend_test( fn_tree="jax.numpy.isinf", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), allow_inf=True ), test_with_out=st.just(False), ) def test_jax_isinf( *, dtype_and_x, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], ) # isnan @handle_frontend_test( fn_tree="jax.numpy.isnan", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=-np.inf, max_value=np.inf, min_num_dims=1, max_num_dims=3, min_dim_size=1, max_dim_size=3, allow_inf=True, ), ) def test_jax_isnan( *, dtype_and_x, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], ) # isneginf @handle_frontend_test( fn_tree="jax.numpy.isneginf", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=-np.inf, max_value=np.inf, 
min_num_dims=1, max_num_dims=3, min_dim_size=1, max_dim_size=3, allow_inf=True, ), test_with_out=st.just(False), ) def test_jax_isneginf( *, dtype_and_x, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], ) # isposinf @handle_frontend_test( fn_tree="jax.numpy.isposinf", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=-np.inf, max_value=np.inf, min_num_dims=1, max_num_dims=3, min_dim_size=1, max_dim_size=3, allow_inf=True, ), ) def test_jax_isposinf( *, dtype_and_x, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], ) # isreal @handle_frontend_test( fn_tree="jax.numpy.isreal", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=-np.inf, max_value=np.inf, min_num_dims=1, max_num_dims=3, min_dim_size=1, max_dim_size=3, allow_inf=True, ), ) def test_jax_isreal( *, dtype_and_x, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], ) @handle_frontend_test( fn_tree="jax.numpy.isrealobj", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), min_num_dims=1 ), test_with_out=st.just(False), ) def test_jax_isrealobj( dtype_and_x, frontend, on_device, *, fn_tree, test_flags, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], ) # isscalar @handle_frontend_test( fn_tree="jax.numpy.isscalar", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric") ), ) def test_jax_isscalar( *, dtype_and_x, on_device, fn_tree, frontend, backend_fw, test_flags, ): x_dtypes, x = dtype_and_x np_helpers.test_frontend_function( input_dtypes=x_dtypes, frontend=frontend, backend_to_test=backend_fw, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], ) # left_shift @handle_frontend_test( fn_tree="jax.numpy.left_shift", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("integer"), num_arrays=2, ), ) def test_jax_left_shift( *, dtype_and_x, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x1=x[0], x2=x[1], ) # less @handle_frontend_test( fn_tree="jax.numpy.less", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, shared_dtype=True, ), test_with_out=st.just(False), ) def test_jax_less( dtype_and_x, frontend, test_flags, fn_tree, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, x1=x[0], x2=x[1], ) # less_equal @handle_frontend_test( fn_tree="jax.numpy.less_equal", 
dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, shared_dtype=True, ), test_with_out=st.just(False), ) def test_jax_less_equal( dtype_and_x, frontend, test_flags, fn_tree, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, x1=x[0], x2=x[1], ) # logical_and @handle_frontend_test( fn_tree="jax.numpy.logical_and", dtypes_values=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("bool"), num_arrays=2, ), ) def test_jax_logical_and( dtypes_values, on_device, fn_tree, frontend, test_flags, backend_fw, ): x_dtypes, x = dtypes_values np_helpers.test_frontend_function( input_dtypes=x_dtypes, frontend=frontend, backend_to_test=backend_fw, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x1=x[0], x2=x[1], ) # logical_not @handle_frontend_test( fn_tree="jax.numpy.logical_not", dtypes_values=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("bool"), num_arrays=1, ), ) def test_jax_logical_not( dtypes_values, on_device, fn_tree, frontend, test_flags, backend_fw, ): x_dtypes, x = dtypes_values np_helpers.test_frontend_function( input_dtypes=x_dtypes, frontend=frontend, backend_to_test=backend_fw, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], ) # logical_or @handle_frontend_test( fn_tree="jax.numpy.logical_or", dtypes_values=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("bool"), num_arrays=2, ), ) def test_jax_logical_or( dtypes_values, on_device, fn_tree, frontend, test_flags, backend_fw, ): x_dtypes, x = dtypes_values np_helpers.test_frontend_function( input_dtypes=x_dtypes, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x1=x[0], x2=x[1], backend_to_test=backend_fw, ) # logical_xor @handle_frontend_test( fn_tree="jax.numpy.logical_xor", dtypes_values=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("bool"), num_arrays=2, ), ) def test_jax_logical_xor( dtypes_values, on_device, fn_tree, frontend, test_flags, backend_fw, ): x_dtypes, x = dtypes_values np_helpers.test_frontend_function( input_dtypes=x_dtypes, frontend=frontend, backend_to_test=backend_fw, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x1=x[0], x2=x[1], ) # not_equal @handle_frontend_test( fn_tree="jax.numpy.not_equal", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, shared_dtype=True, ), test_with_out=st.just(False), ) def test_jax_not_equal( dtype_and_x, frontend, test_flags, fn_tree, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, x1=x[0], x2=x[1], ) # packbits @handle_frontend_test( fn_tree="jax.numpy.packbits", dtype_x_axis=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("integer"), min_num_dims=1, min_dim_size=1, valid_axis=True, max_axes_size=1, force_int_axis=True, ), test_with_out=st.just(False), bitorder=st.sampled_from(["big", "little"]), ) def test_jax_packbits( dtype_x_axis, bitorder, frontend, on_device, *, fn_tree, test_flags, backend_fw, ): input_dtype, x, axis = dtype_x_axis helpers.test_frontend_function( input_dtypes=input_dtype, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], axis=axis, bitorder=bitorder, backend_to_test=backend_fw, ) @handle_frontend_test( 
fn_tree="jax.numpy.right_shift", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("integer"), num_arrays=2, shared_dtype=True, ), test_with_out=st.just(False), ) def test_jax_right_shift( *, dtype_and_x, frontend, test_flags, fn_tree, backend_fw, on_device, ): dtype, xs = dtype_and_x xs[1] = np.asarray(np.clip(xs[1], 0, np.iinfo(dtype[1]).bits - 1), dtype=dtype[1]) helpers.test_frontend_function( input_dtypes=dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x1=xs[0], x2=xs[1], ) # setxor1d @handle_frontend_test( fn_tree="jax.numpy.setxor1d", dtypes_values=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, shared_dtype=True ), assume_unique=st.booleans(), test_with_out=st.just(False), ) def test_jax_setxor1d( dtypes_values, on_device, fn_tree, frontend, test_flags, assume_unique, backend_fw, ): x_dtypes, x = dtypes_values helpers.test_frontend_function( input_dtypes=x_dtypes, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, ar1=x[0], ar2=x[1], assume_unique=assume_unique, )
ivy/ivy_tests/test_ivy/test_frontends/test_jax/test_numpy/test_logic.py/0
{ "file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_jax/test_numpy/test_logic.py", "repo_id": "ivy", "token_count": 13851 }
54
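The tests above exercise Ivy's jax.numpy frontend for elementwise logic and bit-shift functions through helpers.test_frontend_function. For reference, a minimal ground-truth sketch of the JAX behaviour being verified (this assumes jax is installed; the sample values are illustrative and not taken from the test suite):

# Assumes jax is installed; values are illustrative only.
import jax.numpy as jnp

x = jnp.array([-jnp.inf, -1.0, 0.0, 1.0, jnp.inf])
print(jnp.isneginf(x))   # [ True False False False False]
print(jnp.isposinf(x))   # [False False False False  True]
print(jnp.isreal(x))     # all True for a real-valued array
print(jnp.left_shift(jnp.array([1, 2, 4]), 1))          # [2 4 8]
print(jnp.less(jnp.array([1, 2]), jnp.array([2, 2])))   # [ True False]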
# global from hypothesis import strategies as st # local import ivy_tests.test_ivy.helpers as helpers import ivy_tests.test_ivy.test_frontends.test_numpy.helpers as np_frontend_helpers from ivy_tests.test_ivy.helpers import handle_frontend_test @handle_frontend_test( fn_tree="numpy.array_equal", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), num_arrays=2, shared_dtype=True ), equal_nan=st.booleans(), ) def test_numpy_array_equal( *, dtype_and_x, equal_nan, on_device, fn_tree, frontend, test_flags, backend_fw, ): dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, a1=x[0], a2=x[1], equal_nan=equal_nan, ) @handle_frontend_test( fn_tree="numpy.array_equiv", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, shared_dtype=True ), test_with_out=st.just(False), ) def test_numpy_array_equiv( *, dtype_and_x, on_device, fn_tree, frontend, test_flags, backend_fw, ): dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, a1=x[0], a2=x[1], ) # equal @handle_frontend_test( fn_tree="numpy.equal", dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype( arr_func=[ lambda: helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, shared_dtype=True, ) ], ), where=np_frontend_helpers.where(), number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc( fn_name="equal" ), ) def test_numpy_equal( dtypes_values_casting, where, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtypes, x, casting, dtype = dtypes_values_casting where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools( where=where, input_dtype=input_dtypes, test_flags=test_flags, ) np_frontend_helpers.test_frontend_function( input_dtypes=input_dtypes, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x1=x[0], x2=x[1], out=None, where=where, casting=casting, order="K", dtype=None, subok=True, ) @handle_frontend_test( fn_tree="numpy.greater", dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype( arr_func=[ lambda: helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, shared_dtype=True, ) ], ), where=np_frontend_helpers.where(), number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc( fn_name="greater" ), ) def test_numpy_greater( dtypes_values_casting, where, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtypes, x, casting, dtype = dtypes_values_casting where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools( where=where, input_dtype=input_dtypes, test_flags=test_flags, ) np_frontend_helpers.test_frontend_function( input_dtypes=input_dtypes, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x1=x[0], x2=x[1], out=None, where=where, casting=casting, order="K", dtype=None, subok=True, ) @handle_frontend_test( fn_tree="numpy.greater_equal", dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype( arr_func=[ lambda: helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, shared_dtype=True, ) ], ), where=np_frontend_helpers.where(), 
number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc( fn_name="greater_equal" ), ) def test_numpy_greater_equal( dtypes_values_casting, where, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtypes, x, casting, dtype = dtypes_values_casting where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools( where=where, input_dtype=input_dtypes, test_flags=test_flags, ) np_frontend_helpers.test_frontend_function( input_dtypes=input_dtypes, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x1=x[0], x2=x[1], out=None, where=where, casting=casting, order="K", dtype=None, subok=True, ) @handle_frontend_test( fn_tree="numpy.less", dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype( arr_func=[ lambda: helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, shared_dtype=True, ) ], ), where=np_frontend_helpers.where(), number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc( fn_name="less" ), ) def test_numpy_less( dtypes_values_casting, where, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtypes, x, casting, dtype = dtypes_values_casting where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools( where=where, input_dtype=input_dtypes, test_flags=test_flags, ) np_frontend_helpers.test_frontend_function( input_dtypes=input_dtypes, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x1=x[0], x2=x[1], out=None, where=where, casting=casting, order="K", dtype=None, subok=True, ) @handle_frontend_test( fn_tree="numpy.less_equal", dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype( arr_func=[ lambda: helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, shared_dtype=True, ) ], ), where=np_frontend_helpers.where(), number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc( fn_name="less_equal" ), ) def test_numpy_less_equal( dtypes_values_casting, where, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtypes, x, casting, dtype = dtypes_values_casting where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools( where=where, input_dtype=input_dtypes, test_flags=test_flags, ) np_frontend_helpers.test_frontend_function( input_dtypes=input_dtypes, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x1=x[0], x2=x[1], out=None, where=where, casting=casting, order="K", dtype=None, subok=True, ) @handle_frontend_test( fn_tree="numpy.not_equal", dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype( arr_func=[ lambda: helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, shared_dtype=True, ) ], ), where=np_frontend_helpers.where(), number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc( fn_name="not_equal" ), ) def test_numpy_not_equal( dtypes_values_casting, where, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtypes, x, casting, dtype = dtypes_values_casting where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools( where=where, input_dtype=input_dtypes, test_flags=test_flags, ) np_frontend_helpers.test_frontend_function( input_dtypes=input_dtypes, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x1=x[0], x2=x[1], 
        out=None,
        where=where,
        casting=casting,
        order="K",
        dtype=None,
        subok=True,
    )
ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_logic/test_comparison.py/0
{ "file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_logic/test_comparison.py", "repo_id": "ivy", "token_count": 4769 }
55
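The comparison tests above drive NumPy's ufunc-style signature through the frontend. A plain-NumPy sketch of the expected results (illustrative values; assumes only numpy):

import numpy as np

x1 = np.array([1.0, 2.0, 3.0])
x2 = np.array([1.0, 0.0, 3.0])
print(np.equal(x1, x2))          # [ True False  True]
print(np.greater_equal(x1, x2))  # [ True  True  True]
print(np.less(x1, x2))           # [False False False]
print(np.not_equal(x1, x2))      # [False  True False]
# The frontend tests additionally forward out, where, casting, order="K",
# dtype and subok, mirroring the full ufunc interface.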
# global from hypothesis import strategies as st # local import ivy_tests.test_ivy.helpers as helpers from ivy_tests.test_ivy.helpers import handle_frontend_test import ivy_tests.test_ivy.test_frontends.test_numpy.helpers as np_frontend_helpers # --- Helpers --- # # --------------- # @st.composite def _arrays_idx_n_dtypes(draw): num_arrays = draw( st.shared(helpers.ints(min_value=2, max_value=4), key="num_arrays") ) shape = draw( helpers.get_shape( min_num_dims=1, max_num_dims=5, min_dim_size=2, max_dim_size=10 ) ) input_dtypes, x, casting, dtype = draw( np_frontend_helpers.dtypes_values_casting_dtype( arr_func=[ lambda: helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), shape=shape, num_arrays=num_arrays, shared_dtype=True, ) ], ), ) axis = draw(helpers.get_axis(shape=shape, force_int=True)) return x, input_dtypes, axis, casting, dtype # --- Main --- # # ------------ # # column_stack @handle_frontend_test( fn_tree="numpy.column_stack", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), shared_dtype=True, num_arrays=helpers.ints(min_value=2, max_value=10), shape=helpers.get_shape( min_num_dims=1, ), ), ) def test_numpy_column_stack( dtype_and_x, frontend, test_flags, fn_tree, backend_fw, on_device, ): input_dtype, xs = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, tup=xs, ) # concat @handle_frontend_test( fn_tree="numpy.concatenate", xs_n_input_dtypes_n_unique_idx=_arrays_idx_n_dtypes(), ) def test_numpy_concatenate( xs_n_input_dtypes_n_unique_idx, frontend, test_flags, fn_tree, backend_fw, on_device, ): xs, input_dtypes, unique_idx, casting, dtype = xs_n_input_dtypes_n_unique_idx helpers.test_frontend_function( input_dtypes=input_dtypes, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, rtol=1e-01, atol=1e-01, arrays=xs, axis=unique_idx, casting=casting, dtype=dtype, out=None, ) # hstack @handle_frontend_test( fn_tree="numpy.hstack", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), shared_dtype=True, num_arrays=helpers.ints(min_value=2, max_value=10), shape=helpers.get_shape( min_num_dims=1, ), ), ) def test_numpy_hstack( dtype_and_x, frontend, test_flags, fn_tree, backend_fw, on_device, ): input_dtype, xs = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, tup=xs, ) # stack @handle_frontend_test( fn_tree="numpy.stack", dtype_and_x=_arrays_idx_n_dtypes(), ) def test_numpy_stack( dtype_and_x, frontend, test_flags, fn_tree, backend_fw, on_device, ): xs, input_dtypes, unique_idx, _, _ = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtypes, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, arrays=xs, axis=unique_idx, ) # vstack @handle_frontend_test( fn_tree="numpy.vstack", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), shared_dtype=True, num_arrays=helpers.ints(min_value=2, max_value=10), shape=helpers.get_shape( min_num_dims=1, ), ), ) def test_numpy_vstack( dtype_and_x, frontend, test_flags, fn_tree, backend_fw, on_device, ): input_dtype, xs = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, 
        test_flags=test_flags,
        fn_tree=fn_tree,
        on_device=on_device,
        tup=xs,
    )
ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_manipulation_routines/test_joining_arrays.py/0
{ "file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_manipulation_routines/test_joining_arrays.py", "repo_id": "ivy", "token_count": 2377 }
56
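For reference, the NumPy joining routines targeted by the tests above behave as follows (a small illustrative sketch, assuming only numpy):

import numpy as np

a, b = np.ones((2, 3)), np.zeros((2, 3))
print(np.concatenate([a, b], axis=0).shape)  # (4, 3)
print(np.stack([a, b], axis=0).shape)        # (2, 2, 3), new leading axis
print(np.vstack([a, b]).shape)               # (4, 3)
print(np.hstack([a, b]).shape)               # (2, 6)
print(np.column_stack([a, b]).shape)         # (2, 6)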
# global from hypothesis import strategies as st # local import ivy_tests.test_ivy.helpers as helpers import ivy_tests.test_ivy.test_frontends.test_numpy.helpers as np_frontend_helpers from ivy_tests.test_ivy.helpers import handle_frontend_test # around @handle_frontend_test( fn_tree="numpy.around", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), ), decimals=st.integers(min_value=0, max_value=5), ) def test_numpy_around( *, dtype_and_x, decimals, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, a=x[0], decimals=decimals, ) # ceil @handle_frontend_test( fn_tree="numpy.ceil", dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype( arr_func=[ lambda: helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), ) ], ), where=np_frontend_helpers.where(), number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc( fn_name="ceil" ), ) def test_numpy_ceil( dtypes_values_casting, where, frontend, test_flags, fn_tree, backend_fw, on_device, ): input_dtypes, x, casting, dtype = dtypes_values_casting where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools( where=where, input_dtype=input_dtypes, test_flags=test_flags, ) np_frontend_helpers.test_frontend_function( input_dtypes=input_dtypes, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], out=None, where=where, casting=casting, order="K", dtype=dtype, subok=True, ) # fix @handle_frontend_test( fn_tree="numpy.fix", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), ), ) def test_numpy_fix( dtype_and_x, frontend, test_flags, fn_tree, backend_fw, on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, test_values=False, a=x[0], ) # floor @handle_frontend_test( fn_tree="numpy.floor", dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype( arr_func=[ lambda: helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), ) ], ), where=np_frontend_helpers.where(), number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc( fn_name="floor" ), ) def test_numpy_floor( dtypes_values_casting, where, frontend, test_flags, fn_tree, backend_fw, on_device, ): input_dtypes, x, casting, dtype = dtypes_values_casting where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools( where=where, input_dtype=input_dtypes, test_flags=test_flags, ) np_frontend_helpers.test_frontend_function( input_dtypes=input_dtypes, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], out=None, where=where, casting=casting, order="K", dtype=dtype, subok=True, ) # rint @handle_frontend_test( fn_tree="numpy.rint", dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype( arr_func=[ lambda: helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), ) ], ), where=np_frontend_helpers.where(), number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc( fn_name="rint" ), ) def test_numpy_rint( dtypes_values_casting, where, frontend, test_flags, fn_tree, backend_fw, on_device, ): input_dtype, 
x, casting, dtype = dtypes_values_casting where, input_dtype, test_flags = np_frontend_helpers.handle_where_and_array_bools( where=where, input_dtype=input_dtype, test_flags=test_flags, ) np_frontend_helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], out=None, where=where, casting=casting, order="K", dtype=dtype, subok=True, ) # round @handle_frontend_test( fn_tree="numpy.round", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), max_value=50, min_value=-50, ), decimals=st.integers(min_value=0, max_value=3), ) def test_numpy_round( *, dtype_and_x, decimals, on_device, backend_fw, fn_tree, frontend, test_flags, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, a=x[0], decimals=decimals, ) # trunc @handle_frontend_test( fn_tree="numpy.trunc", dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype( arr_func=[ lambda: helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), ) ], ), where=np_frontend_helpers.where(), number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc( fn_name="trunc" ), ) def test_numpy_trunc( dtypes_values_casting, where, frontend, test_flags, fn_tree, backend_fw, on_device, ): input_dtypes, x, casting, dtype = dtypes_values_casting where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools( where=where, input_dtype=input_dtypes, test_flags=test_flags, ) np_frontend_helpers.test_frontend_function( input_dtypes=input_dtypes, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], out=None, where=where, casting=casting, order="K", dtype=dtype, subok=True, )
ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_mathematical_functions/test_rounding.py/0
{ "file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_mathematical_functions/test_rounding.py", "repo_id": "ivy", "token_count": 3510 }
57
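The rounding tests above compare the frontend against NumPy's rounding family; an illustrative ground-truth sketch (assumes only numpy):

import numpy as np

x = np.array([-1.7, -0.2, 0.2, 2.5])
print(np.around(x, decimals=0))  # [-2. -0.  0.  2.]  (ties round to even)
print(np.ceil(x))                # [-1. -0.  1.  3.]
print(np.floor(x))               # [-2. -1.  0.  2.]
print(np.rint(x))                # [-2. -0.  0.  2.]
print(np.trunc(x))               # [-1. -0.  0.  2.]
print(np.fix(x))                 # [-1. -0.  0.  2.]  (rounds toward zero)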
# global from hypothesis import strategies as st, assume import numpy as np # local import ivy_tests.test_ivy.helpers as helpers from ivy_tests.test_ivy.test_functional.test_core.test_statistical import ( _statistical_dtype_values, ) import ivy_tests.test_ivy.test_frontends.test_numpy.helpers as np_frontend_helpers from ivy_tests.test_ivy.helpers import handle_frontend_test # --- Helpers --- # # --------------- # @st.composite def _get_dtype_value1_value2_cov( draw, available_dtypes, min_num_dims, max_num_dims, min_dim_size, max_dim_size, abs_smallest_val=None, min_value=None, max_value=None, allow_inf=False, exclude_min=False, exclude_max=False, large_abs_safety_factor=4, small_abs_safety_factor=4, safety_factor_scale="log", ): shape = draw( helpers.get_shape( allow_none=False, min_num_dims=min_num_dims, max_num_dims=max_num_dims, min_dim_size=min_dim_size, max_dim_size=max_dim_size, ) ) dtype = draw(st.sampled_from(draw(available_dtypes))) values = [] for i in range(2): values.append( draw( helpers.array_values( dtype=dtype, shape=shape, abs_smallest_val=abs_smallest_val, min_value=min_value, max_value=max_value, allow_inf=allow_inf, exclude_min=exclude_min, exclude_max=exclude_max, large_abs_safety_factor=large_abs_safety_factor, small_abs_safety_factor=small_abs_safety_factor, safety_factor_scale=safety_factor_scale, ) ) ) value1, value2 = values[0], values[1] # modifiers: rowVar, bias, ddof rowVar = draw(st.booleans()) bias = draw(st.booleans()) ddof = draw(helpers.ints(min_value=0, max_value=1)) numVals = None if rowVar is False: numVals = -1 if numVals == 0 else 0 else: numVals = 0 if len(shape) == 1 else -1 fweights = draw( helpers.array_values( dtype="int64", shape=shape[numVals], abs_smallest_val=1, min_value=1, max_value=10, allow_inf=False, ) ) aweights = draw( helpers.array_values( dtype="float64", shape=shape[numVals], abs_smallest_val=1, min_value=1, max_value=10, allow_inf=False, small_abs_safety_factor=1, ) ) return [dtype], value1, value2, rowVar, bias, ddof, fweights, aweights # --- Main --- # # ------------ # # average @handle_frontend_test( fn_tree="numpy.average", dtype_and_a=_statistical_dtype_values(function="average"), dtype_and_x=_statistical_dtype_values(function="average"), keep_dims=st.booleans(), returned=st.booleans(), test_with_out=st.just(False), ) def test_numpy_average( dtype_and_a, dtype_and_x, frontend, test_flags, fn_tree, backend_fw, keep_dims, returned, on_device, ): try: input_dtype, a, axis = dtype_and_a input_dtypes, xs, axiss = dtype_and_x if isinstance(axis, tuple): axis = axis[0] helpers.test_frontend_function( a=a[0], input_dtypes=input_dtype, backend_to_test=backend_fw, weights=xs[0], axis=axis, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, keepdims=keep_dims, returned=returned, on_device=on_device, rtol=1e-2, atol=1e-2, ) except ZeroDivisionError: assume(False) except AssertionError: assume(False) # cov @handle_frontend_test( fn_tree="numpy.cov", dtype_x1_x2_cov=_get_dtype_value1_value2_cov( available_dtypes=helpers.get_dtypes("float"), min_num_dims=1, max_num_dims=2, min_dim_size=2, max_dim_size=5, min_value=1, max_value=1e10, abs_smallest_val=0.01, large_abs_safety_factor=2, safety_factor_scale="log", ), test_with_out=st.just(False), ) def test_numpy_cov( dtype_x1_x2_cov, test_flags, frontend, fn_tree, backend_fw, on_device, ): dtype, x1, x2, rowvar, bias, ddof, fweights, aweights = dtype_x1_x2_cov np_frontend_helpers.test_frontend_function( input_dtypes=[dtype[0], dtype[0], "int64", "float64"], frontend=frontend, 
test_flags=test_flags, backend_to_test=backend_fw, fn_tree=fn_tree, on_device=on_device, rtol=1e-2, atol=1e-2, m=x1, y=x2, rowvar=rowvar, bias=bias, ddof=ddof, fweights=fweights, aweights=aweights, ) # mean @handle_frontend_test( fn_tree="numpy.mean", dtype_and_x=_statistical_dtype_values(function="mean"), dtype=helpers.get_dtypes("float", full=False, none=True), where=np_frontend_helpers.where(), keep_dims=st.booleans(), ) def test_numpy_mean( dtype_and_x, dtype, where, frontend, backend_fw, test_flags, fn_tree, on_device, keep_dims, ): input_dtypes, x, axis = dtype_and_x where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools( where=where, input_dtype=input_dtypes, test_flags=test_flags, ) helpers.test_frontend_function( input_dtypes=input_dtypes, frontend=frontend, backend_to_test=backend_fw, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, a=x[0], axis=axis, dtype=dtype[0], out=None, keepdims=keep_dims, where=where, ) # nanmean @handle_frontend_test( fn_tree="numpy.nanmean", dtype_and_a=_statistical_dtype_values(function="mean"), dtype=helpers.get_dtypes("float", full=False, none=True), where=np_frontend_helpers.where(), keep_dims=st.booleans(), ) def test_numpy_nanmean( dtype_and_a, dtype, where, frontend, backend_fw, test_flags, fn_tree, on_device, keep_dims, ): input_dtypes, a, axis = dtype_and_a if isinstance(axis, tuple): axis = axis[0] where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools( where=where, input_dtype=input_dtypes, test_flags=test_flags, ) helpers.test_frontend_function( input_dtypes=input_dtypes, frontend=frontend, backend_to_test=backend_fw, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, rtol=1e-2, atol=1e-2, a=a[0], axis=axis, dtype=dtype[0], out=None, keepdims=keep_dims, where=where, ) @handle_frontend_test( fn_tree="numpy.nanmedian", keep_dims=st.booleans(), overwrite_input=st.booleans(), dtype_x_axis=_statistical_dtype_values(function="nanmedian"), ) def test_numpy_nanmedian( dtype_x_axis, frontend, test_flags, fn_tree, backend_fw, on_device, keep_dims, overwrite_input, ): input_dtypes, x, axis = dtype_x_axis np_frontend_helpers.test_frontend_function( input_dtypes=input_dtypes, frontend=frontend, backend_to_test=backend_fw, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, a=x[0], axis=axis, overwrite_input=overwrite_input, out=None, keepdims=keep_dims, ) # nanstd @handle_frontend_test( fn_tree="numpy.nanstd", dtype_and_a=_statistical_dtype_values(function="nanstd"), dtype=helpers.get_dtypes("float", full=False, none=True), where=np_frontend_helpers.where(), keep_dims=st.booleans(), ) def test_numpy_nanstd( dtype_and_a, dtype, where, frontend, backend_fw, test_flags, fn_tree, on_device, keep_dims, ): input_dtypes, a, axis, correction = dtype_and_a if isinstance(axis, tuple): axis = axis[0] where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools( where=where, input_dtype=input_dtypes, test_flags=test_flags, ) assume(np.dtype(dtype[0]) >= np.dtype(input_dtypes[0])) np_frontend_helpers.test_frontend_function( input_dtypes=input_dtypes, frontend=frontend, backend_to_test=backend_fw, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, a=a[0], axis=axis, dtype=dtype[0], out=None, ddof=correction, keepdims=keep_dims, where=where, atol=1e-2, rtol=1e-2, ) # nanvar @handle_frontend_test( fn_tree="numpy.nanvar", dtype_x_axis=_statistical_dtype_values(function="nanvar"), dtype=helpers.get_dtypes("float", full=False, none=True), 
where=np_frontend_helpers.where(), keep_dims=st.booleans(), ) def test_numpy_nanvar( dtype_x_axis, dtype, where, frontend, test_flags, backend_fw, fn_tree, on_device, keep_dims, ): input_dtypes, x, axis, ddof = dtype_x_axis if isinstance(axis, tuple): axis = axis[0] where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools( where=where, input_dtype=input_dtypes, test_flags=test_flags, ) np_frontend_helpers.test_frontend_function( input_dtypes=input_dtypes, frontend=frontend, backend_to_test=backend_fw, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, atol=1e-1, rtol=1e-1, a=x[0], axis=axis, dtype=dtype[0], out=None, ddof=ddof, keepdims=keep_dims, where=where, ) # std @handle_frontend_test( fn_tree="numpy.std", dtype_and_x=_statistical_dtype_values(function="std"), dtype=helpers.get_dtypes("float", full=False, none=True), where=np_frontend_helpers.where(), keep_dims=st.booleans(), ) def test_numpy_std( dtype_and_x, dtype, where, frontend, backend_fw, test_flags, fn_tree, on_device, keep_dims, ): input_dtypes, x, axis, correction = dtype_and_x if isinstance(axis, tuple): axis = axis[0] where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools( where=where, input_dtype=input_dtypes, test_flags=test_flags, ) assume(np.dtype(dtype[0]) >= np.dtype(input_dtypes[0])) np_frontend_helpers.test_frontend_function( input_dtypes=input_dtypes, frontend=frontend, backend_to_test=backend_fw, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, rtol=1e-1, atol=1e-1, x=x[0], axis=axis, ddof=correction, keepdims=keep_dims, out=None, dtype=dtype[0], where=where, ) @handle_frontend_test( fn_tree="numpy.var", dtype_and_x=_statistical_dtype_values(function="var"), dtype=helpers.get_dtypes("float", full=False, none=True), where=np_frontend_helpers.where(), keep_dims=st.booleans(), ) def test_numpy_var( dtype_and_x, dtype, where, frontend, backend_fw, test_flags, fn_tree, on_device, keep_dims, ): input_dtypes, x, axis, correction = dtype_and_x if isinstance(axis, tuple): axis = axis[0] where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools( where=where, input_dtype=input_dtypes, test_flags=test_flags, ) assume(np.dtype(dtype[0]) >= np.dtype(input_dtypes[0])) np_frontend_helpers.test_frontend_function( input_dtypes=input_dtypes, frontend=frontend, backend_to_test=backend_fw, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, rtol=1e-1, atol=1e-1, x=x[0], axis=axis, ddof=correction, keepdims=keep_dims, out=None, dtype=dtype[0], where=where, )
ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_statistics/test_averages_and_variances.py/0
{ "file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_statistics/test_averages_and_variances.py", "repo_id": "ivy", "token_count": 6532 }
58
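The statistics tests above wrap NumPy's averaging and variance routines, including cov with frequency and reliability weights. A small ground-truth sketch (illustrative values, assumes only numpy):

import numpy as np

m = np.array([[1.0, 2.0, 4.0],
              [2.0, 1.0, 8.0]])
print(np.mean(m, axis=1))                        # [2.333... 3.666...]
print(np.std(m, axis=1, ddof=1))
print(np.nanmean(np.array([1.0, np.nan, 3.0])))  # 2.0
# cov with fweights/aweights, as exercised by test_numpy_cov above:
print(np.cov(m, rowvar=True, bias=False, fweights=[1, 2, 1], aweights=[0.5, 1.0, 1.5]))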
# global import numpy as np from hypothesis import strategies as st # local import ivy_tests.test_ivy.helpers as helpers from ivy_tests.test_ivy.helpers import handle_frontend_test # allclose @handle_frontend_test( fn_tree="paddle.allclose", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, shared_dtype=True, ), equal_nan=st.booleans(), ) def test_paddle_allclose( *, dtype_and_x, equal_nan, on_device, fn_tree, frontend, backend_fw, test_flags, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, backend_to_test=backend_fw, x=x[0], y=x[1], equal_nan=equal_nan, ) @handle_frontend_test( fn_tree="paddle.bitwise_and", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, shared_dtype=True, ), test_with_out=st.just(True), ) def test_paddle_bitwise_and( *, dtype_and_x, on_device, fn_tree, backend_fw, frontend, test_flags, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, frontend=frontend, backend_to_test=backend_fw, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], y=x[1], ) # bitwise_not @handle_frontend_test( fn_tree="paddle.bitwise_not", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), ), test_with_out=st.just(True), ) def test_paddle_bitwise_not( *, dtype_and_x, on_device, fn_tree, frontend, backend_fw, test_flags, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, frontend=frontend, test_flags=test_flags, backend_to_test=backend_fw, fn_tree=fn_tree, on_device=on_device, x=x[0], ) # bitwise_or @handle_frontend_test( fn_tree="paddle.bitwise_or", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, shared_dtype=True ), test_with_out=st.just(True), ) def test_paddle_bitwise_or( *, dtype_and_x, on_device, fn_tree, backend_fw, frontend, test_flags, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, frontend=frontend, backend_to_test=backend_fw, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], y=x[1], ) # bitwise_xor @handle_frontend_test( fn_tree="paddle.bitwise_xor", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, shared_dtype=True, ), test_with_out=st.just(True), ) def test_paddle_bitwise_xor( *, dtype_and_x, on_device, fn_tree, frontend, backend_fw, test_flags, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, frontend=frontend, test_flags=test_flags, backend_to_test=backend_fw, fn_tree=fn_tree, on_device=on_device, x=x[0], y=x[1], ) # Tests # # ----- # # equal @handle_frontend_test( fn_tree="paddle.equal", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, shared_dtype=True, safety_factor_scale="log", small_abs_safety_factor=32, ), ) def test_paddle_equal( dtype_and_x, frontend, test_flags, fn_tree, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, x=x[0], y=x[1], ) @handle_frontend_test( fn_tree="paddle.equal_all", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, min_value=-np.inf, max_value=np.inf, shared_dtype=True, 
safety_factor_scale="log", small_abs_safety_factor=32, ), ) def test_paddle_equal_all( *, dtype_and_x, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], y=x[1], ) # greater_equal @handle_frontend_test( fn_tree="paddle.greater_equal", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, shared_dtype=True, safety_factor_scale="log", small_abs_safety_factor=32, ), ) def test_paddle_greater_equal( dtype_and_x, frontend, test_flags, fn_tree, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, x=x[0], y=x[1], ) # greater_than @handle_frontend_test( fn_tree="paddle.greater_than", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, shared_dtype=True, safety_factor_scale="log", small_abs_safety_factor=32, ), ) def test_paddle_greater_than( dtype_and_x, frontend, test_flags, fn_tree, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, x=x[0], y=x[1], ) @handle_frontend_test( fn_tree="paddle.is_empty", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, shared_dtype=True, safety_factor_scale="log", small_abs_safety_factor=32, ), ) def test_paddle_is_empty( dtype_and_x, frontend, test_flags, backend_fw, fn_tree, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, frontend=frontend, test_flags=test_flags, backend_to_test=backend_fw, fn_tree=fn_tree, x=x[0], ) # is_tensor @handle_frontend_test( fn_tree="paddle.is_tensor", dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("valid")), ) def test_paddle_is_tensor( *, dtype_and_x, on_device, fn_tree, backend_fw, frontend, test_flags, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, on_device=on_device, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, x=x[0], ) # isclose @handle_frontend_test( fn_tree="paddle.isclose", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, shared_dtype=True, ), equal_nan=st.booleans(), ) def test_paddle_isclose( *, dtype_and_x, equal_nan, on_device, backend_fw, fn_tree, frontend, test_flags, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, frontend=frontend, test_flags=test_flags, backend_to_test=backend_fw, fn_tree=fn_tree, on_device=on_device, x=x[0], y=x[1], equal_nan=equal_nan, ) # less_equal @handle_frontend_test( fn_tree="paddle.less_equal", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, shared_dtype=True, safety_factor_scale="log", small_abs_safety_factor=32, ), ) def test_paddle_less_equal( dtype_and_x, frontend, test_flags, fn_tree, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, x=x[0], y=x[1], ) # less_than @handle_frontend_test( fn_tree="paddle.less_than", dtype_and_x=helpers.dtype_and_values( 
available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, shared_dtype=True, safety_factor_scale="log", small_abs_safety_factor=32, ), ) def test_paddle_less_than( dtype_and_x, frontend, test_flags, fn_tree, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, x=x[0], y=x[1], ) # logical_and @handle_frontend_test( fn_tree="paddle.logical_and", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, shared_dtype=True, ), ) def test_paddle_logical_and( *, dtype_and_x, on_device, backend_fw, fn_tree, frontend, test_flags, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, frontend=frontend, test_flags=test_flags, backend_to_test=backend_fw, fn_tree=fn_tree, on_device=on_device, x=x[0], y=x[1], ) @handle_frontend_test( fn_tree="paddle.logical_not", dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("valid")), test_with_out=st.just(True), ) def test_paddle_logical_not( *, dtype_and_x, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], ) # logical_or @handle_frontend_test( fn_tree="paddle.logical_or", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, shared_dtype=True ), test_with_out=st.just(True), ) def test_paddle_logical_or( *, dtype_and_x, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], y=x[1], ) # logical_xor @handle_frontend_test( fn_tree="paddle.logical_xor", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, shared_dtype=True ), test_with_out=st.just(True), ) def test_paddle_logical_xor( *, dtype_and_x, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], y=x[1], ) # not_equal @handle_frontend_test( fn_tree="paddle.not_equal", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, shared_dtype=True, safety_factor_scale="log", small_abs_safety_factor=32, ), ) def test_paddle_not_equal( dtype_and_x, frontend, test_flags, fn_tree, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, x=x[0], y=x[1], )
ivy/ivy_tests/test_ivy/test_frontends/test_paddle/test_logic.py/0
{ "file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_paddle/test_logic.py", "repo_id": "ivy", "token_count": 6625 }
59
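Each fn_tree above (for example "paddle.logical_and") resolves to a function under ivy.functional.frontends.paddle, which the helpers compare against PaddlePaddle. A minimal usage sketch of that frontend, offered as an illustration only (it assumes the ivy repo is importable with the numpy backend available and is not part of the test suite):

# Assumes ivy is importable and the numpy backend is installed; illustrative only.
import ivy
import ivy.functional.frontends.paddle as paddle_frontend

ivy.set_backend("numpy")
x = ivy.array([True, False, True])
y = ivy.array([True, True, False])
print(paddle_frontend.logical_and(x, y))  # elementwise AND via the frontend
ivy.previous_backend()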
# global from hypothesis import strategies as st # local import ivy_tests.test_ivy.helpers as helpers from ivy_tests.test_ivy.helpers import handle_frontend_test # --- Helpers --- # # --------------- # @st.composite def _valid_dct(draw): dtype, x = draw( helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), max_value=65280, min_value=-65280, min_num_dims=1, max_num_dims=5, min_dim_size=2, max_dim_size=10, shared_dtype=True, ) ) dims_len = len(x[0].shape) n = draw(st.sampled_from([None, "int"])) axis = draw(helpers.ints(min_value=-dims_len, max_value=dims_len)) norm = draw(st.sampled_from([None, "ortho"])) type = draw(helpers.ints(min_value=1, max_value=4)) if n == "int": n = draw(helpers.ints(min_value=1, max_value=20)) if n <= 1 and type == 1: n = 2 if norm == "ortho" and type == 1: norm = None return dtype, x, type, n, axis, norm @st.composite def _valid_idct(draw): dtype, x = draw( helpers.dtype_and_values( available_dtypes=["float32", "float64"], max_value=65280, min_value=-65280, min_num_dims=1, max_num_dims=5, min_dim_size=2, max_dim_size=10, shared_dtype=True, ) ) n = None axis = -1 norm = draw(st.sampled_from([None, "ortho"])) type = draw(helpers.ints(min_value=1, max_value=4)) if norm == "ortho" and type == 1: norm = None return dtype, x, type, n, axis, norm # Helpers @st.composite def _x_and_fft(draw, dtypes): min_fft_points = 2 dtype = draw(dtypes) x_dim = draw( helpers.get_shape( min_dim_size=2, max_dim_size=100, min_num_dims=1, max_num_dims=4 ) ) x = draw( helpers.array_values( dtype=dtype[0], shape=tuple(x_dim), ) ) dim = draw( helpers.get_axis(shape=x_dim, allow_neg=True, allow_none=False, max_size=1) ) norm = draw(st.sampled_from(["backward", "forward", "ortho"])) n = draw(st.integers(min_fft_points, 256)) return dtype, x, dim, norm, n @st.composite def _x_and_fft2(draw): min_fft2_points = 2 dtype = draw(helpers.get_dtypes("float_and_complex", full=False)) x, dim = draw( helpers.arrays_and_axes( available_dtypes=dtype[0], min_dim_size=2, max_dim_size=100, min_num_dims=2, max_num_dims=4, ), ) s = ( draw(st.integers(min_fft2_points, 256)), draw(st.integers(min_fft2_points, 256)), ) norm = draw(st.sampled_from(["backward", "forward", "ortho"])) return dtype, x, s, dim, norm @st.composite def _x_and_ifft(draw): min_fft_points = 2 dtype = draw(helpers.get_dtypes("complex")) x_dim = draw( helpers.get_shape( min_dim_size=2, max_dim_size=100, min_num_dims=1, max_num_dims=4 ) ) x = draw( helpers.array_values( dtype=dtype[0], shape=tuple(x_dim), min_value=-1e-10, max_value=1e10, ) ) dim = draw(st.integers(1 - len(list(x_dim)), len(list(x_dim)) - 1)) norm = draw(st.sampled_from(["backward", "forward", "ortho"])) n = draw(st.integers(min_fft_points, 256)) return dtype, x, dim, norm, n @st.composite def _x_and_ifftn(draw): _x_and_ifftn = draw(_x_and_fft2()) workers = draw(st.integers(1, 4)) return _x_and_ifftn + (workers,) @st.composite def _x_and_rfftn(draw): min_rfftn_points = 2 dtype = draw(helpers.get_dtypes("float")) x_dim = draw( helpers.get_shape( min_dim_size=2, max_dim_size=100, min_num_dims=1, max_num_dims=3 ) ) x = draw( helpers.array_values( dtype=dtype[0], shape=tuple(x_dim), min_value=-1e10, max_value=1e10, large_abs_safety_factor=2.5, small_abs_safety_factor=2.5, safety_factor_scale="log", ) ) axes = draw( st.lists( st.integers(0, len(x_dim) - 1), min_size=1, max_size=len(x_dim), unique=True ) ) s = draw( st.lists( st.integers(min_rfftn_points, 256), min_size=len(axes), max_size=len(axes) ) ) norm = draw(st.sampled_from(["backward", "forward", 
"ortho"])) return dtype, x, s, axes, norm # --- Main --- # # ------------ # # dct @handle_frontend_test( fn_tree="scipy.fft.dct", dtype_x_and_args=_valid_dct(), test_with_out=st.just(False), ) def test_scipy_dct( dtype_x_and_args, frontend, test_flags, fn_tree, on_device, backend_fw, ): input_dtype, x, _type, n, axis, norm = dtype_x_and_args helpers.test_frontend_function( input_dtypes=input_dtype, frontend=frontend, backend_to_test=backend_fw, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], type=_type, n=n, axis=axis, norm=norm, rtol_=1e-3, atol_=1e-1, ) # Tests # fft @handle_frontend_test( fn_tree="scipy.fft.fft", d_x_d_n_n=_x_and_fft(helpers.get_dtypes("complex")), test_with_out=st.just(False), ) def test_scipy_fft( d_x_d_n_n, frontend, test_flags, fn_tree, on_device, backend_fw, ): dtype, x, dim, norm, n = d_x_d_n_n helpers.test_frontend_function( input_dtypes=dtype, frontend=frontend, backend_to_test=backend_fw, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x, dim=dim, norm=norm, n=n, ) # fft2 @handle_frontend_test( fn_tree="scipy.fft.fft2", d_x_d_s_n=_x_and_fft2(), test_with_out=st.just(False), ) def test_scipy_fft2( d_x_d_s_n, frontend, test_flags, fn_tree, on_device, backend_fw, ): dtype, x, s, ax, norm = d_x_d_s_n helpers.test_frontend_function( input_dtypes=dtype, frontend=frontend, backend_to_test=backend_fw, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], s=s, axes=ax, norm=norm, ) # idct @handle_frontend_test( fn_tree="scipy.fft.idct", dtype_x_and_args=_valid_idct(), test_with_out=st.just(False), ) def test_scipy_idct( dtype_x_and_args, frontend, test_flags, fn_tree, on_device, backend_fw, ): input_dtype, x, _type, n, axis, norm = dtype_x_and_args helpers.test_frontend_function( input_dtypes=input_dtype, frontend=frontend, backend_to_test=backend_fw, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], type=_type, n=n, axis=axis, norm=norm, rtol_=1e-3, atol_=1e-1, ) # ifft @handle_frontend_test( fn_tree="scipy.fft.ifft", d_x_d_n_n=_x_and_ifft(), test_with_out=st.just(False), ) def test_scipy_ifft( d_x_d_n_n, frontend, test_flags, fn_tree, on_device, backend_fw, ): dtype, x, dim, norm, n = d_x_d_n_n helpers.test_frontend_function( input_dtypes=dtype, frontend=frontend, backend_to_test=backend_fw, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x, axis=dim, norm=norm, n=n, ) # ifftn @handle_frontend_test( fn_tree="scipy.fft.ifftn", d_x_d_s_n_workers=_x_and_ifftn(), test_with_out=st.just(False), ) def test_scipy_ifftn( d_x_d_s_n_workers, frontend, test_flags, fn_tree, on_device, backend_fw, ): dtype, x, s, ax, norm, workers = d_x_d_s_n_workers helpers.test_frontend_function( input_dtypes=dtype, frontend=frontend, test_flags=test_flags, backend_to_test=backend_fw, fn_tree=fn_tree, on_device=on_device, x=x[0], s=s, axes=ax, norm=norm, workers=workers, ) # rfftn @handle_frontend_test( fn_tree="scipy.fft.rfftn", dtype_and_x=_x_and_rfftn(), ) def test_scipy_rfftn(dtype_and_x, frontend, backend_fw, test_flags, fn_tree, on_device): dtype, x, s, axes, norm = dtype_and_x helpers.test_frontend_function( input_dtypes=dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, test_values=True, x=x, s=s, axes=axes, norm=norm, )
ivy/ivy_tests/test_ivy/test_frontends/test_scipy/test_fft/test_fft.py/0
{ "file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_scipy/test_fft/test_fft.py", "repo_id": "ivy", "token_count": 4847 }
60
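The composite strategies above generate sizes, axes and norms for scipy.fft routines. A ground-truth sketch of the calls being mirrored (assumes scipy and numpy; the inputs are illustrative):

import numpy as np
import scipy.fft

x = np.arange(8, dtype=np.float64)
print(scipy.fft.dct(x, type=2, n=8, axis=-1, norm="ortho"))
print(scipy.fft.fft(x, n=8, norm="backward"))
print(scipy.fft.ifft(scipy.fft.fft(x)))  # round-trips back to x (up to float error)
print(scipy.fft.rfftn(np.ones((4, 4)), s=(4, 4), axes=(0, 1), norm="forward"))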
from hypothesis import strategies as st from types import SimpleNamespace import sys # local import ivy import ivy_tests.test_ivy.helpers as helpers from ivy_tests.test_ivy.helpers import handle_frontend_test from typing import Optional try: import tensorflow as tf except ImportError: tf = SimpleNamespace() # Helper function for deserialize. def get_callable_functions( module_name: str, ): module = sys.modules[module_name] fn_list = [] for fn_name in dir(module): obj = getattr(module, fn_name) if callable(obj): fn_list.append(fn_name) return fn_list # Helper function for deserialize. def simple_test_two_function( *, fn_name: str, x, frontend: str, fn_str: str, dtype_data: str, rtol_: Optional[float] = None, atol_: float = 1e-06, ivy_submodules: list = [], framework_submodules: list = [], ): ivy.set_backend(frontend) fn_ivy = ivy.functional.frontends.__dict__[frontend] for ivy_submodule in ivy_submodules: fn_ivy = fn_ivy.__dict__[ivy_submodule] fn_ivy = fn_ivy.__dict__[fn_str] fn_framework = tf for framework_submodule in framework_submodules: fn_framework = fn_framework.__dict__[framework_submodule] fn_framework = fn_framework.__dict__[fn_str] x = ivy.array(x).to_native() ret_ivy = fn_ivy(fn_name)(x) ret = fn_framework(fn_name)(x) ret_ivy = ivy.array(ret_ivy, dtype=dtype_data) ret = ivy.array(ret, dtype=dtype_data) ret_np_flat = helpers.flatten_and_to_np(ret=ret) frontend_ret_np_flat = helpers.flatten_and_to_np(ret=ret_ivy) helpers.value_test( ret_np_flat=ret_np_flat, ret_np_from_gt_flat=frontend_ret_np_flat, rtol=rtol_, atol=atol_, ground_truth_backend=frontend, ) ivy.previous_backend() # deserialize @handle_frontend_test( fn_tree="tensorflow.keras.activations.deserialize", fn_name=st.sampled_from(get_callable_functions("keras.activations")).filter( lambda x: not x[0].isupper() and x not in [ "deserialize", "get", "keras_export", "serialize", "deserialize_keras_object", "serialize_keras_object", "get_globals", ] ), dtype_and_data=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), min_value=0, max_value=10, shape=helpers.ints(min_value=2, max_value=5).map(lambda x: (x, x)), ), ) def test_tensorflow_deserialize( *, dtype_and_data, fn_name, fn_tree, frontend, ): dtype_data, data = dtype_and_data simple_test_two_function( fn_name=fn_name, x=data[0], frontend=frontend, fn_str="deserialize", dtype_data=dtype_data[0], rtol_=1e-01, atol_=1e-01, ivy_submodules=["keras", "activations"], framework_submodules=["keras", "activations"], ) # elu @handle_frontend_test( fn_tree="tensorflow.keras.activations.elu", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), min_value=-3, max_value=3, min_num_dims=1, max_num_dims=3, min_dim_size=1, max_dim_size=3, ), alpha=st.one_of( helpers.floats( min_value=-3, max_value=3, ) ), test_with_out=st.just(False), ) def test_tensorflow_elu( *, dtype_and_x, alpha, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], alpha=alpha, ) # gelu @handle_frontend_test( fn_tree="tensorflow.keras.activations.gelu", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), large_abs_safety_factor=2, small_abs_safety_factor=2, ), approximate=st.booleans(), test_with_out=st.just(False), ) def test_tensorflow_gelu( *, dtype_and_x, approximate, on_device, fn_tree, frontend, test_flags, backend_fw ): 
input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, rtol=1e-2, atol=1e-2, x=x[0], approximate=approximate, ) @handle_frontend_test( fn_tree="tensorflow.keras.activations.get", fn_name=st.sampled_from(get_callable_functions("keras.activations")).filter( lambda x: not x[0].isupper() and x not in [ "deserialize", "get", "keras_export", "serialize", "deserialize_keras_object", "serialize_keras_object", "get_globals", ] ), dtype_and_data=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=0, max_value=10, shape=helpers.ints(min_value=2, max_value=5).map(lambda x: (x, x)), ), ) def test_tensorflow_get(fn_name, dtype_and_data): dtype_data, data = dtype_and_data simple_test_two_function( fn_name=fn_name, x=data[0], frontend="tensorflow", fn_str="get", dtype_data=dtype_data[0], rtol_=1e-01, atol_=1e-01, ivy_submodules=["keras", "activations"], framework_submodules=["keras", "activations"], ) @handle_frontend_test( fn_tree="tensorflow.keras.activations.hard_sigmoid", dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")), test_with_out=st.just(False), ) def test_tensorflow_hard_sigmoid( *, dtype_and_x, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, rtol=1e-2, atol=1e-2, x=x[0], ) @handle_frontend_test( fn_tree="tensorflow.keras.activations.linear", dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")), test_with_out=st.just(False), ) def test_tensorflow_linear( *, dtype_and_x, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], ) @handle_frontend_test( fn_tree="tensorflow.keras.activations.relu", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric") ), test_with_out=st.just(False), ) def test_tensorflow_relu( *, dtype_and_x, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], ) # selu @handle_frontend_test( fn_tree="tensorflow.keras.activations.selu", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), min_value=-3, max_value=3, min_num_dims=1, max_num_dims=3, min_dim_size=1, max_dim_size=3, ), test_with_out=st.just(False), ) def test_tensorflow_selu( *, dtype_and_x, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, rtol=1e-03, atol=1e-03, x=x[0], ) # serialize @handle_frontend_test( fn_tree="tensorflow.keras.activations.serialize", fn_name=st.sampled_from(get_callable_functions("keras.activations")).filter( lambda x: not x[0].isupper() and x not in [ "deserialize", "get", "keras_export", "serialize", "deserialize_keras_object", "serialize_keras_object", "get_globals", ] ), 
dtype_and_data=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), min_value=0, max_value=10, shape=helpers.ints(min_value=2, max_value=5).map(lambda x: (x, x)), ), ) def test_tensorflow_serialize( *, dtype_and_data, fn_name, fn_tree, frontend, ): dtype_data, data = dtype_and_data simple_test_two_function( fn_name=fn_name, x=data[0], frontend=frontend, fn_str="serialize", dtype_data=dtype_data[0], rtol_=1e-01, atol_=1e-01, ivy_submodules=["keras", "activations"], framework_submodules=["keras", "activations"], ) # sigmoid @handle_frontend_test( fn_tree="tensorflow.keras.activations.sigmoid", dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")), test_with_out=st.just(False), ) def test_tensorflow_sigmoid( *, dtype_and_x, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, rtol=1e-2, atol=1e-2, x=x[0], ) # softmax @handle_frontend_test( fn_tree="tensorflow.keras.activations.softmax", dtype_x_and_axis=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("float"), min_num_dims=2, max_axes_size=1, force_int_axis=True, valid_axis=True, ), test_with_out=st.just(False), ) def test_tensorflow_softmax( *, dtype_x_and_axis, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, x, axis = dtype_x_and_axis helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], axis=axis, ) # softplus @handle_frontend_test( fn_tree="tensorflow.keras.activations.softplus", dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")), test_with_out=st.just(False), ) def test_tensorflow_softplus( *, dtype_and_x, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, rtol=1e-2, atol=1e-2, x=x[0], ) # softsign @handle_frontend_test( fn_tree="tensorflow.keras.activations.softsign", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), ), test_with_out=st.just(False), ) def test_tensorflow_softsign( *, dtype_and_x, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, rtol=1e-2, atol=1e-2, x=x[0], ) # swish @handle_frontend_test( fn_tree="tensorflow.keras.activations.swish", dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")), test_with_out=st.just(False), ) def test_tensorflow_swish( *, dtype_and_x, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, rtol=1e-2, atol=1e-2, x=x[0], ) # tanh @handle_frontend_test( fn_tree="tensorflow.keras.activations.tanh", dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")), test_with_out=st.just(False), ) def test_tensorflow_tanh( *, dtype_and_x, on_device, fn_tree, frontend, test_flags, backend_fw, ): 
    input_dtype, x = dtype_and_x
    helpers.test_frontend_function(
        input_dtypes=input_dtype,
        backend_to_test=backend_fw,
        frontend=frontend,
        test_flags=test_flags,
        fn_tree=fn_tree,
        on_device=on_device,
        rtol=1e-2,
        atol=1e-2,
        x=x[0],
    )
ivy/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_keras/test_activations.py/0
{ "file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_keras/test_activations.py", "repo_id": "ivy", "token_count": 7031 }
61
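The activation tests above include a serialize/deserialize round-trip driven through simple_test_two_function. A ground-truth sketch of the Keras behaviour being mirrored (assumes tensorflow is installed; outputs noted in comments are typical, not asserted):

# Assumes tensorflow is installed; illustrative only.
import tensorflow as tf

fn = tf.keras.activations.deserialize("relu")
print(tf.keras.activations.serialize(fn))     # the activation's name round-trips
x = tf.constant([-2.0, 0.0, 2.0])
print(tf.keras.activations.relu(x))           # [0. 0. 2.]
print(tf.keras.activations.gelu(x, approximate=True))
print(tf.keras.activations.softmax(tf.constant([[1.0, 2.0, 3.0]]), axis=-1))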
# global from hypothesis import strategies as st, given import numpy as np import tensorflow as tf # local import ivy_tests.test_ivy.helpers as helpers from ivy_tests.test_ivy.helpers import BackendHandler # --- Helpers --- # # --------------- # def _helper_init_tensorarray(backend_fw, l_kwargs, fn=None): id_write, kwargs = l_kwargs with BackendHandler.update_backend(backend_fw) as ivy_backend: local_importer = ivy_backend.utils.dynamic_import tf_frontend = local_importer.import_module( "ivy.functional.frontends.tensorflow" ) ta = tf_frontend.tensor.TensorArray(**kwargs) ta_gt = tf.TensorArray(**kwargs) if fn == "unstack": ta_gt = ta_gt.unstack(tf.constant(id_write)) ta = ta.unstack(tf_frontend.constant(id_write)) elif fn == "split": ta_gt = ta_gt.split(**id_write) ta = ta.split(**id_write) elif fn == "scatter": indices, value = [*zip(*id_write)] ta_gt = ta_gt.scatter(indices, tf.cast(tf.stack(value), dtype=ta_gt.dtype)) value = tf_frontend.stack(list(map(tf_frontend.constant, value))) ta = ta.scatter(indices, tf_frontend.cast(value, ta.dtype)) else: for id, write in id_write: ta_gt = ta_gt.write(id, tf.constant(write)) ta = ta.write(id, tf_frontend.constant(write)) return ta_gt, ta @st.composite def _helper_random_tensorarray(draw, fn=None): size = draw(st.integers(1, 10)) dynamic_size = draw(st.booleans()) clear_after_read = draw(st.booleans()) infer_shape = draw(st.booleans()) element_shape = draw(helpers.get_shape()) element_shape = draw(st.one_of(st.just(None), st.just(element_shape))) shape = None if ( infer_shape or element_shape is not None or fn in ["scatter", "stack", "gather", "concat"] ): if fn == "concat": element_shape = None infer_shape = False shape = list(draw(helpers.get_shape(min_num_dims=1))) elif element_shape is None: shape = draw(helpers.get_shape()) else: shape = element_shape dtype = draw(helpers.get_dtypes(full=False, prune_function=False))[0] if fn in ["stack", "concat"]: ids_to_write = [True for i in range(size)] else: ids_to_write = [draw(st.booleans()) for i in range(size)] if sum(ids_to_write) == 0: ids_to_write[draw(st.integers(0, size - 1))] = True kwargs = { "dtype": dtype, "size": size, "dynamic_size": dynamic_size, "clear_after_read": clear_after_read, "infer_shape": infer_shape, "element_shape": element_shape, } id_write = [] for id, flag in enumerate(ids_to_write): if fn == "concat": shape[0] = draw(st.integers(1, 10)) if flag: write = np.array( draw( helpers.array_values( dtype=dtype, shape=shape if shape is not None else helpers.get_shape(), ) ) ) id_write.append((id, write)) if fn != "gather": return id_write, kwargs else: ids = [] for id, _ in id_write: if draw(st.booleans()): ids.append(id) if not ids: ids.append(id) return id_write, kwargs, ids @st.composite def _helper_split(draw): shape = draw(helpers.get_shape(min_num_dims=1)) dtype = draw(helpers.get_dtypes(full=False, prune_function=False))[0] value = draw(helpers.array_values(dtype=dtype, shape=shape)) dynamic_size = draw(st.booleans()) if dynamic_size: size = draw(st.integers(1, shape[0] + 5)) else: size = shape[0] total = 0 length = [] for i in range(shape[0]): length.append(draw(st.integers(0, shape[0] - total))) total += length[-1] if total != shape[0]: length[-1] += shape[0] - total return {"value": value, "lengths": length}, { "dtype": dtype, "size": size, "dynamic_size": dynamic_size, } @st.composite def _helper_unstack(draw): shape = draw(helpers.get_shape(min_num_dims=1)) size = draw(st.integers(1, 10)) dynamic_size = draw(st.booleans()) if size >= shape[0] else True dtype = 
draw(helpers.get_dtypes(full=False, prune_function=False))[0] tensor = draw(helpers.array_values(dtype=dtype, shape=shape)) kwargs = {"dtype": dtype, "size": size, "dynamic_size": dynamic_size} return tensor, kwargs # --- Main --- # # ------------ # @given(l_kwargs=_helper_random_tensorarray()) def test_tensorflow_close( l_kwargs, backend_fw, ): ta_gt, ta = _helper_init_tensorarray(backend_fw, l_kwargs) ta.close() ta_gt.close() assert np.array(ta.size()) == 0 assert np.array(ta_gt.size()) == 0 @given(l_kwargs=_helper_random_tensorarray(fn="concat")) def test_tensorflow_concat( l_kwargs, backend_fw, ): ta_gt, ta = _helper_init_tensorarray(backend_fw, l_kwargs) helpers.value_test( ret_np_from_gt_flat=ta_gt.concat().numpy().flatten(), ret_np_flat=np.array(ta.concat()).flatten(), backend=backend_fw, ) @given(l_kwargs=_helper_random_tensorarray()) def test_tensorflow_dtype( l_kwargs, backend_fw, ): ta_gt, ta = _helper_init_tensorarray(backend_fw, l_kwargs) assert ta_gt.dtype == ta.dtype.ivy_dtype @given(l_kwargs=_helper_random_tensorarray()) def test_tensorflow_dynamic_size( l_kwargs, backend_fw, ): ta_gt, ta = _helper_init_tensorarray(backend_fw, l_kwargs) assert ta_gt.dynamic_size == ta.dynamic_size @given(l_kwargs=_helper_random_tensorarray()) def test_tensorflow_element_shape( l_kwargs, backend_fw, ): ta_gt, ta = _helper_init_tensorarray(backend_fw, l_kwargs) assert ta_gt.element_shape == ta.element_shape @given(l_kwargs=_helper_random_tensorarray()) def test_tensorflow_flow( l_kwargs, backend_fw, ): ta_gt, ta = _helper_init_tensorarray(backend_fw, l_kwargs) assert ta_gt.flow == ta.flow @given(l_kwargs=_helper_random_tensorarray(fn="gather")) def test_tensorflow_gather( l_kwargs, backend_fw, ): ta_gt, ta = _helper_init_tensorarray(backend_fw, l_kwargs[:2]) *_, indices = l_kwargs helpers.value_test( ret_np_from_gt_flat=ta_gt.gather(indices).numpy().flatten(), ret_np_flat=np.array(ta.gather(indices)).flatten(), backend=backend_fw, ) @given(l_kwargs=_helper_random_tensorarray()) def test_tensorflow_handle( l_kwargs, backend_fw, ): ta_gt, ta = _helper_init_tensorarray(backend_fw, l_kwargs) assert ta_gt.handle == ta.handle # test for read and write methods @given(l_kwargs=_helper_random_tensorarray()) def test_tensorflow_read( l_kwargs, backend_fw, ): ta_gt, ta = _helper_init_tensorarray(backend_fw, l_kwargs) id_read, _ = l_kwargs for id, read in id_read: helpers.value_test( ret_np_from_gt_flat=ta_gt.read(id).numpy().flatten(), ret_np_flat=np.array(ta.read(id)).flatten(), backend=backend_fw, ) @given(l_kwargs=_helper_random_tensorarray(fn="scatter")) def test_tensorflow_scatter( l_kwargs, backend_fw, ): id_read, _ = l_kwargs ta_gt, ta = _helper_init_tensorarray(backend_fw, l_kwargs, "scatter") for id, read in id_read: helpers.value_test( ret_np_from_gt_flat=ta_gt.read(id).numpy().flatten(), ret_np_flat=np.array(ta.read(id)).flatten(), backend=backend_fw, ) @given(l_kwargs=_helper_random_tensorarray()) def test_tensorflow_size( l_kwargs, backend_fw, ): ta_gt, ta = _helper_init_tensorarray(backend_fw, l_kwargs) helpers.value_test( ret_np_from_gt_flat=ta_gt.size().numpy().flatten(), ret_np_flat=np.array(ta.size()).flatten(), backend=backend_fw, ) @given( kwargs_v_l=_helper_split(), ) def test_tensorflow_split(kwargs_v_l, backend_fw): ta_gt, ta = _helper_init_tensorarray(backend_fw, kwargs_v_l, "split") for id in range(ta_gt.size()): helpers.value_test( ret_np_from_gt_flat=ta_gt.read(id).numpy().flatten(), ret_np_flat=np.array(ta.read(id)).flatten(), backend=backend_fw, ) 
@given(l_kwargs=_helper_random_tensorarray(fn="stack")) def test_tensorflow_stack( l_kwargs, backend_fw, ): ta_gt, ta = _helper_init_tensorarray(backend_fw, l_kwargs) helpers.value_test( ret_np_from_gt_flat=ta_gt.stack().numpy().flatten(), ret_np_flat=np.array(ta.stack()).flatten(), backend=backend_fw, ) @given(l_kwargs=_helper_unstack()) def test_tensorflow_unstack( l_kwargs, backend_fw, ): ta_gt, ta = _helper_init_tensorarray(backend_fw, l_kwargs, "unstack") helpers.value_test( ret_np_from_gt_flat=ta_gt.stack().numpy().flatten(), ret_np_flat=np.array(ta.stack()).flatten(), backend=backend_fw, )
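# --- Illustrative sketch --- #
# Minimal, self-contained sketch of the functional write/read pattern that
# _helper_init_tensorarray drives for both the ground-truth tf.TensorArray
# and the ivy frontend TensorArray: every write returns a new array object,
# and stack() collects the written elements. Values are illustrative only.
def _tensorarray_roundtrip_sketch():
    import numpy as np
    import tensorflow as tf

    ta = tf.TensorArray(dtype=tf.float32, size=2, dynamic_size=False)
    ta = ta.write(0, tf.constant([1.0, 2.0]))
    ta = ta.write(1, tf.constant([3.0, 4.0]))
    stacked = ta.stack().numpy()
    assert np.allclose(stacked, [[1.0, 2.0], [3.0, 4.0]])
    assert int(ta.size()) == 2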
ivy/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_tensorarray.py/0
{ "file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_tensorarray.py", "repo_id": "ivy", "token_count": 4438 }
62
from hypothesis import strategies as st # local import ivy_tests.test_ivy.helpers as helpers from ivy_tests.test_ivy.helpers import handle_frontend_test # Cosine Similarity @handle_frontend_test( fn_tree="torch.nn.functional.cosine_similarity", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=2, max_value=5, min_dim_size=2, shared_dtype=True, num_arrays=2, ), dim=st.integers(min_value=-1, max_value=0), ) def test_torch_cosine_similarity( *, dtype_and_x, dim, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, rtol=1e-01, x1=x[0], x2=x[1], dim=dim, ) # Pairwise Distance @handle_frontend_test( fn_tree="torch.nn.functional.pairwise_distance", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), num_arrays=2, min_dim_size=2, max_dim_size=5, min_num_dims=2, min_value=2, max_value=5, allow_inf=False, ), p=st.integers(min_value=0, max_value=2), keepdim=st.booleans(), ) def test_torch_pairwise_distance( *, dtype_and_x, p, keepdim, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, rtol=1e-01, x1=x[0], x2=x[1], p=p, keepdim=keepdim, ) # P-norm Distance @handle_frontend_test( fn_tree="torch.nn.functional.pdist", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_num_dims=2, max_num_dims=2, min_dim_size=10, max_dim_size=10, min_value=1.0, max_value=1.0e5, ), p=st.integers(min_value=0, max_value=1.0e5), ) def test_torch_pdist( *, dtype_and_x, p, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, input=x[0], p=p, )
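# --- Illustrative sketch --- #
# Hedged numpy reference for the quantity test_torch_cosine_similarity
# compares against the torch frontend (a sketch of the usual formulation,
# not the frontend implementation itself):
#     cos_sim = <x1, x2> / max(||x1|| * ||x2||, eps)
import numpy as np


def _cosine_similarity_reference(x1, x2, dim=-1, eps=1e-8):
    # sum of elementwise products over `dim`, divided by the clamped
    # product of the two Euclidean norms
    numerator = np.sum(x1 * x2, axis=dim)
    denominator = np.linalg.norm(x1, axis=dim) * np.linalg.norm(x2, axis=dim)
    return numerator / np.maximum(denominator, eps)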
ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_nn/test_functional/test_distance_functions.py/0
{ "file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_nn/test_functional/test_distance_functions.py", "repo_id": "ivy", "token_count": 1467 }
63
# global from hypothesis import strategies as st # local import ivy import ivy_tests.test_ivy.helpers as helpers from ivy_tests.test_ivy.helpers import handle_frontend_test from ivy_tests.test_ivy.test_functional.test_experimental.test_core.test_manipulation import ( # noqa: E501 put_along_axis_helper, ) # broadcast_tensors @handle_frontend_test( fn_tree="torch.broadcast_tensors", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), min_num_dims=1, num_arrays=helpers.ints(min_value=2, max_value=5), ), ) def test_torch_broadcast_tensors( *, dtype_and_x, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, x = dtype_and_x kw = {} for i, array in enumerate(x): kw[f"x{i}"] = array test_flags.num_positional_args = len(kw) helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, on_device=on_device, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, **kw, ) @handle_frontend_test( fn_tree="torch.is_complex", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), min_num_dims=1, min_dim_size=1, max_dim_size=1, ), ) def test_torch_is_complex( dtype_and_x, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, input=x[0], ) @handle_frontend_test( fn_tree="torch.is_floating_point", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), min_num_dims=1, ), ) def test_torch_is_floating_point( *, dtype_and_x, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, x = dtype_and_x ivy.set_backend(backend_fw) helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, input=ivy.asarray(x[0]), ) ivy.previous_backend() @handle_frontend_test( fn_tree="torch.is_nonzero", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), min_num_dims=1, min_dim_size=1, max_dim_size=1, ), ) def test_torch_is_nonzero( dtype_and_x, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, input=x[0], ) @handle_frontend_test( fn_tree="torch.is_tensor", dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("valid")), ) def test_torch_is_tensor( *, dtype_and_x, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, on_device=on_device, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, obj=x[0], ) @handle_frontend_test( fn_tree="torch.numel", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), min_num_dims=1, ), ) def test_torch_numel( *, dtype_and_x, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, input=x[0], ) # scatter @handle_frontend_test( fn_tree="torch.scatter", args=put_along_axis_helper(), test_with_out=st.just(False), ) def test_torch_scatter( *, args, on_device, fn_tree, 
frontend, backend_fw, test_flags, ): dtypes, x, indices, value, axis = args helpers.test_frontend_function( input_dtypes=dtypes, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, backend_to_test=backend_fw, input=x, dim=axis, index=indices, src=value, ) # scatter_add @handle_frontend_test( fn_tree="torch.scatter_add", args=put_along_axis_helper(), test_with_out=st.just(False), ) def test_torch_scatter_add( *, args, on_device, fn_tree, frontend, backend_fw, test_flags, ): dtypes, x, indices, value, axis = args helpers.test_frontend_function( input_dtypes=dtypes, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, backend_to_test=backend_fw, input=x, dim=axis, index=indices, src=value, ) # scatter_reduce @handle_frontend_test( fn_tree="torch.scatter_reduce", args=put_along_axis_helper(), # ToDo: test for "mean" as soon as ivy.put_along_axis supports it mode=st.sampled_from(["sum", "prod", "amin", "amax"]), test_with_out=st.just(False), ) def test_torch_scatter_reduce( *, args, mode, on_device, fn_tree, frontend, test_flags, backend_fw, ): dtypes, x, indices, value, axis = args test_flags.ground_truth_backend = "torch" helpers.test_frontend_function( input_dtypes=dtypes, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, backend_to_test=backend_fw, input=x, dim=axis, index=indices, src=value, reduce=mode, )
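# --- Illustrative sketch --- #
# Hedged numpy sketch of the (overwrite) scatter semantics exercised by
# put_along_axis_helper for torch.scatter above; torch.scatter_add would
# accumulate into the destination instead of overwriting. The helper name
# is illustrative only.
import numpy as np


def _scatter_reference(input_arr, dim, index, src):
    # write src values into a copy of input_arr at `index` along `dim`
    out = np.copy(input_arr)
    np.put_along_axis(out, index, src, axis=dim)
    return out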
ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor_functions.py/0
{ "file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor_functions.py", "repo_id": "ivy", "token_count": 3152 }
64
"""Collection of tests for unified linear algebra functions.""" # global import sys import numpy as np from hypothesis import assume, strategies as st # local import ivy_tests.test_ivy.helpers as helpers from ivy_tests.test_ivy.helpers import handle_test, BackendHandler from ivy_tests.test_ivy.helpers.hypothesis_helpers.general_helpers import ( matrix_is_stable, ) # --- Helpers --- # # --------------- # @st.composite def _det_helper(draw): square = draw(helpers.ints(min_value=2, max_value=8).map(lambda x: (x, x))) shape_prefix = draw(helpers.get_shape()) dtype_x = draw( helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=2, max_value=5, shape=shape_prefix + square, ) ) return dtype_x @st.composite def _diag_helper(draw): dtype, x = draw( helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), small_abs_safety_factor=2, large_abs_safety_factor=2, safety_factor_scale="log", min_num_dims=1, max_num_dims=2, min_dim_size=1, max_dim_size=50, ) ) shape = x[0].shape if len(shape) == 2: k = draw(helpers.ints(min_value=-shape[0] + 1, max_value=shape[1] - 1)) else: k = draw(helpers.ints(min_value=0, max_value=shape[0])) return dtype, x, k @st.composite def _get_dtype_and_matrix(draw, *, symmetric=False): # batch_shape, shared, random_size input_dtype = draw(st.shared(st.sampled_from(draw(helpers.get_dtypes("float"))))) random_size = draw(helpers.ints(min_value=2, max_value=4)) batch_shape = draw(helpers.get_shape(min_num_dims=1, max_num_dims=3)) if symmetric: num_independnt_vals = int((random_size**2) / 2 + random_size / 2) array_vals_flat = np.array( draw( helpers.array_values( dtype=input_dtype, shape=tuple(list(batch_shape) + [num_independnt_vals]), min_value=2, max_value=5, ) ) ) array_vals = np.zeros(batch_shape + (random_size, random_size)) c = 0 for i in range(random_size): for j in range(random_size): if j < i: continue array_vals[..., i, j] = array_vals_flat[..., c] array_vals[..., j, i] = array_vals_flat[..., c] c += 1 return [input_dtype], array_vals return [input_dtype], draw( helpers.array_values( dtype=input_dtype, shape=tuple(list(batch_shape) + [random_size, random_size]), min_value=2, max_value=5, ) ) # vector_to_skew_symmetric_matrix @st.composite def _get_dtype_and_vector(draw): # batch_shape, shared, random_size input_dtype = draw( st.shared( st.sampled_from(draw(helpers.get_dtypes("numeric"))), key="shared_dtype", ) ) batch_shape = draw(helpers.get_shape(min_num_dims=2, max_num_dims=4)) return [input_dtype], draw( helpers.array_values( dtype=input_dtype, shape=tuple(list(batch_shape) + [3]), min_value=2, max_value=5, ) ) @st.composite def _get_dtype_value1_value2_axis_for_tensordot( draw, available_dtypes, min_value=None, max_value=None, allow_inf=False, exclude_min=False, exclude_max=False, min_num_dims=1, max_num_dims=10, min_dim_size=1, max_dim_size=10, ): shape = draw( helpers.get_shape( allow_none=False, min_num_dims=min_num_dims, max_num_dims=max_num_dims, min_dim_size=min_dim_size, max_dim_size=max_dim_size, ) ) axis = draw(helpers.ints(min_value=1, max_value=len(shape))) dtype = draw(st.sampled_from(draw(available_dtypes))) values = [] for i in range(2): values.append( draw( helpers.array_values( dtype=dtype, shape=shape, min_value=min_value, max_value=max_value, allow_inf=allow_inf, exclude_min=exclude_min, exclude_max=exclude_max, large_abs_safety_factor=72, small_abs_safety_factor=72, safety_factor_scale="log", ) ) ) value1, value2 = values[0], values[1] if not isinstance(axis, list): value2 = value2.transpose( [k for k 
in range(len(shape) - axis, len(shape))] + [k for k in range(0, len(shape) - axis)] ) return [dtype], value1, value2, axis @st.composite def _get_first_matrix_and_dtype(draw, *, transpose=False, conjugate=False): # batch_shape, random_size, shared input_dtype = draw( st.shared( st.sampled_from(draw(helpers.get_dtypes("numeric"))), key="shared_dtype", ).filter(lambda x: "float16" not in x) ) shared_size = draw( st.shared(helpers.ints(min_value=2, max_value=4), key="shared_size") ) random_size = draw( st.shared(helpers.ints(min_value=2, max_value=4), key="shared_size") ) matrix = draw( helpers.array_values( dtype=input_dtype, shape=(random_size, shared_size), min_value=2, max_value=5, ) ) if conjugate: conjugate = draw(st.booleans()) return [input_dtype], matrix, conjugate if transpose: transpose = draw(st.booleans()) adjoint = draw(st.booleans()) if adjoint and transpose: adjoint = draw(st.just("False")) if transpose and not adjoint: matrix = np.transpose(matrix) if adjoint and not transpose: matrix = np.transpose(np.conjugate(matrix)) return [input_dtype], matrix, transpose, adjoint return [input_dtype], matrix @st.composite def _get_second_matrix_and_dtype(draw, *, transpose=False): # batch_shape, shared, random_size input_dtype = draw( st.shared( st.sampled_from(draw(helpers.get_dtypes("numeric"))), key="shared_dtype", ).filter(lambda x: "float16" not in x) ) shared_size = draw( st.shared(helpers.ints(min_value=2, max_value=4), key="shared_size") ) random_size = draw( st.shared(helpers.ints(min_value=2, max_value=4), key="shared_size") ) matrix = draw( helpers.array_values( dtype=input_dtype, shape=(random_size, shared_size), min_value=2, max_value=5, ) ) if transpose: transpose = draw(st.booleans()) adjoint = draw(st.booleans()) if adjoint and transpose: adjoint = draw(st.just("False")) if transpose and not adjoint: matrix = np.transpose(matrix) if adjoint and not transpose: matrix = np.transpose(np.conjugate(matrix)) return [input_dtype], matrix, transpose, adjoint return [input_dtype], matrix @st.composite def _matrix_rank_helper(draw): _batch_shape = draw( helpers.get_shape(min_num_dims=1, max_num_dims=3, min_dim_size=1) ) _batch_dim = draw(st.sampled_from([(), _batch_shape])) _matrix_dim = draw(helpers.ints(min_value=2, max_value=20)) shape = _batch_dim + (_matrix_dim, _matrix_dim) dtype, x = draw( helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), shape=shape, min_value=-1e05, max_value=1e05, abs_smallest_val=1e-05, safety_factor_scale="log", ) ) if np.all(np.swapaxes(x[0], -1, -2) == x[0]): hermitian = True else: hermitian = False tol_strategy = st.one_of( st.none(), st.floats(allow_nan=False, allow_infinity=False), helpers.array_values( dtype=helpers.get_dtypes("float", prune_function=False), shape=_batch_shape, min_value=-1e05, max_value=1e05, abs_smallest_val=1e-05, safety_factor_scale="log", ), ) atol = draw(tol_strategy) rtol = draw(tol_strategy) return dtype, x[0], hermitian, atol, rtol @st.composite def dtype_value1_value2_axis( draw, available_dtypes, abs_smallest_val=None, min_value=None, max_value=None, allow_inf=False, exclude_min=False, exclude_max=False, min_num_dims=1, max_num_dims=10, min_dim_size=1, max_dim_size=10, specific_dim_size=3, large_abs_safety_factor=4, small_abs_safety_factor=4, safety_factor_scale="log", ): # For cross product, a dim with size 3 is required shape = draw( helpers.get_shape( allow_none=False, min_num_dims=min_num_dims, max_num_dims=max_num_dims, min_dim_size=min_dim_size, max_dim_size=max_dim_size, ) ) axis = 
draw(helpers.ints(min_value=0, max_value=len(shape))) # make sure there is a dim with specific dim size shape = list(shape) shape = shape[:axis] + [specific_dim_size] + shape[axis:] shape = tuple(shape) dtype = draw(st.sampled_from(draw(available_dtypes))) values = [] for i in range(2): values.append( draw( helpers.array_values( dtype=dtype, shape=shape, abs_smallest_val=abs_smallest_val, min_value=min_value, max_value=max_value, allow_inf=allow_inf, exclude_min=exclude_min, exclude_max=exclude_max, large_abs_safety_factor=large_abs_safety_factor, small_abs_safety_factor=small_abs_safety_factor, safety_factor_scale=safety_factor_scale, ) ) ) value1, value2 = values[0], values[1] return [dtype], value1, value2, axis # --- Main --- # # ------------ # # cholesky # execute with grads error @handle_test( fn_tree="functional.ivy.cholesky", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=0, max_value=10, shape=helpers.ints(min_value=2, max_value=5).map(lambda x: (x, x)), ), upper=st.booleans(), ) def test_cholesky(*, dtype_x, upper, test_flags, backend_fw, fn_name, on_device): dtype, x = dtype_x x = x[0] x = np.matmul(x.T, x) + np.identity(x.shape[0]) # make symmetric positive-definite helpers.test_function( input_dtypes=dtype, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, x=x, upper=upper, rtol_=1e-3, atol_=1e-3, ) # cross @handle_test( fn_tree="functional.ivy.cross", dtype_x1_x2_axis=dtype_value1_value2_axis( available_dtypes=helpers.get_dtypes("numeric"), min_num_dims=1, max_num_dims=5, min_dim_size=3, max_dim_size=3, min_value=-1e5, max_value=1e5, abs_smallest_val=0.01, safety_factor_scale="log", ), ) def test_cross(*, dtype_x1_x2_axis, test_flags, backend_fw, fn_name, on_device): dtype, x1, x2, axis = dtype_x1_x2_axis helpers.test_function( input_dtypes=dtype, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, rtol_=1e-1, atol_=1e-1, x1=x1, x2=x2, axis=axis, ) # det @handle_test( fn_tree="functional.ivy.det", dtype_x=_det_helper(), ) def test_det(*, dtype_x, test_flags, backend_fw, fn_name, on_device): input_dtype, x = dtype_x assume(matrix_is_stable(x[0])) helpers.test_function( input_dtypes=input_dtype, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, rtol_=1e-1, atol_=1e-1, x=x[0], ) # diag @handle_test( fn_tree="functional.ivy.diag", dtype_x_k=_diag_helper(), ) def test_diag(*, dtype_x_k, test_flags, backend_fw, fn_name, on_device): dtype, x, k = dtype_x_k helpers.test_function( input_dtypes=dtype, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, x=x[0], k=k, ) # diagonal @handle_test( fn_tree="functional.ivy.diagonal", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), min_num_dims=2, max_num_dims=2, min_dim_size=1, max_dim_size=50, ), offset=helpers.ints(min_value=-10, max_value=50), axes=st.lists( helpers.ints(min_value=-2, max_value=1), min_size=2, max_size=2, unique=True ).filter(lambda axes: axes[0] % 2 != axes[1] % 2), ) def test_diagonal(*, dtype_x, offset, axes, test_flags, backend_fw, fn_name, on_device): dtype, x = dtype_x helpers.test_function( input_dtypes=dtype, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, x=x[0], offset=offset, axis1=axes[0], axis2=axes[1], ) # eigh @handle_test( fn_tree="functional.ivy.eigh", dtype_x=_get_dtype_and_matrix(symmetric=True), UPLO=st.sampled_from(("L", "U")), 
test_gradients=st.just(False), ) def test_eigh(*, dtype_x, UPLO, test_flags, backend_fw, fn_name, on_device): input_dtype, x = dtype_x results = helpers.test_function( input_dtypes=input_dtype, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, x=x, UPLO=UPLO, test_values=False, return_flat_np_arrays=True, ) if results is None: return ret_np_flat, ret_from_np_flat = results reconstructed_np = None for i in range(len(ret_np_flat) // 2): eigenvalue = ret_np_flat[i] eigenvector = ret_np_flat[len(ret_np_flat) // 2 + i] if reconstructed_np is not None: reconstructed_np += eigenvalue * np.matmul( eigenvector.reshape(1, -1), eigenvector.reshape(-1, 1) ) else: reconstructed_np = eigenvalue * np.matmul( eigenvector.reshape(1, -1), eigenvector.reshape(-1, 1) ) reconstructed_from_np = None for i in range(len(ret_from_np_flat) // 2): eigenvalue = ret_from_np_flat[i] eigenvector = ret_from_np_flat[len(ret_np_flat) // 2 + i] if reconstructed_from_np is not None: reconstructed_from_np += eigenvalue * np.matmul( eigenvector.reshape(1, -1), eigenvector.reshape(-1, 1) ) else: reconstructed_from_np = eigenvalue * np.matmul( eigenvector.reshape(1, -1), eigenvector.reshape(-1, 1) ) # value test helpers.assert_all_close( reconstructed_np, reconstructed_from_np, rtol=1e-1, atol=1e-2, backend=backend_fw, ) # eigvalsh @handle_test( fn_tree="functional.ivy.eigvalsh", dtype_x=_get_dtype_and_matrix(symmetric=True), UPLO=st.sampled_from(("L", "U")), test_gradients=st.just(False), ) def test_eigvalsh(*, dtype_x, UPLO, test_flags, backend_fw, fn_name, on_device): input_dtype, x = dtype_x helpers.test_function( input_dtypes=input_dtype, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, rtol_=1e-3, test_values=False, x=x, UPLO=UPLO, ) # inner @handle_test( fn_tree="functional.ivy.inner", dtype_xy=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, large_abs_safety_factor=8, small_abs_safety_factor=8, safety_factor_scale="log", min_num_dims=1, max_num_dims=1, ), ) def test_inner(*, dtype_xy, test_flags, backend_fw, fn_name, on_device): types, arrays = dtype_xy helpers.test_function( input_dtypes=types, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, rtol_=1e-1, atol_=1e-2, x1=arrays[0], x2=arrays[1], ) # inv @handle_test( fn_tree="functional.ivy.inv", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), small_abs_safety_factor=24, large_abs_safety_factor=24, safety_factor_scale="log", shape=helpers.ints(min_value=2, max_value=20).map(lambda x: (x, x)), ).filter(lambda x: np.linalg.cond(x[1][0].tolist()) < 1 / sys.float_info.epsilon), adjoint=st.booleans(), ) def test_inv(*, dtype_x, adjoint, test_flags, backend_fw, fn_name, on_device): input_dtype, x = dtype_x helpers.test_function( input_dtypes=input_dtype, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, rtol_=1e-2, atol_=1e-2, x=x[0], adjoint=adjoint, ) # matmul @handle_test( fn_tree="functional.ivy.matmul", x=_get_first_matrix_and_dtype(transpose=True), y=_get_second_matrix_and_dtype(transpose=True), ) def test_matmul(*, x, y, test_flags, backend_fw, fn_name, on_device): input_dtype1, x_1, transpose_a, adjoint_a = x input_dtype2, y_1, transpose_b, adjoint_b = y helpers.test_function( input_dtypes=input_dtype1 + input_dtype2, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, rtol_=1e-1, atol_=1e-1, x1=x_1, x2=y_1, 
transpose_a=transpose_a, transpose_b=transpose_b, adjoint_a=adjoint_a, adjoint_b=adjoint_b, ) # matrix_norm @handle_test( fn_tree="functional.ivy.matrix_norm", # ground_truth_backend="numpy", dtype_value_axis=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("float"), min_num_dims=2, valid_axis=True, min_axes_size=2, max_axes_size=2, force_tuple_axis=True, allow_neg_axes=False, ), kd=st.booleans(), ord=st.sampled_from((-2, -1, 1, 2, -float("inf"), float("inf"), "fro", "nuc")), ) def test_matrix_norm( *, dtype_value_axis, kd, ord, test_flags, backend_fw, fn_name, on_device ): dtype, x, axis = dtype_value_axis assume(matrix_is_stable(x[0], cond_limit=10)) helpers.test_function( input_dtypes=dtype, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, rtol_=1e-1, atol_=1e-2, x=x[0], axis=axis, keepdims=kd, ord=ord, ) # matrix_power @handle_test( fn_tree="functional.ivy.matrix_power", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=1e-3, max_value=20, shape=helpers.ints(min_value=2, max_value=8).map(lambda x: (x, x)), ), n=helpers.ints(min_value=-6, max_value=6), ) def test_matrix_power(*, dtype_x, n, test_flags, backend_fw, fn_name, on_device): dtype, x = dtype_x assume(matrix_is_stable(x[0])) helpers.test_function( input_dtypes=dtype, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, rtol_=1e-1, atol_=1e-1, x=x[0], n=n, ) # matrix_rank @handle_test( fn_tree="functional.ivy.matrix_rank", dtype_x_hermitian_atol_rtol=_matrix_rank_helper(), ground_truth_backend="numpy", ) def test_matrix_rank( *, dtype_x_hermitian_atol_rtol, test_flags, backend_fw, fn_name, on_device ): dtype, x, hermitian, atol, rtol = dtype_x_hermitian_atol_rtol assume(matrix_is_stable(x, cond_limit=10)) helpers.test_function( input_dtypes=dtype, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, x=x, atol=atol, rtol=rtol, hermitian=hermitian, ) # matrix_transpose @handle_test( fn_tree="functional.ivy.matrix_transpose", dtype_x=_get_first_matrix_and_dtype(conjugate=True), ) def test_matrix_transpose(*, dtype_x, test_flags, backend_fw, fn_name, on_device): input_dtype, x, conjugate = dtype_x helpers.test_function( input_dtypes=input_dtype, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, x=x, conjugate=conjugate, ) # outer @handle_test( fn_tree="functional.ivy.outer", dtype_xy=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, min_value=1, max_value=50, min_num_dims=1, max_num_dims=1, ), ) def test_outer(*, dtype_xy, test_flags, backend_fw, fn_name, on_device): types, arrays = dtype_xy helpers.test_function( input_dtypes=types, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, x1=arrays[0], x2=arrays[1], ) # pinv @handle_test( fn_tree="functional.ivy.pinv", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_num_dims=2, max_num_dims=5, min_dim_size=1, max_dim_size=5, large_abs_safety_factor=32, small_abs_safety_factor=32, safety_factor_scale="log", ), rtol=st.floats(1e-5, 1e-3), ) def test_pinv(*, dtype_x, rtol, test_flags, backend_fw, fn_name, on_device): dtype, x = dtype_x helpers.test_function( input_dtypes=dtype, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, rtol_=1e-2, atol_=1e-2, x=x[0], rtol=rtol, ) # qr @handle_test( fn_tree="functional.ivy.qr", 
dtype_x=_get_dtype_and_matrix(), mode=st.sampled_from(("reduced", "complete")), test_with_out=st.just(False), test_gradients=st.just(False), ) def test_qr(*, dtype_x, mode, test_flags, backend_fw, fn_name, on_device): dtype, x = dtype_x results = helpers.test_function( input_dtypes=dtype, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, x=x, mode=mode, test_values=False, return_flat_np_arrays=True, ) if results is None: return ret_np_flat, ret_from_np_flat = results for i in range(len(ret_np_flat) // 2): q_np_flat = ret_np_flat[i] r_np_flat = ret_np_flat[len(ret_np_flat) // 2 + i] reconstructed_np_flat = np.matmul(q_np_flat, r_np_flat) for i in range(len(ret_from_np_flat) // 2): q_from_np_flat = ret_from_np_flat[i] r_from_np_flat = ret_from_np_flat[len(ret_np_flat) // 2 + i] reconstructed_from_np_flat = np.matmul(q_from_np_flat, r_from_np_flat) # value test helpers.assert_all_close( reconstructed_np_flat, reconstructed_from_np_flat, rtol=1e-1, atol=1e-1, backend=backend_fw, ground_truth_backend=test_flags.ground_truth_backend, ) # slogdet # TODO: add with_out testing when testing with tuples is supported # execute with grads error @handle_test( fn_tree="functional.ivy.slogdet", dtype_x=_det_helper(), test_with_out=st.just(False), ) def test_slogdet(*, dtype_x, test_flags, backend_fw, fn_name, on_device): input_dtype, x = dtype_x assume(matrix_is_stable(x[0])) ret_grad_idxs = ( [[1, "a"], [1, "b", "c"], [1, "b", "d"]] if test_flags.container[0] else [[1]] ) helpers.test_function( input_dtypes=input_dtype, test_flags=test_flags, backend_to_test=backend_fw, rtol_=1e-1, atol_=1e-2, fn_name=fn_name, on_device=on_device, ret_grad_idxs=ret_grad_idxs, x=x[0], ) @handle_test( fn_tree="functional.ivy.solve", x=helpers.get_first_solve_batch_matrix(choose_adjoint=True), y=helpers.get_second_solve_batch_matrix(), ) def test_solve(*, x, y, test_flags, backend_fw, fn_name, on_device): input_dtype1, x1, adjoint = x input_dtype2, x2, _ = y helpers.test_function( input_dtypes=[input_dtype1, input_dtype2], test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, rtol_=1e-1, atol_=1e-1, x1=x1, x2=x2, adjoint=adjoint, ) # svd @handle_test( fn_tree="functional.ivy.svd", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_num_dims=3, max_num_dims=5, min_dim_size=2, max_dim_size=5, min_value=0.1, max_value=10.0, ), fm=st.booleans(), uv=st.booleans(), test_with_out=st.just(False), test_gradients=st.just(False), ) def test_svd(*, dtype_x, uv, fm, test_flags, backend_fw, fn_name, on_device): dtype, x = dtype_x results = helpers.test_function( input_dtypes=dtype, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, x=x[0], compute_uv=uv, full_matrices=fm, test_values=False, return_flat_np_arrays=True, ) if results is None: return # value test based on recreating the original matrix and testing the consistency ret_flat_np, ret_from_gt_flat_np = results if uv: for i in range(len(ret_flat_np) // 3): U = ret_flat_np[i] S = ret_flat_np[len(ret_flat_np) // 3 + i] Vh = ret_flat_np[2 * len(ret_flat_np) // 3 + i] m = U.shape[-1] n = Vh.shape[-1] S = np.expand_dims(S, -2) if m > n else np.expand_dims(S, -1) for i in range(len(ret_from_gt_flat_np) // 3): U_gt = ret_from_gt_flat_np[i] S_gt = ret_from_gt_flat_np[len(ret_from_gt_flat_np) // 3 + i] Vh_gt = ret_from_gt_flat_np[2 * len(ret_from_gt_flat_np) // 3 + i] S_gt = np.expand_dims(S_gt, -2) if m > n else np.expand_dims(S_gt, -1) with 
BackendHandler.update_backend("numpy") as ivy_backend: S_mat = ( S * ivy_backend.eye( U.shape[-1], Vh.shape[-2], batch_shape=U.shape[:-2] ).data ) S_mat_gt = ( S_gt * ivy_backend.eye( U_gt.shape[-1], Vh_gt.shape[-2], batch_shape=U_gt.shape[:-2] ).data ) reconstructed = np.matmul(np.matmul(U, S_mat), Vh) reconstructed_gt = np.matmul(np.matmul(U_gt, S_mat_gt), Vh_gt) # value test helpers.assert_all_close( reconstructed, reconstructed_gt, atol=1e-04, backend=backend_fw, ground_truth_backend=test_flags.ground_truth_backend, ) helpers.assert_all_close( reconstructed, x[0], atol=1e-04, backend=backend_fw, ground_truth_backend=test_flags.ground_truth_backend, ) else: S = ret_flat_np S_gt = ret_from_gt_flat_np helpers.assert_all_close( S[0], S_gt[0], atol=1e-04, backend=backend_fw, ground_truth_backend=test_flags.ground_truth_backend, ) # svdvals @handle_test( fn_tree="functional.ivy.svdvals", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=0, max_value=50, min_num_dims=2, ), test_gradients=st.just(False), ) def test_svdvals(*, dtype_x, test_flags, backend_fw, fn_name, on_device): input_dtype, x = dtype_x helpers.test_function( input_dtypes=input_dtype, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, rtol_=1e-2, atol_=1e-2, x=x[0], ) # tensordot @handle_test( fn_tree="functional.ivy.tensordot", dtype_x1_x2_axis=_get_dtype_value1_value2_axis_for_tensordot( available_dtypes=helpers.get_dtypes("numeric"), min_num_dims=1, max_num_dims=5, min_dim_size=1, max_dim_size=10, ), ) def test_tensordot(*, dtype_x1_x2_axis, test_flags, backend_fw, fn_name, on_device): ( dtype, x1, x2, axis, ) = dtype_x1_x2_axis helpers.test_function( input_dtypes=dtype, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, rtol_=0.8, atol_=0.8, x1=x1, x2=x2, axes=axis, ) # trace @handle_test( fn_tree="functional.ivy.trace", dtype_x_axes=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("float"), valid_axis=True, min_axes_size=2, max_axes_size=2, min_num_dims=2, large_abs_safety_factor=24, small_abs_safety_factor=24, safety_factor_scale="log", ), # TODO: test for more offsets offset=st.integers(min_value=-3, max_value=3), ) def test_trace(*, dtype_x_axes, offset, test_flags, backend_fw, fn_name, on_device): dtype, x, axes = dtype_x_axes axis1, axis2 = axes helpers.test_function( input_dtypes=dtype, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, rtol_=1e-1, atol_=1e-1, x=x[0], offset=offset, axis1=axis1, axis2=axis2, ) # vander @handle_test( fn_tree="functional.ivy.vander", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), shape=st.tuples( helpers.ints(min_value=1, max_value=10), ), large_abs_safety_factor=15, small_abs_safety_factor=15, safety_factor_scale="log", ), N=st.integers(min_value=1, max_value=10) | st.none(), increasing=st.booleans(), ) def test_vander( *, dtype_and_x, N, increasing, test_flags, backend_fw, fn_name, on_device ): input_dtype, x = dtype_and_x helpers.test_function( input_dtypes=input_dtype, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, rtol_=1e-2, atol_=1e-2, x=x[0], N=N, increasing=increasing, ) # vecdot @handle_test( fn_tree="functional.ivy.vecdot", dtype_x1_x2_axis=dtype_value1_value2_axis( available_dtypes=helpers.get_dtypes("numeric"), large_abs_safety_factor=100, small_abs_safety_factor=100, safety_factor_scale="log", min_num_dims=1, max_num_dims=4, 
min_dim_size=1, max_dim_size=4, ), ) def test_vecdot(*, dtype_x1_x2_axis, test_flags, backend_fw, fn_name, on_device): dtype, x1, x2, axis = dtype_x1_x2_axis helpers.test_function( input_dtypes=dtype, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, rtol_=5e-1, atol_=5e-1, x1=x1, x2=x2, axis=axis, ) # vector_norm @handle_test( fn_tree="functional.ivy.vector_norm", dtype_values_axis=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("numeric"), valid_axis=True, min_value=-1e04, max_value=1e04, abs_smallest_val=1e-04, max_axes_size=2, allow_neg_axes=True, ), kd=st.booleans(), ord=st.one_of( helpers.ints(min_value=-5, max_value=5), helpers.floats(min_value=-5, max_value=5.0), st.sampled_from((float("inf"), -float("inf"))), ), dtype=helpers.get_dtypes("numeric", full=False, none=True), ) def test_vector_norm( *, dtype_values_axis, kd, ord, dtype, test_flags, backend_fw, fn_name, on_device ): x_dtype, x, axis = dtype_values_axis # to avoid tuple axis with only one axis as force_int_axis can't generate # axis with two axes if isinstance(axis, tuple) and len(axis) == 1: axis = axis[0] helpers.test_function( input_dtypes=x_dtype, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, x=x[0], axis=axis, keepdims=kd, ord=ord, dtype=dtype[0], atol_=1e-08, ) # Specific value test to handle cases when ord is one of {inf, -inf} with BackendHandler.update_backend(backend_fw) as ivy_backend: arr = ivy_backend.array([[1.0, 2.0, 3.0], [-1.0, 2.0, 4.0]]) arr_normed_inf = ivy_backend.vector_norm(arr, axis=0, ord=float("inf")) arr_normed_min_inf = ivy_backend.vector_norm(arr, axis=0, ord=float("-inf")) with BackendHandler.update_backend(test_flags.ground_truth_backend) as gt_backend: gt_arr_normed_inf = gt_backend.array([1.0, 2.0, 4.0]) gt_arr_normed_min_inf = gt_backend.array([1.0, 2.0, 3.0]) helpers.assert_all_close( arr_normed_inf, gt_arr_normed_inf, backend=backend_fw, ground_truth_backend=test_flags.ground_truth_backend, ) helpers.assert_all_close( arr_normed_min_inf, gt_arr_normed_min_inf, backend=backend_fw, ground_truth_backend=test_flags.ground_truth_backend, ) @handle_test( fn_tree="functional.ivy.vector_to_skew_symmetric_matrix", dtype_x=_get_dtype_and_vector(), ) def test_vector_to_skew_symmetric_matrix( *, dtype_x, test_flags, backend_fw, fn_name, on_device ): input_dtype, x = dtype_x helpers.test_function( input_dtypes=input_dtype, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, vector=x, )
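# --- Illustrative sketch --- #
# Numpy sketch (arbitrary values) of the reconstruction identities that
# test_qr, test_svd and test_eigh rely on: Q @ R, U @ diag(S) @ Vh and
# V @ diag(w) @ V.T should each rebuild the input up to numerical tolerance.
import numpy as np

_a = np.random.default_rng(0).random((4, 4))

_q, _r = np.linalg.qr(_a, mode="reduced")
assert np.allclose(_a, _q @ _r)

_u, _s, _vh = np.linalg.svd(_a, full_matrices=False)
assert np.allclose(_a, (_u * _s) @ _vh)

_sym = _a + _a.T  # symmetric input required by eigh
_w, _v = np.linalg.eigh(_sym)
assert np.allclose(_sym, (_v * _w) @ _v.T)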
ivy/ivy_tests/test_ivy/test_functional/test_core/test_linalg.py/0
{ "file_path": "ivy/ivy_tests/test_ivy/test_functional/test_core/test_linalg.py", "repo_id": "ivy", "token_count": 18456 }
65
# global import pytest import numpy as np # local import ivy_tests.test_ivy.helpers as helpers from ivy_tests.test_ivy.helpers.pipeline_helper import BackendHandler # --- Helpers --- # # --------------- # def _get_primals_and_tangents(x_, dtype, ivy_backend, primals_cont, tangents_cont): if primals_cont: primals = ivy_backend.Container( { "l": { "a": ivy_backend.array(x_[0][0], dtype=dtype), "b": ivy_backend.array(x_[0][1], dtype=dtype), } } ) else: primals = ivy_backend.array(x_[0], dtype=dtype) if tangents_cont: tangents = ivy_backend.Container( { "l": { "a": ivy_backend.array([t[0] for t in x_[1]], dtype=dtype), "b": ivy_backend.array([t[0] for t in x_[1]], dtype=dtype), } } ) else: if primals_cont: tangents = ivy_backend.array([t[0] for t in x_[1]], dtype=dtype) else: tangents = ivy_backend.array(x_[1], dtype=dtype).T return primals, tangents # --- Main --- # # ------------ # # bind_custom_gradient_function @pytest.mark.parametrize( "x_", [[[4.6, 2.1, 5], [2.8, 1.3, 6.2]], [[4.6, 2.1], [5, 2.8], [1.3, 6.2]]] ) @pytest.mark.parametrize("dtype", ["float32", "float64"]) @pytest.mark.parametrize("inter_func_str", ["square", "cos"]) @pytest.mark.parametrize( "custom_grad_fn", [lambda *args: args[1] * args[0][0], lambda *args: args[1] * args[0][1]], ) def test_bind_custom_gradient_function( x_, dtype, inter_func_str, custom_grad_fn, backend_fw ): if backend_fw == "numpy": return with BackendHandler.update_backend(backend_fw) as ivy_backend: def inter_func_(x): return ivy_backend.__dict__[inter_func_str](x) x = ivy_backend.array(x_, dtype=dtype) inter_func = ivy_backend.bind_custom_gradient_function( inter_func_, custom_grad_fn ) def func(x): return ivy_backend.mean(ivy_backend.exp(inter_func(x))) ret, grad = ivy_backend.execute_with_gradients(func, x) ret_np = helpers.flatten_and_to_np(backend=backend_fw, ret=ret) grad_np = helpers.flatten_and_to_np(backend=backend_fw, ret=grad) with BackendHandler.update_backend("tensorflow") as gt_backend: x = gt_backend.array(x_, dtype=dtype) def inter_func_(x): return gt_backend.__dict__[inter_func_str](x) inter_func = gt_backend.bind_custom_gradient_function( inter_func_, custom_grad_fn ) def func(x): return gt_backend.mean(gt_backend.exp(inter_func(x))) ret_gt, grad_gt = gt_backend.execute_with_gradients(func, x) ret_np_from_gt = helpers.flatten_and_to_np(backend="tensorflow", ret=ret_gt) grad_np_from_gt = helpers.flatten_and_to_np(backend="tensorflow", ret=grad_gt) for ret, ret_from_gt in zip(ret_np, ret_np_from_gt): assert np.allclose(ret, ret_from_gt) for grad, grad_from_gt in zip(grad_np, grad_np_from_gt): assert grad.shape == grad_from_gt.shape assert np.allclose(grad, grad_from_gt) # write a test for jvp @pytest.mark.parametrize( "x_", [[[[4.6, 2.1, 5], [2.8, 1.3, 6.2]], [[4.6, 2.1], [5, 2.8], [1.3, 6.2]]]] ) @pytest.mark.parametrize("dtype", ["float32", "float64"]) @pytest.mark.parametrize("func_str", ["square", "cos"]) @pytest.mark.parametrize( "nested_structs", ["nested_input_nested_output", "nested_input_flat_output", "none"] ) def test_jvp(x_, dtype, func_str, backend_fw, nested_structs): if backend_fw in ["numpy", "paddle", "mxnet"]: pytest.skip() with BackendHandler.update_backend(backend_fw) as ivy_backend: base_func = ivy_backend.__dict__[func_str] if nested_structs == "none": primals, tangents = _get_primals_and_tangents( x_, dtype, ivy_backend, False, False ) func = base_func elif nested_structs == "nested_input_nested_output": primals, tangents = _get_primals_and_tangents( x_, dtype, ivy_backend, True, True ) func = base_func elif 
nested_structs == "nested_input_flat_output": primals, tangents = _get_primals_and_tangents( x_, dtype, ivy_backend, True, True ) def func(x): return base_func(x["l"]["a"]) + base_func(x["l"]["b"]) primals = (primals,) tangents = (tangents,) primals_out, jvp = ivy_backend.jvp(func, primals, tangents) flat_primals_np = helpers.flatten_and_to_np(ret=primals_out, backend=backend_fw) jvp_np = helpers.flatten_and_to_np(ret=jvp, backend=backend_fw) assert jvp_np != [] with BackendHandler.update_backend("jax") as gt_backend: base_func = gt_backend.__dict__[func_str] if nested_structs == "none": primals, tangents = _get_primals_and_tangents( x_, dtype, gt_backend, False, False ) func = base_func elif nested_structs == "nested_input_nested_output": primals, tangents = _get_primals_and_tangents( x_, dtype, gt_backend, True, True ) func = base_func elif nested_structs == "nested_input_flat_output": primals, tangents = _get_primals_and_tangents( x_, dtype, gt_backend, True, True ) def func(x): return base_func(x["l"]["a"]) + base_func(x["l"]["b"]) # func = base_func primals = (primals,) tangents = (tangents,) primals_out_gt, jvp = gt_backend.jvp(func, primals, tangents) flat_primals_np_from_gt = helpers.flatten_and_to_np( ret=primals_out_gt, backend="jax" ) jvp_np_from_gt = helpers.flatten_and_to_np(ret=jvp, backend="jax") assert jvp_np_from_gt != [] assert np.allclose(flat_primals_np, flat_primals_np_from_gt) assert np.allclose(jvp_np, jvp_np_from_gt) # write a test for vjp @pytest.mark.parametrize( "x_", [[[[4.6, 2.1, 5], [2.8, 1.3, 6.2]], [[4.6, 2.1], [5, 2.8], [1.3, 6.2]]]] ) @pytest.mark.parametrize("dtype", ["float32", "float64"]) @pytest.mark.parametrize("func_str", ["square", "cos"]) @pytest.mark.parametrize( "nested_structs", ["nested_input_nested_output", "nested_input_flat_output", "none"] ) def test_vjp(x_, dtype, func_str, backend_fw, nested_structs): if backend_fw == "numpy": pytest.skip() with BackendHandler.update_backend(backend_fw) as ivy_backend: base_func = ivy_backend.__dict__[func_str] if nested_structs == "none": primals, tangents = _get_primals_and_tangents( x_, dtype, ivy_backend, False, False ) func = base_func elif nested_structs == "nested_input_nested_output": primals, tangents = _get_primals_and_tangents( x_, dtype, ivy_backend, True, True ) func = base_func elif nested_structs == "nested_input_flat_output": primals, tangents = _get_primals_and_tangents( x_, dtype, ivy_backend, True, False ) def func(x): return base_func(x["l"]["a"]) + base_func(x["l"]["b"]) primals = (primals,) tangents = (tangents,) primals_out, vjp_fn = ivy_backend.vjp(func, *primals) vjp = vjp_fn(*tangents) flat_primals_np = helpers.flatten_and_to_np(ret=primals_out, backend=backend_fw) vjp_np = helpers.flatten_and_to_np(ret=vjp, backend=backend_fw) assert vjp_np != [] with BackendHandler.update_backend("jax") as gt_backend: base_func = gt_backend.__dict__[func_str] if nested_structs == "none": primals, tangents = _get_primals_and_tangents( x_, dtype, gt_backend, False, False ) func = base_func elif nested_structs == "nested_input_nested_output": primals, tangents = _get_primals_and_tangents( x_, dtype, gt_backend, True, True ) func = base_func elif nested_structs == "nested_input_flat_output": primals, tangents = _get_primals_and_tangents( x_, dtype, gt_backend, True, False ) def func(x): return base_func(x["l"]["a"]) + base_func(x["l"]["b"]) primals = (primals,) tangents = (tangents,) primals_out_gt, vjp_fn = gt_backend.vjp(func, *primals) vjp = vjp_fn(*tangents) flat_primals_np_from_gt = 
helpers.flatten_and_to_np( ret=primals_out_gt, backend="jax" ) vjp_np_from_gt = helpers.flatten_and_to_np(ret=vjp, backend="jax") assert vjp_np_from_gt != [] assert np.allclose(flat_primals_np, flat_primals_np_from_gt) assert np.allclose(vjp_np, vjp_np_from_gt)
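# --- Illustrative sketch --- #
# Hedged JAX sketch of the jvp/vjp contract that test_jvp and test_vjp check
# against the "jax" ground truth: jvp pushes a tangent forward through the
# Jacobian, vjp pulls a cotangent back, and for an elementwise function both
# reduce to multiplication by the elementwise derivative. Values are
# illustrative only.
import jax
import jax.numpy as jnp

_x = jnp.array([4.6, 2.1, 5.0])
_v = jnp.ones_like(_x)

_primal_out, _jvp_out = jax.jvp(jnp.cos, (_x,), (_v,))
_primal_out_2, _vjp_fn = jax.vjp(jnp.cos, _x)
(_vjp_out,) = _vjp_fn(_v)

assert jnp.allclose(_jvp_out, -jnp.sin(_x) * _v)
assert jnp.allclose(_vjp_out, -jnp.sin(_x) * _v)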
ivy/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_gradients.py/0
{ "file_path": "ivy/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_gradients.py", "repo_id": "ivy", "token_count": 4713 }
66
# global from hypothesis import strategies as st import numpy as np # local import ivy_tests.test_ivy.helpers as helpers from ivy_tests.test_ivy.helpers import handle_test # --- Helpers --- # # --------------- # @st.composite def _hinge_embedding_loss_input( draw, min_num_dims=1, max_num_dims=5, min_dim_size=1, max_dim_size=10 ): # determine the shape for both arrays (input and target) shape = draw( st.shared( helpers.get_shape( min_num_dims=min_num_dims, max_num_dims=max_num_dims, min_dim_size=min_dim_size, max_dim_size=max_dim_size, ), key="shared_shape", ) ) # Generate an array of -1 and 1 with the given shape (target_array) def _arrays_of_neg1_and_1(shape): value_strategy = st.sampled_from([-1, 1]) prod_shape = int(np.prod(shape)) # Convert np.int64 to int array_data = draw( st.lists(value_strategy, min_size=prod_shape, max_size=prod_shape) ) return np.asarray(array_data).reshape(shape) # input_array dtype, xx = draw( helpers.dtype_and_values( shape=shape, available_dtypes=helpers.get_dtypes("valid"), safety_factor_scale="linear", large_abs_safety_factor=2, small_abs_safety_factor=2, min_value=1, max_value=10, min_dim_size=1, min_num_dims=1, max_num_dims=5, max_dim_size=5, ) ) # generate the target array 'yy' containing either 1 or -1 yy = _arrays_of_neg1_and_1(shape=shape) return dtype, xx, yy # --- Main --- # # ------------ # # hinge_embedding_loss @handle_test( fn_tree="functional.ivy.experimental.hinge_embedding_loss", dtype_and_inputs=_hinge_embedding_loss_input(), margin=st.floats(min_value=1, max_value=5), reduction=st.sampled_from(["none", "sum", "mean"]), test_gradients=st.just( False ), # Gradients are failing for "jax" and "paddle" backend. test_with_out=st.just(False), ground_truth_backend="torch", ) def test_hinge_embedding_loss( dtype_and_inputs, margin, reduction, test_flags, backend_fw, fn_name, on_device, ): dtype, xx, yy = dtype_and_inputs helpers.test_function( input_dtypes=dtype, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, input=xx[0], target=yy, margin=margin, reduction=reduction, rtol_=1e-05, atol_=1e-05, ) # huber_loss @handle_test( fn_tree="functional.ivy.experimental.huber_loss", dtype_and_true=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), min_value=-10, max_value=10, allow_inf=False, min_num_dims=1, max_num_dims=3, min_dim_size=3, ), dtype_and_pred=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), min_value=-10, max_value=10, allow_inf=False, min_num_dims=1, max_num_dims=3, min_dim_size=3, ), reduction=st.sampled_from(["none", "sum", "mean"]), delta=helpers.floats(min_value=0.01, max_value=2.0), ) def test_huber_loss( dtype_and_true, dtype_and_pred, reduction, delta, test_flags, backend_fw, fn_name, on_device, ): true_dtype, true = dtype_and_true pred_dtype, pred = dtype_and_pred helpers.test_function( input_dtypes=true_dtype + pred_dtype, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, true=true[0], pred=pred[0], reduction=reduction, delta=delta, ) # kl_div @handle_test( fn_tree="functional.ivy.experimental.kl_div", dtype_and_input=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=1e-04, max_value=1, allow_inf=False, min_num_dims=1, max_num_dims=3, min_dim_size=3, ), dtype_and_target=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=1e-04, max_value=1, allow_inf=False, min_num_dims=1, max_num_dims=3, min_dim_size=3, ), reduction=st.sampled_from(["none", "sum", 
"batchmean", "mean"]), log_target=st.booleans(), ) def test_kl_div( dtype_and_input, dtype_and_target, reduction, log_target, test_flags, backend_fw, fn_name, on_device, ): input_dtype, input = dtype_and_input input[0] = np.log(input[0]) target_dtype, target = dtype_and_target if log_target: target[0] = np.log(target[0]) helpers.test_function( input_dtypes=input_dtype + target_dtype, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, atol_=1e-02, input=input[0], target=target[0], reduction=reduction, log_target=log_target, ) @handle_test( fn_tree="functional.ivy.experimental.l1_loss", dtype_input=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=1, max_value=100, allow_inf=False, ), dtype_target=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=1, max_value=100, allow_inf=False, ), reduction=st.sampled_from(["sum", "mean", "none"]), ) def test_l1_loss( *, dtype_input, dtype_target, reduction, test_flags, backend_fw, fn_name, on_device, ): dtype_input, input = dtype_input dtype_target, target = dtype_target helpers.test_function( input_dtypes=dtype_input + dtype_target, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, atol_=1e-02, input=input[0], target=target[0], reduction=reduction, ) # log_poisson_loss @handle_test( fn_tree="functional.ivy.experimental.log_poisson_loss", dtype_and_targets=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=0, max_value=3, allow_inf=False, min_num_dims=1, max_num_dims=3, min_dim_size=3, ), dtype_and_log_input=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), small_abs_safety_factor=4, safety_factor_scale="log", min_value=0, max_value=3, allow_inf=False, exclude_min=True, exclude_max=True, min_num_dims=1, max_num_dims=3, min_dim_size=3, ), compute_full_loss=st.sampled_from([True, False]), test_with_out=st.just(False), ) def test_log_poisson_loss( *, dtype_and_targets, dtype_and_log_input, compute_full_loss, test_flags, backend_fw, fn_name, on_device, ): targets_dtype, targets = dtype_and_targets log_input_dtype, log_input = dtype_and_log_input helpers.test_function( input_dtypes=targets_dtype + log_input_dtype, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, targets=targets[0], log_input=log_input[0], compute_full_loss=compute_full_loss, atol_=1e-2, ) # poisson_nll_loss @handle_test( fn_tree="functional.ivy.experimental.poisson_nll_loss", dtype_input_target=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), min_dim_size=1, min_num_dims=1, min_value=0, max_value=100, num_arrays=2, shared_dtype=True, ), log_input=st.booleans(), full=st.booleans(), epsilon=st.sampled_from([1e-8, 1e-5, 1e-3]), reduction=st.sampled_from(["none", "sum", "mean"]), test_with_out=st.just(False), test_gradients=st.just( False ), # value_test are failing if this is set to `True` # noqa ground_truth_backend="torch", ) def test_poisson_nll_loss( dtype_input_target, log_input, full, epsilon, reduction, test_flags, backend_fw, fn_name, on_device, ): dtype, inputs = dtype_input_target helpers.test_function( input_dtypes=dtype, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, input=inputs[0], target=inputs[1], log_input=log_input, full=full, eps=epsilon, reduction=reduction, rtol_=1e-05, atol_=1e-05, ) # smooth_l1_loss # all loss functions failing for paddle backend due to # "There is no grad op for 
inputs:[0] or it's stop_gradient=True." @handle_test( fn_tree="functional.ivy.experimental.smooth_l1_loss", dtype_and_input=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=-10.0, max_value=10.0, allow_inf=False, min_num_dims=1, max_num_dims=3, min_dim_size=3, ), dtype_and_target=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=-10.0, max_value=10.0, allow_inf=False, min_num_dims=1, max_num_dims=3, min_dim_size=3, ), beta=helpers.floats(min_value=0.0, max_value=1.0), reduction=st.sampled_from(["none", "sum", "mean"]), ground_truth_backend="torch", ) def test_smooth_l1_loss( dtype_and_input, dtype_and_target, beta, reduction, test_flags, backend_fw, fn_name, on_device, ): dtype_input, input = dtype_and_input dtype_target, target = dtype_and_target helpers.test_function( input_dtypes=dtype_input + dtype_target, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, input=input[0], target=target[0], beta=beta, reduction=reduction, ) # soft_margin_loss @handle_test( fn_tree="functional.ivy.experimental.soft_margin_loss", dtype_and_input=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=1e-04, max_value=1, allow_inf=False, min_num_dims=1, max_num_dims=3, min_dim_size=3, ), dtype_and_target=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=1e-04, max_value=1, allow_inf=False, min_num_dims=1, max_num_dims=3, min_dim_size=3, ), reduction=st.sampled_from(["none", "sum", "mean"]), ) def test_soft_margin_loss( dtype_and_input, dtype_and_target, reduction, test_flags, backend_fw, fn_name, on_device, ): input_dtype, input = dtype_and_input target_dtype, target = dtype_and_target helpers.test_function( input_dtypes=input_dtype + target_dtype, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, rtol_=1e-02, atol_=1e-02, pred=input[0], target=target[0], reduction=reduction, )
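# --- Illustrative sketch --- #
# Hedged numpy reference for the hinge-embedding formula targeted by
# test_hinge_embedding_loss (mirroring the torch ground-truth backend used
# above): loss_i = x_i when target_i == 1 and max(0, margin - x_i) when
# target_i == -1, followed by the requested reduction. Sketch only.
import numpy as np


def _hinge_embedding_reference(x, target, margin=1.0, reduction="mean"):
    loss = np.where(target == 1, x, np.maximum(0.0, margin - x))
    if reduction == "mean":
        return loss.mean()
    if reduction == "sum":
        return loss.sum()
    return loss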
ivy/ivy_tests/test_ivy/test_functional/test_experimental/test_nn/test_losses.py/0
{ "file_path": "ivy/ivy_tests/test_ivy/test_functional/test_experimental/test_nn/test_losses.py", "repo_id": "ivy", "token_count": 5891 }
67
import ivy import numpy as np import pytest @pytest.mark.parametrize( ("shape", "rank"), [ ( (3, 4, 5), 4, ) ], ) def test_cp_flip_sign(shape, rank): cp_tensor = ivy.random_cp(shape, rank) weights, factors = ivy.CPTensor.cp_flip_sign(cp_tensor) assert ivy.all(ivy.mean(factors[1], axis=0) > 0) assert ivy.all(ivy.mean(factors[2], axis=0) > 0) assert cp_tensor.rank == cp_tensor.rank assert np.allclose(cp_tensor.weights, weights) assert np.allclose( ivy.CPTensor.cp_to_tensor((weights, factors)), ivy.CPTensor.cp_to_tensor(cp_tensor), ) @pytest.mark.parametrize( ("shape", "rank"), [ ( (8, 5, 6, 4), 25, ) ], ) def test_cp_lstsq_grad(shape, rank): """Validate the gradient calculation between a CP and dense tensor.""" cp_tensor = ivy.random_cp(shape, rank, normalise_factors=False) # If we're taking the gradient of comparison with self it should be 0 cp_grad = ivy.CPTensor.cp_lstsq_grad( cp_tensor, ivy.CPTensor.cp_to_tensor(cp_tensor) ) assert ivy.CPTensor.cp_norm(cp_grad) <= 10e-5 # Check that we can solve for a direction of descent dense = ivy.random_cp(shape, rank, full=True, normalise_factors=False) cost_before = ivy.sqrt( ivy.sum(ivy.square(ivy.CPTensor.cp_to_tensor(cp_tensor) - dense)) ) cp_grad = ivy.CPTensor.cp_lstsq_grad(cp_tensor, dense) cp_new = ivy.CPTensor(cp_tensor) for ii in range(len(shape)): cp_new.factors[ii] = cp_tensor.factors[ii] - 1e-3 * cp_grad.factors[ii] cost_after = ivy.sqrt( ivy.sum(ivy.square(ivy.CPTensor.cp_to_tensor(cp_new) - dense)) ) assert cost_before > cost_after @pytest.mark.parametrize( ("shape", "rank"), [ ( (5, 4, 6), 3, ) ], ) def test_cp_mode_dot(shape, rank): cp_ten = ivy.random_cp(shape, rank, orthogonal=True, full=False) full_tensor = ivy.CPTensor.cp_to_tensor(cp_ten) # matrix for mode 1 matrix = ivy.random_uniform(shape=(7, shape[1])) # vec for mode 2 vec = ivy.random_uniform(shape=shape[2]) # Test cp_mode_dot with matrix res = ivy.CPTensor.cp_mode_dot(cp_ten, matrix, mode=1, copy=True) # Note that if copy=True is not respected, factors will be changes # And the next test will fail res = ivy.CPTensor.cp_to_tensor(res) true_res = ivy.mode_dot(full_tensor, matrix, mode=1) assert np.allclose(true_res, res, atol=1e-3, rtol=1e-3) # Check that the data was indeed copied rec = ivy.CPTensor.cp_to_tensor(cp_ten) assert np.allclose(full_tensor, rec) # Test cp_mode_dot with vec res = ivy.CPTensor.cp_mode_dot(cp_ten, vec, mode=2, copy=True) res = ivy.CPTensor.cp_to_tensor(res) true_res = ivy.mode_dot(full_tensor, vec, mode=2) assert res.shape == true_res.shape assert np.allclose(true_res, res) @pytest.mark.parametrize( ("shape", "rank", "tol"), [ ( (8, 5, 6, 4), 25, 10e-5, ) ], ) def test_cp_norm(shape, rank, tol): cp_tensor = ivy.random_cp(shape, rank, full=False, normalise_factors=True) rec = ivy.CPTensor.cp_to_tensor(cp_tensor) true_res = ivy.sqrt(ivy.sum(ivy.square(rec))) res = ivy.CPTensor.cp_norm(cp_tensor) assert ivy.abs(true_res - res) <= tol # These tests have been adapetd from Tensorly # https://github.com/tensorly/tensorly/blob/main/tensorly/tests/test_cp_tensor.py @pytest.mark.parametrize( ("shape", "rank"), [ ( (3, 4, 5), 4, ) ], ) def test_cp_normalize(shape, rank): cp_tensor = ivy.random_cp(shape, rank) weights, factors = ivy.CPTensor.cp_normalize(cp_tensor) expected_norm = ivy.ones((rank,)) for f in factors: norm = ivy.sqrt(ivy.sum(ivy.square(f), axis=0)) assert np.allclose(norm, expected_norm) assert np.allclose( ivy.CPTensor.cp_to_tensor((weights, factors)), ivy.CPTensor.cp_to_tensor(cp_tensor), ) @pytest.mark.parametrize( ("shapeU1", "shapeU2", 
"shapeU3", "shapeU4", "true_res", "columns", "rows"), [ ( (3, 3), (4, 3), (2, 3), (2, 3), [ [ [[46754.0, 51524.0], [52748.0, 58130.0]], [[59084.0, 65114.0], [66662.0, 73466.0]], [[71414.0, 78704.0], [80576.0, 88802.0]], [[83744.0, 92294.0], [94490.0, 104138.0]], ], [ [[113165.0, 124784.0], [127790.0, 140912.0]], [[143522.0, 158264.0], [162080.0, 178730.0]], [[173879.0, 191744.0], [196370.0, 216548.0]], [[204236.0, 225224.0], [230660.0, 254366.0]], ], [ [[179576.0, 198044.0], [202832.0, 223694.0]], [[227960.0, 251414.0], [257498.0, 283994.0]], [[276344.0, 304784.0], [312164.0, 344294.0]], [[324728.0, 358154.0], [366830.0, 404594.0]], ], ], 4, (3, 4, 2), ) ], ) def test_cp_to_tensor(shapeU1, shapeU2, shapeU3, shapeU4, true_res, columns, rows): U1 = ivy.reshape(ivy.arange(1, 10, dtype=float), shapeU1) U2 = ivy.reshape(ivy.arange(10, 22, dtype=float), shapeU2) U3 = ivy.reshape(ivy.arange(22, 28, dtype=float), shapeU3) U4 = ivy.reshape(ivy.arange(28, 34, dtype=float), shapeU4) U = [ivy.array(t) for t in [U1, U2, U3, U4]] true_res = ivy.array(true_res) res = ivy.CPTensor.cp_to_tensor((ivy.ones(shape=(3,)), U)) assert np.allclose(res, true_res) matrices = [ ivy.arange(k * columns, dtype=float).reshape((k, columns)) for k in rows ] tensor = ivy.CPTensor.cp_to_tensor((ivy.ones(shape=(columns,)), matrices)) for i in range(len(rows)): unfolded = ivy.unfold(tensor, mode=i) U_i = matrices.pop(i) reconstructed = ivy.matmul( U_i, ivy.permute_dims(ivy.khatri_rao(matrices), (1, 0)) ) assert np.allclose(reconstructed, unfolded) matrices.insert(i, U_i) @pytest.mark.parametrize(("shape", "expected"), [((2, 2), [[-2, -2], [6, 10]])]) def test_cp_to_tensor_with_weights(shape, expected): A = ivy.reshape(ivy.arange(1, 5, dtype=float), shape) B = ivy.reshape(ivy.arange(5, 9, dtype=float), shape) weights = ivy.array([2, -1], dtype=A.dtype) out = ivy.CPTensor.cp_to_tensor((weights, [A, B])) expected = ivy.array(expected) # computed by hand assert np.allclose(out, expected) (weights, factors) = ivy.random_cp((5, 5, 5), 5, normalise_factors=True, full=False) true_res = ivy.matmul( ivy.matmul(factors[0], ivy.diag(weights)), ivy.permute_dims(ivy.khatri_rao(factors[1:]), (1, 0)), ) true_res = ivy.fold(true_res, 0, (5, 5, 5)) res = ivy.CPTensor.cp_to_tensor((weights, factors)) assert np.allclose(true_res, res) @pytest.mark.parametrize( ("shapeU1", "shapeU2", "shapeU3", "shapeU4"), [((3, 3), (4, 3), (2, 3), (2, 3))] ) def test_cp_to_unfolded(shapeU1, shapeU2, shapeU3, shapeU4): U1 = ivy.reshape(ivy.arange(1, 10, dtype=float), shapeU1) U2 = ivy.reshape(ivy.arange(10, 22, dtype=float), shapeU2) U3 = ivy.reshape(ivy.arange(22, 28, dtype=float), shapeU3) U4 = ivy.reshape(ivy.arange(28, 34, dtype=float), shapeU4) U = [ivy.array(t) for t in [U1, U2, U3, U4]] cp_tensor = ivy.CPTensor((ivy.ones((3,)), U)) full_tensor = ivy.CPTensor.cp_to_tensor(cp_tensor) for mode in range(4): true_res = ivy.unfold(full_tensor, mode) res = ivy.CPTensor.cp_to_unfolded(cp_tensor, mode) assert np.allclose( true_res, res, ) @pytest.mark.parametrize( ("shapeU1", "shapeU2", "shapeU3", "shapeU4"), [((3, 3), (4, 3), (2, 3), (2, 3))] ) def test_cp_to_vec(shapeU1, shapeU2, shapeU3, shapeU4): """Test for cp_to_vec.""" U1 = np.reshape(np.arange(1, 10, dtype=float), shapeU1) U2 = np.reshape(np.arange(10, 22, dtype=float), shapeU2) U3 = np.reshape(np.arange(22, 28, dtype=float), shapeU3) U4 = np.reshape(np.arange(28, 34, dtype=float), shapeU4) U = [ivy.array(t) for t in [U1, U2, U3, U4]] cp_tensor = ivy.CPTensor( ( ivy.ones( (3), ), U, ) ) full_tensor = 
ivy.CPTensor.cp_to_tensor(cp_tensor) true_res = ivy.reshape(full_tensor, (-1)) res = ivy.CPTensor.cp_to_vec(cp_tensor) assert np.allclose(true_res, res) @pytest.mark.parametrize( ("shape", "rank"), [ ( (10, 10, 10, 4), 5, ) ], ) def test_unfolding_dot_khatri_rao(shape, rank): tensor = ivy.random_uniform(shape=shape) weights, factors = ivy.random_cp(shape, rank, full=False, normalise_factors=True) for mode in range(4): # Version forming explicitly the khatri-rao product unfolded = ivy.unfold(tensor, mode) kr_factors = ivy.khatri_rao(factors, weights=weights, skip_matrix=mode) true_res = ivy.matmul(unfolded, kr_factors) # Efficient sparse-safe version res = ivy.CPTensor.unfolding_dot_khatri_rao(tensor, (weights, factors), mode) assert np.allclose(true_res, res) @pytest.mark.parametrize("size", [4]) def test_validate_cp_rank(size): tensor_shape = tuple(ivy.randint(1, 100, shape=(size,))) n_param_tensor = ivy.prod(tensor_shape) # Rounding = floor rank = ivy.CPTensor.validate_cp_rank(tensor_shape, rank="same", rounding="floor") n_param = ivy.CPTensor.cp_n_param(tensor_shape, rank) assert n_param <= n_param_tensor # Rounding = ceil rank = ivy.CPTensor.validate_cp_rank(tensor_shape, rank="same", rounding="ceil") n_param = ivy.CPTensor.cp_n_param(tensor_shape, rank) assert n_param >= n_param_tensor @pytest.mark.parametrize( ("true_shape", "true_rank"), [ ( (3, 4, 5), 3, ) ], ) def test_validate_cp_tensor(true_shape, true_rank): cp_tensor = ivy.random_cp(true_shape, true_rank) (weights, factors) = ivy.CPTensor.cp_normalize(cp_tensor) # Check correct rank and shapes are returned shape, rank = ivy.CPTensor.validate_cp_tensor((weights, factors)) np.testing.assert_equal( true_shape, shape, err_msg=f"Returned incorrect shape (got {shape}, expected {true_shape})", ) np.testing.assert_equal( rank, true_rank, err_msg=f"Returned incorrect rank (got {rank}, expected {true_rank})", ) # One of the factors has the wrong rank factors[0], copy = ivy.random_uniform(shape=(4, 4)), factors[0] with np.testing.assert_raises(ValueError): ivy.CPTensor.validate_cp_tensor((weights, factors)) # Not the correct amount of weights factors[0] = copy wrong_weights = weights[1:] with np.testing.assert_raises(ValueError): ivy.CPTensor.validate_cp_tensor((wrong_weights, factors)) # Not enough factors with np.testing.assert_raises(ValueError): ivy.CPTensor.validate_cp_tensor((weights[:1], factors[:1]))
ivy/ivy_tests/test_ivy/test_misc/test_factorized_tensor/test_cp_tensor.py/0
{ "file_path": "ivy/ivy_tests/test_ivy/test_misc/test_factorized_tensor/test_cp_tensor.py", "repo_id": "ivy", "token_count": 5767 }
68
"""Collection of tests for unified neural network layers.""" # global import numpy as np from hypothesis import assume from hypothesis import strategies as st # local import ivy import ivy_tests.test_ivy.helpers as helpers from ivy.data_classes.container import Container from ivy.functional.ivy.gradients import _variable from ivy.functional.ivy.layers import _deconv_length from ivy_tests.test_ivy.helpers import handle_method from ivy_tests.test_ivy.helpers.assertions import assert_same_type_and_shape from ivy_tests.test_ivy.test_functional.test_experimental.test_nn import ( test_layers as exp_layers_tests, ) from ivy_tests.test_ivy.test_functional.test_experimental.test_nn.test_layers import ( _valid_dct, ) all_constant_initializers = (ivy.Zeros, ivy.Ones) all_gaussian_initializers = (ivy.KaimingNormal, ivy.Siren) all_uniform_initializers = (ivy.GlorotUniform, ivy.FirstLayerSiren, ivy.Siren) all_initializers = ( all_constant_initializers + all_uniform_initializers + all_gaussian_initializers ) # --- Helpers --- # # --------------- # # Linear # # -------# @st.composite def _bias_flag_and_initializer(draw): with_bias = draw(st.booleans()) if with_bias: return with_bias, draw(_sample_initializer()) return with_bias, None # Embedding @st.composite def _get_embedding_args(draw): num_embeddings = draw(st.integers(min_value=1, max_value=10)) embedding_dim = draw(st.integers(min_value=1, max_value=10)) dtype_indices, indices = draw( helpers.dtype_and_values( available_dtypes=["int32", "int64"], min_num_dims=2, min_dim_size=1, min_value=0, max_value=num_embeddings - 1, ).filter(lambda x: x[1][0].shape[-1] == embedding_dim) ) padding_idx = draw(st.integers(min_value=0, max_value=num_embeddings - 1)) max_norm = draw(st.one_of(st.none(), st.floats(min_value=1, max_value=5))) return ( num_embeddings, embedding_dim, dtype_indices, indices, padding_idx, max_norm, ) @st.composite def _input_channels_and_dtype_and_values(draw): input_channels = draw(st.integers(min_value=1, max_value=2)) x_shape = draw(helpers.get_shape()) + (input_channels,) dtype, vals = draw( helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), shape=x_shape, min_value=0, max_value=1, small_abs_safety_factor=4, safety_factor_scale="log", ) ) return input_channels, dtype, vals # LSTM @st.composite def _input_channels_and_dtype_and_values_lstm(draw): input_channels = draw(st.integers(min_value=1, max_value=10)) t = draw(st.integers(min_value=1, max_value=3)) x_shape = draw(helpers.get_shape()) + (t, input_channels) dtype, vals = draw( helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float", full=True), shape=x_shape ) ) return input_channels, dtype, vals @st.composite def _sample_initializer(draw): return draw(st.sampled_from(all_initializers))() # Attention # # ----------# @st.composite def _x_and_mha(draw): dtype = draw( helpers.get_dtypes("float", full=False).filter(lambda x: x != ["float16"]) ) with_to_q_fn = draw(st.booleans()) with_to_kv_fn = draw(st.booleans()) with_to_out_fn = draw(st.booleans()) query_dim = draw(st.integers(min_value=1, max_value=3)) num_heads = draw(st.integers(min_value=1, max_value=3)) head_dim = draw(st.integers(min_value=1, max_value=3)) dropout_rate = draw(st.floats(min_value=0.0, max_value=0.9)) context_dim = draw(st.integers(min_value=1, max_value=3)) scale = draw(st.integers(min_value=1, max_value=3)) num_queries = draw(st.integers(min_value=1, max_value=3)) # x_feats = draw(st.integers(min_value=1, max_value=3)) # cont_feats = draw(st.integers(min_value=1, max_value=3)) 
num_keys = draw(st.integers(min_value=1, max_value=3)) if with_to_q_fn: inputs_shape = (num_queries, query_dim) else: inputs_shape = (num_queries, num_heads * head_dim) if with_to_kv_fn: context_shape = (num_keys, context_dim) else: context_shape = (num_keys, num_heads * head_dim * 2) mask_shape = (num_queries, num_keys) x_mha = draw( helpers.array_values( dtype=dtype[0], shape=inputs_shape, min_value=0.0999755859375, max_value=1, ) ) context = draw( helpers.array_values( dtype=dtype[0], shape=context_shape, min_value=0.0999755859375, max_value=1, ) ) mask = draw( helpers.array_values( dtype=dtype[0], shape=mask_shape, min_value=0.0999755859375, max_value=1, ) ) return ( dtype, x_mha, scale, num_heads, context, mask, query_dim, head_dim, dropout_rate, context_dim, with_to_q_fn, with_to_kv_fn, with_to_out_fn, ) # Convolutions # # -------------# @st.composite def _x_ic_oc_f_d_df(draw, dim: int = 2, transpose: bool = False, depthwise=False): strides = draw(st.integers(min_value=1, max_value=3)) padding = draw(st.sampled_from(["SAME", "VALID"])) batch_size = draw(st.integers(1, 1)) filter_shape = draw( helpers.get_shape( min_num_dims=dim, max_num_dims=dim, min_dim_size=1, max_dim_size=5 ) ) input_channels = draw(st.integers(1, 3)) output_channels = draw(st.integers(1, 3)) dilations = 1 x_dim = [] for i in range(dim): min_x = filter_shape[i] + (filter_shape[i] - 1) * (dilations - 1) x_dim.append(draw(st.integers(min_x, 20))) if dim == 2: data_format = draw(st.sampled_from(["NCHW"])) elif dim == 1: data_format = draw(st.sampled_from(["NWC", "NCW"])) else: data_format = draw(st.sampled_from(["NDHWC", "NCDHW"])) if data_format in ["NHWC", "NWC", "NDHWC"]: x_shape = [batch_size] + x_dim + [input_channels] else: x_shape = [batch_size] + [input_channels] + x_dim if transpose: output_shape = [] for i in range(dim): output_shape.append( _deconv_length(x_dim[i], strides, filter_shape[i], padding, dilations) ) filter_shape = list(filter_shape) if dim == 1: filter_shape = filter_shape[0] dtype, vals = draw( helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float", full=True), shape=x_shape, min_value=0, max_value=1, ).filter(lambda x: x[0] != ["float16"]) ) if transpose: return ( dtype, vals, input_channels, output_channels, filter_shape, strides, dilations, data_format, padding, output_shape, ) return ( dtype, vals, input_channels, output_channels, filter_shape, strides, dilations, data_format, padding, ) # AdaptiveAveragePool2d @st.composite def array_for_adaptive( draw, num_dims=3, max_dim_size=8, min_dim_size=3, num_out_size=2, ): dtypes, arrays = draw( helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_num_dims=num_dims, max_num_dims=num_dims, min_dim_size=min_dim_size, max_dim_size=max_dim_size, ) ) size = draw( helpers.list_of_size( x=helpers.ints(min_value=3, max_value=5), size=num_out_size, ) ) output_size = size[0] if num_out_size == 1 else size data_format = draw(st.sampled_from(["NCHW", "NHWC"])) return dtypes, arrays, output_size, data_format # --- Main --- # # ------------ # @handle_method( method_tree="AdaptiveAvgPool1d.__call__", dt_arr_size=array_for_adaptive(max_dim_size=3, min_dim_size=2, num_out_size=1), ) def test_adaptive_avg_pool1d_layer( *, dt_arr_size, test_gradients, on_device, class_name, method_name, ground_truth_backend, init_flags, method_flags, backend_fw, ): input_dtype, x, out_size, _ = dt_arr_size helpers.test_method( ground_truth_backend=ground_truth_backend, backend_to_test=backend_fw, init_flags=init_flags, 
method_flags=method_flags, init_all_as_kwargs_np={ "output_size": out_size, "device": on_device, "dtype": input_dtype[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"x": x[0]}, class_name=class_name, method_name=method_name, test_gradients=test_gradients, on_device=on_device, ) @handle_method( method_tree="AdaptiveAvgPool2d.__call__", dt_arr_size=array_for_adaptive(), ) def test_adaptive_avg_pool2d_layer( *, dt_arr_size, test_gradients, on_device, class_name, method_name, ground_truth_backend, init_flags, method_flags, backend_fw, ): input_dtype, x, out_size, data_format = dt_arr_size helpers.test_method( ground_truth_backend=ground_truth_backend, backend_to_test=backend_fw, init_flags=init_flags, method_flags=method_flags, init_all_as_kwargs_np={ "output_size": out_size, "data_format": data_format, "device": on_device, "dtype": input_dtype[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"x": x[0]}, class_name=class_name, method_name=method_name, test_gradients=test_gradients, on_device=on_device, ) # AvgPool1D @handle_method( method_tree="AvgPool1D.__call__", x_k_s_p=helpers.arrays_for_pooling(min_dims=3, max_dims=3, min_side=1, max_side=4), ) def test_avgpool1d_layer( *, x_k_s_p, test_gradients, on_device, class_name, method_name, ground_truth_backend, init_flags, method_flags, backend_fw, ): input_dtype, x, kernel_size, stride, padding = x_k_s_p helpers.test_method( ground_truth_backend=ground_truth_backend, backend_to_test=backend_fw, init_flags=init_flags, method_flags=method_flags, init_all_as_kwargs_np={ "kernel_size": kernel_size, "stride": stride, "padding": padding, }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"inputs": x[0]}, class_name=class_name, method_name=method_name, test_gradients=test_gradients, on_device=on_device, ) # AvgPool2D @handle_method( method_tree="AvgPool2D.__call__", x_k_s_p=helpers.arrays_for_pooling(min_dims=4, max_dims=4, min_side=1, max_side=4), ) def test_avgpool2d_layer( *, x_k_s_p, test_gradients, on_device, class_name, method_name, backend_fw, ground_truth_backend, init_flags, method_flags, ): input_dtype, x, kernel_size, stride, padding = x_k_s_p helpers.test_method( backend_to_test=backend_fw, ground_truth_backend=ground_truth_backend, init_flags=init_flags, method_flags=method_flags, init_all_as_kwargs_np={ "kernel_size": kernel_size, "stride": stride, "padding": padding, "device": on_device, "dtype": input_dtype[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"inputs": x[0]}, class_name=class_name, method_name=method_name, test_gradients=test_gradients, on_device=on_device, ) # ToDo : Add gradient testing once random number generation is unified @handle_method( method_tree="AvgPool3D.__call__", x_k_s_p=helpers.arrays_for_pooling(min_dims=5, max_dims=5, min_side=1, max_side=4), count_include_pad=st.booleans(), ceil_mode=st.booleans(), divisor_override=st.one_of(st.none(), st.integers(min_value=1, max_value=4)), ) def test_avgpool3d_layer( *, x_k_s_p, count_include_pad, ceil_mode, divisor_override, test_gradients, on_device, class_name, method_name, backend_fw, ground_truth_backend, init_flags, method_flags, ): input_dtype, x, kernel_size, stride, padding = x_k_s_p helpers.test_method( backend_to_test=backend_fw, ground_truth_backend=ground_truth_backend, init_flags=init_flags, method_flags=method_flags, init_all_as_kwargs_np={ "kernel_size": kernel_size, "stride": stride, "padding": padding, "count_include_pad": count_include_pad, "ceil_mode": ceil_mode, "divisor_override": 
divisor_override, }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"x": x[0]}, class_name=class_name, method_name=method_name, test_gradients=test_gradients, on_device=on_device, ) # conv1d @handle_method( method_tree="Conv1D.__call__", _x_ic_oc_f_s_d_df_p=_x_ic_oc_f_d_df(dim=1), weight_initializer=_sample_initializer(), bias_initializer=_sample_initializer(), init_with_v=st.booleans(), method_with_v=st.booleans(), ) def test_conv1d_layer( _x_ic_oc_f_s_d_df_p, weight_initializer, bias_initializer, init_with_v, method_with_v, on_device, class_name, method_name, backend_fw, ground_truth_backend, init_flags, method_flags, ): ( input_dtype, vals, input_channels, output_channels, filter_shape, strides, dilations, data_format, padding, ) = _x_ic_oc_f_s_d_df_p helpers.test_method( backend_to_test=backend_fw, ground_truth_backend=ground_truth_backend, init_flags=init_flags, method_flags=method_flags, init_all_as_kwargs_np={ "input_channels": input_channels, "output_channels": output_channels, "filter_shape": filter_shape, "strides": strides, "padding": padding, "weight_initializer": weight_initializer, "bias_initializer": bias_initializer, "data_format": data_format, "dilations": dilations, "device": on_device, "dtype": input_dtype[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"inputs": vals[0]}, class_name=class_name, method_name=method_name, init_with_v=init_with_v, method_with_v=method_with_v, rtol_=1e-02, atol_=1e-02, on_device=on_device, ) # conv1d transpose @handle_method( method_tree="Conv1DTranspose.__call__", ground_truth_backend="jax", _x_ic_oc_f_s_d_df_p=_x_ic_oc_f_d_df(dim=1, transpose=True), weight_initializer=_sample_initializer(), bias_initializer=_sample_initializer(), init_with_v=st.booleans(), method_with_v=st.booleans(), method_num_positional_args=helpers.num_positional_args( fn_name="Conv1DTranspose._forward" ), ) def test_conv1d_transpose_layer( _x_ic_oc_f_s_d_df_p, weight_initializer, bias_initializer, init_with_v, method_with_v, on_device, class_name, method_name, backend_fw, ground_truth_backend, init_flags, method_flags, ): ( input_dtype, vals, input_channels, output_channels, filter_shape, strides, dilations, data_format, padding, output_shape, ) = _x_ic_oc_f_s_d_df_p assume(backend_fw != "tensorflow" or on_device != "cpu" or dilations <= 1) helpers.test_method( backend_to_test=backend_fw, ground_truth_backend=ground_truth_backend, init_flags=init_flags, method_flags=method_flags, init_all_as_kwargs_np={ "input_channels": input_channels, "output_channels": output_channels, "filter_shape": filter_shape, "strides": strides, "padding": padding, "weight_initializer": weight_initializer, "bias_initializer": bias_initializer, "output_shape": output_shape, "data_format": data_format, "dilations": dilations, "device": on_device, "dtype": input_dtype[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"inputs": vals[0]}, class_name=class_name, method_name=method_name, init_with_v=init_with_v, method_with_v=method_with_v, rtol_=1e-02, atol_=1e-02, on_device=on_device, ) # conv2d @handle_method( method_tree="Conv2D.__call__", _x_ic_oc_f_s_d_df_p=_x_ic_oc_f_d_df(), weight_initializer=_sample_initializer(), bias_initializer=_sample_initializer(), init_with_v=st.booleans(), method_with_v=st.booleans(), ) def test_conv2d_layer( _x_ic_oc_f_s_d_df_p, weight_initializer, bias_initializer, init_with_v, method_with_v, on_device, class_name, method_name, backend_fw, ground_truth_backend, init_flags, method_flags, ): ( input_dtype, vals, 
input_channels, output_channels, filter_shape, strides, dilations, data_format, padding, ) = _x_ic_oc_f_s_d_df_p helpers.test_method( backend_to_test=backend_fw, ground_truth_backend=ground_truth_backend, init_flags=init_flags, method_flags=method_flags, init_all_as_kwargs_np={ "input_channels": input_channels, "output_channels": output_channels, "filter_shape": filter_shape, "strides": strides, "padding": padding, "weight_initializer": weight_initializer, "bias_initializer": bias_initializer, "data_format": data_format, "dilations": dilations, "device": on_device, "dtype": input_dtype[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"inputs": vals[0]}, class_name=class_name, method_name=method_name, init_with_v=init_with_v, method_with_v=method_with_v, rtol_=1e-02, atol_=1e-02, on_device=on_device, ) # # conv2d transpose @handle_method( method_tree="Conv2DTranspose.__call__", ground_truth_backend="jax", _x_ic_oc_f_s_d_df_p=_x_ic_oc_f_d_df(transpose=True), weight_initializer=_sample_initializer(), bias_initializer=_sample_initializer(), init_with_v=st.booleans(), method_with_v=st.booleans(), init_num_positional_args=helpers.num_positional_args( fn_name="Conv2DTranspose.__init__" ), method_num_positional_args=helpers.num_positional_args( fn_name="Conv2DTranspose._forward" ), ) def test_conv2d_transpose_layer( _x_ic_oc_f_s_d_df_p, weight_initializer, bias_initializer, init_with_v, method_with_v, on_device, class_name, method_name, backend_fw, ground_truth_backend, init_flags, method_flags, ): ( input_dtype, vals, input_channels, output_channels, filter_shape, strides, dilations, data_format, padding, output_shape, ) = _x_ic_oc_f_s_d_df_p assume(backend_fw != "tensorflow" or on_device != "cpu" or dilations <= 1) helpers.test_method( backend_to_test=backend_fw, ground_truth_backend=ground_truth_backend, init_flags=init_flags, method_flags=method_flags, init_all_as_kwargs_np={ "input_channels": input_channels, "output_channels": output_channels, "filter_shape": filter_shape, "strides": strides, "padding": padding, "weight_initializer": weight_initializer, "bias_initializer": bias_initializer, "output_shape": output_shape, "data_format": data_format, "dilations": dilations, "device": on_device, "dtype": input_dtype[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"inputs": vals[0]}, class_name=class_name, method_name=method_name, init_with_v=init_with_v, method_with_v=method_with_v, rtol_=1e-02, atol_=1e-02, on_device=on_device, ) # conv3d @handle_method( method_tree="Conv3D.__call__", ground_truth_backend="jax", _x_ic_oc_f_s_d_df_p=_x_ic_oc_f_d_df(dim=3), weight_initializer=_sample_initializer(), bias_initializer=_sample_initializer(), init_with_v=st.booleans(), method_with_v=st.booleans(), ) def test_conv3d_layer( _x_ic_oc_f_s_d_df_p, weight_initializer, bias_initializer, init_with_v, method_with_v, on_device, class_name, method_name, backend_fw, ground_truth_backend, init_flags, method_flags, ): ( input_dtype, vals, input_channels, output_channels, filter_shape, strides, dilations, data_format, padding, ) = _x_ic_oc_f_s_d_df_p assume(backend_fw != "tensorflow" or on_device != "cpu" or dilations <= 1) helpers.test_method( backend_to_test=backend_fw, ground_truth_backend=ground_truth_backend, init_flags=init_flags, method_flags=method_flags, init_all_as_kwargs_np={ "input_channels": input_channels, "output_channels": output_channels, "filter_shape": filter_shape, "strides": strides, "padding": padding, "weight_initializer": weight_initializer, "bias_initializer": 
bias_initializer, "data_format": data_format, "dilations": dilations, "device": on_device, "dtype": input_dtype[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"inputs": vals[0]}, class_name=class_name, method_name=method_name, init_with_v=init_with_v, method_with_v=method_with_v, rtol_=1e-02, atol_=1e-02, on_device=on_device, ) # conv3d transpose @handle_method( method_tree="Conv3DTranspose.__call__", ground_truth_backend="jax", _x_ic_oc_f_s_d_df_p=_x_ic_oc_f_d_df(dim=3, transpose=True), weight_initializer=_sample_initializer(), bias_initializer=_sample_initializer(), init_with_v=st.booleans(), method_with_v=st.booleans(), init_num_positional_args=helpers.num_positional_args( fn_name="Conv3DTranspose.__init__" ), method_num_positional_args=helpers.num_positional_args( fn_name="Conv3DTranspose._forward" ), ) def test_conv3d_transpose_layer( _x_ic_oc_f_s_d_df_p, weight_initializer, bias_initializer, init_with_v, method_with_v, on_device, class_name, method_name, backend_fw, ground_truth_backend, init_flags, method_flags, ): ( input_dtype, vals, input_channels, output_channels, filter_shape, strides, dilations, data_format, padding, output_shape, ) = _x_ic_oc_f_s_d_df_p assume(backend_fw != "tensorflow" or on_device != "cpu" or dilations <= 1) helpers.test_method( backend_to_test=backend_fw, ground_truth_backend=ground_truth_backend, init_flags=init_flags, method_flags=method_flags, init_all_as_kwargs_np={ "input_channels": input_channels, "output_channels": output_channels, "filter_shape": filter_shape, "strides": strides, "padding": padding, "weight_initializer": weight_initializer, "bias_initializer": bias_initializer, "output_shape": output_shape, "data_format": data_format, "dilations": dilations, "device": on_device, "dtype": input_dtype[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"inputs": vals[0]}, class_name=class_name, method_name=method_name, init_with_v=init_with_v, method_with_v=method_with_v, rtol_=1e-02, atol_=1e-02, on_device=on_device, ) @handle_method( method_tree="Dct.__call__", dtype_x_and_args=_valid_dct(), ) def test_dct( *, dtype_x_and_args, test_gradients, on_device, class_name, method_name, ground_truth_backend, init_flags, method_flags, backend_fw, ): dtype, x, type, n, axis, norm = dtype_x_and_args helpers.test_method( ground_truth_backend=ground_truth_backend, backend_to_test=backend_fw, init_flags=init_flags, method_flags=method_flags, init_all_as_kwargs_np={ "dtype": dtype[0], "type": type, "n": n, "axis": axis, "norm": norm, "device": on_device, }, method_input_dtypes=dtype, method_all_as_kwargs_np={"x": x[0]}, class_name=class_name, method_name=method_name, test_gradients=test_gradients, on_device=on_device, ) # # depthwise conv2d @handle_method( method_tree="DepthwiseConv2D.__call__", ground_truth_backend="jax", _x_ic_oc_f_s_d_df_p=_x_ic_oc_f_d_df(depthwise=True), weight_initializer=_sample_initializer(), bias_initializer=_sample_initializer(), init_with_v=st.booleans(), method_with_v=st.booleans(), method_num_positional_args=helpers.num_positional_args( fn_name="DepthwiseConv2D._forward" ), ) def test_depthwise_conv2d_layer( _x_ic_oc_f_s_d_df_p, weight_initializer, bias_initializer, init_with_v, method_with_v, on_device, class_name, method_name, backend_fw, ground_truth_backend, init_flags, method_flags, ): ( input_dtype, vals, input_channels, output_channels, filter_shape, strides, dilations, data_format, padding, ) = _x_ic_oc_f_s_d_df_p assume(backend_fw != "tensorflow" or dilations <= 1 or strides <= 1) 
helpers.test_method( backend_to_test=backend_fw, ground_truth_backend=ground_truth_backend, init_flags=init_flags, method_flags=method_flags, init_all_as_kwargs_np={ "num_channels": input_channels, "filter_shape": filter_shape, "strides": strides, "padding": padding, "weight_initializer": weight_initializer, "bias_initializer": bias_initializer, "data_format": data_format, "dilations": dilations, "device": on_device, "dtype": input_dtype[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"inputs": vals[0]}, class_name=class_name, method_name=method_name, init_with_v=init_with_v, method_with_v=method_with_v, rtol_=1e-02, atol_=1e-02, on_device=on_device, ) # Dropout # # --------# # dropout @handle_method( method_tree="Dropout.__call__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=0, max_value=50, allow_inf=False, min_num_dims=1, max_num_dims=1, min_dim_size=2, ), prob=helpers.floats(min_value=0, max_value=0.9), scale=st.booleans(), ) def test_dropout_layer( *, dtype_and_x, prob, scale, on_device, class_name, method_name, backend_fw, ground_truth_backend, init_flags, method_flags, ): input_dtype, x = dtype_and_x ret = helpers.test_method( backend_to_test=backend_fw, ground_truth_backend=ground_truth_backend, init_flags=init_flags, method_flags=method_flags, init_all_as_kwargs_np={ "prob": prob, "scale": scale, "dtype": input_dtype[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"inputs": x[0]}, class_name=class_name, method_name=method_name, test_values=False, on_device=on_device, ) ret = helpers.flatten_and_to_np(ret=ret, backend=backend_fw) for u in ret: # cardinality test assert u.shape == x[0].shape @handle_method( method_tree="Embedding.__call__", embedding_args=_get_embedding_args(), weight_initializer=_sample_initializer(), init_with_v=st.booleans(), method_with_v=st.booleans(), seed=helpers.seed(), ) def test_embedding_layer( *, embedding_args, weight_initializer, init_with_v, method_with_v, seed, on_device, class_name, method_name, backend_fw, ground_truth_backend, init_flags, method_flags, ): ivy.seed(seed_value=seed) ( num_embeddings, embedding_dim, dtype_indices, indices, padding_idx, max_norm, ) = embedding_args dtype = dtype_indices helpers.test_method( backend_to_test=backend_fw, ground_truth_backend=ground_truth_backend, init_flags=init_flags, method_flags=method_flags, init_all_as_kwargs_np={ "num_embeddings": num_embeddings, "embedding_dim": embedding_dim, "padding_idx": padding_idx, "max_norm": max_norm, "device": on_device, "dtype": dtype[0], }, method_all_as_kwargs_np={"indices": indices[0]}, method_input_dtypes=dtype, class_name=class_name, method_name=method_name, init_with_v=init_with_v, method_with_v=method_with_v, rtol_=1e-02, atol_=1e-02, on_device=on_device, ) # FFT @handle_method( method_tree="FFT.__call__", x_and_fft=exp_layers_tests._x_and_fft(), ) def test_fft_layer( *, x_and_fft, test_gradients, on_device, class_name, method_name, ground_truth_backend, init_flags, method_flags, backend_fw, ): dtype, x, dim, norm, n = x_and_fft helpers.test_method( ground_truth_backend=ground_truth_backend, backend_to_test=backend_fw, init_flags=init_flags, method_flags=method_flags, init_all_as_kwargs_np={ "dim": dim, "norm": norm, "n": n, "device": on_device, "dtype": dtype[0], }, method_input_dtypes=dtype, method_all_as_kwargs_np={"inputs": x[0]}, class_name=class_name, method_name=method_name, test_gradients=test_gradients, on_device=on_device, ) # Identity @handle_method( 
method_tree="Identity.__call__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), min_num_dims=1, max_num_dims=5, ), init_with_v=st.booleans(), method_with_v=st.booleans(), ) def test_identity_layer( *, dtype_and_x, init_with_v, method_with_v, test_gradients, on_device, class_name, method_name, backend_fw, ground_truth_backend, init_flags, method_flags, ): input_dtype, x = dtype_and_x helpers.test_method( backend_to_test=backend_fw, ground_truth_backend=ground_truth_backend, init_flags=init_flags, method_flags=method_flags, init_all_as_kwargs_np={ "device": on_device, }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"x": x[0]}, class_name=class_name, method_name=method_name, init_with_v=init_with_v, method_with_v=method_with_v, rtol_=1e-03, atol_=1e-03, test_gradients=test_gradients, on_device=on_device, ) # IFFT @handle_method( method_tree="IFFT.__call__", x_and_ifft=exp_layers_tests._x_and_ifft(), ) def test_ifft_layer( *, x_and_ifft, test_gradients, on_device, class_name, method_name, ground_truth_backend, init_flags, method_flags, backend_fw, ): dtype, x, dim, norm, n = x_and_ifft helpers.test_method( ground_truth_backend=ground_truth_backend, backend_to_test=backend_fw, init_flags=init_flags, method_flags=method_flags, init_all_as_kwargs_np={ "dim": dim, "norm": norm, "n": n, "device": on_device, "dtype": dtype[0], }, method_input_dtypes=dtype, method_all_as_kwargs_np={"inputs": x[0]}, class_name=class_name, method_name=method_name, test_gradients=test_gradients, on_device=on_device, ) # linear @handle_method( method_tree="Linear.__call__", ic_n_dtype_n_vals=_input_channels_and_dtype_and_values(), output_channels=st.shared( st.integers(min_value=1, max_value=2), key="output_channels" ), weight_initializer=_sample_initializer(), wb_n_b_init=_bias_flag_and_initializer(), init_with_v=st.booleans(), method_with_v=st.booleans(), seed=helpers.seed(), ) def test_linear_layer( *, ic_n_dtype_n_vals, output_channels, weight_initializer, wb_n_b_init, init_with_v, method_with_v, seed, on_device, class_name, method_name, backend_fw, ground_truth_backend, init_flags, method_flags, ): ivy.seed(seed_value=seed) input_channels, input_dtype, x = ic_n_dtype_n_vals with_bias, bias_initializer = wb_n_b_init helpers.test_method( backend_to_test=backend_fw, ground_truth_backend=ground_truth_backend, init_flags=init_flags, method_flags=method_flags, init_all_as_kwargs_np={ "input_channels": input_channels, "output_channels": output_channels, "weight_initializer": weight_initializer, "bias_initializer": bias_initializer, "with_bias": with_bias, "device": on_device, "dtype": input_dtype[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"x": x[0]}, class_name=class_name, method_name=method_name, init_with_v=init_with_v, method_with_v=method_with_v, rtol_=1e-02, atol_=1e-02, on_device=on_device, ) @handle_method( method_tree="LSTM.__call__", input_dtype_val=_input_channels_and_dtype_and_values_lstm(), output_channels=st.shared( st.integers(min_value=1, max_value=10), key="output_channels" ), weight_initializer=_sample_initializer(), num_layers=st.integers(min_value=1, max_value=3), return_sequence=st.booleans(), return_state=st.booleans(), init_with_v=st.booleans(), method_with_v=st.booleans(), ) def test_lstm_layer( input_dtype_val, output_channels, weight_initializer, num_layers, return_sequence, return_state, init_with_v, method_with_v, on_device, class_name, method_name, backend_fw, ground_truth_backend, init_flags, method_flags, ): 
input_channels, input_dtype, vals = input_dtype_val return_sequence = return_sequence return_state = return_state helpers.test_method( backend_to_test=backend_fw, ground_truth_backend=ground_truth_backend, init_flags=init_flags, method_flags=method_flags, init_all_as_kwargs_np={ "input_channels": input_channels, "output_channels": output_channels, "weight_initializer": weight_initializer, "num_layers": num_layers, "return_sequence": return_sequence, "return_state": return_state, "device": on_device, "dtype": input_dtype[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"inputs": np.asarray(vals[0], dtype=input_dtype[0])}, class_name=class_name, method_name=method_name, init_with_v=init_with_v, method_with_v=method_with_v, rtol_=1e-01, atol_=1e-01, on_device=on_device, ) # MaxPool1D @handle_method( method_tree="MaxPool1D.__call__", x_k_s_p=helpers.arrays_for_pooling(min_dims=3, max_dims=3, min_side=1, max_side=4), ) def test_maxpool1d_layer( *, x_k_s_p, test_gradients, on_device, class_name, method_name, backend_fw, ground_truth_backend, init_flags, method_flags, ): input_dtype, x, kernel_size, stride, padding = x_k_s_p helpers.test_method( backend_to_test=backend_fw, ground_truth_backend=ground_truth_backend, init_flags=init_flags, method_flags=method_flags, init_all_as_kwargs_np={ "kernel_size": kernel_size, "stride": stride, "padding": padding, "device": on_device, "dtype": input_dtype[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"inputs": x[0]}, class_name=class_name, method_name=method_name, test_gradients=test_gradients, on_device=on_device, ) # # Pooling # # MaxPool2D @handle_method( method_tree="MaxPool2D.__call__", x_k_s_p=helpers.arrays_for_pooling(min_dims=4, max_dims=4, min_side=1, max_side=4), ) def test_maxpool2d_layer( *, x_k_s_p, test_gradients, on_device, class_name, method_name, backend_fw, ground_truth_backend, init_flags, method_flags, ): input_dtype, x, kernel_size, stride, padding = x_k_s_p helpers.test_method( backend_to_test=backend_fw, ground_truth_backend=ground_truth_backend, init_flags=init_flags, method_flags=method_flags, init_all_as_kwargs_np={ "kernel_size": kernel_size, "stride": stride, "padding": padding, "device": on_device, "dtype": input_dtype[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"inputs": x[0]}, class_name=class_name, method_name=method_name, test_gradients=test_gradients, on_device=on_device, ) # MaxPool3D @handle_method( method_tree="MaxPool3D.__call__", x_k_s_p=helpers.arrays_for_pooling(min_dims=5, max_dims=5, min_side=1, max_side=4), ) def test_maxpool3d_layer( *, x_k_s_p, test_gradients, on_device, class_name, method_name, backend_fw, ground_truth_backend, init_flags, method_flags, ): input_dtype, x, kernel_size, stride, padding = x_k_s_p helpers.test_method( backend_to_test=backend_fw, ground_truth_backend=ground_truth_backend, init_flags=init_flags, method_flags=method_flags, init_all_as_kwargs_np={ "kernel_size": kernel_size, "stride": stride, "padding": padding, "device": on_device, "dtype": input_dtype[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"x": x[0]}, class_name=class_name, method_name=method_name, test_gradients=test_gradients, on_device=on_device, ) # multi_head_attention @handle_method( method_tree="MultiHeadAttention.__call__", dtype_mha=_x_and_mha(), init_with_v=st.booleans(), method_with_v=st.booleans(), method_num_positional_args=helpers.num_positional_args( fn_name="MultiHeadAttention._forward" ), build_mode=st.just("on_init"), ) def 
test_multi_head_attention_layer( dtype_mha, init_with_v, method_with_v, build_mode, on_device, class_name, method_name, backend_fw, ground_truth_backend, init_flags, method_flags, ): ( input_dtype, x_mha, scale, num_heads, context, mask, query_dim, head_dim, dropout_rate, context_dim, with_to_q_fn, with_to_kv_fn, with_to_out_fn, ) = dtype_mha ret_np_flat, ret_np_from_gt_flat = helpers.test_method( backend_to_test=backend_fw, ground_truth_backend=ground_truth_backend, init_flags=init_flags, method_flags=method_flags, init_all_as_kwargs_np={ "query_dim": query_dim, "num_heads": num_heads, "head_dim": head_dim, "dropout_rate": dropout_rate, "context_dim": context_dim, "with_to_q_fn": with_to_q_fn, "with_to_kv_fn": with_to_kv_fn, "with_to_out_fn": with_to_out_fn, "build_mode": build_mode, "device": on_device, "dtype": input_dtype[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "inputs": np.asarray(x_mha, dtype=input_dtype[0]), "context": np.asarray(context, dtype=input_dtype[0]), "mask": np.asarray(mask, dtype=input_dtype[0]), }, class_name=class_name, method_name=method_name, init_with_v=init_with_v, method_with_v=method_with_v, rtol_=1e-2, atol_=1e-2, test_values=False, return_flat_np_arrays=True, on_device=on_device, ) assert_same_type_and_shape([ret_np_flat, ret_np_from_gt_flat]) # # Sequential # @handle_method( method_tree="Sequential.__call__", bs_c_target=st.sampled_from( [ ( [1, 2], 5, [ [ [-0.34784955, 0.47909835, 0.7241975, -0.82175905, -0.43836743], [-0.34784955, 0.47909835, 0.7241975, -0.82175905, -0.43836743], ] ], ) ] ), with_v=st.booleans(), seq_v=st.booleans(), dtype=helpers.get_dtypes("float", full=False), ) def test_sequential_layer( bs_c_target, with_v, seq_v, dtype, method_flags, on_device, backend_fw ): with ivy.utils.backend.ContextManager(backend_fw): dtype = dtype[0] if backend_fw == "torch": assume("float16" not in dtype) if backend_fw == "paddle": assume(dtype != "float16") # smoke test batch_shape, channels, target = bs_c_target tolerance_dict = { "bfloat16": 1e-1, "float16": 1e-2, "float32": 1e-2, "float64": 1e-2, } if method_flags.as_variable[0]: x = _variable( ivy.asarray( ivy.linspace( ivy.zeros(batch_shape), ivy.ones(batch_shape), channels ), dtype=dtype, ) ) else: x = ivy.asarray( ivy.linspace(ivy.zeros(batch_shape), ivy.ones(batch_shape), channels), dtype=dtype, ) if with_v: np.random.seed(0) wlim = (6 / (channels + channels)) ** 0.5 v = Container( { "submodules": { "v0": { "w": _variable( ivy.array( np.random.uniform( -wlim, wlim, (channels, channels) ), dtype=dtype, device=on_device, ) ), "b": _variable( ivy.zeros([channels], device=on_device, dtype=dtype) ), }, "v2": { "w": _variable( ivy.array( np.random.uniform( -wlim, wlim, (channels, channels) ), dtype=dtype, device=on_device, ) ), "b": _variable( ivy.zeros([channels], device=on_device, dtype=dtype) ), }, } } ) else: v = None if seq_v: seq = ivy.Sequential( ivy.Linear(channels, channels, device=on_device, dtype=dtype), ivy.Dropout(0.0, dtype=dtype), ivy.Linear(channels, channels, device=on_device, dtype=dtype), device=on_device, v=v if with_v else None, dtype=dtype, ) else: seq = ivy.Sequential( ivy.Linear( channels, channels, device=on_device, v=v["submodules"]["v0"] if with_v else None, dtype=dtype, ), ivy.Dropout(0.0, dtype=dtype), ivy.Linear( channels, channels, device=on_device, v=v["submodules"]["v2"] if with_v else None, dtype=dtype, ), device=on_device, dtype=dtype, ) ret = seq(x) # type test assert ivy.is_ivy_array(ret) # cardinality test assert ret.shape == 
ivy.Shape(batch_shape + [channels]) # value test if not with_v: return assert np.allclose( ivy.to_numpy(seq(x)), np.array(target), rtol=tolerance_dict[dtype] )
ivy/ivy_tests/test_ivy/test_stateful/test_layers.py/0
{ "file_path": "ivy/ivy_tests/test_ivy/test_stateful/test_layers.py", "repo_id": "ivy", "token_count": 24826 }
69
from dataclasses import dataclass


@dataclass
class BackendNativeObject:
    name: str
    namespace: str

    def full_name(self):
        if self.namespace == "":
            return self.name
        return f"{self.namespace}.{self.name}"
ivy/scripts/backend_generation/shared.py/0
{ "file_path": "ivy/scripts/backend_generation/shared.py", "repo_id": "ivy", "token_count": 101 }
70
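A brief, hypothetical usage sketch of the BackendNativeObject dataclass from the file above; the example objects and the import path are illustrative assumptions, not part of the repository.

# Hypothetical usage sketch; assumes the BackendNativeObject dataclass from
# scripts/backend_generation/shared.py above is importable in your environment.
from scripts.backend_generation.shared import BackendNativeObject

tensor = BackendNativeObject(name="Tensor", namespace="torch")
builtin = BackendNativeObject(name="int", namespace="")

assert tensor.full_name() == "torch.Tensor"  # namespaced object
assert builtin.full_name() == "int"          # empty namespace falls back to the bare name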
submodules = (
    "test_paddle",
    "test_tensorflow",
    "test_torch",
    "test_jax",
    "test_numpy",
    "test_functional",
    "test_experimental",
    "test_stateful",
    "test_misc",
    "test_scipy",
    "test_pandas",
    "test_mindspore",
    "test_onnx",
    "test_sklearn",
    "test_xgboost",
)

db_dict = {
    "test_functional/test_core": ["core", 10],
    "test_experimental/test_core": ["exp_core", 11],
    "test_functional/test_nn": ["nn", 12],
    "test_experimental/test_nn": ["exp_nn", 13],
    "test_stateful": ["stateful", 14],
    "test_torch": ["torch", 15],
    "test_jax": ["jax", 16],
    "test_tensorflow": ["tensorflow", 17],
    "test_numpy": ["numpy", 18],
    "test_misc": ["misc", 19],
    "test_paddle": ["paddle", 20],
    "test_scipy": ["scipy", 21],
    "test_pandas": ["pandas", 22],
    "test_mindspore": ["mindspore", 23],
    "test_onnx": ["onnx", 24],
    "test_sklearn": ["sklearn", 25],
    "test_xgboost": ["xgboost", 26],
}

result_config = {
    "success": "https://img.shields.io/badge/-success-success",
    "failure": "https://img.shields.io/badge/-failure-red",
}


def make_clickable(url, name):
    return (
        f'<a href="{url}" rel="noopener noreferrer" '
        + f'target="_blank"><img src={name}></a>'
    )


def get_submodule(test_path):
    test_path = test_path.split("/")
    for name in submodules:
        if name in test_path:
            if name == "test_functional":
                if len(test_path) > 3 and test_path[3] == "test_experimental":
                    coll = db_dict[f"test_experimental/{test_path[4]}"]
                else:
                    coll = db_dict[f"test_functional/{test_path[-2]}"]
            else:
                coll = db_dict[name]
            break
    submod_test = test_path[-1]
    submod, test_fn = submod_test.split("::")
    submod = submod.replace("test_", "").replace(".py", "")
    return coll, submod, test_fn


def update_individual_test_results(
    collection,
    id,
    submod,
    backend,
    test,
    result,
    backend_version=None,
    frontend_version=None,
    device=None,
):
    key = f"{submod}.{backend}"
    if backend_version is not None:
        backend_version = backend_version.replace(".", "_")
        key += f".{backend_version}"
    if frontend_version is not None:
        frontend_version = frontend_version.replace(".", "_")
        key += f".{frontend_version}"
    key += f".{test}"
    if device:
        key += f".{device}"
    collection.update_one(
        {"_id": id},
        {"$set": {key: result}},
        upsert=True,
    )
ivy/scripts/run_tests/old_run_test_helpers.py/0
{ "file_path": "ivy/scripts/run_tests/old_run_test_helpers.py", "repo_id": "ivy", "token_count": 1231 }
71
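A short, hypothetical illustration of how get_submodule above maps a pytest node id onto a database collection, submodule name and test function; the node id below is made up for demonstration and is not taken from the repository.

# Hypothetical example; assumes get_submodule and db_dict from
# scripts/run_tests/old_run_test_helpers.py above are in scope.
node_id = "ivy_tests/test_ivy/test_functional/test_core/test_linalg.py::test_matmul"
coll, submod, test_fn = get_submodule(node_id)
# coll    -> ["core", 10]   (the db_dict entry for "test_functional/test_core")
# submod  -> "linalg"       ("test_" prefix and ".py" suffix stripped)
# test_fn -> "test_matmul"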
jq -c '.compiler[]' available_configs.json | while read config; do
    export TAG=${config:1:${#config}-2}
    export CLEAN=true
    python -m build
    python3 scripts/rename_wheels.py
done
python3 -m twine upload dist/* -u "__token__" -p "$PYPI_PASSWORD" --verbose
ivy/scripts/shell/deploy_pypi.sh/0
{ "file_path": "ivy/scripts/shell/deploy_pypi.sh", "repo_id": "ivy", "token_count": 106 }
72
# lint as: python3
# Copyright 2021 The Ivy Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
__version__ = None

import setuptools
from setuptools import setup
from pathlib import Path
from urllib import request
import os
import json
import re


def _get_paths_from_binaries(binaries, root_dir=""):
    """Get all the paths from the binaries.json into a list."""
    paths = []
    ext = "pyd" if os.name == "nt" else "so"
    if isinstance(binaries, str):
        return [os.path.join(root_dir, binaries + "." + ext)]
    elif isinstance(binaries, dict):
        for k, v in binaries.items():
            paths += _get_paths_from_binaries(v, os.path.join(root_dir, k))
    else:
        for i in binaries:
            paths += _get_paths_from_binaries(i, root_dir)
    return paths


def _strip(line):
    return line.split(" ")[0].split("#")[0].split(",")[0]


# Download all relevant binaries in binaries.json
binaries_dict = json.load(open("binaries.json"))
available_configs = json.load(open("available_configs.json"))
binaries_paths = _get_paths_from_binaries(binaries_dict)
version = os.environ.get("VERSION", "main")
fixed_tag = os.environ.get("TAG", None)
clean = os.environ.get("CLEAN", None)
terminate = False
all_tags, python_tag, plat_name, options = None, None, None, None
if fixed_tag:
    python_tag, _, plat_name = str(fixed_tag).split("-")
    options = {"bdist_wheel": {"python_tag": python_tag, "plat_name": plat_name}}
    all_tags = [fixed_tag]
else:
    from pip._vendor.packaging import tags

    all_tags = list(tags.sys_tags())

# download binaries for the tag with highest precedence
for tag in all_tags:
    if terminate:
        break
    for path in binaries_paths:
        module = path.split(os.sep)[1]
        if (os.path.exists(path) and not clean) or str(tag) not in available_configs[
            module
        ]:
            continue
        folders = path.split(os.sep)
        folder_path, file_path = os.sep.join(folders[:-1]), folders[-1]
        ext = "pyd" if os.name == "nt" else "so"
        file_name = f"{file_path[:-(len(ext)+1)]}_{tag}.{ext}"
        search_path = f"{module}/{file_name}"
        try:
            response = request.urlopen(
                f"https://github.com/unifyai/binaries/raw/{version}/{search_path}",
                timeout=40,
            )
            os.makedirs(os.path.dirname(path), exist_ok=True)
            with open(path, "wb") as f:
                f.write(response.read())
            terminate = path == binaries_paths[-1]
        except request.HTTPError:
            break

this_directory = Path(__file__).parent
long_description = (this_directory / "README.md").read_text(encoding="utf-8")

# Remove img tags that have class "only-dark"
long_description = re.sub(
    r"<img [^>]*class=\"only-dark\"[^>]*>",
    "",
    long_description,
    flags=re.MULTILINE,
)

# Remove a tags that have class "only-dark"
long_description = re.sub(
    r"<a [^>]*class=\"only-dark\"[^>]*>((?:(?!<\/a>).)|\s)*<\/a>\n",
    "",
    long_description,
    flags=re.MULTILINE,
)

# Apply version
with open("ivy/_version.py") as f:
    exec(f.read(), __version__)

setup(
    name="ivy",
    version=__version__,
    author="Unify",
    author_email="[email protected]",
    description=(
        "The unified machine learning framework, enabling framework-agnostic "
        "functions, layers and libraries."
    ),
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://unify.ai/ivy",
    project_urls={
        "Docs": "https://unify.ai/docs/ivy/",
        "Source": "https://github.com/unifyai/ivy",
    },
    include_package_data=True,
    packages=setuptools.find_packages(),
    install_requires=[
        _strip(line)
        for line in open("requirements/requirements.txt", "r", encoding="utf-8")
    ],
    classifiers=[
        "License :: OSI Approved :: Apache Software License",
    ],
    license="Apache 2.0",
    options=options,
)
ivy/setup.py/0
{ "file_path": "ivy/setup.py", "repo_id": "ivy", "token_count": 1824 }
73
<?xml version="1.0" encoding="UTF-8" standalone="no"?> <!-- Created with Inkscape (http://www.inkscape.org/) --> <svg xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:cc="http://creativecommons.org/ns#" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:svg="http://www.w3.org/2000/svg" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd" xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" width="1000" height="1000" viewBox="0 0 264.58384 264.58335" version="1.1" id="svg8" inkscape:version="0.92.3 (2405546, 2018-03-11)" sodipodi:docname="ivy_logo_only.svg" inkscape:export-filename="/media/djl11/Drive/Ivy/ivy_logo_only.png" inkscape:export-xdpi="96" inkscape:export-ydpi="96"> <defs id="defs2"> <linearGradient inkscape:collect="always" id="linearGradient927"> <stop style="stop-color:#009100;stop-opacity:1;" offset="0" id="stop923" /> <stop style="stop-color:#009100;stop-opacity:0;" offset="1" id="stop925" /> </linearGradient> <inkscape:path-effect effect="bspline" id="path-effect861" is_visible="true" weight="33.333333" steps="2" helper_size="0" apply_no_weight="true" apply_with_weight="true" only_selected="false" /> <inkscape:path-effect effect="bspline" id="path-effect857" is_visible="true" weight="33.333333" steps="2" helper_size="0" apply_no_weight="true" apply_with_weight="true" only_selected="false" /> <radialGradient inkscape:collect="always" xlink:href="#linearGradient927" id="radialGradient929" cx="45.866776" cy="282.20438" fx="45.866776" fy="282.20438" r="36.088917" gradientTransform="matrix(6.8295096,-6.6029816,9.9543908,10.295894,-2975.9284,-2423.9445)" gradientUnits="userSpaceOnUse" /> </defs> <sodipodi:namedview id="base" pagecolor="#ffffff" bordercolor="#666666" borderopacity="1.0" inkscape:pageopacity="0.0" inkscape:pageshadow="2" inkscape:zoom="0.5" inkscape:cx="482.00807" inkscape:cy="302.50583" inkscape:document-units="mm" inkscape:current-layer="layer1" showgrid="false" showguides="false" units="px" inkscape:window-width="1920" inkscape:window-height="1052" inkscape:window-x="3840" inkscape:window-y="0" inkscape:window-maximized="1" /> <metadata id="metadata5"> <rdf:RDF> <cc:Work rdf:about=""> <dc:format>image/svg+xml</dc:format> <dc:type rdf:resource="http://purl.org/dc/dcmitype/StillImage" /> <dc:title></dc:title> </cc:Work> </rdf:RDF> </metadata> <g inkscape:label="Layer 1" inkscape:groupmode="layer" id="layer1" transform="translate(0,-32.416256)"> <path sodipodi:type="spiral" style="fill:none;fill-opacity:1;fill-rule:evenodd;stroke:url(#radialGradient929);stroke-width:22.19278336;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" id="path815" sodipodi:cx="146.49182" sodipodi:cy="178.74434" sodipodi:expansion="1" sodipodi:revolution="2.1525362" sodipodi:radius="125.46326" sodipodi:argument="-14.982467" sodipodi:t0="0" d="m 146.49182,178.74434 c -6.55931,-5.81706 4.60515,-11.71175 9.66834,-10.90201 13.72092,2.19436 16.93783,19.40766 12.13567,30.23868 -8.58993,19.37414 -33.69062,23.08884 -50.80901,13.36935 C 92.364898,197.18661 88.078734,163.28809 102.8838,140.071 c 19.73282,-30.94472 62.70022,-35.79816 91.94969,-15.83669 36.80748,25.11947 42.22472,77.26962 17.07037,112.52003 C 181.43857,279.44732 120.04717,285.4267 78.813483,255.05838 30.221011,219.27037 23.680253,148.60361 59.275772,101.39767 82.917837,70.044115 121.56528,52.545551 160.65312,54.082852" /> <ellipse 
style="opacity:1;fill:#009600;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:43.72273254;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" id="path865" cx="148.9328" cy="179.2731" rx="13.30783" ry="11.510118" /> </g> </svg>
ivy/.idea/icon.svg/0
{ "file_path": "ivy/.idea/icon.svg", "repo_id": "ivy", "token_count": 2250 }
0
Copyright 2021 The Ivy Authors. All rights reserved. Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 
3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 10. The software in this directory and its subdirectories is licensed under the Apache License, Version 2.0, except for the software contained within the ivy/compiler directory, which is subject to the license set forth in the LICENSE file located within that directory. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License.
ivy/LICENSE/0
{ "file_path": "ivy/LICENSE", "repo_id": "ivy", "token_count": 3253 }
1
# flake8: noqa
import os
import subprocess
import sys

# install requests only for the build, and uninstall it later
subprocess.run(
    "pip3 install requests",
    shell=True,
)
import requests


def get_latest_package_version(package_name):
    """Return the latest version of ``package_name`` on PyPI, or None on failure."""
    try:
        url = f"https://pypi.org/pypi/{package_name}/json"
        response = requests.get(url, timeout=10)
        response.raise_for_status()
        package_info = response.json()
        return package_info["info"]["version"]
    except requests.exceptions.RequestException as e:
        print(f"Error: Failed to fetch package information for {package_name}: {e}")
        return None


def directory_generator(req, base="/opt/fw/"):
    """Install each requested framework (optionally pinned as "pkg/version") into its own directory."""
    for versions in req:
        if "/" in versions:
            pkg, ver = versions.split("/")
            path = base + pkg + "/" + ver
            # skip the install if this framework/version directory already exists
            if not os.path.exists(path):
                install_pkg(path, pkg + "==" + ver)
        else:
            install_pkg(base + versions, versions)


def install_pkg(path, pkg, base="fw/"):
    # strip any "==version" pin to determine which framework is being installed
    pkg_name = pkg.split("==")[0] if "==" in pkg else pkg
    if pkg_name == "torch":
        subprocess.run(
            f"yes |pip3 install --upgrade {pkg} --target"
            f" {path} --default-timeout=100 --no-cache-dir",
            shell=True,
        )
        subprocess.run(
            "yes |pip3 install --upgrade torchvision --index-url"
            " https://download.pytorch.org/whl/cu121 --target"
            f" {path} --default-timeout=100 --no-cache-dir",
            shell=True,
        )
    elif pkg_name == "jax":
        subprocess.run(
            f"yes |pip install --upgrade --target {path} 'jax[cuda12_pip]' -f"
            " https://storage.googleapis.com/jax-releases/jax_cuda_releases.html "
            " --no-cache-dir",
            shell=True,
        )
    elif pkg_name == "paddle":
        subprocess.run(
            "yes |pip install "
            f" paddlepaddle-gpu=={get_latest_package_version('paddlepaddle')}"
            f" --target {path} -f https://mirror.baidu.com/pypi/simple "
            " --no-cache-dir",
            shell=True,
        )
    elif pkg_name == "tensorflow":
        subprocess.run(
            f"yes |pip install tensorflow[and-cuda] --target {path}",
            shell=True,
        )
    else:
        subprocess.run(
            f"yes |pip3 install --upgrade {pkg} --target"
            f" {path} --default-timeout=100 --no-cache-dir",
            shell=True,
        )


if __name__ == "__main__":
    arg_lis = sys.argv
    if len(arg_lis) > 1:  # frameworks to install were specified on the command line
        directory_generator(arg_lis[1:], "")
    else:
        directory_generator(["tensorflow", "jax", "torch", "paddle"])

    # uninstall requests once the build is done
    subprocess.run(
        "yes |pip3 uninstall requests",
        shell=True,
    )
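# Example usage (illustrative note, not part of the original script): specific framework
# versions can be requested as "pkg/version" arguments, otherwise the default set of
# frameworks is installed, e.g.:
#
#   python3 gpu_framework_directory.py torch/2.1.0 tensorflow
#   python3 gpu_framework_directory.py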
ivy/docker/gpu_framework_directory.py/0
{ "file_path": "ivy/docker/gpu_framework_directory.py", "repo_id": "ivy", "token_count": 1460 }
2
#!/bin/bash -e docker run --rm -v "$(pwd)"/..:/project unifyai/doc-builder:latest
ivy/docs/make_docs.sh/0
{ "file_path": "ivy/docs/make_docs.sh", "repo_id": "ivy", "token_count": 33 }
3
Continuous Integration
======================

.. _`continuous integration channel`: https://discord.com/channels/799879767196958751/1189908611208597544
.. _`discord`: https://discord.gg/sXyFF8tDtm

We follow the practice of Continuous Integration (CI) in order to regularly build and test code at Ivy.
This makes sure that:

#. Developers get feedback on their code early, and errors in the code are detected quickly. ✅
#. Developers can easily debug the code when tracking down the source of an error, and roll back changes in case of issues. 🔍

In order to incorporate Continuous Integration in the Ivy repository, we follow a three-fold technique, which involves:

#. Commit Triggered Testing
#. Periodic Testing
#. Manual Testing

.. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/deep_dive/continuous_integration/CI.png?raw=true
   :alt: CI Overview

We use GitHub Actions to implement and automate the process of testing.
GitHub Actions allows us to implement custom workflows that can build the code in the repository and run the tests.
All the workflows used by Ivy are defined in the `.github/workflows <https://github.com/unifyai/ivy/tree/main/.github/workflows>`_ directory.

Commit (Push/PR) Triggered Testing
----------------------------------

A small subset of the following tests is triggered whenever a commit (push/PR) is made to the Ivy repository:

#. Ivy Tests
#. Array API Tests

Ivy Tests
---------

A test is defined as the triplet of (submodule, function, backend).
We use the following notation to identify each test:

:code:`submodule::function,backend`

For example, :code:`ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py::test_torch_instance_arctan_,numpy`

The number of such Ivy tests running on the repository (without taking any framework/Python versioning into account) is about 12,500 (as of writing this documentation), and we are adding tests daily.
Therefore, triggering all the tests on each commit is neither desirable (it would consume a huge amount of compute resources and take a long time to run) nor feasible (each job in GitHub Actions has a time limit of 360 minutes, as well as a memory limit).

Further, when we consider versioning, with a single Python version and ~40 frontend and backend versions, the number of tests would shoot up to 40 * 40 * 12,500 = 20,000,000, and we obviously have neither the resources nor the time to run that many tests on each commit.

Thus, we need to prune the tests that run on each push to the GitHub repository.
The ideal situation is to trigger only the tests that are impacted by the changes made in a push.
Tests that are not impacted by the changes are wasteful to trigger, as their results don't change (keeping the same Hypothesis configuration).

For example, consider this `commit <https://github.com/unifyai/ivy/commit/29cc90dda9e9a8d64789ed28e6eab0f41257a435>`_.
The commit changes the :code:`_reduce_loss` and :code:`binary_cross_entropy` functions in the ivy/functional/ivy/losses.py file.
The only tests that must be triggered (for all 5 backends) are:

:code:`ivy_tests/test_ivy/test_functional/test_nn/test_losses.py::test_binary_cross_entropy_with_logits`

:code:`ivy_tests/test_ivy/test_functional/test_nn/test_losses.py::test_cross_entropy`

:code:`ivy_tests/test_ivy/test_functional/test_nn/test_losses.py::test_binary_cross_entropy`

:code:`ivy_tests/test_ivy/test_functional/test_nn/test_losses.py::test_sparse_cross_entropy`

:code:`ivy_tests/test_ivy/test_frontends/test_torch/test_loss_functions.py::test_torch_binary_cross_entropy`

:code:`ivy_tests/test_ivy/test_frontends/test_torch/test_loss_functions.py::test_torch_cross_entropy`

Ivy's Functional API functions :code:`binary_cross_entropy_with_logits`, :code:`cross_entropy`, :code:`binary_cross_entropy`, and :code:`sparse_cross_entropy` are precisely the ones impacted by the changes in the commit, and since the torch frontend functions :code:`torch_binary_cross_entropy` and :code:`torch_cross_entropy` wrap these, the corresponding frontend tests are also impacted.
No other frontend function calls these underneath, and hence no other test should be triggered.

How do we (or at least try to) achieve this?

Implementation
--------------

A Top-Down View
---------------

In order to implement this, we use the magic of Test Coverage!
Test coverage refers to finding the statements (lines) in your code that are executed (or could have been executed) when running a particular test.
We use the Python Coverage Package (https://coverage.readthedocs.io/en/7.0.0/) for determining the test coverage of our tests.
It works by running a particular pytest and logging each line (of our code) that was executed (or could have been executed) by the test.

Computing the test coverage for all Ivy tests allows us to find, for each line of code, which tests cover it.
We create a Dictionary (Mapping) to store this information as follows (the actual Mapping we prepare is a bit different from this design, but we will follow this one for now for pedagogical purposes):

.. math::

   \begin{equation}
   \begin{aligned}
   &\{ \\
   & \ \ \ \ "f_1": [\{\}, \{"t_1", "t_3", "t_7"\}, ..., \{"t_{10}", "t_{11}", "t_{15}"\}], \\
   & \ \ \ \ ... \\
   & \ \ \ \ "f_m": [\{\}, \{"t_{11}", "t_{23}", "t_{37}"\}, ..., \{"t_{32}", "t_{54}", "t_{65}"\}] \\
   &\} \\
   \end{aligned}
   \end{equation}

The dictionary thus stores a list for each file :math:`f_1 … f_m`.
The list is a sequence encapsulating the lines of the file.
Each index of the list contains a set of tests, which are mapped to the corresponding line in the file.

Given this Mapping for a commit, we can just follow the procedure below:

1. Find the files which are changed in the commit, and check for lines that are added/deleted/updated in each file.
2. Determine the tests that impact those lines, and trigger just those tests, and no others.

But there's a fundamental issue here: computing the Mapping requires determining the coverage for all tests, which involves running all the tests.
Doesn't this sound cyclical? After all, we are doing all this to avoid running all the tests.

Now assume that we had some way to update the Mapping for a commit from the previous Mapping, without having to run all the tests.
Then, given the Mapping for a single commit, we could determine and run the relevant tests for each commit as follows:
.. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/deep_dive/continuous_integration/ITRoadmap.png?raw=true
   :alt: Intelligent Testing Roadmap

This is exactly what we do in order to implement Intelligent Testing.
The "Update Mapping" logic works as follows for each changed file:

1. For each deleted line, we remove the corresponding entry from the list corresponding to the file in the Mapping.

.. code-block:: python

    tests_file = tests[file_name]
    for line in sorted(deleted, reverse=True):
        if line < len(tests_file):
            del tests_file[line]

2. For each added line, we compute the tests as the intersection of the sets of tests on the lines above and below it.

.. code-block:: python

    for line in added:
        top = -1
        bottom = -1
        if 0 <= line - 1 < len(tests_file):
            top = tests_file[line - 1]
        if 0 <= line + 1 < len(tests_file):
            bottom = tests_file[line + 1]
        tests_line = set()
        if top != -1 and bottom != -1:
            tests_line = top.intersection(bottom)
        elif top != -1:
            tests_line = top
        elif bottom != -1:
            tests_line = bottom
        tests_file.insert(line, tests_line)
    tests[file_name] = tests_file

3. Finally, for newly added tests, we compute the coverage of the new tests (limited to 10 per commit), and update the Mapping accordingly.

Once the Mapping has been updated, the "Determine & Run Tests" logic works as follows:

1. For each deleted line, we collect the tests corresponding to the line as:

.. code-block:: python

    for line in deleted:
        tests_to_run = determine_tests_line(tests_file, line, tests_to_run)

2. For each updated line, we collect the tests corresponding to the line as:

.. code-block:: python

    for line in updated:
        tests_to_run = determine_tests_line(tests_file, line, tests_to_run)

3. For each added line, we collect the tests corresponding to the line as:

.. code-block:: python

    for line in added:
        tests_to_run = determine_tests_line(tests_file, line, tests_to_run)

4. Further, all the new tests added in a commit are collected (up to a maximum of 10; any more added tests are taken up in subsequent commits).

5. Finally, all the collected tests are triggered by the scripts/run_tests/run_tests.py script, and the corresponding entry in the MongoDB database is updated with the test result (more details on this in the Dashboard section below).
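The :code:`determine_tests_line` helper referenced above simply looks up the set of tests stored for a given line of the file.
A minimal sketch of what such a helper might look like (purely illustrative, not the actual implementation used in the scripts) is:

.. code-block:: python

    def determine_tests_line(tests_file, line, tests_to_run):
        # ``tests_file`` is the per-line list of test sets for one file; add every
        # test mapped to ``line`` to the running set of tests to trigger.
        if 0 <= line < len(tests_file):
            tests_to_run.update(tests_file[line])
        return tests_to_run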
Storing (and retrieving) the Mapping
------------------------------------

As we saw in the overview section, we compute a mapping of lines to tests for each commit to the Ivy repository.
This mapping has to be stored somewhere, in order to be used by a future commit to determine the corresponding mapping (and therefore, trigger the required tests).
Therefore, we need a mechanism to store and retrieve the Mapping.
We use the unifyai/Mapping GitHub repository for this purpose, for the following reasons:

#. Unlike specialized databases (like Google Cloud), we need not store any specialized secrets to access the database (separately for reading and writing), and no separate API keys are required for updating it, saving us from exposing our secret key files (from GitHub Actions). In fact, we only need a single SSH Deploy Key (secrets.SSH_DEPLOY_KEY), which is required for pushing to the repository.
#. The repository is a public repository, and thus can be read by anyone, while pushing can be restricted. This makes it possible to expose the Mapping for running tests on PRs, while allowing only push commits to update the Mapping.
#. We don't need to make any specialized API calls to read/write/update the Mapping (cloning and pushing to the repository suffices).
#. Finally, it saves us from a massive race condition issue (which we highlight below).

A GitHub repository is obviously not the best database, with its own set of constraints (e.g. a 100 MB space limit), but it works well enough for our requirements.

Cloning and Pushing to the Repository
-------------------------------------

For push-triggered testing (the intelligent-tests.yml workflow), we use the SSH cloning method in order to facilitate the clone and push commands to the repository, as follows:

.. code-block::

    source ./ivy/scripts/shell/clone_mapping.sh master

    Determine and Run Tests, and Update the Mapping
    ...

    git add .
    git commit -m "Update Mapping"
    git push origin master

The clone_mapping script works as follows:
It creates a directory called .ssh in the HOME folder of the VM hosted by GitHub, and copies the Deploy Key into the deploy_key file within that folder.
Further, it adds github.com to the list of SSH known hosts.
Now that the SSH key of the runner has permissions to clone and push the Mapping repository, it simply calls the git clone command.
It does so with the fetch depth set to 1, in order to clone just the latest commit, and no others.

.. code-block::

    USER_EMAIL="[email protected]"
    USER_NAME="ivy-branch"
    TARGET_BRANCH=$1
    GITHUB_SERVER="github.com"
    mkdir --parents "$HOME/.ssh"
    DEPLOY_KEY_FILE="$HOME/.ssh/deploy_key"
    echo "${SSH_DEPLOY_KEY}" > "$DEPLOY_KEY_FILE"
    chmod 600 "$DEPLOY_KEY_FILE"
    SSH_KNOWN_HOSTS_FILE="$HOME/.ssh/known_hosts"
    ssh-keyscan -H "$GITHUB_SERVER" > "$SSH_KNOWN_HOSTS_FILE"
    export GIT_SSH_COMMAND="ssh -i "$DEPLOY_KEY_FILE" -o UserKnownHostsFile=$SSH_KNOWN_HOSTS_FILE"

    # Setup git
    git config --global user.email "$USER_EMAIL"
    git config --global user.name "$USER_NAME"
    git clone --single-branch --depth 1 --branch "$TARGET_BRANCH" [email protected]:unifyai/Mapping.git

In the case of pull requests, we do not have access to the :code:`SSH_DEPLOY_KEY` secret (and we don't even want to give PRs that access), so we don't use the SSH clone methodology and instead use the HTTP clone method, as follows:

.. code-block::

    git clone -b master1 https://github.com/unifyai/Mapping.git --depth 1

    Determine and Run the Tests
    ...

PRs should not update the Mapping on the repository, and thus no push is required in the case of PRs.

Implementational Nitty Gritties
-------------------------------

Storage Space (unifyai/Mapping)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The GitHub repository only allows storing 100 MB of files per commit.
The current design of the Mapping takes up a huge amount of space, as test names are long strings that are stored repeatedly for each line impacted by the tests.
In order to reduce the space required for storing the Mapping, we restructure it as follows:

.. math::

   \begin{equation}
   \begin{aligned}
   &\{ \\
   & \ \ \ \ "index\_mapping": ["t_{1}", "t_{2}", ..., "t_{n}"], \\
   & \ \ \ \ "test\_mapping": \{"t_1": 1, "t_2": 2, ..., "t_n": n\}, \\
   & \ \ \ \ "f_1": [\{\}, \{1, 3, 7\}, ..., \{10, 11, 15\}], \\
   & \ \ \ \ ... \\
   & \ \ \ \ "f_m": [\{\}, \{11, 23, 37\}, ..., \{32, 54, 65\}] \\
   &\} \\
   \end{aligned}
   \end{equation}

We include the :code:`index_mapping` and the :code:`test_mapping` fields, which map indices to tests and tests to indices, respectively.
This allows us to store just the test index for each line in the Mapping, reducing the storage requirement significantly.
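To make the effect of this restructuring concrete, here is a small sketch (purely illustrative, with made-up file and test names) of how the packed Mapping could be queried:

.. code-block:: python

    # Per-line sets now hold small integer indices instead of long test-name strings.
    mapping = {
        "index_mapping": ["test_a", "test_b", "test_c"],          # index -> test name
        "test_mapping": {"test_a": 0, "test_b": 1, "test_c": 2},  # test name -> index
        "some_file.py": [set(), {0, 2}, {1}],                     # one set of indices per line
    }

    def tests_for_line(mapping, file_name, line):
        # Translate the stored indices back into test names for a given line.
        return {mapping["index_mapping"][i] for i in mapping[file_name][line]}

    print(tests_for_line(mapping, "some_file.py", 1))  # {'test_a', 'test_c'}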
Determine Test Coverage Workflow
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Since our "Update Mapping" routine is not precisely correct, the Mapping would keep accumulating inaccuracies as commits keep coming to the GitHub repository.
In order to prevent this snowball effect from resulting in completely irrelevant tests being run on each commit, we need to recalibrate the Mapping periodically.
This is done by the Determine Test Coverage Workflow (implemented in det-test-coverage.yml).

.. code-block::

    name: determine-test-coverage
    on:
      workflow_dispatch:
      schedule:
        - cron: "30 20 * * 6"

Notice that the workflow triggers every Saturday night at 8:30 PM (fun fact: it's just my gut feeling that there are relatively fewer commits on the repository on a Saturday night, so we get access to the resources quickly, LoL!).
The workflow runs all the Ivy tests, determines their coverage, computes the Mapping, and pushes it to the unifyai/Mapping repository.

Multiple Runners
^^^^^^^^^^^^^^^^

The Determine Test Coverage workflow takes about ~60 hours to complete if run with a single runner, while the GitHub Actions rules don't allow running a single job for more than 6 hours.
Therefore, we need to split the workflow based on the tests, into 32 runners.
Each runner caters to its own subset of tests, is responsible for determining the coverage for only those tests, and creates the Mapping based on them.
Therefore, we have 32 branches (master1, master2, ..., master32) on the unifyai/Mapping repository, and also 32 runners in the intelligent-tests and intelligent-tests-pr workflows.

Everything sounds good, but can you think of a potential race condition here?

Race Condition
^^^^^^^^^^^^^^

The synchronized object here is the unifyai/Mapping repository, which is accessed through pushes (write) and pulls (read).
The Determine Test Coverage workflow and the Intelligent Tests workflow can run concurrently, while both of them write to the Mapping repository.
Consider the following case for runner 2:

#. The Determine Test Coverage workflow has been running, and is about to complete for runner 2. Meanwhile, a commit made on master triggers the intelligent-tests workflow.
#. Runner 2 in the intelligent-tests workflow pulls the Mapping from the master2 branch of the unifyai/Mapping repository, and starts running the determined tests (based on the changes made in the commit).
#. The det-test-coverage workflow completes for runner 2, and pushes to the corresponding branch of the unifyai/Mapping repository.
#. Runner 2 in the intelligent-tests workflow also completes, and pushes the updated Mapping.

Thus, in the end, the push from det-test-coverage would be completely ignored, and the system would not be recalibrated.
Further, for some other runner(s), the final push may be done by the Determine Test Coverage workflow, and thus the test distribution itself might be corrupted (overlapping tests and missing tests).

We handle the race condition as follows:

#. The Intelligent Tests workflow is allowed to push to the repository only when there is no merge conflict, while the Determine Test Coverage workflow makes a force push (-f).
#. Therefore, when the above situation occurs, the push from the Intelligent Tests workflow is discarded, while the recalibration push stays in place, which leads to consistency among runners and corrects the coverage.
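For intuition, a runner's subset of tests could be selected in a simple round-robin fashion like the sketch below (illustrative only; the actual split logic used by the workflows may differ):

.. code-block:: python

    def tests_for_runner(all_tests, runner_id, num_runners=32):
        # Runner ``runner_id`` (1-indexed) takes every ``num_runners``-th test.
        return [t for i, t in enumerate(all_tests) if i % num_runners == runner_id - 1]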
Array API Tests
---------------

The `array-api-intelligent-tests.yml (Push) <https://github.com/unifyai/ivy/blob/main/.github/workflows/array-api-intelligent-tests.yml>`_ and the `array-api-intelligent-tests-pr.yml (Pull Request) <https://github.com/unifyai/ivy/blob/main/.github/workflows/array-api-intelligent-tests-pr.yml>`_ workflows run the Array API tests.
Similar to the Ivy tests, the Array API tests are also determined intelligently, and only the relevant tests are triggered on each commit.

More details about the Array API tests are available `here <array_api_tests.rst>`_.

Periodic Testing
----------------

In order to make sure that none of the Ivy tests are left ignored for a long time, and to decouple the rate of testing from the rate of committing to the repository, we implement periodic testing on the Ivy repository.

The `Test Ivy Cron Workflow <https://github.com/unifyai/ivy/blob/main/.github/workflows/test-ivy-cron.yml>`_ is responsible for implementing this behavior by running Ivy tests every hour.
In each run, it triggers 150 Ivy tests, cycling through the entire set of tests.
The number 150 is chosen to make sure that the action completes within 1 hour most of the time.
The test results update the corresponding cells on the Dashboards.

Manually Dispatched Workflows
-----------------------------

In order to trigger any particular test for any reason (maybe Intelligent Testing missed the test), you can follow these steps:

#. Visit `GitHub Actions <https://github.com/unifyai/ivy/actions/workflows/manual-tests.yml>`_.
#. Click on Run Workflow.
#. Add the name of the test as: :code:`ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py::test_torch_instance_arctan_`
#. If you want the test to be triggered for a particular backend, append it with a ",", as: :code:`ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py::test_torch_instance_arctan_,tensorflow`
#. Leave the Version Based Testing and GPU Testing options as false.
#. Check the result right there, or wait for the dashboard to update.

Manual tests are also available for PRs.
You can also run the Manual Tests workflow on a fork repository (while reviewing PRs), as follows:

1. Visit the "Actions" tab on the fork, and select the manual-tests-pr workflow from the left pane.
2. Trigger the workflow by following steps 2-4 described above. This might take some time to run, as the fork may have limited runners.

CI Pipeline ➡️
---------------

The subsections below provide the roadmap for running workflows and interpreting results when a push or a pull request is made to the repository.

Push
^^^^

Whenever a push is made to the repository, a variety of workflows are triggered automatically (as described above).
This can be seen on the GitHub repository page, with the commit message followed by a yellow dot, indicating that some workflows have been queued to run for this commit, as shown below:

.. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/deep_dive/continuous_integration/push.png?raw=true
   :alt: Push

Clicking on the yellow dot (🟡) (which changes to a tick (✔) or a cross (❌) once the tests have completed) yields a view of the test-suite results, as shown below:

.. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/deep_dive/continuous_integration/push1.png?raw=true
   :alt: Test-Suite

Click on the "Details" link corresponding to the failing tests, in order to identify the cause of the failure.
It redirects to the Actions tab, showing the details of the failure, as shown below:

.. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/deep_dive/continuous_integration/push2.png?raw=true
   :alt: Workflow Result

Click on the "Run Tests" section in order to see the logs of the failing tests for the Array API tests.
For Ivy tests, head to the "Combined Test Results" section of the display-test-results job, which shows the test logs for each of the tests in the following format:

.. code-block::

    ***************************************************
    Test 1
    ***************************************************
    Hypothesis Logs for Test 1 (Indicates Failure/Success)

    ***************************************************
    Test 2
    ***************************************************
    Hypothesis Logs for Test 2 (Indicates Failure/Success)

    ...

    ***************************************************
    Test n
    ***************************************************
    Hypothesis Logs for Test n (Indicates Failure/Success)

You can ignore the other sections of the workflow, as they are there for book-keeping and implementation purposes.
You can also refer directly to the Dashboard (available at https://ivy-dynamical-dashboards.onrender.com) to check the result of your test.

Pull Request
^^^^^^^^^^^^

In the case of a pull request, the test suite is available on the Pull Request page on GitHub, as shown below:

.. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/deep_dive/continuous_integration/pull-request1.png?raw=true
   :alt: PR Test-Suite

Clicking on the "Details" link redirects to the Action log.
The rest of the procedure remains the same as given in the Push section above.

As an added feature, the Intelligent Tests for PR workflow has a section on "New Failures Introduced" in the display-test-results job, which lists the details of tests that are failing on the PR fork/branch but not on the master branch.
When creating a PR, make sure that your PR does not introduce any new failures.

Dashboard
---------

In order to view the status of the tests at any point in time, we have implemented a dashboard application that shows the results of the latest workflow that ran each test.
The Dashboards are available at: https://ivy-dynamical-dashboards.onrender.com

You can filter tests by selecting choices from the various dropdowns.
The link can also be saved for redirecting straight to the filtered tests in the future.
The status badges are clickable, and will take you directly to the Action log of the latest workflow that ran the corresponding test.

**Round Up**

This should have hopefully given you a good feel for how Continuous Integration works in Ivy.

If you have any questions, please feel free to reach out on `discord`_ in the `continuous integration channel`_!

**Video**

.. raw:: html

    <iframe width="420" height="315" allow="fullscreen;"
    src="https://www.youtube.com/embed/eO268nc8WH4" class="video">
    </iframe>
ivy/docs/overview/deep_dive/continuous_integration.rst/0
{ "file_path": "ivy/docs/overview/deep_dive/continuous_integration.rst", "repo_id": "ivy", "token_count": 6817 }
4
Ivy Tests ========= .. _`test suite`: https://github.com/data-apis/array-api-tests .. _`Hypothesis`: https://hypothesis.readthedocs.io/en/latest/ .. _`test_array_api`: https://github.com/unifyai/ivy/tree/20d07d7887766bb0d1707afdabe6e88df55f27a5/ivy_tests .. _`test_ivy`: https://github.com/unifyai/ivy/tree/0fc4a104e19266fb4a65f5ec52308ff816e85d78/ivy_tests/test_ivy .. _`commit`: https://github.com/unifyai/ivy/commit/8e6074419c0b6ee27c52e8563374373c8bcff30f .. _`uploading`: https://github.com/unifyai/ivy/blob/0fc4a104e19266fb4a65f5ec52308ff816e85d78/.github/workflows/test-array-api-torch.yml#L30 .. _`downloading`: https://github.com/unifyai/ivy/blob/0fc4a104e19266fb4a65f5ec52308ff816e85d78/.github/workflows/test-array-api-torch.yml#L14 .. _`continuous integration`: https://github.com/unifyai/ivy/tree/0fc4a104e19266fb4a65f5ec52308ff816e85d78/.github/workflows .. _`search strategies`: https://hypothesis.readthedocs.io/en/latest/data.html .. _`methods`: https://hypothesis.readthedocs.io/en/latest/data.html .. _`finfo`: https://github.com/unifyai/ivy/blob/d8f1ffe8ebf38fa75161c1a9459170e95f3c82b6/ivy/functional/ivy/data_type.py#L276 .. _`data generation`: https://github.com/unifyai/ivy/blob/7063bf4475b93f87a4a96ef26c56c2bd309a2338/ivy_tests/test_ivy/test_functional/test_core/test_dtype.py#L337 .. _`Function Types`: function_types.rst .. _`test_default_int_dtype`: https://github.com/unifyai/ivy/blob/7063bf4475b93f87a4a96ef26c56c2bd309a2338/ivy_tests/test_ivy/test_functional/test_core/test_dtype.py#L835 .. _`sampled_from`: https://hypothesis.readthedocs.io/en/latest/data.html#hypothesis.strategies.sampled_from .. _`lists`: https://hypothesis.readthedocs.io/en/latest/data.html#hypothesis.strategies.lists .. _`default`: https://github.com/unifyai/ivy/blob/aef5ef5620bb6ad194030276e9c00118d006091b/ivy_tests/test_ivy/helpers/test_parameter_flags.py#L28 .. _`booleans`: https://hypothesis.readthedocs.io/en/latest/data.html#hypothesis.strategies.booleans .. _`integers`: https://hypothesis.readthedocs.io/en/latest/data.html#hypothesis.strategies.integers .. _`floats`: https://hypothesis.readthedocs.io/en/latest/data.html#hypothesis.strategies.floats .. _`none`: https://hypothesis.readthedocs.io/en/latest/data.html#hypothesis.strategies.none .. _`tuples`: https://hypothesis.readthedocs.io/en/latest/data.html#hypothesis.strategies.tuples .. _`one_of`: https://hypothesis.readthedocs.io/en/latest/data.html#hypothesis.strategies.one_of .. _`shared`: https://hypothesis.readthedocs.io/en/latest/data.html#hypothesis.strategies.shared .. _`sets`: https://hypothesis.readthedocs.io/en/latest/data.html#hypothesis.strategies.sets .. _`map`: https://hypothesis.readthedocs.io/en/latest/data.html#mapping .. _`filter`: https://hypothesis.readthedocs.io/en/latest/data.html#filtering .. _`flatmap`: https://hypothesis.readthedocs.io/en/latest/data.html#chaining-strategies-together .. _`data`: https://hypothesis.readthedocs.io/en/latest/data.html?highlight=strategies.data#hypothesis.strategies.data .. _`composite`: https://hypothesis.readthedocs.io/en/latest/data.html?highlight=strategies.composite#hypothesis.strategies.composite .. _`line`: https://github.com/unifyai/ivy/blob/b2305d1d01528c4a6fa9643dfccf65e33b8ecfd8/ivy_tests/test_ivy/test_functional/test_core/test_manipulation.py#L477 .. _`here`: https://github.com/unifyai/ivy/blob/b2305d1d01528c4a6fa9643dfccf65e33b8ecfd8/ivy_tests/test_ivy/test_functional/test_core/test_manipulation.py#L392 .. 
_`this`: https://github.com/unifyai/ivy/blob/b2305d1d01528c4a6fa9643dfccf65e33b8ecfd8/ivy_tests/test_ivy/test_functional/test_core/test_sorting.py#L18 .. _`example`: https://github.com/unifyai/ivy/blob/b2305d1d01528c4a6fa9643dfccf65e33b8ecfd8/ivy_tests/test_ivy/helpers.py#L1085 .. _`test_concat`: https://github.com/unifyai/ivy/blob/1281a2baa15b8e43a06df8926ceef1a3d7605ea6/ivy_tests/test_ivy/test_functional/test_core/test_manipulation.py#L51 .. _`test_device`: https://github.com/unifyai/ivy/blob/main/ivy_tests/test_ivy/test_functional/test_core/test_device.py .. _`test_manipulation`: https://github.com/unifyai/ivy/blob/main/ivy_tests/test_ivy/test_functional/test_core/test_manipulation.py .. _`test_layers`: https://github.com/unifyai/ivy/blob/main/ivy_tests/test_ivy/test_functional/test_nn/test_layers.py .. _`keyword`:https://github.com/unifyai/ivy/blob/b2305d1d01528c4a6fa9643dfccf65e33b8ecfd8/ivy_tests/test_ivy/helpers.py#L1108 .. _`arguments`: https://github.com/unifyai/ivy/blob/b2305d1d01528c4a6fa9643dfccf65e33b8ecfd8/ivy_tests/test_ivy/helpers.py#L1354 .. _`documentation`: https://hypothesis.readthedocs.io/en/latest/quickstart.html .. _`test_gelu`: https://github.com/unifyai/ivy/blob/b2305d1d01528c4a6fa9643dfccf65e33b8ecfd8/ivy_tests/test_ivy/test_functional/test_nn/test_activations.py#L104 .. _`test_array_function`: https://github.com/unifyai/ivy/blob/0fc4a104e19266fb4a65f5ec52308ff816e85d78/ivy_tests/test_ivy/helpers.py#L401 .. _`artifact`: https://docs.github.com/en/actions/using-workflows/storing-workflow-data-as-artifacts .. _`repo`: https://github.com/unifyai/ivy .. _`discord`: https://discord.gg/sXyFF8tDtm .. _`ivy tests thread`: https://discord.com/channels/799879767196958751/1189907526226034698 .. _`test helpers`: https://github.com/unifyai/ivy/tree/main/ivy_tests/test_ivy/helpers/hypothesis_helpers .. _`get_dtypes`: https://github.com/unifyai/ivy/blob/e50f71e283313caa9737f3c284496022ac67b58b/ivy_tests/test_ivy/helpers/hypothesis_helpers/dtype_helpers.py#L60 .. _`dtype_and_values`: https://github.com/unifyai/ivy/blob/e50f71e283313caa9737f3c284496022ac67b58b/ivy_tests/test_ivy/helpers/hypothesis_helpers/array_helpers.py#L83 .. _`dtype_values_axis`: https://github.com/unifyai/ivy/blob/e50f71e283313caa9737f3c284496022ac67b58b/ivy_tests/test_ivy/helpers/hypothesis_helpers/array_helpers.py#L235 .. _`array_values`: https://github.com/unifyai/ivy/blob/e50f71e283313caa9737f3c284496022ac67b58b/ivy_tests/test_ivy/helpers/hypothesis_helpers/array_helpers.py#L543 .. _`array_dtypes`: https://github.com/unifyai/ivy/blob/e50f71e283313caa9737f3c284496022ac67b58b/ivy_tests/test_ivy/helpers/hypothesis_helpers/dtype_helpers.py#L15 .. _`array_bools`: https://github.com/unifyai/ivy/blob/e50f71e283313caa9737f3c284496022ac67b58b/ivy_tests/test_ivy/helpers/hypothesis_helpers/array_helpers.py#L17 .. _`reshape_shapes`: https://github.com/unifyai/ivy/blob/e50f71e283313caa9737f3c284496022ac67b58b/ivy_tests/test_ivy/helpers/hypothesis_helpers/general_helpers.py#L16 .. _`get_axis`: https://github.com/unifyai/ivy/blob/e50f71e283313caa9737f3c284496022ac67b58b/ivy_tests/test_ivy/helpers/hypothesis_helpers/general_helpers.py#L178 .. _`get_shape`: https://github.com/unifyai/ivy/blob/e50f71e283313caa9737f3c284496022ac67b58b/ivy_tests/test_ivy/helpers/hypothesis_helpers/general_helpers.py#L67 .. _`get_bounds`: https://github.com/unifyai/ivy/blob/e50f71e283313caa9737f3c284496022ac67b58b/ivy_tests/test_ivy/helpers/hypothesis_helpers/general_helpers.py#L145 .. 
_`subsets`: https://github.com/unifyai/ivy/blob/e50f71e283313caa9737f3c284496022ac67b58b/ivy_tests/test_ivy/helpers/hypothesis_helpers/general_helpers.py#L48
.. _`num_positional_args`: https://github.com/unifyai/ivy/blob/e50f71e283313caa9737f3c284496022ac67b58b/ivy_tests/test_ivy/helpers/testing_helpers.py#L78
.. _`CI Pipeline`: continuous_integration.rst
.. _`Hypothesis docs`: https://hypothesis.readthedocs.io/en/latest/data.html#core-strategies
.. _`this`: https://github.com/unifyai/ivy/blob/8dcc33b895240395686db165c710ac31708aa691/ivy_tests/test_ivy/test_functional/test_core/test_general.py#L1650

On top of the Array API `test suite`_, which is included as a submodule mapped to the folder :code:`test_array_api`, there is also a collection of Ivy tests, located in the subfolder `test_ivy`_.

These tests serve two purposes:

#. test functions and classes which are *not* part of the standard
#. test additional required behaviour for functions which *are* part of the standard. The standard only mandates a subset of required behaviour, which the Ivy functions generally extend upon.

As done in the `test suite`_, we also make use of `Hypothesis`_ for performing property-based testing.

Testing Pipeline
----------------

.. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/deep_dive/ivy_tests/testing_pipeline.png?raw=true
   :align: center
   :width: 100%
   :class: dark-light

*An abstract look at the Ivy testing cycle.*

1. **Test Data Generation**: At this stage, we generate the test data for the testing function, using `Hypothesis`_ and `test helpers`_ strategies. This is the most **important** step; we should ensure that our data generation is complete and covers all of the possible inputs. We generate the input data inside the :code:`@given` decorator that wraps every test.

2. **Pre-execution Test Processing**: After the data is generated, more input processing is needed before testing the function. This depends on which functions we are testing: `core functions <https://github.com/unifyai/ivy/blob/e1acb3228d15697acb6f1e14602336fef6d23bd5/ivy_tests/test_ivy/helpers/function_testing.py#L37>`_ require different input processing from `frontend functions <https://github.com/unifyai/ivy/blob/e1acb3228d15697acb6f1e14602336fef6d23bd5/ivy_tests/test_ivy/helpers/function_testing.py#L379>`_. One of the required pre-processing steps for any test function is converting the array input to a valid framework-specific array, since later in the testing process we call the backend framework function; for example, TensorFlow's :code:`abs` function requires the input to be a :code:`tf.Tensor`, not an :class:`ivy.Array`.

3. **Test Execution**: After the input data is generated and processed, we assert that the result of the function is correct; this includes asserting that the result has the correct values, shape, and data type, and that this is consistent across all of our backends.

.. note::
   Some functions are not tested for values when this is not possible; for example, we cannot assert that random functions produce the same values. In this case, we should assert that the data has certain properties, and asserting that the values have specified bounds is a good start.

4. **Test Results**: If a test fails, `Hypothesis`_ and the `test helpers`_ will print an exhaustive log, including the generated test case, the results of the function, etc.

Hypothesis
----------

Using pytest fixtures (such as the ones removed in this `commit`_) causes a grid search to be performed for all combinations of parameters.
This is great when we want the test to be very thorough, but it can make the entire test suite very time-consuming.
Before the changes in this commit, there were 300+ separate tests being run in total, just for this :func:`ivy.abs` function.
If we take this approach for every function, we might hit the runtime limit permitted by GitHub Actions.

A more elegant and efficient solution is to use the `Hypothesis`_ module, which intelligently samples from all of the possible combinations within user-specified ranges, rather than grid searching all of them every single time.
The intelligent sampling is possible because Hypothesis enables the results of previous test runs to be cached; the new samples on subsequent runs are then selected intelligently, avoiding samples which previously passed the tests, and sampling from unexplored combinations.
Combinations which are known to have failed on previous runs are also repeatedly tested.
With the `uploading`_ and `downloading`_ of the :code:`.hypothesis` cache as an `artifact`_, these useful properties also hold in Ivy's GitHub Actions `continuous integration`_ (CI) tests.

Rather than making use of :code:`pytest.mark.parametrize`, the Ivy tests make use of Hypothesis `search strategies`_.
This reference `commit`_ outlines the difference between using pytest parametrizations and Hypothesis, for :func:`ivy.abs`.
Among other changes, all :code:`pytest.skip()` calls were replaced with return statements, as pytest skipping does not play nicely with Hypothesis testing.

Data Generation
---------------

We aim to make the data generation exhaustive for three out of the four kinds of Ivy functions: primary, compositional, and mixed.
Exhaustive data generation implies that all possible inputs and combinations of inputs are covered.
Take `finfo`_, for example.
It can take either arrays or dtypes as input, hence the `data generation`_ reflects this using the bespoke search strategy :code:`_array_or_type`.
However, such rigorous testing is not necessary for standalone functions (those that are entirely self-contained in the Ivy codebase without external references).
These kinds of functions may only require standard pytest testing using :code:`parametrize`, e.g. `test_default_int_dtype`_.
See `Function Types`_ for further clarity on the various kinds of functions in Ivy.

The way data is generated is described by the :code:`hypothesis.strategies` module, which contains a variety of `methods`_ that have been used widely in each of Ivy's functional and stateful submodule tests.
An initialized strategy is an object that is used by Hypothesis to generate data for the test.
For example, let's write a strategy that generates a random data type.
First, let's define a template function for printing examples generated by Hypothesis strategies:

.. code-block:: python

    >>> def print_hypothesis_examples(strategy: st.SearchStrategy, n=3):
    >>>     for i in range(n):
    >>>         print(strategy.example())

.. code-block:: python

    >>> dtypes = ("int32", "uint32", "float32", "bool")
    >>> custom_strategy = st.sampled_from(dtypes)
    >>> print_hypothesis_examples(custom_strategy)

    float32
    bool
    uint32

**Note**: The output will be randomized on each run.

This is quite a simplistic example and does not cover the intricacies behind the helper functions in the *test_ivy* directory.
We are simply sampling a random data type from the tuple :code:`dtypes`; this could be used, for example, to generate data for the :code:`dtype` parameter of :code:`ivy.ones`.
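As a quick, purely illustrative demonstration (assuming the :code:`numpy` backend is set), a data type drawn from this strategy could be passed straight to :code:`ivy.ones` when experimenting interactively:

.. code-block:: python

    >>> import ivy
    >>> ivy.set_backend("numpy")
    >>> dtype = custom_strategy.example()  # varies per run; assume it drew "float32"
    >>> ivy.ones((2, 2), dtype=dtype)
    ivy.array([[1., 1.],
               [1., 1.]])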
To draw an example from a strategy, we use the :code:`example()` method, which generates a random example from the strategy; this is only for experimentation purposes, and we should not use it during an actual test.

In the example above, :code:`st.sampled_from` is what we call a strategy.
To briefly describe:

* `sampled_from`_ accepts a collection of objects. This strategy will return a value that is sampled from this collection.

* `lists`_ accepts another strategy which describes the elements of the list being generated. This is best used when a sequence of varying length is required to be generated, with elements that are described by other strategies.

Writing your own strategy
^^^^^^^^^^^^^^^^^^^^^^^^^

We will not be covering all of the strategies that Hypothesis provides, but to give you a glimpse of what they're capable of, we will briefly explain some of the strategies and write a new strategy to be used later for testing.
Read more about strategies in the `Hypothesis docs`_.

1. `integers`_ - generates integer values within a given range.
2. `none`_ - returns a strategy which only generates None.
3. `one_of`_ - allows us to specify a collection of strategies, and any given datum will be drawn from "one of" them. Hypothesis has the *pipe* operator overloaded as a shorthand for :code:`one_of`.
4. `composite`_ - provides a decorator, which permits us to form our own strategies for describing data by composing Hypothesis' built-in strategies.

Suppose you need to generate a 1-D array or a scalar value, together with the index of an element if an array is generated, and None otherwise.

.. code-block:: python

    @st.composite
    def array_or_scalar(draw):
        values = draw(st.integers() | st.lists(st.integers()))
        if isinstance(values, list) and values:
            len_of_array = len(values)
            index = draw(st.integers(min_value=0, max_value=len_of_array - 1))
        else:
            index = None
        return values, index

We can then later use this strategy in any of our tests.

Writing Hypothesis Tests
^^^^^^^^^^^^^^^^^^^^^^^^

Writing Hypothesis tests is intuitive and simple.
As an example, we've implemented our own :code:`add` function, which takes in two parameters, :code:`x` and :code:`y`.
We would like to run a test that compares it to the Python :code:`+` operator and **asserts** that it returns the same values.

.. code-block:: python

    def add(x, y):
        return y + x

    @given(
        x=st.integers(),
        y=st.integers(),
    )
    def test_add(x, y):
        assert x + y == add(x, y)

1. First, we define our function :code:`add`, which simply returns :code:`y + x`.
2. We then define a test function, which simply **asserts** that the result of :code:`x + y` is exactly equal to :code:`add(x, y)`.
3. We add the Hypothesis :code:`@given` decorator, passing two keyword arguments, :code:`x` and :code:`y`, each corresponding to the variables we are going to run the test on. :code:`@given` is our entry point to Hypothesis; it expects a :class:`strategy` to be passed in, describing what kind of data to generate. For our example, we choose to only test for integers using the :code:`st.integers()` strategy.

Ivy Test Decorators
^^^^^^^^^^^^^^^^^^^

- Why do we need to handle test decorators?

In order to run a test, a lot of pre-processing must be done, e.g. importing the function, checking whether it supports complex data types, whether it runs on CPU, how many parameters it takes, and whether they are positional, keyword-only, or both, along with a lot of other information about the function being tested.
This allows us later to run the test efficiently and in a **complete** way.
All of this happens at collection time.
- What do the handle test decorators do?

1. Generate the test flags:

   * :code:`native_array` flags
   * :code:`as_variable` flags
   * :code:`with_out` flag

2. Generate :code:`num_positional_args`.

   The flags that the decorators generate may be more or fewer depending on the function; the **Ivy Functional API** requires a :code:`gradient_test` flag, while some test functions, like :code:`test_gpu_is_available`, do not require any of these flags, and therefore the decorator will not generate any of them.

3. Generate test-specific parameters: :code:`fn_name`, :code:`fn_tree`, :code:`method_tree`.
4. Check for the function's supported data types and devices.
5. Implicitly wrap the test function using the Hypothesis :code:`@given` decorator; this allows us to write less code that is more readable and easier to update and maintain.

This is not an exhaustive list of what the :code:`handle_test` decorators actually do, and they may do more or less in the future.
To summarize, the test decorators perform some of the **pre-execution test processing** part of the testing pipeline.

- Why do we have multiple handle test decorators?

Having multiple test decorators is mainly for efficiency; :code:`handle_test` could do what :code:`handle_frontend_test` does, it would just handle the parameters slightly differently, and this could be inferred at run time, but we choose to keep separate decorators for the different general usages.
Currently we have 5 separate decorators:

1. :code:`handle_test`
2. :code:`handle_method`
3. :code:`handle_frontend_test`
4. :code:`handle_frontend_method`
5. :code:`handle_example`

One of the few differences between the 5 decorators is that they generate different kinds of flags, some more and some fewer, but they all share the same general structure.

- Integration

Our test decorators actually transform into :code:`@given` decorators at pytest collection time, which allows us to use other **Hypothesis** decorators such as :code:`@reproduce_failure`, :code:`@settings`, and :code:`@seed`.

Writing Ivy Tests
^^^^^^^^^^^^^^^^^

As mentioned previously, testing Ivy functions needs a lot of pre-processing and post-processing; using only the :code:`@given` decorator would not be sufficient to write an effective test.
The following example describes how to implement a test for the function :code:`ivy.abs`, using our test decorators and test helpers.

.. code-block:: python

    @handle_test(
        fn_tree="functional.ivy.abs",
        dtype_and_x=helpers.dtype_and_values(
            available_dtypes=helpers.get_dtypes("numeric")
        ),
    )
    def test_abs(
        *,
        dtype_and_x,
        test_flags,
        backend_fw,
        fn_name,
        on_device,
        ground_truth_backend,
    ):
        input_dtype, x = dtype_and_x
        helpers.test_function(
            ground_truth_backend=ground_truth_backend,
            input_dtypes=input_dtype,
            test_flags=test_flags,
            fw=backend_fw,
            fn_name=fn_name,
            on_device=on_device,
            x=x[0],
        )

Integration of Strategies into Ivy Tests
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Once a strategy is initialized, the :code:`@given` decorator is added to the test function for drawing values from the strategy and passing them as inputs to the test.
For example, in this code snippet:

.. code-block:: python

    @handle_test(
        dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("numeric")),
    )

Let's take a deeper look at :code:`ivy.abs`.
According to the function signature, it accepts two arguments: :code:`x`, which can be a Python numeric or an :class:`ivy.Array` of numeric data type, and :code:`out`, an optional output array.
With a lot of help from the `test helpers`_, we can simply generate a random input that covers all the possible combinations using the :code:`dtype_and_values` composite strategy, specifying the list of data types to sample from with another composite strategy, :code:`get_dtypes`, which samples valid data types according to the backend being tested.

For the :code:`out` keyword argument, the :code:`@handle_test` decorator generates a boolean deciding whether an :code:`out` argument should be provided; thankfully, the :code:`test_function` helper does a lot under the hood to properly create an array for the :code:`out` argument.
If the function does not support :code:`out`, we should explicitly specify that boolean flags for :code:`out` should not be generated by setting :code:`with_out=False`; in this case, :code:`@handle_test` will not generate a value for :code:`with_out`.

As discussed above, the helper functions use the composite decorator, which helps in defining a series of custom strategies.
It can be seen that :code:`dtype_and_x` uses the :code:`dtype_and_values` strategy to generate numeric data types (for more details, see the section below) and corresponding array elements, whose shapes can be specified manually or are randomized by default.
The generated data is returned as a tuple.

One thing to note here is the :code:`test_flags` variable in the test function.
This is basically an object which is initialized internally and captures all the flags mentioned above for the test during collection time.
These flags are then available to the helper function at test time.

The test flags can also be generated explicitly, like this:

.. code-block:: python

    @handle_test(
        as_variable_flags = st.lists(st.booleans(), min_size = <any>, max_size = <any>),
        native_array_flags = st.lists(st.booleans(), min_size = <any>, max_size = <any>),
        container_flags = st.lists(st.booleans(), min_size = <any>, max_size = <any>),
        # <any> integer value can be passed
        test_instance_method = st.just(<bool>),
        # <bool> can either be True or False
        test_with_out = st.just(<bool>),
        test_gradients = st.just(<bool>),
        test_inplace = st.just(<bool>),
    )

In the :code:`test_abs` test above, one can assume that these flags are automatically loaded inside the :code:`test_flags` object with `default`_ values.
Test flags are mostly similar across decorators, with slight differences in the variable names.
This is how we generate them for method testing:

.. code-block:: python

    @handle_method(
        init_native_arrays = st.lists(st.booleans(), min_size = <any>, max_size = <any>),
        init_as_variable_flags = st.lists(st.booleans(), min_size = <any>, max_size = <any>),
        init_container_flags = st.lists(st.booleans(), min_size = <any>, max_size = <any>),
        method_native_arrays = st.lists(st.booleans(), min_size = <any>, max_size = <any>),
        method_as_variable_flags = st.lists(st.booleans(), min_size = <any>, max_size = <any>),
        method_container_flags = st.lists(st.booleans(), min_size = <any>, max_size = <any>),
        test_gradients = st.just(<bool>)
    )
    def test_some_method(
        *,
        init_flags,
        method_flags,
    ):
        pass

The only difference here is that the :code:`test_flags` object is divided in two: the :code:`init_flags` and the :code:`method_flags`.
The above standards extend to :code:`handle_frontend_test` and :code:`handle_frontend_method` respectively.

Let's look at the data produced by this strategy:
.. code-block:: python

    >>> print_hypothesis_examples(dtype_and_values(), 2)

    (['int8'], [array(69, dtype=int8)])
    (['int8'], [array([-23, -81], dtype=int8)])

These values are then unpacked and converted to the :class:`ivy.Array` class, with the corresponding dtypes.
The test then runs on the newly created arrays with the specified data types.

Adding Explicit Examples to Tests
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

In certain cases, we'd like to explicitly test certain examples which are outliers and aren't feasible to define as a strategy; for this, we can use the :code:`@handle_example` decorator.
One such example is `this`_, where we need to test :code:`ivy.set_item` with slice objects.

Hypothesis allows us to test with an explicit example deterministically using the `@example`_ decorator.
Our :code:`@handle_example` decorator is a wrapper around this, which helps us use the default values of the test flags and method flags, allowing us to easily test explicit examples for the Ivy functional tests, frontend tests, methods, and frontend methods.

We have to pass one of the following 4 arguments as `True` to the :code:`@handle_example` decorator, depending on what kind of test we are dealing with:

1. `test_example`
2. `test_frontend_example`
3. `test_method_example`
4. `test_frontend_method_example`

The following example shows how we can use the :code:`@handle_example` decorator to test one of the frontend functions by adding an explicit example.

.. code-block:: python

    @handle_frontend_test(
        fn_tree="paddle.acos",
        dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float"),),
    )
    @handle_example(
        test_frontend_example=True,
        dtype_and_x=(["float32"], [np.array(9.0, dtype=np.float32)]),
        fn_tree="ivy.functional.frontends.paddle.acos",
        test_flags={
            "native_arrays": [True],
            "with_copy": True,
            "with_out": True,
        },
    )
    def test_some_function(
        *,
        dtype_and_x,
        fn_tree,
        frontend,
        test_flags,
        backend_fw,
    ):
        pass

Why do we need helper functions?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

It is usually the case that any Ivy function should run seamlessly on all the possible varieties, as well as the edge cases, encountered through the following parameters:

* All possible data types - **composite**
* Boolean array types, if the function expects one - **composite**
* Possible range of values within each data type - **composite**
* When the input is a container - **boolean**
* When the function can also be called as an instance method - **boolean**
* When the input is a native array - **boolean**
* :code:`out` argument support, if the function has one - **boolean**

**Note**: Each test function has its own requirements, and the parameter criteria listed above do not cover everything.

Sometimes the function requirements are straightforward, for instance generating integers, boolean values, and float values.
Whereas, in the case of specific parameters like:

* array_values
* data_types
* valid_axes
* lists or tuples or sequences of varied input types
* generating subsets
* generating arbitrary shapes of arrays
* getting axes

we need a hand-crafted data generation policy (composite); a sketch of such a composite strategy is shown below.
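The following is a purely illustrative sketch (not one of Ivy's actual helpers), assuming :code:`hypothesis.strategies` is imported as :code:`st` as in the earlier examples; it first draws an arbitrary shape and then draws an axis that is guaranteed to be valid for that shape:

.. code-block:: python

    @st.composite
    def shape_and_valid_axis(draw, min_dims=1, max_dims=4, min_dim_size=1, max_dim_size=5):
        # Draw an arbitrary shape first ...
        shape = draw(
            st.lists(
                st.integers(min_value=min_dim_size, max_value=max_dim_size),
                min_size=min_dims,
                max_size=max_dims,
            )
        )
        # ... then draw an axis that is valid for that shape.
        axis = draw(st.integers(min_value=-len(shape), max_value=len(shape) - 1))
        return tuple(shape), axis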
For frontend functions, these are the intersection of the frontend framework and the backend framework supported data types. We should be **always** using this helper function whenever we need to sample a data type. .. code-block:: python >>> print_hypothesis_examples(helpers.get_dtypes(kind="integer"), 1) ['int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', 'uint64'] >>> print_hypothesis_examples(helpers.get_dtypes(kind="numeric", full=False), 3) ['uint64'] ['float16'] ['int8'] 2. `dtype_and_values`_ - This function generates a tuple of NumPy arrays and their data types. Number of arrays to generate is specified using :code:`num_arrays` parameter, generates 1 array by default. .. code-block:: python >>> print_hypothesis_examples(helpers.dtype_and_values(), 3) (['bool'], [array([ True, True, True, False])]) (['float64'], [array(-2.44758124e-308)]) (['int16'], [array([[-11228, 456], [-11228, -268]], dtype=int16)]) This function contains a list of keyword arguments. To name a few, available_dtypes, max_value, allow_inf, min_num_dims etc. It can be used wherever an array of values is expected. That would again be a list of functions which expects at least one :class:`ivy.Array`. 3. `dtype_values_axis`_ - Similar to `dtype_and_values`_, generates an associated valid axis for the array. .. code-block:: python >>> print_hypothesis_examples(helpers.dtype_values_axis(), 3) (['int16'], [array([ -9622, 28136, 6375, -12720, 21354 -4], dtype=int16)], 0) (['float16'], [array([-1.900e+00, 5.955e+04, -1.900e+00, -5.955e+04], dtype=float16)], 1) (['int8'], [array([[14], [10]], dtype=int8)], 1) 4. `array_values`_ - It works in a similar way as the `dtype_and_values`_ function, with the only difference being, here an extensive set of parameters and sub-strategies are used to generate array values. For example-: .. code-block:: python >>> strategy = helpers.array_values( dtype="int32", shape=(3,), min_value=0, exclude_min=True, large_abs_safety_factor=2, safety_factor_scale="linear") >>> print_hypothesis_examples(strategy, 2) array([57384, 25687, 248], dtype=int32) array([1, 1, 1], dtype=int32) 5. `array_dtypes`_ - As the name suggests, this will generate arbitrary sequences of valid float data types. The sequence parameters like *min_size*, and *max_size*, are specified at test time based on the function. This is what the function returns -: .. code-block:: python # A sequence of floats with arbitrary lengths ranging from [1,5] >>> print_hypothesis_examples(array_dtypes(helpers.ints(min_value=1, max_value=5))) ['float16', 'float32', 'float16', 'float16', 'float32'] ['float64', 'float64', 'float32', 'float32', 'float16'] This function should be used whenever we are testing an ivy function that accepts at least one array as an input. 6. `array_bools`_ - This function generates a sequence of boolean values. For example-: .. code-block:: python >>> print_hypothesis_examples(array_bools(na = helpers.ints(min_value=1, max_value=5))) [False, True, True, False, True] [False] This function should be used when a boolean value is to be associated for each value of the other parameter, when generated by a sequence. For example, in `test_concat`_, we are generating a list of inputs of the dimension (2,3), and for each input we have three boolean values associated with it that define additional parameters(container, as_variable, native_array). Meaning if the input is to be treated as a container, at the same time, is it a variable or a native array. 7. 
`lists`_ - As the name suggests, we use it to generate lists composed of anything, as specified by the user. For example in `test_device`_ file, it is used to generate a list of array_shapes, in `test_manipulation`_, it is used to generate a list of common_shapes, and more in `test_layers`_. The function takes in 3 arguments, first is the strategy by which the elements are to be generated, in majority of the cases this is **helpers.ints**, with range specified, and the other arguments are sequence arguments as specified in **array_dtypes**. For example -: .. code-block:: python >>> print_hypothesis_examples(lists(helpers.ints(min_value=1, max_value=6), min_size = 0,max_size = 5)) [2, 5, 6] [1] The generated values are then passed to the array creation functions inside the test function as tuples. 9. valid_axes - This function generates valid axes for a given array dimension. For example -: .. code-block:: python >>> print_hypothesis_examples(valid_axes(helpers.ints(min_value=2, max_value=3), size_bounds = [1,3])) (-3, 1, -1) (1, -2) It should be used in functions which expect axes as a required or an optional argument. 10. `integers`_ - This is similar to the :code:`helpers.ints` strategy, with the only difference being that here the range can either be specified manually, or a shared key can be provided. The way shared keys work has been discussed in the *Important Strategies* sections above. 11. `reshape_shapes`_ - This function returns a valid shape after a reshape operation is applied given as input of any arbitrary shape. For example-: .. code-block:: python >>> print_hypothesis_examples(reshape_shapes([3,3]), 3) (9, 1) (9,) (-1,) It should be used in places where broadcast operations are run, either as a part of a larger computation or in a stand-alone fashion. 12. `subsets`_ - As the function name suggests, it generates subsets of any sequence, and returns that subset as a tuple. For example-: .. code-block:: python >>> some_sequence = ['tensorflow', 1, 3.06, 'torch', 'ivy', 0] >>> print_hypothesis_examples(subsets(some_sequence), 4) ('tensorflow', 'ivy', 0) ('tensorflow', 1, 3.06, 'torch', 'ivy') ('tensorflow', 1, 'torch', 0) (1, 3.06) It ensures full coverage of the values that an array can have, given certain parameters like *allow_nan, allow_subnormal, allow_inf*. Such parameters usually test the function for edge cases. This function should be used in places where the result doesn’t depend on the kind of value an array contains. 13. `get_shape`_ - This is used to generate any arbitrary shape.If *allow_none* is set to :code:`True`, then an implicit *st.one_of* strategy is used, wherein the function will either generate :code:`None` as shape or it will generate a shape based on the keyword `arguments`_ of the function. For example -: .. code-block:: python >>> print_hypothesis_examples( get_shape( allow_none = True, min_num_dims = 2, max_num_dims = 7, min_dim_size = 2 ), 3 ) (5, 5, 8) (4, 3, 3, 4, 9, 9, 8) (9, 9, 3, 5, 6) 14. `get_bounds`_ - It’s often the case that we need to define a lower and an upper limit for generating certain values, like floats, sequences, arrays_values etc. This strategy can be put to use when we want our function to pass on values in any range possible, or we’re unsure about the limits. We can also use the function to generate a list of possible bounds wherein the function fails. For example-: .. 
.. code-block:: python

    >>> input_dtype = helpers.get_dtypes("integer").example()
    >>> print_hypothesis_examples(get_bounds(input_dtype.example()))
    (73, 36418)
    (213, 21716926)

**Note** - Under the hood, the **array_values** strategy is called if the data type is *integer*, and **none_or_list_of_floats** is called when the data type is *float*.

15. get_probs - This is used to generate a tuple containing two values: the first being the *unnormalized probabilities* for all elements in a population, and the second being the *population size*.
For example:

.. code-block:: python

    >>> input_dtype = helpers.get_dtypes("float").example()
    >>> print_hypothesis_examples(get_probs(input_dtype.example()))
    ([[6.103515625e-05, 1.099609375], [1.0, 6.103515625e-05], [1.0, 1.0], [0.5, 6.103515625e-05]], 2)

Such strategies can be used to test statistical and probabilistic functions in Ivy.

16. `get_axis`_ - Similar to the **valid_axes** strategy, it generates an axis given any arbitrary shape as input.
For example:

.. code-block:: python

    >>> print_hypothesis_examples(get_axis(shape = (3,3,2)))
    (-1,)
    (-2, -1)

17. `num_positional_args`_ - A helper function which generates the number of positional arguments, given a function name from any ivy submodule.
For example:

.. code-block:: python

    >>> print_hypothesis_examples(num_positional_args("matmul"), 3)
    2
    0
    0

This function generates any number of positional arguments within the range [0, number_positional_arguments].
It can be helpful when we are testing a function with a varied number of arguments.

How to write Hypothesis Tests effectively
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

It would be helpful to keep in mind the following points while writing tests:

- Don't use :code:`data.draw` in the function body.
- Don't use any unreproducible data generation (e.g. :code:`np.random.uniform`) in the function body.
- Don't skip anything or use a return statement in the function body.
- The function should only call :code:`helpers.test_function`, and then possibly perform a custom value test if :code:`test_values=False` in the arguments.
- We should add as many possibilities as we can while generating data, covering all the function arguments.
- If you find yourself repeating some logic which is specific to a particular submodule, then create a private helper function and add it to that submodule.
- If the logic is general enough, it can instead be added to :code:`helpers`, enabling it to be used for tests in other submodules.

Testing Partial Mixed Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

As explained in the `Function Types <function_types.rst>`_ section, partial mixed functions are a special type of mixed functions that either utilize the compositional implementation or the primary implementation depending on some conditions on the input.
Therefore, the data types supported by partial mixed functions depend on which implementation will be used for the given input.
For example, when :code:`function_supported_dtypes` is called with respect to `ivy.linear` with the torch backend, the following output is returned:

.. code-block:: python

    {'compositional': ('float32', 'int8', 'uint8', 'float64', 'int16', 'int32', 'int64'), 'primary': ('bool', 'float32', 'int8', 'uint8', 'float64', 'int64', 'int16', 'int32')}

As can be seen from the above output, the data types supported depend on the implementation used for the given input.
It is for this reason that we need a slightly different pipeline for testing partial mixed functions.
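For reference, a dictionary like the one above can be reproduced with a short snippet along the following lines (a minimal sketch; the exact data types listed will vary with the backend and Ivy versions installed):

.. code-block:: python

    import ivy

    # select the backend whose implementations we want to inspect
    ivy.set_backend("torch")

    # for partial mixed functions, the supported data types are reported
    # separately for the compositional and the primary implementations
    print(ivy.function_supported_dtypes(ivy.linear))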
Basically, while writing the strategies for the tests of these functions, we need to first determine which implementation will be used and then based on that generate the data to test the function. Here's an example from the test of :code:`ivy.linear` function: .. code-block:: python def x_and_linear(draw): mixed_fn_compos = draw(st.booleans()) is_torch_backend = ivy.current_backend_str() == "torch" dtype = draw( helpers.get_dtypes("numeric", full=False, mixed_fn_compos=mixed_fn_compos) ) in_features = draw( helpers.ints(min_value=1, max_value=2, mixed_fn_compos=mixed_fn_compos) ) out_features = draw( helpers.ints(min_value=1, max_value=2, mixed_fn_compos=mixed_fn_compos) ) x_shape = ( 1, 1, in_features, ) weight_shape = (1,) + (out_features,) + (in_features,) # if backend is torch and we're testing the primary implementation # weight.ndim should be equal to 2 if is_torch_backend and not mixed_fn_compos: weight_shape = (out_features,) + (in_features,) bias_shape = ( 1, out_features, ) x = draw( helpers.array_values(dtype=dtype[0], shape=x_shape, min_value=0, max_value=10) ) weight = draw( helpers.array_values( dtype=dtype[0], shape=weight_shape, min_value=0, max_value=10 ) ) bias = draw( helpers.array_values( dtype=dtype[0], shape=bias_shape, min_value=0, max_value=10 ) ) return dtype, x, weight, bias As can be seen from the above code, a boolean parameter :code:`mixed_fn_compos` is generated first to determine whether to generate test data for the compositional implementation or the primary one. When it is equal to :code:`True`, the relevant data for the compositional implementation should be generated and when :code:`False`, data corresponding to the primary implementation should be generated. Another boolean, :code:`is_torch_backend` is to be used to determine if the current backend is :code:`torch`. Then these booleans are used together in this :code:`if` condition: :code:`if is_torch_backend and not mixed_fn_compos` and :code:`weight_shape` is updated to be 2 dimensional because the torch backend implementation only supports 2 dimensional weights. Notice that the parameter :code:`mixed_fn_compos` is also be passed to :code:`helpers.get_dtypes` and :code:`helpers.ints` functions so that the dtypes corresponding to the implementation to be tested are returned. In general, :code:`helpers.get_dtypes`, :code:`helpers.ints`, :code:`helpers.floats`, and :code:`helpers.numbers` all have the `mixed_fn_compos` argument which must be supplied for the correct dtypes to be returned. In case the backend has a partial mixed implementation, the dtypes corresponding to either the compositional or the primary implementation are returned, depending on the value of the parameter, and otherwise the parameter is ignored. Rest of the testing pipeline is the same is as other functions. Bonus: Hypothesis' Extended Features ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 1. Hypothesis performs *Automated Test-Case Reduction*. That is, the *given* decorator strives to report the simplest set of input values that produce a given error. For the code block below-: .. code-block:: python @given( data = st.data(), input_dtype = st.sampled_from(ivy_np.valid_float_dtypes), as_variable=st.booleans() ) def test_demo( data, input_dtype, as_variable, ): shape = data.draw(get_shape(min_num_dims=1)) #failing assertions assert as_variable == False assert shape == 0 test_demo() Hypothesis reports the following -: .. 
.. code-block:: python

    Falsifying example: failing_test(
        data=data(...),
        input_dtype='float16',
        as_variable=True,
    )
    Draw 1: (1,)
    Traceback (most recent call last):
      File "<file_name>.py" line "123", in test_demo
        assert as_variable == False
    AssertionError

    Falsifying example: failing_test(
        data=data(...),
        input_dtype='float16',
        as_variable=False,
    )
    Draw 1: (1,)
    assert shape == 0
    AssertionError

As can be seen from the output above, the *given* decorator will report the *simplest* set of input values that produce a given error.
This is done through the process of **Shrinking**.
Each of Hypothesis' strategies has its own prescribed shrinking behavior; for integers, it will identify the integer closest to 0 that produces the error at hand.
Check out the `documentation`_ for more information on the shrinking behaviors of other strategies.

Hypothesis doesn't search for falsifying examples from scratch every time the test is run.
Instead, it saves a database of these examples associated with each of the project's test functions.
In the case of Ivy, the :code:`.hypothesis` cache folder is generated if one doesn't exist, otherwise new examples are added to the existing one.
We preserve this folder on the CI so that each commit re-uses the same example database, and it is ignored by git, thereby never forming part of a commit.

2. **--hypothesis-show-statistics**

This feature helps in debugging the tests, with methods like **note()** and custom **event()s** which add information to the test summary, along with a variety of performance details.
Let's look at the function `test_gelu`_:

**run** :code:`pytest --hypothesis-show-statistics <test_file>.py`

This test runs for every backend, and the output is shown below:

* **Jax**

.. image:: https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/deep_dive/ivy_tests/Jax_data_gen.png
   :width: 600

* **Numpy**

.. image:: https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/deep_dive/ivy_tests/numpy_data_gen.png
   :width: 600

* **Tensorflow**

.. image:: https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/deep_dive/ivy_tests/tensorflow_data_gen.png
   :width: 600

* **Torch**

.. image:: https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/deep_dive/ivy_tests/torch_data_gen.png
   :width: 600

It can be seen that the function doesn't fail for **Jax**, **Numpy**, and **Torch**, which is clearly not the case with **Tensorflow**, wherein 7 examples failed the test.
One important thing to note is the number of values for which **Shrinking** (discussed in brief above) happened.
Statistics for both the *generate phase* and, if the test fails, the *shrink phase* are printed in the output.
If the tests are re-run, *reuse phase* statistics are printed as well, where notable examples from previous runs are displayed.

Another argument which can be specified for a more detailed output is **--hypothesis-verbosity=verbose**.
Let's look at the new output for the same example:

.. image:: https://raw.githubusercontent.com/unifyai/unifyai.github.io/main/img/externally_linked/deep_dive/ivy_tests/test_run_data_gen.png
   :width: 600

As in the output above, Hypothesis will print all the examples for which the test failed when **verbosity** is set.

3. Some performance-related settings which might be helpful to know are:

a. **max_examples** - The number of valid examples Hypothesis will run.
It usually defaults to 100.
Turning it up or down will have an impact on the speed as well as the rigorousness of the tests. b. **deadline** - If an input takes longer than expected, it should be treated as an error. It is useful to detect weird performance issues. Self-Consistent and Explicit Testing ------------------------------------ The Hypothesis data generation strategies ensure that we test for arbitrary variations in the function inputs, but this makes it difficult to manually verify ground truth results for each input variation. Therefore, we instead opt to test for self-consistency against the same Ivy function with a NumPy backend. This is handled by :func:`test_array_function`, which is a helper function most unit tests defer to. This function is explained in more detail in the following sub-section. For *primary* functions, this approach works well. Each backend implementation generally wraps an existing backend function, and under the hood these implementations vary substantially. This approach then generally suffices to correctly catch bugs for most *primary* functions. However, for *compositional* and *mixed* functions, then it's more likely that a bug could be missed. With such functions, it's possible that the bug exists in the shared *compositional* implementation, and then the bug would be systematic across all backends, including the *ground truth* NumPy which the value tests for all backends compare against. Therefore, for all *mixed* and *compositional* functions, the test should also be appended with known inputs and known ground truth outputs, to safeguard against this inability for :func:`test_array_function` to catch systematic errors. These should be added using :code:`pytest.mark.parametrize`. However, we should still also include :func:`test_array_function` in the test, so that we can still test for arbitrary variations in the input arguments. test_array_function ------------------- The helper `test_array_function`_ tests that the function: #. can handle the :code:`out` argument correctly #. can be called as an instance method of the ivy.Array class #. can accept ivy.Container instances in place of any arguments for *nestable* functions, applying the function to the leaves of the container, and returning the resultant container #. can be called as an instance method on the ivy.Container #. is self-consistent with the function return values when using a NumPy backend :code:`array` in the name :func:`test_array_function` simply refers to the fact that the function in question consumes arrays in the arguments. So when should :func:`test_array_function` be used? The rule is simple, if the test should not pass any arrays in the input, then we should not use the helper :func:`test_array_function`. For example, :func:`ivy.num_gpus` does not receive any arrays in the input, and so we should not make use of :func:`test_array_function` in the test implementation. Running Ivy Tests ----------------- The CI Pipeline runs the entire collection of Ivy Tests for the module that is being updated on every push to the repo. You will need to make sure the Ivy Test is passing for each Ivy function you introduce/modify. If a test fails on the CI, you can see details about the failure under `Details -> Run Ivy <module> Tests` as shown in `CI Pipeline`_. You can also run the tests locally before making a PR. The instructions differ according to the IDE you are using. 
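Independently of any IDE, the tests can also be invoked directly from the terminal with :code:`pytest`.
As a minimal sketch (the module path and test name below are only illustrative, assuming the standard repository layout):

.. code-block:: bash

    # run a single Ivy test function, e.g. the abs test discussed earlier on this page
    pytest ivy_tests/test_ivy/test_functional/test_core/test_elementwise.py -k test_abs -v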
For PyCharm and Visual Studio Code specifically, you can refer to the :ref:`overview/contributing/setting_up:Setting Up Testing in PyCharm` section and the :ref:`overview/contributing/setting_up:Setting up for Free` section respectively.

Re-Running Failed Ivy Tests
---------------------------

When a Hypothesis test fails, the falsifying example is printed to the console by Hypothesis.
For example, in the :code:`test_result_type` test, we find the following output on running the test:

.. code-block::

    Falsifying example: test_result_type(
        dtype_and_x=(['bfloat16', 'int16'], [-0.9090909090909091, -1]),
        as_variable=False,
        num_positional_args=2,
        native_array=False,
        container=False,
        instance_method=False,
        fw='torch',
    )

It is usually most efficient to fix this particular example first, before running any other examples.
To achieve this, we can use the :code:`@example` Hypothesis decorator.
The :code:`@example` decorator ensures that a specific example is always tested when running a particular test.
The decorator requires the test arguments as parameters.
For the :code:`test_result_type` test, we can add the decorator as follows:

.. code-block::

    @example(
        dtype_and_x=(['bfloat16', 'int16'], [-0.9090909090909091, -1]),
        as_variable=False,
        num_positional_args=2,
        native_array=False,
        container=False,
        instance_method=False,
        fw='torch',
    )

This ensures that the given example is always tested while running the test, allowing one to debug the failure efficiently.

**Round Up**

This should hopefully have given you a good feel for how the tests are implemented in Ivy.

If you have any questions, please feel free to reach out on `discord`_ in the `ivy tests thread`_!

**Video**

.. raw:: html

    <iframe width="420" height="315" allow="fullscreen;"
            src="https://www.youtube.com/embed/2AwWuHIe2h8" class="video">
    </iframe>
ivy/docs/overview/deep_dive/ivy_tests.rst/0
{ "file_path": "ivy/docs/overview/deep_dive/ivy_tests.rst", "repo_id": "ivy", "token_count": 17619 }
ML Explosion ============ The number of open source ML projects has grown considerably in recent years, especially Deep Learning, as can be seen from the rapidly increasing number of GitHub repos containing the term “Deep Learning” over time. These projects are written in many different frameworks. .. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/background/ml_explosion/num_dl_repos_over_time.png?raw=true :align: center :width: 80% While this is a wonderful thing for researchers and developers, when we also consider the speed at which the frameworks are evolving, the shareability of code is significantly hindered, with projects and libraries becoming outdated in a matter of months if not rigorously maintained against the newest frameworks and also the newest framework versions. For software development pipelines where rapid prototyping and collaboration are vital, this is a significant bottleneck. As new future frameworks become available, backend-specific code quickly becomes outdated and obsolete, and users of these frameworks are constantly re-inventing the wheel. .. image:: https://github.com/unifyai/unifyai.github.io/blob/main/img/externally_linked/background/ml_explosion/ml_framework_evolution.png?raw=true :align: center :width: 80% If our desire is to provide a new framework that simultaneously supports all of the modern frameworks in a simple and scalable manner, then we must determine exactly where the common ground lies between them. Finding common ground between the existing frameworks is essential in order to design a simple, scalable, and universal abstraction. In the search for common ground, considering the language first, we can see that Python has become the clear front-runner. Looking a little deeper at these python frameworks, we find that all of these follow the same core principles of operation, exposing almost identical core functional APIs, but with unique syntax and arguments. There are only so many ways to manipulate a tensor, and unsurprisingly these fundamental tensor operations are consistent between frameworks. The functions exposed by each framework follow very similar conventions to those of Numpy, first introduced in 2006. A simple and scalable abstraction layer therefore presents itself. The functional APIs of all existing ML frameworks are all cut from the same cloth, adhering to similar sets of functions but with differing syntax and semantics. **Round Up** Hopefully, this has painted a clear picture of how many different ML frameworks have exploded onto the scene 🙂 Please reach out on `discord <https://discord.gg/sXyFF8tDtm>`_ if you have any questions!
ivy/docs/overview/motivation/ml_explosion.rst/0
{ "file_path": "ivy/docs/overview/motivation/ml_explosion.rst", "repo_id": "ivy", "token_count": 599 }
.. _`RWorks Vendor-Specific Compilers`: Vendor-Specific Compilers ========================= .. _`Intel C++ Compiler Classic (ICC)`: https://www.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference .. _`Intel oneAPI DPC++/C++ Compiler (ICX)`: https://software.intel.com/content/www/us/en/develop/tools/oneapi/components/dpc-compiler.html .. _`Intel`: https://www.intel.com/ .. _`ICC`: https://www.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference .. _`Khronos Group`: https://www.khronos.org/ .. _`LLVM`: https://llvm.org/ .. _`Nvidia CUDA Compiler (NVCC)`: https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc .. _`NVIDIA`: https://www.nvidia.com/ .. _`CUDA`: https://developer.nvidia.com/cuda-toolkit .. _`GCC`: https://gcc.gnu.org/ .. _`Microsoft Visual C++ Compiler`: https://docs.microsoft.com/en-us/cpp/ .. _`discord`: https://discord.gg/sXyFF8tDtm Below the vendor-specific APIs are the vendor specific compilers. As far as modern machine learning practitioners go, these compilers are very rarely interacted with directly. As for our own representation of the ML stack, these compilers are the lowest level building blocks that we consider. Of course, we could talk about assembly languages and byte code, but this is outside the scope of what is really relevant for ML practitioners when considering their software stack. ICC --- `Intel C++ Compiler Classic (ICC)`_ is the first of `Intel`_’s two C, C++, SYCL, and Data Parallel C++ (DPC++) compilers for Intel processor-based systems. It is available for Windows, Linux, and macOS operating systems. It targets general-purpose Intel x86-64 architecture CPUs. ICX --- `Intel oneAPI DPC++/C++ Compiler (ICX)`_ is the second of `Intel`_’s two C, C++, SYCL, and Data Parallel C++ (DPC++) compilers for Intel processor-based systems. Again, it is available for Windows, Linux, and macOS operating systems. Unlike `ICC`_, It generates code for both Intel’s general-purpose x86-64 CPUs and also GPUs. Specifically, it targets Intel IA-32, Intel 64 (aka x86-64), Core, Xeon, and Xeon Scalable processors, as well as GPUs including Intel Processor Graphics Gen9 and above, Intel Xe architecture, and Intel Programmable Acceleration Card with Intel Arria 10 GX FPGA. It builds on the SYCL specification from The `Khronos Group`_. It is designed to allow developers to reuse code across hardware targets (CPUs and accelerators such as GPUs and FPGAs) and perform custom tuning for a specific accelerator. ICX adopts `LLVM`_ for faster build times, and benefits from supporting the latest C++ standards. NVCC ---- The `Nvidia CUDA Compiler (NVCC)`_ is a proprietary compiler by `NVIDIA`_ intended for use with `CUDA`_. CUDA code runs on both the CPU and GPU. NVCC separates these two parts and sends host code (the part of code which will be run on the CPU) to a C compiler like `GCC`_ or `Intel C++ Compiler Classic (ICC)`_ or `Microsoft Visual C++ Compiler`_, and sends the device code (the part which will run on the GPU) to the GPU. The device code is further compiled by NVCC. Like `ICX`_, NVCC is also based on `LLVM`_.
ivy/docs/overview/related_work/vendor_specific_compilers.rst/0
{ "file_path": "ivy/docs/overview/related_work/vendor_specific_compilers.rst", "repo_id": "ivy", "token_count": 954 }
"""Ivy wrapping functions for conversions. Collection of Ivy functions for wrapping functions to accept and return ivy.Array instances. """ # global import numpy as np from typing import Any, Union, Tuple, Dict, Iterable, Optional # local import ivy # Helpers # # --------# ARRAY_TO_BACKEND = { "ndarray": "numpy", "Tensor": ["torch", "paddle"], "Parameter": "torch", "EagerTensor": "tensorflow", "ResourceVariable": "tensorflow", "DeviceArray": "jax", "Array": "jax", "ArrayImpl": "jax", "EagerParamBase": "paddle", } def _array_to_new_backend( x: Union[ivy.Array, ivy.NativeArray], native: bool = False ) -> Union[ivy.Array, ivy.NativeArray]: # Frontend instances if hasattr(x, "_ivy_array"): return x # ivy.Array instances native_x = x.data if isinstance(x, ivy.Array) else x native_x_type = type(native_x).__name__ # Modify native_type here since @tf.function converts tf.EagerTensor into # tf.Tensor when running @tf.function enclosed function if ivy.backend == "tensorflow": import tensorflow as tf native_x_type = ( "EagerTensor" if not tf.executing_eagerly() and isinstance(native_x, tf.Tensor) else native_x_type ) if native_x_type not in ARRAY_TO_BACKEND: return x # Check if the other possible backends match with the native data type native_x_backend = ARRAY_TO_BACKEND[native_x_type] # Handle the `Tensor` name clash in paddle and torch if not isinstance(native_x_backend, str): native_x_backend = "torch" if "torch" in str(native_x.__class__) else "paddle" # If the current backend and the backend for the given array match, # simply return the array as is if ivy.backend == native_x_backend: if native: return native_x np_intermediary = ivy.to_numpy(native_x) return ivy.array(np_intermediary) # Otherwise, convert to the new backend else: native_x_backend = ivy.with_backend(native_x_backend) # Handle native variable instances here if native_x_backend.gradients._is_variable(native_x): x_data = native_x_backend.gradients._variable_data(native_x) # x_data = _array_to_new_backend(x_data, native=True) from ivy.functional.ivy.gradients import _variable return _variable(x_data).data if native else _variable(x_data) np_intermediary = native_x_backend.to_numpy(native_x) ret = ivy.array(np_intermediary) return ret.data if native else ret def _to_new_backend( x: Any, native: bool = False, inplace: bool = False, to_ignore: tuple = (), ) -> Any: if isinstance(x, ivy.Container): to_ignore = ivy.default(to_ignore, ()) return x.cont_map( lambda x_, _: _to_new_backend( x_, native=native, inplace=inplace, to_ignore=to_ignore ), inplace=inplace, ) return _array_to_new_backend(x, native=native) def _to_native(x: Any, inplace: bool = False, to_ignore: tuple = ()) -> Any: to_ignore = ivy.default(to_ignore, ()) if isinstance(x, to_ignore): return x if isinstance(x, ivy.Array): return x.data # to prevent the graph from breaking for the time being elif type(x) is ivy.Shape: return x.shape elif isinstance(x, ivy.Container): return x.cont_map( lambda x_, _: _to_native(x_, inplace=inplace, to_ignore=to_ignore), inplace=inplace, ) return x def _to_ivy(x: Any) -> Any: if isinstance(x, ivy.Array): return x elif isinstance(x, ivy.NativeShape): return ivy.Shape(x) elif isinstance(x, ivy.Container): return x.to_ivy() if ivy.is_native_array(x) or isinstance(x, np.ndarray): return ivy.Array(x) return x # Wrapped # # --------# def to_ivy( x: Union[ivy.Array, ivy.NativeArray, Iterable], nested: bool = False, include_derived: Optional[Dict[str, bool]] = None, ) -> Union[ivy.Array, ivy.NativeArray, Iterable]: """Return the input array 
converted to an ivy.Array instance if it is a native array type, otherwise the input is returned unchanged. If nested is set, the check is applied to all nested leafs of tuples, lists and dicts contained within x. Parameters ---------- x The input to be converted. nested Whether to apply the conversion on arguments in a nested manner. If so, all dicts, lists and tuples will be traversed to their lowest leaves in search of ivy.Array instances. Default is ``False``. include_derived Whether to also recursive for classes derived from tuple, list and dict. Default is False. Returns ------- ret the input in its native framework form in the case of ivy.Array or instances. """ if nested: return ivy.nested_map(_to_ivy, x, include_derived, shallow=False) return _to_ivy(x) def args_to_ivy( *args: Iterable[Any], include_derived: Optional[Dict[str, bool]] = None, **kwargs: Dict[str, Any], ) -> Tuple[Iterable[Any], Dict[str, Any]]: """Return args and keyword args in their ivy.Array or form for all nested instances, otherwise the arguments are returned unchanged. Parameters ---------- args The positional arguments to check include_derived Whether to also recursive for classes derived from tuple, list and dict. Default is ``False``. kwargs The key-word arguments to check Returns ------- ret the same arguments, with any nested arrays converted to ivy.Array or instances. """ native_args = ivy.nested_map(_to_ivy, args, include_derived, shallow=False) native_kwargs = ivy.nested_map(_to_ivy, kwargs, include_derived, shallow=False) return native_args, native_kwargs def to_native( x: Union[ivy.Array, ivy.NativeArray, Iterable], nested: bool = False, include_derived: Optional[Dict[str, bool]] = None, cont_inplace: bool = False, to_ignore: Optional[Union[type, Tuple[type]]] = None, ) -> Union[ivy.Array, ivy.NativeArray, Iterable]: """Return the input item in its native backend framework form if it is an ivy.Array instance, otherwise the input is returned unchanged. If nested is set, the check is applied to all nested leaves of tuples, lists and dicts contained within ``x``. Parameters ---------- x The input to maybe convert. nested Whether to apply the conversion on arguments in a nested manner. If so, all dicts, lists and tuples will be traversed to their lowest leaves in search of ivy.Array instances. Default is ``False``. include_derived Whether to also recursive for classes derived from tuple, list and dict. Default is ``False``. cont_inplace Whether to update containers in place. Default is ``False`` to_ignore Types to ignore when deciding whether to go deeper into the nest or not Returns ------- ret the input in its native framework form in the case of ivy.Array instances. """ if nested: return ivy.nested_map( lambda x: _to_native(x, inplace=cont_inplace, to_ignore=to_ignore), x, include_derived, shallow=False, ) return _to_native(x, inplace=cont_inplace, to_ignore=to_ignore) def args_to_native( *args: Iterable[Any], include_derived: Optional[Dict[str, bool]] = None, cont_inplace: bool = False, to_ignore: Optional[Union[type, Tuple[type]]] = None, **kwargs: Dict[str, Any], ) -> Tuple[Iterable[Any], Dict[str, Any]]: """Return args and keyword args in their native backend framework form for all nested ivy.Array instances, otherwise the arguments are returned unchanged. Parameters ---------- args The positional arguments to check include_derived Whether to also recursive for classes derived from tuple, list and dict. Default is ``False``. cont_inplace Whether to update containers in place. 
Default is ``False`` to_ignore Types to ignore when deciding whether to go deeper into the nest or not kwargs The key-word arguments to check Returns ------- ret the same arguments, with any nested ivy.Array or instances converted to their native form. """ native_args = ivy.nested_map( lambda x: _to_native(x, inplace=cont_inplace, to_ignore=to_ignore), args, include_derived, shallow=False, ) native_kwargs = ivy.nested_map( lambda x: _to_native(x, inplace=cont_inplace, to_ignore=to_ignore), kwargs, include_derived, shallow=False, ) return native_args, native_kwargs def to_new_backend( x: Union[ivy.Array, ivy.NativeArray, Iterable], native: bool = True, nested: bool = False, include_derived: Optional[Dict[str, bool]] = None, cont_inplace: bool = False, to_ignore: Optional[Union[type, Tuple[type]]] = None, ) -> Union[ivy.Array, ivy.NativeArray, Iterable]: """Return the input array converted to new backend framework form if it is an `ivy.Array`, `ivy.NativeArray` or NativeVariable instance. If nested is set, the check is applied to all nested leaves of tuples, lists and dicts contained within ``x``. Parameters ---------- x The input to maybe convert. native Whether to return the new array as a `ivy.NativeArray`, NativeVariable or an `ivy.Array`. Default is ``True``. nested Whether to apply the conversion on arguments in a nested manner. If so, all dicts, lists and tuples will be traversed to their lowest leaves in search of ivy.Array instances. Default is ``False``. include_derived Whether to also recursive for classes derived from tuple, list and dict. Default is ``False``. cont_inplace Whether to update containers in place. Default is ``False`` to_ignore Types to ignore when deciding whether to go deeper into the nest or not Returns ------- ret the input in the new backend framework form in the case of array instances. """ if nested: return ivy.nested_map( lambda x: _to_new_backend( x, native=native, inplace=cont_inplace, to_ignore=to_ignore ), x, include_derived, shallow=False, ) return _to_new_backend(x, native=native, inplace=cont_inplace, to_ignore=to_ignore) def args_to_new_backend( *args: Iterable[Any], native: bool = True, shallow: bool = True, include_derived: Optional[Dict[str, bool]] = None, cont_inplace: bool = False, to_ignore: Optional[Union[type, Tuple[type]]] = None, **kwargs: Dict[str, Any], ) -> Tuple[Iterable[Any], Dict[str, Any]]: """Return args and keyword args in the new current backend framework for all nested ivy.Array, ivy.NativeArray or NativeVariable instances. Parameters ---------- args The positional arguments to check native Whether to return the new array as a ivy.NativeArray, NativeVariable or an ivy.Array. Default is ``True``. include_derived Whether to also recursive for classes derived from tuple, list and dict. Default is ``False``. cont_inplace Whether to update containers in place. Default is ``False`` to_ignore Types to ignore when deciding whether to go deeper into the nest or not shallow Whether to inplace update the input nest or not Only works if nest is a mutable type. Default is ``True``. kwargs The key-word arguments to check Returns ------- ret The same arguments, with any nested array instances converted to the new backend. 
""" new_args = ivy.nested_map( lambda x: _to_new_backend( x, native=native, inplace=cont_inplace, to_ignore=to_ignore ), args, include_derived, shallow=shallow, ) new_kwargs = ivy.nested_map( lambda x: _to_new_backend( x, native=native, inplace=cont_inplace, to_ignore=to_ignore ), kwargs, include_derived, shallow=shallow, ) return new_args, new_kwargs
ivy/ivy/data_classes/array/conversions.py/0
{ "file_path": "ivy/ivy/data_classes/array/conversions.py", "repo_id": "ivy", "token_count": 4995 }
# global import abc from typing import Optional, Union, Tuple, List, Sequence, Literal # local import ivy class _ArrayWithLinearAlgebraExperimental(abc.ABC): def eigh_tridiagonal( self: Union[ivy.Array, ivy.NativeArray], beta: Union[ivy.Array, ivy.NativeArray], /, *, eigvals_only: bool = True, select: str = "a", select_range: Optional[ Union[Tuple[int, int], List[int], ivy.Array, ivy.NativeArray] ] = None, tol: Optional[float] = None, ) -> Union[ivy.Array, Tuple[ivy.Array, ivy.Array]]: """ivy.Array instance method variant of ivy.eigh_tridiagonal. This method simply wraps the function, and so the docstring for ivy.eigh_tridiagonal also applies to this method with minimal changes. Parameters ---------- self An array of real or complex arrays each of shape (n), the diagonal elements of the matrix. beta An array or of real or complex arrays each of shape (n-1), containing the elements of the first super-diagonal of the matrix. eigvals_only If False, both eigenvalues and corresponding eigenvectors are computed. If True, only eigenvalues are computed. Default is True. select Optional string with values in {'a', 'v', 'i'} (default is 'a') that determines which eigenvalues to calculate: 'a': all eigenvalues. 'v': eigenvalues in the interval (min, max] given by select_range. 'i': eigenvalues with indices min <= i <= max. select_range Size 2 tuple or list or array specifying the range of eigenvalues to compute together with select. If select is 'a', select_range is ignored. tol Optional scalar. Ignored when backend is not Tensorflow. The absolute tolerance to which each eigenvalue is required. An eigenvalue (or cluster) is considered to have converged if it lies in an interval of this width. If tol is None (default), the value eps*|T|_2 is used where eps is the machine precision, and |T|_2 is the 2-norm of the matrix T. Returns ------- eig_vals The eigenvalues of the matrix in non-decreasing order. eig_vectors If eigvals_only is False the eigenvectors are returned in the second output argument. Examples -------- >>> alpha = ivy.array([0., 1., 2.]) >>> beta = ivy.array([0., 1.]) >>> y = alpha.eigh_tridiagonal(beta) >>> print(y) ivy.array([0., 0.38196, 2.61803]) """ return ivy.eigh_tridiagonal( self._data, beta, eigvals_only=eigvals_only, select=select, select_range=select_range, tol=tol, ) def diagflat( self: Union[ivy.Array, ivy.NativeArray], /, *, offset: int = 0, padding_value: float = 0, align: str = "RIGHT_LEFT", num_rows: int = -1, num_cols: int = -1, out: Optional[Union[ivy.Array, ivy.NativeArray]] = None, ) -> ivy.Array: """ivy.Array instance method variant of ivy.diagflat. This method simply wraps the function, and so the docstring for ivy.diagflat also applies to this method with minimal changes. Examples -------- >>> x = ivy.array([1,2]) >>> x.diagflat(k=1) ivy.array([[0, 1, 0], [0, 0, 2], [0, 0, 0]]) """ return ivy.diagflat( self._data, offset=offset, padding_value=padding_value, align=align, num_rows=num_rows, num_cols=num_cols, out=out, ) def kron( self: ivy.Array, b: ivy.Array, /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """ivy.Array instance method variant of ivy.kron. This method simply wraps the function, and so the docstring for ivy.kron also applies to this method with minimal changes. Examples -------- >>> a = ivy.array([1,2]) >>> b = ivy.array([3,4]) >>> a.diagflat(b) ivy.array([3, 4, 6, 8]) """ return ivy.kron(self._data, b, out=out) def matrix_exp(self: ivy.Array, /, *, out: Optional[ivy.Array] = None) -> ivy.Array: """ivy.Array instance method variant of ivy.kron. 
This method simply wraps the function, and so the docstring for ivy.matrix_exp also applies to this method with minimal changes. Examples -------- >>> x = ivy.array([[[1., 0.], [0., 1.]], [[2., 0.], [0., 2.]]]) >>> ivy.matrix_exp(x) ivy.array([[[2.7183, 1.0000], [1.0000, 2.7183]], [[7.3891, 1.0000], [1.0000, 7.3891]]]) """ return ivy.matrix_exp(self._data, out=out) def eig( self: ivy.Array, /, ) -> Tuple[ivy.Array, ...]: """ivy.Array instance method variant of ivy.eig. This method simply wraps the function, and so the docstring for ivy.eig also applies to this method with minimal changes. Examples -------- >>> x = ivy.array([[1,2], [3,4]]) >>> x.eig() ( ivy.array([-0.37228132+0.j, 5.37228132+0.j]), ivy.array([[-0.82456484+0.j, -0.41597356+0.j], [ 0.56576746+0.j, -0.90937671+0.j]]) ) """ return ivy.eig(self._data) def eigvals( self: ivy.Array, /, ) -> ivy.Array: """ivy.Array instance method variant of ivy.eigvals. This method simply wraps the function, and so the docstring for ivy.eigvals also applies to this method with minimal changes. Examples -------- >>> x = ivy.array([[1,2], [3,4]]) >>> x.eigvals() ivy.array([-0.37228132+0.j, 5.37228132+0.j]) """ return ivy.eigvals(self._data) def adjoint( self: ivy.Array, /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """ivy.Array instance method variant of ivy.adjoint. This method simply wraps the function, and so the docstring for ivy.adjoint also applies to this method with minimal changes. Examples -------- >>> x = np.array([[1.-1.j, 2.+2.j], [3.+3.j, 4.-4.j]]) >>> x = ivy.array(x) >>> x.adjoint() ivy.array([[1.+1.j, 3.-3.j], [2.-2.j, 4.+4.j]]) """ return ivy.adjoint( self._data, out=out, ) def multi_dot( self: ivy.Array, x: Sequence[Union[ivy.Array, ivy.NativeArray]], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """ivy.Array instance method variant of ivy.multi_dot. This method simply wraps the function, and so the docstring for ivy.multi_dot also applies to this method with minimal changes. Examples -------- >>> A = ivy.arange(2 * 3).reshape((2, 3)) >>> B = ivy.arange(3 * 2).reshape((3, 2)) >>> C = ivy.arange(2 * 2).reshape((2, 2)) >>> A.multi_dot((B, C)) ivy.array([[ 26, 49], [ 80, 148]]) """ return ivy.multi_dot((self._data, *x), out=out) def cond( self: ivy.Array, /, *, p: Optional[Union[int, float, str]] = None ) -> ivy.Array: """ivy.Array instance method variant of ivy.cond. This method simply wraps the function, and so the docstring for ivy.cond also applies to this method with minimal changes. Examples -------- >>> x = ivy.array([[1,2], [3,4]]) >>> x.cond() ivy.array(14.933034373659268) >>> x = ivy.array([[1,2], [3,4]]) >>> x.cond(p=ivy.inf) ivy.array(21.0) """ return ivy.cond(self._data, p=p) def mode_dot( self: Union[ivy.Array, ivy.NativeArray], /, matrix_or_vector: Union[ivy.Array, ivy.NativeArray], mode: int, transpose: Optional[bool] = False, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """ivy.Array instance method variant of ivy.mode_dot. This method simply wraps the function, and so the docstring for ivy.mode_dot also applies to this method with minimal changes. Parameters ---------- self tensor of shape ``(i_1, ..., i_k, ..., i_N)`` matrix_or_vector 1D or 2D array of shape ``(J, i_k)`` or ``(i_k, )`` matrix or vectors to which to n-mode multiply the tensor mode int in the range(1, N) transpose If True, the matrix is transposed. For complex tensors, the conjugate transpose is used. out optional output array, for writing the result to. It must have a shape that the result can broadcast to. 
Returns ------- ivy.Array `mode`-mode product of `tensor` by `matrix_or_vector` * of shape :math:`(i_1, ..., i_{k-1}, J, i_{k+1}, ..., i_N)` if matrix_or_vector is a matrix * of shape :math:`(i_1, ..., i_{k-1}, i_{k+1}, ..., i_N)` if matrix_or_vector is a vector """ return ivy.mode_dot(self._data, matrix_or_vector, mode, transpose, out=out) def multi_mode_dot( self: Union[ivy.Array, ivy.NativeArray], mat_or_vec_list: Sequence[Union[ivy.Array, ivy.NativeArray]], /, modes: Optional[Sequence[int]] = None, skip: Optional[Sequence[int]] = None, transpose: Optional[bool] = False, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: r"""ivy.Array instance method variant of ivy.multi_mode_dot. This method simply wraps the function, and so the docstring for ivy.multi_mode_dot also applies to this method with minimal changes. Parameters ---------- self the input tensor mat_or_vec_list sequence of matrices or vectors of length ``tensor.ndim`` skip None or int, optional, default is None If not None, index of a matrix to skip. modes None or int list, optional, default is None transpose If True, the matrices or vectors in in the list are transposed. For complex tensors, the conjugate transpose is used. out optional output array, for writing the result to. It must have a shape that the result can broadcast to. Returns ------- ivy.Array tensor times each matrix or vector in the list at mode `mode` Notes ----- If no modes are specified, just assumes there is one matrix or vector per mode and returns: :math:`\\text{x }\\times_0 \\text{ matrix or vec list[0] }\\times_1 \\cdots \\times_n \\text{ matrix or vec list[n] }` """ # noqa: E501 return ivy.multi_mode_dot( self._data, mat_or_vec_list, modes, skip, transpose, out=out ) def svd_flip( self: Union[ivy.Array, ivy.NativeArray], V: Union[ivy.Array, ivy.NativeArray], /, u_based_decision: Optional[bool] = True, ) -> Tuple[ivy.Array, ivy.Array]: """ivy.Array instance method variant of ivy.svd_flip. This method simply wraps the function, and so the docstring for ivy.svd_flip also applies to this method with minimal changes. Parameters ---------- self left singular matrix output of SVD V right singular matrix output of SVD u_based_decision If True, use the columns of u as the basis for sign flipping. Otherwise, use the rows of v. The choice of which variable to base the decision on is generally algorithm dependent. Returns ------- u_adjusted, v_adjusted : arrays with the same dimensions as the input. """ return ivy.svd_flip(self._data, V, u_based_decision) def make_svd_non_negative( self: Union[ivy.Array, ivy.NativeArray], U: Union[ivy.Array, ivy.NativeArray], S: Union[ivy.Array, ivy.NativeArray], V: Union[ivy.Array, ivy.NativeArray], /, *, nntype: Optional[Literal["nndsvd", "nndsvda"]] = "nndsvd", ) -> Tuple[ivy.Array, ivy.Array]: """ivy.Array instance method variant of ivy.make_svd_non_negative. This method simply wraps the function, and so the docstring for ivy.make_svd_non_negative also applies to this method with minimal changes. Parameters ---------- self tensor being decomposed. U left singular matrix from SVD. S diagonal matrix from SVD. V right singular matrix from SVD. nntype whether to fill small values with 0.0 (nndsvd), or the tensor mean (nndsvda, default). [1]: Boutsidis & Gallopoulos. Pattern Recognition, 41(4): 1350-1362, 2008. 
""" return ivy.make_svd_non_negative(self._data, U, S, V, nntype=nntype) def tensor_train( self: Union[ivy.Array, ivy.NativeArray], rank: Union[int, Sequence[int]], /, svd: Optional[Literal["truncated_svd"]] = "truncated_svd", verbose: Optional[bool] = False, ) -> ivy.TTTensor: """ivy.Array instance method variant of ivy.tensor_train. This method simply wraps the function, and so the docstring for ivy.tensor_train also applies to this method with minimal changes. Parameters ---------- self input tensor rank maximum allowable TT rank of the factors if int, then this is the same for all the factors if int list, then rank[k] is the rank of the kth factor svd function to use to compute the SVD verbose level of verbosity Returns ------- ivy.TTTensor """ return ivy.tensor_train(self._data, rank, svd=svd, verbose=verbose) def truncated_svd( self: Union[ivy.Array, ivy.NativeArray], /, compute_uv: bool = True, n_eigenvecs: Optional[int] = None, ) -> Union[ivy.Array, Tuple[ivy.Array, ivy.Array, ivy.Array]]: """ivy.Array instance method variant of ivy.make_svd_non_negative. This method simply wraps the function, and so the docstring for ivy.make_svd_non_negative also applies to this method with minimal changes. Parameters ---------- x 2D-array compute_uv If ``True`` then left and right singular vectors will be computed and returnedv in ``U`` and ``Vh`` respectively. Otherwise, only the singular values will be computed, which can be significantly faster. n_eigenvecs if specified, number of eigen[vectors-values] to return else full matrices will be returned Returns ------- ret a namedtuple ``(U, S, Vh)`` Each returned array must have the same floating-point data type as ``x``. """ return ivy.truncated_svd(self._data, compute_uv, n_eigenvecs) def initialize_tucker( self: Union[ivy.Array, ivy.NativeArray], rank: Sequence[int], modes: Sequence[int], /, *, init: Optional[Union[Literal["svd", "random"], ivy.TuckerTensor]] = "svd", seed: Optional[int] = None, svd: Optional[Literal["truncated_svd"]] = "truncated_svd", non_negative: Optional[bool] = False, mask: Optional[Union[ivy.Array, ivy.NativeArray]] = None, svd_mask_repeats: Optional[int] = 5, ) -> Tuple[ivy.Array, Sequence[ivy.Array]]: """ivy.Array instance method variant of ivy.initialize_tucker. This method simply wraps the function, and so the docstring for ivy.initialize_tucker also applies to this method with minimal changes. Parameters ---------- self input tensor rank number of components modes modes to consider in the input tensor seed Used to create a random seed distribution when init == 'random' init initialization scheme for tucker decomposition. svd function to use to compute the SVD non_negative if True, non-negative factors are returned mask array of booleans with the same shape as ``tensor`` should be 0 where the values are missing and 1 everywhere else. Note: if tensor is sparse, then mask should also be sparse with a fill value of 1 (or True). 
svd_mask_repeats number of iterations for imputing the values in the SVD matrix when mask is not None Returns ------- core initialized core tensor factors list of factors """ return ivy.initialize_tucker( self._data, rank, modes, seed=seed, init=init, svd=svd, non_negative=non_negative, mask=mask, svd_mask_repeats=svd_mask_repeats, ) def partial_tucker( self: Union[ivy.Array, ivy.NativeArray], rank: Optional[Sequence[int]] = None, modes: Optional[Sequence[int]] = None, /, *, n_iter_max: Optional[int] = 100, init: Optional[Union[Literal["svd", "random"], ivy.TuckerTensor]] = "svd", svd: Optional[Literal["truncated_svd"]] = "truncated_svd", seed: Optional[int] = None, mask: Optional[Union[ivy.Array, ivy.NativeArray]] = None, svd_mask_repeats: Optional[int] = 5, tol: Optional[float] = 10e-5, verbose: Optional[bool] = False, return_errors: Optional[bool] = False, ) -> Tuple[ivy.Array, Sequence[ivy.Array]]: """ivy.Array instance method variant of ivy.partial_tucker. This method simply wraps the function, and so the docstring for ivy.partial_tucker also applies to this method with minimal changes. Parameters ---------- self the input tensor rank size of the core tensor, ``(len(ranks) == tensor.ndim)`` if int, the same rank is used for all modes if None, original tensors size will be preserved. modes list of the modes on which to perform the decomposition n_iter_max maximum number of iteration init {'svd', 'random'}, or TuckerTensor optional if a TuckerTensor is provided, this is used for initialization svd str, default is 'truncated_svd' function to use to compute the SVD, seed Used to create a random seed distribution when init == 'random' mask array of booleans with the same shape as ``tensor`` should be 0 where the values are missing and 1 everywhere else. Note: if tensor is sparse, then mask should also be sparse with a fill value of 1 (or True). svd_mask_repeats number of iterations for imputing the values in the SVD matrix when mask is not None tol tolerance: the algorithm stops when the variation in the reconstruction error is less than the tolerance. verbose if True, different in reconstruction errors are returned at each iteration. return_erros if True, list of reconstruction errors are returned. Returns ------- core : ndarray core tensor of the Tucker decomposition factors : ndarray list list of factors of the Tucker decomposition. with ``core.shape[i] == (tensor.shape[i], ranks[i]) for i in modes`` """ return ivy.partial_tucker( self._data, rank, modes, n_iter_max=n_iter_max, init=init, svd=svd, seed=seed, mask=mask, svd_mask_repeats=svd_mask_repeats, tol=tol, verbose=verbose, return_errors=return_errors, ) def tucker( self: Union[ivy.Array, ivy.NativeArray], rank: Optional[Sequence[int]] = None, /, *, fixed_factors: Optional[Sequence[int]] = None, n_iter_max: Optional[int] = 100, init: Optional[Union[Literal["svd", "random"], ivy.TuckerTensor]] = "svd", svd: Optional[Literal["truncated_svd"]] = "truncated_svd", seed: Optional[int] = None, mask: Optional[Union[ivy.Array, ivy.NativeArray]] = None, svd_mask_repeats: Optional[int] = 5, tol: Optional[float] = 10e-5, verbose: Optional[bool] = False, return_errors: Optional[bool] = False, ): """ivy.Array instance method variant of ivy.tucker. This method simply wraps the function, and so the docstring for ivy.tucker also applies to this method with minimal changes. 
Parameters ---------- x input tensor rank size of the core tensor, ``(len(ranks) == tensor.ndim)`` if int, the same rank is used for all modes fixed_factors if not None, list of modes for which to keep the factors fixed. Only valid if a Tucker tensor is provided as init. n_iter_max maximum number of iteration init {'svd', 'random'}, or TuckerTensor optional if a TuckerTensor is provided, this is used for initialization svd str, default is 'truncated_svd' function to use to compute the SVD, seed Used to create a random seed distribution when init == 'random' mask array of booleans with the same shape as ``tensor`` should be 0 where the values are missing and 1 everywhere else. Note: if tensor is sparse, then mask should also be sparse with a fill value of 1 (or True). svd_mask_repeats number of iterations for imputing the values in the SVD matrix when mask is not None tol tolerance: the algorithm stops when the variation in the reconstruction error is less than the tolerance verbose if True, different in reconstruction errors are returned at each iteration. return_errors Indicates whether the algorithm should return all reconstruction errors and computation time of each iteration or not Default: False Returns ------- ivy.TuckerTensor or ivy.TuckerTensor and list of reconstruction errors if return_erros is True. References ---------- .. [1] tl.G.Kolda and B.W.Bader, "Tensor Decompositions and Applications", SIAM REVIEW, vol. 51, n. 3, pp. 455-500, 2009. """ return ivy.tucker( self._data, rank, fixed_factors=fixed_factors, n_iter_max=n_iter_max, init=init, return_errors=return_errors, seed=seed, mask=mask, svd=svd, svd_mask_repeats=svd_mask_repeats, tol=tol, verbose=verbose, ) def tt_matrix_to_tensor( self: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Ivy.Array instance method variant of ivy.tt_matrix_to_tensor. This method simply wraps the function, and so the docstring for ivy.tt_matrix_to_tensor also applies to this method with minimal changes. Parameters ---------- self array of 4D-arrays TT-Matrix factors (known as core) of shape (rank_k, left_dim_k, right_dim_k, rank_{k+1}) out Optional output array. If provided, the output array to store the result. Returns ------- output_tensor: array tensor whose TT-Matrix decomposition was given by 'factors' -------- >>> a = ivy.array([[[[[0.49671414], ... [-0.1382643]], ... ... [[0.64768857], ... [1.5230298]]]], ... [[[[-0.23415337], ... [-0.23413695]], ... ... [[1.57921278], ... [0.76743472]]]]]) >>> a.tt_matrix_to_tensor() ivy.array([[[[-0.1163073 , -0.11629914], [ 0.03237505, 0.03237278]], [[ 0.78441733, 0.38119566], [-0.21834874, -0.10610882]]], [[[-0.15165846, -0.15164782], [-0.35662258, -0.35659757]], [[ 1.02283812, 0.49705869], [ 2.40518808, 1.16882598]]]]) """ return ivy.tt_matrix_to_tensor(self._data, out=out) def dot( self: Union[ivy.Array, ivy.NativeArray], b: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ): """Compute the dot product between two arrays `a` and `b` using the current backend's implementation. The dot product is defined as the sum of the element- wise product of the input arrays. Parameters ---------- self First input array. b Second input array. out Optional output array. If provided, the output array to store the result. Returns ------- ret The dot product of the input arrays. 
Examples -------- With :class:`ivy.Array` inputs: >>> a = ivy.array([1, 2, 3]) >>> b = ivy.array([4, 5, 6]) >>> result = ivy.dot(a, b) >>> print(result) ivy.array(32) >>> a = ivy.array([[1, 2], [3, 4]]) >>> b = ivy.array([[5, 6], [7, 8]]) >>> c = ivy.empty_like(a) >>> ivy.dot(a, b, out=c) >>> print(c) ivy.array([[19, 22], [43, 50]]) >>> a = ivy.array([[1.1, 2.3, -3.6]]) >>> b = ivy.array([[-4.8], [5.2], [6.1]]) >>> c = ivy.zeros((1, 1)) >>> ivy.dot(a, b, out=c) >>> print(c) ivy.array([[-15.28]]) """ return ivy.dot(self._data, b, out=out) def general_inner_product( self: Union[ivy.Array, ivy.NativeArray], b: Union[ivy.Array, ivy.NativeArray], n_modes: Optional[int] = None, /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """ivy.Array instance method variant of ivy.general_inner_product. This method simply wraps the function, and so the docstring for ivy.general_inner_product also applies to this method with minimal changes. Parameters ---------- self first input tensor. b second input tensor. n_modes int, default is None. If None, the traditional inner product is returned (i.e. a float) otherwise, the product between the `n_modes` last modes of `a` and the `n_modes` first modes of `b` is returned. The resulting tensor's order is `len(a) - n_modes`. out Optional output array. If provided, the output array to store the result. Returns ------- The inner product of the input arrays. Examples -------- With :class:`ivy.Array` inputs: >>> a = ivy.array([1, 2, 3]) >>> b = ivy.array([4, 5, 6]) >>> result = a.general_inner_product(b, 1) >>> print(result) ivy.array(32) >>> a = ivy.array([1, 2]) >>> b = ivy.array([4, 5]) >>> result = a.general_inner_product(b) >>> print(result) ivy.array(14) >>> a = ivy.array([[1, 1], [1, 1]]) >>> b = ivy.array([[1, 2, 3, 4],[1, 1, 1, 1]]) >>> result = a.general_inner_product(b, 1) >>> print(result) ivy.array([[2, 3, 4, 5], [2, 3, 4, 5]]) """ return ivy.general_inner_product(self, b, n_modes, out=out) def higher_order_moment( self: Union[ivy.Array, ivy.NativeArray], order: int, /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """ivy.Array instance method variant of ivy.higher_order_moment. This method simply wraps the function, and so the docstring for ivy.higher_order_moment also applies to this method with minimal changes. Parameters ---------- x matrix of size (n_samples, n_features) or tensor of size(n_samples, D1, ..., DN) order number of the higher-order moment to compute Returns ------- tensor if tensor is a matrix of size (n_samples, n_features), tensor of size (n_features, )*order Examples -------- >>> a = ivy.array([[1, 2], [3, 4]]) >>> result = ivy.higher_order_moment(a, 3) >>> print(result) ivy.array([[ [14, 19], [19, 26]], [[19, 26], [26, 36] ]]) """ return ivy.higher_order_moment(self._data, order, out=out) def batched_outer( self: ivy.Array, tensors: Sequence[Union[ivy.Array, ivy.NativeArray]], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Ivy Array instance method variant of ivy.batched_outer. This method simply wraps the function, and so the docstring for ivy.batched_outer also applies to this method with minimal changes. Parameters ---------- tensors list of tensors of shape (n_samples, J1, ..., JN) , (n_samples, K1, ..., KM) ... Returns ------- outer product of tensors of shape (n_samples, J1, ..., JN, K1, ..., KM, ...) 
Examples -------- >>> a = ivy.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) >>> b = ivy.array([[[.1, .2], [.3, .4]], [[.5, .6], [.7, .8]]]) >>> result = ivy.batched_outer(a, b) >>> print(result) ivy.array([[[[[0.1, 0.2], [0.30000001, 0.40000001]], [[0.2 , 0.40000001], [0.60000002, 0.80000001]]], [[[0.3 , 0.60000001], [0.90000004, 1.20000002]], [[0.40000001, 0.80000001], [1.20000005, 1.60000002]]]], [[[[2.5 , 3.00000012], [3.49999994, 4.00000006]], [[3. , 3.60000014], [4.19999993, 4.80000007]]], [[[3.5 , 4.20000017], [4.89999992, 5.60000008]], [[4. , 4.80000019], [5.5999999 , 6.4000001 ]]]]]) """ return ivy.batched_outer((self._data, *tensors), out=out)
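

# A minimal, illustrative usage sketch of a few of the instance methods documented
# above. It assumes a backend has been selected (ivy.set_backend("numpy") here);
# the expected values in the comments are taken from the docstrings and may differ
# slightly in precision or formatting between backends.
if __name__ == "__main__":
    import ivy

    ivy.set_backend("numpy")

    a = ivy.array([1, 2, 3])
    b = ivy.array([4, 5, 6])
    print(a.dot(b))                       # ivy.array(32)
    print(a.general_inner_product(b, 1))  # ivy.array(32), contraction over one mode

    m = ivy.array([[1, 2], [3, 4]])
    print(m.higher_order_moment(3))       # third-order moment tensor of shape (2, 2, 2)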
ivy/ivy/data_classes/array/experimental/linear_algebra.py/0
{ "file_path": "ivy/ivy/data_classes/array/experimental/linear_algebra.py", "repo_id": "ivy", "token_count": 15754 }
9
# For Review # global import abc from typing import Optional, Union, Tuple, List, Iterable, Sequence from numbers import Number # local import ivy from ivy import handle_view # ToDo: implement all methods here as public instance methods class _ArrayWithManipulation(abc.ABC): def view( self: ivy.Array, /, shape: Optional[Union[ivy.Shape, ivy.NativeShape, Sequence[int]]] = None, ) -> ivy.Array: if shape: return self.reshape(shape) return self.reshape(self.shape) def concat( self: ivy.Array, xs: Union[ Tuple[Union[ivy.Array, ivy.NativeArray], ...], List[Union[ivy.Array, ivy.NativeArray]], ], /, *, axis: int = 0, out: Optional[ivy.Array] = None, ) -> ivy.Array: """ivy.Array instance method variant of ivy.concat. This method simply wraps the function, and so the docstring for ivy.concat also applies to this method with minimal changes. Parameters ---------- self input array to join with other arrays ``xs``. xs The other arrays to join with. The arrays must have the same shape, except in the dimension specified by axis. axis axis along which the arrays will be joined. If axis is None, arrays must be flattened before concatenation. If axis is negative, axis on which to join arrays is determined by counting from the top. Default: ``0``. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an output array containing the concatenated values. """ return ivy.concat([self._data] + xs, axis=axis, out=out) @handle_view def expand_dims( self: ivy.Array, /, *, copy: Optional[bool] = None, axis: Union[int, Sequence[int]] = 0, out: Optional[ivy.Array] = None, ) -> ivy.Array: """ivy.Array instance method variant of ivy.expand_dims. This method simply wraps the function, and so the docstring for ivy.expand_dims also applies to this method with minimal changes. Parameters ---------- self input array. axis position in the expanded array where a new axis (dimension) of size one will be added. If array ``self`` has the rank of ``N``, the ``axis`` needs to be between ``[-N-1, N]``. Default: ``0``. copy boolean indicating whether or not to copy the input array. If True, the function must always copy. If False, the function must never copy. In case copy is False we avoid copying by returning a view of the input array. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret An array with the elements of ``self``, but with its dimension added by one in a given ``axis``. Examples -------- >>> x = ivy.array([-4.7, -2.3, 0.7]) #x.shape->(3,) >>> y = x.expand_dims() #y.shape->(1, 3) >>> print(y) ivy.array([[-4.7, -2.3, 0.7]]) """ return ivy.expand_dims(self._data, copy=copy, axis=axis, out=out) @handle_view def flip( self: ivy.Array, /, *, copy: Optional[bool] = None, axis: Optional[Union[int, Sequence[int]]] = None, out: Optional[ivy.Array] = None, ) -> ivy.Array: """ivy.Array instance method variant of ivy.flip. This method simply wraps the function, and so the docstring for ivy.flip also applies to this method with minimal changes. Parameters ---------- self input array. axis axis (or axes) along which to flip. If axis is None, all input array axes are flipped. If axis is negative, axis is counted from the last dimension. If provided more than one axis, only the specified axes. Default: None. copy boolean indicating whether or not to copy the input array. If True, the function must always copy. If False, the function must never copy. 
In case copy is False we avoid copying by returning a view of the input array. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an output array having the same data type and shape as``self`` and whose elements, relative to ``self``, are reordered. Examples -------- >>> x = ivy.array([1, 2, 3]) >>> y = x.flip() >>> print(y) ivy.array([3, 2, 1]) >>> x = ivy.array([4, 5, 6]) >>> y = x.flip(axis=0) >>> print(y) ivy.array([6, 5, 4]) """ return ivy.flip(self._data, copy=copy, axis=axis, out=out) @handle_view def permute_dims( self: ivy.Array, /, axes: Tuple[int, ...], *, copy: Optional[bool] = None, out: Optional[ivy.Array] = None, ) -> ivy.Array: """ivy.Array instance method variant of ivy.permute_dims. This method simply wraps the function, and so the docstring for ivy.permute_dims also applies to this method with minimal changes. Parameters ---------- self input array. axes tuple containing a permutation of (0, 1, ..., N-1) where N is the number of axes (dimensions) of x. copy boolean indicating whether or not to copy the input array. If True, the function must always copy. If False, the function must never copy. In case copy is False we avoid copying by returning a view of the input array. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the axes permutation. The returned array must have the same data type as x. Examples -------- With :class:`ivy.Array` input: >>> x = ivy.array([[1, 2, 3], [4, 5, 6]]) >>> y = x.permute_dims(axes=(1, 0)) >>> print(y) ivy.array([[1, 4], [2, 5], [3, 6]]) >>> x = ivy.zeros((2, 3)) >>> y = x.permute_dims(axes=(1, 0)) >>> print(y) ivy.array([[0., 0.], [0., 0.], [0., 0.]]) """ return ivy.permute_dims(self._data, axes, copy=copy, out=out) @handle_view def reshape( self: ivy.Array, /, shape: Union[ivy.Shape, ivy.NativeShape, Sequence[int]], *, copy: Optional[bool] = None, order: str = "C", allowzero: bool = True, out: Optional[ivy.Array] = None, ) -> ivy.Array: """ivy.Array instance method variant of ivy.reshape. This method simply wraps the function, and so the docstring for ivy.reshape also applies to this method with minimal changes. Parameters ---------- self input array. shape The new shape should be compatible with the original shape. One shape dimension can be -1. In this case, the value is inferred from the length of the array and remaining dimensions. copy boolean indicating whether or not to copy the input array. If True, the function must always copy. If False, the function must never copy. In case copy is False we avoid copying by returning a view of the input array. order Read the elements of the input array using this index order, and place the elements into the reshaped array using this index order. ‘C’ means to read / write the elements using C-like index order, with the last axis index changing fastest, back to the first axis index changing slowest. ‘F’ means to read / write the elements using Fortran-like index order, with the first index changing fastest, and the last index changing slowest. Note that the ‘C’ and ‘F’ options take no account of the memory layout of the underlying array, and only refer to the order of indexing. Default order is 'C' out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an output array having the same data type as ``self`` and elements as ``self``. 
Examples -------- >>> x = ivy.array([[0., 1., 2.],[3., 4., 5.]]) >>> y = x.reshape((3,2)) >>> print(y) ivy.array([[0., 1.], [2., 3.], [4., 5.]]) >>> x = ivy.array([[0., 1., 2.],[3., 4., 5.]]) >>> y = x.reshape((3,2), order='F') >>> print(y) ivy.array([[0., 4.], [3., 2.], [1., 5.]]) """ return ivy.reshape( self._data, shape, copy=copy, allowzero=allowzero, out=out, order=order ) def roll( self: ivy.Array, /, shift: Union[int, Sequence[int]], *, axis: Optional[Union[int, Sequence[int]]] = None, out: Optional[ivy.Array] = None, ) -> ivy.Array: """ivy.Array instance method variant of ivy.roll. This method simply wraps the function, and so the docstring for ivy.roll also applies to this method with minimal changes. Parameters ---------- self input array. shift number of places by which the elements are shifted. If ``shift`` is a tuple, then ``axis`` must be a tuple of the same size, and each of the given axes must be shifted by the corresponding element in ``shift``. If ``shift`` is an ``int`` and ``axis`` a tuple, then the same ``shift`` must be used for all specified axes. If a shift is positive, then array elements must be shifted positively (toward larger indices) along the dimension of ``axis``. If a shift is negative, then array elements must be shifted negatively (toward smaller indices) along the dimension of ``axis``. axis axis (or axes) along which elements to shift. If ``axis`` is ``None``, the array must be flattened, shifted, and then restored to its original shape. Default ``None``. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an output array having the same data type as ``self`` and whose elements, relative to ``self``, are shifted. Examples -------- >>> x = ivy.array([0., 1., 2.]) >>> y = x.roll(1) >>> print(y) ivy.array([2., 0., 1.]) >>> x = ivy.array([[0., 1., 2.], ... [3., 4., 5.]]) >>> y = x.roll(2, axis=-1) >>> print(y) ivy.array([[1., 2., 0.], [4., 5., 3.]]) """ return ivy.roll(self._data, shift=shift, axis=axis, out=out) @handle_view def squeeze( self: ivy.Array, /, *, axis: Optional[Union[int, Sequence[int]]], copy: Optional[bool] = None, out: Optional[ivy.Array] = None, ) -> ivy.Array: """ivy.Array instance method variant of ivy.squeeze. This method simply wraps the function, and so the docstring for ivy.squeeze also applies to this method with minimal changes. Parameters ---------- self input array. axis axis (or axes) to squeeze. If a specified axis has a size greater than one, a ValueError is. If None, then all squeezable axes are squeezed. Default: ``None``. copy boolean indicating whether or not to copy the input array. If True, the function must always copy. If False, the function must never copy. In case copy is False we avoid copying by returning a view of the input array. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an output array having the same data type and elements as x. Examples -------- >>> x = ivy.array([[[0.],[ 1.]]]) >>> y = x.squeeze(axis=2) >>> print(y) ivy.array([[0., 1.]]) """ return ivy.squeeze(self._data, axis=axis, copy=copy, out=out) def stack( self: ivy.Array, /, arrays: Union[ Tuple[Union[ivy.Array, ivy.NativeArray]], List[Union[ivy.Array, ivy.NativeArray]], ], *, axis: int = 0, out: Optional[ivy.Array] = None, ) -> ivy.Array: """ivy.Array instance method variant of ivy.stack. 
This method simply wraps the function, and so the docstring for ivy.stack also applies to this method with minimal changes. Parameters ---------- self Array to join with other ``arrays``. arrays Other arrays to join with. Each array must have the same shape. axis axis along which the arrays will be joined. More details can be found in the ``ivy.stack`` documentation. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret output array made by joining the input arrays along the specified axis. Examples -------- >>> x = ivy.array([1, 2]) >>> y = ivy.array([5, 6]) >>> print(x.stack(y, axis=1)) ivy.array([[1, 5], [2, 6]]) >>> x.stack([y],axis=0) ivy.array([[[1, 2]], [[5, 6]]]) """ if not isinstance(arrays, (tuple, list)): arrays = [arrays] if isinstance(arrays, tuple): x = (self._data,) + arrays else: x = [self._data] + arrays return ivy.stack(x, axis=axis, out=out) def clip( self: ivy.Array, /, x_min: Optional[Union[Number, ivy.Array, ivy.NativeArray]] = None, x_max: Optional[Union[Number, ivy.Array, ivy.NativeArray]] = None, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """ivy.Array instance method variant of ivy.clip. This method simply wraps the function, and so the docstring for ivy.clip also applies to this method with minimal changes. Parameters ---------- self Input array containing elements to clip. x_min Minimum value. x_max Maximum value. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret An array with the elements of self, but where values < x_min are replaced with x_min, and those > x_max with x_max. Examples -------- >>> x = ivy.array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.]) >>> y = x.clip(1., 5.) >>> print(y) ivy.array([1., 1., 2., 3., 4., 5., 5., 5., 5., 5.]) """ return ivy.clip(self._data, x_min, x_max, out=out) def constant_pad( self: ivy.Array, /, pad_width: Iterable[Tuple[int]], *, value: Number = 0, out: Optional[ivy.Array] = None, ) -> ivy.Array: """ivy.Array instance method variant of ivy.constant_pad. This method simply wraps the function, and so the docstring for ivy.constant_pad also applies to this method with minimal changes. Parameters ---------- self Input array to pad. pad_width Number of values padded to the edges of each axis. Specified as ((before_1, after_1), … (before_N, after_N)), where N is number of axes of x. value The constant value to pad the array with. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret Padded array of rank equal to x with shape increased according to pad_width. Examples -------- >>> x = ivy.array([1., 2., 3.]) >>> y = x.constant_pad(pad_width = [[2, 3]]) >>> print(y) ivy.array([0., 0., 1., 2., 3., 0., 0., 0.]) """ return ivy.constant_pad(self._data, pad_width=pad_width, value=value, out=out) def repeat( self: ivy.Array, /, repeats: Union[int, Iterable[int]], *, axis: Optional[Union[int, Sequence[int]]] = None, out: Optional[ivy.Array] = None, ) -> ivy.Array: """ivy.Array instance method variant of ivy.repeat. This method simply wraps the function, and so the docstring for ivy.repeat also applies to this method with minimal changes. Parameters ---------- self Input array. repeats The number of repetitions for each element. repeats is broadcast to fit the shape of the given axis. axis The axis along which to repeat values. By default, use the flattened input array, and return a flat output array. 
out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret The repeated output array. Examples -------- >>> x = ivy.array([0., 1., 2.]) >>> y= x.repeat(2) >>> print(y) ivy.array([0., 0., 1., 1., 2., 2.]) """ return ivy.repeat(self._data, repeats=repeats, axis=axis, out=out) @handle_view def split( self: ivy.Array, /, *, copy: Optional[bool] = None, num_or_size_splits: Optional[ Union[int, Sequence[int], ivy.Array, ivy.NativeArray] ] = None, axis: int = 0, with_remainder: bool = False, ) -> List[ivy.Array]: """ivy.Array instance method variant of ivy.split. This method simply wraps the function, and so the docstring for ivy.split also applies to this method with minimal changes. Parameters ---------- self array to be divided into sub-arrays. copy boolean indicating whether or not to copy the input array. If True, the function must always copy. If False, the function must never copy. In case copy is False we avoid copying by returning a view of the input array. num_or_size_splits Number of equal arrays to divide the array into along the given axis if an integer. The size of each split element if a sequence of integers or 1-D array. Default is to divide into as many 1-dimensional arrays as the axis dimension. axis The axis along which to split, default is ``0``. with_remainder If the tensor does not split evenly, then store the last remainder entry. Default is ``False``. Returns ------- A list of sub-arrays. Examples -------- >>> x = ivy.array([4, 6, 5, 3]) >>> y = x.split() >>> print(y) [ivy.array([4]),ivy.array([6]),ivy.array([5]),ivy.array([3])] """ return ivy.split( self._data, copy=copy, num_or_size_splits=num_or_size_splits, axis=axis, with_remainder=with_remainder, ) @handle_view def swapaxes( self: ivy.Array, axis0: int, axis1: int, /, *, copy: Optional[bool] = None, out: Optional[ivy.Array] = None, ) -> ivy.Array: """ivy.Array instance method variant of ivy.swap_axes. This method simply wraps the function, and so the docstring for ivy.split also applies to this method with minimal changes. Parameters ---------- self Input array. axis0 First axis to be swapped. axis1 Second axis to be swapped. copy boolean indicating whether or not to copy the input array. If True, the function must always copy. If False, the function must never copy. In case copy is False we avoid copying by returning a view of the input array. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret x with its axes permuted. Examples -------- Using :class:`ivy.Array` instance method: >>> x = ivy.array([[0., 1., 2.]]) >>> y = x.swapaxes(0, 1) >>> print(y) ivy.array([[0.], [1.], [2.]]) >>> x = ivy.array([[[0,1],[2,3]],[[4,5],[6,7]]]) >>> y = x.swapaxes(0, 2) >>> print(y) ivy.array([[[0, 4], [2, 6]], [[1, 5], [3, 7]]]) """ return ivy.swapaxes(self._data, axis0, axis1, copy=copy, out=out) def tile( self: ivy.Array, /, repeats: Iterable[int], *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """ivy.Array instance method variant of ivy.tile. This method simply wraps the function, and so the docstring for ivy.tile also applies to this method with minimal changes. Parameters ---------- self Input array. repeats The number of repetitions of x along each axis. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret The tiled output array. 
Examples -------- >>> x = ivy.array([[0], [1], [2]]) >>> y = x.tile((3,2)) >>> print(y) ivy.array([[0,0], [1,1], [2,2], [0,0], [1,1], [2,2], [0,0], [1,1], [2,2]]) """ return ivy.tile(self._data, repeats=repeats, out=out) @handle_view def unstack( self: ivy.Array, /, *, copy: Optional[bool] = None, axis: int = 0, keepdims: bool = False, ) -> ivy.Array: """ivy.Array instance method variant of ivy.unstack. This method simply wraps the function, and so the docstring for ivy.unstack also applies to this method with minimal changes. Parameters ---------- self Input array to unstack. copy boolean indicating whether or not to copy the input array. If True, the function must always copy. If False, the function must never copy. In case copy is False we avoid copying by returning a view of the input array. axis Axis for which to unpack the array. keepdims Whether to keep dimension 1 in the unstack dimensions. Default is ``False``. Returns ------- ret List of arrays, unpacked along specified dimensions. Examples -------- >>> x = ivy.array([[1, 2], [3, 4]]) >>> y = x.unstack(axis=0) >>> print(y) [ivy.array([1, 2]), ivy.array([3, 4])] >>> x = ivy.array([[1, 2], [3, 4]]) >>> y = x.unstack(axis=1, keepdims=True) >>> print(y) [ivy.array([[1], [3]]), ivy.array([[2], [4]])] """ return ivy.unstack(self._data, copy=copy, axis=axis, keepdims=keepdims) def zero_pad( self: ivy.Array, /, pad_width: Iterable[Tuple[int]], *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """ivy.Array instance method variant of ivy.zero_pad. This method simply wraps the function, and so the docstring for ivy.zero_pad also applies to this method with minimal changes. Parameters ---------- self Input array to pad. pad_width Number of values padded to the edges of each axis. Specified as ((before_1, after_1), … (before_N, after_N)), where N is number of axes of x. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret Padded array of rank equal to x with shape increased according to pad_width. Examples -------- With :class:`ivy.Array` input: >>> x = ivy.array([1., 2., 3.,4, 5, 6]) >>> y = x.zero_pad(pad_width = [[2, 3]]) >>> print(y) ivy.array([0., 0., 1., 2., 3., 4., 5., 6., 0., 0., 0.]) """ return ivy.zero_pad(self._data, pad_width=pad_width, out=out)
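

# A minimal, illustrative usage sketch of the manipulation instance methods
# documented above. It assumes a backend has been set (NumPy here); the expected
# outputs in the comments are taken from the docstrings and may be printed
# slightly differently depending on the backend.
if __name__ == "__main__":
    import ivy

    ivy.set_backend("numpy")

    x = ivy.array([[0., 1., 2.], [3., 4., 5.]])
    print(x.reshape((3, 2)))   # ivy.array([[0., 1.], [2., 3.], [4., 5.]])
    print(x.roll(2, axis=-1))  # ivy.array([[1., 2., 0.], [4., 5., 3.]])

    y = ivy.array([0., 1., 2.])
    print(y.flip())        # ivy.array([2., 1., 0.])
    print(y.repeat(2))     # ivy.array([0., 0., 1., 1., 2., 2.])
    print(y.clip(0., 1.))  # ivy.array([0., 1., 1.])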
ivy/ivy/data_classes/array/manipulation.py/0
{ "file_path": "ivy/ivy/data_classes/array/manipulation.py", "repo_id": "ivy", "token_count": 12405 }
10
# local from typing import Union, Optional, Any, List, Dict import ivy from ivy.data_classes.container.base import ContainerBase # ToDo: implement all methods here as public instance methods class _ContainerWithDevice(ContainerBase): @staticmethod def _static_dev( x: ivy.Container, /, *, as_native: Union[bool, ivy.Container] = False ) -> ivy.Container: """ivy.Container static method variant of ivy.dev. This method simply wraps the function, and so the docstring for ivy.dev also applies to this method with minimal changes. Examples -------- >>> x = ivy.Container(a=ivy.array([[2, 3], [3, 5]]), ... b=ivy.native_array([1, 2, 4, 5, 7])) >>> as_native = ivy.Container(a=True, b=False) >>> y = ivy.Container.static_dev(x, as_native=as_native) >>> print(y) { a: device(type=cpu), b: cpu } """ return ContainerBase.cont_multi_map_in_function("dev", x, as_native=as_native) def dev( self: ivy.Container, as_native: Union[bool, ivy.Container] = False ) -> ivy.Container: """ivy.Container instance method variant of ivy.dev. This method simply wraps the function, and so the docstring for ivy.dev also applies to this method with minimal changes. Parameters ---------- self contaioner of arrays for which to get the device handle. as_native Whether or not to return the dev in native format. Default is ``False``. Examples -------- >>> x = ivy.Container(a=ivy.array([[2, 3, 1], [3, 5, 3]]), ... b=ivy.native_array([[1, 2], [4, 5]])) >>> as_native = ivy.Container(a=False, b=True) >>> y = x.dev(as_native=as_native) >>> print(y) { a:cpu, b:cpu } """ return self._static_dev(self, as_native=as_native) @staticmethod def _static_to_device( x: Union[ivy.Container, ivy.Array, ivy.NativeArray], device: Union[ivy.Device, ivy.NativeDevice, ivy.Container], /, *, key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None, to_apply: Union[bool, ivy.Container] = True, prune_unapplied: Union[bool, ivy.Container] = False, map_sequences: Union[bool, ivy.Container] = False, stream: Optional[Union[int, Any, ivy.Container]] = None, out: Optional[ivy.Container] = None, ) -> ivy.Container: """ivy.Container instance method variant of ivy.to_device. This method simply wraps the function, and so the docstring for ivy.to_device also applies to this method with minimal changes. Parameters ---------- x input array to be moved to the desired device device device to move the input array `x` to key_chains The key-chains to apply or not apply the method to. Default is ``None``. to_apply If True, the method will be applied to key_chains, otherwise key_chains will be skipped. Default is ``True``. prune_unapplied Whether to prune key_chains for which the function was not applied. Default is ``False``. map_sequences Whether to also map method to sequences (lists, tuples). Default is ``False``. stream stream object to use during copy. In addition to the types supported in array.__dlpack__(), implementations may choose to support any library-specific stream object with the caveat that any code using such an object would not be portable. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret input array x placed on the desired device Examples -------- >>> x = ivy.Container(a=ivy.array([[2, 3, 1], [3, 5, 3]]), ... 
b=ivy.native_array([[1, 2], [4, 5]])) >>> y = ivy.Container.static_to_device(x, 'cpu') >>> print(y.a.device, y.b.device) cpu cpu """ return ContainerBase.cont_multi_map_in_function( "to_device", x, device, key_chains=key_chains, to_apply=to_apply, prune_unapplied=prune_unapplied, map_sequences=map_sequences, stream=stream, out=out, ) def to_device( self: ivy.Container, device: Union[ivy.Device, ivy.NativeDevice, ivy.Container], key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None, to_apply: Union[bool, ivy.Container] = True, prune_unapplied: Union[bool, ivy.Container] = False, map_sequences: Union[bool, ivy.Container] = False, *, stream: Optional[Union[int, Any, ivy.Container]] = None, out: Optional[ivy.Container] = None, ) -> ivy.Container: """ivy.Container instance method variant of ivy.to_device. This method simply wraps the function, and so the docstring for ivy.to_device also applies to this method with minimal changes. Parameters ---------- x input array to be moved to the desired device device device to move the input array `x` to key_chains The key-chains to apply or not apply the method to. Default is ``None``. to_apply If True, the method will be applied to key_chains, otherwise key_chains will be skipped. Default is ``True``. prune_unapplied Whether to prune key_chains for which the function was not applied. Default is ``False``. map_sequences Whether to also map method to sequences (lists, tuples). Default is ``False``. stream stream object to use during copy. In addition to the types supported in array.__dlpack__(), implementations may choose to support any library-specific stream object with the caveat that any code using such an object would not be portable. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret input array x placed on the desired device Examples -------- >>> x = ivy.Container(a=ivy.array([[2, 3, 1], [3, 5, 3]]), ... b=ivy.native_array([[1, 2], [4, 5]])) >>> y = x.to_device('cpu') >>> print(y.a.device, y.b.device) cpu cpu """ return self._static_to_device( self, device, key_chains=key_chains, to_apply=to_apply, prune_unapplied=prune_unapplied, map_sequences=map_sequences, stream=stream, out=out, )
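

# A minimal, illustrative usage sketch of the container device helpers documented
# above. It assumes a CPU-only backend (NumPy here), so every leaf reports "cpu";
# the device strings will differ on GPU-enabled backends.
if __name__ == "__main__":
    import ivy

    ivy.set_backend("numpy")

    x = ivy.Container(
        a=ivy.array([[2, 3, 1], [3, 5, 3]]),
        b=ivy.native_array([[1, 2], [4, 5]]),
    )
    print(x.dev())  # per-leaf devices, e.g. {a: cpu, b: cpu}

    y = x.to_device("cpu")
    print(y.a.device, y.b.device)  # cpu cpu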
ivy/ivy/data_classes/container/device.py/0
{ "file_path": "ivy/ivy/data_classes/container/device.py", "repo_id": "ivy", "token_count": 3245 }
11
from ivy.data_classes.container.base import ContainerBase from typing import Union, List, Dict, Optional, Tuple import ivy class _ContainerWithNormsExperimental(ContainerBase): @staticmethod def static_l1_normalize( x: Union[ivy.Container, ivy.Array, ivy.NativeArray], axis: Optional[Union[int, ivy.Container]] = None, key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None, to_apply: Union[bool, ivy.Container] = True, prune_unapplied: Union[bool, ivy.Container] = False, map_sequences: Union[bool, ivy.Container] = False, out: Optional[ivy.Container] = None, ) -> ivy.Container: """ivy.Container static method variant of ivy.l1_normalize. This method simply wraps the function, and so the docstring for ivy.l1_normalize also applies to this method with minimal changes. Parameters ---------- x The input container with leaves to be normalized. axis The axis along which to normalize. key_chains The key-chains to apply or not apply the method to. Default is ``None``. to_apply If True, the method will be applied to key_chains, otherwise key_chains will be skipped. Default is ``True``. prune_unapplied Whether to prune key_chains for which the function was not applied. Default is ``False``. map_sequences Whether to also map method to sequences (lists, tuples). Default is ``False``. out optional output container, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret a container containing the normalized leaves. Examples -------- >>> x = ivy.Container(a=ivy.array([[0.5, 1.5, 2.5], [3.5, 4.5, 5.5]]))) ... b=ivy.array([[-1., -1.], [-1., -0.5]]])) >>> y = ivy.Container.static_l1_normalize(x, axis=1) >>> print(y) { a: ivy.array([[0.1, 0.3, 0.5], [0.35, 0.45, 0.55]]), b: ivy.array([[-0.5, -0.5], [-0.5, -0.25]]) } """ return ContainerBase.cont_multi_map_in_function( "l1_normalize", x, axis=axis, key_chains=key_chains, to_apply=to_apply, prune_unapplied=prune_unapplied, map_sequences=map_sequences, out=out, ) def l1_normalize( self: ivy.Container, axis: Optional[Union[int, ivy.Container]] = None, key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None, to_apply: Union[bool, ivy.Container] = True, prune_unapplied: Union[bool, ivy.Container] = False, map_sequences: Union[bool, ivy.Container] = False, out: Optional[ivy.Container] = None, ) -> ivy.Container: """ivy.Container instance method variant of ivy.l1_normalize. This method simply wraps the function, and so the docstring for ivy.l1_normalize also applies to this method with minimal changes. Parameters ---------- self The input container with leaves to be normalized. axis The axis along which to normalize. key_chains The key-chains to apply or not apply the method to. Default is None. to_apply If True, the method will be applied to key_chains, otherwise key_chains will be skipped. Default is True. prune_unapplied Whether to prune key_chains for which the function was not applied. Default is False. map_sequences Whether to also map method to sequences (lists, tuples). Default is False. out Optional output container, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret A container containing the normalized leaves. 
""" return self.static_l1_normalize( self, axis=axis, key_chains=key_chains, to_apply=to_apply, prune_unapplied=prune_unapplied, map_sequences=map_sequences, out=out, ) @staticmethod def static_l2_normalize( x: Union[ivy.Container, ivy.Array, ivy.NativeArray], axis: Optional[Union[int, ivy.Container]] = None, key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None, to_apply: Union[bool, ivy.Container] = True, prune_unapplied: Union[bool, ivy.Container] = False, map_sequences: Union[bool, ivy.Container] = False, out: Optional[ivy.Container] = None, ) -> ivy.Container: """ivy.Container static method variant of ivy.l2_normalize. This method simply wraps the function, and so the docstring for ivy.l2_normalize also applies to this method with minimal changes. Parameters ---------- x The input container with leaves to be normalized. axis The axis along which to normalize. key_chains The key-chains to apply or not apply the method to. Default is ``None``. to_apply If True, the method will be applied to key_chains, otherwise key_chains will be skipped. Default is ``True``. prune_unapplied Whether to prune key_chains for which the function was not applied. Default is ``False``. map_sequences Whether to also map method to sequences (lists, tuples). Default is ``False``. out optional output container, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret a container containing the normalized leaves. Examples -------- >>> x = ivy.Container(a=ivy.array([[0.5, 1.5, 2.5], [3.5, 4.5, 5.5]]))) ... b=ivy.array([[-1., -1.], [-1., -0.5]]])) >>> y = ivy.Container.static_l2_normalize(x, axis=1) >>> print(y) { a: ivy.array([[0.16903085, 0.50709254, 0.84515423], [0.44183609, 0.56807494, 0.69431382]]), b: ivy.array([[-0.70710677, -0.70710677], [-0.89442718, -0.44721359]]) } """ return ContainerBase.cont_multi_map_in_function( "l2_normalize", x, axis=axis, key_chains=key_chains, to_apply=to_apply, prune_unapplied=prune_unapplied, map_sequences=map_sequences, out=out, ) def l2_normalize( self: ivy.Container, axis: Optional[Union[int, ivy.Container]] = None, key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None, to_apply: Union[bool, ivy.Container] = True, prune_unapplied: Union[bool, ivy.Container] = False, map_sequences: Union[bool, ivy.Container] = False, out: Optional[ivy.Container] = None, ) -> ivy.Container: """ivy.Container instance method variant of ivy.l2_normalize. This method simply wraps the function, and so the docstring for ivy.l2_normalize also applies to this method with minimal changes. Parameters ---------- self The input container with leaves to be normalized. axis The axis along which to normalize. key_chains The key-chains to apply or not apply the method to. Default is ``None``. to_apply If True, the method will be applied to key_chains, otherwise key_chains will be skipped. Default is ``True``. prune_unapplied Whether to prune key_chains for which the function was not applied. Default is ``False``. map_sequences Whether to also map method to sequences (lists, tuples). Default is ``False``. out optional output container, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret a container containing the normalized leaves. Examples -------- >>> x = ivy.Container(a=ivy.array([[0.5, 1.5, 2.5], [3.5, 4.5, 5.5]]), ... 
b=ivy.array([[-1., -1.], [-1., -0.5]])) >>> y = x.l2_normalize(axis=1) >>> print(y) { a: ivy.array([[0.16903085, 0.50709254, 0.84515423], [0.44183609, 0.56807494, 0.69431382]]), b: ivy.array([[-0.70710677, -0.70710677], [-0.89442718, -0.44721359]]) } """ return self.static_l2_normalize( self, axis=axis, key_chains=key_chains, to_apply=to_apply, prune_unapplied=prune_unapplied, map_sequences=map_sequences, out=out, ) @staticmethod def static_batch_norm( x: Union[ivy.Array, ivy.NativeArray, ivy.Container], mean: Union[ivy.NativeArray, ivy.Array, ivy.Container], variance: Union[ivy.NativeArray, ivy.Array, ivy.Container], /, *, offset: Optional[Union[ivy.NativeArray, ivy.Array, ivy.Container]] = None, scale: Optional[Union[ivy.NativeArray, ivy.Array, ivy.Container]] = None, training: Union[bool, ivy.Container] = False, eps: Union[float, ivy.Container] = 1e-5, momentum: Union[float, ivy.Container] = 1e-1, data_format: Union[str, ivy.Container] = "NSC", out: Optional[ Tuple[ Union[ivy.Array, ivy.Container], Union[ivy.Array, ivy.Container], Union[ivy.Array, ivy.Container], ] ] = None, key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None, to_apply: Union[bool, ivy.Container] = True, prune_unapplied: Union[bool, ivy.Container] = False, map_sequences: Union[bool, ivy.Container] = False, ) -> Tuple[ivy.Container, ivy.Container, ivy.Container]: """ivy.Container static method variant of ivy.batch_norm. This method simply wraps the function, and so the docstring for ivy.batch_norm also applies to this method with minimal changes. Parameters ---------- x Input array of default shape (N, *S, C), where N is the batch dimension, *S corresponds to any number of spatial dimensions and C corresponds to the channel dimension. mean Mean array used for input's normalization. It can be of any shape braodcastable to (N,*S,C). variance Variance array used for input's normalization. It can be of any shape braodcastable to (N,*S,C). offset An offset array. If present, will be added to the normalized input. It can be of any shape broadcastable to (N,*S,C). scale A scale array. If present, the scale is applied to the normalized input. It can be of any shape broadcastable to (N,*S,C). training If true, calculate and use the mean and variance of `x`. Otherwise, use the provided `mean` and `variance`. eps A small float number to avoid dividing by 0. momentum the value used for the running_mean and running_var computation. Default value is 0.1. data_format The ordering of the dimensions in the input, one of "NSC" or "NCS", where N is the batch dimension, S represents any number of spatial dimensions and C is the channel dimension. Default is "NSC". out optional output arrays, for writing the result to. key_chains The key-chains to apply or not apply the method to. Default is ``None``. to_apply If True, the method will be applied to key_chains, otherwise key_chains will be skipped. Default is ``True``. prune_unapplied Whether to prune key_chains for which the function was not applied. Default is ``False``. map_sequences Whether to also map method to sequences (lists, tuples). Default is ``False``. Returns ------- ret Tuple of containers containing the normalized input, running mean, and running variance. 
""" return ContainerBase.cont_multi_map_in_function( "batch_norm", x, mean, variance, scale=scale, offset=offset, training=training, eps=eps, momentum=momentum, data_format=data_format, out=out, key_chains=key_chains, to_apply=to_apply, prune_unapplied=prune_unapplied, map_sequences=map_sequences, ) def batch_norm( self: Union[ivy.Array, ivy.NativeArray, ivy.Container], mean: Union[ivy.NativeArray, ivy.Array, ivy.Container], variance: Union[ivy.NativeArray, ivy.Array, ivy.Container], /, *, offset: Optional[Union[ivy.NativeArray, ivy.Array, ivy.Container]] = None, scale: Optional[Union[ivy.NativeArray, ivy.Array, ivy.Container]] = None, training: Union[bool, ivy.Container] = False, eps: Union[float, ivy.Container] = 1e-5, momentum: Union[float, ivy.Container] = 1e-1, data_format: Union[str, ivy.Container] = "NSC", out: Optional[ Tuple[ Union[ivy.Array, ivy.Container], Union[ivy.Array, ivy.Container], Union[ivy.Array, ivy.Container], ] ] = None, key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None, to_apply: Union[bool, ivy.Container] = True, prune_unapplied: Union[bool, ivy.Container] = False, map_sequences: Union[bool, ivy.Container] = False, ) -> Tuple[ivy.Container, ivy.Container, ivy.Container]: """ivy.Container instance method variant of ivy.batch_norm. This method simply wraps the function, and so the docstring for ivy.batch_norm also applies to this method with minimal changes. Parameters ---------- x Input array of default shape (N, *S, C), where N is the batch dimension, *S corresponds to any number of spatial dimensions and C corresponds to the channel dimension. mean Mean array used for input's normalization. It can be of any shape braodcastable to (N,*S,C). variance Variance array used for input's normalization. It can be of any shape braodcastable to (N,*S,C). offset An offset array. If present, will be added to the normalized input. It can be of any shape broadcastable to (N,*S,C). scale A scale array. If present, the scale is applied to the normalized input. It can be of any shape broadcastable to (N,*S,C). training If true, calculate and use the mean and variance of `x`. Otherwise, use the provided `mean` and `variance`. eps A small float number to avoid dividing by 0. momentum the value used for the running_mean and running_var computation. Default value is 0.1. data_format The ordering of the dimensions in the input, one of "NSC" or "NCS", where N is the batch dimension, S represents any number of spatial dimensions and C is the channel dimension. Default is "NSC". out optional output arrays, for writing the result to. key_chains The key-chains to apply or not apply the method to. Default is ``None``. to_apply If True, the method will be applied to key_chains, otherwise key_chains will be skipped. Default is ``True``. prune_unapplied Whether to prune key_chains for which the function was not applied. Default is ``False``. map_sequences Whether to also map method to sequences (lists, tuples). Default is ``False``. Returns ------- ret Tuple of containers containing the normalized input, running mean, and running variance. 
""" return self.static_batch_norm( self, mean, variance, scale=scale, offset=offset, training=training, eps=eps, momentum=momentum, data_format=data_format, out=out, key_chains=key_chains, to_apply=to_apply, prune_unapplied=prune_unapplied, map_sequences=map_sequences, ) @staticmethod def static_instance_norm( x: Union[ivy.Array, ivy.NativeArray, ivy.Container], mean: Union[ivy.NativeArray, ivy.Array, ivy.Container], variance: Union[ivy.NativeArray, ivy.Array, ivy.Container], /, *, offset: Optional[Union[ivy.NativeArray, ivy.Array, ivy.Container]] = None, scale: Optional[Union[ivy.NativeArray, ivy.Array, ivy.Container]] = None, training: Union[bool, ivy.Container] = False, eps: Union[float, ivy.Container] = 1e-5, momentum: Union[float, ivy.Container] = 1e-1, data_format: Union[str, ivy.Container] = "NSC", out: Optional[ Tuple[ Union[ivy.Array, ivy.Container], Union[ivy.Array, ivy.Container], Union[ivy.Array, ivy.Container], ] ] = None, key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None, to_apply: Union[bool, ivy.Container] = True, prune_unapplied: Union[bool, ivy.Container] = False, map_sequences: Union[bool, ivy.Container] = False, ) -> Tuple[ivy.Container, ivy.Container, ivy.Container]: """ivy.Container static method variant of ivy.instance_norm. This method simply wraps the function, and so the docstring for ivy.instance_norm also applies to this method with minimal changes. Parameters ---------- x Input array of shape default (N, *S, C), where N is the batch dimension, *S corresponds to any number of spatial dimensions and C corresponds to the channel dimension. mean Mean array of size C used for input's normalization. variance Variance array of size C used for input's normalization. offset An offset array of size C. If present, will be added to the normalized input. scale A scale array of size C. If present, the scale is applied to the normalized input. training If true, calculate and use the mean and variance of `x`. Otherwise, use the provided `mean` and `variance`. eps A small float number to avoid dividing by 0. momentum the value used for the running_mean and running_var computation. Default value is 0.1. data_format The ordering of the dimensions in the input, one of "NSC" or "NCS", where N is the batch dimension, S represents any number of spatial dimensions and C is the channel dimension. Default is "NSC". out optional output arrays, for writing the result to. key_chains The key-chains to apply or not apply the method to. Default is ``None``. to_apply If True, the method will be applied to key_chains, otherwise key_chains will be skipped. Default is ``True``. prune_unapplied Whether to prune key_chains for which the function was not applied. Default is ``False``. map_sequences Whether to also map method to sequences (lists, tuples). Default is ``False``. Returns ------- ret Tuple of containers containing the normalized input, running mean, and running variance. 
""" return ContainerBase.cont_multi_map_in_function( "instance_norm", x, mean, variance, scale=scale, offset=offset, training=training, eps=eps, momentum=momentum, out=out, data_format=data_format, key_chains=key_chains, to_apply=to_apply, prune_unapplied=prune_unapplied, map_sequences=map_sequences, ) def instance_norm( self: Union[ivy.Array, ivy.NativeArray, ivy.Container], mean: Union[ivy.NativeArray, ivy.Array, ivy.Container], variance: Union[ivy.NativeArray, ivy.Array, ivy.Container], /, *, offset: Optional[Union[ivy.NativeArray, ivy.Array, ivy.Container]] = None, scale: Optional[Union[ivy.NativeArray, ivy.Array, ivy.Container]] = None, training: Union[bool, ivy.Container] = False, eps: Union[float, ivy.Container] = 1e-5, momentum: Union[float, ivy.Container] = 1e-1, data_format: Union[str, ivy.Container] = "NSC", out: Optional[ Tuple[ Union[ivy.Array, ivy.Container], Union[ivy.Array, ivy.Container], Union[ivy.Array, ivy.Container], ] ] = None, key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None, to_apply: Union[bool, ivy.Container] = True, prune_unapplied: Union[bool, ivy.Container] = False, map_sequences: Union[bool, ivy.Container] = False, ) -> Tuple[ivy.Container, ivy.Container, ivy.Container]: """ivy.Container instance method variant of ivy.instance_norm. This method simply wraps the function, and so the docstring for ivy.instance_norm also applies to this method with minimal changes. Parameters ---------- self Input array of shape default (N, *S, C), where N is the batch dimension, *S corresponds to any number of spatial dimensions and C corresponds to the channel dimension. mean Mean array of size C used for input's normalization. variance Variance array of size C used for input's normalization. offset An offset array of size C. If present, will be added to the normalized input. scale A scale array of size C. If present, the scale is applied to the normalized input. training If true, calculate and use the mean and variance of `x`. Otherwise, use the provided `mean` and `variance`. eps A small float number to avoid dividing by 0. momentum the value used for the running_mean and running_var computation. Default value is 0.1. data_format The ordering of the dimensions in the input, one of "NSC" or "NCS", where N is the batch dimension, S represents any number of spatial dimensions and C is the channel dimension. Default is "NSC". out optional output arrays, for writing the result to. key_chains The key-chains to apply or not apply the method to. Default is ``None``. to_apply If True, the method will be applied to key_chains, otherwise key_chains will be skipped. Default is ``True``. prune_unapplied Whether to prune key_chains for which the function was not applied. Default is ``False``. map_sequences Whether to also map method to sequences (lists, tuples). Default is ``False``. Returns ------- ret Tuple of containers containing the normalized input, running mean, and running variance. 
""" return self.static_instance_norm( self, mean, variance, scale=scale, offset=offset, training=training, eps=eps, momentum=momentum, out=out, data_format=data_format, key_chains=key_chains, to_apply=to_apply, prune_unapplied=prune_unapplied, map_sequences=map_sequences, ) @staticmethod def static_group_norm( x: Union[ivy.Array, ivy.NativeArray, ivy.Container], num_groups: Union[int, ivy.Container] = 1, /, *, offset: Optional[Union[ivy.NativeArray, ivy.Array, ivy.Container]] = None, scale: Optional[Union[ivy.NativeArray, ivy.Array, ivy.Container]] = None, eps: Union[float, ivy.Container] = 1e-5, data_format: Union[str, ivy.Container] = "NSC", out: Optional[Union[ivy.Array, ivy.Container]] = None, key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None, to_apply: Union[bool, ivy.Container] = True, prune_unapplied: Union[bool, ivy.Container] = False, map_sequences: Union[bool, ivy.Container] = False, ) -> ivy.Container: """ivy.Container static method variant of ivy.group_norm. This method simply wraps the function, and so the docstring for ivy.group_norm also applies to this method with minimal changes. Parameters ---------- x Input array of default shape (N, *S, C), where N is the batch dimension, *S corresponds to any number of spatial dimensions and C corresponds to the channel dimension. num_groups number of groups to separate the channels into offset An offset array of size C. If present, will be added to the normalized input. scale A scale array of size C. If present, the scale is applied to the normalized input. eps A small float number to avoid dividing by 0. data_format The ordering of the dimensions in the input, one of "NSC" or "NCS", where N is the batch dimension, S represents any number of spatial dimensions and C is the channel dimension. Default is "NSC". out optional output arrays, for writing the result to. Returns ------- ret The normalized array. """ return ContainerBase.cont_multi_map_in_function( "group_norm", x, num_groups, scale=scale, offset=offset, eps=eps, out=out, data_format=data_format, key_chains=key_chains, to_apply=to_apply, prune_unapplied=prune_unapplied, map_sequences=map_sequences, ) def group_norm( self: Union[ivy.Array, ivy.NativeArray, ivy.Container], num_groups: Union[int, ivy.Container] = 1, /, *, offset: Optional[Union[ivy.NativeArray, ivy.Array, ivy.Container]] = None, scale: Optional[Union[ivy.NativeArray, ivy.Array, ivy.Container]] = None, eps: Union[float, ivy.Container] = 1e-5, data_format: Union[str, ivy.Container] = "NSC", out: Optional[Union[ivy.Array, ivy.Container]] = None, key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None, to_apply: Union[bool, ivy.Container] = True, prune_unapplied: Union[bool, ivy.Container] = False, map_sequences: Union[bool, ivy.Container] = False, ) -> ivy.Container: """ivy.Container static method variant of ivy.group_norm. This method simply wraps the function, and so the docstring for ivy.group_norm also applies to this method with minimal changes. Parameters ---------- self Input array of default shape (N, *S, C), where N is the batch dimension, *S corresponds to any number of spatial dimensions and C corresponds to the channel dimension. num_groups number of groups to separate the channels into offset An offset array of size C. If present, will be added to the normalized input. scale A scale array of size C. If present, the scale is applied to the normalized input. eps A small float number to avoid dividing by 0. 
        data_format
            The ordering of the dimensions in the input, one of "NSC" or "NCS",
            where N is the batch dimension, S represents any number of spatial
            dimensions and C is the channel dimension. Default is "NSC".
        out
            optional output arrays, for writing the result to.

        Returns
        -------
        ret
            The normalized array.
        """
        return self.static_group_norm(
            self,
            num_groups,
            scale=scale,
            offset=offset,
            eps=eps,
            out=out,
            data_format=data_format,
            key_chains=key_chains,
            to_apply=to_apply,
            prune_unapplied=prune_unapplied,
            map_sequences=map_sequences,
        )

    @staticmethod
    def static_lp_normalize(
        x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
        p: Union[float, ivy.Container] = 2,
        axis: Optional[Union[int, ivy.Container]] = None,
        key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
        to_apply: Union[bool, ivy.Container] = True,
        prune_unapplied: Union[bool, ivy.Container] = False,
        map_sequences: Union[bool, ivy.Container] = False,
        out: Optional[ivy.Container] = None,
    ) -> ivy.Container:
        """ivy.Container static method variant of ivy.lp_normalize. This method
        simply wraps the function, and so the docstring for ivy.lp_normalize
        also applies to this method with minimal changes.

        Parameters
        ----------
        x
            The input container with leaves to be normalized.
        p
            The order of the norm.
        axis
            The axis along which to normalize.
        key_chains
            The key-chains to apply or not apply the method to. Default is ``None``.
        to_apply
            If True, the method will be applied to key_chains, otherwise key_chains
            will be skipped. Default is ``True``.
        prune_unapplied
            Whether to prune key_chains for which the function was not applied.
            Default is ``False``.
        map_sequences
            Whether to also map method to sequences (lists, tuples).
            Default is ``False``.
        out
            optional output container, for writing the result to. It must have a shape
            that the inputs broadcast to.

        Returns
        -------
        ret
            a container containing the normalized leaves.

        Examples
        --------
        >>> x = ivy.Container(a=ivy.array([[0.5, 1.5, 2.5], [3.5, 4.5, 5.5]]),
        ...                   b=ivy.array([[-1., -1.], [-1., -0.5]]))
        >>> y = ivy.Container.static_lp_normalize(x, p=1, axis=1)
        >>> print(y)
        {
            a: ivy.array([[0.12500000, 0.37500000, 0.62500000],
                          [0.27500000, 0.35000000, 0.42500000]]),
            b: ivy.array([[-1.0000000, -1.0000000],
                          [-0.5000000, -0.2500000]])
        }
        """
        return ContainerBase.cont_multi_map_in_function(
            "lp_normalize",
            x,
            p=p,
            axis=axis,
            key_chains=key_chains,
            to_apply=to_apply,
            prune_unapplied=prune_unapplied,
            map_sequences=map_sequences,
            out=out,
        )

    def lp_normalize(
        self: ivy.Container,
        p: Union[float, ivy.Container] = 2,
        axis: Optional[Union[int, ivy.Container]] = None,
        key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None,
        to_apply: Union[bool, ivy.Container] = True,
        prune_unapplied: Union[bool, ivy.Container] = False,
        map_sequences: Union[bool, ivy.Container] = False,
        out: Optional[ivy.Container] = None,
    ) -> ivy.Container:
        """ivy.Container instance method variant of ivy.lp_normalize. This method
        simply wraps the function, and so the docstring for ivy.lp_normalize
        also applies to this method with minimal changes.

        Parameters
        ----------
        self
            The input container with leaves to be normalized.
        p
            The order of the norm.
        axis
            The axis along which to normalize.
        key_chains
            The key-chains to apply or not apply the method to. Default is ``None``.
        to_apply
            If True, the method will be applied to key_chains, otherwise key_chains
            will be skipped. Default is ``True``.
        prune_unapplied
            Whether to prune key_chains for which the function was not applied.
            Default is ``False``.
map_sequences Whether to also map method to sequences (lists, tuples). Default is ``False``. out optional output container, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret a container containing the normalized leaves. Examples -------- >>> x = ivy.Container(a=ivy.array([[0.5, 1.5, 2.5], [3.5, 4.5, 5.5]]), ... b=ivy.array([[-1., -1.], [-1., -0.5]])) >>> y = x.lp_normalize(axis=1) >>> print(y) { a: ivy.array([[0.16903085, 0.50709254, 0.84515423], [0.44183609, 0.56807494, 0.69431382]]), b: ivy.array([[-0.70710677, -0.70710677], [-0.89442718, -0.44721359]]) } """ return self.static_lp_normalize( self, p=p, axis=axis, key_chains=key_chains, to_apply=to_apply, prune_unapplied=prune_unapplied, map_sequences=map_sequences, out=out, )
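

# A minimal, illustrative usage sketch of the normalization helpers documented
# above. It assumes a backend has been set (NumPy here); the expected behaviour is
# taken from the l2_normalize docstring, and exact values may differ slightly in
# precision between backends.
if __name__ == "__main__":
    import ivy

    ivy.set_backend("numpy")

    x = ivy.Container(
        a=ivy.array([[0.5, 1.5, 2.5], [3.5, 4.5, 5.5]]),
        b=ivy.array([[-1., -1.], [-1., -0.5]]),
    )
    print(x.l2_normalize(axis=1))       # each row scaled to unit L2 norm
    print(x.lp_normalize(p=2, axis=1))  # same result via the generic L-p entry point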
ivy/ivy/data_classes/container/experimental/norms.py/0
{ "file_path": "ivy/ivy/data_classes/container/experimental/norms.py", "repo_id": "ivy", "token_count": 16306 }
12
# global from numbers import Number from typing import Optional, Union, List, Dict # local import ivy from ivy.data_classes.container.base import ContainerBase # noinspection PyMissingConstructor class _ContainerWithSearching(ContainerBase): @staticmethod def _static_argmax( x: Union[ivy.Container, ivy.Array, ivy.NativeArray], /, *, axis: Optional[Union[int, ivy.Container]] = None, keepdims: Union[bool, ivy.Container] = False, dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype, ivy.Container]] = None, select_last_index: Union[bool, ivy.Container] = False, out: Optional[ivy.Container] = None, ) -> ivy.Container: """ivy.Container static method variant of ivy.argmax. This method simply wraps the function, and so the docstring for ivy.argmax also applies to this method with minimal changes. Parameters ---------- x input array or container. Should have a numeric data type. axis axis along which to search. If None, the function must return the index of the maximum value of the flattened array. Default: ``None``. keepdims If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the array. dtype Optional data type of the output array. out If provided, the result will be inserted into this array. It should be of the appropriate shape and dtype. Returns ------- ret a container containing the indices of the maximum values across the specified axis. Examples -------- >>> x = ivy.Container(a=ivy.array([[4., 0., -1.], [2., -3., 6]]),\ ... b=ivy.array([[1., 2., 3.], [1., 1., 1.]]) >>> y = ivy.Container.static_argmax(x, axis=1, keepdims=True) >>> print(y) { a: ivy.array([[0], [2]]), b: ivy.array([[2], [0]]) } """ return ContainerBase.cont_multi_map_in_function( "argmax", x, axis=axis, keepdims=keepdims, dtype=dtype, select_last_index=select_last_index, out=out, ) def argmax( self: ivy.Container, /, *, axis: Optional[Union[int, ivy.Container]] = None, keepdims: Union[bool, ivy.Container] = False, dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype, ivy.Container]] = None, select_last_index: Union[bool, ivy.Container] = False, out: Optional[ivy.Container] = None, ) -> ivy.Container: """ivy.Container instance method variant of ivy.argmax. This method simply wraps the function, and so the docstring for ivy.argmax also applies to this method with minimal changes. Parameters ---------- self input array or container. Should have a numeric data type. axis axis along which to search. If None, the function must return the index of the maximum value of the flattened array. Default: ``None``. keepdims If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the array. dtype Optional output dtype of the container. out If provided, the result will be inserted into this array. It should be of the appropriate shape and dtype. Returns ------- ret a container containing the indices of the maximum values across the specified axis. 

        Examples
        --------
        >>> a = ivy.array([[4., 0., -1.], [2., -3., 6]])
        >>> b = ivy.array([[1., 2., 3.], [1., 1., 1.]])
        >>> x = ivy.Container(a=a, b=b)
        >>> y = x.argmax(axis=1, keepdims=True)
        >>> print(y)
        {
            a: ivy.array([[0], [2]]),
            b: ivy.array([[2], [0]])
        }
        """
        return self._static_argmax(
            self,
            axis=axis,
            keepdims=keepdims,
            dtype=dtype,
            select_last_index=select_last_index,
            out=out,
        )

    @staticmethod
    def _static_argmin(
        x: Union[ivy.Container, ivy.Array, ivy.NativeArray],
        /,
        *,
        axis: Optional[Union[int, ivy.Container]] = None,
        keepdims: Union[bool, ivy.Container] = False,
        dtype: Optional[Union[ivy.int32, ivy.int64, ivy.Container]] = None,
        select_last_index: Union[bool, ivy.Container] = False,
        out: Optional[ivy.Container] = None,
    ) -> ivy.Container:
        """ivy.Container static method variant of ivy.argmin. This method simply
        wraps the function, and so the docstring for ivy.argmin also applies to
        this method with minimal changes.

        Parameters
        ----------
        x
            input array or container. Should have a numeric data type.
        axis
            axis along which to search. If None, the function must return the index
            of the minimum value of the flattened array. Default = None.
        keepdims
            if True, the reduced axes (dimensions) must be included in the result as
            singleton dimensions, and, accordingly, the result must be compatible
            with the input array (see Broadcasting). Otherwise, if False, the reduced
            axes (dimensions) must not be included in the result. Default = False.
        dtype
            An optional output_dtype from: int32, int64. Defaults to int64.
        out
            optional output container, for writing the result to. It must have a
            shape that the inputs broadcast to.

        Returns
        -------
        ret
            a container containing the indices of the minimum values across the
            specified axis.

        Examples
        --------
        >>> x = ivy.Container(a=ivy.array([[4., 0., -1.], [2., -3., 6]]),\
        ...                   b=ivy.array([[1., 2., 3.], [1., 1., 1.]]))
        >>> y = ivy.Container.static_argmin(x, axis=1, keepdims=True)
        >>> print(y)
        {
            a: ivy.array([[2], [1]]),
            b: ivy.array([[0], [0]])
        }
        """
        return ContainerBase.cont_multi_map_in_function(
            "argmin",
            x,
            axis=axis,
            keepdims=keepdims,
            dtype=dtype,
            select_last_index=select_last_index,
            out=out,
        )

    def argmin(
        self: ivy.Container,
        /,
        *,
        axis: Optional[Union[int, ivy.Container]] = None,
        keepdims: Union[bool, ivy.Container] = False,
        dtype: Optional[Union[ivy.Dtype, ivy.NativeDtype, ivy.Container]] = None,
        select_last_index: Union[bool, ivy.Container] = False,
        out: Optional[ivy.Container] = None,
    ) -> ivy.Container:
        """ivy.Container instance method variant of ivy.argmin. This method simply
        wraps the function, and so the docstring for ivy.argmin also applies to
        this method with minimal changes.

        Parameters
        ----------
        self
            input array or container. Should have a numeric data type.
        axis
            axis along which to search. If None, the function must return the index
            of the minimum value of the flattened array. Default = None.
        keepdims
            if True, the reduced axes (dimensions) must be included in the result as
            singleton dimensions, and, accordingly, the result must be compatible
            with the input array (see Broadcasting). Otherwise, if False, the reduced
            axes (dimensions) must not be included in the result. Default = False.
        dtype
            An optional output_dtype from: int32, int64. Defaults to int64.
        out
            optional output container, for writing the result to. It must have a
            shape that the inputs broadcast to.

        Returns
        -------
        ret
            a container containing the indices of the minimum values across the
            specified axis.
Examples -------- Using :class:`ivy.Container` instance method: >>> x = ivy.Container(a=ivy.array([0., -1., 2.]), b=ivy.array([3., 4., 5.])) >>> y = x.argmin() >>> print(y) { a: ivy.array(1), b: ivy.array(0) } >>> x = ivy.Container(a=ivy.array([[4., 0., -1.], [2., -3., 6]]), ... b=ivy.array([[1., 2., 3.], [1., 1., 1.]])) >>> y = x.argmin(axis=1, keepdims=True) >>> print(y) { a: ivy.array([[2], [1]]), b: ivy.array([[0], [0]]) } """ return self._static_argmin( self, axis=axis, keepdims=keepdims, dtype=dtype, select_last_index=select_last_index, out=out, ) @staticmethod def _static_nonzero( x: Union[ivy.Container, ivy.Array, ivy.NativeArray], /, *, as_tuple: Union[bool, ivy.Container] = True, size: Optional[Union[int, ivy.Container]] = None, fill_value: Union[Number, ivy.Container] = 0, ) -> ivy.Container: """ivy.Container static method variant of ivy.nonzero. This method simply wraps the function, and so the docstring for ivy.nonzero also applies to this method with minimal changes. Parameters ---------- x input array or container. Should have a numeric data type. as_tuple if True, the output is returned as a tuple of indices, one for each dimension of the input, containing the indices of the true elements in that dimension. If False, the coordinates are returned in a (N, ndim) array, where N is the number of true elements. Default = True. size if specified, the function will return an array of shape (size, ndim). If the number of non-zero elements is fewer than size, the remaining elements will be filled with fill_value. Default = None. fill_value when size is specified and there are fewer than size number of elements, the remaining elements in the output array will be filled with fill_value. Default = 0. Returns ------- ret a container containing the indices of the nonzero values. """ return ContainerBase.cont_multi_map_in_function( "nonzero", x, as_tuple=as_tuple, size=size, fill_value=fill_value ) def nonzero( self: ivy.Container, /, *, as_tuple: Union[bool, ivy.Container] = True, size: Optional[Union[int, ivy.Container]] = None, fill_value: Union[Number, ivy.Container] = 0, ) -> ivy.Container: """ivy.Container instance method variant of ivy.nonzero. This method simply wraps the function, and so the docstring for ivy.nonzero also applies to this method with minimal changes. Parameters ---------- self input array or container. Should have a numeric data type. as_tuple if True, the output is returned as a tuple of indices, one for each dimension of the input, containing the indices of the true elements in that dimension. If False, the coordinates are returned in a (N, ndim) array, where N is the number of true elements. Default = True. size if specified, the function will return an array of shape (size, ndim). If the number of non-zero elements is fewer than size, the remaining elements will be filled with fill_value. Default = None. fill_value when size is specified and there are fewer than size number of elements, the remaining elements in the output array will be filled with fill_value. Default = 0. Returns ------- ret a container containing the indices of the nonzero values. """ return self._static_nonzero( self, as_tuple=as_tuple, size=size, fill_value=fill_value ) @staticmethod def _static_where( condition: Union[ivy.Container, ivy.Array, ivy.NativeArray], x1: Union[ivy.Container, ivy.Array, ivy.NativeArray], x2: Union[ivy.Container, ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Container] = None, ) -> ivy.Container: """ivy.Container static method variant of ivy.where. 
This method simply wraps the function, and so the docstring for ivy.where also applies to this method with minimal changes. Parameters ---------- condition input array or container. Should have a boolean data type. x1 input array or container. Should have a numeric data type. x2 input array or container. Should have a numeric data type. out optional output container, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret a container containing the values of x1 where condition is True, and x2 where condition is False. Examples -------- >>> x1 = ivy.Container(a=ivy.array([3, 1, 5]), b=ivy.array([2, 4, 6])) >>> x2 = ivy.Container(a=ivy.array([0, 7, 2]), b=ivy.array([3, 8, 5])) >>> res = ivy.Container.static_where((x1.a > x2.a), x1, x2) >>> print(res) { a: ivy.array([3, 7, 5]), b: ivy.array([2, 8, 6]) } """ return ContainerBase.cont_multi_map_in_function( "where", condition, x1, x2, out=out ) def where( self: ivy.Container, x1: Union[ivy.Container, ivy.Array, ivy.NativeArray], x2: Union[ivy.Container, ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Container] = None, ) -> ivy.Container: """ivy.Container instance method variant of ivy.where. This method simply wraps the function, and so the docstring for ivy.where also applies to this method with minimal changes. Parameters ---------- self input array or container. Should have a boolean data type. x1 input array or container. Should have a numeric data type. x2 input array or container. Should have a numeric data type. out optional output container, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret a container containing the values of x1 where condition is True, and x2 where condition is False. Examples -------- >>> x1 = ivy.Container(a=ivy.array([3, 1, 5]), b=ivy.array([2, 4, 6])) >>> x2 = ivy.Container(a=ivy.array([0, 7, 2]), b=ivy.array([3, 8, 5])) >>> res = x1.where((x1.a > x2.a), x2) >>> print(res) { a: ivy.array([1, 0, 1]), b: ivy.array([1, 0, 1]) } """ return self._static_where(self, x1, x2, out=out) # Extra # # ----- # @staticmethod def _static_argwhere( x: ivy.Container, /, *, key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None, to_apply: Union[bool, ivy.Container] = True, prune_unapplied: Union[bool, ivy.Container] = False, map_sequences: Union[bool, ivy.Container] = False, out: Optional[ivy.Container] = None, ) -> ivy.Container: """ivy.Container static method variant of ivy.argwhere. This method simply wraps the function, and so the docstring for ivy.argwhere also applies to this method with minimal changes. Parameters ---------- x Boolean array, for which indices are desired. key_chains The key-chains to apply or not apply the method to. Default is ``None``. to_apply If True, the method will be applied to key_chains, otherwise key_chains will be skipped. Default is ``True``. prune_unapplied Whether to prune key_chains for which the function was not applied. Default is False. map_sequences Whether to also map method to sequences (lists, tuples). Default is ``False``. Returns ------- ret Indices for where the boolean array is True. 
Examples -------- Using :class:`ivy.Container` instance method >>> x = ivy.Container(a=ivy.array([1, 2]), b=ivy.array([3, 4])) >>> res = ivy.Container.static_argwhere(x) >>> print(res) { a: ivy.array([[0], [1]]), b: ivy.array([[0], [1]]) } >>> x = ivy.Container(a=ivy.array([1, 0]), b=ivy.array([3, 4])) >>> res = ivy.Container.static_argwhere(x) >>> print(res) { a: ivy.array([[0]]), b: ivy.array([[0], [1]]) } """ return ContainerBase.cont_multi_map_in_function( "argwhere", x, key_chains=key_chains, to_apply=to_apply, prune_unapplied=prune_unapplied, map_sequences=map_sequences, out=out, ) def argwhere( self: ivy.Container, /, *, key_chains: Optional[Union[List[str], Dict[str, str], ivy.Container]] = None, to_apply: Union[bool, ivy.Container] = True, prune_unapplied: Union[bool, ivy.Container] = False, map_sequences: Union[bool, ivy.Container] = False, out: Optional[ivy.Container] = None, ): """ivy.Container instance method variant of ivy.argwhere. This method simply wraps the function, and so the docstring for ivy.argwhere also applies to this method with minimal changes. Parameters ---------- self Boolean array, for which indices are desired. key_chains The key-chains to apply or not apply the method to. Default is ``None``. to_apply If True, the method will be applied to key_chains, otherwise key_chains will be skipped. Default is ``True``. prune_unapplied Whether to prune key_chains for which the function was not applied. Default is False. map_sequences Whether to also map method to sequences (lists, tuples). Default is ``False``. Returns ------- ret Indices for where the boolean array is True. Examples -------- Using :class:`ivy.Container` instance method >>> x = ivy.Container(a=ivy.array([1, 2]), b=ivy.array([3, 4])) >>> res = x.argwhere() >>> print(res) { a: ivy.array([[0], [1]]), b: ivy.array([[0], [1]]) } >>> x = ivy.Container(a=ivy.array([1, 0]), b=ivy.array([3, 4])) >>> res = x.argwhere() >>> print(res) { a: ivy.array([[0]]), b: ivy.array([[0], [1]]) } """ return self._static_argwhere( self, key_chains=key_chains, to_apply=to_apply, prune_unapplied=prune_unapplied, map_sequences=map_sequences, out=out, )
ivy/ivy/data_classes/container/searching.py/0
{ "file_path": "ivy/ivy/data_classes/container/searching.py", "repo_id": "ivy", "token_count": 9360 }
13
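All of the container searching methods in the file above delegate to `ContainerBase.cont_multi_map_in_function`, which applies the underlying ivy function to every leaf of the container. The following is only an illustrative stand-in for that dispatch, using a plain dict and NumPy rather than ivy's real machinery; `multi_map` is a hypothetical helper, and `keepdims` for `np.argmax` needs NumPy >= 1.22:

import numpy as np

def multi_map(fn, container, **kwargs):
    # apply fn leaf-by-leaf while keeping the key structure -- the idea behind
    # cont_multi_map_in_function, minus nesting, dtype handling and out support
    return {key: fn(leaf, **kwargs) for key, leaf in container.items()}

x = {"a": np.array([[4., 0., -1.], [2., -3., 6.]]),
     "b": np.array([[1., 2., 3.], [1., 1., 1.]])}
print(multi_map(np.argmax, x, axis=1, keepdims=True))
# {'a': array([[0], [2]]), 'b': array([[2], [0]])}  -- same values as the docstring example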
# local from .base import NestedArrayBase class NestedArray(NestedArrayBase): def __init__(self, data, nested_rank, inner_shape, dtype, device, internal=False): NestedArrayBase.__init__( self, data, nested_rank, inner_shape, dtype, device, internal ) @classmethod def from_row_lengths(cls, values, row_lengths): ivy_arrays = [] for i in range(len(row_lengths)): ivy_arrays.append(values[: row_lengths[i]]) values = values[row_lengths[i] :] return cls.nested_array(ivy_arrays) @classmethod def from_row_splits(cls, values, row_splits): row_lengths = [] for i in range(1, len(row_splits)): row_lengths.append(row_splits[i] - row_splits[i - 1]) return cls.from_row_lengths(values, row_lengths)
ivy/ivy/data_classes/nested_array/nested_array.py/0
{ "file_path": "ivy/ivy/data_classes/nested_array/nested_array.py", "repo_id": "ivy", "token_count": 382 }
14
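`from_row_splits` in the file above simply converts boundary offsets into per-row lengths and reuses `from_row_lengths`. A small sketch of that conversion with plain Python lists (the values and splits below are made up for illustration):

values = [3, 1, 4, 1, 5, 9]
row_splits = [0, 2, 2, 6]   # offsets into `values`; row i spans [splits[i], splits[i+1])
row_lengths = [row_splits[i] - row_splits[i - 1] for i in range(1, len(row_splits))]
print(row_lengths)          # [2, 0, 4]

rows, rest = [], list(values)
for n in row_lengths:       # same slicing loop as from_row_lengths
    rows.append(rest[:n])
    rest = rest[n:]
print(rows)                 # [[3, 1], [], [4, 1, 5, 9]]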
//! Nodes from the computation graph. //! //! An `XlaOp` value represents a node/operand in the computation graph, e.g. it can be the sum of two //! other nodes, a constant value, an input parameter, etc. //! //! For details on the semantics, see //! [operation_semantics](https://www.tensorflow.org/xla/operation_semantics). use super::{ArrayShape, PrimitiveType, Shape, XlaBuilder, XlaComputation}; use crate::{c_lib, Error, Result}; use pyo3::prelude::*; #[pyclass(unsendable)] pub struct XlaOp { pub(super) op: c_lib::xla_op, pub(super) builder: XlaBuilder, } macro_rules! extract_dims { ($fn_name:ident, $cnt:tt, $dims:expr, $out_type:ty) => { pub fn $fn_name(&self) -> Result<$out_type> { let dims = self.builder.get_dims(self)?; if dims.len() != $cnt { let dims: Vec<_> = dims.iter().map(|d| *d as i64).collect(); Err(Error::UnexpectedNumberOfDims { expected: $cnt, got: dims.len(), dims }) } else { let dims = $dims(dims); Ok(dims) } } }; } macro_rules! binary_op { ($func_name:ident, $expression:expr) => { pub fn $func_name(&self, op: &XlaOp) -> Result<Self> { let op = unsafe { $expression(self.op, op.op) }; self.wrap(op) } }; } macro_rules! unary_op { ($func_name:ident, $expression:expr) => { pub fn $func_name(&self) -> Result<Self> { let op = unsafe { $expression(self.op) }; self.wrap(op) } }; } impl Clone for XlaOp { fn clone(&self) -> Self { let op = unsafe { c_lib::op_clone(self.op) }; Self { op, builder: self.builder.clone() } } } impl XlaOp { pub(super) fn wrap(&self, op: c_lib::xla_op) -> Result<Self> { self.builder.get_current_status()?; Ok(XlaOp { op, builder: self.builder.clone() }) } pub fn builder(&self) -> &XlaBuilder { &self.builder } binary_op!(add_, c_lib::op_add); binary_op!(sub_, c_lib::op_sub); binary_op!(mul_, c_lib::op_mul); binary_op!(div_, c_lib::op_div); binary_op!(rem_, c_lib::op_rem); binary_op!(max, c_lib::op_max); binary_op!(min, c_lib::op_min); binary_op!(and, c_lib::op_and); binary_op!(or, c_lib::op_or); binary_op!(xor, c_lib::op_xor); binary_op!(atan2, c_lib::op_atan2); binary_op!(pow, c_lib::op_pow); binary_op!(dot, c_lib::op_dot); binary_op!(eq, c_lib::op_eq); binary_op!(ne, c_lib::op_ne); binary_op!(ge, c_lib::op_ge); binary_op!(gt, c_lib::op_gt); binary_op!(le, c_lib::op_le); binary_op!(lt, c_lib::op_lt); binary_op!(lshift, c_lib::op_shift_left); binary_op!(rshift_arith, c_lib::op_shift_right_arith); binary_op!(rshift_logic, c_lib::op_shift_right_logic); unary_op!(population_count, c_lib::op_population_count); unary_op!(not, c_lib::op_not); unary_op!(abs, c_lib::op_abs); unary_op!(exp, c_lib::op_exp); unary_op!(expm1, c_lib::op_expm1); unary_op!(floor, c_lib::op_floor); unary_op!(ceil, c_lib::op_ceil); unary_op!(round, c_lib::op_round); unary_op!(round_nearest_even, c_lib::op_round_nearest_even); unary_op!(log, c_lib::op_log); unary_op!(log1p, c_lib::op_log1p); unary_op!(logistic, c_lib::op_logistic); unary_op!(sign, c_lib::op_sign); unary_op!(clz, c_lib::op_clz); unary_op!(cos, c_lib::op_cos); unary_op!(sin, c_lib::op_sin); unary_op!(tanh, c_lib::op_tanh); unary_op!(real, c_lib::op_real); unary_op!(imag, c_lib::op_imag); unary_op!(conj, c_lib::op_conj); unary_op!(square, c_lib::op_square); unary_op!(sqrt, c_lib::op_sqrt); unary_op!(rsqrt, c_lib::op_rsqrt); unary_op!(cbrt, c_lib::op_cbrt); unary_op!(is_finite, c_lib::op_is_finite); unary_op!(neg, c_lib::op_neg); unary_op!(lower_triangle, c_lib::op_lower_triangle); unary_op!(upper_triangle, c_lib::op_upper_triangle); unary_op!(erf, c_lib::op_erf); unary_op!(copy, c_lib::op_copy); unary_op!(zeros_like, 
c_lib::op_zeros_like); /// Sigmoid activation function. /// /// This computes the element-wise sigmoid. pub fn sigmoid(&self) -> Result<Self> { self.logistic() } /// SiLU activation function. /// /// This computes the element-wise SiLU activation, x.sigmoid(x). pub fn silu(&self) -> Result<Self> { self * self.logistic() } pub fn relu(&self) -> Result<Self> { self.max(&self.zeros_like()?) } pub fn gelu(&self) -> Result<Self> { let prim_type = self.primitive_type()?; let elem_type = prim_type.element_type()?; let b = self.builder(); let sqrt_two = b.c0(2)?.astype(prim_type)?.sqrt()?; let one_half = b.c0(0.5)?.astype(prim_type)?; let gauss_cdf = self.div_(&sqrt_two)?.erf()?.add_(&b.one(elem_type)?)?.mul_(&one_half)?; self.mul_(&gauss_cdf) } pub fn gelu_approx(&self) -> Result<Self> { let prim_type = self.primitive_type()?; let b = self.builder(); let sqrt_two_over_pi = b.c0(2f32 / std::f32::consts::PI)?.astype(prim_type)?.sqrt()?; let v = (sqrt_two_over_pi * ((b.c0(0.044715)?.astype(prim_type)? * self.pow(&b.c0(3f32)?)?)? + self)?)?; (b.c0(0.5)?.astype(prim_type)? * self)? * (v.tanh()? + b.c0(1)?.astype(prim_type)?)? } /// A node that applies the specified Einstein summation formula to this node. pub fn einsum1(&self, config: &str) -> Result<Self> { let config = std::ffi::CString::new(config).unwrap(); let op = unsafe { c_lib::op_einsum1(self.op, config.as_ptr()) }; self.wrap(op) } /// A node that applies the specified Einstein summation formula to this node and the other /// argument node. pub fn einsum2(&self, rhs: &XlaOp, config: &str) -> Result<Self> { let config = std::ffi::CString::new(config).unwrap(); let op = unsafe { c_lib::op_einsum2(self.op, rhs.op, config.as_ptr()) }; self.wrap(op) } /// Reshape this node to a different set of dimension sizes, the number of element between the /// two different shapes has to match. pub fn reshape(&self, dims: &[i64]) -> Result<Self> { let op = unsafe { c_lib::op_reshape(self.op, dims.len(), dims.as_ptr()) }; self.wrap(op) } pub fn dynamic_reshape( &self, dim_sizes: &[XlaOp], new_size_bounds: &[i64], dims_are_dynamic: Vec<bool> ) -> Result<Self> { let dim_sizes: Vec<_> = dim_sizes.iter().map(|a| a.op).collect(); let op = unsafe {c_lib::op_dynamic_reshape( self.op, dim_sizes.len(), dim_sizes.as_ptr(), new_size_bounds.len(), new_size_bounds.as_ptr(), dims_are_dynamic.as_ptr()) }; self.wrap(op) } /// Add some broadcasting dimensions at the beginning of the current node shape. pub fn broadcast(&self, dims: &[i64]) -> Result<Self> { let op = unsafe { c_lib::op_broadcast(self.op, dims.len(), dims.as_ptr()) }; self.wrap(op) } /// Add some broadcasting dimensions at arbitrary positions. /// /// See the [semantics](https://www.tensorflow.org/xla/operation_semantics#broadcastindim). pub fn broadcast_in_dim(&self, out_dims: &[i64], broadcast_dims: &[i64]) -> Result<Self> { let op = unsafe { c_lib::op_broadcast_in_dim( self.op, out_dims.len(), out_dims.as_ptr(), broadcast_dims.len(), broadcast_dims.as_ptr(), ) }; self.wrap(op) } /// Collapse the dimensions of this node into a single dimension, [xla /// documentation](https://www.tensorflow.org/xla/operation_semantics#collapse). pub fn collapse(&self, dims: &[i64]) -> Result<Self> { let op = unsafe { c_lib::op_collapse(self.op, dims.len(), dims.as_ptr()) }; self.wrap(op) } /// Permute the dimension with the specified indexes. 
pub fn transpose(&self, index_perm: &[i64]) -> Result<Self> { let op = unsafe { c_lib::op_transpose(self.op, index_perm.len(), index_perm.as_ptr()) }; self.wrap(op) } /// Permute two dimensions, this is a specialized version of `transpose`. pub fn swap_dims(&self, index1: i64, index2: i64) -> Result<Self> { let index1 = self.normalize_index(index1)?; let index2 = self.normalize_index(index2)?; let rank = self.rank()?; let mut index_perm: Vec<_> = (0..rank as i64).collect(); index_perm[index1 as usize] = index2; index_perm[index2 as usize] = index1; self.transpose(&index_perm) } pub fn pad(&self, padding_value: &XlaOp, padding_config: Vec<(i64, i64, i64)>) -> Result<Self> { let lows: Vec<_> = padding_config.iter().map(|x| x.0).collect(); let highs: Vec<_> = padding_config.iter().map(|x| x.1).collect(); let interiors: Vec<_> = padding_config.iter().map(|x| x.2).collect(); let op = unsafe {c_lib::op_pad( self.op, padding_value.op, padding_config.len(), lows.as_ptr(), highs.as_ptr(), interiors.as_ptr()) }; self.wrap(op) } pub fn pad_in_dim(&self, padding_value: &XlaOp, dinmo: i64, pad_low: i64, pad_high: i64) -> Result<Self> { let op = unsafe {c_lib::op_pad_in_dim(self.op, padding_value.op, dinmo, pad_low, pad_high)}; self.wrap(op) } pub fn slice(&self, start_indices: &[i64], limit_indices: &[i64], strides: &[i64]) -> Result<Self> { let op = unsafe {c_lib::op_slice( self.op,start_indices.len(),start_indices.as_ptr(), limit_indices.len(),limit_indices.as_ptr(), strides.len(),strides.as_ptr()) }; self.wrap(op) } /// Create a node that has a partial view on the data of the original node. Indexes on the /// target dimension `dim` are restricted to the values between `start_index` (inclusive) and /// `stop_index` (exclusive), using the associated `stride` as a step between two values. pub fn slice_in_dim( &self, start_index: i64, stop_index: i64, stride: i64, dim: i64, ) -> Result<Self> { let dim = self.normalize_index(dim)?; let op = unsafe { c_lib::op_slice_in_dim(self.op, start_index, stop_index, stride, dim) }; self.wrap(op) } /// A specialized version of `slice_in_dim` using a stride of one, so with all values with an /// index between `start_index` (inclusive) and `stop_index` (exclusive). pub fn slice_in_dim1(&self, start_index: i64, stop_index: i64, dim: i64) -> Result<Self> { self.slice_in_dim(start_index, stop_index, 1, dim) } pub fn dynamic_slice( &self, start_indices: &[XlaOp], slice_indices: &[i64], ) -> Result<Self> { let start_indices: Vec<_> = start_indices.iter().map(|a| a.op).collect(); let op = unsafe { c_lib::op_dynamic_slice( self.op, start_indices.len(), start_indices.as_ptr(), slice_indices.len(), slice_indices.as_ptr()) }; self.wrap(op) } pub fn dynamic_update_slice( &self, update: &XlaOp, start_indices: &[XlaOp], ) -> Result<Self> { let start_indices: Vec<_> = start_indices.iter().map(|a| a.op).collect(); let op = unsafe { c_lib::op_dynamic_update_slice( self.op, update.op, start_indices.len(), start_indices.as_ptr()) }; self.wrap(op) } /// A new node containing only values for index `index_in_dim` on the dimension `dim_index`. /// The target dimension is squeezed so the resulting node has one less dimension than the /// original node. pub fn at(&self, index_in_dim: i64, dim_index: i64) -> Result<Self> { let slice = self.slice_in_dim(index_in_dim, index_in_dim + 1, 1, dim_index)?; slice.squeeze(dim_index) } /// Squeeze the dimension as the target index, i.e. if this dimension has size one remove it /// for the generated node. 
The target dimension index can be specified as a negative value, /// e.g. -1 for the last dimension. pub fn squeeze(&self, index: i64) -> Result<Self> { let index = self.normalize_index(index)?; let dims = self.dims()?; let mut new_dims = vec![]; for (i, d) in dims.iter().enumerate() { if i as i64 != index || *d != 1 { new_dims.push(*d as i64) } } self.reshape(&new_dims) } /// Concat multiple nodes (together with the `self` node) along the target dimension. pub fn concat_in_dim( &self, args: &[XlaOp], dim: i64, ) -> Result<Self> { let dim = self.normalize_index(dim)?; let args: Vec<_> = args.iter().map(|a| a.op).collect(); let op = unsafe { c_lib::op_concat_in_dim(self.op, args.as_ptr(), args.len(), dim) }; self.wrap(op) } /// Index into tuples. pub fn get_tuple_element(&self, index: i64) -> Result<Self> { let op = unsafe { c_lib::op_get_tuple_element(self.op, index) }; self.wrap(op) } /// Clamp the values in the original node to be between `min` and `max`. pub fn clamp(&self, min: &Self, max: &Self) -> Result<Self> { let op = unsafe { c_lib::op_clamp(min.op, self.op, max.op) }; self.wrap(op) } /// Select values from the original tensor to be values from `on_true` if the associated /// value in `self` is true, and the values from `on_false` otherwise. pub fn select(&self, on_true: &Self, on_false: &Self) -> Result<Self> { let op = unsafe { c_lib::op_select(self.op, on_true.op, on_false.op) }; self.wrap(op) } /// A node that when executed generates values using a random uniform distribution. pub fn rng_uniform(min: &Self, max: &Self, shape: &ArrayShape) -> Result<Self> { let dims = shape.dims(); let op = unsafe { c_lib::op_rng_uniform( min.op, max.op, shape.primitive_type() as i32, dims.len() as i32, dims.as_ptr(), ) }; min.wrap(op) } /// A node that when executed generates values using a random normal distribution. pub fn rng_normal(mu: &Self, sigma: &Self, shape: &ArrayShape) -> Result<Self> { let dims = shape.dims(); let op = unsafe { c_lib::op_rng_normal( mu.op, sigma.op, shape.primitive_type() as i32, dims.len() as i32, dims.as_ptr(), ) }; mu.wrap(op) } /// Create a new node by casting the elements of the original node to a new primitive type. pub fn astype(&self, ty: PrimitiveType) -> Result<Self> { let op = unsafe { c_lib::op_convert_element_type(self.op, ty as i32) }; self.wrap(op) } fn normalize_indexes(&self, indexes: &[i64]) -> Result<Vec<i64>> { let rank = self.rank()?; indexes .iter() .map(|&index| { if index >= rank as i64 { Err(Error::IndexOutOfBounds { index, rank }) } else if index >= 0 { Ok(index) } else if index + rank as i64 >= 0 { Ok(index + rank as i64) } else { Err(Error::IndexOutOfBounds { index, rank }) } }) .collect() } fn normalize_index(&self, index: i64) -> Result<i64> { let rank = self.rank()?; if index >= rank as i64 { Err(Error::IndexOutOfBounds { index, rank }) } else if index >= 0 { Ok(index) } else if index + rank as i64 >= 0 { Ok(index + rank as i64) } else { Err(Error::IndexOutOfBounds { index, rank }) } } /// A node that contains the size of the dimension with the target index as a `S32` scalar /// value. pub fn dimensions_size(&self, index: i64) -> Result<Self> { let index = self.normalize_index(index)?; let op = unsafe { c_lib::op_dimensions_size(self.op, index) }; self.wrap(op) } /// Create a node by folding a computation across some target dimensions. 
If `keep_dims` is /// `true`, the resulting node has a dimension of size one for the target dimensions, when /// using `false` these dimensions are squeezed so the resulting node has a rank that is the /// original node rank minus the number of elements in `dims`. pub fn reduce( &self, init_value: Self, comp: &XlaComputation, dims: &[i64], keep_dims: bool, ) -> Result<Self> { let dims = self.normalize_indexes(dims)?; let op = unsafe { c_lib::op_reduce(self.op, init_value.op, comp.0, dims.as_ptr(), dims.len()) }; let op = self.wrap(op)?; self.maybe_keep_dims(op, &dims, keep_dims) } /// Sequentially execute `body` until `cond` fails. /// /// - `init` argument has a type `T`. /// - `cond` is a computation with a single argument of type `T` producing a value of type /// `PRED`. /// - `body` is a computation with a single argument of type `T` producing a value of type /// `T`. pub fn while_(cond: &XlaComputation, body: &XlaComputation, init: Self) -> Result<Self> { let op = unsafe { c_lib::op_while(cond.0, body.0, init.op) }; init.wrap(op) } /// Execute `true_comp` if `self` is true, `false_comp` if `self` is false, and return the result. /// `self` has to be a scalar of type `PRED`. /// `true_op` is used as the single argument to `true_comp` and `false_op` as the single /// argument to `false_comp`. pub fn conditional( &self, true_op: Self, true_comp: &XlaComputation, false_op: Self, false_comp: &XlaComputation, ) -> Result<Self> { let op = unsafe { c_lib::op_conditional(self.op, true_op.op, true_comp.0, false_op.op, false_comp.0) }; self.wrap(op) } pub fn conv( &self, rhs: &XlaOp, window_strides: &[i64], padding: &str, feature_group_count: i64, batch_group_count: i64 ) -> Result<Self> { let padding_config = std::ffi::CString::new(padding).unwrap(); let op = unsafe { c_lib::op_conv( self.op, rhs.op, window_strides.len(), window_strides.as_ptr(), padding_config.as_ptr(), feature_group_count, batch_group_count ) }; self.wrap(op) } pub fn conv_general_dilated( &self, rhs: &XlaOp, window_strides: &[i64], padding: &[(i64, i64)], lhs_dilations: &[i64], rhs_dilations: &[i64], input_batch_dim: &i64, input_feature_dim: &i64, input_spatial_dims: &[i64], output_batch_dim: &i64, output_feature_dim: &i64, output_spatial_dims: &[i64], kernel_input_feature_dim: &i64, kernel_output_feature_dim: &i64, kernel_spatial_dims: &[i64], feature_group_count: i64, batch_group_count: i64 ) -> Result<XlaOp> { let padding: Vec<i64> = padding.iter().flat_map(|(a, b)| vec![*a, *b]).collect(); let op = unsafe { c_lib::op_conv_general_dilated( self.op, rhs.op, window_strides.len(), window_strides.as_ptr(), padding.len() / 2, padding.as_ptr(), lhs_dilations.len(), lhs_dilations.as_ptr(), rhs_dilations.len(), rhs_dilations.as_ptr(), input_batch_dim, input_feature_dim, input_spatial_dims.len(), input_spatial_dims.as_ptr(), output_batch_dim, output_feature_dim, output_spatial_dims.len(), output_spatial_dims.as_ptr(), kernel_input_feature_dim, kernel_output_feature_dim, kernel_spatial_dims.len(), kernel_spatial_dims.as_ptr(), feature_group_count, batch_group_count, ) }; self.wrap(op) } pub fn batch_norm_inference( &self, scale: &XlaOp, offset: &XlaOp, mean: &XlaOp, variance: &XlaOp, epsilon: f32, feature_index: i64, ) -> Result<Self> { let op = unsafe { c_lib::op_batch_norm_inference( self.op, scale.op, offset.op, mean.op, variance.op, epsilon, feature_index, ) }; self.wrap(op) } pub fn outfeed(&self, ty: PrimitiveType, dims: &[i64], config: &str) { let config = std::ffi::CString::new(config).unwrap(); unsafe { 
c_lib::outfeed(self.op, ty as i32, dims.len() as i32, dims.as_ptr(), config.as_ptr()) } } /// The kind of elements that are computed by this operand. pub fn primitive_type(&self) -> Result<PrimitiveType> { self.builder.get_primitive_type(self) } /// The kind of elements that are computed by this operand, shortcut for `primitive_type`. pub fn ty(&self) -> Result<PrimitiveType> { self.primitive_type() } /// The number of dimensions for this node. pub fn rank(&self) -> Result<usize> { self.builder.get_dimensions_size(self) } pub fn shape(&self) -> Result<Shape> { self.builder.get_shape(self) } pub fn array_shape(&self) -> Result<ArrayShape> { ArrayShape::try_from(&self.builder.get_shape(self)?) } pub fn dims(&self) -> Result<Vec<usize>> { self.builder.get_dims(self) } extract_dims!(dim1, 1, |d: Vec<usize>| d[0], usize); extract_dims!(dim2, 2, |d: Vec<usize>| (d[0], d[1]), (usize, usize)); extract_dims!(dim3, 3, |d: Vec<usize>| (d[0], d[1], d[2]), (usize, usize, usize)); extract_dims!(dim4, 4, |d: Vec<usize>| (d[0], d[1], d[2], d[3]), (usize, usize, usize, usize)); extract_dims!( dim5, 5, |d: Vec<usize>| (d[0], d[1], d[2], d[3], d[4]), (usize, usize, usize, usize, usize) ); /// General dot multiplication between two nodes, specifying the dimensions that get contracted /// as well as the batch dimensions. pub fn dot_general( &self, rhs: &XlaOp, lhs_contracting_dims: &[i64], rhs_contracting_dims: &[i64], lhs_batch_dims: &[i64], rhs_batch_dims: &[i64], ) -> Result<Self> { let op = unsafe { c_lib::op_dot_general( self.op, rhs.op, lhs_contracting_dims.as_ptr(), lhs_contracting_dims.len(), rhs_contracting_dims.as_ptr(), rhs_contracting_dims.len(), lhs_batch_dims.as_ptr(), lhs_batch_dims.len(), rhs_batch_dims.as_ptr(), rhs_batch_dims.len(), ) }; self.wrap(op) } pub fn gather( &self, start_indices: &XlaOp, offset_dims: &[i64], collapsed_slice_dims: &[i64], start_index_map: &[i64], set_index_vector_dim: Option<i64>, slice_sizes: &[i64], ) -> Result<Self> { let set_index_vector_dim_ptr = set_index_vector_dim.as_ref().map(|p| p as *const _).unwrap_or(std::ptr::null()); let op = unsafe { c_lib::op_gather( self.op, start_indices.op, offset_dims.as_ptr(), offset_dims.len(), collapsed_slice_dims.as_ptr(), collapsed_slice_dims.len(), start_index_map.as_ptr(), start_index_map.len(), set_index_vector_dim_ptr, slice_sizes.as_ptr(), slice_sizes.len(), ) }; self.wrap(op) } pub fn scatter( operands: &[XlaOp], scatter_indices: &XlaOp, updates: &[XlaOp], update_computation: &XlaComputation, update_window_dims: &[i64], inserted_window_dims: &[i64], scatter_dims_to_operand_dims: &[i64], index_vector_dim: i64 ) -> Result<XlaOp> { let operands: Vec<_> = operands.iter().map(|a| a.op).collect(); let updates: Vec<_> = updates.iter().map(|a| a.op).collect(); let op = unsafe { c_lib::op_scatter( operands.len(), operands.as_ptr(), scatter_indices.op, updates.len(), updates.as_ptr(), update_computation.0, update_window_dims.len(), update_window_dims.as_ptr(), inserted_window_dims.len(), inserted_window_dims.as_ptr(), scatter_dims_to_operand_dims.len(), scatter_dims_to_operand_dims.as_ptr(), index_vector_dim ) }; scatter_indices.wrap(op) } pub fn take(&self, indices: &XlaOp, axis: i64) -> Result<Self> { let axis = self.normalize_index(axis)?; let shape = self.array_shape()?; let indices_shape = indices.array_shape()?; let index_dims = indices_shape.dims(); let dims = shape.dims(); let offset_dims: Vec<_> = (0..((dims.len() + index_dims.len()) as i64 - 1)) .filter(|x| *x < axis || *x >= axis + index_dims.len() as i64) .collect(); 
let mut slice_sizes: Vec<_> = dims.to_vec(); slice_sizes[axis as usize] = 1; let mut index_dims_plus_1 = index_dims.to_vec(); index_dims_plus_1.push(1); let indices = indices.reshape(&index_dims_plus_1)?; // Same as in Jax: always use the last dimension for index_vector_dim. let index_vector_dim = Some(index_dims.len() as i64); self.gather(&indices, &offset_dims, &[axis], &[axis], index_vector_dim, &slice_sizes) } fn maybe_keep_dims(&self, res: XlaOp, dims_to_keep: &[i64], keep_dims: bool) -> Result<XlaOp> { if keep_dims && !dims_to_keep.is_empty() { let shape = self.array_shape()?; let mut dims = shape.dims().to_vec(); for d in dims_to_keep.iter() { dims[*d as usize] = 1; } res.reshape(&dims) } else { Ok(res) } } /// A node that computes the sum across the specified dimensions, e.g. if all the dimensions /// are passed as an argument the result is a scalar with the sum of all the elements in the /// original node. pub fn reduce_sum(&self, dims: &[i64], keep_dims: bool) -> Result<Self> { let builder = XlaBuilder::new("Sum"); let ty = self.primitive_type()?.element_type()?; let x = builder.parameter(0, ty, &[], "x")?; let y = builder.parameter(1, ty, &[], "y")?; let sum = x.add_(&y)?.build()?; let init_value = self.builder.zero(ty)?; self.reduce(init_value, &sum, dims, keep_dims) } /// A node that computes the average value across the specified dimensions. pub fn reduce_mean(&self, dims: &[i64], keep_dims: bool) -> Result<Self> { let b = &self.builder(); let ty = self.primitive_type()?; let mut scale = b.one(crate::ElementType::S32)?; for d in dims.iter() { scale = (scale * self.dimensions_size(*d)?)?; } let sum = self.reduce_sum(dims, keep_dims)?; sum / scale.astype(ty)? } /// A node that computes the maximum value across the specified dimensions. pub fn reduce_max(&self, dims: &[i64], keep_dims: bool) -> Result<Self> { let builder = XlaBuilder::new("Max"); let ty = self.primitive_type()?.element_type()?; let x = builder.parameter(0, ty, &[], "x")?; let y = builder.parameter(1, ty, &[], "y")?; let sum = x.max(&y)?.build()?; let init_value = self.builder.min_value(ty)?; self.reduce(init_value, &sum, dims, keep_dims) } /// A node that computes the minimum value across the specified dimensions. pub fn reduce_min(&self, dims: &[i64], keep_dims: bool) -> Result<Self> { let builder = XlaBuilder::new("Min"); let ty = self.primitive_type()?.element_type()?; let x = builder.parameter(0, ty, &[], "x")?; let y = builder.parameter(1, ty, &[], "y")?; let sum = x.min(&y)?.build()?; let init_value = self.builder.max_value(ty)?; self.reduce(init_value, &sum, dims, keep_dims) } pub fn softmax(&self, dim: i64) -> Result<Self> { let max = self.reduce_max(&[dim], true)?; let unnormalized = (self - max)?.exp()?; let sum = unnormalized.reduce_sum(&[dim], true)?; unnormalized / sum } /// Layer normalization, this normalizes values on the target dimension to be of zero mean and /// standard deviation one, and then scales the result by `scale` and adds `bias`. pub fn layer_norm(&self, dims: &[i64], scale: &XlaOp, bias: &XlaOp, eps: f64) -> Result<Self> { let ty = self.primitive_type().unwrap_or(PrimitiveType::F32); let eps = self.builder().c0(eps)?.astype(ty)?; let mean = self.reduce_mean(&dims, true)?; let mean2 = (self * self)?.reduce_mean(&dims, true)?; let var = (mean2 - (&mean * &mean)?)?; let mul = (var + eps)?.rsqrt()?; bias + ((self - mean)? * mul)? * scale } /// Matrix multiplication, this is a specialized version of `dot_general` to be used for /// matrix-matrix or matrix-vector multiplications. 
pub fn matmul(&self, rhs: &Self) -> Result<Self> { // Similar to the jax implementation but without the squeezing. // https://github.com/google/jax/blob/849e47f79ac64ccba1a762804217c00a9905025b/jax/_src/numpy/lax_numpy.py#L3028 let lhs_shape = self.array_shape()?; let rhs_shape = self.array_shape()?; let lhs_dims = lhs_shape.dims(); let rhs_dims = rhs_shape.dims(); let lhs_ndims = lhs_dims.len(); let rhs_ndims = rhs_dims.len(); if lhs_ndims < 1 || rhs_ndims < 1 { Err(Error::MatMulIncorrectDims { lhs_dims: lhs_dims.to_vec(), rhs_dims: rhs_dims.to_vec(), msg: "empty dimension", })? } let rhs_is_mat = rhs_ndims > 1; let lhs_batch_ndims = lhs_ndims.saturating_sub(2); let rhs_batch_ndims = rhs_ndims.saturating_sub(2); let max_ndims = usize::max(lhs_batch_ndims, rhs_batch_ndims); let mut lhs_batch_dims = vec![]; let mut rhs_batch_dims = vec![]; for idx in 0..max_ndims { let lhs_idx = (idx + lhs_batch_ndims) as i64 - max_ndims as i64; let rhs_idx = (idx + rhs_batch_ndims) as i64 - max_ndims as i64; // Only one of lhs_idx and rhs_idx can be negative. if lhs_idx < 0 && rhs_idx < 0 { panic!("internal error: negative dim idxs {lhs_dims:?} {rhs_dims:?}") } else if lhs_idx < 0 && rhs_idx >= 0 { rhs_batch_dims.push(rhs_idx) } else if lhs_idx >= 0 && rhs_idx < 0 { lhs_batch_dims.push(lhs_idx) } else if lhs_dims[lhs_idx as usize] == rhs_dims[rhs_idx as usize] { lhs_batch_dims.push(lhs_idx); rhs_batch_dims.push(rhs_idx); } else { Err(Error::MatMulIncorrectDims { lhs_dims: lhs_dims.to_vec(), rhs_dims: rhs_dims.to_vec(), msg: "incompatible batch dimensions", })? } } self.dot_general( rhs, &[lhs_ndims as i64 - 1], &[rhs_ndims as i64 - 1 - i64::from(rhs_is_mat)], &lhs_batch_dims, &rhs_batch_dims, ) } /// Generate a computation which root value is this node. pub fn build(&self) -> Result<XlaComputation> { self.builder.build(self) } } impl Drop for XlaOp { fn drop(&mut self) { unsafe { c_lib::xla_op_free(self.op) } } } macro_rules! bin_trait { ($trait:ident, $fn1:ident, $fn2:ident) => { impl<B: std::borrow::Borrow<XlaOp>> std::ops::$trait<B> for XlaOp { type Output = Result<XlaOp>; fn $fn1(self, rhs: B) -> Self::Output { (&self).$fn1(rhs) } } impl<B: std::borrow::Borrow<XlaOp>> std::ops::$trait<B> for &XlaOp { type Output = Result<XlaOp>; fn $fn1(self, rhs: B) -> Self::Output { self.$fn2(rhs.borrow()) } } impl<B: std::borrow::Borrow<XlaOp>> std::ops::$trait<Result<B>> for XlaOp { type Output = Result<XlaOp>; fn $fn1(self, rhs: Result<B>) -> Self::Output { (&self).$fn1(rhs) } } impl<B: std::borrow::Borrow<XlaOp>> std::ops::$trait<Result<B>> for &XlaOp { type Output = Result<XlaOp>; fn $fn1(self, rhs: Result<B>) -> Self::Output { self.$fn2(rhs?.borrow()) } } }; } bin_trait!(Add, add, add_); bin_trait!(Sub, sub, sub_); bin_trait!(Mul, mul, mul_); bin_trait!(Div, div, div_);
ivy/ivy/engines/XLA/rust_api/src/wrappers/xla_op.rs/0
{ "file_path": "ivy/ivy/engines/XLA/rust_api/src/wrappers/xla_op.rs", "repo_id": "ivy", "token_count": 16894 }
15
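Several of the composite helpers in the Rust wrapper above (`softmax`, `layer_norm`, `reduce_mean`) are assembled from `reduce_max`, `reduce_sum` and elementwise graph ops. The formulas they implement can be restated in NumPy for reference; this is only a sketch of the math under my own helper names, not the XLA graph API, which builds lazy computation nodes through the C library:

import numpy as np

def softmax(x, axis):
    shifted = x - x.max(axis=axis, keepdims=True)   # subtract reduce_max for numerical stability
    e = np.exp(shifted)
    return e / e.sum(axis=axis, keepdims=True)

def layer_norm(x, axis, scale, bias, eps=1e-5):
    mean = x.mean(axis=axis, keepdims=True)
    mean2 = (x * x).mean(axis=axis, keepdims=True)
    var = mean2 - mean * mean                        # E[x^2] - E[x]^2, as in the Rust code
    return bias + (x - mean) / np.sqrt(var + eps) * scale

x = np.array([[1.0, 2.0, 3.0]])
print(softmax(x, axis=1).sum().round(6))                     # 1.0
print(np.round(layer_norm(x, 1, scale=1.0, bias=0.0), 4))    # [[-1.2247  0.  1.2247]]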
from typing import Optional, Union, Literal # global import jax import jax.numpy as jnp from ivy.functional.backends.jax import JaxArray from jax import lax import ivy from ivy.func_wrapper import with_unsupported_dtypes from . import backend_version def logit( x: JaxArray, /, *, eps: Optional[float] = None, complex_mode: Literal["split", "magnitude", "jax"] = "jax", out: Optional[JaxArray] = None, ): if eps is None: x = jnp.where(jnp.logical_or(x > 1, x < 0), jnp.nan, x) else: x = jnp.clip(x, eps, 1 - eps) return jnp.log(x / (1 - x)) def relu6( x: JaxArray, /, *, complex_mode="jax", out: Optional[JaxArray] = None ) -> JaxArray: relu6_func = jax.nn.relu6 # sets gradient at 0 and 6 to 0 instead of 0.5 # can refactor to jax.nn.relu6 when this PR is merged # https://github.com/google/jax/pull/14682 def custom_grad_func(x_and_grad, one): return lax.select( (x_and_grad[0] < 6) & (x_and_grad[0] > 0), one, lax.full_like(one, 0) ) new_func = ivy.bind_custom_gradient_function(relu6_func, custom_grad_func) return new_func(x).astype(x.dtype) def thresholded_relu( x: JaxArray, /, *, threshold: Union[int, float] = 0, out: Optional[JaxArray] = None, ) -> JaxArray: return jnp.where(x > threshold, x, 0).astype(x.dtype) def logsigmoid( input: JaxArray, /, *, complex_mode="jax", out: Optional[JaxArray] = None ) -> JaxArray: return jax.nn.log_sigmoid(input) def selu(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray: ret = jax.nn.selu(x).astype(x.dtype) if ivy.exists(out): return ivy.inplace_update(out, ret).astype(x.dtype) return ret def silu(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray: ret = jax.nn.silu(x) if ivy.exists(out): return ivy.inplace_update(out, ret).astype(x.dtype) return ret @with_unsupported_dtypes({"0.4.14 and below": ("float16", "bfloat16")}, backend_version) def elu( x: JaxArray, /, *, alpha: float = 1.0, out: Optional[JaxArray] = None ) -> JaxArray: ret = jax.nn.elu(x, alpha) if ivy.exists(out): return ivy.inplace_update(out, ret).astype(x.dtype) return ret def celu( x: JaxArray, /, *, alpha: float = 1.0, complex_mode="jax", out: Optional[JaxArray] = None, ) -> JaxArray: return jax.nn.celu(x, alpha=alpha) @with_unsupported_dtypes({"0.4.14 and below": ("float16", "bfloat16")}, backend_version) def hardtanh( x: JaxArray, /, *, max_val: float = 1.0, min_val: float = -1.0, out: Optional[JaxArray] = None, ) -> JaxArray: ret = jnp.where(x > max_val, max_val, jnp.where(x < min_val, min_val, x)) if ivy.exists(out): return ivy.inplace_update(out, ret).astype(x.dtype) return ivy.astype(ret, x.dtype) def tanhshrink(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray: ret = jnp.subtract(x, jax.nn.tanh(x)) if ivy.exists(out): return ivy.inplace_update(out, ret).astype(x.dtype) return ret def threshold( x: JaxArray, /, *, threshold: Union[int, float], value: Union[int, float], out: Optional[JaxArray] = None, ) -> JaxArray: ret = jnp.where(x > threshold, x, value).astype(x.dtype) if ivy.exists(out): return ivy.inplace_update(out, ret).astype(x.dtype) # type: ignore return ret @with_unsupported_dtypes({"0.4.16 and below": ("float16", "bfloat16")}, backend_version) def softshrink( x: JaxArray, /, *, lambd: float = 0.5, out: Optional[JaxArray] = None ) -> JaxArray: ret = jnp.where(x > lambd, x - lambd, jnp.where(x < -lambd, x + lambd, 0)) if ivy.exists(out): return ivy.inplace_update(out, ret).astype(x.dtype) return ret @with_unsupported_dtypes({"0.4.17 and below": ("float64",)}, backend_version) def scaled_tanh( x: JaxArray, /, *, alpha: float = 1.7159, beta: 
float = 0.67, out: Optional[JaxArray] = None, ) -> JaxArray: return alpha * jax.nn.tanh(beta * x) @with_unsupported_dtypes({"0.4.16 and below": ("float16", "bfloat16")}, backend_version) def hardshrink( x: JaxArray, /, *, lambd: float = 0.5, out: Optional[JaxArray] = None ) -> JaxArray: ret = jnp.where(x > lambd, x, jnp.where(x < -lambd, x, 0)) if ivy.exists(out): return ivy.inplace_update(out, ret).astype(x.dtype) return ret @with_unsupported_dtypes({"0.4.16 and below": ("float16", "bfloat16")}, backend_version) def hardsilu(x: JaxArray, /, *, out: Optional[JaxArray] = None) -> JaxArray: ret = jax.nn.hard_silu(x) if ivy.exists(out): return ivy.inplace_update(out, ret).astype(x.dtype) return ret
ivy/ivy/functional/backends/jax/experimental/activations.py/0
{ "file_path": "ivy/ivy/functional/backends/jax/experimental/activations.py", "repo_id": "ivy", "token_count": 2171 }
16
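The `logit` backend above masks out-of-range inputs (or clips them when `eps` is given) and then applies log(x / (1 - x)), which is the inverse of the logistic sigmoid. A quick round-trip check, written with NumPy standing in for JAX purely for illustration:

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def logit(x, eps=None):
    # same masking/clipping choice as the JAX backend above
    x = np.where((x < 0) | (x > 1), np.nan, x) if eps is None else np.clip(x, eps, 1 - eps)
    return np.log(x / (1 - x))

x = np.array([-2.0, 0.0, 3.0])
print(np.allclose(logit(sigmoid(x)), x))    # True -- logit inverts sigmoid
print(logit(np.array([-0.1, 0.5, 1.2])))    # [nan  0. nan] -- out-of-range inputs become nan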
# global import jax.numpy as jnp from typing import Optional, Union # local import ivy from ivy.func_wrapper import with_unsupported_dtypes from ivy.functional.backends.jax import JaxArray from . import backend_version # invert_permutation def invert_permutation( x: Union[JaxArray, list, tuple], /, ) -> JaxArray: x = jnp.array(x) if not ivy.is_array(x) else x sorted_indices = jnp.argsort(x) inverse = jnp.zeros_like(sorted_indices) inverse = inverse.at[sorted_indices].set(jnp.arange(len(x))) inverse_permutation = jnp.argsort(inverse) return inverse_permutation # lexsort @with_unsupported_dtypes({"0.4.24 and below": ("bfloat16",)}, backend_version) def lexsort( keys: JaxArray, /, *, axis: int = -1, out: Optional[JaxArray] = None, ) -> JaxArray: return jnp.asarray(jnp.lexsort(keys, axis=axis))
ivy/ivy/functional/backends/jax/experimental/sorting.py/0
{ "file_path": "ivy/ivy/functional/backends/jax/experimental/sorting.py", "repo_id": "ivy", "token_count": 348 }
17
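`invert_permutation` above returns the permutation that undoes its input, i.e. the result `p_inv` satisfies `p_inv[p[i]] == i`. A minimal NumPy check of that property; a single argsort is the standard way to invert a permutation, and the JAX version reaches the same result through an extra scatter-and-argsort step:

import numpy as np

p = np.array([2, 0, 3, 1])
p_inv = np.argsort(p)            # inverse permutation
print(p_inv)                     # [1 3 0 2]
print(p[p_inv], p_inv[p])        # [0 1 2 3] [0 1 2 3] -- both compositions give the identity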
# global import jax.numpy as jnp from typing import Union, Optional, Sequence # local from ivy.functional.backends.jax import JaxArray import ivy def all( x: JaxArray, /, *, axis: Optional[Union[int, Sequence[int]]] = None, keepdims: bool = False, out: Optional[JaxArray] = None, ) -> JaxArray: x = jnp.array(x, dtype="bool") try: return jnp.all(x, axis, keepdims=keepdims) except ValueError as error: raise ivy.utils.exceptions.IvyIndexError(error) from error def any( x: JaxArray, /, *, axis: Optional[Union[int, Sequence[int]]] = None, keepdims: bool = False, out: Optional[JaxArray] = None, ) -> JaxArray: x = jnp.array(x, dtype="bool") try: return jnp.any(x, axis, keepdims=keepdims, out=out) except ValueError as error: raise ivy.utils.exceptions.IvyIndexError(error) from error
ivy/ivy/functional/backends/jax/utility.py/0
{ "file_path": "ivy/ivy/functional/backends/jax/utility.py", "repo_id": "ivy", "token_count": 384 }
18
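Both reductions in the file above first cast the input to bool, so numeric inputs behave like truth tables (zero is False, everything else is True). A tiny NumPy illustration of that convention:

import numpy as np

x = np.array([[1, 0, 3], [2, 2, 2]])
print(np.all(x.astype(bool), axis=1))   # [False  True]
print(np.any(x.astype(bool), axis=1))   # [ True  True]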
# global from typing import List, Optional, Union, Tuple, Literal, Sequence import mxnet as mx # local from ivy.utils.exceptions import IvyNotImplementedException def general_pool( inputs, init, reduce_fn, window_shape, strides, padding, dim, dilation=1, ceil_mode=False, count_include_pad=False, ): raise IvyNotImplementedException() def max_pool1d( x: mx.nd.NDArray, kernel: Union[int, Tuple[int], Tuple[int, int, int]], strides: Union[int, Tuple[int], Tuple[int, int, int]], padding: Union[str, int, Tuple[int]], /, *, data_format: str = "NWC", dilation: Union[int, Tuple[int]] = 1, ceil_mode: bool = False, out: Optional[mx.nd.NDArray] = None, ) -> mx.nd.NDArray: raise IvyNotImplementedException() def max_pool2d( x: mx.nd.NDArray, kernel: Union[int, Tuple[int], Tuple[int, int]], strides: Union[int, Tuple[int], Tuple[int, int]], padding: Union[str, int, Tuple[int], Tuple[int, int]], /, *, data_format: str = "NHWC", dilation: Union[int, Tuple[int], Tuple[int, int]] = 1, ceil_mode: bool = False, out: Optional[mx.nd.NDArray] = None, ) -> mx.nd.NDArray: raise IvyNotImplementedException() def max_pool3d( x: mx.nd.NDArray, kernel: Union[ int, Tuple[int], Tuple[int, int, int], Tuple[int, int, int, int, int] ], strides: Union[ int, Tuple[int], Tuple[int, int, int], Tuple[int, int, int, int, int] ], padding: Union[str, int, Tuple[int], Tuple[int, int, int]], /, *, data_format: str = "NDHWC", dilation: Union[int, Tuple[int], Tuple[int, int, int]] = 1, ceil_mode: bool = False, out: Optional[mx.nd.NDArray] = None, ) -> mx.nd.NDArray: raise IvyNotImplementedException() def avg_pool1d( x: mx.nd.NDArray, kernel: Union[int, Tuple[int]], strides: Union[int, Tuple[int]], padding: Union[str, int, List[Tuple[int, int]]], /, *, data_format: str = "NWC", count_include_pad: bool = False, ceil_mode: bool = False, divisor_override: Optional[int] = None, out: Optional[mx.nd.NDArray] = None, ) -> mx.nd.NDArray: raise IvyNotImplementedException() def avg_pool2d( x: mx.nd.NDArray, kernel: Union[int, Tuple[int], Tuple[int, int]], strides: Union[int, Tuple[int], Tuple[int, int]], padding: Union[str, int, List[Tuple[int, int]]], /, *, data_format: str = "NHWC", count_include_pad: bool = False, ceil_mode: bool = False, divisor_override: Optional[int] = None, out: Optional[mx.nd.NDArray] = None, ) -> mx.nd.NDArray: raise IvyNotImplementedException() def avg_pool3d( x: mx.nd.NDArray, kernel: Union[int, Tuple[int], Tuple[int, int, int]], strides: Union[int, Tuple[int], Tuple[int, int, int]], padding: Union[str, int, List[Tuple[int, int]]], /, *, data_format: str = "NDHWC", count_include_pad: bool = False, ceil_mode: bool = False, divisor_override: Optional[int] = None, out: Optional[mx.nd.NDArray] = None, ) -> mx.nd.NDArray: raise IvyNotImplementedException() def dct( x: mx.nd.NDArray, /, *, type: Literal[1, 2, 3, 4] = 2, n: Optional[int] = None, axis: int = -1, norm: Optional[Literal["ortho"]] = None, out: Optional[mx.nd.NDArray] = None, ) -> mx.nd.NDArray: raise IvyNotImplementedException() def fft( x: mx.nd.NDArray, dim: int, /, *, norm: str = "backward", n: Optional[Union[int, Tuple[int]]] = None, out: Optional[mx.nd.NDArray] = None, ) -> mx.nd.NDArray: raise IvyNotImplementedException() def dropout1d( x: mx.nd.NDArray, prob: float, /, *, training: bool = True, data_format: str = "NWC", out: Optional[mx.nd.NDArray] = None, ) -> mx.nd.NDArray: raise IvyNotImplementedException() def dropout2d( x: mx.nd.NDArray, prob: float, /, *, training: bool = True, data_format: str = "NHWC", out: Optional[mx.nd.NDArray] = None, ) -> 
mx.nd.NDArray: raise IvyNotImplementedException() def dropout3d( x: mx.nd.NDArray, prob: float, /, *, training: bool = True, data_format: str = "NDHWC", out: Optional[mx.nd.NDArray] = None, ) -> mx.nd.NDArray: raise IvyNotImplementedException() def ifft( x: mx.nd.NDArray, dim: int, *, norm: str = "backward", n: Optional[Union[int, Tuple[int]]] = None, out: Optional[mx.nd.NDArray] = None, ) -> mx.nd.NDArray: raise IvyNotImplementedException() def interpolate( x: mx.nd.NDArray, size: Union[Sequence[int], int], /, *, mode: Literal[ "linear", "bilinear", "trilinear", "nd", "nearest", "area", "nearest_exact", "tf_area", "tf_bicubic", "bicubic", "mitchellcubic", "lanczos3", "lanczos5", "gaussian", ] = "linear", scale_factor: Optional[Union[Sequence[int], int]] = None, recompute_scale_factor: Optional[bool] = None, align_corners: bool = False, antialias: bool = False, out: Optional[mx.nd.NDArray] = None, ): raise IvyNotImplementedException() def rfft( x: mx.nd.NDArray, /, *, n: Optional[int] = None, axis: int = -1, norm: Literal["backward", "ortho", "forward"] = "backward", out: Optional[mx.nd.NDArray] = None, ) -> mx.nd.NDArray: raise IvyNotImplementedException()
ivy/ivy/functional/backends/mxnet/experimental/layers.py/0
{ "file_path": "ivy/ivy/functional/backends/mxnet/experimental/layers.py", "repo_id": "ivy", "token_count": 2545 }
19
from typing import Optional, Union, Literal # global import numpy as np # local import ivy from ivy.functional.backends.numpy.helpers import _scalar_output_to_0d_array from ivy.func_wrapper import ( with_unsupported_dtypes, ) from . import backend_version def logit( x: np.ndarray, /, *, eps: Optional[float] = None, complex_mode: Literal["split", "magnitude", "jax"] = "jax", out: Optional[np.ndarray] = None, ): x_dtype = x.dtype if eps is None: x = np.where(np.logical_or(x > 1, x < 0), np.nan, x) else: x = np.clip(x, eps, 1 - eps) ret = (np.log(x / (1 - x))).astype(x_dtype) if np.isscalar(ret): return np.array(ret) return ret @_scalar_output_to_0d_array def thresholded_relu( x: np.ndarray, /, *, threshold: Union[int, float] = 0, out: Optional[np.ndarray] = None, ) -> np.ndarray: return np.where(x > threshold, x, 0).astype(x.dtype) thresholded_relu.support_native_out = True @_scalar_output_to_0d_array def relu6( x: np.ndarray, /, *, complex_mode="jax", out: Optional[np.ndarray] = None ) -> np.ndarray: return np.minimum(np.maximum(x, 0, dtype=x.dtype), 6, out=out, dtype=x.dtype) relu6.support_native_out = True @with_unsupported_dtypes({"1.26.3 and below": ("bool",)}, backend_version) @_scalar_output_to_0d_array def logsigmoid( input: np.ndarray, /, *, complex_mode="jax", out: Optional[np.ndarray] = None ) -> np.ndarray: return -(np.log1p(np.exp(-(input)))) @_scalar_output_to_0d_array def selu(x: np.ndarray, /, *, out: Optional[np.ndarray] = None) -> np.ndarray: alpha = 1.6732632423543772848170429916717 scale = 1.0507009873554804934193349852946 ret = (scale * np.where(x > 0, x, alpha * np.expm1(x))).astype(x.dtype) if ivy.exists(out): return ivy.inplace_update(out, ret).astype(x.dtype) return ret selu.support_native_out = True @_scalar_output_to_0d_array def silu(x: np.ndarray, /, *, out: Optional[np.ndarray] = None) -> np.ndarray: ret = np.asarray(x * (1 / (1 + np.exp(-x)))) if ivy.exists(out): return ivy.inplace_update(out, ret).astype(x.dtype) if not ivy.is_array(x): return ret else: return np.asarray(x * (1 / (1 + np.exp(-x)))).astype(x.dtype) silu.support_native_out = True @_scalar_output_to_0d_array def elu( x: np.ndarray, /, *, alpha: float = 1.0, out: Optional[np.ndarray] = None ) -> np.ndarray: # exp = np.expm1(x) ret = np.where(x > 0, x, np.multiply(alpha, np.expm1(x))).astype(x.dtype) if ivy.exists(out): return ivy.inplace_update(out, ret).astype(x.dtype) return ret elu.support_native_out = True @_scalar_output_to_0d_array def celu( x: np.ndarray, /, *, alpha: float = 1.0, complex_mode="jax", out: Optional[np.ndarray] = None, ) -> np.ndarray: return (np.maximum(0, x) + alpha * np.expm1(np.minimum(0, x) / alpha)).astype( x.dtype ) @with_unsupported_dtypes({"1.25.2 and below": ("float16", "bfloat16")}, backend_version) @_scalar_output_to_0d_array def hardtanh( x: np.ndarray, /, *, max_val: float = 1.0, min_val: float = -1.0, out: Optional[np.ndarray] = None, ) -> np.ndarray: ret = np.where(x > max_val, max_val, np.where(x < min_val, min_val, x)) if ivy.exists(out): return ivy.inplace_update(out, ret).astype(x.dtype) return ivy.astype(ret, x.dtype) hardtanh.support_native_out = True @_scalar_output_to_0d_array def tanhshrink(x: np.ndarray, /, *, out: Optional[np.ndarray] = None) -> np.ndarray: ret = np.subtract(x, np.tanh(x)) if ivy.exists(out): return ivy.inplace_update(out, ret).astype(x.dtype) return ivy.astype(ret, x.dtype) tanhshrink.support_native_out = True @_scalar_output_to_0d_array def threshold( x: np.ndarray, /, *, threshold: float, value: float, out: Optional[np.ndarray] = 
None, ) -> np.ndarray: ret = np.where(x > threshold, x, value) if ivy.exists(out): return ivy.inplace_update(out, ret).astype(x.dtype) return ivy.astype(ret, x.dtype) threshold.support_native_out = True @_scalar_output_to_0d_array def softshrink( x: np.ndarray, /, *, lambd: float = 0.5, out: Optional[np.ndarray] = None ) -> np.ndarray: ret = np.where(x > lambd, x - lambd, np.where(x < -lambd, x + lambd, 0)) if ivy.exists(out): return ivy.inplace_update(out, ret).astype(x.dtype) return ivy.astype(ret, x.dtype) softshrink.support_native_out = True @_scalar_output_to_0d_array def scaled_tanh( x: np.ndarray, /, *, alpha: float = 1.7159, beta: float = 0.67, out: Optional[np.ndarray] = None, ) -> np.ndarray: return alpha * np.tanh(beta * x) @_scalar_output_to_0d_array def hardshrink( x: np.ndarray, /, *, lambd: float = 0.5, out: Optional[np.ndarray] = None ) -> np.ndarray: ret = np.where(x > lambd, x, np.where(x < -lambd, x, 0)) if ivy.exists(out): return ivy.inplace_update(out, ret).astype(x.dtype) return ivy.astype(ret, x.dtype) hardshrink.support_native_out = True @with_unsupported_dtypes({"2.14.0 and below": ("complex",)}, backend_version) @_scalar_output_to_0d_array def hardsilu(x: np.ndarray, /, *, out: Optional[np.ndarray] = None) -> np.ndarray: ret = x * np.divide(relu6(x + 3), 6) if ivy.exists(out): return ivy.inplace_update(out, ret).astype(x.dtype) return ivy.astype(ret, x.dtype) hardsilu.support_native_out = True
ivy/ivy/functional/backends/numpy/experimental/activations.py/0
{ "file_path": "ivy/ivy/functional/backends/numpy/experimental/activations.py", "repo_id": "ivy", "token_count": 2582 }
20
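A quick standalone check of the formulas used by the NumPy backend activations above (thresholded_relu and softshrink). This sketch restates the math with plain NumPy rather than importing the backend module, so the printed values are only what the formulas imply:

import numpy as np

x = np.array([-2.0, -0.25, 0.0, 0.25, 2.0], dtype=np.float32)

# thresholded_relu: keep entries strictly above the threshold, zero the rest
print(np.where(x > 0.5, x, 0).astype(x.dtype))                      # [0. 0. 0. 0. 2.]

# softshrink: shift entries towards zero by lambd, zero the band [-lambd, lambd]
lambd = 0.5
print(np.where(x > lambd, x - lambd, np.where(x < -lambd, x + lambd, 0)))
# [-1.5  0.   0.   0.   1.5]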
# global import numpy as np from typing import Optional, Union # invert_permutation def invert_permutation( x: Union[np.ndarray, list, tuple], /, ) -> np.ndarray: sorted_indices = np.argsort(x) inverse = np.zeros_like(sorted_indices) inverse[sorted_indices] = np.arange(len(x)) inverse_permutation = np.argsort(inverse) return inverse_permutation # lexsort def lexsort( keys: np.ndarray, /, *, axis: int = -1, out: Optional[np.ndarray] = None ) -> np.ndarray: return np.asarray(np.lexsort(keys, axis=axis)) lexsort.support_native_out = False
ivy/ivy/functional/backends/numpy/experimental/sorting.py/0
{ "file_path": "ivy/ivy/functional/backends/numpy/experimental/sorting.py", "repo_id": "ivy", "token_count": 230 }
21
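The sorting helpers above reduce to standard NumPy idioms; a minimal sketch of what invert_permutation and lexsort compute:

import numpy as np

p = np.array([2, 0, 1])
inv = np.argsort(p)                  # inverse permutation: [1 2 0]
print(p[inv])                        # [0 1 2] -- composing with the inverse restores identity

ages = np.array([30, 20, 10])
group = np.array([0, 0, 1])
# np.lexsort treats the last key as the primary key, ties broken by earlier keys
print(np.lexsort((ages, group)))     # [1 0 2]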
# global import operator from typing import Optional, Union, Tuple, List, Sequence from numbers import Number import paddle from ivy.utils.exceptions import IvyNotImplementedException from ivy.func_wrapper import ( with_supported_dtypes, with_unsupported_device_and_dtypes, with_unsupported_dtypes, ) import ivy.functional.backends.paddle as paddle_backend import ivy from ivy import promote_types_of_inputs from ivy.functional.backends.paddle.elementwise import _elementwise_helper # local from .. import backend_version @with_supported_dtypes( { "2.6.0 and below": ( "float32", "float64", "int32", "int64", ) }, backend_version, ) def amax( x: paddle.Tensor, /, *, axis: Optional[Union[int, Sequence[int]]] = None, keepdims: bool = False, out: Optional[paddle.Tensor] = None, ) -> paddle.Tensor: return paddle.amax(x, axis=axis, keepdim=keepdims) @with_supported_dtypes( { "2.6.0 and below": ( "float32", "float64", "int32", "int64", ) }, backend_version, ) def amin( x: paddle.Tensor, /, *, axis: Optional[Union[int, Sequence[int]]] = None, keepdims: bool = False, out: Optional[paddle.Tensor] = None, ) -> paddle.Tensor: return paddle.amin(x, axis=axis, keepdim=keepdims) @with_supported_dtypes( {"2.6.0 and below": ("float32", "float64")}, backend_version, ) def lgamma( x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None ) -> paddle.Tensor: return paddle.lgamma(x) @with_supported_dtypes( {"2.6.0 and below": ("float64", "float32", "int32", "int64")}, backend_version, ) def fmax( x1: paddle.Tensor, x2: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None, ) -> paddle.Tensor: if x1.dtype != x2.dtype: x1, x2 = promote_types_of_inputs(x1, x2) return paddle.fmax(x1, x2) @with_unsupported_device_and_dtypes( {"2.6.0 and below": {"cpu": ("float16",)}}, backend_version ) def sinc(x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None) -> paddle.Tensor: y = ivy.pi * paddle.where(x == 0, paddle.to_tensor(1.0e-20, dtype=x.dtype), x) return paddle.divide(paddle.sin(y), y) def float_power( x1: Union[paddle.Tensor, float, list, tuple], x2: Union[paddle.Tensor, float, list, tuple], /, *, out: Optional[paddle.Tensor] = None, ) -> paddle.Tensor: x1 = paddle.cast(x1, dtype="float64") x2 = paddle.cast(x2, dtype="float64") # Compute the element-wise power return paddle.cast(paddle.pow(x1, x2), dtype=paddle.float64) def frexp( x: Union[paddle.Tensor, Number], /, *, out: Optional[paddle.Tensor] = None, ) -> paddle.Tensor: raise IvyNotImplementedException def ldexp( x1: Union[paddle.Tensor, Number], x2: Union[paddle.Tensor, Number], /, *, out: Optional[paddle.Tensor] = None, ) -> paddle.Tensor: out_dtype = x1.dtype x1, x2 = promote_types_of_inputs(x1, x2) with ivy.ArrayMode(False): if ivy.any(ivy.less(x2, 0)): pos_exp = ivy.greater_equal(x2, 0).astype(x2.dtype) * x2 neg_exp = ivy.less(x2, 0).astype(x2.dtype) * x2 ret = ivy.multiply(ivy.pow(2, pos_exp), x1) ret = ivy.divide(ret, ivy.pow(2, -neg_exp)) else: ret = ivy.multiply(ivy.pow(2, x2), x1) return ivy.astype(ret, out_dtype, copy=False) @with_unsupported_device_and_dtypes( {"2.6.0 and below": {"cpu": ("float16", "bfloat16")}}, backend_version ) def copysign( x1: Union[paddle.Tensor, Number], x2: Union[paddle.Tensor, Number], /, *, out: Optional[paddle.Tensor] = None, ) -> paddle.Tensor: x2 = paddle_backend.where( paddle_backend.equal(x2, 0.0), paddle_backend.divide(1.0, x2), x2 ) signs = paddle_backend.sign(x2) result = paddle_backend.multiply(paddle_backend.abs(x1), signs) return result @with_unsupported_device_and_dtypes( {"2.6.0 and below": {"cpu": 
("uint8", "int8", "int16", "float16")}}, backend_version, ) def nansum( x: paddle.Tensor, /, *, axis: Optional[Union[Tuple[int, ...], int]] = None, dtype: Optional[paddle.dtype] = None, keepdims: bool = False, out: Optional[paddle.Tensor] = None, ) -> paddle.Tensor: result = paddle.nansum(x, axis=axis, dtype=dtype, keepdim=keepdims) if result.shape == [1]: result = paddle.fluid.layers.squeeze(result, [0]) return result @with_unsupported_device_and_dtypes( {"2.6.0 and below": {"cpu": ("float16",)}}, backend_version ) def isclose( a: paddle.Tensor, b: paddle.Tensor, /, *, rtol: float = 1e-05, atol: float = 1e-08, equal_nan: bool = False, out: Optional[paddle.Tensor] = None, ) -> paddle.Tensor: return paddle.isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan) @with_unsupported_dtypes( {"2.6.0 and below": ("float16", "int16", "int8", "uint8")}, backend_version ) def diff( x: Union[paddle.Tensor, list, tuple], /, *, n: int = 1, axis: int = -1, prepend: Optional[Union[paddle.Tensor, int, float, list, tuple]] = None, append: Optional[Union[paddle.Tensor, int, float, list, tuple]] = None, out: Optional[paddle.Tensor] = None, ) -> paddle.Tensor: ret_dtype = x.dtype def _tensor(val): if val is not None and not isinstance(val, paddle.Tensor): return paddle.to_tensor(val, dtype=ret_dtype) return val prepend = _tensor(prepend) append = _tensor(append) return paddle.diff(x, n=n, axis=axis, prepend=prepend, append=append).cast( ret_dtype ) @with_unsupported_device_and_dtypes( {"2.6.0 and below": {"cpu": ("float16",)}}, backend_version ) def signbit( x: Union[paddle.Tensor, float, int, list, tuple], /, *, out: Optional[paddle.Tensor] = None, ) -> paddle.Tensor: return paddle_backend.less( paddle_backend.where(x.astype(bool), x, paddle_backend.divide(1.0, x)), 0.0 ) def hypot( x1: paddle.Tensor, x2: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None, ) -> paddle.Tensor: raise IvyNotImplementedException() @with_unsupported_device_and_dtypes( { "2.6.0 and below": { "cpu": ( "int8", "int16", "int32", "int64", "uint8", "float16", "complex64", "complex128", "bool", ) } }, backend_version, ) def allclose( x1: paddle.Tensor, x2: paddle.Tensor, /, *, rtol: float = 1e-05, atol: float = 1e-08, equal_nan: bool = False, out: Optional[paddle.Tensor] = None, ) -> bool: return paddle.allclose(x1, x2, rtol=rtol, atol=atol, equal_nan=equal_nan).squeeze(0) @with_unsupported_dtypes({"2.6.0 and below": ("float16",)}, backend_version) def fix( x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None, ) -> paddle.Tensor: with ivy.ArrayMode(False): return ivy.trunc(x) @with_unsupported_device_and_dtypes( {"2.6.0 and below": {"cpu": ("float16",)}}, backend_version ) def nextafter( x1: paddle.Tensor, x2: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None, ) -> paddle.Tensor: x1, x2 = ivy.promote_types_of_inputs(x1, x2) with ivy.ArrayMode(False): eps = ivy.finfo(x1.dtype).eps return ivy.where( ivy.equal(x1, x2), x2, ivy.where(ivy.greater(x2, x1), ivy.add(x1, eps), ivy.subtract(x1, eps)), ) _BERNOULLI_COEFS = [ 12, -720, 30240, -1209600, 47900160, -1307674368000 / 691, 74724249600, -10670622842880000 / 3617, 5109094217170944000 / 43867, -802857662698291200000 / 174611, 14101100039391805440000 / 77683, -1693824136731743669452800000 / 236364091, 186134520519971831808000000 / 657931, -37893265687455865519472640000000 / 3392780147, 759790291646040068357842010112000000 / 1723168255201, -134196726836183700385281186201600000000 / 7709321041217, ] @with_unsupported_device_and_dtypes( { "2.6.0 and below": { "cpu": ( 
"int8", "int16", "int32", "int64", "uint8", "uint16", "float16", "bool", ) } }, backend_version, ) def zeta( x: paddle.Tensor, q: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None, ) -> paddle.Tensor: with ivy.ArrayMode(False): s, a = ivy.promote_types_of_inputs(x, q) s_, a_ = paddle.unsqueeze(x, -1), paddle.unsqueeze(q, -1) N = M = ( paddle.to_tensor(8.0, dtype="float32") if q.dtype == paddle.float32 else paddle.to_tensor(8.0, dtype="float64") ) assert len(_BERNOULLI_COEFS) >= M k = paddle.unsqueeze(ivy.arange(N, dtype=q.dtype), tuple(range(q.ndim))) S = paddle.sum((a_ + k) ** -s_, -1) Q = ivy.divide((q + N) ** (1 - x), x - 1) T0 = (q + N) ** -x m = paddle.unsqueeze(ivy.arange(2 * M, dtype=s.dtype), tuple(range(s.ndim))) s_over_a = (s_ + m) / (a_ + N) s_over_a = ivy.where( s_over_a == 0, paddle.ones_like(s_over_a) * 1e-20, s_over_a ) T1 = paddle.cumprod(s_over_a, -1)[..., ::2] # t=np.array(T1) T1 = paddle.clip(T1, max=ivy.finfo(T1.dtype).max) coefs = paddle.unsqueeze( paddle.to_tensor(_BERNOULLI_COEFS[: T1.shape[-1]], dtype=T1.dtype), tuple(range(a.ndim)), ) T1 = T1 / coefs T = T0 * (0.5 + paddle.sum(T1, -1)) ans = S + Q + T mask = x < 1 ans[mask] = ivy.nan return ans def _normalize_axis_index(ax: int, ndim: int) -> int: if ax >= ndim or ax < -ndim: raise ValueError("axis index is out of range") return (ax + ndim) % ndim def _normalize_axis_tuple(axis: Union[int, list, tuple], ndim: int) -> Tuple[int, ...]: if type(axis) not in (tuple, list): try: axis = [operator.index(axis)] except TypeError: pass axis = tuple(_normalize_axis_index(ax, ndim) for ax in axis) if len(set(axis)) != len(axis): raise ValueError("repeated axis") return axis def _np_ndim(x): return paddle.to_tensor(x).ndim @with_supported_dtypes( {"2.6.0 and below": ("float32", "float64")}, backend_version, ) def gradient( x: paddle.Tensor, /, *, spacing: Union[int, list, tuple] = 1, axis: Optional[Union[int, list, tuple]] = None, edge_order: int = 1, ) -> Union[paddle.Tensor, List[paddle.Tensor]]: """Https://github.com/numpy/numpy/blob/v1.24.3/numpy/lib/ function_base.py#L969-L1312.""" # TODO: Remove % x.shape[axis] once scatter_nd supports negative indices N = x.ndim # number of dimensions if axis is None: axes = tuple(range(N)) else: axes = _normalize_axis_tuple(axis, N) len_axes = len(axes) n = ( -1 if spacing is None else (0 if type(spacing) in (int, float) else len(spacing)) ) if n == -1: # no spacing argument - use 1 in all axes dx = [1.0] * len_axes elif n == 0: dx = [spacing] * len_axes elif n == 1 and _np_ndim(spacing[0]) == 0: # single scalar for all axes dx = spacing * len_axes elif n == len_axes: # scalar or 1d array for each axis dx = list(spacing) for i, distances in enumerate(dx): distances = paddle.to_tensor(distances) if _np_ndim(distances) == 0: continue elif _np_ndim(distances) != 1: raise ValueError("distances must be either scalars or 1d") if len(distances) != x.shape[axes[i]]: raise ValueError( "when 1d, distances must match the length of the corresponding" f" dimension {len(distances)} {x.shape[axes[i]]}" ) if paddle.is_integer(distances): # Convert numpy integer types to float64 to avoid modular # arithmetic in np.diff(distances). 
distances = distances.astype("float64") diffx = paddle.diff(distances) # if distances are constant reduce to the scalar case # since it brings a consistent speedup # cmp = diffx == diffx[0] if paddle.all(paddle.equal(diffx, diffx[0])): diffx = diffx[0] # if tf.reduce_sum(tf.cast(cmp, tf.int32)) == cmp.numel(): # print(diffx, (diffx == diffx[0])) # diffx = diffx[0] dx[i] = diffx else: raise TypeError("invalid number of arguments") if edge_order > 2: raise ValueError("'edge_order' greater than 2 not supported") # use central differences on interior and one-sided differences on the # endpoints. This preserves second order-accuracy over the full domain. outvals = [] dx = paddle.to_tensor(dx) # create slice objects --- initially all are [:, :, ..., :] slice1 = [slice(None)] * N slice2 = [slice(None)] * N slice3 = [slice(None)] * N slice4 = [slice(None)] * N if paddle.is_integer(x): x = x.astype("float64") for axis, ax_dx in zip(axes, dx): if x.shape[axis] < edge_order + 1: raise ValueError( "Shape of array too small to calculate a numerical gradient, " "at least (edge_order + 1) elements are required." ) # result allocation out = paddle.empty_like(x) # x.clone() # spacing for the current axis uniform_spacing = _np_ndim(ax_dx) == 0 # Numerical differentiation: 2nd order interior slice1[axis] = slice(1, -1) slice2[axis] = slice(None, -2) slice3[axis] = slice(1, -1) slice4[axis] = slice(2, None) if uniform_spacing: x_slice2 = x[tuple(slice2)] x_slice4 = x[tuple(slice4)] # since paddle doesn't support elementwise operations for empty tensors # numpy behaviour needs to be replicated manually if 0 not in x_slice2.shape + x_slice4.shape: out[tuple(slice1)] = x_slice4 - x_slice2 / (2.0 * ax_dx) else: # fix the shape for broadcasting shape = [1] * N shape[axis] = -1 dx1 = ax_dx[0:-1] dx2 = ax_dx[1:] a = (-(dx2) / (dx1 * (dx1 + dx2))).reshape(shape) b = ((dx2 - dx1) / (dx1 * dx2)).reshape(shape) c = (dx1 / (dx2 * (dx1 + dx2))).reshape(shape) x_slice2 = x[tuple(slice2)] x_slice3 = x[tuple(slice3)] x_slice4 = x[tuple(slice4)] # 1D equivalent -- out[1:-1] = a * f[:-2] + b * f[1:-1] + c * f[2:] if ( 0 not in x_slice2.shape + x_slice3.shape + x_slice4.shape + a.shape + b.shape + c.shape ): out[tuple(slice1)] = a * x_slice2 + b * x_slice3 + c * x_slice4 # Numerical differentiation: 1st order edges if edge_order == 1: slice1[axis] = 0 slice2[axis] = 1 slice3[axis] = 0 dx_0 = ax_dx if uniform_spacing else ax_dx[0] x_slice2 = x[tuple(slice2)] x_slice3 = x[tuple(slice3)] # 1D equivalent -- out[0] = (f[1] - f[0]) / (x[1] - x[0]) if 0 not in x_slice2.shape + x_slice3.shape: out[tuple(slice1)] = (x_slice2 - x_slice3) / dx_0 slice1[axis] = -1 slice2[axis] = -1 slice3[axis] = -2 dx_n = ax_dx if uniform_spacing else ax_dx[-1] x_slice2 = x[tuple(slice2)] x_slice3 = x[tuple(slice3)] # 1D equivalent -- out[-1] = (f[-1] - f[-2]) / (x[-1] - x[-2]) if 0 not in x_slice2.shape + x_slice3.shape: out[tuple(slice1)] = (x_slice2 - x_slice3) / dx_n # Numerical differentiation: 2nd order edges else: slice1[axis] = 0 slice2[axis] = 0 slice3[axis] = 1 slice4[axis] = 2 if uniform_spacing: a = -1.5 / ax_dx b = 2.0 / ax_dx c = -0.5 / ax_dx else: dx1 = ax_dx[0] dx2 = ax_dx[1] a = -(2.0 * dx1 + dx2) / (dx1 * (dx1 + dx2)) b = (dx1 + dx2) / (dx1 * dx2) c = -dx1 / (dx2 * (dx1 + dx2)) # 1D equivalent -- out[0] = a * f[0] + b * f[1] + c * f[2] x_slice2 = x[tuple(slice2)] x_slice3 = x[tuple(slice3)] x_slice4 = x[tuple(slice4)] if ( 0 not in x_slice2.shape + x_slice3.shape + x_slice4.shape + a.shape + b.shape + c.shape ): out[tuple(slice1)] = a * 
x_slice2 + b * x_slice3 + c * x_slice4 slice1[axis] = -1 slice2[axis] = -3 slice3[axis] = -2 slice4[axis] = -1 if uniform_spacing: a = 0.5 / ax_dx b = -2.0 / ax_dx c = 1.5 / ax_dx else: dx1 = ax_dx[-2] dx2 = ax_dx[-1] a = (dx2) / (dx1 * (dx1 + dx2)) b = -(dx2 + dx1) / (dx1 * dx2) c = (2.0 * dx2 + dx1) / (dx2 * (dx1 + dx2)) # 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + c * f[-1] x_slice2 = x[tuple(slice2)] x_slice3 = x[tuple(slice3)] x_slice4 = x[tuple(slice4)] if ( 0 not in x_slice2.shape + x_slice3.shape + x_slice4.shape + a.shape + b.shape + c.shape ): out[tuple(slice1)] = a * x_slice2 + b * x_slice3 + c * x_slice4 outvals.append(out) # reset the slice object in this dimension to ":" slice1[axis] = slice(None) slice2[axis] = slice(None) slice3[axis] = slice(None) slice4[axis] = slice(None) if len_axes == 1: return outvals[0] else: return outvals def xlogy( x: paddle.Tensor, y: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None ) -> paddle.Tensor: x, y, ret_dtype = _elementwise_helper(x, y) with ivy.ArrayMode(False): x_ok = ivy.not_equal(x, 0.0) safe_x = ivy.where(x_ok, x, 1.0) safe_y = ivy.where(x_ok, y, 1.0) return ivy.where( x_ok, ivy.multiply(safe_x, ivy.log(safe_y)), ivy.zeros_like(x) ).cast(ret_dtype) def count_nonzero( a: paddle.Tensor, /, *, axis: Optional[Union[int, list, tuple]] = None, keepdims: bool = False, dtype: Optional[paddle.dtype] = None, out: Optional[paddle.Tensor] = None, ) -> paddle.Tensor: with ivy.ArrayMode(False): return ivy.sum(ivy.not_equal(a, 0), axis=axis, keepdims=keepdims, dtype=dtype) @with_supported_dtypes( { "2.6.0 and below": ( "complex64", "complex128", "float32", "float64", "int32", "int64", ) }, backend_version, ) def conj(x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None) -> paddle.Tensor: return paddle.conj(x) @with_supported_dtypes( { "2.5.0 and below": ( "float32", "float64", ) }, backend_version, ) def modf( x: paddle.Tensor, /, *, out: Optional[Tuple[paddle.Tensor, paddle.Tensor]] = None ) -> Tuple[paddle.Tensor, paddle.Tensor]: with ivy.ArrayMode(False): integer_part = paddle.floor(x) fractional_part = x - integer_part return fractional_part, integer_part @with_supported_dtypes( { "2.5.0 and below": ( "float32", "float64", ) }, backend_version, ) def digamma( x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None, ) -> paddle.Tensor: return paddle.digamma(x) # --- erfc --- # # Polynomials for computing erf/erfc. Originally from cephes library. # https://netlib.org/cephes/doubldoc.html kErfcPCoefficient = paddle.to_tensor( [ 2.46196981473530512524e-10, 5.64189564831068821977e-1, 7.46321056442269912687e0, 4.86371970985681366614e1, 1.96520832956077098242e2, 5.26445194995477358631e2, 9.34528527171957607540e2, 1.02755188689515710272e3, 5.57535335369399327526e2, ] ) kErfcQCoefficient = paddle.to_tensor( [ 1.00000000000000000000e0, 1.32281951154744992508e1, 8.67072140885989742329e1, 3.54937778887819891062e2, 9.75708501743205489753e2, 1.82390916687909736289e3, 2.24633760818710981792e3, 1.65666309194161350182e3, 5.57535340817727675546e2, ] ) kErfcRCoefficient = paddle.to_tensor( [ 5.64189583547755073984e-1, 1.27536670759978104416e0, 5.01905042251180477414e0, 6.16021097993053585195e0, 7.40974269950448939160e0, 2.97886665372100240670e0, ] ) kErfcSCoefficient = paddle.to_tensor( [ 1.00000000000000000000e0, 2.26052863220117276590e0, 9.39603524938001434673e0, 1.20489539808096656605e1, 1.70814450747565897222e1, 9.60896809063285878198e0, 3.36907645100081516050e0, ] ) # Evaluate the polynomial given coefficients and `x`. # N.B. 
Coefficients should be supplied in decreasing order. def _EvaluatePolynomial(x, coefficients): poly = paddle.full_like(x, 0.0) for c in coefficients: poly = poly * x + c return poly def _is_scalar(x): """Determines if the given tensor is a scalar. Args ---- - x (paddle.Tensor): Input tensor. Return ------ - bool: True if the tensor is a scalar, False otherwise. """ return x.size == 1 and x.dim() == 0 and tuple(x.shape) == () # TODO: Repalce once native function becomes available. # Compute an approximation of the error function complement (1 - erf(x)). @with_supported_dtypes( {"2.6.0 and below": ("float64", "float32")}, backend_version, ) def erfc(x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None) -> paddle.Tensor: any_input_is_scalar = _is_scalar(x) if str(x.dtype) not in ["paddle.float32", "paddle.float64"]: raise ValueError("Input must be of type float32 or float64.") abs_x = paddle.abs(x) z = paddle.exp(-x * x) pp = _EvaluatePolynomial(abs_x, kErfcPCoefficient) pq = _EvaluatePolynomial(abs_x, kErfcQCoefficient) pr = _EvaluatePolynomial(abs_x, kErfcRCoefficient) ps = _EvaluatePolynomial(abs_x, kErfcSCoefficient) abs_x_small = abs_x < 8.0 y = paddle.where(abs_x_small, z * pp / pq, z * pr / ps) result_no_underflow = paddle.where(x < 0.0, 2.0 - y, y) def is_pos_inf(op): return paddle.logical_and(paddle.isinf(op), op > 0) underflow = paddle.logical_or( z == 0, paddle.logical_or( paddle.logical_and(is_pos_inf(pq), abs_x_small), paddle.logical_and(is_pos_inf(ps), paddle.logical_not(abs_x_small)), ), ) result_underflow = paddle.where( x < 0, paddle.full_like(x, 2), paddle.full_like(x, 0) ) result = paddle.where(underflow, result_underflow, result_no_underflow) if any_input_is_scalar: result = paddle.squeeze(result, axis=-1) return result @with_supported_dtypes( {"2.6.0 and below": ("float32", "float64")}, backend_version, ) def erfinv( x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None ) -> paddle.Tensor: return paddle.erfinv(x)
ivy/ivy/functional/backends/paddle/experimental/elementwise.py/0
{ "file_path": "ivy/ivy/functional/backends/paddle/experimental/elementwise.py", "repo_id": "ivy", "token_count": 12596 }
22
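A small sketch of the native Paddle ops that several of the wrappers above delegate to (fmax and isclose). It assumes a recent Paddle 2.x install and is only meant to illustrate the NaN handling:

import paddle

a = paddle.to_tensor([1.0, float("nan"), 3.0])
b = paddle.to_tensor([2.0, 2.0, float("nan")])
# fmax prefers the non-NaN operand when only one side is NaN
print(paddle.fmax(a, b))                      # [2., 2., 3.]
print(paddle.isclose(a, a, equal_nan=True))   # [True, True, True]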
"""Collection of Paddle gradient functions, wrapped to fit Ivy syntax and signature.""" # global from typing import Optional, Callable import paddle import ivy.functional.backends.paddle as paddle_backend from itertools import chain # local import ivy from ivy.func_wrapper import with_unsupported_device_and_dtypes from . import backend_version from ivy.functional.ivy.gradients import ( _get_required_float_variables, _get_y_and_ret_idxs, _get_native_y, _set_duplicates, _process_func_ret_and_grads, ) def variable(x, /): if not x.is_leaf: ret = x.detach() ret.stop_gradient = False return ret ret = paddle_backend.copy_array(x).to_native() ret.stop_gradient = False return ret def is_variable(x, /, *, exclusive: bool = False): return isinstance(x, paddle.Tensor) and not x.stop_gradient def variable_data(x: paddle.Tensor, /) -> paddle.Tensor: return x.value() def _grad_func(y, xs, retain_grads): """Gradient calculation function.""" # Creating a zero gradient nest for the case where no gradients are computed grads_ = ivy.nested_map( lambda x: (paddle.to_tensor([0.0]) if x is None else paddle.zeros_like(x)), xs, include_derived=True, shallow=False, ) # Gradient calculation if isinstance(xs, paddle.Tensor): grads = paddle.grad( outputs=y, inputs=xs, retain_graph=True, create_graph=retain_grads, allow_unused=True, )[0] grads = grads_ if grads is None else grads elif isinstance(xs, ivy.Container): grads = xs.cont_from_flat_list( list( paddle.grad( outputs=[y], inputs=[ paddle.to_tensor([0.0]) if v is None else v for k, v in xs.cont_to_iterator() ], retain_graph=True, create_graph=retain_grads, allow_unused=True, ) ) ) # Returning zeros if no gradients are computed for consistent results if isinstance(grads, ivy.Container): grads = ivy.nested_map( lambda x: 0 if x is None else x, grads, include_derived=True ) grads = ivy.add(grads, grads_) else: grads = grads_ if grads is None else grads else: def grad_(x): x = paddle.to_tensor([0.0]) if x is None else x grad = paddle.grad( outputs=y, inputs=paddle.to_tensor([0.0]) if x is None else x, retain_graph=True, create_graph=retain_grads, allow_unused=True, )[0] return grad if grad is not None else paddle.zeros_like(x) grads = ivy.nested_map(grad_, xs, include_derived=True, shallow=False) grads = ivy.nested_multi_map( lambda x, _: (paddle_backend.add(x[0], x[1])), [grads, grads_] ) return grads @with_unsupported_device_and_dtypes( {"2.6.0 and below": {"cpu": ("float16",)}}, backend_version ) def execute_with_gradients( func, xs, /, *, retain_grads=False, xs_grad_idxs=((0,),), ret_grad_idxs=((0,),) ): # Conversion of required arrays to float variables and duplicate index chains xs, xs_grad_idxs, xs1, required_duplicate_index_chains, _ = ( _get_required_float_variables(xs, xs_grad_idxs) ) func_ret = func(xs) xs = xs1 if isinstance(xs, ivy.Container): duplicate_indices = list( chain.from_iterable( [ map(lambda x: x.split("/"), duplicate_index_chain[1:]) for duplicate_index_chain in required_duplicate_index_chains ] ) ) xs = ivy.set_nest_at_indices(xs, duplicate_indices, None, shallow=False) # Getting the relevant outputs from the function return for gradient calculation ret_grad_idxs, y, ret_idxs = _get_y_and_ret_idxs( func_ret, ret_grad_idxs, create_var=True ) if isinstance(y, ivy.NativeArray): # Gradient calculation for a single output grads = _set_duplicates( _grad_func(paddle.clone(y), xs, retain_grads), required_duplicate_index_chains, ) else: # Gradient calculation for multiple outputs # y = _get_native_y(y) grad_arr_idxs = ivy.nested_argwhere(y, lambda x: 
ivy.is_native_array(x)) grad_arr_values = ivy.multi_index_nest(y, grad_arr_idxs) grads_ = [ _grad_func(paddle.clone(arr_value), xs, retain_grads) for arr_value in grad_arr_values ] grads = grads_ if isinstance(ret_idxs, list) and len(ret_idxs): grads = { ret_idxs[i]: _set_duplicates(grad, required_duplicate_index_chains) for i, grad in enumerate(grads_) } # Stop further gradient propagation if not retaining gradients return _process_func_ret_and_grads(func_ret, grads, retain_grads) def value_and_grad(func): def grad_fn(xs): return ivy.to_native(func(xs)) def callback_fn(xs): y = grad_fn(xs) def autograd_fn(x): x = ivy.to_native(x) grad = paddle.grad(y, x, allow_unused=True)[0] grad = grad if grad is not None else paddle.zeros_like(x) grad = ivy.to_ivy(grad) return grad grads = ivy.nested_map(autograd_fn, xs, include_derived=True, shallow=False) y = ivy.to_ivy(y) return y, grads return callback_fn def stop_gradient( x: Optional[paddle.Tensor], /, *, preserve_type: bool = True, out: Optional[paddle.Tensor] = None, ): is_var = is_variable(x) x.stop_gradient = True if is_var and preserve_type: return variable(x) return x def _get_jac_one_arg_fn(grad_fn, xs, out_idx): nested_indices = iter(ivy.all_nested_indices(xs)) def one_arg_fn(x): idx = next(nested_indices) new_xs = ivy.set_nest_at_index(xs, idx, x, shallow=False) if idx else x ret = grad_fn(new_xs) for i in out_idx: ret = ret[i] return ret return one_arg_fn def _get_one_out_fn(grad_fn, xs, fn_ret): out_nested_indices = iter(ivy.all_nested_indices(fn_ret)) def one_out_fn(o): out_idx = next(out_nested_indices) out_shape = ivy.index_nest(grad_fn(xs), out_idx).shape one_arg_fn = _get_jac_one_arg_fn(grad_fn, xs, out_idx) jacobian = ivy.nested_map( lambda x: jacobian_to_ivy( paddle.incubate.autograd.Jacobian( one_arg_fn, ivy.to_native(x.expand_dims()) ), x.shape, out_shape, ), xs, shallow=False, ) return jacobian return one_out_fn def jacobian_to_ivy(jacobian, in_shape, out_shape): jac_ivy = ivy.to_ivy(jacobian[:]) jac_shape = out_shape + in_shape jac_reshaped = jac_ivy.reshape(jac_shape) return jac_reshaped def jac(func: Callable): def grad_fn(x_in): return ivy.to_native( func(ivy.to_ivy(x_in, nested=True)), nested=True, include_derived=True ) def callback_fn(xs): fn_ret = grad_fn(xs) one_out_fn = _get_one_out_fn(grad_fn, xs, fn_ret) jacobian = ivy.nested_map(one_out_fn, fn_ret) return jacobian return callback_fn def grad(f, argnums=0): if grad.nth == 0: grad.f_original = f # ToDo: Return grads on nth chained calls rather than None. issue with paddle.grad. def _nth_derivative(n): def _inner(x): x = ivy.to_native(x) if n == 0: x.stop_gradient = False ret = grad.f_original(x) if grad.f_original is not None else f(x) grad.nth = 0 return ret else: x.stop_gradient = False y = _nth_derivative(n - 1)(x) y = ivy.to_native(y) y_ones = paddle.ones_like(y) y_ones.stop_gradient = False y.stop_gradient = False dy_dx = paddle.grad( outputs=[y], inputs=[x], create_graph=True, grad_outputs=y_ones, retain_graph=True, allow_unused=True, )[0] return dy_dx return _inner grad.nth += 1 return _nth_derivative(grad.nth) grad.f_original = None grad.nth = 0
ivy/ivy/functional/backends/paddle/gradients.py/0
{ "file_path": "ivy/ivy/functional/backends/paddle/gradients.py", "repo_id": "ivy", "token_count": 4508 }
23
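A hedged usage sketch of the gradient machinery above through the public Ivy API; the exact return structure of execute_with_gradients may differ between Ivy versions:

import ivy

ivy.set_backend("paddle")

def loss_fn(w):
    return (w ** 2).sum()

w = ivy.array([1.0, 2.0, 3.0])
# returns the loss value and the gradients of the sum of squares w.r.t. w
value, grads = ivy.execute_with_gradients(loss_fn, w)
print(value)   # 14.0
print(grads)   # expected: 2 * w = [2., 4., 6.]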
# global from typing import Optional, Union, Sequence, List import numpy as np import tensorflow as tf from tensorflow.python.framework.dtypes import DType # local import ivy from ivy.func_wrapper import with_unsupported_dtypes from ivy.functional.ivy.data_type import _handle_nestable_dtype_info from . import backend_version ivy_dtype_dict = { tf.int8: "int8", tf.int16: "int16", tf.int32: "int32", tf.int64: "int64", tf.uint8: "uint8", tf.uint16: "uint16", tf.uint32: "uint32", tf.uint64: "uint64", tf.bfloat16: "bfloat16", tf.float16: "float16", tf.float32: "float32", tf.float64: "float64", tf.complex64: "complex64", tf.complex128: "complex128", tf.bool: "bool", } native_dtype_dict = { "int8": tf.int8, "int16": tf.int16, "int32": tf.int32, "int64": tf.int64, "uint8": tf.uint8, "uint16": tf.uint16, "uint32": tf.uint32, "uint64": tf.uint64, "bfloat16": tf.bfloat16, "float16": tf.float16, "float32": tf.float32, "float64": tf.float64, "complex64": tf.complex64, "complex128": tf.complex128, "bool": tf.bool, } class Finfo: def __init__(self, tf_finfo: tf.experimental.numpy.finfo): self._tf_finfo = tf_finfo def __repr__(self): return repr(self._tf_finfo) @property def bits(self): return self._tf_finfo.bits @property def eps(self): return float(self._tf_finfo.eps) @property def max(self): return float(self._tf_finfo.max) @property def min(self): return float(self._tf_finfo.min) @property def smallest_normal(self): return float(self._tf_finfo.tiny) class Bfloat16Finfo: def __init__(self): self.resolution = 0.01 self.bits = 16 self.eps = 0.0078125 self.max = 3.38953e38 self.min = -3.38953e38 self.tiny = 1.17549e-38 def __repr__(self): return ( f"finfo(resolution={self.resolution}, min={self.min}, max={self.max}," " dtype='bfloat16')" ) # Array API Standard # # -------------------# def astype( x: Union[tf.Tensor, tf.Variable], dtype: Union[DType, str], /, *, copy: bool = True, out: Optional[Union[tf.Tensor, tf.Variable]] = None, ) -> Union[tf.Tensor, tf.Variable]: dtype = ivy.as_native_dtype(dtype) if x.dtype == dtype: return tf.experimental.numpy.copy(x) if copy else x return tf.cast(x, dtype) def broadcast_arrays( *arrays: Union[tf.Tensor, tf.Variable], ) -> List[Union[tf.Tensor, tf.Variable]]: if len(arrays) > 1: try: desired_shape = tf.broadcast_dynamic_shape(arrays[0].shape, arrays[1].shape) except tf.errors.InvalidArgumentError as e: raise ivy.utils.exceptions.IvyBroadcastShapeError(e) from e if len(arrays) > 2: for i in range(2, len(arrays)): try: desired_shape = tf.broadcast_dynamic_shape( desired_shape, arrays[i].shape ) except tf.errors.InvalidArgumentError as e: raise ivy.utils.exceptions.IvyBroadcastShapeError(e) from e else: return [arrays[0]] result = [] for tensor in arrays: result.append(tf.broadcast_to(tensor, desired_shape)) return result def broadcast_to( x: Union[tf.Tensor, tf.Variable], /, shape: Union[ivy.NativeShape, Sequence[int]], *, out: Optional[Union[tf.Tensor, tf.Variable]] = None, ) -> Union[tf.Tensor, tf.Variable]: ivy.utils.assertions.check_shapes_broadcastable(x.shape, shape) if tf.rank(x) > len(shape): return tf.broadcast_to(tf.reshape(x, -1), shape) return tf.broadcast_to(x, shape) @_handle_nestable_dtype_info def finfo(type: Union[DType, str, tf.Tensor, tf.Variable, np.ndarray], /) -> Finfo: if isinstance(type, (tf.Tensor, np.ndarray)): type = type.dtype if ivy.as_native_dtype(type) == tf.bfloat16: return Finfo(Bfloat16Finfo()) return Finfo(tf.experimental.numpy.finfo(ivy.as_native_dtype(type))) @_handle_nestable_dtype_info def iinfo(type: Union[DType, str, 
tf.Tensor, tf.Variable, np.ndarray], /) -> np.iinfo: if isinstance(type, (tf.Tensor, np.ndarray)): type = type.dtype return tf.experimental.numpy.iinfo(ivy.as_ivy_dtype(type)) @with_unsupported_dtypes({"2.15.0 and below": ("bfloat16",)}, backend_version) def result_type( *arrays_and_dtypes: Union[tf.Tensor, tf.Variable, tf.DType], ) -> ivy.Dtype: if len(arrays_and_dtypes) <= 1: return tf.experimental.numpy.result_type(arrays_and_dtypes) result = tf.experimental.numpy.result_type( arrays_and_dtypes[0], arrays_and_dtypes[1] ) for i in range(2, len(arrays_and_dtypes)): result = tf.experimental.numpy.result_type(result, arrays_and_dtypes[i]) return as_ivy_dtype(result) # Extra # # ------# def as_ivy_dtype( dtype_in: Union[tf.DType, str, int, float, complex, bool, np.dtype], /, ) -> ivy.Dtype: if dtype_in is int: return ivy.default_int_dtype() if dtype_in is float: return ivy.default_float_dtype() if dtype_in is complex: return ivy.default_complex_dtype() if dtype_in is bool: return ivy.Dtype("bool") if isinstance(dtype_in, np.dtype): dtype_in = dtype_in.name if isinstance(dtype_in, str): if dtype_in in native_dtype_dict: dtype_str = dtype_in else: raise ivy.utils.exceptions.IvyException( "Cannot convert to ivy dtype." f" {dtype_in} is not supported by TensorFlow backend." ) else: dtype_str = ivy_dtype_dict[dtype_in] if "uint" in dtype_str: return ivy.UintDtype(dtype_str) elif "int" in dtype_str: return ivy.IntDtype(dtype_str) elif "float" in dtype_str: return ivy.FloatDtype(dtype_str) elif "complex" in dtype_str: return ivy.ComplexDtype(dtype_str) elif "bool" in dtype_str: return ivy.Dtype("bool") else: raise ivy.utils.exceptions.IvyException( f"Cannot recognize {dtype_str} as a valid Dtype." ) def as_native_dtype( dtype_in: Union[tf.DType, str, bool, int, float, np.dtype], ) -> tf.DType: if dtype_in is int: return ivy.default_int_dtype(as_native=True) if dtype_in is float: return ivy.default_float_dtype(as_native=True) if dtype_in is complex: return ivy.default_complex_dtype(as_native=True) if dtype_in is bool: return tf.bool if isinstance(dtype_in, np.dtype): dtype_in = dtype_in.name if not isinstance(dtype_in, str): return dtype_in if dtype_in in native_dtype_dict: return native_dtype_dict[ivy.Dtype(dtype_in)] else: raise ivy.utils.exceptions.IvyException( "Cannot convert to TensorFlow dtype." f" {dtype_in} is not supported by TensorFlow." ) def dtype( x: Union[tf.Tensor, tf.Variable, np.ndarray], *, as_native: bool = False ) -> ivy.Dtype: if as_native: return ivy.as_native_dtype(x.dtype) return as_ivy_dtype(x.dtype) def dtype_bits(dtype_in: Union[tf.DType, str, np.dtype], /) -> int: dtype_str = as_ivy_dtype(dtype_in) if "bool" in dtype_str: return 1 return int( dtype_str.replace("tf.", "") .replace("uint", "") .replace("int", "") .replace("bfloat", "") .replace("float", "") .replace("complex", "") ) def is_native_dtype(dtype_in: Union[tf.DType, str], /) -> bool: if not ivy.is_hashable_dtype(dtype_in): return False return bool(dtype_in in ivy_dtype_dict and isinstance(dtype_in, tf.dtypes.DType)) # ToDo: # 1. result_type: Add support for bfloat16 with int16 # 2. can_cast : Add support for complex64, complex128
ivy/ivy/functional/backends/tensorflow/data_type.py/0
{ "file_path": "ivy/ivy/functional/backends/tensorflow/data_type.py", "repo_id": "ivy", "token_count": 3753 }
24
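A minimal sketch exercising the dtype utilities above through the public Ivy API with the TensorFlow backend selected; the bfloat16 value comes from the Bfloat16Finfo shim defined above:

import ivy

ivy.set_backend("tensorflow")
print(ivy.as_ivy_dtype("int32"))     # 'int32'
print(ivy.dtype_bits("float64"))     # 64
print(ivy.finfo("bfloat16").max)     # ~3.39e38, served by the Bfloat16Finfo shim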
# global from typing import Union, Optional, Sequence import tensorflow as tf from tensorflow.python.framework.dtypes import DType # local import ivy from .. import backend_version from ivy.func_wrapper import with_unsupported_dtypes from ivy.functional.ivy.random import ( _check_shapes_broadcastable, ) # dirichlet @with_unsupported_dtypes( { "2.15.0 and below": ( "blfoat16", "float16", ) }, backend_version, ) def dirichlet( alpha: Union[tf.Tensor, tf.Variable, float, Sequence[float]], /, *, size: Optional[Union[ivy.NativeShape, Sequence[int]]] = None, out: Optional[Union[tf.Tensor, tf.Variable]] = None, seed: Optional[int] = None, dtype: Optional[tf.Tensor] = None, ) -> Union[tf.Tensor, tf.Variable]: pass # TODO: Implement purely in tensorflow def beta( alpha: Union[float, tf.Tensor, tf.Variable], beta: Union[float, tf.Tensor, tf.Variable], /, *, shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None, device: Optional[str] = None, dtype: Optional[Union[DType, ivy.Dtype]] = None, seed: Optional[int] = None, out: Optional[Union[tf.Tensor, tf.Variable]] = None, ) -> Union[tf.Tensor, tf.Variable]: pass # TODO: Implement purely in tensorflow def gamma( alpha: Union[float, tf.Tensor, tf.Variable], beta: Union[float, tf.Tensor, tf.Variable], /, *, shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None, device: Optional[str] = None, dtype: Optional[Union[DType, ivy.Dtype]] = None, seed: Optional[int] = None, out: Optional[Union[tf.Tensor, tf.Variable]] = None, ) -> Union[tf.Tensor, tf.Variable]: pass # TODO: Implement purely in tensorflow @with_unsupported_dtypes({"2.15.0 and below": ("bfloat16",)}, backend_version) def poisson( lam: Union[float, tf.Tensor, tf.Variable], *, shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None, device: Optional[str] = None, dtype: DType, seed: Optional[int] = None, fill_value: Optional[Union[float, int]] = 0, out: Optional[Union[tf.Tensor, tf.Variable]] = None, ) -> Union[tf.Tensor, tf.Variable]: lam = tf.cast(lam, "float32") if seed: tf.random.set_seed(seed) if shape is None: return tf.random.poisson((), lam, dtype=dtype, seed=seed) shape = tf.cast(shape, "int32") _check_shapes_broadcastable(lam.shape, shape) lam = tf.broadcast_to(lam, tuple(shape)) ret = tf.random.poisson((), lam, dtype=dtype, seed=seed) if tf.reduce_any(lam < 0): return tf.where(lam < 0, fill_value, ret) return ret @with_unsupported_dtypes({"2.15.0 and below": ("bfloat16",)}, backend_version) def bernoulli( probs: Union[float, tf.Tensor, tf.Variable], *, logits: Union[float, tf.Tensor, tf.Variable] = None, shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None, device: Optional[str] = None, dtype: Optional[str] = None, seed: Optional[int] = None, out: Optional[Union[tf.Tensor, tf.Variable]] = None, ) -> Union[tf.Tensor, tf.Variable]: dtype = dtype if dtype is not None else probs.dtype if logits is not None: probs = tf.nn.softmax(logits, -1) if not _check_shapes_broadcastable(shape, probs.shape): shape = probs.shape return tf.keras.backend.random_bernoulli(shape, probs, dtype, seed)
ivy/ivy/functional/backends/tensorflow/experimental/random.py/0
{ "file_path": "ivy/ivy/functional/backends/tensorflow/experimental/random.py", "repo_id": "ivy", "token_count": 1398 }
25
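A hedged sketch of the poisson wrapper above via the public Ivy API; the keyword defaults shown here are assumptions and may differ slightly between Ivy versions:

import ivy

ivy.set_backend("tensorflow")
# lam broadcasts against the requested shape; entries with a negative rate
# are replaced by fill_value (0 by default)
samples = ivy.poisson([3.0, -1.0], shape=(4, 2), dtype="float32", fill_value=0, seed=0)
print(samples.shape)   # (4, 2); the second column is expected to be all zeros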
# global import tensorflow as tf from typing import Union, Optional, Literal, List # local import ivy from ivy.func_wrapper import with_unsupported_dtypes from . import backend_version @with_unsupported_dtypes({"2.15.0 and below": ("complex", "bool")}, backend_version) def argsort( x: Union[tf.Tensor, tf.Variable], /, *, axis: int = -1, descending: bool = False, stable: bool = True, out: Optional[Union[tf.Tensor, tf.Variable]] = None, ) -> Union[tf.Tensor, tf.Variable]: direction = "DESCENDING" if descending else "ASCENDING" x = tf.convert_to_tensor(x) ret = tf.argsort(x, axis=axis, direction=direction, stable=stable) return tf.cast(ret, dtype=tf.int64) @with_unsupported_dtypes({"2.15.0 and below": ("complex", "bool")}, backend_version) def sort( x: Union[tf.Tensor, tf.Variable], /, *, axis: int = -1, descending: bool = False, stable: bool = True, out: Optional[Union[tf.Tensor, tf.Variable]] = None, ) -> Union[tf.Tensor, tf.Variable]: # TODO: handle stable sort when it's supported in tensorflow # currently it supports only quicksort (unstable) direction = "DESCENDING" if descending else "ASCENDING" x = tf.convert_to_tensor(x) ret = tf.sort(x, axis=axis, direction=direction) return ret # msort @with_unsupported_dtypes({"2.15.0 and below": ("complex", "bool")}, backend_version) def msort( a: Union[tf.Tensor, tf.Variable, list, tuple], /, *, out: Optional[Union[tf.Tensor, tf.Variable]] = None, ) -> Union[tf.Tensor, tf.Variable]: return tf.sort(a, axis=0) @with_unsupported_dtypes({"2.15.0 and below": ("complex",)}, backend_version) def searchsorted( x: Union[tf.Tensor, tf.Variable], v: Union[tf.Tensor, tf.Variable], /, *, side: Literal["left", "right"] = "left", sorter: Optional[Union[ivy.Array, ivy.NativeArray, List[int]]] = None, ret_dtype: tf.DType = tf.int64, out: Optional[Union[tf.Tensor, tf.Variable]] = None, ) -> Union[tf.Tensor, tf.Variable]: assert ivy.is_int_dtype(ret_dtype), TypeError( "only Integer data types are supported for ret_dtype." ) is_supported_int_ret_dtype = ret_dtype in [tf.int32, tf.int64] if sorter is not None: assert ivy.is_int_dtype(sorter.dtype), TypeError( f"Only signed integer data type for sorter is allowed, got {sorter.dtype}." ) if sorter.dtype not in [tf.int32, tf.int64]: sorter = tf.cast(sorter, tf.int32) if len(x.shape) == 1: x = tf.gather(x, sorter) else: x = tf.gather(x, sorter, batch_dims=-1) if len(x.shape) == 1 and len(v.shape) != 1: out_shape = v.shape v = tf.reshape(v, (1, -1)) # Leading dims must be the same if is_supported_int_ret_dtype: return tf.reshape( tf.searchsorted(x, v, side=side, out_type=ret_dtype), out_shape ) else: return tf.cast( tf.reshape(tf.searchsorted(x, v, side=side), out_shape), ret_dtype ) v = tf.cast(v, x.dtype) if is_supported_int_ret_dtype: return tf.searchsorted(x, v, side=side, out_type=ret_dtype) return tf.cast(tf.searchsorted(x, v, side=side), ret_dtype)
ivy/ivy/functional/backends/tensorflow/sorting.py/0
{ "file_path": "ivy/ivy/functional/backends/tensorflow/sorting.py", "repo_id": "ivy", "token_count": 1443 }
26
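The TensorFlow sorting wrappers above are thin layers over native ops; a minimal check of tf.searchsorted and tf.argsort directly:

import tensorflow as tf

x = tf.constant([1.0, 3.0, 5.0, 7.0])
v = tf.constant([0.0, 4.0, 7.0])
# side="left" gives the first index where each value could be inserted to keep x sorted
print(tf.searchsorted(x, v, side="left"))      # [0 2 3]
print(tf.argsort(x, direction="DESCENDING"))   # [3 2 1 0]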
from typing import Optional, Union, Literal # global import torch import torch.nn # local import ivy from ivy.func_wrapper import with_unsupported_dtypes from . import backend_version @with_unsupported_dtypes({"2.2 and below": ("float16",)}, backend_version) def logit( x: torch.Tensor, /, *, eps: Optional[float] = None, complex_mode: Literal["split", "magnitude", "jax"] = "jax", out: Optional[torch.Tensor] = None, ) -> torch.Tensor: return torch.logit(x, eps=eps, out=out) @with_unsupported_dtypes({"2.2 and below": ("complex", "float16")}, backend_version) def thresholded_relu( x: torch.Tensor, /, *, threshold: Optional[Union[int, float]] = None, out: Optional[torch.Tensor] = None, ) -> torch.Tensor: return torch.threshold(x, threshold=threshold, value=0) @with_unsupported_dtypes({"2.2 and below": ("float16",)}, backend_version) def relu6( x: torch.Tensor, /, *, complex_mode="jax", out: Optional[torch.Tensor] = None ) -> torch.Tensor: return torch.nn.functional.relu6(x) @with_unsupported_dtypes({"2.2 and below": ("float16",)}, backend_version) def logsigmoid( input: torch.Tensor, /, *, complex_mode="jax", out: Optional[torch.Tensor] = None ) -> torch.Tensor: if torch.is_complex(input): return torch.log(torch.sigmoid(input)) return torch.nn.functional.logsigmoid(input) @with_unsupported_dtypes({"2.2 and below": ("float16",)}, backend_version) def selu(x: torch.Tensor, /, *, out: Optional[torch.Tensor] = None) -> torch.Tensor: ret = torch.nn.functional.selu(x) if ivy.exists(out): return ivy.inplace_update(out, ret).astype(x.dtype) return ivy.astype(ret, x.dtype) @with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, backend_version) def silu(x: torch.Tensor, /, *, out: Optional[torch.Tensor] = None) -> torch.Tensor: return torch.nn.functional.silu(x) @with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, backend_version) def elu( x: torch.Tensor, /, *, alpha: float = 1.0, out: Optional[torch.Tensor] = None ) -> torch.Tensor: ret = torch.nn.functional.elu(x, alpha) if ivy.exists(out): return ivy.inplace_update(out, ret).astype(x.dtype) return ivy.astype(ret, x.dtype) @with_unsupported_dtypes( { "2.2 and below": ( "complex", "float16", "bfloat16", ) }, backend_version, ) def celu( x: torch.Tensor, /, *, alpha: float = 1.0, complex_mode="jax", out: Optional[torch.Tensor] = None, ) -> torch.Tensor: return torch.celu(x, alpha=alpha) @with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, backend_version) def hardtanh( x: torch.Tensor, /, *, max_val: float = 1.0, min_val: float = -1.0, out: Optional[torch.Tensor] = None, ) -> torch.Tensor: ret = torch.nn.functional.hardtanh(x, max_val=max_val, min_val=min_val) if ivy.exists(out): return ivy.inplace_update(out, ret).astype(x.dtype) return ivy.astype(ret, x.dtype) @with_unsupported_dtypes({"2.2 and below": ("float16",)}, backend_version) def tanhshrink( x: torch.Tensor, /, *, out: Optional[torch.Tensor] = None ) -> torch.Tensor: ret = torch.nn.functional.tanhshrink(x) if ivy.exists(out): return ivy.inplace_update(out, ret).astype(x.dtype) return ivy.astype(ret, x.dtype) @with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, backend_version) def threshold( x: torch.Tensor, /, *, threshold: float, value: float, out: Optional[torch.Tensor] = None, ) -> torch.Tensor: ret = torch.nn.functional.threshold(threshold=threshold, value=value, input=x) if ivy.exists(out): return ivy.inplace_update(out, ret).astype(x.dtype) return ivy.astype(ret, x.dtype) @with_unsupported_dtypes({"2.2 and below": ("float16", 
"bfloat16")}, backend_version) def softshrink( x: torch.Tensor, /, *, lambd: float = 0.5, out: Optional[torch.Tensor] = None ) -> torch.Tensor: ret = torch.nn.functional.softshrink(x, lambd=lambd) if ivy.exists(out): return ivy.inplace_update(out, ret).astype(x.dtype) return ivy.astype(ret, x.dtype) def scaled_tanh( x: torch.Tensor, /, *, alpha: float = 1.7159, beta: float = 0.67, out: Optional[torch.Tensor] = None, ) -> torch.Tensor: return alpha * torch.nn.functional.tanh(beta * x) @with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, backend_version) def hardshrink( x: torch.Tensor, /, *, lambd: float = 0.5, out: Optional[torch.Tensor] = None ) -> torch.Tensor: ret = torch.nn.functional.hardshrink(x, lambd=lambd) if ivy.exists(out): return ivy.inplace_update(out, ret).astype(x.dtype) return ivy.astype(ret, x.dtype) @with_unsupported_dtypes({"2.0.1 and below": ("complex", "float16")}, backend_version) def hardsilu(x: torch.Tensor, /, *, out: Optional[torch.Tensor] = None) -> torch.Tensor: ret = torch.nn.functional.hardswish(x) if ivy.exists(out): return ivy.inplace_update(out, ret).astype(x.dtype) return ivy.astype(ret, x.dtype)
ivy/ivy/functional/backends/torch/experimental/activations.py/0
{ "file_path": "ivy/ivy/functional/backends/torch/experimental/activations.py", "repo_id": "ivy", "token_count": 2248 }
27
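A standalone sketch of the torch functional ops the wrappers above call into (threshold and hardshrink):

import torch

x = torch.tensor([-1.0, 0.2, 0.7, 2.0])
# threshold replaces entries <= threshold with `value`
print(torch.nn.functional.threshold(x, threshold=0.5, value=0.0))   # [0.0, 0.0, 0.7, 2.0]
# hardshrink zeroes the band [-lambd, lambd] and keeps the rest unchanged
print(torch.nn.functional.hardshrink(x, lambd=0.5))                 # [-1.0, 0.0, 0.7, 2.0]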
# global import torch from typing import Optional, Union # local import ivy # invert_permutation def invert_permutation( x: Union[torch.Tensor, list, tuple], /, ) -> torch.Tensor: x = torch.tensor(x) if not ivy.is_array(x) else x sorted_indices = torch.argsort(x) inverse = torch.zeros_like(sorted_indices) inverse[sorted_indices] = torch.arange(len(x)) inverse_permutation = torch.argsort(inverse) return inverse_permutation # lexsort def lexsort( keys: torch.Tensor, /, *, axis: int = -1, out: Optional[torch.Tensor] = None ) -> torch.Tensor: shape = keys.size() if len(shape) == 1: _, result = torch.sort(keys, dim=axis, stable=True) return result if shape[0] == 0: raise TypeError("need sequence of keys with len > 0 in lexsort") if len(shape) == 2 and shape[1] == 1: return torch.tensor([0]) _, result = torch.sort(keys[0], dim=axis, stable=True) # result = torch.argsort(keys[0], dim=axis, stable=True) # only valid for torch > 2.2 if shape[0] == 1: return result for i in range(1, shape[0]): key = keys[i] ind = key[result] _, temp = torch.sort(ind, dim=axis, stable=True) # temp = torch.argsort(ind, dim=axis, stable=True) # only valid for torch > 2.2 result = result[temp] return result lexsort.support_native_out = False
ivy/ivy/functional/backends/torch/experimental/sorting.py/0
{ "file_path": "ivy/ivy/functional/backends/torch/experimental/sorting.py", "repo_id": "ivy", "token_count": 587 }
28
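The lexsort implementation above chains stable sorts; a minimal sketch of the same idea in plain torch, with the last key acting as the primary key:

import torch

ages = torch.tensor([30, 20, 10])
group = torch.tensor([0, 0, 1])
keys = torch.stack([ages, group])        # group is the primary key, as in np.lexsort

_, order = torch.sort(keys[0], stable=True)
for i in range(1, keys.shape[0]):
    _, perm = torch.sort(keys[i][order], stable=True)
    order = order[perm]
print(order)   # tensor([1, 0, 2]): sort by group, ties broken by age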
# global import functools from typing import Callable import inspect # local import ivy import ivy.functional.frontends.jax as jax_frontend import ivy.functional.frontends.numpy as np_frontend # --- Helpers --- # # --------------- # def _from_ivy_array_to_jax_frontend_array(x, nested=False, include_derived=None): if nested: return ivy.nested_map( _from_ivy_array_to_jax_frontend_array, x, include_derived, shallow=False ) elif isinstance(x, ivy.Array): return jax_frontend.Array(x) return x def _from_ivy_array_to_jax_frontend_array_weak_type( x, nested=False, include_derived=None ): if nested: return ivy.nested_map( _from_ivy_array_to_jax_frontend_array_weak_type, x, include_derived, shallow=False, ) elif isinstance(x, ivy.Array): return jax_frontend.Array(x, weak_type=True) return x def _from_jax_frontend_array_to_ivy_array(x): if isinstance(x, jax_frontend.Array) and x.weak_type and x.ivy_array.shape == (): setattr(x.ivy_array, "weak_type", True) return x.ivy_array if hasattr(x, "ivy_array"): return x.ivy_array return x def _native_to_ivy_array(x): if isinstance(x, ivy.NativeArray): return ivy.array(x) return x def _to_ivy_array(x): return _from_jax_frontend_array_to_ivy_array(_native_to_ivy_array(x)) # --- Main --- # # ------------ # def handle_jax_dtype(fn: Callable) -> Callable: @functools.wraps(fn) def _handle_jax_dtype(*args, dtype=None, **kwargs): if len(args) > (dtype_pos + 1): dtype = args[dtype_pos] kwargs = { **dict( zip( list(inspect.signature(fn).parameters.keys())[ dtype_pos + 1 : len(args) ], args[dtype_pos + 1 :], ) ), **kwargs, } args = args[:dtype_pos] elif len(args) == (dtype_pos + 1): dtype = args[dtype_pos] args = args[:-1] if not dtype: return fn(*args, dtype=dtype, **kwargs) dtype = np_frontend.to_ivy_dtype(dtype) if not jax_frontend.config.jax_enable_x64: dtype = ( jax_frontend.numpy.dtype_replacement_dict[dtype] if dtype in jax_frontend.numpy.dtype_replacement_dict else dtype ) return fn(*args, dtype=dtype, **kwargs) dtype_pos = list(inspect.signature(fn).parameters).index("dtype") _handle_jax_dtype.handle_jax_dtype = True return _handle_jax_dtype def inputs_to_ivy_arrays(fn: Callable) -> Callable: @functools.wraps(fn) def _inputs_to_ivy_arrays_jax(*args, **kwargs): # check if kwargs contains an out argument, and if so, remove it has_out = False out = None if "out" in kwargs: out = kwargs["out"] del kwargs["out"] has_out = True # convert all arrays in the inputs to ivy.Array instances new_args = ivy.nested_map( _to_ivy_array, args, include_derived={"tuple": True}, shallow=False ) new_kwargs = ivy.nested_map( _to_ivy_array, kwargs, include_derived={"tuple": True}, shallow=False ) # add the original out argument back to the keyword arguments if has_out: new_kwargs["out"] = out return fn(*new_args, **new_kwargs) _inputs_to_ivy_arrays_jax.inputs_to_ivy_arrays_jax = True return _inputs_to_ivy_arrays_jax def outputs_to_frontend_arrays(fn: Callable) -> Callable: @functools.wraps(fn) def _outputs_to_frontend_arrays_jax(*args, **kwargs): weak_type = not any( (isinstance(arg, jax_frontend.Array) and arg.weak_type is False) or (isinstance(arg, ivy.Array) and arg.weak_type is False) or isinstance(arg, (tuple, list)) for arg in args ) if "dtype" in kwargs and kwargs["dtype"] is not None: weak_type = False # call unmodified function # ToDo: Remove this default dtype setting # once frontend specific backend setting is added if jax_frontend.config.jax_enable_x64: ivy.set_default_int_dtype("int64") ivy.set_default_float_dtype("float64") try: ret = fn(*args, **kwargs) finally: 
ivy.unset_default_int_dtype() ivy.unset_default_float_dtype() else: ret = fn(*args, **kwargs) # convert all arrays in the return to `jax_frontend.Array` instances if weak_type: return _from_ivy_array_to_jax_frontend_array_weak_type( ret, nested=True, include_derived={"tuple": True}, ) return _from_ivy_array_to_jax_frontend_array( ret, nested=True, include_derived={"tuple": True} ) _outputs_to_frontend_arrays_jax.outputs_to_frontend_arrays_jax = True return _outputs_to_frontend_arrays_jax def outputs_to_native_arrays(fn: Callable): @functools.wraps(fn) def _outputs_to_native_arrays(*args, **kwargs): ret = fn(*args, **kwargs) if isinstance(ret, jax_frontend.Array): ret = ret.ivy_array.data return ret return _outputs_to_native_arrays def to_ivy_arrays_and_back(fn: Callable) -> Callable: return outputs_to_frontend_arrays(inputs_to_ivy_arrays(fn))
ivy/ivy/functional/frontends/jax/func_wrapper.py/0
{ "file_path": "ivy/ivy/functional/frontends/jax/func_wrapper.py", "repo_id": "ivy", "token_count": 2859 }
29
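A hedged sketch of how to_ivy_arrays_and_back composes the two wrappers above; the double function is purely illustrative and not part of the frontend:

import ivy
import ivy.functional.frontends.jax as jax_frontend
from ivy.functional.frontends.jax.func_wrapper import to_ivy_arrays_and_back

@to_ivy_arrays_and_back
def double(x):
    # inside the wrapped function, x has already been converted to an ivy.Array
    return ivy.multiply(x, 2)

out = double(jax_frontend.Array(ivy.array([1.0, 2.0])))
print(type(out))   # the result is wrapped back into a jax frontend Array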
# local import ivy from ivy.functional.frontends.jax.func_wrapper import ( to_ivy_arrays_and_back, ) from ivy.functional.frontends.jax.numpy import ( promote_types_of_jax_inputs as promote_jax_arrays, ) from ivy.utils.exceptions import IvyNotImplementedException from ivy.func_wrapper import with_unsupported_dtypes # --- Helpers --- # # --------------- # def _packbits_nested_list_padding(arr, pad_length): if arr.ndim > 1: nested_list = [] for sub_arr in arr: nested_list.append(_packbits_nested_list_padding(sub_arr, pad_length)) return nested_list else: return arr.zero_pad(pad_width=[[0, pad_length]]) # --- Main --- # # ------------ # @to_ivy_arrays_and_back def all(a, axis=None, out=None, keepdims=False, *, where=False): return ivy.all(a, axis=axis, keepdims=keepdims, out=out) @to_ivy_arrays_and_back def allclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False): a, b = promote_jax_arrays(a, b) return ivy.allclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan) @to_ivy_arrays_and_back def any(a, axis=None, out=None, keepdims=False, *, where=None): # TODO: Out not supported ret = ivy.any(a, axis=axis, keepdims=keepdims) if ivy.is_array(where): where = ivy.array(where, dtype=ivy.bool) ret = ivy.where(where, ret, ivy.default(None, ivy.zeros_like(ret))) return ret @to_ivy_arrays_and_back def array_equal(a1, a2, equal_nan: bool) -> bool: a1, a2 = promote_jax_arrays(a1, a2) if ivy.shape(a1) != ivy.shape(a2): return False eq = ivy.asarray(a1 == a2) if equal_nan: eq = ivy.logical_or(eq, ivy.logical_and(ivy.isnan(a1), ivy.isnan(a2))) return ivy.all(eq) @to_ivy_arrays_and_back def array_equiv(a1, a2) -> bool: a1, a2 = promote_jax_arrays(a1, a2) try: eq = ivy.equal(a1, a2) except ValueError: # shapes are not broadcastable return False return ivy.all(eq) @to_ivy_arrays_and_back def bitwise_and(x1, x2, /): x1, x2 = promote_jax_arrays(x1, x2) return ivy.bitwise_and(x1, x2) @to_ivy_arrays_and_back def bitwise_not(x, /): return ivy.bitwise_invert(x) @to_ivy_arrays_and_back def bitwise_or(x1, x2, /): x1, x2 = promote_jax_arrays(x1, x2) return ivy.bitwise_or(x1, x2) @to_ivy_arrays_and_back def bitwise_xor(x1, x2, /): x1, x2 = promote_jax_arrays(x1, x2) return ivy.bitwise_xor(x1, x2) @to_ivy_arrays_and_back def equal(x1, x2, /): x1, x2 = promote_jax_arrays(x1, x2) return ivy.equal(x1, x2) @to_ivy_arrays_and_back @with_unsupported_dtypes({"0.4.24 and below": ("bfloat16",)}, "jax") def fromfunction(function, shape, *, dtype=float, **kwargs): def canonicalize_shape(shape, context="shape argument"): if isinstance(shape, int): return (shape,) elif isinstance(shape, list): return tuple(shape) elif isinstance(shape, tuple): return shape else: msg = f"{context} must be an int, list, or tuple, but got {type(shape)}." 
raise TypeError(msg) arr = ivy.zeros(shape, dtype=dtype) shape = canonicalize_shape(shape) # Iterate over the indices of the array for indices in ivy.ndindex(shape): f_indices = indices ivy.set_nest_at_index( arr, f_indices, ivy.asarray(function(*indices, **kwargs), dtype=dtype) ) return arr @to_ivy_arrays_and_back def greater(x1, x2, /): x1, x2 = promote_jax_arrays(x1, x2) return ivy.greater(x1, x2) @to_ivy_arrays_and_back def greater_equal(x1, x2, /): x1, x2 = promote_jax_arrays(x1, x2) return ivy.greater_equal(x1, x2) @to_ivy_arrays_and_back def invert(x, /): return ivy.bitwise_invert(x) @to_ivy_arrays_and_back def isclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False): a, b = promote_jax_arrays(a, b) return ivy.isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan) @to_ivy_arrays_and_back def iscomplex(x: any): return ivy.bitwise_invert(ivy.isreal(x)) @to_ivy_arrays_and_back def iscomplexobj(x): return ivy.is_complex_dtype(ivy.dtype(x)) @to_ivy_arrays_and_back def isfinite(x, /): return ivy.isfinite(x) @to_ivy_arrays_and_back def isin(element, test_elements, assume_unique=False, invert=False): return ivy.isin(element, test_elements, assume_unique=assume_unique, invert=invert) @to_ivy_arrays_and_back def isinf(x, /): return ivy.isinf(x) @to_ivy_arrays_and_back def isnan(x, /): return ivy.isnan(x) @to_ivy_arrays_and_back def isneginf(x, /, out=None): return ivy.isinf(x, detect_positive=False, out=out) @to_ivy_arrays_and_back def isposinf(x, /, out=None): return ivy.isinf(x, detect_negative=False, out=out) @to_ivy_arrays_and_back def isreal(x, out=None): return ivy.isreal(x, out=out) @to_ivy_arrays_and_back def isrealobj(x: any): return not ivy.is_complex_dtype(ivy.dtype(x)) @to_ivy_arrays_and_back def isscalar(x, /): return ivy.isscalar(x) @to_ivy_arrays_and_back def left_shift(x1, x2): # TODO: implement raise IvyNotImplementedException() @to_ivy_arrays_and_back def less(x1, x2, /): x1, x2 = promote_jax_arrays(x1, x2) return ivy.less(x1, x2) @to_ivy_arrays_and_back def less_equal(x1, x2, /): x1, x2 = promote_jax_arrays(x1, x2) return ivy.less_equal(x1, x2) @to_ivy_arrays_and_back # known issue in jnp's documentation of arguments # https://github.com/google/jax/issues/9119 def logical_and(x1, x2, /): x1, x2 = promote_jax_arrays(x1, x2) if x1.dtype == "complex128" or x2.dtype == "complex128": x1 = ivy.astype(x1, ivy.complex128) x2 = ivy.astype(x2, ivy.complex128) else: x1, x2 = promote_jax_arrays(x1, x2) return ivy.logical_and(x1, x2) @to_ivy_arrays_and_back def logical_not(x, /): return ivy.logical_not(x) @to_ivy_arrays_and_back def logical_or(x1, x2, /): x1, x2 = promote_jax_arrays(x1, x2) return ivy.logical_or(x1, x2) @to_ivy_arrays_and_back def logical_xor(x1, x2, /): x1, x2 = promote_jax_arrays(x1, x2) return ivy.logical_xor(x1, x2) @to_ivy_arrays_and_back def not_equal(x1, x2, /): x1, x2 = promote_jax_arrays(x1, x2) return ivy.not_equal(x1, x2) @to_ivy_arrays_and_back def packbits(x, /, *, axis=None, bitorder="big"): x = ivy.greater(x, ivy.zeros_like(x)).astype("uint8") bits = ivy.arange(8, dtype="uint8") if bitorder == "big": bits = bits[::-1] if axis is None: x = ivy.flatten(x) axis = 0 x = ivy.swapaxes(x, axis, -1) remainder = x.shape[-1] % 8 if remainder: x = _packbits_nested_list_padding(x, 8 - remainder) x = ivy.array(x) x = ivy.reshape(x, list(x.shape[:-1]) + [x.shape[-1] // 8, 8]) bits = ivy.expand_dims(bits, axis=tuple(range(x.ndim - 1))) packed = (x << bits).sum(axis=-1).astype("uint8") return ivy.swapaxes(packed, axis, -1) @to_ivy_arrays_and_back def right_shift(x1, x2, 
/): return ivy.bitwise_right_shift(x1, x2) @to_ivy_arrays_and_back @with_unsupported_dtypes({"0.4.24 and below": ("bfloat16", "bool")}, "jax") def setxor1d(ar1, ar2, assume_unique=False): common_dtype = ivy.promote_types(ivy.dtype(ar1), ivy.dtype(ar2)) ar1 = ivy.asarray(ar1, dtype=common_dtype) ar2 = ivy.asarray(ar2, dtype=common_dtype) if not assume_unique: ar1 = ivy.unique_values(ar1) ar2 = ivy.unique_values(ar2) ar1 = ivy.reshape(ar1, (-1,)) ar2 = ivy.reshape(ar2, (-1,)) aux = ivy.concat([ar1, ar2], axis=0) if aux.size == 0: return aux aux = ivy.sort(aux) flag = ivy.concat( (ivy.array([True]), ivy.not_equal(aux[1:], aux[:-1]), ivy.array([True])), axis=0 ) mask = flag[1:] & flag[:-1] if ivy.all(ivy.logical_not(mask)): ret = ivy.asarray([], dtype=common_dtype) else: ret = aux[mask] return ret alltrue = all sometrue = any
ivy/ivy/functional/frontends/jax/numpy/logic.py/0
{ "file_path": "ivy/ivy/functional/frontends/jax/numpy/logic.py", "repo_id": "ivy", "token_count": 3911 }
30
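# Illustrative usage sketch for the jax.numpy `logic.py` row above (not part of
# the dataset). Assumptions: a NumPy backend is available, the module is
# importable via the path in the row's `id` field, and frontend results expose
# an `ivy_array` attribute.
import ivy
from ivy.functional.frontends.jax.numpy import logic as jnp_logic

ivy.set_backend("numpy")

a = ivy.array([1, 2, 3, 4])
b = ivy.array([3, 4, 5, 6])

# setxor1d keeps the values that appear in exactly one of the two inputs.
print(jnp_logic.setxor1d(a, b).ivy_array)  # values 1, 2, 5, 6

# isclose promotes both inputs and then defers to ivy.isclose.
print(jnp_logic.isclose(ivy.array([1.0, 2.0]), ivy.array([1.0, 2.1]), atol=0.2).ivy_array)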
from . import numpy
from . import numpy as np
from . import numpy_extension
from . import numpy_extension as npx

_frontend_array = np.array
ivy/ivy/functional/frontends/mxnet/__init__.py/0
{ "file_path": "ivy/ivy/functional/frontends/mxnet/__init__.py", "repo_id": "ivy", "token_count": 46 }
31
import ivy
from ivy.functional.frontends.numpy.func_wrapper import (
    to_ivy_arrays_and_back,
    handle_numpy_dtype,
)


@handle_numpy_dtype
@to_ivy_arrays_and_back
def array(object, dtype=None, *, copy=True, order="K", subok=False, ndmin=0, like=None):
    ret = ivy.array(object, copy=copy, dtype=dtype)
    if ivy.get_num_dims(ret) < ndmin:
        ret = ivy.expand_dims(ret, axis=list(range(ndmin - ivy.get_num_dims(ret))))
    return ret


@handle_numpy_dtype
@to_ivy_arrays_and_back
def asarray(
    a,
    dtype=None,
    order=None,
    *,
    like=None,
):
    return ivy.asarray(a, dtype=dtype)


@to_ivy_arrays_and_back
def copy(a, order="K", subok=False):
    return ivy.copy_array(a)


@handle_numpy_dtype
def frombuffer(buffer, dtype=float, count=-1, offset=0, *, like=None):
    return ivy.frombuffer(buffer)
ivy/ivy/functional/frontends/numpy/creation_routines/from_existing_data.py/0
{ "file_path": "ivy/ivy/functional/frontends/numpy/creation_routines/from_existing_data.py", "repo_id": "ivy", "token_count": 382 }
32
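# Illustrative sketch for the `from_existing_data.py` row above; it assumes a
# NumPy backend and that the module is importable by the path in the row's
# `id` field.
import ivy
from ivy.functional.frontends.numpy.creation_routines import from_existing_data as fed

ivy.set_backend("numpy")

# ndmin pads the result with leading singleton dimensions via ivy.expand_dims.
x = fed.array([1, 2, 3], ndmin=3)
print(x.ivy_array.shape)  # expected (1, 1, 3)

y = fed.asarray([[1.0, 2.0]], dtype="float32")
print(y.ivy_array.dtype)  # float32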
import ivy from ivy.functional.frontends.numpy.func_wrapper import ( to_ivy_arrays_and_back, ) import ivy.functional.frontends.numpy as np_frontend class AxisConcatenator: # allow ma.mr_ to override this concatenate = staticmethod(np_frontend.concatenate) makemat = staticmethod(np_frontend.matrix) def __init__(self, axis=0, matrix=False, ndmin=1, trans1d=-1): self.axis = axis self.matrix = matrix self.trans1d = trans1d self.ndmin = ndmin def __getitem__(self, key): if not isinstance(key, tuple): key = (key,) # copy attributes, since they can be overridden in the first argument trans1d = self.trans1d ndmin = self.ndmin matrix = self.matrix axis = self.axis objs = [] # dtypes or scalars for weak scalar handling in result_type result_type_objs = [] for k, item in enumerate(key): scalar = False if isinstance(item, slice): step = item.step start = item.start stop = item.stop if start is None: start = 0 if step is None: step = 1 if ivy.is_complex_dtype(step): size = int(abs(step)) newobj = np_frontend.linspace(start, stop, num=size).ivy_array else: newobj = np_frontend.arange(start, stop, step).ivy_array if ndmin > 1: newobj = np_frontend.array( newobj, copy=False, ndmin=ndmin ).ivy_array if trans1d != -1: newobj = ivy.swapaxes(newobj, -1, trans1d) elif isinstance(item, str): if k != 0: raise ValueError("special directives must be the first entry.") if item in ("r", "c"): matrix = True col = item == "c" continue if "," in item: vec = item.split(",") try: axis, ndmin = (int(x) for x in vec[:2]) if len(vec) == 3: trans1d = int(vec[2]) continue except Exception as e: raise ValueError(f"unknown special directive {item!r}") from e try: axis = int(item) continue except (ValueError, TypeError) as e: raise ValueError("unknown special directive") from e elif (ivy.isscalar(item)) or (ivy.is_ivy_array(item) and item.ndim == 0): scalar = True newobj = item else: item = ivy.array(item) newobj = np_frontend.array(item, copy=False, ndmin=ndmin).ivy_array if trans1d != -1 and item.ndim < ndmin: k2 = ndmin - item.ndim k1 = trans1d if k1 < 0: k1 += k2 + 1 defaxes = list(range(ndmin)) axes = defaxes[:k1] + defaxes[k2:] + defaxes[k1:k2] newobj = np_frontend.transpose(newobj, axes=axes).ivy_array objs.append(newobj) if scalar: result_type_objs.append(item) else: result_type_objs.append(newobj.dtype) # Ensure that scalars won't up-cast unless warranted, for 0, drops # through to error in concatenate. if len(result_type_objs) != 0: if len(result_type_objs) > 1: final_dtype = ivy.result_type(*result_type_objs) else: final_dtype = ivy.result_type(result_type_objs[0], result_type_objs[0]) # concatenate could do cast, but that can be overridden: objs = [ np_frontend.array( obj, copy=False, ndmin=ndmin, dtype=final_dtype ).ivy_array for obj in objs ] res = self.concatenate(tuple(objs), axis=axis) if matrix: oldndim = res.ndim res = self.makemat(res) if oldndim == 1 and col: res = res.T return res def __len__(self): return 0 class RClass(AxisConcatenator): def __init__(self): super().__init__(0) class CClass(AxisConcatenator): def __init__(self): super().__init__(-1, ndmin=2, trans1d=0) @to_ivy_arrays_and_back def fill_diagonal(a, val, wrap=False): if a.ndim < 2: raise ValueError("array must be at least 2-d") end = None if a.ndim == 2: # Explicit, fast formula for the common case. For 2-d arrays, we # accept rectangular ones. step = a.shape[1] + 1 # This is needed to don't have tall matrix have the diagonal wrap. 
if not wrap: end = a.shape[1] * a.shape[1] else: # For more than d=2, the strided formula is only valid for arrays with # all dimensions equal, so we check first. if not ivy.all(ivy.diff(a.shape) == 0): raise ValueError("All dimensions of input must be of equal length") step = 1 + ivy.sum(ivy.cumprod(a.shape[:-1])) # Write the value out into the diagonal. shape = a.shape temp = ivy.flatten(a) temp[:end:step] = val a = ivy.reshape(temp, shape) c_ = CClass() r_ = RClass()
ivy/ivy/functional/frontends/numpy/indexing_routines/inserting_data_into_arrays.py/0
{ "file_path": "ivy/ivy/functional/frontends/numpy/indexing_routines/inserting_data_into_arrays.py", "repo_id": "ivy", "token_count": 3060 }
33
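# Rough sketch of the AxisConcatenator helpers (r_ and c_) defined in the row
# above. Import path and backend are assumptions; exact dtype promotion depends
# on the frontend wrappers.
import ivy
from ivy.functional.frontends.numpy.indexing_routines import (
    inserting_data_into_arrays as idi,
)

ivy.set_backend("numpy")

# r_ concatenates along axis 0 and expands slices such as 0:3 into arange(0, 3).
row = idi.r_[0:3, ivy.array([10, 11])]
print(row.ivy_array)  # values 0, 1, 2, 10, 11

# c_ uses ndmin=2 with trans1d=0, so 1-D inputs are stacked as columns.
cols = idi.c_[ivy.array([1, 2, 3]), ivy.array([4, 5, 6])]
print(cols.ivy_array.shape)  # expected (3, 2)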
import ivy import ivy.functional.frontends.numpy as np_frontend import numpy as np masked = True masked_print_options = "--" nomask = False # Class # # ----- # class MaskedArray(np_frontend.ndarray): def __init__( self, data, mask=nomask, dtype=None, copy=False, ndmin=0, fill_value=None, keep_mask=True, hard_mask=False, shrink=True, subok=True, order=None, ): self._init_data(data, dtype, mask, keep_mask) self._init_fill_value(fill_value) self._init_ndmin(ndmin) self._init_hard_mask(hard_mask) # shrink if shrink and not ivy.any(self._mask): self._mask = ivy.array(False) # copy if copy: self._data = ivy.copy_array(self._data) self._mask = ivy.copy_array(self._mask) def _init_data(self, data, dtype, mask, keep_mask): if _is_masked_array(data): self._data = ( ivy.array(data.data, dtype=dtype) if ivy.exists(dtype) else ivy.array(data.data) ) self._init_mask(mask) if keep_mask: if not isinstance(data.mask, bool): ivy.utils.assertions.check_equal( ivy.shape(self._mask), ivy.shape(data.mask), message="shapes of input mask does not match current mask", as_array=False, ) self._mask = ivy.bitwise_or(self._mask, data.mask) else: self._data = ( ivy.array(data, dtype=dtype) if ivy.exists(dtype) else ivy.array(data) ) self._init_mask(mask) self._dtype = self._data.dtype def _init_mask(self, mask): if isinstance(mask, list) or ivy.is_array(mask): ivy.utils.assertions.check_equal( ivy.shape(self._data), ivy.shape(ivy.array(mask)), message="shapes of data and mask must match", as_array=False, ) self._mask = ivy.array(mask) elif mask.all(): self._mask = ivy.ones_like(self._data) else: self._mask = ivy.zeros_like(self._data) self._mask = self._mask.astype("bool") def _init_fill_value(self, fill_value): if ivy.exists(fill_value): self._fill_value = ivy.array(fill_value, dtype=self._dtype) elif ivy.is_bool_dtype(self._dtype): self._fill_value = ivy.array(True) elif ivy.is_int_dtype(self._dtype): self._fill_value = ivy.array(999999, dtype="int64") else: self._fill_value = ivy.array(1e20, dtype="float64") def _init_ndmin(self, ndmin): ivy.utils.assertions.check_isinstance(ndmin, int) if ndmin > len(ivy.shape(self._data)): self._data = ivy.expand_dims(self._data, axis=0) self._mask = ivy.expand_dims(self._mask, axis=0) def _init_hard_mask(self, hard_mask): ivy.utils.assertions.check_isinstance(hard_mask, bool) self._hard_mask = hard_mask # Properties # # ---------- # @property def data(self): return self._data @property def mask(self): return self._mask @property def fill_value(self): return self._fill_value @property def hardmask(self): return self._hard_mask @property def dtype(self): return self._dtype # Setter # # ------ # @mask.setter def mask(self, mask): self._init_mask(mask) @fill_value.setter def fill_value(self, fill_value): self._init_fill_value(fill_value) # Built-ins # # --------- # def __getitem__(self, query): if self._mask.shape != self._data.shape: self._mask = ivy.ones_like(self._data, dtype=ivy.bool) * self._mask if self._fill_value.shape != self._data.shape: self._fill_value = ivy.ones_like(self._data) * self._fill_value if hasattr(self._mask[query], "shape"): return MaskedArray( data=self._data[query], mask=self._mask[query], fill_value=self._fill_value[query], hard_mask=self._hard_mask, ) def __setitem__(self, query, val): self._data[query] = val if self._mask.shape != self._data.shape: self._mask = ivy.ones_like(self._data, dtype=ivy.bool) * self._mask val_mask = ivy.ones_like(self._mask[query]) * getattr(val, "_mask", False) if self._hard_mask: self._mask[query] |= val_mask else: 
self._mask[query] = val_mask return self def __repr__(self): dec_vals = ivy.array_decimal_values with np.printoptions(precision=dec_vals): return ( "ivy.MaskedArray(" + self._array_in_str() + ",\n\tmask=" + str(self._mask.to_list()) + ",\n\tfill_value=" + str(self._fill_value.to_list()) + "\n)" ) def _array_in_str(self): # check if we have unsized array if self._data.shape == (): if self._mask: return masked_print_options return str(self._data.to_list()) if ivy.any(self._mask): return str( [ masked_print_options if mask else x for x, mask in zip(self._data.to_list(), self._mask.to_list()) ] ) return str(self._data.to_list()) # --- Helpers --- # # --------------- # def _is_masked_array(x): return isinstance(x, (np.ma.MaskedArray, np_frontend.ma.MaskedArray)) # Instance Methods # # ---------------- # # TODO # masked_array (alias) masked_array = MaskedArray
ivy/ivy/functional/frontends/numpy/ma/MaskedArray.py/0
{ "file_path": "ivy/ivy/functional/frontends/numpy/ma/MaskedArray.py", "repo_id": "ivy", "token_count": 3129 }
34
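# Small construction sketch for the MaskedArray frontend in the row above;
# backend choice and the direct module import are assumptions.
import ivy
from ivy.functional.frontends.numpy.ma.MaskedArray import MaskedArray

ivy.set_backend("numpy")

marr = MaskedArray([1.0, 2.0, 3.0], mask=[False, True, False])
print(marr.data)        # underlying values as an ivy array
print(marr.mask)        # boolean mask; True marks a masked entry
print(marr.fill_value)  # default fill value for float data (1e20)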
# local
import ivy
from ivy.functional.frontends.numpy.func_wrapper import to_ivy_arrays_and_back


@to_ivy_arrays_and_back
def array_split(ary, indices_or_sections, axis=0):
    return ivy.split(
        ary, num_or_size_splits=indices_or_sections, axis=axis, with_remainder=True
    )


@to_ivy_arrays_and_back
def dsplit(ary, indices_or_sections):
    if isinstance(indices_or_sections, (list, tuple, ivy.Array)):
        indices_or_sections = (
            ivy.diff(indices_or_sections, prepend=[0], append=[ary.shape[2]])
            .astype(ivy.int8)
            .to_list()
        )
    return ivy.dsplit(ary, indices_or_sections)


@to_ivy_arrays_and_back
def hsplit(ary, indices_or_sections):
    if isinstance(indices_or_sections, (list, tuple, ivy.Array)):
        if ary.ndim == 1:
            indices_or_sections = (
                ivy.diff(indices_or_sections, prepend=[0], append=[ary.shape[0]])
                .astype(ivy.int8)
                .to_list()
            )
        else:
            indices_or_sections = (
                ivy.diff(indices_or_sections, prepend=[0], append=[ary.shape[1]])
                .astype(ivy.int8)
                .to_list()
            )
    return ivy.hsplit(ary, indices_or_sections)


@to_ivy_arrays_and_back
def split(ary, indices_or_sections, axis=0):
    if isinstance(indices_or_sections, (list, tuple, ivy.Array)):
        indices_or_sections = (
            ivy.diff(indices_or_sections, prepend=[0], append=[ary.shape[axis]])
            .astype(ivy.int8)
            .to_list()
        )
    return ivy.split(
        ary, num_or_size_splits=indices_or_sections, axis=axis, with_remainder=True
    )


@to_ivy_arrays_and_back
def vsplit(ary, indices_or_sections):
    if isinstance(indices_or_sections, (list, tuple, ivy.Array)):
        indices_or_sections = (
            ivy.diff(indices_or_sections, prepend=[0], append=[ary.shape[0]])
            .astype(ivy.int8)
            .to_list()
        )
    return ivy.vsplit(ary, indices_or_sections)
ivy/ivy/functional/frontends/numpy/manipulation_routines/splitting_arrays.py/0
{ "file_path": "ivy/ivy/functional/frontends/numpy/manipulation_routines/splitting_arrays.py", "repo_id": "ivy", "token_count": 1021 }
35
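# Minimal sketch of the split helpers in the row above; assumes a NumPy backend
# and the module path given in the row's `id` field.
import ivy
from ivy.functional.frontends.numpy.manipulation_routines import (
    splitting_arrays as sp,
)

ivy.set_backend("numpy")

x = ivy.arange(9)

# A list of split points is turned into per-section sizes via ivy.diff.
for part in sp.split(x, [3, 6]):
    print(part)  # three chunks of length 3

# An integer asks for that many sections; with_remainder=True lets the final
# section have a different length when the split is uneven.
for part in sp.array_split(x, 4):
    print(part)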
from . import averages_and_variances
from .averages_and_variances import *
from . import order_statistics
from .order_statistics import *
from . import correlating
from .correlating import *
from . import histograms
from .histograms import *
ivy/ivy/functional/frontends/numpy/statistics/__init__.py/0
{ "file_path": "ivy/ivy/functional/frontends/numpy/statistics/__init__.py", "repo_id": "ivy", "token_count": 68 }
36
# global import ivy from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes import ivy.functional.frontends.paddle as paddle_frontend from ivy.functional.frontends.paddle.func_wrapper import ( to_ivy_arrays_and_back, ) @with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle") @to_ivy_arrays_and_back def arange(start, end=None, step=1, dtype=None, name=None): return ivy.arange(start, end, step=step, dtype=dtype) @with_supported_dtypes( {"2.6.0 and below": ("float16", "float32", "float64", "int32", "int64", "bool")}, "paddle", ) @to_ivy_arrays_and_back def assign(x, output=None): if len(ivy.shape(x)) == 0: x = ivy.reshape(ivy.Array(x), (1,)) if ivy.exists(output): output = ivy.reshape(ivy.Array(output), (1,)) else: x = ivy.reshape(x, ivy.shape(x)) ret = ivy.copy_array(x, to_ivy_array=False, out=output) return ret @with_unsupported_dtypes( {"2.6.0 and below": ("bfloat16", "uint16", "uint32", "uint64")}, "paddle" ) @to_ivy_arrays_and_back def clone(x): return ivy.copy_array(x) @with_supported_dtypes( {"2.6.0 and below": ("float32", "float64")}, "paddle", ) @to_ivy_arrays_and_back def complex(real, imag, name=None): assert real.dtype == imag.dtype, ( "(InvalidArgument) The type of data we are trying to retrieve does not match" " the type of data currently contained in the container." ) complex_dtype = "complex64" if real.dtype == "float32" else "complex128" imag_cmplx = ivy.astype(imag, complex_dtype) * 1j complex_array = real + imag_cmplx return complex_array @with_supported_dtypes( {"2.6.0 and below": ("float32", "float64", "int32", "int64")}, "paddle" ) @to_ivy_arrays_and_back def diag(x, offset=0, padding_value=0, name=None): if len(x.shape) == 1: padding_value = ivy.astype(padding_value, ivy.dtype(x)) ret = ivy.diagflat(x, offset=offset, padding_value=padding_value) if len(ret.shape) != 2: ret = ivy.reshape(ret, (1, 1)) else: ret = ivy.diag(x, k=offset) return ret @with_supported_dtypes( {"2.6.0 and below": ("float32", "float64", "int32", "int64")}, "paddle" ) @to_ivy_arrays_and_back def diagflat(x, offset=0, name=None): arr = ivy.diagflat(x, offset=offset) return arr @to_ivy_arrays_and_back def empty(shape, dtype=None): return ivy.empty(shape=shape, dtype=dtype) @to_ivy_arrays_and_back def empty_like(x, dtype=None, name=None): return ivy.empty_like(x, dtype=dtype) @with_supported_dtypes( {"2.6.0 and below": ("float16", "float32", "float64", "int32", "int64")}, "paddle" ) @to_ivy_arrays_and_back def eye(num_rows, num_columns=None, dtype=None, name=None): return ivy.eye(num_rows, num_columns, dtype=dtype) @to_ivy_arrays_and_back def full(shape, fill_value, /, *, dtype=None, name=None): dtype = "float32" if dtype is None else dtype return ivy.full(shape, fill_value, dtype=dtype) @to_ivy_arrays_and_back def full_like(x, fill_value, /, *, dtype=None, name=None): dtype = x.dtype if dtype is None else dtype return ivy.full_like(x, fill_value, dtype=dtype) @with_supported_dtypes( {"2.6.0 and below": ("float32", "float64", "int32", "int64")}, "paddle" ) @to_ivy_arrays_and_back def linspace(start, stop, num, dtype=None, name=None): return ivy.linspace(start, stop, num=num, dtype=dtype) @with_supported_dtypes( {"2.6.0 and below": ("float32", "float64", "int32", "int64")}, "paddle" ) @to_ivy_arrays_and_back def logspace(start, stop, num, base=10.0, dtype=None, name=None): return ivy.logspace(start, stop, num=num, base=base, dtype=dtype) @with_supported_dtypes( {"2.6.0 and below": ("float32", "float64", "int32", "int64")}, "paddle" ) @to_ivy_arrays_and_back 
def meshgrid(*args, **kwargs): return ivy.meshgrid(*args, indexing="ij") @with_unsupported_dtypes({"2.6.0 and below": "int8"}, "paddle") @to_ivy_arrays_and_back def ones(shape, /, *, dtype=None, name=None): dtype = "float32" if dtype is None else dtype return ivy.ones(shape, dtype=dtype) @with_unsupported_dtypes( {"2.6.0 and below": ("uint8", "int8", "complex64", "complex128")}, "paddle" ) @to_ivy_arrays_and_back def ones_like(x, /, *, dtype=None, name=None): dtype = x.dtype if dtype is None else dtype return ivy.ones_like(x, dtype=dtype) @to_ivy_arrays_and_back def to_tensor(data, /, *, dtype=None, place=None, stop_gradient=True): array = ivy.array(data, dtype=dtype, device=place) return paddle_frontend.Tensor(array, dtype=dtype, place=place) @with_unsupported_dtypes( { "2.6.0 and below": ( "uint8", "int8", "int16", "float16", "complex64", "complex128", "bool", ) }, "paddle", ) @to_ivy_arrays_and_back def tril(x, diagonal=0, name=None): return ivy.tril(x, k=diagonal) @with_supported_dtypes({"2.6.0 and below": ("int32", "int64")}, "paddle") @to_ivy_arrays_and_back def tril_indices(row, col, offset=0, dtype="int64"): arr = ivy.tril_indices(row, col, offset) arr = ivy.astype(arr, dtype) return arr @with_unsupported_dtypes( { "2.6.0 and below": ( "uint8", "int8", "int16", "float16", "complex64", "complex128", "bool", ) }, "paddle", ) @to_ivy_arrays_and_back def triu(x, diagonal=0, name=None): return ivy.triu(x, k=diagonal) @with_supported_dtypes({"2.6.0 and below": ("int32", "int64")}, "paddle") @to_ivy_arrays_and_back def triu_indices(row, col=None, offset=0, dtype="int64"): arr = ivy.triu_indices(row, col, offset) if not ivy.to_scalar(ivy.shape(arr[0], as_array=True)): return arr arr = ivy.astype(arr, dtype) return arr @with_unsupported_dtypes({"2.6.0 and below": "int8"}, "paddle") @to_ivy_arrays_and_back def zeros(shape, /, *, dtype=None, name=None): dtype = "float32" if dtype is None else dtype return ivy.zeros(shape, dtype=dtype) @with_unsupported_dtypes( {"2.6.0 and below": ("uint8", "int8", "complex64", "complex128")}, "paddle" ) @to_ivy_arrays_and_back def zeros_like(x, /, *, dtype=None, name=None): dtype = x.dtype if dtype is None else dtype return ivy.zeros_like(x, dtype=dtype)
ivy/ivy/functional/frontends/paddle/creation.py/0
{ "file_path": "ivy/ivy/functional/frontends/paddle/creation.py", "repo_id": "ivy", "token_count": 2942 }
37
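# Usage sketch for the paddle creation frontend in the row above; the import
# path and the NumPy backend are assumptions.
import ivy
from ivy.functional.frontends.paddle import creation

ivy.set_backend("numpy")

a = creation.arange(0, 10, 2)
print(a.ivy_array)        # 0, 2, 4, 6, 8

eye3 = creation.eye(3)
print(eye3.ivy_array)     # 3x3 identity matrix

z = creation.zeros([2, 3])
print(z.ivy_array.shape)  # (2, 3); dtype defaults to float32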
# local
import ivy
from ivy.func_wrapper import with_supported_dtypes
from ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back


@to_ivy_arrays_and_back
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
def layer_norm(x, normalized_shape, weight=None, bias=None, epsilon=1e-05, name=None):
    return ivy.layer_norm(x, normalized_shape, scale=weight, offset=bias, eps=epsilon)


@to_ivy_arrays_and_back
@with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle")
def normalize(x, p=2, axis=1, epsilon=1e-12, name=None):
    if axis < 0:
        axis = ivy.get_num_dims(x) + axis
    return ivy.lp_normalize(x, p=p, axis=axis)
ivy/ivy/functional/frontends/paddle/nn/functional/norm.py/0
{ "file_path": "ivy/ivy/functional/frontends/paddle/nn/functional/norm.py", "repo_id": "ivy", "token_count": 291 }
38
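# Sketch for the `normalize` wrapper in the row above (layer_norm is not
# exercised here). Backend and import path are assumptions.
import ivy
from ivy.functional.frontends.paddle.nn.functional import norm as pd_norm

ivy.set_backend("numpy")

x = ivy.array([[3.0, 4.0], [6.0, 8.0]])

# L2-normalize along axis=1, so each row ends up with unit Euclidean norm.
out = pd_norm.normalize(x, p=2, axis=1)
print(out.ivy_array)  # each row proportional to [0.6, 0.8]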
# local import ivy import ivy.functional.frontends.paddle as paddle_frontend from ivy.func_wrapper import ( with_supported_dtypes, with_unsupported_dtypes, with_supported_device_and_dtypes, ) from ivy.functional.frontends.paddle.func_wrapper import _to_ivy_array class Tensor: def __init__(self, array, dtype=None, place="cpu", stop_gradient=True): self._ivy_array = ( ivy.array(array, dtype=dtype, device=place) if not isinstance(array, ivy.Array) else array ) self._dtype = dtype self._place = place self._stop_gradient = stop_gradient def __repr__(self): return ( f"ivy.frontends.paddle.Tensor(shape={self.shape}, dtype={self.dtype}, " + str(self.ivy_array.__repr__()).replace("ivy.array(", "") ) # Properties # # ---------- # @property def ivy_array(self): return self._ivy_array @property def place(self): return self.ivy_array.device @property def dtype(self): return self._ivy_array.dtype @property def shape(self): return list(self.ivy_array.shape.shape) @property def ndim(self): return self.dim() # Setters # # --------# @ivy_array.setter def ivy_array(self, array): self._ivy_array = ( ivy.array(array) if not isinstance(array, ivy.Array) else array ) # Special Methods # # -------------------# @with_unsupported_dtypes( {"2.6.0 and below": ("bool", "unsigned", "int8", "float16", "bfloat16")}, "paddle", ) def __add__(self, y, /, name=None): return paddle_frontend.add(self, y) @with_unsupported_dtypes( {"2.6.0 and below": ("bool", "unsigned", "int8", "float16", "bfloat16")}, "paddle", ) def __radd__(self, x, /, name=None): return paddle_frontend.add(self, x) @with_unsupported_dtypes( {"2.6.0 and below": ("bool", "unsigned", "int8", "float16", "bfloat16")}, "paddle", ) def __sub__(self, y, /, name=None): return paddle_frontend.subtract(self, y) @with_unsupported_dtypes( {"2.6.0 and below": ("uint8", "int8", "int16", "float16", "bfloat16")}, "paddle", ) def __mul__(self, y, /, name=None): return paddle_frontend.multiply(self, y) @with_unsupported_dtypes( { "2.6.0 and below": ( "bool", "uint8", "int8", "int16", "complex64", "complex128", ) }, "paddle", ) def __gt__(self, y, /, name=None): return paddle_frontend.logic.greater_than(self, y) @with_unsupported_dtypes( { "2.6.0 and below": ( "bool", "uint8", "int8", "int16", "complex64", "complex128", ) }, "paddle", ) def __lt__(self, y, /, name=None): return paddle_frontend.logic.less_than(self, y) @with_unsupported_dtypes( { "2.6.0 and below": ( "bool", "uint8", "int8", "int16", "complex64", "complex128", ) }, "paddle", ) def __ge__(self, y, /, name=None): return paddle_frontend.logic.greater_equal(self, y) @with_unsupported_dtypes( { "2.6.0 and below": ( "bool", "uint8", "int8", "int16", "complex64", "complex128", ) }, "paddle", ) def __le__(self, y, /, name=None): return paddle_frontend.logic.less_equal(self, y) @with_supported_dtypes( { "2.6.0 and below": ( "bool", "uint8", "int8", "int16", "int32", "int64", ) }, "paddle", ) def __or__(self, y, /, name=None): return paddle_frontend.logic.bitwise_or(self, y) @with_unsupported_dtypes( {"2.6.0 and below": ("bool", "unsigned", "int8", "float16", "bfloat16")}, "paddle", ) def __rsub__(self, x, /, name=None): return paddle_frontend.subtract(x, self) def __getitem__(self, item): ivy_args = ivy.nested_map(_to_ivy_array, [self, item]) ret = ivy.get_item(*ivy_args) return paddle_frontend.Tensor(ret) def __setitem__(self, item, value): raise ivy.utils.exceptions.IvyException( "ivy.functional.frontends.paddle.Tensor object doesn't support assignment" ) @with_unsupported_dtypes({"2.6.0 and below": ("float16", 
"bfloat16")}, "paddle") def __floordiv__(self, y, /, name=None): return paddle_frontend.floor_divide(self, y) @with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle") def __ne__(self, y, /, name=None): return paddle_frontend.not_equal(self, y) def __iter__(self): if self.ndim == 0: raise TypeError("iteration over a 0-d tensor not supported") for i in range(self.shape[0]): yield self[i] @with_unsupported_dtypes( {"2.6.0 and below": ("bool", "unsigned", "int8", "float16", "bfloat16")}, "paddle", ) def __rmul__(self, y, /, name=None): return paddle_frontend.multiply(self, y) @with_unsupported_dtypes( {"2.6.0 and below": ("bool", "unsigned", "int8", "float16", "bfloat16")}, "paddle", ) def __float__(self): return float(self._ivy_array) def __xor__(self, y, /, name=None): return paddle_frontend.logic.bitwise_xor(self, y) def __invert__(self, out=None, name=None): return paddle_frontend.logic.bitwise_not(self) def __len__(self): return len(self._ivy_array) def __neg__(self): return paddle_frontend.neg(self) @with_unsupported_dtypes( {"2.6.0 and below": ("bool", "unsigned", "int8", "float16", "bfloat16")}, "paddle", ) def __rdiv__(self, y, /, name=None): return paddle_frontend.divide(y, self) @with_unsupported_dtypes( {"2.6.0 and below": ("bool", "unsigned", "int8", "float16", "bfloat16")}, "paddle", ) def __rtruediv__(self, y, /, name=None): return paddle_frontend.divide(y, self) @with_unsupported_dtypes( {"2.6.0 and below": ("bool", "unsigned", "int8", "float16", "bfloat16")}, "paddle", ) def __int__(self): return int(self._ivy_array) @with_unsupported_dtypes( { "2.6.0 and below": ( "bool", "unsigned", "int8", "int32", "int64", "float16", "bfloat16", ) }, "paddle", ) def __long__(self): return int(self._ivy_array) # Instance Methods # # ---------------- # def reshape(self, *args, shape=None): if args and shape: raise TypeError("reshape() got multiple values for argument 'shape'") if shape is not None: return paddle_frontend.reshape(self, shape) if args: if isinstance(args[0], (tuple, list)): shape = args[0] return paddle_frontend.reshape(self, shape) else: return paddle_frontend.reshape(self, args) else: raise ValueError("reshape() got no values for argument 'shape'") def reshape_(self, *args, shape=None): if args and shape: raise TypeError("reshape() got multiple values for argument 'shape'") if shape is not None: self.ivy_array = paddle_frontend.reshape( self._ivy_array, shape=shape ).ivy_array return self if args: if isinstance(args[0], (tuple, list)): shape = args[0] self.ivy_array = paddle_frontend.reshape( self._ivy_array, shape=shape ).ivy_array return self else: self.ivy_array = paddle_frontend.reshape( self._ivy_array, args ).ivy_array return self else: raise ValueError("reshape_() got no values for argument 'shape'") def dim(self): return self.ivy_array.ndim @with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle") def abs(self): return paddle_frontend.abs(self) @with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle") def acosh(self, name=None): return paddle_frontend.acosh(self) @with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle") def add_n(self, inputs, name=None): inputs = ivy.array(inputs) return ivy.sum(inputs, dtype=inputs.dtype, axis=0) @with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle") def ceil(self): return paddle_frontend.ceil(self) @with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle") def ceil_(self): self.ivy_array = 
self.ceil().ivy_array return self @with_unsupported_dtypes({"2.6.0 and below": ("complex", "int8")}, "paddle") def numel(self): return paddle_frontend.numel(self) @with_unsupported_dtypes({"2.6.0 and below": ("float16",)}, "paddle") def asinh(self, name=None): return paddle_frontend.asinh(self) @with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle") def asin(self, name=None): return paddle_frontend.asin(self) @with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle") def cosh(self, name=None): return paddle_frontend.cosh(self) @with_supported_dtypes( { "2.6.0 and below": ( "int32", "int64", "float64", "complex128", "float32", "complex64", "bool", ) }, "paddle", ) def diagonal(self, offset, axis1=0, axis2=1, name=None): return paddle_frontend.diagonal(self, offset=offset, axis1=axis1, axis2=axis2) @with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle") def log(self, name=None): return paddle_frontend.log(self) @with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle") def sin(self, name=None): return paddle_frontend.sin(self) @with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle") def sinh(self, name=None): return paddle_frontend.sinh(self) @with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle") def lerp(self, y, weight, name=None): return paddle_frontend.lerp(self, y, weight) @with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle") def lerp_(self, y, weight, name=None): self.ivy_array = paddle_frontend.lerp(self, y, weight).ivy_array return self @with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle") def argmax(self, axis=None, keepdim=False, dtype=None, name=None): return paddle_frontend.argmax(self, axis=axis, keepdim=keepdim, dtype=dtype) @with_unsupported_dtypes({"2.6.0 and below": ("float16", "uint16")}, "paddle") def unsqueeze(self, axis=None, name=None): return paddle_frontend.Tensor(ivy.expand_dims(self._ivy_array, axis=axis)) @with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle") def sqrt(self, name=None): return paddle_frontend.sqrt(self) @with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle") def sqrt_(self, name=None): self.ivy_array = self.sqrt().ivy_array return self @with_unsupported_dtypes({"2.6.0 and below": ("bfloat16", "uint16")}, "paddle") def zero_(self): self.ivy_array = paddle_frontend.zeros_like(self).ivy_array return self @with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle") def cos(self, name=None): return paddle_frontend.cos(self) @with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle") def exp(self, name=None): return paddle_frontend.exp(self) @with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle") def exp_(self, name=None): self.ivy_array = self.exp().ivy_array return self @with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle") def erf(self, name=None): return paddle_frontend.erf(self) @with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle") def subtract(self, y, name=None): return paddle_frontend.subtract(self, y) @with_unsupported_dtypes( {"2.6.0 and below": ("float16", "uint8", "int8", "bool")}, "paddle" ) def subtract_(self, y, name=None): self.ivy_array = self.subtract(y).ivy_array return self @with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle") def log10(self, name=None): return 
paddle_frontend.Tensor(ivy.log10(self._ivy_array)) @with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle") def argsort(self, axis=-1, descending=False, name=None): return paddle_frontend.argsort(self, axis=axis, descending=descending) @with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle") def floor(self, name=None): return paddle_frontend.floor(self) @with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle") def floor_(self): self.ivy_array = self.floor().ivy_array return self @with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle") def round_(self, name=None): self.ivy_array = paddle_frontend.round(self).ivy_array return self @with_supported_dtypes( {"2.6.0 and below": ("float32", "float64", "int32", "int64")}, "paddle" ) def clip(self, min=None, max=None, name=None): ivy.utils.assertions.check_all_or_any_fn( min, max, fn=ivy.exists, type="any", limit=[1, 2], message="at most one of min or max can be None", ) if min is None: ret = ivy.minimum(self._ivy_array, max) elif max is None: ret = ivy.maximum(self._ivy_array, min) else: ret = ivy.clip(self._ivy_array, min, max) return paddle_frontend.Tensor(ret) @with_supported_dtypes( {"2.6.0 and below": ("float32", "float64", "int32", "int64")}, "paddle" ) def clip_(self, min=None, max=None, name=None): self._ivy_array = self.clip(min, max).ivy_array return self @with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle") def tanh(self, name=None): return paddle_frontend.tanh(self) @with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle") def add(self, y, name=None): return paddle_frontend.Tensor(ivy.add(self._ivy_array, _to_ivy_array(y))) @with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle") def add_(self, y, name=None): self.ivy_array = paddle_frontend.add(self, y).ivy_array return self @with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle") def addmm(self, x, y, beta=1.0, alpha=1.0, name=None): return paddle_frontend.addmm(self, x, y, beta, alpha) @with_supported_dtypes( {"2.6.0 and below": ("float16", "float32", "float64", "int32", "int64")}, "paddle", ) def isinf(self, name=None): return paddle_frontend.isinf(self) @with_unsupported_dtypes({"2.6.0 and below": ("float16", "uint16")}, "paddle") def unsqueeze_(self, axis=None, name=None): self.ivy_array = self.unsqueeze(axis=axis).ivy_array return self @with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle") def square(self, name=None): return paddle_frontend.square(self) @with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle") def remainder_(self, y, name=None): self.ivy_array = paddle_frontend.remainder(self, y).ivy_array return self @with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle") def cholesky(self, upper=False, name=None): return paddle_frontend.cholesky(self, upper=upper) @with_unsupported_dtypes( {"2.6.0 and below": ("float16", "uint16", "int16")}, "paddle" ) def squeeze(self, axis=None, name=None): if isinstance(axis, int) and self.ndim > 0: if self.shape[axis] > 1: return self if len(self.shape) == 0: return self return paddle_frontend.squeeze(self, axis=axis) @with_unsupported_dtypes( {"2.6.0 and below": ("float16", "uint16", "int16")}, "paddle" ) def squeeze_(self, axis=None, name=None): self.ivy_array = paddle_frontend.squeeze(self, axis=axis).ivy_array return self @with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, 
"paddle") def multiply(self, y, name=None): return paddle_frontend.multiply(self, y) @with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle") def matmul(self, y, transpose_x=False, transpose_y=False, name=None): return paddle_frontend.matmul( self, y, transpose_x=transpose_x, transpose_y=transpose_y ) @with_supported_dtypes( {"2.6.0 and below": ("float16", "float32", "float64", "int32", "int64")}, "paddle", ) def isfinite(self, name=None): return paddle_frontend.isfinite(self) @with_supported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") def all(self, axis=None, keepdim=False, name=None): return paddle_frontend.Tensor( ivy.all(self.ivy_array, axis=axis, keepdims=keepdim) ) @with_supported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle") def allclose(self, other, rtol=1e-05, atol=1e-08, equal_nan=False, name=None): return paddle_frontend.allclose( self, other, rtol=rtol, atol=atol, equal_nan=equal_nan ) @with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle") def sort(self, axis=-1, descending=False, name=None): return paddle_frontend.sort(self, axis=axis, descending=descending) @with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle") def log1p(self, name=None): return paddle_frontend.log1p(self) @with_supported_dtypes( { "2.4.2 and below": ( "bool", "uint8", "int8", "int16", "int32", "int64", ) }, "paddle", ) def bitwise_and(self, y, out=None, name=None): return paddle_frontend.bitwise_and(self, y) @with_supported_dtypes( { "2.6.0 and below": ( "bool", "int8", "int16", "int32", "int64", "float32", "float64", ) }, "paddle", ) def logical_or(self, y, out=None, name=None): return paddle_frontend.logical_or(self, y, out=out) @with_supported_dtypes( {"2.6.0 and below": ("bool", "uint8", "int8", "int16", "int32", "int64")}, "paddle", ) def bitwise_xor(self, y, out=None, name=None): return paddle_frontend.bitwise_xor(self, y) @with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle") def any(self, axis=None, keepdim=False, name=None): return paddle_frontend.any(self, axis=axis, keepdim=keepdim) @with_unsupported_dtypes({"2.6.0 and below": "bfloat16"}, "paddle") def astype(self, dtype): return paddle_frontend.Tensor(ivy.astype(self._ivy_array, dtype)) @with_supported_dtypes( {"2.6.0 and below": ("bool", "uint8", "int8", "int16", "int32", "int64")}, "paddle", ) def bitwise_not(self, out=None, name=None): return paddle_frontend.bitwise_not(self, out=out) @with_supported_dtypes( { "2.6.0 and below": ( "bool", "int8", "int16", "int32", "int64", ) }, "paddle", ) def bitwise_or(self, y, out=None, name=None): return paddle_frontend.bitwise_or(self, y, out=out) @with_supported_dtypes( { "2.6.0 and below": ( "bool", "int8", "int16", "int32", "int64", "float32", "float64", ) }, "paddle", ) def logical_xor(self, y, out=None, name=None): return paddle_frontend.logical_xor(self, y, out=out) @with_supported_dtypes( {"2.6.0 and below": ("float16", "float32", "float64", "int32", "int64")}, "paddle", ) def isnan(self, name=None): return paddle_frontend.isnan(self) @with_unsupported_dtypes( { "2.6.0 and below": ( "bool", "uint8", "int8", "int16", "complex64", "complex128", ) }, "paddle", ) def greater_than(self, y, name=None): return paddle_frontend.greater_than(self, y) @with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle") def rsqrt(self, name=None): return paddle_frontend.rsqrt(self) @with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle") def 
rsqrt_(self, name=None): self.ivy_array = self.rsqrt().ivy_array return self @with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle") def reciprocal(self, name=None): return paddle_frontend.reciprocal(self) @with_supported_dtypes( { "2.6.0 and below": ( "bool", "int8", "int16", "int32", "int64", "float32", "float64", ) }, "paddle", ) def logical_and(self, y, out=None, name=None): return paddle_frontend.logical_and(self, y, out=out) @with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle") def divide(self, y, name=None): return paddle_frontend.divide(self, y) @with_supported_dtypes( {"2.6.0 and below": ("float32", "float64", "complex64", "complex128")}, "paddle", ) def eigvals(self, name=None): return paddle_frontend.eigvals(self) @with_unsupported_dtypes( { "2.6.0 and below": ( "bool", "uint8", "int8", "int16", "complex64", "complex128", ) }, "paddle", ) def less_than(self, y, name=None): return paddle_frontend.less_than(self, y) @with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle") def cumprod(self, dim=None, dtype=None, name=None): return paddle_frontend.cumprod(self, dim=dim, dtype=dtype) @with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle") def cumsum(self, axis=None, dtype=None, name=None): return paddle_frontend.Tensor( ivy.cumsum(self._ivy_array, axis=axis, dtype=dtype) ) @with_supported_dtypes( {"2.6.0 and below": ("complex64", "complex128", "float32", "float64")}, "paddle", ) def angle(self, name=None): return paddle_frontend.angle(self) @with_unsupported_dtypes( { "2.6.0 and below": ( "uint8", "int8", "int16", "complex64", "complex128", ) }, "paddle", ) def equal(self, y, name=None): return paddle_frontend.equal(self, y) @with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle") def rad2deg(self, name=None): return paddle_frontend.rad2deg(self) @with_unsupported_dtypes( { "2.6.0 and below": ( "uint8", "int8", "int16", "float16", "complex64", "complex128", ) }, "paddle", ) def equal_all(self, y, name=None): return paddle_frontend.equal_all(self, y) @with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle") def maximum(self, other, name=None): return paddle_frontend.maximum(self, other) @with_unsupported_dtypes({"2.6.0 and below": "bfloat16"}, "paddle") def fmax(self, y, name=None): return paddle_frontend.fmax(self, y) @with_unsupported_dtypes({"2.6.0 and below": "bfloat16"}, "paddle") def fmin(self, y, name=None): return paddle_frontend.fmin(self, y) @with_supported_dtypes( {"2.6.0 and below": ("float32", "float64", "int32", "int64")}, "paddle" ) def minimum(self, y, name=None): return paddle_frontend.minimum(self, y) @with_supported_dtypes( {"2.6.0 and below": ("float32", "float64", "int32", "int64")}, "paddle" ) def max(self, axis=None, keepdim=False, name=None): return paddle_frontend.max(self, axis=axis, keepdim=keepdim) @with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle") def deg2rad(self, name=None): return paddle_frontend.deg2rad(self) @with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle") def digamma(self, name=None): return paddle_frontend.digamma(self) @with_supported_dtypes( {"2.6.0 and below": ("float32", "float64", "int32", "int64", "bool")}, "paddle" ) def rot90(self, k=1, axes=(0, 1), name=None): return paddle_frontend.rot90(self, k=k, axes=axes) @with_supported_dtypes( {"2.6.0 and below": ("complex64", "complex128")}, "paddle", ) def imag(self, name=None): return 
paddle_frontend.imag(self) def is_tensor(self): return paddle_frontend.is_tensor(self) @with_supported_dtypes( { "2.6.0 and below": ( "float32", "float64", ) }, "paddle", ) def isclose(self, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None): return paddle_frontend.isclose( self, y, rtol=rtol, atol=atol, equal_nan=equal_nan ) @with_supported_dtypes({"2.6.0 and below": ("int32", "int64")}, "paddle") def floor_divide(self, y, name=None): return paddle_frontend.floor_divide(self, y) @with_supported_dtypes({"2.6.0 and below": ("int32", "int64")}, "paddle") def mod(self, y, name=None): return paddle_frontend.Tensor(ivy.fmod(self._ivy_array, _to_ivy_array(y))) @with_supported_dtypes( {"2.6.0 and below": ("float32", "float64", "int32", "int64")}, "paddle" ) def floor_mod(self, y, name=None): return paddle_frontend.remainder(self, y) # cond @with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle") def cond(self, p=None, name=None): return paddle_frontend.cond(self, p=p, name=name) @with_unsupported_dtypes({"2.4.2 and below": ("int16", "float16")}, "paddle") def conj(self, name=None): return paddle_frontend.conj(self) @with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle") def log2(self, name=None): return paddle_frontend.log2(self) @with_unsupported_dtypes( {"2.4.2 and below": ("float32", "float64", "int32", "int64")}, "paddle" ) def neg(self, name=None): return paddle_frontend.neg(self) @with_supported_dtypes( { "2.6.0 and below": ( "bool", "int8", "int16", "int32", "int64", "float32", "float64", ) }, "paddle", ) def logical_not(self, out=None, name=None): return paddle_frontend.logical_not(self) @with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle") def sign(self, name=None): return paddle_frontend.sign(self) @with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle") def var(self, axis=None, unbiased=True, keepdim=False, name=None): return paddle_frontend.var(self, axis=axis, unbiased=unbiased, keepdim=keepdim) @with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle") def sgn(self, name=None): return paddle_frontend.sgn(self) def tolist(self): return paddle_frontend.Tensor(ivy.to_list(self._ivy_array)) @with_supported_dtypes( {"2.6.0 and below": ("float32", "float64", "int32", "int64")}, "paddle", ) def min(self, axis=None, keepdim=False, name=None): return paddle_frontend.min(self, axis=axis, keepdim=keepdim) @with_supported_dtypes( {"2.6.0 and below": ("int32", "int64", "float32", "float64")}, "paddle" ) def pow(self, y, name=None): return paddle_frontend.pow(self, y) @with_supported_dtypes( {"2.6.0 and below": ("float32", "float64", "int32", "int64")}, "paddle" ) def prod(self, axis=None, keepdim=False, dtype=None, name=None): return paddle_frontend.Tensor( ivy.prod(self._ivy_array, axis=axis, keepdims=keepdim, dtype=dtype) ) @with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle") def atan(self, name=None): return paddle_frontend.atan(self) @with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle") def atanh(self, name=None): return paddle_frontend.atanh(self) @with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle") def std(self, axis=None, unbiased=True, keepdim=False, name=None): return paddle_frontend.std(self, axis=axis, unbiased=unbiased, keepdim=keepdim) @with_supported_dtypes( {"2.6.0 and below": ("int32", "int64", "float32", "float64")}, "paddle" ) def trunc(self, name=None): return paddle_frontend.trunc(self) 
@with_supported_dtypes({"2.6.0 and below": ("complex64", "complex128")}, "paddle") def as_real(self, name=None): if not ivy.is_complex_dtype(self._ivy_array): raise ivy.exceptions.IvyError( "as_real is only supported for complex tensors" ) re_part = ivy.real(self._ivy_array) im_part = ivy.imag(self._ivy_array) return paddle_frontend.Tensor(ivy.stack((re_part, im_part), axis=-1)) @with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle") def stanh(self, scale_a=0.67, scale_b=1.7159, name=None): return paddle_frontend.stanh(self, scale_a=scale_a, scale_b=scale_b) @with_supported_dtypes( {"2.6.0 and below": ("int32", "int64", "float32", "float64")}, "paddle" ) def trace(self, offset=0, axis1=0, axis2=1, name=None): return paddle_frontend.Tensor( ivy.trace(self._ivy_array, offset=offset, axis1=axis1, axis2=axis2) ) @with_supported_dtypes({"2.6.0 and below": ("float64", "float32")}, "paddle") def cov(self, rowvar=True, ddof=True, fweights=None, aweights=None): return paddle_frontend.Tensor( ivy.cov( self._ivy_array, rowVar=rowvar, ddof=int(ddof), fweights=fweights, aweights=aweights, ) ) @with_supported_dtypes( { "2.6.0 and below": ( "bfloat16", "float32", "float64", "int8", "int16", "int32", "int64", "uint8", ) }, "paddle", ) def flatten(self, start_axis=0, stop_axis=-1, name=None): if len(self.shape) == 0: return self.unsqueeze(axis=0) return paddle_frontend.Tensor( ivy.flatten(self.ivy_array, start_dim=start_axis, end_dim=stop_axis) ) @with_supported_dtypes( { "2.6.0 and below": ( "float32", "float64", "int16", "int32", "int64", "uint8", ) }, "paddle", ) def argmin(self, axis=None, keepdim=False, dtype=None, name=None): return paddle_frontend.argmin(self, axis=axis, keepdim=keepdim, dtype=dtype) @with_supported_dtypes( {"2.6.0 and below": ("float32", "float64", "int32", "int64")}, "paddle", ) def topk(self, k, axis=None, largest=True, sorted=True, name=None): return paddle_frontend.topk(self, k, axis=axis, largest=largest, sorted=sorted) @with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle") def remainder(self, y, name=None): return paddle_frontend.remainder(self, y) def is_floating_point(self): return paddle_frontend.is_floating_point(self) @with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle") def tanh_(self, name=None): y = self.tanh(self) return ivy.inplace_update(self, y) @with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle") def reciprocal_(self, name=None): y = self.reciprocal(self) return ivy.inplace_update(self, y) @with_unsupported_dtypes( {"2.6.0 and below": ("complex", "uint8", "uint16")}, "paddle" ) def numpy(self): return self.ivy_array.to_numpy() @with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle") def nonzero(self): return paddle_frontend.nonzero(self) @with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle") def inner(self, y, name=None): return paddle_frontend.inner(self, y, name) @with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle") def acos(self, name=None): return paddle_frontend.Tensor(ivy.acos(self._ivy_array)) @with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle") def mean(self, axis=None, keepdim=False, name=None): return paddle_frontend.mean(self, axis=axis, keepdim=keepdim) @with_supported_dtypes({"2.6.0 and below": ("float32", "float64")}, "paddle") def as_complex(self, name=None): if self.ivy_array.shape[-1] != 2: raise ivy.exceptions.IvyError( "The size of the last dimension of 
tensor does not equals 2" ) dtype = ( ivy.complex64 if ivy.dtype(self.ivy_array) == "float32" else ivy.complex128 ) re_part = self.ivy_array[..., 0] im_part = ivy.multiply(1j, self.ivy_array[..., 1]) value = paddle_frontend.Tensor(ivy.add(re_part, im_part).astype(dtype)) return value @with_supported_dtypes( {"2.6.0 and below": ("int32", "int64", "float32", "float64", "bool")}, "paddle" ) def not_equal(self, y, name=None): return paddle_frontend.not_equal(self._ivy_array, y) @with_supported_dtypes( {"2.6.0 and below": ("float32", "float64", "int32", "int64")}, "paddle" ) def less_equal(self, y, name=None): return paddle_frontend.less_equal(self._ivy_array, y) @with_supported_dtypes({"2.6.0 and below": ("complex64", "complex128")}, "paddle") def real(self, name=None): return paddle_frontend.real(self._ivy_array) @with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle") def t(self, name=None): axes = list(range(len(self.ivy_array.shape)))[::-1] return ivy.permute_dims(self.ivy_array, axes=axes) @with_supported_dtypes( { "2.6.0 and below": ( "bool", "float16", "float32", "float64", "int32", "int64", "uint8", ) }, "paddle", ) def cast(self, dtype): return paddle_frontend.cast(self, dtype) @with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle") def bmm(self, y, transpose_x=False, transpose_y=False, name=None): return paddle_frontend.bmm(self, y, transpose_x, transpose_y) @with_supported_dtypes( {"2.6.0 and below": ("float16", "float32", "float64", "int32", "int64")}, "paddle", ) def fill_(self, value): filled_tensor = paddle_frontend.full_like(self, value) return ivy.inplace_update(self, filled_tensor) @with_supported_dtypes( { "2.6.0 and below": ( "bool", "int32", "int64", "float16", "float32", "float64", ) }, "paddle", ) def unbind(self, axis=0): return paddle_frontend.unbind(self._ivy_array, axis=axis) @with_supported_dtypes( { "2.6.0 and below": ( "bool", "int32", "int64", "float16", "float32", "float64", ) }, "paddle", ) def unique_consecutive(self, axis=0): return paddle_frontend.unique_consecutive(self._ivy_array, axis=axis) def cpu(self): self.ivy_array = ivy.to_device(self.ivy_array, ivy.as_ivy_dev("cpu")) return self @with_unsupported_dtypes( {"2.6.0 and below": ("int16", "complex64", "complex128")}, "paddle", ) def split(self, num_or_sections, axis=0, name=None): return paddle_frontend.split(self._ivy_array, num_or_sections, axis, name) @with_supported_dtypes( {"2.6.0 and below": ("float32", "float64", "int32", "int64")}, "paddle" ) def frac(self, name=None): return paddle_frontend.frac(self._ivy_array) @with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle") def gather(self, y, name=None): return paddle_frontend.gather(self, y) def is_complex(self): return paddle_frontend.is_complex(self) @with_unsupported_dtypes( {"2.6.0 and below": ("float16", "uint8", "int8", "bool")}, "paddle" ) def gather_(self, y, name=None): res = self.gather(self, y) return ivy.inplace_update(self, res) @with_supported_dtypes( {"2.6.0 and below": ("float32", "float64", "int32", "int64")}, "paddle" ) def heaviside(self, y, name=None): return paddle_frontend.heaviside(self, y) @with_supported_dtypes( {"2.6.0 and below": ("bool", "int32", "int64", "float32", "float64")}, "paddle" ) def expand(self, shape, name=None): return paddle_frontend.expand(self._ivy_array, shape) @with_supported_device_and_dtypes( { "2.6.0 and below": { "cpu": ( "bool", "int32", "int64", "float32", "float64", "complex64", "complex128", ) } }, "paddle", ) def 
tile(self, repeat_times): return paddle_frontend.Tensor(ivy.tile(self._ivy_array, repeats=repeat_times)) @with_supported_dtypes( { "2.6.0 and below": ( "bool", "float16", "float32", "float64", "int8", "int16", "int32", "int64", ) }, "paddle", ) def chunk(self, chunks, axis=0, name=None): return paddle_frontend.split(self._ivy_array, num_or_sections=chunks, axis=axis)
ivy/ivy/functional/frontends/paddle/tensor/tensor.py/0
{ "file_path": "ivy/ivy/functional/frontends/paddle/tensor/tensor.py", "repo_id": "ivy", "token_count": 20721 }
39
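# Rough sketch of the paddle frontend Tensor defined in the row above,
# constructed directly from an ivy array; the NumPy backend and the
# availability of the called methods at the package level are assumptions.
import ivy
from ivy.functional.frontends.paddle.tensor.tensor import Tensor

ivy.set_backend("numpy")

t = Tensor(ivy.array([[1.0, -2.0], [3.0, -4.0]]))
print(t.dim())                    # 2
print(t.abs().ivy_array)          # element-wise absolute value
print(t.add(1.0).ivy_array)       # broadcast add via paddle_frontend.add
print(t.astype("float64").dtype)  # float64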
# global
from ivy.functional.frontends.numpy.func_wrapper import to_ivy_arrays_and_back
import ivy


# dct
@to_ivy_arrays_and_back
def dct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False, orthogonalize=None):
    return ivy.dct(x, type=type, n=n, axis=axis, norm=norm)


# fft
@to_ivy_arrays_and_back
def fft(x, n=None, axis=-1, norm="backward", overwrite_x=False):
    return ivy.fft(x, axis, norm=norm, n=n)


@to_ivy_arrays_and_back
def fft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False):
    return ivy.fft2(x, s=s, dim=axes, norm=norm)


# idct
@to_ivy_arrays_and_back
def idct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False, orthogonalize=None):
    inverse_type = {1: 1, 2: 3, 3: 2, 4: 4}[type]
    return ivy.dct(x, type=inverse_type, n=n, axis=axis, norm=norm)


# ifft
@to_ivy_arrays_and_back
def ifft(x, n=None, axis=-1, norm=None, overwrite_x=False):
    return ivy.ifft(x, axis, norm=norm, n=n)


@to_ivy_arrays_and_back
def ifftn(
    x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *, plan=None
):
    return ivy.ifftn(x, s=s, axes=axes, norm=norm)


@to_ivy_arrays_and_back
def rfftn(
    x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *, plan=None
):
    return ivy.rfftn(x, s=s, axes=axes, norm=norm)
ivy/ivy/functional/frontends/scipy/fft/fft.py/0
{ "file_path": "ivy/ivy/functional/frontends/scipy/fft/fft.py", "repo_id": "ivy", "token_count": 601 }
40
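# Round-trip sketch for the scipy.fft frontend in the row above; backend and
# import path are assumptions.
import ivy
from ivy.functional.frontends.scipy.fft import fft as scipy_fft

ivy.set_backend("numpy")

x = ivy.array([1.0, 2.0, 3.0, 4.0])

# DCT-II followed by idct (mapped to DCT-III above) with the same "ortho" norm
# should recover x up to floating-point error.
y = scipy_fft.dct(x, type=2, norm="ortho")
print(scipy_fft.idct(y, type=2, norm="ortho").ivy_array)

# fft/ifft round trip on a complex input, with the norm given explicitly.
xc = ivy.array([1 + 0j, 2 + 0j, 3 + 0j, 4 + 0j])
print(scipy_fft.ifft(scipy_fft.fft(xc), norm="backward").ivy_array)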
from abc import ABCMeta, abstractmethod from ..base import ( BaseEstimator, ClassifierMixin, MultiOutputMixin, ) import copy from ._criterion import Gini, Criterion from ._splitter import BestSplitter, Splitter from ._tree import DepthFirstTreeBuilder, Tree import ivy import numbers class BaseDecisionTree(MultiOutputMixin, BaseEstimator, metaclass=ABCMeta): @abstractmethod def __init__( self, *, criterion, splitter, max_depth, min_samples_split, min_samples_leaf, min_weight_fraction_leaf, max_features, max_leaf_nodes, random_state, min_impurity_decrease, class_weight=None, ccp_alpha=0.0, ): self.criterion = criterion self.splitter = splitter self.max_depth = max_depth self.min_samples_split = min_samples_split self.min_samples_leaf = min_samples_leaf self.min_weight_fraction_leaf = min_weight_fraction_leaf self.max_features = max_features self.max_leaf_nodes = max_leaf_nodes self.random_state = random_state self.min_impurity_decrease = min_impurity_decrease self.class_weight = class_weight self.ccp_alpha = ccp_alpha def get_depth(self): raise NotImplementedError def get_n_leaves(self): raise NotImplementedError def _fit( self, X, y, sample_weight=None, check_input=True, missing_values_in_feature_mask=None, ): ivy.seed(seed_value=self.random_state) n_samples, self.n_features_in_ = X.shape y = ivy.atleast_1d(y) if y.ndim == 1: y = ivy.reshape(y, (-1, 1)) self.n_outputs_ = y.shape[1] y = ivy.copy.copy(y) self.classes_ = [] self.n_classes_ = [] if self.class_weight is not None: ivy.copy.copy(y) y_encoded = ivy.zeros(y.shape, dtype=ivy.int32) for k in range(self.n_outputs_): classes_k, y_encoded[:, k] = ivy.unique_inverse(y[:, k]) self.classes_.append(classes_k) self.n_classes_.append(classes_k.shape[0]) y = y_encoded self.n_classes_ = ivy.array(self.n_classes_, dtype="int64") y = ivy.array(y, dtype="float32") max_depth = ( ivy.iinfo(ivy.int32).max if self.max_depth is None else self.max_depth ) if isinstance(self.min_samples_leaf, numbers.Integral): min_samples_leaf = self.min_samples_leaf else: min_samples_leaf = int(ivy.ceil(self.min_samples_leaf * n_samples)) if isinstance(self.min_samples_split, numbers.Integral): min_samples_split = self.min_samples_split else: min_samples_split = int(ivy.ceil(self.min_samples_split * n_samples)) min_samples_split = max(2, min_samples_split) min_samples_split = max(min_samples_split, 2 * min_samples_leaf) if self.max_features is None: # todo: other cases max_features = self.n_features_in_ self.max_features_ = max_features assert len(y) == n_samples, "Number of labels does not match number of samples" if sample_weight is None: min_weight_leaf = self.min_weight_fraction_leaf * n_samples else: min_weight_leaf = self.min_weight_fraction_leaf * ivy.sum(sample_weight) self.n_classes_ = ivy.array(self.n_classes_, dtype=ivy.int64) criterion = self.criterion if not isinstance(criterion, Criterion): criterion = Gini(self.n_outputs_, self.n_classes_) else: criterion = copy.deepcopy(criterion) splitter = self.splitter monotonic_cst = None if not isinstance(self.splitter, Splitter): splitter = BestSplitter( criterion, self.max_features_, min_samples_leaf, min_weight_leaf, self.random_state, monotonic_cst, ) self.tree_ = Tree(self.n_features_in_, self.n_classes_, self.n_outputs_) builder = DepthFirstTreeBuilder( splitter, min_samples_split, min_samples_leaf, min_weight_leaf, max_depth, self.min_impurity_decrease, ) builder.build(self.tree_, X, y, sample_weight, missing_values_in_feature_mask) if self.n_outputs_ == 1: self.n_classes_ = self.n_classes_[0] self.classes_ = 
self.classes_[0] self._prune_tree() return self def _prune_tree(self): if self.ccp_alpha == 0.0: return n_classes = ivy.atleast_1d(self.n_classes_) pruned_tree = Tree(self.n_features_in_, n_classes, self.n_outputs_) self.tree_ = pruned_tree def predict(self, X, check_input=True): ivy.seed(seed_value=self.random_state) proba = self.tree_.predict(X) n_samples = X.shape[0] # Classification if self.n_outputs_ == 1: return ivy.gather(self.classes_, ivy.argmax(proba, axis=1), axis=0) else: class_type = self.classes_[0].dtype predictions = ivy.zeros((n_samples, self.n_outputs_), dtype=class_type) for k in range(self.n_outputs_): predictions[:, k] = ivy.gather( self.classes_[k], ivy.argmax(proba[:, k], axis=1), axis=0 ) return predictions def apply(self, X, check_input=True): raise NotImplementedError def decision_path(self, X, check_input=True): raise NotImplementedError @property def feature_importances_(self): raise NotImplementedError class DecisionTreeClassifier(ClassifierMixin, BaseDecisionTree): def __init__( self, *, criterion="gini", splitter="best", max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features=None, random_state=None, max_leaf_nodes=None, min_impurity_decrease=0.0, class_weight=None, ccp_alpha=0.0, ): super().__init__( criterion=criterion, splitter=splitter, max_depth=max_depth, min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, min_weight_fraction_leaf=min_weight_fraction_leaf, max_features=max_features, max_leaf_nodes=max_leaf_nodes, class_weight=class_weight, random_state=random_state, min_impurity_decrease=min_impurity_decrease, ccp_alpha=ccp_alpha, ) def fit(self, X, y, sample_weight=None, check_input=True): super()._fit( X, y, sample_weight=sample_weight, check_input=check_input, ) return self def predict_proba(self, X, check_input=True): raise NotImplementedError def predict_log_proba(self, X): raise NotImplementedError
ivy/ivy/functional/frontends/sklearn/tree/_classes.py/0
{ "file_path": "ivy/ivy/functional/frontends/sklearn/tree/_classes.py", "repo_id": "ivy", "token_count": 3625 }
41
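Usage note (added, not part of the original file): a minimal sketch of how the DecisionTreeClassifier frontend defined above might be exercised. The import path (a re-export from ivy.functional.frontends.sklearn.tree) and the NumPy backend choice are assumptions; the fit/predict flow simply follows the methods shown in _classes.py above.

import ivy
from ivy.functional.frontends.sklearn.tree import DecisionTreeClassifier  # assumed re-export

ivy.set_backend("numpy")  # assumed backend

# Tiny toy problem that a depth-2 tree can separate exactly.
X = ivy.array([[0.0, 0.0], [1.0, 1.0], [1.0, 0.0], [0.5, 1.0]])
y = ivy.array([0, 1, 1, 0])

clf = DecisionTreeClassifier(max_depth=2, random_state=0)
clf.fit(X, y)
print(clf.predict(X))  # expected to reproduce the training labels on this toy data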
from . import cropping from .cropping import *
ivy/ivy/functional/frontends/tensorflow/image/__init__.py/0
{ "file_path": "ivy/ivy/functional/frontends/tensorflow/image/__init__.py", "repo_id": "ivy", "token_count": 13 }
42
import ivy
from ivy.functional.frontends.tensorflow.func_wrapper import (
    to_ivy_arrays_and_back,
    handle_tf_dtype,
)
from ivy.func_wrapper import with_unsupported_dtypes


@to_ivy_arrays_and_back
def gamma(shape, alpha, beta=None, dtype=ivy.float32, seed=None, name=None):
    return ivy.gamma(alpha, beta, shape=shape, dtype=dtype, seed=seed)


@with_unsupported_dtypes(
    {"2.15.0 and below": ("int8", "int16", "int32", "int64", "unsigned")}, "tensorflow"
)
@to_ivy_arrays_and_back
def normal(shape, mean=0.0, stddev=1.0, dtype=ivy.float32, seed=None, name=None):
    return ivy.random_normal(mean=mean, std=stddev, shape=shape, dtype=dtype, seed=seed)


@with_unsupported_dtypes(
    {"2.15.0 and below": ("int8", "int16", "unsigned")}, "tensorflow"
)
@to_ivy_arrays_and_back
@handle_tf_dtype
def poisson(shape, lam, dtype=ivy.float32, seed=None, name=None):
    shape = ivy.array(shape, dtype=ivy.int32)
    lam = ivy.array(lam, dtype=ivy.float32)
    if lam.ndim > 0:
        shape = ivy.concat([shape, ivy.array(lam.shape)])
    return ivy.poisson(shape=shape, lam=lam, dtype=dtype, seed=seed, fill_value=0)


@with_unsupported_dtypes(
    {"2.15.0 and below": ("int8", "int16", "int32", "int64", "unsigned")}, "tensorflow"
)
@to_ivy_arrays_and_back
def shuffle(value, seed=None, name=None):
    return ivy.shuffle(value, seed=seed)


@with_unsupported_dtypes(
    {"2.15.0 and below": ("int8", "int16", "unsigned")}, "tensorflow"
)
@to_ivy_arrays_and_back
def stateless_normal(
    shape, seed, mean=0.0, stddev=1.0, dtype=ivy.float32, name=None, alg="auto_select"
):
    return ivy.random_normal(
        mean=mean, std=stddev, shape=shape, dtype=dtype, seed=seed[0] + seed[1]
    )


@with_unsupported_dtypes(
    {"2.15.0 and below": ("int8", "int16", "unsigned")}, "tensorflow"
)
@to_ivy_arrays_and_back
def stateless_poisson(shape, seed, lam, dtype=ivy.int32, name=None):
    return ivy.poisson(shape=shape, lam=lam, dtype=dtype, seed=seed[0] + seed[1])


@to_ivy_arrays_and_back
def stateless_uniform(
    shape, seed, minval=0, maxval=None, dtype=ivy.float32, name=None, alg="auto_select"
):
    return ivy.random_uniform(
        shape=shape, seed=seed[0] + seed[1], low=minval, high=maxval, dtype=dtype
    )


@with_unsupported_dtypes(
    {"2.15.0 and below": ("int8", "int16", "unsigned")}, "tensorflow"
)
@to_ivy_arrays_and_back
def uniform(shape, minval=0, maxval=None, dtype=ivy.float32, seed=None, name=None):
    if maxval is None:
        if dtype != "int64":
            maxval = 1.0
        else:
            raise ValueError("maxval must be specified for int64 dtype")
    return ivy.random_uniform(
        shape=shape, low=minval, high=maxval, dtype=dtype, seed=seed
    )
ivy/ivy/functional/frontends/tensorflow/random.py/0
{ "file_path": "ivy/ivy/functional/frontends/tensorflow/random.py", "repo_id": "ivy", "token_count": 1205 }
43
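Usage note (added, not part of the original file): a minimal sketch of calling the tf.random frontend functions above. The module import style and the NumPy backend are assumptions; the seed pair passed to the stateless_* functions is combined as seed[0] + seed[1], exactly as in the implementations above.

import ivy
import ivy.functional.frontends.tensorflow as tf_frontend  # assumed import style

ivy.set_backend("numpy")  # assumed backend

n = tf_frontend.random.normal((2, 3), mean=0.0, stddev=1.0, seed=0)
u = tf_frontend.random.stateless_uniform((4,), seed=(1, 2), minval=0.0, maxval=10.0)
s = tf_frontend.random.shuffle(ivy.array([1.0, 2.0, 3.0, 4.0]), seed=0)
print(n, u, s)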
# local import math import ivy import ivy.functional.frontends.torch as torch_frontend from ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back from ivy.func_wrapper import with_supported_dtypes, with_unsupported_dtypes from collections import namedtuple @to_ivy_arrays_and_back @with_supported_dtypes( {"2.2 and below": ("float32", "float64", "complex32", "complex64")}, "torch" ) def cholesky(input, *, upper=False, out=None): return ivy.cholesky(input, upper=upper, out=out) @to_ivy_arrays_and_back def cholesky_ex(input, *, upper=False, check_errors=False, out=None): try: matrix = ivy.cholesky(input, upper=upper, out=out) info = ivy.zeros(input.shape[:-2], dtype=ivy.int32) return matrix, info except RuntimeError as e: if check_errors: raise RuntimeError(e) from e else: matrix = input * math.nan info = ivy.ones(input.shape[:-2], dtype=ivy.int32) return matrix, info @to_ivy_arrays_and_back @with_supported_dtypes({"2.2 and below": ("float32", "float64", "complex")}, "torch") def cond(input, p=None, *, out=None): return ivy.cond(input, p=p, out=out) @to_ivy_arrays_and_back @with_supported_dtypes( {"2.2 and below": ("float32", "float64", "complex32", "complex64")}, "torch" ) def cross(input, other, *, dim=None, out=None): return torch_frontend.miscellaneous_ops.cross(input, other, dim=dim, out=out) @to_ivy_arrays_and_back @with_supported_dtypes( {"2.2 and below": ("float32", "float64", "complex32", "complex64")}, "torch" ) def det(A, *, out=None): return ivy.det(A, out=out) @to_ivy_arrays_and_back def diagonal(A, *, offset=0, dim1=-2, dim2=-1): return torch_frontend.diagonal(A, offset=offset, dim1=dim1, dim2=dim2) @to_ivy_arrays_and_back def divide(input, other, *, rounding_mode=None, out=None): return ivy.divide(input, other, out=out) @to_ivy_arrays_and_back @with_unsupported_dtypes({"2.2 and below": ("bfloat16", "float16")}, "torch") def eig(input, *, out=None): return ivy.eig(input, out=out) @with_supported_dtypes( {"2.2 and below": ("float32", "float64", "complex32", "complex64", "complex128")}, "torch", ) def eigh(A, UPLO="L", *, out=None): return ivy.eigh(A, UPLO=UPLO, out=out) @to_ivy_arrays_and_back @with_supported_dtypes( {"2.2 and below": ("float32", "float64", "complex32", "complex64")}, "torch" ) def eigvals(input, *, out=None): ret = ivy.eigvals(input) if ivy.exists(out): return ivy.inplace_update(out, ret) return ret @to_ivy_arrays_and_back @with_supported_dtypes( {"2.2 and below": ("float32", "float64", "complex32", "complex64")}, "torch" ) def eigvalsh(input, UPLO="L", *, out=None): ret = ivy.eigvalsh(input, UPLO=UPLO, out=out) if "complex64" in ivy.as_ivy_dtype(ret.dtype): ret = ivy.astype(ret, ivy.float32) elif "complex128" in ivy.as_ivy_dtype(ret.dtype): ret = ivy.astype(ret, ivy.float64) return ret @to_ivy_arrays_and_back @with_supported_dtypes( {"2.2 and below": ("float32", "float64", "complex32", "complex64")}, "torch" ) def inv(A, *, out=None): return ivy.inv(A, out=out) @to_ivy_arrays_and_back @with_supported_dtypes( {"2.2 and below": ("float32", "float64", "complex32", "complex64")}, "torch" ) def inv_ex(A, *, check_errors=False, out=None): if ivy.any(ivy.det(A) == 0): if check_errors: raise RuntimeError("Singular Matrix") else: inv = A * math.nan # TODO: info should return an array containing the diagonal element of the # LU decomposition of the input matrix that is exactly zero info = ivy.ones(A.shape[:-2], dtype=ivy.int32) else: inv = ivy.inv(A, out=out) info = ivy.zeros(A.shape[:-2], dtype=ivy.int32) return inv, info @to_ivy_arrays_and_back 
@with_supported_dtypes( {"2.2 and below": ("float32", "float64", "complex32", "complex64")}, "torch" ) def lu_factor(A, *, pivot=True, out=None): return ivy.lu_factor(A, pivot=pivot, out=out) @to_ivy_arrays_and_back def lu_factor_ex(A, *, pivot=True, check_errors=False, out=None): try: LU = ivy.lu_factor(A, pivot=pivot, out=out) info = ivy.zeros(A.shape[:-2], dtype=ivy.int32) return LU, info except RuntimeError as e: if check_errors: raise RuntimeError(e) from e else: matrix = A * math.nan info = ivy.ones(A.shape[:-2], dtype=ivy.int32) return matrix, info def lu_solve(LU, pivots, B, *, left=True, adjoint=False, out=None): return ivy.lu_solve(LU, pivots, B, out=out) @to_ivy_arrays_and_back @with_supported_dtypes( {"2.2 and below": ("float32", "float64", "complex32", "complex64")}, "torch" ) def matmul(input, other, *, out=None): return ivy.matmul(input, other, out=out) @to_ivy_arrays_and_back @with_supported_dtypes({"2.2 and below": ("float32", "float64", "complex")}, "torch") def matrix_exp(A): return ivy.matrix_exp(A) @to_ivy_arrays_and_back @with_supported_dtypes( {"2.2 and below": ("float32", "float64", "complex32", "complex64")}, "torch" ) def matrix_norm(input, ord="fro", dim=(-2, -1), keepdim=False, *, dtype=None, out=None): if dtype is not None: input = ivy.astype(input, dtype) return ivy.matrix_norm(input, ord=ord, axis=dim, keepdims=keepdim, out=out) @to_ivy_arrays_and_back @with_supported_dtypes( {"2.2 and below": ("float32", "float64", "complex32", "complex64")}, "torch" ) def matrix_power(A, n, *, out=None): return ivy.matrix_power(A, n, out=out) @to_ivy_arrays_and_back @with_supported_dtypes( {"2.2 and below": ("float32", "float64", "complex32", "complex64")}, "torch" ) def matrix_rank(A, *, atol=None, rtol=None, hermitian=False, out=None): return ivy.matrix_rank(A, atol=atol, rtol=rtol, hermitian=hermitian, out=out) @to_ivy_arrays_and_back @with_supported_dtypes( {"2.2 and below": ("float32", "float64", "complex32", "complex64")}, "torch" ) def multi_dot(tensors, *, out=None): return ivy.multi_dot(tensors, out=out) @to_ivy_arrays_and_back @with_supported_dtypes( {"2.2 and below": ("float32", "float64", "complex64", "complex128")}, "torch" ) def norm(input, ord=None, dim=None, keepdim=False, *, dtype=None, out=None): if dim is None and (ord is not None): if input.ndim == 1: ret = ivy.vector_norm(input, axis=dim, keepdims=keepdim, ord=ord) else: ret = ivy.matrix_norm(input, keepdims=keepdim, ord=ord) elif dim is None and ord is None: input = ivy.flatten(input) ret = ivy.vector_norm(input, axis=0, keepdims=keepdim, ord=2) if isinstance(dim, int): ret = ivy.vector_norm(input, axis=dim, keepdims=keepdim, ord=ord) elif isinstance(dim, tuple): ret = ivy.matrix_norm(input, axis=dim, keepdims=keepdim, ord=ord) return ret @to_ivy_arrays_and_back @with_supported_dtypes( {"2.2 and below": ("float32", "float64", "complex32", "complex64")}, "torch" ) def pinv(input, *, atol=None, rtol=None, hermitian=False, out=None): # TODO: add handling for hermitian if atol is None: return ivy.pinv(input, rtol=rtol, out=out) else: sigma = ivy.svdvals(input)[0] if rtol is None: rtol = atol / sigma else: if atol > rtol * sigma: rtol = atol / sigma return ivy.pinv(input, rtol=rtol, out=out) @to_ivy_arrays_and_back @with_supported_dtypes( {"2.2 and below": ("float32", "float64", "complex32", "complex64")}, "torch" ) def qr(A, mode="reduced", *, out=None): if mode == "reduced": ret = ivy.qr(A, mode="reduced") elif mode == "r": Q, R = ivy.qr(A, mode="r") Q = [] ret = Q, R elif mode == "complete": ret = 
ivy.qr(A, mode="complete") if ivy.exists(out): return ivy.inplace_update(out, ret) return ret @to_ivy_arrays_and_back @with_supported_dtypes( {"2.2 and below": ("float32", "float64", "complex32", "complex64")}, "torch" ) def slogdet(A, *, out=None): sign, logabsdet = ivy.slogdet(A) if "complex64" in ivy.as_ivy_dtype(logabsdet.dtype): logabsdet = ivy.astype(logabsdet, ivy.float32) if "complex128" in ivy.as_ivy_dtype(logabsdet.dtype): logabsdet = ivy.astype(logabsdet, ivy.float64) ret = namedtuple("slogdet", ["sign", "logabsdet"])(sign, logabsdet) if ivy.exists(out): return ivy.inplace_update(out, ret, keep_input_dtype=True) return ret @to_ivy_arrays_and_back @with_supported_dtypes( {"2.2 and below": ("float32", "float64", "complex32", "complex64")}, "torch" ) def solve(A, B, *, left=True, out=None): if left: return ivy.solve(A, B, out=out) A_t = ivy.linalg.matrix_transpose(A) B_t = ivy.linalg.matrix_transpose(B if B.ndim > 1 else ivy.reshape(B, (-1, 1))) X_t = ivy.solve(A_t, B_t) return ivy.linalg.matrix_transpose(X_t, out=out) @to_ivy_arrays_and_back @with_supported_dtypes( {"2.2 and below": ("float32", "float64", "complex32", "complex64")}, "torch" ) def solve_ex(A, B, *, left=True, check_errors=False, out=None): try: if left: result = ivy.solve(A, B, out=out) else: A_t = ivy.linalg.matrix_transpose(A) B_t = ivy.linalg.matrix_transpose( B if B.ndim > 1 else ivy.reshape(B, (-1, 1)) ) X_t = ivy.solve(A_t, B_t) result = ivy.linalg.matrix_transpose(X_t, out=out) info = ivy.zeros(A.shape[:-2], dtype=ivy.int32) return result, info except RuntimeError as e: if check_errors: raise RuntimeError(e) from e else: result = A * math.nan info = ivy.ones(A.shape[:-2], dtype=ivy.int32) return result, info @to_ivy_arrays_and_back @with_supported_dtypes( {"2.2 and below": ("float32", "float64", "complex32", "complex64")}, "torch" ) def svd(A, /, *, full_matrices=True, driver=None, out=None): # TODO: add handling for driver and out return ivy.svd(A, compute_uv=True, full_matrices=full_matrices) @to_ivy_arrays_and_back @with_supported_dtypes( {"2.2 and below": ("float32", "float64", "complex32", "complex64")}, "torch" ) def svdvals(A, *, driver=None, out=None): if driver in ["gesvd", "gesvdj", "gesvda", None]: return ivy.svdvals(A, driver=driver, out=out) else: raise ValueError("Unsupported SVD driver") @to_ivy_arrays_and_back @with_supported_dtypes( {"2.2 and below": ("float32", "float64", "complex32", "complex64")}, "torch" ) def tensorinv(input, ind=2, *, out=None): not_invertible = "Reshaped tensor is not invertible" prod_cond = "Tensor shape must satisfy prod(A.shape[:ind]) == prod(A.shape[ind:])" positive_ind_cond = "Expected a strictly positive integer for 'ind'" input_shape = ivy.shape(input) assert ind > 0, f"{positive_ind_cond}" shape_ind_end = input_shape[:ind] shape_ind_start = input_shape[ind:] prod_ind_end = 1 prod_ind_start = 1 for i in shape_ind_start: prod_ind_start *= i for j in shape_ind_end: prod_ind_end *= j assert prod_ind_end == prod_ind_start, f"{prod_cond}." inverse_shape = shape_ind_start + shape_ind_end input = ivy.reshape(input, shape=(prod_ind_end, -1)) inverse_shape_tuple = (*inverse_shape,) assert inv_ex(input, check_errors=True), f"{not_invertible}." 
inverse_tensor = ivy.inv(input) return ivy.reshape(inverse_tensor, shape=inverse_shape_tuple, out=out) @to_ivy_arrays_and_back @with_supported_dtypes( {"2.2 and below": ("float32", "float64", "complex32", "complex64")}, "torch" ) def tensorsolve(A, B, dims=None, *, out=None): return ivy.tensorsolve(A, B, axes=dims, out=out) @to_ivy_arrays_and_back @with_supported_dtypes({"2.2 and below": ("integer", "float", "complex")}, "torch") def vander(x, N=None): if len(x.shape) < 1: raise RuntimeError("Input dim must be greater than or equal to 1.") # pytorch always return int64 for integers if "int" in x.dtype: x = ivy.astype(x, ivy.int64) if len(x.shape) == 1: # torch always returns the powers in ascending order return ivy.vander(x, N=N, increasing=True) # support multi-dimensional array original_shape = x.shape if N is None: N = x.shape[-1] # store the vander output x = ivy.reshape(x, (-1, x.shape[-1])) output = [] for i in range(x.shape[0]): output.append(ivy.vander(x[i], N=N, increasing=True)) output = ivy.stack(output) output = ivy.reshape(output, (*original_shape, N)) output = ivy.astype(output, x.dtype) return output @to_ivy_arrays_and_back @with_supported_dtypes( {"2.2 and below": ("float32", "float64", "complex32", "complex64")}, "torch" ) def vecdot(x, y, *, dim=-1, out=None): if "complex" in ivy.as_ivy_dtype(x.dtype): x = ivy.conj(x) return ivy.sum(ivy.multiply(x, y), axis=dim) @to_ivy_arrays_and_back @with_supported_dtypes( {"2.2 and below": ("float32", "float64", "complex32", "complex64")}, "torch" ) def vector_norm(input, ord=2, dim=None, keepdim=False, *, dtype=None, out=None): return ivy.vector_norm( input, axis=dim, keepdims=keepdim, ord=ord, out=out, dtype=dtype )
ivy/ivy/functional/frontends/torch/linalg.py/0
{ "file_path": "ivy/ivy/functional/frontends/torch/linalg.py", "repo_id": "ivy", "token_count": 6142 }
44
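Usage note (added, not part of the original file): a minimal sketch exercising two of the torch.linalg frontend functions defined above. The import style, the tensor constructor, and the backend choice are assumptions.

import ivy
import ivy.functional.frontends.torch as torch_frontend  # assumed import style

ivy.set_backend("numpy")  # assumed backend

A = torch_frontend.tensor([[3.0, 1.0], [1.0, 2.0]])
B = torch_frontend.tensor([[9.0], [8.0]])

X = torch_frontend.linalg.solve(A, B)                # solves A @ X = B (left=True default)
sign, logabsdet = torch_frontend.linalg.slogdet(A)   # named tuple of sign and log|det A|
print(X, sign, logabsdet)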
from . import module from .module import Module
ivy/ivy/functional/frontends/torch/nn/modules/__init__.py/0
{ "file_path": "ivy/ivy/functional/frontends/torch/nn/modules/__init__.py", "repo_id": "ivy", "token_count": 11 }
45
from . import gbm from .gbm import *
ivy/ivy/functional/frontends/xgboost/gbm/__init__.py/0
{ "file_path": "ivy/ivy/functional/frontends/xgboost/gbm/__init__.py", "repo_id": "ivy", "token_count": 13 }
46
# global from numbers import Number from typing import Optional, Union, Literal # local import ivy from ivy.func_wrapper import ( handle_array_function, handle_out_argument, to_native_arrays_and_back, handle_nestable, handle_array_like_without_promotion, inputs_to_ivy_arrays, handle_device, handle_complex_input, handle_backend_invalid, ) from ivy.utils.exceptions import handle_exceptions # Array API Standard # # -------------------# @handle_exceptions @handle_backend_invalid @handle_nestable @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def abs( x: Union[float, ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: # noqa """Calculate the absolute value for each element ``x_i`` of the input array ``x`` (i.e., the element-wise result has the same magnitude as the respective element in ``x`` but has positive sign). .. note:: For signed integer data types, the absolute value of the minimum representable integer is implementation-dependent. **Special Cases** For real-valued floating-point operands, - If ``x_i`` is ``NaN``, the result is ``NaN``. - If ``x_i`` is ``-0``, the result is ``+0``. - If ``x_i`` is ``-infinity``, the result is ``+infinity``. For complex floating-point operands, let ``a = real(x_i)`` and ``b = imag(x_i)``. and - If ``a`` is either ``+infinity`` or ``-infinity`` and ``b`` is any value (including ``NaN``), the result is ``+infinity``. - If ``a`` is any value (including ``NaN``) and ``b`` is ``+infinity``, the result is ``+infinity``. - If ``a`` is either ``+0`` or ``-0``, the result is ``abs(b)``. - If ``b`` is ``+0`` or ``-0``, the result is ``abs(a)``. - If ``a`` is ``NaN`` and ``b`` is a finite number, the result is ``NaN``. - If ``a`` is a finite number and ``b`` is ``NaN``, the result is ``NaN``. - If ``a`` is ``Na``N and ``b`` is ``NaN``, the result is ``NaN``. Parameters ---------- x input array. Should have a numeric data type out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the absolute value of each element in ``x``. The returned array must have the same data type as ``x``. This function conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.abs.html>`_ in the standard. 
Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments Examples -------- With :class:`ivy.Array` input: >>> x = ivy.array([-1,0,-6]) >>> y = ivy.abs(x) >>> print(y) ivy.array([1, 0, 6]) >>> x = ivy.array([3.7, -7.7, 0, -2, -0]) >>> y = ivy.abs(x) >>> print(y) ivy.array([ 3.7, 7.7, 0., 2., 0.]) >>> x = ivy.array([[1.1, 2.2, 3.3], [-4.4, -5.5, -6.6]]) >>> ivy.abs(x, out=x) >>> print(x) ivy.array([[ 1.1, 2.2, 3.3], [4.4, 5.5, 6.6]]) With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([0., 2.6, -3.5]), b=ivy.array([4.5, -5.3, -0, -2.3])) # noqa >>> y = ivy.abs(x) >>> print(y) { a: ivy.array([0., 2.6, 3.5]), b: ivy.array([4.5, 5.3, 0., 2.3]) } """ # noqa: E501 return ivy.current_backend(x).abs(x, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def acos( x: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Calculate an implementation-dependent approximation of the principal value of the inverse cosine, having domain [-1, +1] and codomain [+0, +π], for each element x_i of the input array x. Each element-wise result is expressed in radians. **Special cases** For floating-point operands, - If ``x_i`` is ``NaN``, the result is ``NaN``. - If ``x_i`` is greater than ``1``, the result is ``NaN``. - If ``x_i`` is less than ``-1``, the result is ``NaN``. - If ``x_i`` is ``1``, the result is ``+0``. For complex floating-point operands, let a = real(x_i) and b = imag(x_i), and - If ``a`` is either ``+0`` or ``-0`` and ``b`` is ``+0``, the result is ``π/2 - 0j``. - if ``a`` is either ``+0`` or ``-0`` and ``b`` is ``NaN``, the result is ``π/2 + NaN j``. - If ``a`` is a finite number and ``b`` is ``+infinity``, the result is ``π/2 - infinity j``. - If ``a`` is a nonzero finite number and ``b`` is ``NaN``, the result is ``NaN + NaN j``. - If ``a`` is ``-infinity`` and ``b`` is a positive (i.e., greater than 0) finite number, the result is ``π - infinity j``. - If ``a`` is ``+infinity`` and ``b`` is a positive (i.e., greater than 0) finite number, the result is ``+0 - infinity j``. - If ``a`` is ``-infinity`` and ``b`` is ``+infinity``, the result is ``3π/4 - infinity j``. - If ``a`` is ``+infinity`` and ``b`` is ``+infinity``, the result is ``π/4 - infinity j``. - If ``a`` is either ``+infinity`` or ``-infinity`` and ``b`` is ``NaN``, the result is ``NaN ± infinity j`` (sign of the imaginary component is unspecified). - If ``a`` is ``NaN`` and ``b`` is a finite number, the result is ``NaN + NaN j``. - if ``a`` is ``NaN`` and ``b`` is ``+infinity``, the result is ``NaN - infinity j``. - If ``a`` is ``NaN`` and ``b`` is ``NaN``, the result is ``NaN + NaN j``. Parameters ---------- x input array. Should have a floating-point data type. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the inverse cosine of each element in x. The returned array must have a floating-point data type determined by :ref:`type-promotion`. This function conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.acos.html>`_ in the standard. 
Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments Examples -------- With :class:`ivy.Array` input: >>> x = ivy.array([0., 1., -1.]) >>> y = ivy.acos(x) >>> print(y) ivy.array([1.57, 0. , 3.14]) >>> x = ivy.array([1., 0., -1.]) >>> y = ivy.zeros(3) >>> ivy.acos(x, out=y) >>> print(y) ivy.array([0. , 1.57, 3.14]) With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([0., -1, 1]), b=ivy.array([1., 0., -1])) >>> y = ivy.acos(x) >>> print(y) { a: ivy.array([1.57, 3.14, 0.]), b: ivy.array([0., 1.57, 3.14]) } """ return ivy.current_backend(x).acos(x, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def acosh( x: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Calculate an implementation-dependent approximation to the inverse hyperbolic cosine, having domain ``[+1, +infinity]`` and codomain ``[+0, +infinity]``, for each element ``x_i`` of the input array ``x``. **Special cases** For floating-point operands, - If ``x_i`` is ``NaN``, the result is ``NaN``. - If ``x_i`` is less than ``1``, the result is ``NaN``. - If ``x_i`` is ``1``, the result is ``+0``. - If ``x_i`` is ``+infinity``, the result is ``+infinity``. For complex floating-point operands, let a = real(x_i) and b = imag(x_i), and - If ``a`` is either ``+0`` or ``-0`` and ``b`` is ``+0``, the result is ``+0 + πj/2``. - If ``a`` is a finite number and ``b`` is ``+infinity``, the result is ``+infinity + πj/2``. - If ``a`` is a nonzero finite number and ``b`` is ``NaN``, the result is ``NaN + NaN j``. - If ``a`` is ``+0`` and ``b`` is ``NaN``, the result is ``NaN ± πj/2`` (sign of the imaginary component is unspecified). - If ``a`` is ``-infinity`` and ``b`` is a positive (i.e., greater than 0) finite number, the result is ``+infinity + πj``. - If ``a`` is ``+infinity`` and ``b`` is a positive (i.e., greater than 0) finite number, the result is ``+infinity + 0j``. - If ``a`` is ``-infinity`` and ``b`` is ``+infinity``, the result is ``+infinity + 3πj/4``. - If ``a`` is ``+infinity`` and ``b`` is ``+infinity``, the result is ``+infinity + πj/4``. - If ``a`` is either ``+infinity`` or ``-infinity`` and ``b`` is ``NaN``, the result is ``+infinity + NaN j``. - If ``a`` is ``NaN`` and ``b`` is a finite number, the result is ``NaN + NaN j``. - if ``a`` is ``NaN`` and ``b`` is ``+infinity``, the result is ``+infinity + NaN j``. - If ``a`` is ``NaN`` and ``b`` is ``NaN``, the result is ``NaN + NaN j``. Parameters ---------- x input array whose elements each represent the area of a hyperbolic sector. Should have a floating-point data type. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the inverse hyperbolic cosine of each element in x. The returned array must have a floating-point data type determined by :ref:`type-promotion`. This function conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.acosh.html>`_ in the standard. 
Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments Examples -------- With :class:`ivy.Array` input: >>> x = ivy.array([1, 2.5, 10]) >>> y = ivy.acosh(x) >>> print(y) ivy.array([0. , 1.57, 2.99]) >>> x = ivy.array([1., 2., 6.]) >>> y = ivy.zeros(3) >>> ivy.acosh(x, out=y) >>> print(y) ivy.array([0. , 1.32, 2.48]) With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([1., 2., 10.]), b=ivy.array([1., 10., 6.])) >>> y = ivy.acosh(x) >>> print(y) { a: ivy.array([0., 1.32, 2.99]), b: ivy.array([0., 2.99, 2.48]) } """ return ivy.current_backend(x).acosh(x, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def add( x1: Union[float, ivy.Array, ivy.NativeArray], x2: Union[float, ivy.Array, ivy.NativeArray], /, *, alpha: Optional[Union[int, float]] = None, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Calculate the sum for each element ``x1_i`` of the input array ``x1`` with the respective element ``x2_i`` of the input array ``x2``. **Special cases** For floating-point operands, - If either ``x1_i`` or ``x2_i`` is ``NaN``, the result is ``NaN``. - If ``x1_i`` is ``+infinity`` and ``x2_i`` is ``-infinity``, the result is ``NaN``. - If ``x1_i`` is ``-infinity`` and ``x2_i`` is ``+infinity``, the result is ``NaN``. - If ``x1_i`` is ``+infinity`` and ``x2_i`` is ``+infinity``, the result is ``+infinity``. - If ``x1_i`` is ``-infinity`` and ``x2_i`` is ``-infinity``, the result is ``-infinity``. - If ``x1_i`` is ``+infinity`` and ``x2_i`` is a finite number, the result is ``+infinity``. - If ``x1_i`` is ``-infinity`` and ``x2_i`` is a finite number, the result is ``-infinity``. - If ``x1_i`` is a finite number and ``x2_i`` is ``+infinity``, the result is ``+infinity``. - If ``x1_i`` is a finite number and ``x2_i`` is ``-infinity``, the result is ``-infinity``. - If ``x1_i`` is ``-0`` and ``x2_i`` is ``-0``, the result is ``-0``. - If ``x1_i`` is ``-0`` and ``x2_i`` is ``+0``, the result is ``+0``. - If ``x1_i`` is ``+0`` and ``x2_i`` is ``-0``, the result is ``+0``. - If ``x1_i`` is ``+0`` and ``x2_i`` is ``+0``, the result is ``+0``. - If ``x1_i`` is either ``+0`` or ``-0`` and ``x2_i`` is a nonzero finite number, the result is ``x2_i``. - If ``x1_i`` is a nonzero finite number and ``x2_i`` is either ``+0`` or ``-0``, the result is ``x1_i``. - If ``x1_i`` is a nonzero finite number and ``x2_i`` is ``-x1_i``, the result is ``+0``. - In the remaining cases, when neither ``infinity``, ``+0``, ``-0``, nor a ``NaN`` is involved, and the operands have the same mathematical sign or have different magnitudes, the sum must be computed and rounded to the nearest representable value according to IEEE 754-2019 and a supported round mode. If the magnitude is too large to represent, the operation overflows and the result is an `infinity` of appropriate mathematical sign. .. note:: Floating-point addition is a commutative operation, but not always associative. For complex floating-point operands, addition is defined according to the following table. 
For real components ``a`` and ``c``, and imaginary components ``b`` and ``d``, +-------------------+-------------------+-------------------+-------------------+ | | c | dj | c+dj | +===================+===================+===================+===================+ | **a** | a + c | a + dj | (a+c) + dj | +-------------------+-------------------+-------------------+-------------------+ | **bj** | c + bj | (b+d)j | c + (b+d)j | +-------------------+-------------------+-------------------+-------------------+ | **a+bj** | (a+c) + bj | a + (b+d)j | (a+c) + (b+d)j | +-------------------+-------------------+-------------------+-------------------+ For complex floating-point operands, the real valued floating-point special cases must independently apply to the real and imaginary component operation involving real numbers as described in the above table. For example, let ``a = real(x1_i)``, ``c = real(x2_i)``, ``d = imag(x2_i)``, and - if ``a`` is ``-0``, the real component of the result is ``-0``. - Similarly, if ``b`` is ``+0`` and ``d`` is ``-0``, the imaginary component of the result is ``+0``. Hence, if ``z1 = a + bj = -0 + 0j`` and ``z2 = c + dj = -0 - 0j``, then the result of ``z1 + z2`` is ``-0 + 0j``. Parameters ---------- x1 first input array. Should have a numeric data type. x2 second input array. Must be compatible with ``x1`` (see :ref:`broadcasting`). Should have a numeric data type. alpha optional scalar multiplier for ``x2``. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the element-wise sums. The returned array must have a data type determined by :ref:`type-promotion`. This function conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.add.html>`_ in the standard. Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments Examples -------- With :class:`ivy.Array` inputs: >>> x = ivy.array([1, 2, 3]) >>> y = ivy.array([4, 5, 6]) >>> z = ivy.add(x, y) >>> print(z) ivy.array([5, 7, 9]) >>> x = ivy.array([1, 2, 3]) >>> y = ivy.array([4, 5, 6]) >>> z = ivy.add(x, y, alpha=2) >>> print(z) ivy.array([9, 12, 15]) >>> x = ivy.array([[1.1, 2.3, -3.6]]) >>> y = ivy.array([[4.8], [5.2], [6.1]]) >>> z = ivy.zeros((3, 3)) >>> ivy.add(x, y, out=z) >>> print(z) ivy.array([[5.9, 7.1, 1.2], [6.3, 7.5, 1.6], [7.2, 8.4, 2.5]]) >>> x = ivy.array([[[1.1], [3.2], [-6.3]]]) >>> y = ivy.array([[8.4], [2.5], [1.6]]) >>> ivy.add(x, y, out=x) >>> print(x) ivy.array([[[9.5], [5.7], [-4.7]]]) """ return ivy.current_backend(x1, x2).add(x1, x2, alpha=alpha, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def asin( x: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Calculate an implementation-dependent approximation of the principal value of the inverse sine, having domain ``[-1, +1]`` and codomain ``[-π/2, +π/2]`` for each element ``x_i`` of the input array ``x``. Each element- wise result is expressed in radians. **Special cases** For floating-point operands, - If ``x_i`` is ``NaN``, the result is ``NaN``. 
- If ``x_i`` is greater than ``1``, the result is ``NaN``. - If ``x_i`` is less than ``-1``, the result is ``NaN``. - If ``x_i`` is ``+0``, the result is ``+0``. - If ``x_i`` is ``-0``, the result is ``-0``. For complex floating-point operands, special cases must be handled as if the operation is implemented as ``-1j * asinh(x * 1j)``. Parameters ---------- x input array. Should have a floating-point data type. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the inverse sine of each element in ``x``. The returned array must have a floating-point data type determined by :ref:`type-promotion`. This function conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.asin.html>`_ in the standard. Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments Examples -------- With :class:`ivy.Array` input: >>> x = ivy.array([-2.4, -0, +0, 3.2, float('nan')]) >>> y = ivy.asin(x) >>> print(y) ivy.array([nan, 0., 0., nan, nan]) >>> x = ivy.array([-1, -0.5, 0.6, 1]) >>> y = ivy.zeros(4) >>> ivy.asin(x, out=y) >>> print(y) ivy.array([-1.57,-0.524,0.644,1.57]) >>> x = ivy.array([[0.1, 0.2, 0.3],[-0.4, -0.5, -0.6]]) >>> ivy.asin(x, out=x) >>> print(x) ivy.array([[0.1,0.201,0.305],[-0.412,-0.524,-0.644]]) With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([0., 0.1, 0.2]), ... b=ivy.array([0.3, 0.4, 0.5])) >>> y = ivy.asin(x) >>> print(y) {a:ivy.array([0.,0.1,0.201]),b:ivy.array([0.305,0.412,0.524])} """ return ivy.current_backend(x).asin(x, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def asinh( x: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Calculate an implementation-dependent approximation to the inverse hyperbolic sine, having domain ``[-infinity, +infinity]`` and codomain ``[-infinity, +infinity]``, for each element ``x_i`` in the input array ``x``. **Special cases** For floating-point operands, - If ``x_i`` is ``NaN``, the result is ``NaN``. - If ``x_i`` is ``+0``, the result is ``+0``. - If ``x_i`` is ``-0``, the result is ``-0``. - If ``x_i`` is ``+infinity``, the result is ``+infinity``. - If ``x_i`` is ``-infinity``, the result is ``-infinity``. For complex floating-point operands, let ``a = real(x_i)`` and ``b = imag(x_i)``, and - If ``a`` is ``+0`` and ``b`` is ``+0``, the result is ``+0 + 0j``. - If ``a`` is a positive (i.e., greater than ``0``) finite number and ``b`` is ``+infinity``, the result is ``+infinity + πj/2``. - If ``a`` is a finite number and ``b`` is ``NaN``, the result is ``NaN + NaN j``. - If ``a`` is ``+infinity`` and ``b`` is a positive (i.e., greater than ``0``) finite number, the result is ``+infinity + 0j``. - If ``a`` is ``+infinity`` and ``b`` is ``+infinity``, the result is ``+infinity + πj/4``. - If ``a`` is ``NaN`` and ``b`` is ``+0``, the result is ``NaN + 0j``. - If ``a`` is ``NaN`` and ``b`` is nonzero finite number, the result is ``NaN + NaNj``. 
- If ``a`` is ``NaN`` and ``b`` is ``+infinity``, the result is ``±infinity ± NaNj``, (sign of real component is unspecified). - If ``a`` is ``NaN`` and ``b`` is ``NaN``, ``NaN + NaNj``. Parameters ---------- x input array whose elements each represent the area of a hyperbolic sector. Should have a floating-point data type. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the inverse hyperbolic sine of each element in ``x``. The returned array must have a floating-point data type determined by :ref:`type-promotion`. This function conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.asinh.html>`_ in the standard. Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments. Examples -------- With :class:`ivy.Array` input: >>> x = ivy.array([-3.5, -0, +0, 1.3, float('nan')]) >>> y = ivy.asinh(x) >>> print(y) ivy.array([-1.97, 0., 0., 1.08, nan]) >>> x = ivy.array([-2, -0.75, 0.9, 1]) >>> y = ivy.zeros(4) >>> ivy.asinh(x, out=y) >>> print(y) ivy.array([-1.44, -0.693, 0.809, 0.881]) >>> x = ivy.array([[0.2, 0.4, 0.6],[-0.8, -1, -2]]) >>> ivy.asinh(x, out=x) >>> print(x) ivy.array([[ 0.199, 0.39, 0.569], [-0.733, -0.881, -1.44]]) With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([0., 1, 2]), ... b=ivy.array([4.2, -5.3, -0, -2.3])) >>> y = ivy.asinh(x) >>> print(y) { a: ivy.array([0., 0.881, 1.44]), b: ivy.array([2.14, -2.37, 0., -1.57]) } """ return ivy.current_backend(x).asinh(x, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def atan( x: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Calculate an implementation-dependent approximation of the principal value of the inverse tangent, having domain ``[-infinity, +infinity]`` and codomain ``[-π/2, +π/2]``, for each element ``x_i`` of the input array ``x``. Each element-wise result is expressed in radians. **Special cases** For floating-point operands, - If ``x_i`` is ``NaN``, the result is ``NaN``. - If ``x_i`` is ``+0``, the result is ``+0``. - If ``x_i`` is ``-0``, the result is ``-0``. - If ``x_i`` is ``+infinity``, the result is an implementation-dependent approximation to ``+π/2``. - If ``x_i`` is ``-infinity``, the result is an implementation-dependent approximation to ``-π/2``. Parameters ---------- x input array. Should have a floating-point data type. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the inverse tangent of each element in ``x``. The returned array must have a floating-point data type determined by :ref:`type-promotion`. This function conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.atan.html>`_ in the standard. 
Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments Examples -------- With :class:`ivy.Array` input: >>> x = ivy.array([0., 1., 2.]) >>> y = ivy.atan(x) >>> print(y) ivy.array([0. , 0.785, 1.11 ]) >>> x = ivy.array([4., 0., -6.]) >>> y = ivy.zeros(3) >>> ivy.atan(x, out=y) >>> print(y) ivy.array([ 1.33, 0. , -1.41]) With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([0., -1, 1]), b=ivy.array([1., 0., -6])) >>> y = ivy.atan(x) >>> print(y) { a: ivy.array([0., -0.785, 0.785]), b: ivy.array([0.785, 0., -1.41]) } """ return ivy.current_backend(x).atan(x, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def atan2( x1: Union[ivy.Array, ivy.NativeArray], x2: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Calculate an implementation-dependent approximation of the inverse tangent of the quotient ``x1/x2``, having domain ``[-infinity, +infinity] x. [-infinity, +infinity]`` (where the ``x`` notation denotes the set of ordered pairs of elements ``(x1_i, x2_i)``) and codomain ``[-π, +π]``, for each pair of elements ``(x1_i, x2_i)`` of the input arrays ``x1`` and ``x2``, respectively. Each element-wise result is expressed in radians. The mathematical signs of ``x1_i and x2_i`` determine the quadrant of each element-wise result. The quadrant (i.e., branch) is chosen such that each element-wise result is the signed angle in radians between the ray ending at the origin and passing through the point ``(1,0)`` and the ray ending at the origin and passing through the point ``(x2_i, x1_i)``. **Special cases** For floating-point operands, - If either ``x1_i`` or ``x2_i`` is ``NaN``, the result is ``NaN``. - If ``x1_i`` is greater than ``0`` and ``x2_i`` is ``+0``, the result is an approximation to ``+π/2``. - If ``x1_i`` is greater than ``0`` and ``x2_i`` is ``-0``, the result is an approximation to ``+π/2``. - If ``x1_i`` is ``+0`` and ``x2_i`` is greater than ``0``, the result is ``+0``. - If ``x1_i`` is ``+0`` and ``x2_i`` is ``+0``, the result is ``+0``. - If ``x1_i`` is ``+0`` and ``x2_i`` is ``-0``, the result is an approximation to ``+π``. - If ``x1_i`` is ``+0`` and ``x2_i`` is less than 0, the result is an approximation to ``+π``. - If ``x1_i`` is ``-0`` and ``x2_i`` is greater than ``0``, the result is ``-0``. - If ``x1_i`` is ``-0`` and ``x2_i`` is ``+0``, the result is ``-0``. - If ``x1_i`` is ``-0`` and ``x2_i`` is ``-0``, the result is an approximation to ``-π``. - If ``x1_i`` is ``-0`` and ``x2_i`` is less than ``0``, the result is an approximation to ``-π``. - If ``x1_i`` is less than ``0`` and ``x2_i`` is ``+0``, the result is an approximation to ``-π/2``. - If ``x1_i`` is less than ``0`` and ``x2_i`` is ``-0``, the result is an approximation to ``-π/2``. - If ``x1_i`` is greater than ``0``, ``x1_i`` is a finite number, and ``x2_i`` is ``+infinity``, the result is ``+0``. - If ``x1_i`` is greater than ``0``, ``x1_i`` is a finite number, and ``x2_i`` is ``-infinity``, the result is an approximation to ``+π``. - If ``x1_i`` is less than ``0``, ``x1_i`` is a finite number, and ``x2_i`` is ``+infinity``, the result is ``-0``. - If ``x1_i`` is less than ``0``, ``x1_i`` is a finite number, and ``x2_i`` is ``-infinity``, the result is an approximation to ``-π``. 
- If ``x1_i`` is ``+infinity`` and ``x2_i`` is finite, the result is an approximation to ``+π/2``. - If ``x1_i`` is ``-infinity`` and ``x2_i`` is finite, the result is an approximation to ``-π/2``. - If ``x1_i`` is ``+infinity`` and ``x2_i`` is ``+infinity``, the result is an approximation to ``+π/4``. - If ``x1_i`` is ``+infinity`` and ``x2_i`` is ``-infinity``, the result is an approximation to ``+3π/4``. - If ``x1_i`` is ``-infinity`` and ``x2_i`` is ``+infinity``, the result is an approximation to ``-π/4``. - If ``x1_i`` is ``-infinity`` and ``x2_i`` is ``-infinity``, the result is an approximation to ``-3π/4``. Parameters ---------- x1 input array corresponding to the y-coordinates. Should have a floating-point data type. x2 input array corresponding to the x-coordinates. Must be compatible with ``x1``. Should have a floating-point data type. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the inverse tangent of the quotient ``x1/x2``. The returned array must have a floating-point data type. This method conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.atan2.html>`_ in the standard. Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments. Examples -------- With :class:`ivy.Array` input: >>> x = ivy.array([1.0, -1.0, -2.0]) >>> y = ivy.array([2.0, 0.0, 3.0]) >>> z = ivy.atan2(x, y) >>> print(z) ivy.array([ 0.464, -1.57 , -0.588]) >>> x = ivy.array([1.0, 2.0]) >>> y = ivy.array([-2.0, 3.0]) >>> z = ivy.zeros(2) >>> ivy.atan2(x, y, out=z) >>> print(z) ivy.array([2.68 , 0.588]) >>> nan = float("nan") >>> x = ivy.array([nan, 1.0, 1.0, -1.0, -1.0]) >>> y = ivy.array([1.0, +0, -0, +0, -0]) >>> z = ivy.atan2(x, y) >>> print(z) ivy.array([ nan, 1.57, 1.57, -1.57, -1.57]) >>> x = ivy.array([+0, +0, +0, +0, -0, -0, -0, -0]) >>> y = ivy.array([1.0, +0, -0, -1.0, 1.0, +0, -0, -1.0]) >>> z = ivy.atan2(x, y) >>> print(z) ivy.array([0. , 0. , 0. , 3.14, 0. , 0. , 0. , 3.14]) >>> inf = float("infinity") >>> x = ivy.array([inf, -inf, inf, inf, -inf, -inf]) >>> y = ivy.array([1.0, 1.0, inf, -inf, inf, -inf]) >>> z = ivy.atan2(x, y) >>> print(z) ivy.array([ 1.57 , -1.57 , 0.785, 2.36 , -0.785, -2.36 ]) >>> x = ivy.array([2.5, -1.75, 3.2, 0, -1.0]) >>> y = ivy.array([-3.5, 2, 0, 0, 5]) >>> z = ivy.atan2(x, y) >>> print(z) ivy.array([ 2.52 , -0.719, 1.57 , 0. , -0.197]) >>> x = ivy.array([[1.1, 2.2, 3.3], [-4.4, -5.5, -6.6]]) >>> y = ivy.atan2(x, x) >>> print(y) ivy.array([[ 0.785, 0.785, 0.785], [-2.36 , -2.36 , -2.36 ]]) With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([0., 2.6, -3.5]), ... b=ivy.array([4.5, -5.3, -0])) >>> y = ivy.array([3.0, 2.0, 1.0]) >>> z = ivy.atan2(x, y) { a: ivy.array([0., 0.915, -1.29]), b: ivy.array([0.983, -1.21, 0.]) } >>> x = ivy.Container(a=ivy.array([0., 2.6, -3.5]), ... b=ivy.array([4.5, -5.3, -0, -2.3])) >>> y = ivy.Container(a=ivy.array([-2.5, 1.75, 3.5]), ... 
b=ivy.array([2.45, 6.35, 0, 1.5])) >>> z = ivy.atan2(x, y) >>> print(z) { a: ivy.array([3.14, 0.978, -0.785]), b: ivy.array([1.07, -0.696, 0., -0.993]) } """ return ivy.current_backend(x1).atan2(x1, x2, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def atanh( x: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Return a new array with the inverse hyperbolic tangent of the elements of ``x``. Parameters ---------- x input array whose elements each represent the area of a hyperbolic sector. Should have a floating-point data type. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the inverse hyperbolic tangent of each element in ``x``. The returned array must have a floating-point data type determined by Type Promotion Rules. This function conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.atanh.html>`_ in the standard. Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments **Special cases** For real-valued floating-point operands, - If ``x_i`` is ``NaN``, the result is ``NaN``. - If ``x_i`` is less than ``-1``, the result is ``NaN``. - If ``x_i`` is greater than ``1``, the result is ``NaN``. - If ``x_i`` is ``-1``, the result is ``-infinity``. - If ``x_i`` is ``+1``, the result is ``+infinity``. - If ``x_i`` is ``+0``, the result is ``+0``. - If ``x_i`` is ``-0``, the result is ``-0``. For complex floating-point operands, let ``a = real(x_i)``, ``b = imag(x_i)``, and - If ``a`` is ``+0`` and ``b`` is ``+0``, the result is ``+0 + 0j``. - If ``a`` is ``+0`` and ``b`` is ``NaN``, the result is ``+0 + NaN j``. - If ``a`` is ``1`` and ``b`` is ``+0``, the result is ``+infinity + 0j``. - If ``a`` is a positive (i.e., greater than ``0``) finite number and ``b`` is ``+infinity``, the result is ``+0 + πj/2``. - If ``a`` is a nonzero finite number and ``b`` is ``NaN``, the result is ``NaN + NaN j``. - If ``a`` is ``+infinity`` and ``b`` is a positive (i.e., greater than ``0``) finite number, the result is ``+0 + πj/2``. - If ``a`` is ``+infinity`` and ``b`` is ``+infinity``, the result is ``+0 + πj/2``. - If ``a`` is ``+infinity`` and ``b`` is ``NaN``, the result is ``+0 + NaN j``. - If ``a`` is ``NaN`` and ``b`` is a finite number, the result is ``NaN + NaN j``. - If ``a`` is ``NaN`` and ``b`` is ``+infinity``, the result is ``±0 + πj/2`` (sign of the real component is unspecified). - If ``a`` is ``NaN`` and ``b`` is ``NaN``, the result is ``NaN + NaN j``. Examples -------- With :class:`ivy.Array` input: >>> x = ivy.array([0, -0.5]) >>> y = ivy.atanh(x) >>> print(y) ivy.array([ 0. , -0.549]) >>> x = ivy.array([0.5, -0.5, 0.]) >>> y = ivy.zeros(3) >>> ivy.atanh(x, out=y) >>> print(y) ivy.array([ 0.549, -0.549, 0. 
]) With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([0., -0.5]), b=ivy.array([ 0., 0.5])) >>> y = ivy.atanh(x) >>> print(y) { a: ivy.array([0., -0.549]), b: ivy.array([0., 0.549]) } """ return ivy.current_backend(x).atanh(x, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def bitwise_and( x1: Union[int, bool, ivy.Array, ivy.NativeArray], x2: Union[int, bool, ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Compute the bitwise AND of the underlying binary representation of each element ``x1_i`` of the input array ``x1`` with the respective element ``x2_i`` of the input array ``x2``. Parameters ---------- x1 first input array. Should have an integer or boolean data type. x2 second input array. Must be compatible with ``x1`` (see :ref:`broadcasting`). Should have an integer or boolean data type. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the element-wise results. The returned array must have a data type determined by :ref:`type-promotion`. This function conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.bitwise_and.html>`_ in the standard. Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments Examples -------- With :class:`ivy.Array` inputs: >>> x = ivy.array([2, 3, 7]) >>> y = ivy.array([7, 1, 15]) >>> z = ivy.bitwise_and(x, y) >>> print(z) ivy.array([2, 1, 7]) >>> x = ivy.array([[True], [False]]) >>> y = ivy.array([[True], [True]]) >>> ivy.bitwise_and(x, y, out=x) >>> print(x) ivy.array([[ True],[False]]) >>> x = ivy.array([1]) >>> y = ivy.array([3]) >>> ivy.bitwise_and(x, y, out=y) >>> print(y) ivy.array([1]) With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([1, 2, 3]), b=ivy.array([4, 5, 6])) >>> y = ivy.Container(a=ivy.array([7, 8, 9]), b=ivy.array([10, 11, 11])) >>> z = ivy.bitwise_and(x, y) >>> print(z) { a: ivy.array([1, 0, 1]), b: ivy.array([0, 1, 2]) } With a mix of :class:`ivy.Array` and :class:`ivy.Container` inputs: >>> x = ivy.array([True, True]) >>> y = ivy.Container(a=ivy.array([True, False]), b=ivy.array([False, True])) >>> z = ivy.bitwise_and(x, y) >>> print(z) { a: ivy.array([True, False]), b: ivy.array([False, True]) } """ return ivy.current_backend(x1, x2).bitwise_and(x1, x2, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def bitwise_invert( x: Union[int, bool, ivy.Array, ivy.NativeArray, ivy.Container], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Inverts (flips) each bit for each element ``x_i`` of the input array ``x``. Parameters ---------- x input array. Should have an integer or boolean data type. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the element-wise results. The returned array must have the same data type as x. This function conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. 
This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.bitwise_invert.html>`_ in the standard. Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments Examples -------- With :class:`ivy.Array` input: >>> x = ivy.array([1, 6, 9]) >>> y = ivy.bitwise_invert(x) >>> print(y) ivy.array([-2, -7, -10]) With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([False, True, False]), ... b=ivy.array([True, True, False])) >>> y = ivy.bitwise_invert(x) >>> print(y) { a: ivy.array([True, False, True]), b: ivy.array([False, False, True]) } With :class:`int` input: >>> x = -8 >>> y = ivy.bitwise_invert(x) >>> print(y) ivy.array(7) With :class:`bool` input: >>> x = False >>> y = ivy.bitwise_invert(x) >>> print(y) True """ return ivy.current_backend(x).bitwise_invert(x, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def bitwise_left_shift( x1: Union[int, ivy.Array, ivy.NativeArray], x2: Union[int, ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Shifts the bits of each element ``x1_i`` of the input array ``x1`` to the left by appending ``x2_i`` (i.e., the respective element in the input array ``x2``) zeros to the right of ``x1_i``. Parameters ---------- x1 first input array. Should have an integer data type. x2 second input array. Must be compatible with ``x1`` (see :ref:`broadcasting`). Should have an integer data type. Each element must be greater than or equal to ``0``. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the element-wise results. The returned array must have a data type determined by :ref:`type-promotion`. This function conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.bitwise_left_shift.html>`_ in the standard. Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments """ return ivy.current_backend(x1, x2).bitwise_left_shift(x1, x2, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def bitwise_or( x1: Union[int, bool, ivy.Array, ivy.NativeArray], x2: Union[int, bool, ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Compute the bitwise OR of the underlying binary representation of each element ``x1_i`` of the input array ``x1`` with the respective element ``x2_i`` of the input array ``x2``. Parameters ---------- x1 first input array. Should have an integer or boolean data type. x2 second input array. Must be compatible with ``x1`` (see :ref:`broadcasting`). Should have an integer or boolean data type. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the element-wise results. The returned array must have a data type determined by :ref:`type-promotion`. 
Examples -------- With :class:`ivy.Array` inputs: >>> x = ivy.array([1, 2, 3]) >>> y = ivy.array([4, 5, 6]) >>> z = ivy.bitwise_or(x, y) >>> print(z) ivy.array([5, 7, 7]) >>> x = ivy.array([[[1], [2], [3], [4]]]) >>> y = ivy.array([[[4], [5], [6], [7]]]) >>> ivy.bitwise_or(x, y, out=x) >>> print(x) ivy.array([[[5], [7], [7], [7]]]) >>> x = ivy.array([[[1], [2], [3], [4]]]) >>> y = ivy.array([4, 5, 6, 7]) >>> z = ivy.bitwise_or(x, y) >>> print(z) ivy.array([[[5, 5, 7, 7], [6, 7, 6, 7], [7, 7, 7, 7], [4, 5, 6, 7]]]) With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([1, 2, 3]),b=ivy.array([2, 3, 4])) >>> y = ivy.Container(a=ivy.array([4, 5, 6]),b=ivy.array([5, 6, 7])) >>> z = ivy.bitwise_or(x, y) >>> print(z) { a: ivy.array([5, 7, 7]), b: ivy.array([7, 7, 7]) } With a mix of :class:`ivy.Array` and :class:`ivy.Container` inputs: >>> x = ivy.array([1, 2, 3]) >>> y = ivy.Container(a=ivy.array([4, 5, 6]),b=ivy.array([5, 6, 7])) >>> z = ivy.bitwise_or(x, y) >>> print(z) { a: ivy.array([5,7,7]), b: ivy.array([5,6,7]) } """ return ivy.current_backend(x1, x2).bitwise_or(x1, x2, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def bitwise_right_shift( x1: Union[int, ivy.Array, ivy.NativeArray], x2: Union[int, ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Shifts the bits of each element ``x1_i`` of the input array ``x1`` to the right according to the respective element ``x2_i`` of the input array ``x2``. .. note:: This operation must be an arithmetic shift (i.e., sign-propagating) and thus equivalent to floor division by a power of two. Parameters ---------- x1 first input array. Should have an integer data type. x2 second input array. Must be compatible with ``x1`` (see :ref:`broadcasting`). Should have an integer data type. Each element must be greater than or equal to ``0``. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the element-wise results. The returned array must have a data type determined by :ref:`type-promotion`. This function conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.bitwise_right_shift.html>`_ in the standard. Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments. 
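Because the shift is arithmetic (sign-propagating), it behaves like floor division by a power of two, as the note above states; for instance (values chosen purely for illustration), ``-9 >> 1`` equals ``floor(-9 / 2) = -5``.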
Examples -------- With :class:`ivy.Array` input: >>> a = ivy.array([2, 9, 16, 31]) >>> b = ivy.array([0, 1, 2, 3]) >>> y = ivy.bitwise_right_shift(a, b) >>> print(y) ivy.array([2, 4, 4, 3]) >>> a = ivy.array([[32, 40, 55], [16, 33, 170]]) >>> b = ivy.array([5, 2, 1]) >>> y = ivy.zeros((2, 3)) >>> ivy.bitwise_right_shift(a, b, out=y) >>> print(y) ivy.array([[ 1., 10., 27.], [ 0., 8., 85.]]) >>> a = ivy.array([[10, 64],[43, 87],[5, 37]]) >>> b = ivy.array([1, 3]) >>> ivy.bitwise_right_shift(a, b, out=a) >>> print(a) ivy.array([[ 5, 8], [21, 10], [ 2, 4]]) With a mix of :class:`ivy.Array` and :class:`ivy.NativeArray` inputs: >>> a = ivy.array([[10, 64],[43, 87],[5, 37]]) >>> b = ivy.native_array([1, 3]) >>> y = ivy.bitwise_right_shift(a, b) >>> print(y) ivy.array([[ 5, 8],[21, 10],[ 2, 4]]) With one :class:`ivy.Container` input: >>> a = ivy.Container(a = ivy.array([100, 200]), ... b = ivy.array([125, 243])) >>> b = ivy.array([3, 6]) >>> y = ivy.bitwise_right_shift(a, b) >>> print(y) { a: ivy.array([12, 3]), b: ivy.array([15, 3]) } With multiple :class:`ivy.Container` inputs: >>> a = ivy.Container(a = ivy.array([10, 25, 42]), ... b = ivy.array([64, 65]), ... c = ivy.array([200, 225, 255])) >>> b = ivy.Container(a = ivy.array([0, 1, 2]), ... b = ivy.array([6]), ... c = ivy.array([4, 5, 6])) >>> y = ivy.bitwise_right_shift(a, b) >>> print(y) { a: ivy.array([10, 12, 10]), b: ivy.array([1, 1]), c: ivy.array([12, 7, 3]) } """ return ivy.current_backend(x1, x2).bitwise_right_shift(x1, x2, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def bitwise_xor( x1: Union[int, bool, ivy.Array, ivy.NativeArray], x2: Union[int, bool, ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Compute the bitwise XOR of the underlying binary representation of each element ``x1_i`` of the input array ``x1`` with the respective element ``x2_i`` of the input array ``x2``. Parameters ---------- x1 first input array. Should have an integer or boolean data type. x2 second input array. Must be compatible with ``x1`` (see :ref:`broadcasting`). Should have an integer or boolean data type. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the element-wise results. The returned array must have a data type determined by :ref:`type-promotion`. This function conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.bitwise_xor.html>`_ in the standard. Both the description and the type hints above assume an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments. 
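As a small illustration (values chosen purely for exposition), ``1 ^ 3`` compares the bit patterns ``01`` and ``11``; only the twos bit differs, giving ``10``, i.e. decimal ``2``, which matches the first entry of the first array example below.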
Examples -------- With :class:`int` input: >>> x1 = 4 >>> x2 = 5 >>> y = ivy.bitwise_xor(x1, x2) >>> print(y) ivy.array(1) With :class:`bool` input: >>> x1 = True >>> x2 = False >>> y = ivy.bitwise_xor(x1, x2) >>> print(y) ivy.array(True) With :class:`ivy.Array` inputs: >>> x1 = ivy.array([1, 2, 3]) >>> x2 = ivy.array([3, 5, 7]) >>> y = ivy.zeros(3, dtype=ivy.int32) >>> ivy.bitwise_xor(x1, x2, out=y) >>> print(y) ivy.array([2, 7, 4]) >>> x1 = ivy.array([[True], [True]]) >>> x2 = ivy.array([[False], [True]]) >>> ivy.bitwise_xor(x1, x2, out=x2) >>> print(x2) ivy.array([[True], [False]]) With :class:`ivy.Container` input: >>> x1 = ivy.Container(a=ivy.array([1, 2, 3]), b=ivy.array([4, 5, 6])) >>> x2 = ivy.Container(a=ivy.array([7, 8, 9]), b=ivy.array([10, 11, 12])) >>> y = ivy.bitwise_xor(x1, x2) >>> print(y) { a: ivy.array([6, 10, 10]), b: ivy.array([14, 14, 10]) } With a mix of :class:`ivy.Array` and :class:`ivy.Container` inputs: >>> x1 = ivy.array([True, True]) >>> x2 = ivy.Container(a=ivy.array([True, False]), b=ivy.array([False, True])) >>> y = ivy.bitwise_xor(x1, x2) >>> print(y) { a: ivy.array([False, True]), b: ivy.array([True, False]) } """ return ivy.current_backend(x1, x2).bitwise_xor(x1, x2, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def ceil( x: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Round each element ``x_i`` of the input array ``x`` to the smallest (i.e., closest to ``-infinity``) integer-valued number that is not less than ``x_i``. **Special cases** - If ``x_i`` is already integer-valued, the result is ``x_i``. For floating-point operands, - If ``x_i`` is ``+infinity``, the result is ``+infinity``. - If ``x_i`` is ``-infinity``, the result is ``-infinity``. - If ``x_i`` is ``+0``, the result is ``+0``. - If ``x_i`` is ``-0``, the result is ``-0``. - If ``x_i`` is ``NaN``, the result is ``NaN``. Parameters ---------- x input array. Should have a numeric data type. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the rounded result for each element in ``x``. The returned array must have the same data type as ``x``. This method conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.ceil.html>`_ in the standard. Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments. Examples -------- With :class:`ivy.Array` input: >>> x = ivy.array([0.1, 0, -0.1]) >>> y = ivy.ceil(x) >>> print(y) ivy.array([1., 0., -0.]) >>> x = ivy.array([2.5, -3.5, 0, -3, -0]) >>> y = ivy.ones(5) >>> ivy.ceil(x, out=y) >>> print(y) ivy.array([ 3., -3., 0., -3., 0.]) >>> x = ivy.array([[3.3, 4.4, 5.5], [-6.6, -7.7, -8.8]]) >>> ivy.ceil(x, out=x) >>> print(x) ivy.array([[ 4., 5., 6.], [-6., -7., -8.]]) With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([2.5, 0.5, -1.4]), ... 
b=ivy.array([5.4, -3.2, -0, 5.2])) >>> y = ivy.ceil(x) >>> print(y) { a: ivy.array([3., 1., -1.]), b: ivy.array([6., -3., 0., 6.]) } """ return ivy.current_backend(x).ceil(x, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def cos( x: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Calculate an implementation-dependent approximation to the cosine, having domain ``(-infinity, +infinity)`` and codomain ``[-1, +1]``, for each element ``x_i`` of the input array ``x``. Each element ``x_i`` is assumed to be expressed in radians. **Special cases** For floating-point operands, - If ``x_i`` is ``NaN``, the result is ``NaN``. - If ``x_i`` is ``+0``, the result is ``1``. - If ``x_i`` is ``-0``, the result is ``1``. - If ``x_i`` is ``+infinity``, the result is ``NaN``. - If ``x_i`` is ``-infinity``, the result is ``NaN``. For complex floating-point operands, special cases must be handled as if the operation is implemented as ``cosh(x*1j)``. Parameters ---------- x input array whose elements are each expressed in radians. Should have a floating-point data type. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the cosine of each element in ``x``. The returned array must have a floating-point data type determined by :ref:`type-promotion`. This method conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.cos.html>`_ in the standard. Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments. Examples -------- With :class:`ivy.Array` input: >>> x = ivy.array([0., 1., 2.]) >>> y = ivy.cos(x) >>> print(y) ivy.array([1., 0.54, -0.416]) >>> x = ivy.array([4., 0., -6.]) >>> y = ivy.zeros(3) >>> ivy.cos(x, out=y) >>> print(y) ivy.array([-0.654, 1., 0.96]) With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([0., -1, 1]), b=ivy.array([1., 0., -6])) >>> y = ivy.cos(x) >>> print(y) { a: ivy.array([1., 0.54, 0.54]), b: ivy.array([0.54, 1., 0.96]) } """ return ivy.current_backend(x).cos(x, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def cosh( x: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Calculate an implementation-dependent approximation to the hyperbolic cosine, having domain ``[-infinity, +infinity]`` and codomain ``[-infinity, +infinity]``, for each element ``x_i`` in the input array ``x``. **Special cases** For floating-point operands, - If ``x_i`` is ``NaN``, the result is ``NaN``. - If ``x_i`` is ``+0``, the result is ``1``. - If ``x_i`` is ``-0``, the result is ``1``. - If ``x_i`` is ``+infinity``, the result is ``+infinity``. - If ``x_i`` is ``-infinity``, the result is ``+infinity``. For complex floating-point operands, let ``a = real(x_i)``, ``b = imag(x_i)``, and .. note:: For complex floating-point operands, ``cosh(conj(x))`` must equal ``conj(cosh(x))``. 
- If ``a`` is ``+0`` and ``b`` is ``+0``, the result is ``1 + 0j``. - If ``a`` is ``+0`` and ``b`` is ``+infinity``, the result is ``NaN + 0j`` (sign of the imaginary component is unspecified). - If ``a`` is ``+0`` and ``b`` is ``NaN``, the result is ``NaN + 0j`` (sign of the imaginary component is unspecified). - If ``a`` is a nonzero finite number and ``b`` is ``+infinity``, the result is ``NaN + NaN j``. - If ``a`` is a nonzero finite number and ``b`` is ``NaN``, the result is ``NaN + NaN j``. - If ``a`` is ``+infinity`` and ``b`` is ``+0``, the result is ``+infinity + 0j``. - If ``a`` is ``+infinity`` and ``b`` is a nonzero finite number, the result is ``+infinity * cis(b)``. - If ``a`` is ``+infinity`` and ``b`` is ``+infinity``, the result is ``+infinity + NaN j`` (sign of the real component is unspecified). - If ``a`` is ``+infinity`` and ``b`` is ``NaN``, the result is ``+infinity + NaN j``. - If ``a`` is ``NaN`` and ``b`` is either ``+0`` or ``-0``, the result is ``NaN + 0j`` (sign of the imaginary component is unspecified). - If ``a`` is ``NaN`` and ``b`` is a nonzero finite number, the result is ``NaN + NaN j``. - If ``a`` is ``NaN`` and ``b`` is ``NaN``, the result is ``NaN + NaN j``. where ``cis(v)`` is ``cos(v) + sin(v)*1j``. Parameters ---------- x input array whose elements each represent a hyperbolic angle. Should have a floating-point data type. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the hyperbolic cosine of each element in ``x``. The returned array must have a floating-point data type determined by :ref:`type-promotion`. This method conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.cosh.html>`_ in the standard. Both the description and the type hints above assume an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments. Examples -------- With :class:`ivy.Array` input: >>> x = ivy.array([1., 2., 3., 4.]) >>> y = ivy.cosh(x) >>> print(y) ivy.array([1.54,3.76,10.1,27.3]) >>> x = ivy.array([0.2, -1.7, -5.4, 1.1]) >>> y = ivy.zeros(4) >>> ivy.cosh(x, out=y) >>> print(y) ivy.array([1.02,2.83,111.,1.67]) >>> x = ivy.array([[1.1, 2.2, 3.3, 3.2], ... [-4.4, -5.5, -6.6, -7.2]]) >>> y = ivy.cosh(x) >>> print(y) ivy.array([[1.67,4.57,13.6,12.3],[40.7,122.,368.,670.]]) With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([1., 2., 3.]), b=ivy.array([6., 7., 8.])) >>> y = ivy.cosh(x) >>> print(y) { a:ivy.array([1.54,3.76,10.1]), b:ivy.array([202.,548.,1490.]) } """ return ivy.current_backend(x).cosh(x, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def divide( x1: Union[float, ivy.Array, ivy.NativeArray], x2: Union[float, ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: r"""Calculate the division for each element x1_i of the input array x1 with the respective element x2_i of the input array x2. **Special Cases** For real-valued floating-point operands, - If either ``x1_i`` or ``x2_i`` is ``NaN``, the result is ``NaN``. - If ``x1_i`` is either ``+infinity`` or ``-infinity`` and ``x2_i`` is either ``+infinity`` or ``-infinity``, the result is ``NaN``.
- If ``x1_i`` is either ``+0`` or ``-0`` and ``x2_i`` is either ``+0`` or ``-0``, the result is ``NaN``. - If ``x1_i`` is ``+0`` and ``x2_i`` is greater than ``0``, the result is ``+0``. - If ``x1_i`` is ``-0`` and ``x2_i`` is greater than ``0``, the result is ``-0``. - If ``x1_i`` is ``+0`` and ``x2_i`` is less than ``0``, the result is ``-0``. - If ``x1_i`` is ``-0`` and ``x2_i`` is less than ``0``, the result is ``+0``. - If ``x1_i`` is greater than ``0`` and ``x2_i`` is ``+0``, the result is ``+infinity``. - If ``x1_i`` is greater than ``0`` and ``x2_i`` is ``-0``, the result is ``-infinity``. - If ``x1_i`` is less than ``0`` and ``x2_i`` is ``+0``, the result is ``-infinity``. - If ``x1_i`` is less than ``0`` and ``x2_i`` is ``-0``, the result is ``+infinity``. - If ``x1_i`` is ``+infinity`` and ``x2_i`` is a positive (i.e., greater than ``0``) finite number, the result is ``+infinity``. - If ``x1_i`` is ``+infinity`` and ``x2_i`` is a negative (i.e., less than ``0``) finite number, the result is ``-infinity``. - If ``x1_i`` is ``-infinity`` and ``x2_i`` is a positive (i.e., greater than ``0``) finite number, the result is ``-infinity``. - If ``x1_i`` is ``-infinity`` and ``x2_i`` is a negative (i.e., less than ``0``) finite number, the result is ``+infinity``. - If ``x1_i`` is a positive (i.e., greater than ``0``) finite number and ``x2_i`` is ``+infinity``, the result is ``+0``. - If ``x1_i`` is a positive (i.e., greater than ``0``) finite number and ``x2_i`` is ``-infinity``, the result is ``-0``. - If ``x1_i`` is a negative (i.e., less than ``0``) finite number and ``x2_i`` is ``+infinity``, the result is ``-0``. - If ``x1_i`` is a negative (i.e., less than ``0``) finite number and ``x2_i`` is ``-infinity``, the result is ``+0``. - If ``x1_i`` and ``x2_i`` have the same mathematical sign and are both nonzero finite numbers, the result has a positive mathematical sign. - If ``x1_i`` and ``x2_i`` have different mathematical signs and are both nonzero finite numbers, the result has a negative mathematical sign. - In the remaining cases, where neither ``-infinity``, ``+0``, ``-0``, nor ``NaN`` is involved, the quotient must be computed and rounded to the nearest representable value according to IEEE 754-2019 and a supported rounding mode. If the magnitude is too large to represent, the operation overflows and the result is an ``infinity`` of appropriate mathematical sign. If the magnitude is too small to represent, the operation underflows and the result is a zero of appropriate mathematical sign. For complex floating-point operands, division is defined according to the following table. For real components ``a`` and ``c`` and imaginary components ``b`` and ``d``, +------------+----------------+-----------------+--------------------------+ | | c | dj | c + dj | +============+================+=================+==========================+ | **a** | a / c | -(a/d)j | special rules | +------------+----------------+-----------------+--------------------------+ | **bj** | (b/c)j | b/d | special rules | +------------+----------------+-----------------+--------------------------+ | **a + bj** | (a/c) + (b/c)j | b/d - (a/d)j | special rules | +------------+----------------+-----------------+--------------------------+ In general, for complex floating-point operands, real-valued floating-point special cases must independently apply to the real and imaginary component operations involving real numbers as described in the above table. 
When ``a``, ``b``, ``c``, or ``d`` are all finite numbers (i.e., a value other than ``NaN``, ``+infinity``, or ``-infinity``), division of complex floating-point operands should be computed as if calculated according to the textbook formula for complex number division .. math:: \frac{a + bj}{c + dj} = \frac{(ac + bd) + (bc - ad)j}{c^2 + d^2} When at least one of ``a``, ``b``, ``c``, or ``d`` is ``NaN``, ``+infinity``, or ``-infinity``, - If ``a``, ``b``, ``c``, and ``d`` are all ``NaN``, the result is ``NaN + NaN j``. - In the remaining cases, the result is implementation dependent. .. note:: For complex floating-point operands, the results of special cases may be implementation dependent depending on how an implementation chooses to model complex numbers and complex infinity (e.g., complex plane versus Riemann sphere). For those implementations following C99 and its one-infinity model, when at least one component is infinite, even if the other component is ``NaN``, the complex value is infinite, and the usual arithmetic rules do not apply to complex-complex division. In the interest of performance, other implementations may want to avoid the complex branching logic necessary to implement the one-infinity model and choose to implement all complex-complex division according to the textbook formula. Accordingly, special case behavior is unlikely to be consistent across implementations. This method conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.divide.html>`_ in the standard. Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments. Parameters ---------- x1 dividend input array. Should have a numeric data type. x2 divisor input array. Must be compatible with x1 (see Broadcasting). Should have a numeric data type. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the element-wise results. The returned array must have a floating-point data type determined by Type Promotion Rules. 
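As a worked instance of the textbook formula given above (operands chosen purely for exposition, not drawn from the standard), .. math:: \frac{1 + 2j}{3 + 4j} = \frac{(1 \cdot 3 + 2 \cdot 4) + (2 \cdot 3 - 1 \cdot 4)j}{3^2 + 4^2} = \frac{11 + 2j}{25} = 0.44 + 0.08j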
Examples -------- With :class:`ivy.Array` inputs: >>> x1 = ivy.array([2., 7., 9.]) >>> x2 = ivy.array([3., 4., 0.6]) >>> y = ivy.divide(x1, x2) >>> print(y) ivy.array([0.667, 1.75, 15.]) With mixed :class:`ivy.Array` and :class:`ivy.NativeArray` inputs: >>> x1 = ivy.array([5., 6., 9.]) >>> x2 = ivy.native_array([2., 2., 2.]) >>> y = ivy.divide(x1, x2) >>> print(y) ivy.array([2.5, 3., 4.5]) With :class:`ivy.Container` inputs: >>> x1 = ivy.Container(a=ivy.array([12., 3.5, 6.3]), b=ivy.array([3., 1., 0.9])) >>> x2 = ivy.Container(a=ivy.array([1., 2.3, 3]), b=ivy.array([2.4, 3., 2.])) >>> y = ivy.divide(x1, x2) >>> print(y) { a: ivy.array([12., 1.52, 2.1]), b: ivy.array([1.25, 0.333, 0.45]) } With mixed :class:`ivy.Container` and :class:`ivy.Array` inputs: >>> x1 = ivy.Container(a=ivy.array([12., 3.5, 6.3]), b=ivy.array([3., 1., 0.9])) >>> x2 = ivy.array([4.3, 3., 5.]) >>> y = ivy.divide(x1, x2) { a: ivy.array([2.79, 1.17, 1.26]), b: ivy.array([0.698, 0.333, 0.18]) } """ return ivy.current_backend(x1, x2).divide(x1, x2, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def equal( x1: Union[float, ivy.Array, ivy.NativeArray, ivy.Container], x2: Union[float, ivy.Array, ivy.NativeArray, ivy.Container], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Compute the truth value of x1_i == x2_i for each element x1_i of the input array x1 with the respective element x2_i of the input array x2. Parameters ---------- x1 first input array. May have any data type. x2 second input array. Must be compatible with x1 (with Broadcasting). May have any data type. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the element-wise results. The returned array must have a data type of bool. **Special cases** For real-valued floating-point operands, - If ``x1_i`` is ``NaN`` or ``x2_i`` is ``NaN``, the result is ``False``. - If ``x1_i`` is ``+infinity`` and ``x2_i`` is ``+infinity``, the result is ``True``. - If ``x1_i`` is ``-infinity`` and ``x2_i`` is ``-infinity``, the result is ``True``. - If ``x1_i`` is ``-0`` and ``x2_i`` is either ``+0`` or ``-0``, the result is ``True``. - If ``x1_i`` is ``+0`` and ``x2_i`` is either ``+0`` or ``-0``, the result is ``True``. - If ``x1_i`` is a finite number, ``x2_i`` is a finite number, and ``x1_i`` equals ``x2_i``, the result is ``True``. - In the remaining cases, the result is ``False``. For complex floating-point operands, let ``a = real(x1_i)``, ``b = imag(x1_i)``, ``c = real(x2_i)``, ``d = imag(x2_i)``, and - If ``a``, ``b``, ``c``, or ``d`` is ``NaN``, the result is ``False``. - In the remaining cases, the result is the logical AND of the equality comparison between the real values ``a`` and ``c`` (real components) and between the real values ``b`` and ``d`` (imaginary components), as described above for real-valued floating-point operands (i.e., ``a == c AND b == d``). This method conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.equal.html>`_ in the standard. Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments. 
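Note that, per the special cases above, ``NaN`` never compares equal to anything (including itself), whereas ``-0`` and ``+0`` do compare equal; for instance (values chosen purely for illustration), ``ivy.equal(ivy.array([float('nan'), -0.0]), ivy.array([float('nan'), 0.0]))`` is expected to return ``ivy.array([False, True])``.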
Examples -------- With :class:`ivy.Array` inputs: >>> x1 = ivy.array([2., 7., 9.]) >>> x2 = ivy.array([1., 7., 9.]) >>> y = ivy.equal(x1, x2) >>> print(y) ivy.array([False, True, True]) With mixed :class:`ivy.Array` and :class:`ivy.NativeArray` inputs: >>> x1 = ivy.array([5, 6, 9]) >>> x2 = ivy.native_array([2, 6, 2]) >>> y = ivy.equal(x1, x2) >>> print(y) ivy.array([False, True, False]) With :class:`ivy.Container` inputs: >>> x1 = ivy.Container(a=ivy.array([12, 3.5, 6.3]), b=ivy.array([3., 1., 0.9])) >>> x2 = ivy.Container(a=ivy.array([12, 2.3, 3]), b=ivy.array([2.4, 3., 2.])) >>> y = ivy.equal(x1, x2) >>> print(y) { a: ivy.array([True, False, False]), b: ivy.array([False, False, False]) } With mixed :class:`ivy.Container` and :class:`ivy.Array` inputs: >>> x1 = ivy.Container(a=ivy.array([12., 3.5, 6.3]), b=ivy.array([3., 1., 0.9])) >>> x2 = ivy.array([3., 1., 0.9]) >>> y = ivy.equal(x1, x2) >>> print(y) { a: ivy.array([False, False, False]), b: ivy.array([True, True, True]) } """ return ivy.current_backend(x1, x2).equal(x1, x2, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def exp( x: Union[ivy.Array, ivy.NativeArray, Number], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Calculate an implementation-dependent approximation to the exponential function, having domain ``[-infinity, +infinity]`` and codomain ``[+0, +infinity]``, for each element ``x_i`` of the input array ``x`` (``e`` raised to the power of ``x_i``, where ``e`` is the base of the natural logarithm). .. note:: For complex floating-point operands, ``exp(conj(x))`` must equal ``conj(exp(x))``. .. note:: The exponential function is an entire function in the complex plane and has no branch cuts. **Special cases** For floating-point operands, - If ``x_i`` is ``NaN``, the result is ``NaN``. - If ``x_i`` is ``+0``, the result is ``1``. - If ``x_i`` is ``-0``, the result is ``1``. - If ``x_i`` is ``+infinity``, the result is ``+infinity``. - If ``x_i`` is ``-infinity``, the result is ``+0``. For complex floating-point operands, let ``a = real(x_i)``, ``b = imag(x_i)``, and - If ``a`` is either ``+0`` or ``-0`` and ``b`` is ``+0``, the result is ``1 + 0j``. - If ``a`` is a finite number and ``b`` is ``+infinity``, the result is ``NaN + NaN j``. - If ``a`` is a finite number and ``b`` is ``NaN``, the result is ``NaN + NaN j``. - If ``a`` is ``+infinity`` and ``b`` is ``+0``, the result is ``infinity + 0j``. - If ``a`` is ``-infinity`` and ``b`` is a finite number, the result is ``+0 * cis(b)``. - If ``a`` is ``+infinity`` and ``b`` is a nonzero finite number, the result is ``+infinity * cis(b)``. - If ``a`` is ``-infinity`` and ``b`` is ``+infinity``, the result is ``0 + 0j`` (signs of real and imaginary components are unspecified). - If ``a`` is ``+infinity`` and ``b`` is ``+infinity``, the result is ``infinity + NaN j`` (sign of real component is unspecified). - If ``a`` is ``-infinity`` and ``b`` is ``NaN``, the result is ``0 + 0j`` (signs of real and imaginary components are unspecified). - If ``a`` is ``+infinity`` and ``b`` is ``NaN``, the result is ``infinity + NaN j`` (sign of real component is unspecified). - If ``a`` is ``NaN`` and ``b`` is ``+0``, the result is ``NaN + 0j``. - If ``a`` is ``NaN`` and ``b`` is not equal to ``0``, the result is ``NaN + NaN j``. - If ``a`` is ``NaN`` and ``b`` is ``NaN``, the result is ``NaN + NaN j``. 
where ``cis(v)`` is ``cos(v) + sin(v)*1j``. Parameters ---------- x input array. Should have a floating-point data type. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the evaluated exponential function result for each element in ``x``. The returned array must have a floating-point data type determined by :ref:`type-promotion`. This method conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.exp.html>`_ in the standard. Both the description and the type hints above assume an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments. Examples -------- With :class:`Number` input: >>> x = 3. >>> y = ivy.exp(x) >>> print(y) ivy.array(20.08553692) With :class:`ivy.Array` input: >>> x = ivy.array([1., 2., 3.]) >>> y = ivy.exp(x) >>> print(y) ivy.array([ 2.71828175, 7.38905621, 20.08553696]) With nested inputs in :class:`ivy.Array`: >>> x = ivy.array([[-5.67], [ivy.nan], [0.567]]) >>> y = ivy.exp(x) >>> print(y) ivy.array([[0.00344786], [ nan], [1.76297021]]) With :class:`ivy.NativeArray` input: >>> x = ivy.native_array([0., 4., 2.]) >>> y = ivy.exp(x) >>> print(y) ivy.array([ 1. , 54.59814835, 7.38905621]) With :class:`ivy.Container` input: >>> x = ivy.Container(a=3.1, b=ivy.array([3.2, 1.])) >>> y = ivy.exp(x) >>> print(y) { a: ivy.array(22.197948), b: ivy.array([24.53253174, 2.71828175]) } """ return ivy.current_backend(x).exp(x, out=out) @handle_backend_invalid @handle_nestable @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back @handle_device def imag( val: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Return the imaginary part of a complex number for each element ``x_i`` of the input array ``val``. Parameters ---------- val input array. Should have a complex floating-point data type. out optional output array, for writing the result to. Returns ------- ret Returns an array with the imaginary part of complex numbers. The returned array must have a floating-point data type determined by the precision of ``val`` (e.g., if ``val`` is ``complex64``, the returned array must be ``float32``). This method conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.imag.html>`_ in the standard. Both the description and the type hints above assume an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments. Examples -------- >>> b = ivy.array(np.array([1+2j, 3+4j, 5+6j])) >>> b ivy.array([1.+2.j, 3.+4.j, 5.+6.j]) >>> ivy.imag(b) ivy.array([2., 4., 6.]) """ return ivy.current_backend(val).imag(val, out=out) @handle_backend_invalid @handle_nestable @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back @handle_device def angle( z: Union[ivy.Array, ivy.NativeArray], /, *, deg: bool = False, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Calculate the angle of complex numbers (x + yj), element-wise. Parameters ---------- z Array-like input. deg optional bool. If ``True``, the returned angle is expressed in degrees rather than radians.
out optional output array, for writing the result to. Returns ------- ret Returns an array of angles for each complex number in the input. If deg is False(default), angle is calculated in radian and if deg is True, then angle is calculated in degrees. Examples -------- >>> z = ivy.array([-1 + 1j, -2 + 2j, 3 - 3j]) >>> z ivy.array([-1.+1.j, -2.+2.j, 3.-3.j]) >>> ivy.angle(z) ivy.array([ 2.35619449, 2.35619449, -0.78539816]) >>> ivy.angle(z,deg=True) ivy.array([135., 135., -45.]) """ return ivy.current_backend(z).angle(z, deg=deg, out=out) @handle_backend_invalid @handle_nestable @handle_out_argument @to_native_arrays_and_back @handle_device def gcd( x1: Union[ivy.Array, ivy.NativeArray, int, list, tuple], x2: Union[ivy.Array, ivy.NativeArray, int, list, tuple], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Return the greatest common divisor of |x1| and |x2|. Parameters ---------- x1 First array-like input. x2 Second array-input. out optional output array, for writing the result to. Returns ------- ret Element-wise gcd of |x1| and |x2|. Examples -------- >>> x1 = ivy.array([1, 2, 3]) >>> x2 = ivy.array([4, 5, 6]) >>> ivy.gcd(x1, x2) ivy.array([1., 1., 3.]) >>> x1 = ivy.array([1, 2, 3]) >>> ivy.gcd(x1, 10) ivy.array([1., 2., 1.]) """ return ivy.current_backend(x1, x2).gcd(x1, x2, out=out) @handle_backend_invalid @handle_nestable @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back @handle_device def exp2( x: Union[ivy.Array, float, list, tuple], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Calculate 2**p for all p in the input array. Parameters ---------- x Array-like input. out optional output array, for writing the result to. Returns ------- ret Element-wise 2 to the power x. This is a scalar if x is a scalar. Examples -------- >>> x = ivy.array([1, 2, 3]) >>> ivy.exp2(x) ivy.array([2., 4., 8.]) >>> x = [5, 6, 7] >>> ivy.exp2(x) ivy.array([32., 64., 128.]) """ return ivy.current_backend(x).exp2(x, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def expm1( x: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Calculate an implementation-dependent approximation to ``exp(x)-1``, having domain ``[-infinity, +infinity]`` and codomain ``[-1, +infinity]``, for each element ``x_i`` of the input array ``x``. .. note:: The purpose of this function is to calculate ``exp(x)-1.0`` more accurately when ``x`` is close to zero. Accordingly, conforming implementations should avoid implementing this function as simply ``exp(x)-1.0``. See FDLIBM, or some other IEEE 754-2019 compliant mathematical library, for a potential reference implementation. .. note:: For complex floating-point operands, ``expm1(conj(x))`` must equal ``conj(expm1(x))``. .. note:: The exponential function is an entire function in the complex plane and has no branch cuts. **Special cases** For floating-point operands, - If ``x_i`` is ``NaN``, the result is ``NaN``. - If ``x_i`` is ``+0``, the result is ``+0``. - If ``x_i`` is ``-0``, the result is ``-0``. - If ``x_i`` is ``+infinity``, the result is ``+infinity``. - If ``x_i`` is ``-infinity``, the result is ``-1``. For complex floating-point operands, let ``a = real(x_i)``, ``b = imag(x_i)``, and - If ``a`` is either ``+0`` or ``-0`` and ``b`` is ``+0``, the result is ``0 + 0j``. 
- If ``a`` is a finite number and ``b`` is ``+infinity``, the result is ``NaN + NaN j``. - If ``a`` is a finite number and ``b`` is ``NaN``, the result is ``NaN + NaN j``. - If ``a`` is ``+infinity`` and ``b`` is ``+0``, the result is ``+infinity + 0j``. - If ``a`` is ``-infinity`` and ``b`` is a finite number, the result is ``+0 * cis(b) - 1.0``. - If ``a`` is ``+infinity`` and ``b`` is a nonzero finite number, the result is ``+infinity * cis(b) - 1.0``. - If ``a`` is ``-infinity`` and ``b`` is ``+infinity``, the result is ``-1 + 0j`` (sign of imaginary component is unspecified). - If ``a`` is ``+infinity`` and ``b`` is ``+infinity``, the result is ``infinity + NaN j`` (sign of real component is unspecified). - If ``a`` is ``-infinity`` and ``b`` is ``NaN``, the result is ``-1 + 0j`` (sign of imaginary component is unspecified). - If ``a`` is ``+infinity`` and ``b`` is ``NaN``, the result is ``infinity + NaN j`` (sign of real component is unspecified). - If ``a`` is ``NaN`` and ``b`` is ``+0``, the result is ``NaN + 0j``. - If ``a`` is ``NaN`` and ``b`` is not equal to ``0``, the result is ``NaN + NaN j``. - If ``a`` is ``NaN`` and ``b`` is ``NaN``, the result is ``NaN + NaN j``. where ``cis(v)`` is ``cos(v) + sin(v)*1j``. Parameters ---------- x input array. Should have a numeric data type. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the evaluated result for each element in ``x``. The returned array must have a floating-point data type determined by :ref:`type-promotion`. This function conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.expm1.html>`_ in the standard. Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments. Examples -------- With :class:`ivy.Array` inputs: >>> x = ivy.array([[0, 5, float('-0'), ivy.nan]]) >>> ivy.expm1(x) ivy.array([[ 0., 147., -0., nan]]) >>> x = ivy.array([ivy.inf, 1, float('-inf')]) >>> y = ivy.zeros(3) >>> ivy.expm1(x, out=y) ivy.array([ inf, 1.72, -1. ]) With :class:`ivy.Container` inputs: >>> x = ivy.Container(a=ivy.array([-1, 0,]), ... b=ivy.array([10, 1])) >>> ivy.expm1(x) { a: ivy.array([-0.632, 0.]), b: ivy.array([2.20e+04, 1.72e+00]) } """ return ivy.current_backend(x).expm1(x, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def floor( x: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Round each element ``x_i`` of the input array ``x`` to the greatest (i.e., closest to ``+infinity``) integer-valued number that is not greater than ``x_i``. **Special cases** - If ``x_i`` is already integer-valued, the result is ``x_i``. For floating-point operands, - If ``x_i`` is ``+infinity``, the result is ``+infinity``. - If ``x_i`` is ``-infinity``, the result is ``-infinity``. - If ``x_i`` is ``+0``, the result is ``+0``. - If ``x_i`` is ``-0``, the result is ``-0``. - If ``x_i`` is ``NaN``, the result is ``NaN``. Parameters ---------- x input array. Should have a numeric data type. out optional output array, for writing the result to. 
It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the rounded result for each element in ``x``. The returned array must have the same data type as ``x``. This method conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.floor.html>`_ in the standard. Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments. Examples -------- With :class:`ivy.Array` input: >>> x = ivy.array([2,3,4]) >>> y = ivy.floor(x) >>> print(y) ivy.array([2, 3, 4]) >>> x = ivy.array([1.5, -5.5, 0, -1, -0]) >>> y = ivy.zeros(5) >>> ivy.floor(x, out=y) >>> print(y) ivy.array([ 1., -6., 0., -1., 0.]) >>> x = ivy.array([[1.1, 2.2, 3.3], [-4.4, -5.5, -6.6]]) >>> ivy.floor(x, out=x) >>> print(x) ivy.array([[ 1., 2., 3.], [-5., -6., -7.]]) With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([0., 1.5, -2.4]), ... b=ivy.array([3.4, -4.2, -0, -1.2])) >>> y = ivy.floor(x) >>> print(y) { a: ivy.array([0., 1., -3.]), b: ivy.array([3., -5., 0., -2.]) } """ return ivy.current_backend(x).floor(x, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def floor_divide( x1: Union[float, ivy.Array, ivy.NativeArray], x2: Union[float, ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: r"""Round the result of dividing each element x1_i of the input array x1 by the respective element x2_i of the input array x2 to the greatest (i.e., closest to +infinity) integer-value number that is not greater than the division result. .. note:: For input arrays which promote to an integer data type, the result of division by zero is unspecified and thus implementation-defined. **Special cases** .. note:: Floor division was introduced in Python via `PEP 238 <https://www.python.org/dev/peps/pep-0238/>`_ with the goal to disambiguate "true division" (i.e., computing an approximation to the mathematical operation of division) from "floor division" (i.e., rounding the result of division toward negative infinity). The former was computed when one of the operands was a ``float``, while the latter was computed when both operands were ``int``\s. Overloading the ``/`` operator to support both behaviors led to subtle numerical bugs when integers are possible, but not expected. To resolve this ambiguity, ``/`` was designated for true division, and ``//`` was designated for floor division. Semantically, floor division was `defined <https://www.python.org/dev/peps/pep-0238/#semantics-of-floor-division>`_ as equivalent to ``a // b == floor(a/b)``; however, special floating-point cases were left ill-defined. Accordingly, floor division is not implemented consistently across array libraries for some of the special cases documented below. Namely, when one of the operands is ``infinity``, libraries may diverge with some choosing to strictly follow ``floor(a/b)`` and others choosing to pair ``//`` with ``%`` according to the relation ``b = a % b + b * (a // b)``. The special cases leading to divergent behavior are documented below. 
This specification prefers floor division to match ``floor(divide(x1, x2))`` in order to avoid surprising and unexpected results; however, array libraries may choose to more strictly follow Python behavior. For floating-point operands, - If either ``x1_i`` or ``x2_i`` is ``NaN``, the result is ``NaN``. - If ``x1_i`` is either ``+infinity`` or ``-infinity`` and ``x2_i`` is either ``+infinity`` or ``-infinity``, the result is ``NaN``. - If ``x1_i`` is either ``+0`` or ``-0`` and ``x2_i`` is either ``+0`` or ``-0``, the result is ``NaN``. - If ``x1_i`` is ``+0`` and ``x2_i`` is greater than ``0``, the result is ``+0``. - If ``x1_i`` is ``-0`` and ``x2_i`` is greater than ``0``, the result is ``-0``. - If ``x1_i`` is ``+0`` and ``x2_i`` is less than ``0``, the result is ``-0``. - If ``x1_i`` is ``-0`` and ``x2_i`` is less than ``0``, the result is ``+0``. - If ``x1_i`` is greater than ``0`` and ``x2_i`` is ``+0``, the result is ``+infinity``. - If ``x1_i`` is greater than ``0`` and ``x2_i`` is ``-0``, the result is ``-infinity``. - If ``x1_i`` is less than ``0`` and ``x2_i`` is ``+0``, the result is ``-infinity``. - If ``x1_i`` is less than ``0`` and ``x2_i`` is ``-0``, the result is ``+infinity``. - If ``x1_i`` is ``+infinity`` and ``x2_i`` is a positive (i.e., greater than ``0``) finite number, the result is ``+infinity``. (**note**: libraries may return ``NaN`` to match Python behavior.) - If ``x1_i`` is ``+infinity`` and ``x2_i`` is a negative (i.e., less than ``0``) finite number, the result is ``-infinity``. (**note**: libraries may return ``NaN`` to match Python behavior.) - If ``x1_i`` is ``-infinity`` and ``x2_i`` is a positive (i.e., greater than ``0``) finite number, the result is ``-infinity``. (**note**: libraries may return ``NaN`` to match Python behavior.) - If ``x1_i`` is ``-infinity`` and ``x2_i`` is a negative (i.e., less than ``0``) finite number, the result is ``+infinity``. (**note**: libraries may return ``NaN`` to match Python behavior.) - If ``x1_i`` is a positive (i.e., greater than ``0``) finite number and ``x2_i`` is ``+infinity``, the result is ``+0``. - If ``x1_i`` is a positive (i.e., greater than ``0``) finite number and ``x2_i`` is ``-infinity``, the result is ``-0``. (**note**: libraries may return ``-1.0`` to match Python behavior.) - If ``x1_i`` is a negative (i.e., less than ``0``) finite number and ``x2_i`` is ``+infinity``, the result is ``-0``. (**note**: libraries may return ``-1.0`` to match Python behavior.) - If ``x1_i`` is a negative (i.e., less than ``0``) finite number and ``x2_i`` is ``-infinity``, the result is ``+0``. - If ``x1_i`` and ``x2_i`` have the same mathematical sign and are both nonzero finite numbers, the result has a positive mathematical sign. - If ``x1_i`` and ``x2_i`` have different mathematical signs and are both nonzero finite numbers, the result has a negative mathematical sign. - In the remaining cases, where neither ``-infinity``, ``+0``, ``-0``, nor ``NaN`` is involved, the quotient must be computed and rounded to the greatest (i.e., closest to `+infinity`) representable integer-value number that is not greater than the division result. If the magnitude is too large to represent, the operation overflows and the result is an ``infinity`` of appropriate mathematical sign. If the magnitude is too small to represent, the operation underflows and the result is a zero of appropriate mathematical sign. Parameters ---------- x1 first input array. Must have a numeric data type. x2 second input array. 
Must be compatible with x1 (with Broadcasting). Must have a numeric data type. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the element-wise results. The returned array must have a numeric data type. This function conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.floor_divide.html>`_ in the standard. Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments Examples -------- With :class:`ivy.Array` inputs: >>> x1 = ivy.array([13., 7., 8.]) >>> x2 = ivy.array([3., 2., 7.]) >>> y = ivy.floor_divide(x1, x2) >>> print(y) ivy.array([4., 3., 1.]) >>> x1 = ivy.array([13., 7., 8.]) >>> x2 = ivy.array([3., 2., 7.]) >>> y = ivy.zeros((2, 3)) >>> ivy.floor_divide(x1, x2, out=y) >>> print(y) ivy.array([4., 3., 1.]) >>> x1 = ivy.array([13., 7., 8.]) >>> x2 = ivy.array([3., 2., 7.]) >>> ivy.floor_divide(x1, x2, out=x1) >>> print(x1) ivy.array([4., 3., 1.]) With a mix of :class:`ivy.Array` and :class:`ivy.NativeArray` inputs: >>> x1 = ivy.array([3., 4., 5.]) >>> x2 = ivy.native_array([5., 2., 1.]) >>> y = ivy.floor_divide(x1, x2) >>> print(y) ivy.array([0., 2., 5.]) With :class:`ivy.Container` inputs: >>> x1 = ivy.Container(a=ivy.array([4., 5., 6.]), b=ivy.array([7., 8., 9.])) >>> x2 = ivy.Container(a=ivy.array([5., 4., 2.5]), b=ivy.array([2.3, 3.7, 5])) >>> y = ivy.floor_divide(x1, x2) >>> print(y) { a: ivy.array([0., 1., 2.]), b: ivy.array([3., 2., 1.]) } With mixed :class:`ivy.Container` and :class:`ivy.Array` inputs: >>> x1 = ivy.Container(a=ivy.array([4., 5., 6.]), b=ivy.array([7., 8., 9.])) >>> x2 = ivy.array([2., 2., 2.]) >>> y = ivy.floor_divide(x1, x2) >>> print(y) { a: ivy.array([2., 2., 3.]), b: ivy.array([3., 4., 4.]) } """ return ivy.current_backend(x1, x2).floor_divide(x1, x2, out=out) @handle_backend_invalid @handle_nestable @handle_out_argument @to_native_arrays_and_back @handle_device def fmin( x1: Union[ivy.Array, ivy.NativeArray], x2: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[Union[ivy.Array, ivy.NativeArray]] = None, ) -> Union[ivy.Array, ivy.NativeArray]: """Compute the element-wise minimums of two arrays. Differs from ivy.minimum in the case where one of the elements is NaN. ivy.minimum returns the NaN element while ivy.fmin returns the non-NaN element. Parameters ---------- x1 First input array. x2 Second input array. out optional output array, for writing the result to. Returns ------- ret Array with element-wise minimums. 
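For instance (a hedged illustration, with values chosen purely for exposition), ``ivy.fmin(ivy.array([float('nan')]), ivy.array([2.]))`` is expected to return ``ivy.array([2.])``, whereas ``ivy.minimum`` would propagate the ``NaN``; ``NaN`` is returned only when both operands are ``NaN``, as the second example below shows.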
Examples -------- >>> x1 = ivy.array([2, 3, 4]) >>> x2 = ivy.array([1, 5, 2]) >>> ivy.fmin(x1, x2) ivy.array([1, 3, 2]) >>> x1 = ivy.array([ivy.nan, 0, ivy.nan]) >>> x2 = ivy.array([0, ivy.nan, ivy.nan]) >>> ivy.fmin(x1, x2) ivy.array([ 0., 0., nan]) """ return ivy.current_backend(x1, x2).fmin(x1, x2, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def greater( x1: Union[float, ivy.Array, ivy.NativeArray], x2: Union[float, ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Compute the truth value of x1_i > x2_i for each element x1_i of the input array x1 with the respective element x2_i of the input array x2. Parameters ---------- x1 Input array. x2 Input array. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the element-wise results. The returned array must have a data type of bool. This function conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.greater.html>`_ in the standard. Both the description and the type hints above assume an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments Examples -------- With :class:`ivy.Array` input: >>> x = ivy.greater(ivy.array([1,2,3]),ivy.array([2,2,2])) >>> print(x) ivy.array([False, False, True]) >>> x = ivy.array([[[1.1], [3.2], [-6.3]]]) >>> y = ivy.array([[8.4], [2.5], [1.6]]) >>> ivy.greater(x, y, out=x) >>> print(x) ivy.array([[[0.], [1.], [0.]]]) With a mix of :class:`ivy.Array` and :class:`ivy.NativeArray` inputs: >>> x = ivy.array([1, 2, 3]) >>> y = ivy.native_array([4, 5, 0]) >>> z = ivy.greater(x, y) >>> print(z) ivy.array([False, False, True]) With a mix of :class:`ivy.Array` and :class:`ivy.Container` inputs: >>> x = ivy.array([[5.1, 2.3, -3.6]]) >>> y = ivy.Container(a=ivy.array([[4.], [5.], [6.]]), ... b=ivy.array([[5.], [6.], [7.]])) >>> z = ivy.greater(x, y) >>> print(z) { a: ivy.array([[True, False, False], [True, False, False], [False, False, False]]), b: ivy.array([[True, False, False], [False, False, False], [False, False, False]]) } With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([4, 5, 6]), ... b=ivy.array([2, 3, 4])) >>> y = ivy.Container(a=ivy.array([1, 2, 3]), ... b=ivy.array([5, 6, 7])) >>> z = ivy.greater(x, y) >>> print(z) { a: ivy.array([True, True, True]), b: ivy.array([False, False, False]) } """ return ivy.current_backend(x1, x2).greater(x1, x2, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def greater_equal( x1: Union[ivy.Array, ivy.NativeArray], x2: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Compute the truth value of x1_i >= x2_i for each element x1_i of the input array x1 with the respective element x2_i of the input array x2. Parameters ---------- x1 first input array. May have any data type. x2 second input array. Must be compatible with x1 (with Broadcasting). May have any data type. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to.
Returns ------- ret an array containing the element-wise results. The returned array must have a data type of bool. This function conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.greater_equal.html>`_ in the standard. Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments Examples -------- With :class:`ivy.Array` input: >>> x = ivy.greater_equal(ivy.array([1,2,3]),ivy.array([2,2,2])) >>> print(x) ivy.array([False, True, True]) >>> x = ivy.array([[10.1, 2.3, -3.6]]) >>> y = ivy.array([[4.8], [5.2], [6.1]]) >>> shape = (3,3) >>> fill_value = False >>> z = ivy.full(shape, fill_value) >>> ivy.greater_equal(x, y, out=z) >>> print(z) ivy.array([[ True, False, False], [ True, False, False], [ True, False, False]]) >>> x = ivy.array([[[1.1], [3.2], [-6.3]]]) >>> y = ivy.array([[8.4], [2.5], [1.6]]) >>> ivy.greater_equal(x, y, out=x) >>> print(x) ivy.array([[[0.], [1.], [0.]]]) With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([4, 5, 6]),b=ivy.array([2, 3, 4])) >>> y = ivy.Container(a=ivy.array([1, 2, 3]),b=ivy.array([5, 6, 7])) >>> z = ivy.greater_equal(x, y) >>> print(z) { a:ivy.array([True,True,True]), b:ivy.array([False,False,False]) } """ return ivy.current_backend(x1, x2).greater_equal(x1, x2, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def less_equal( x1: Union[ivy.Array, ivy.NativeArray], x2: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Compute the truth value of x1_i <= x2_i for each element x1_i of the input array x1 with the respective element x2_i of the input array x2. Parameters ---------- x1 first input array. May have any data type. x2 second input array. Must be compatible with x1 (with Broadcasting). May have any data type. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the element-wise results. The returned array must have a data type of bool. This function conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.less_equal.html>`_ in the standard. 
Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments Examples -------- With :class:`ivy.Array` input: >>> x = ivy.less_equal(ivy.array([1,2,3]),ivy.array([2,2,2])) >>> print(x) ivy.array([True, True, False]) >>> x = ivy.array([[10.1, 2.3, -3.6]]) >>> y = ivy.array([[4.8], [5.2], [6.1]]) >>> shape = (3,3) >>> fill_value = False >>> z = ivy.full(shape, fill_value) >>> ivy.less_equal(x, y, out=z) >>> print(z) ivy.array([[False, True, True], [False, True, True], [False, True, True]]) >>> x = ivy.array([[[1.1], [3.2], [-6.3]]]) >>> y = ivy.array([[8.4], [2.5], [1.6]]) >>> ivy.less_equal(x, y, out=x) >>> print(x) ivy.array([[[1.], [0.], [1.]]]) With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([4, 5, 6]),b=ivy.array([2, 3, 4])) >>> y = ivy.Container(a=ivy.array([1, 2, 3]),b=ivy.array([5, 6, 7])) >>> z = ivy.less_equal(x, y) >>> print(z) { a: ivy.array([False, False, False]), b: ivy.array([True, True, True]) } """ return ivy.current_backend(x1, x2).less_equal(x1, x2, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def multiply( x1: Union[float, ivy.Array, ivy.NativeArray], x2: Union[float, ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: r"""Calculate the product for each element x1_i of the input array x1 with the respective element x2_i of the input array x2. .. note:: Floating-point multiplication is not always associative due to finite precision. **Special Cases** For real-valued floating-point operands, - If either ``x1_i`` or ``x2_i`` is ``NaN``, the result is ``NaN``. - If ``x1_i`` is either ``+infinity`` or ``-infinity`` and ``x2_i`` is either ``+0`` or ``-0``, the result is ``NaN``. - If ``x1_i`` is either ``+0`` or ``-0`` and ``x2_i`` is either ``+infinity`` or ``-infinity``, the result is ``NaN``. - If ``x1_i`` and ``x2_i`` have the same mathematical sign, the result has a positive mathematical sign, unless the result is ``NaN``. If the result is ``NaN``, the "sign" of ``NaN`` is implementation-defined. - If ``x1_i`` and ``x2_i`` have different mathematical signs, the result has a negative mathematical sign, unless the result is ``NaN``. If the result is ``NaN``, the "sign" of ``NaN`` is implementation-defined. - If ``x1_i`` is either ``+infinity`` or ``-infinity`` and ``x2_i`` is either ``+infinity`` or ``-infinity``, the result is a signed infinity with the mathematical sign determined by the rule already stated above. - If ``x1_i`` is either ``+infinity`` or ``-infinity`` and ``x2_i`` is a nonzero finite number, the result is a signed infinity with the mathematical sign determined by the rule already stated above. - If ``x1_i`` is a nonzero finite number and ``x2_i`` is either ``+infinity`` or ``-infinity``, the result is a signed infinity with the mathematical sign determined by the rule already stated above. - In the remaining cases, where neither ``infinity`` nor ``NaN`` is involved, the product must be computed and rounded to the nearest representable value according to IEEE 754-2019 and a supported rounding mode. If the magnitude is too large to represent, the result is an `infinity` of appropriate mathematical sign. If the magnitude is too small to represent, the result is a zero of appropriate mathematical sign. 
For complex floating-point operands, multiplication is defined according to the following table. For real components ``a`` and ``c`` and imaginary components ``b`` and ``d``, +------------+----------------+-----------------+--------------------------+ | | c | dj | c + dj | +============+================+=================+==========================+ | **a** | a * c | (a*d)j | (a*c) + (a*d)j | +------------+----------------+-----------------+--------------------------+ | **bj** | (b*c)j | -(b*d) | -(b*d) + (b*c)j | +------------+----------------+-----------------+--------------------------+ | **a + bj** | (a*c) + (b*c)j | -(b*d) + (a*d)j | special rules | +------------+----------------+-----------------+--------------------------+ In general, for complex floating-point operands, real-valued floating-point special cases must independently apply to the real and imaginary component operations involving real numbers as described in the above table. When ``a``, ``b``, ``c``, or ``d`` are all finite numbers (i.e., a value other than ``NaN``, ``+infinity``, or ``-infinity``), multiplication of complex floating-point operands should be computed as if calculated according to the textbook formula for complex number multiplication .. math:: (a + bj) \cdot (c + dj) = (ac - bd) + (bc + ad)j When at least one of ``a``, ``b``, ``c``, or ``d`` is ``NaN``, ``+infinity``, or ``-infinity``, - If ``a``, ``b``, ``c``, and ``d`` are all ``NaN``, the result is ``NaN + NaN j``. - In the remaining cases, the result is implementation dependent. .. note:: For complex floating-point operands, the results of special cases may be implementation dependent depending on how an implementation chooses to model complex numbers and complex infinity (e.g., complex plane versus Riemann sphere). For those implementations following C99 and its one-infinity model, when at least one component is infinite, even if the other component is ``NaN``, the complex value is infinite, and the usual arithmetic rules do not apply to complex-complex multiplication. In the interest of performance, other implementations may want to avoid the complex branching logic necessary to implement the one-infinity model and choose to implement all complex-complex multiplication according to the textbook formula. Accordingly, special case behavior is unlikely to be consistent across implementations. Parameters ---------- x1 first input array. Should have a numeric data type. x2 second input array. Must be compatible with ``x1`` (see :ref'`broadcasting`). Should have a numeric data type out optional output array, for writing the array result to. It must have a shape that the inputs broadcast to. This function conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.multiply.html>`_ in the standard. Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments. Returns ------- ret an array containing the element-wise products. The returned array must have a data type determined by :ref:`Type Promotion Rules`. 
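As a worked instance of the textbook formula above (plain arithmetic, not backend output): ``(1 + 2j) * (3 + 4j) = (1*3 - 2*4) + (2*3 + 1*4)j = -5 + 10j``.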
Examples -------- With :code:`ivy.Array` inputs: >>> x1 = ivy.array([3., 5., 7.]) >>> x2 = ivy.array([4., 6., 8.]) >>> y = ivy.multiply(x1, x2) >>> print(y) ivy.array([12., 30., 56.]) With :code:`ivy.NativeArray` inputs: >>> x1 = ivy.native_array([1., 3., 9.]) >>> x2 = ivy.native_array([4., 7.2, 1.]) >>> y = ivy.multiply(x1, x2) >>> print(y) ivy.array([ 4. , 21.6, 9. ]) With mixed :code:`ivy.Array` and :code:`ivy.NativeArray` inputs: >>> x1 = ivy.array([8., 6., 7.]) >>> x2 = ivy.native_array([1., 2., 3.]) >>> y = ivy.multiply(x1, x2) >>> print(y) ivy.array([ 8., 12., 21.]) With :code:`ivy.Container` inputs: >>> x1 = ivy.Container(a=ivy.array([12.,4.,6.]), b=ivy.array([3.,1.,5.])) >>> x2 = ivy.Container(a=ivy.array([1.,3.,4.]), b=ivy.array([3.,3.,2.])) >>> y = ivy.multiply(x1, x2) >>> print(y) { a: ivy.array([12.,12.,24.]), b: ivy.array([9.,3.,10.]) } With mixed :code:`ivy.Container` and :code:`ivy.Array` inputs: >>> x1 = ivy.Container(a=ivy.array([3., 4., 5.]), b=ivy.array([2., 2., 1.])) >>> x2 = ivy.array([1.,2.,3.]) >>> y = ivy.multiply(x1, x2) >>> print(y) { a: ivy.array([3.,8.,15.]), b: ivy.array([2.,4.,3.]) } """ return ivy.current_backend(x1, x2).multiply(x1, x2, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def isfinite( x: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Test each element ``x_i`` of the input array ``x`` to determine if finite (i.e., not ``NaN`` and not equal to positive or negative infinity). Parameters ---------- x input array. Should have a numeric data type. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing test results. An element ``out_i`` is ``True`` if ``x_i`` is finite and ``False`` otherwise. The returned array must have a data type of ``bool``. **Special Cases** For real-valued floating-point operands, - If ``x_i`` is either ``+infinity`` or ``-infinity``, the result is ``False``. - If ``x_i`` is ``NaN``, the result is ``False``. - If ``x_i`` is a finite number, the result is ``True``. For complex floating-point operands, let ``a = real(x_i)``, ``b = imag(x_i)``, and - If ``a`` is ``NaN`` or ``b`` is ``NaN``, the result is ``False``. - If ``a`` is either ``+infinity`` or ``-infinity`` and ``b`` is any value, the result is ``False``. - If ``a`` is any value and ``b`` is either ``+infinity`` or ``-infinity``, the result is ``False``. - If ``a`` is a finite number and ``b`` is a finite number, the result is ``True``. This method conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.isfinite.html>`_ in the standard. Both the description and the type hints above assume an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments.
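For complex inputs, the rules above mean a value is finite only when both its real and imaginary components are finite; for example (an illustrative sketch assuming a backend with complex dtype support; printed formatting may differ): >>> ivy.isfinite(ivy.array([1 + 1j, complex(0, float('inf')), complex(float('nan'), 0)])) ivy.array([ True, False, False])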
Examples -------- With :class:`ivy.Array` input: >>> x = ivy.array([0, ivy.nan, -ivy.inf, float('inf')]) >>> y = ivy.isfinite(x) >>> print(y) ivy.array([ True, False, False, False]) >>> x = ivy.array([0, ivy.nan, -ivy.inf]) >>> y = ivy.zeros(3) >>> ivy.isfinite(x, out=y) >>> print(y) ivy.array([1., 0., 0.]) >>> x = ivy.array([[9, float('-0')], [ivy.nan, ivy.inf]]) >>> ivy.isfinite(x, out=x) >>> print(x) ivy.array([[1., 1.], [0., 0.]]) With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([0., 999999999999]), ... b=ivy.array([float('-0'), ivy.nan])) >>> y = ivy.isfinite(x) >>> print(y) { a: ivy.array([True, True]), b: ivy.array([True, False]) } """ return ivy.current_backend(x).isfinite(x, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def isinf( x: Union[ivy.Array, ivy.NativeArray], /, *, detect_positive: bool = True, detect_negative: bool = True, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Test each element x_i of the input array x to determine if equal to positive or negative infinity. Parameters ---------- x input array. Should have a numeric data type. detect_positive if ``True``, positive infinity is detected. detect_negative if ``True``, negative infinity is detected. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing test results. An element out_i is True if x_i is either positive or negative infinity and False otherwise. The returned array must have a data type of bool. **Special Cases** For real-valued floating-point operands, - If x_i is either +infinity or -infinity, the result is ``True``. - In the remaining cases, the result is ``False``. For complex floating-point operands, let ``a = real(x_i)``, ``b = imag(x_i)``, and - If ``a`` is either ``+infinity`` or ``-infinity`` and ``b`` is any value (including ``NaN``), the result is ``True``. - If ``a`` is either a finite number or ``NaN`` and ``b`` is either ``+infinity`` or ``-infinity``, the result is ``True``. - In the remaining cases, the result is ``False``. This function conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.isinf.html>`_ in the standard. Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments. Examples -------- With :class:`ivy.Array` inputs: >>> x = ivy.array([1, 2, 3]) >>> z = ivy.isinf(x) >>> print(z) ivy.array([False, False, False]) >>> x = ivy.array([[1.1, 2.3, -3.6]]) >>> z = ivy.isinf(x) >>> print(z) ivy.array([[False, False, False]]) >>> x = ivy.array([[[1.1], [float('inf')], [-6.3]]]) >>> z = ivy.isinf(x) >>> print(z) ivy.array([[[False], [True], [False]]]) >>> x = ivy.array([[-float('inf'), float('inf'), 0.0]]) >>> z = ivy.isinf(x) >>> print(z) ivy.array([[ True, True, False]]) >>> x = ivy.zeros((3, 3)) >>> z = ivy.isinf(x) >>> print(z) ivy.array([[False, False, False], [False, False, False], [False, False, False]]) With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([-1, -float('inf'), 1.23]), ... 
b=ivy.array([float('inf'), 3.3, -4.2])) >>> z = ivy.isinf(x) >>> print(z) { a: ivy.array([False, True, False]), b: ivy.array([True, False, False]) } With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([-1, -float('inf'), 1.23]), ... b=ivy.array([float('inf'), 3.3, -4.2])) >>> x.isinf() { a: ivy.array([False, True, False]), b: ivy.array([True, False, False]) } """ return ivy.current_backend(x).isinf( x, detect_positive=detect_positive, detect_negative=detect_negative, out=out ) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def isnan( x: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Test each element ``x_i`` of the input array ``x`` to determine whether the element is ``NaN``. Parameters ---------- x input array. Should have a numeric data type. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing test results. An element ``out_i`` is ``True`` if ``x_i`` is ``NaN`` and ``False`` otherwise. The returned array should have a data type of ``bool``. **Special Cases** For real-valued floating-point operands, - If ``x_i`` is ``NaN``, the result is ``True``. - In the remaining cases, the result is ``False``. For complex floating-point operands, let ``a = real(x_i)``, ``b = imag(x_i)``, and - If ``a`` or ``b`` is ``NaN``, the result is ``True``. - In the remaining cases, the result is ``False``. This function conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.isnan.html>`_ in the standard. Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments Examples -------- With :class:`ivy.Array` inputs: >>> x = ivy.array([1, 2, 3]) >>> z = ivy.isnan(x) >>> print(z) ivy.array([False, False, False]) >>> x = ivy.array([[1.1, 2.3, -3.6]]) >>> z = ivy.isnan(x) >>> print(z) ivy.array([[False, False, False]]) >>> x = ivy.array([[[1.1], [float('inf')], [-6.3]]]) >>> z = ivy.isnan(x) >>> print(z) ivy.array([[[False], [False], [False]]]) >>> x = ivy.array([[-float('nan'), float('nan'), 0.0]]) >>> z = ivy.isnan(x) >>> print(z) ivy.array([[ True, True, False]]) >>> x = ivy.array([[-float('nan'), float('inf'), float('nan'), 0.0]]) >>> z = ivy.isnan(x) >>> print(z) ivy.array([[ True, False, True, False]]) >>> x = ivy.zeros((3, 3)) >>> z = ivy.isnan(x) >>> print(z) ivy.array([[False, False, False], [False, False, False], [False, False, False]]) With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([-1, -float('nan'), 1.23]), ... 
b=ivy.array([float('nan'), 3.3, -4.2])) >>> z = ivy.isnan(x) >>> print(z) { a: ivy.array([False, True, False]), b: ivy.array([True, False, False]) } """ return ivy.current_backend(x).isnan(x, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def less( x1: Union[float, ivy.Array, ivy.NativeArray], x2: Union[float, ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Compute the truth value of ``x1_i < x2_i`` for each element ``x1_i`` of the input array ``x1`` with the respective element ``x2_i`` of the input array ``x2``. Parameters ---------- x1 first input array. Should have a numeric data type. x2 second input array. Must be compatible with ``x1`` (see ref:`broadcasting`). Should have a numeric data type. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the element-wise results. The returned array must have a data type of ``bool``. Examples -------- With :class:`ivy.Array` input: >>> x = ivy.less(ivy.array([1,2,3]),ivy.array([2,2,2])) >>> print(x) ivy.array([ True, False, False]) >>> x = ivy.array([[[1.1], [3.2], [-6.3]]]) >>> y = ivy.array([[8.4], [2.5], [1.6]]) >>> ivy.less(x, y, out=x) >>> print(x) ivy.array([[[1.], [0.], [1.]]]) With a mix of :class:`ivy.Array` and :class:`ivy.NativeArray` inputs: >>> x = ivy.array([1, 2, 3]) >>> y = ivy.native_array([4, 5, 0]) >>> z = ivy.less(x, y) >>> print(z) ivy.array([ True, True, False]) With a mix of :class:`ivy.Array` and :class:`ivy.Container` inputs: >>> x = ivy.array([[5.1, 2.3, -3.6]]) >>> y = ivy.Container(a=ivy.array([[4.], [5.], [6.]]), ... b=ivy.array([[5.], [6.], [7.]])) >>> z = ivy.less(x, y) >>> print(z) { a: ivy.array([[False, True, True], [False, True, True], [True, True, True]]), b: ivy.array([[False, True, True], [True, True, True], [True, True, True]]) } With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([4, 5, 6]),b=ivy.array([2, 3, 4])) >>> y = ivy.Container(a=ivy.array([1, 2, 3]),b=ivy.array([5, 6, 7])) >>> z = ivy.less(x, y) >>> print(z) { a: ivy.array([False, False, False]), b: ivy.array([True, True, True]) } """ return ivy.current_backend(x1).less(x1, x2, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def log( x: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Calculate an implementation-dependent approximation to the natural (base ``e``) logarithm, having domain ``[0, +infinity]`` and codomain ``[-infinity, +infinity]``, for each element ``x_i`` of the input array ``x``. **Special cases** For floating-point operands, - If ``x_i`` is ``NaN``, the result is ``NaN``. - If ``x_i`` is less than ``0``, the result is ``NaN``. - If ``x_i`` is either ``+0`` or ``-0``, the result is ``-infinity``. - If ``x_i`` is ``1``, the result is ``+0``. - If ``x_i`` is ``+infinity``, the result is ``+infinity``. For complex floating-point operands, let ``a = real(x_i)``, ``b = imag(x_i)``, and - If ``a`` is ``-0`` and ``b`` is ``+0``, the result is ``-infinity + πj``. - If ``a`` is ``+0`` and ``b`` is ``+0``, the result is ``-infinity + 0j``. - If ``a`` is a finite number and ``b`` is ``+infinity``, the result is ``+infinity + πj/2``. 
- If ``a`` is a finite number and ``b`` is ``NaN``, the result is ``NaN + NaN j``. - If ``a`` is ``-infinity`` and ``b`` is a positive (i.e., greater than ``0``) finite number, the result is ``+infinity + πj``. - If ``a`` is ``+infinity`` and ``b`` is a positive (i.e., greater than ``0``) finite number, the result is ``+infinity + 0j``. - If ``a`` is ``-infinity`` and ``b`` is ``+infinity``, the result is ``+infinity + 3πj/4``. - If ``a`` is ``+infinity`` and ``b`` is ``+infinity``, the result is ``+infinity + πj/4``. - If ``a`` is either ``+infinity`` or ``-infinity`` and ``b`` is ``NaN``, the result is ``+infinity + NaN j``. - If ``a`` is ``NaN`` and ``b`` is a finite number, the result is ``NaN + NaN j``. - If ``a`` is ``NaN`` and ``b`` is ``+infinity``, the result is ``+infinity + NaN j``. - If ``a`` is ``NaN`` and ``b`` is ``NaN``, the result is ``NaN + NaN j``. Parameters ---------- x input array. Should have a floating-point data type. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the evaluated natural logarithm for each element in ``x``. The returned array must have a floating-point data type determined by :ref:`type-promotion`. Examples -------- With :class:`ivy.Array` input: >>> x = ivy.array([4.0, 1, -0.0, -5.0]) >>> y = ivy.log(x) >>> print(y) ivy.array([1.39, 0., -inf, nan]) >>> x = ivy.array([[float('nan'), 1, 5.0, float('+inf')], ... [+0, -1.0, -5, float('-inf')]]) >>> y = ivy.log(x) >>> print(y) ivy.array([[nan, 0., 1.61, inf], [-inf, nan, nan, nan]]) With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([0.0, float('nan')]), ... b=ivy.array([-0., -3.9, float('+inf')]), ... c=ivy.array([7.9, 1.1, 1.])) >>> y = ivy.log(x) >>> print(y) { a: ivy.array([-inf, nan]), b: ivy.array([-inf, nan, inf]), c: ivy.array([2.07, 0.0953, 0.]) } """ return ivy.current_backend(x).log(x, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def log10( x: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: r"""Calculate an implementation-dependent approximation to the base ``10`` logarithm, having domain ``[0, +infinity]`` and codomain ``[-infinity, +infinity]``, for each element ``x_i`` of the input array ``x``. **Special cases** For floating-point operands, - If ``x_i`` is ``NaN``, the result is ``NaN``. - If ``x_i`` is less than ``0``, the result is ``NaN``. - If ``x_i`` is either ``+0`` or ``-0``, the result is ``-infinity``. - If ``x_i`` is ``1``, the result is ``+0``. - If ``x_i`` is ``+infinity``, the result is ``+infinity``. For complex floating-point operands, special cases must be handled as if the operation is implemented using the standard change of base formula .. math:: \log_{10} x = \frac{\log_{e} x}{\log_{e} 10} where :math:`\log_{e}` is the natural logarithm. Parameters ---------- x input array. Should have a floating-point data type. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the evaluated base ``10`` logarithm for each element in ``x``. The returned array must have a floating-point data type determined by :ref:`type-promotion`. This function conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. 
This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.log10.html>`_ in the standard. Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments. Examples -------- With :class:`ivy.Array` input: >>> x = ivy.array([4.0, 1, -0.0, -5.0]) >>> y = ivy.log10(x) >>> print(y) ivy.array([0.602, 0., -inf, nan]) >>> x = ivy.array([[float('nan'), 1, 5.0, float('+inf')], ... [+0, -1.0, -5, float('-inf')]]) >>> y = ivy.log10(x) >>> print(y) ivy.array([[nan, 0., 0.699, inf], [-inf, nan, nan, nan]]) With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([0.0, float('nan')]), ... b=ivy.array([-0., -3.9, float('+inf')]), ... c=ivy.array([7.9, 1.1, 1.])) >>> y = ivy.log10(x) >>> print(y) { a: ivy.array([-inf, nan]), b: ivy.array([-inf, nan, inf]), c: ivy.array([0.898, 0.0414, 0.]) } """ return ivy.current_backend(x).log10(x, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def log1p( x: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Calculate an implementation-dependent approximation to log(1+x), where log refers to the natural (base e) logarithm. .. note:: The purpose of this function is to calculate ``log(1+x)`` more accurately when `x` is close to zero. Accordingly, conforming implementations should avoid implementing this function as simply ``log(1+x)``. See FDLIBM, or some other IEEE 754-2019 compliant mathematical library, for a potential reference implementation. **Special cases** For floating-point operands, - If ``x_i`` is ``NaN``, the result is ``NaN``. - If ``x_i`` is less than ``-1``, the result is ``NaN``. - If ``x_i`` is ``-1``, the result is ``-infinity``. - If ``x_i`` is ``-0``, the result is ``-0``. - If ``x_i`` is ``+0``, the result is ``+0``. - If ``x_i`` is ``+infinity``, the result is ``+infinity``. For complex floating-point operands, let ``a = real(x_i)``, ``b = imag(x_i)``, and - If ``a`` is ``-1`` and ``b`` is ``+0``, the result is ``-infinity + 0j``. - If ``a`` is a finite number and ``b`` is ``+infinity``, the result is ``+infinity + πj/2``. - If ``a`` is a finite number and ``b`` is ``NaN``, the result is ``NaN + NaN j``. - If ``a`` is ``-infinity`` and ``b`` is a positive (i.e., greater than ``0``) finite number, the result is ``+infinity + πj``. - If ``a`` is ``+infinity`` and ``b`` is a positive (i.e., greater than ``0``) finite number, the result is ``+infinity + 0j``. - If ``a`` is ``-infinity`` and ``b`` is ``+infinity``, the result is ``+infinity + 3πj/4``. - If ``a`` is ``+infinity`` and ``b`` is ``+infinity``, the result is ``+infinity + πj/4``. - If ``a`` is either ``+infinity`` or ``-infinity`` and ``b`` is ``NaN``, the result is ``+infinity + NaN j``. - If ``a`` is ``NaN`` and ``b`` is a finite number, the result is ``NaN + NaN j``. - If ``a`` is ``NaN`` and ``b`` is ``+infinity``, the result is ``+infinity + NaN j``. - If ``a`` is ``NaN`` and ``b`` is ``NaN``, the result is ``NaN + NaN j``. Parameters ---------- x input array. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the evaluated Natural logarithm of 1 + x for each element in ``x``. 
The returned array must have a floating-point data type determined by :ref:`type-promotion`. This function conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.log1p.html>`_ in the standard. Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments. Examples -------- With :class:`ivy.Array` input: >>> x = ivy.array([1., 2., 3.]) >>> y = x.log1p() >>> print(y) ivy.array([0.693, 1.1 , 1.39 ]) >>> x = ivy.array([0. , 1.]) >>> y = ivy.zeros(2) >>> ivy.log1p(x , out = y) >>> print(y) ivy.array([0. , 0.693]) >>> x = ivy.array([[1.1, 2.2, 3.3],[4.4, 5.5, 6.6]]) >>> ivy.log1p(x, out = x) >>> print(x) ivy.array([[0.742, 1.16 , 1.46 ],[1.69 , 1.87 , 2.03 ]]) With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([0., 1., 2.]), b=ivy.array([3., 4., 5.1])) >>> y = ivy.log1p(x) >>> print(y) { a: ivy.array([0., 0.693, 1.1]), b: ivy.array([1.39, 1.61, 1.81]) } """ return ivy.current_backend(x).log1p(x, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def log2( x: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: r"""Calculate an implementation-dependent approximation to the base ``2`` logarithm, having domain ``[0, +infinity]`` and codomain ``[-infinity, +infinity]``, for each element ``x_i`` of the input array ``x``. **Special cases** For floating-point operands, - If ``x_i`` is ``NaN``, the result is ``NaN``. - If ``x_i`` is less than ``0``, the result is ``NaN``. - If ``x_i`` is either ``+0`` or ``-0``, the result is ``-infinity``. - If ``x_i`` is ``1``, the result is ``+0``. - If ``x_i`` is ``+infinity``, the result is ``+infinity``. For complex floating-point operands, special cases must be handled as if the operation is implemented using the standard change of base formula .. math:: \log_{2} x = \frac{\log_{e} x}{\log_{e} 2} where :math:`\log_{e}` is the natural logarithm. Parameters ---------- x input array. Should have a floating-point data type. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the evaluated base ``2`` logarithm for each element in ``x``. The returned array must have a floating-point data type determined by :ref:`type-promotion`. This method conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.log2.html>`_ in the standard. Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments. Examples -------- With :class:`ivy.Array` input: >>> x = ivy.array([5.0, 1, -0.0, -6.0]) >>> y = ivy.log2(x) >>> print(y) ivy.array([2.32, 0., -inf, nan]) >>> x = ivy.array([[float('nan'), 1, 6.0, float('+inf')], ... 
[+0, -2.0, -7, float('-inf')]]) >>> y = ivy.empty_like(x) >>> ivy.log2(x, out=y) >>> print(y) ivy.array([[nan, 0., 2.58, inf],[-inf, nan, nan, nan]]) >>> x = ivy.array([[float('nan'), 1, 7.0, float('+inf')], ... [+0, -3.0, -8, float('-inf')]]) >>> ivy.log2(x, out=x) >>> print(x) ivy.array([[nan, 0., 2.81, inf],[-inf, nan, nan, nan]]) With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([0.0, float('nan')]), ... b=ivy.array([-0., -4.9, float('+inf')]), ... c=ivy.array([8.9, 2.1, 1.])) >>> y = ivy.log2(x) >>> print(y) { a: ivy.array([-inf, nan]), b: ivy.array([-inf, nan, inf]), c: ivy.array([3.15, 1.07, 0.]) } """ return ivy.current_backend(x).log2(x, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def logaddexp( x1: Union[ivy.Array, ivy.NativeArray], x2: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Calculate the logarithm of the sum of exponentiations ``log(exp(x1) + exp(x2))`` for each element ``x1_i`` of the input array ``x1`` with the respective element ``x2_i`` of the input array ``x2``. **Special cases** For floating-point operands, - If either ``x1_i`` or ``x2_i`` is ``NaN``, the result is ``NaN``. - If ``x1_i`` is ``+infinity`` and ``x2_i`` is not ``NaN``, the result is ``+infinity``. - If ``x1_i`` is not ``NaN`` and ``x2_i`` is ``+infinity``, the result is ``+infinity``. Parameters ---------- x1 first input array. Should have a floating-point data type. x2 second input array. Must be compatible with ``x1`` (see :ref:`broadcasting`). Should have a floating-point data type. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the element-wise results. The returned array must have a floating-point data type determined by :ref:`type-promotion`. This function conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.logaddexp.html>`_ in the standard. Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments. Examples -------- With :class:`ivy.Array` input: >>> x = ivy.array([2., 5., 15.]) >>> y = ivy.array([3., 2., 4.]) >>> z = ivy.logaddexp(x, y) >>> print(z) ivy.array([ 3.31, 5.05, 15. ]) >>> x = ivy.array([[[1.1], [3.2], [-6.3]]]) >>> y = ivy.array([[8.4], [2.5], [1.6]]) >>> ivy.logaddexp(x, y, out=x) >>> print(x) ivy.array([[[8.4], [3.6], [1.6]]]) With one :class:`ivy.Container` input: >>> x = ivy.array([[5.1, 2.3, -3.6]]) >>> y = ivy.Container(a=ivy.array([[4.], [5.], [6.]]), ... 
b=ivy.array([[5.], [6.], [7.]])) >>> z = ivy.logaddexp(x, y) >>> print(z) { a: ivy.array([[5.39, 4.17, 4.], [5.74, 5.07, 5.], [6.34, 6.02, 6.]]), b: ivy.array([[5.74, 5.07, 5.], [6.34, 6.02, 6.], [7.14, 7.01, 7.]]) } With multiple :class:`ivy.Container` inputs: >>> x = ivy.Container(a=ivy.array([4., 5., 6.]),b=ivy.array([2., 3., 4.])) >>> y = ivy.Container(a=ivy.array([1., 2., 3.]),b=ivy.array([5., 6., 7.])) >>> z = ivy.logaddexp(y,x) >>> print(z) { a: ivy.array([4.05, 5.05, 6.05]), b: ivy.array([5.05, 6.05, 7.05]) } """ return ivy.current_backend(x1, x2).logaddexp(x1, x2, out=out) @handle_backend_invalid @handle_nestable @handle_out_argument @to_native_arrays_and_back @handle_device def logaddexp2( x1: Union[ivy.Array, ivy.NativeArray, float, list, tuple], x2: Union[ivy.Array, ivy.NativeArray, float, list, tuple], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Calculate log2(2**x1 + 2**x2). Parameters ---------- x1 First array-like input. x2 Second array-input. out optional output array, for writing the result to. Returns ------- ret Element-wise logaddexp2 of x1 and x2. Examples -------- >>> x1 = ivy.array([1, 2, 3]) >>> x2 = ivy.array([4, 5, 6]) >>> ivy.logaddexp2(x1, x2) ivy.array([4.169925, 5.169925, 6.169925]) """ return ivy.current_backend(x1, x2).logaddexp2(x1, x2, out=out) # ToDo: compare the examples against special case for zeros. @handle_exceptions @handle_backend_invalid @handle_nestable @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def logical_and( x1: Union[ivy.Array, ivy.NativeArray], x2: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Compute the logical AND for each element x1_i of the input array x1 with the respective element x2_i of the input array x2. Parameters ---------- x1 first input array. Should have a boolean data type. x2 second input array. Must be compatible with x1. Should have a boolean data type. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the element-wise results. The returned array must have a data type of bool. This function conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.logical_and.html>`_ in the standard. Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments Examples -------- With :class:`ivy.Array` input: >>> x = ivy.array([True, True, False]) >>> y = ivy.array([True, False, True]) >>> print(ivy.logical_and(x, y)) ivy.array([True,False,False]) >>> ivy.logical_and(x, y, out=y) >>> print(y) ivy.array([True,False,False]) With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([False, True, True]), ... b=ivy.array([True, False, False])) >>> y = ivy.Container(a=ivy.array([True, True, False]), ... b=ivy.array([False, False, True])) >>> print(ivy.logical_and(y, x)) { a: ivy.array([False, True, False]), b: ivy.array([False, False, False]) } >>> ivy.logical_and(y, x, out=y) >>> print(y) { a: ivy.array([False, True, False]), b: ivy.array([False, False, False]) } >>> x = ivy.Container(a=ivy.array([False, True, True]), ... 
b=ivy.array([True, False, False])) >>> y = ivy.array([True, False, True]) >>> print(ivy.logical_and(y, x)) { a: ivy.array([False, False, True]), b: ivy.array([True, False, False]) } >>> x = ivy.Container(a=ivy.array([False, True, True]), ... b=ivy.array([True, False, False])) >>> y = ivy.array([True, False, True]) >>> ivy.logical_and(y, x, out=x) >>> print(x) { a: ivy.array([False, False, True]), b: ivy.array([True, False, False]) } """ return ivy.current_backend(x1, x2).logical_and(x1, x2, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def logical_not( x: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Compute the logical NOT for each element ``x_i`` of the input array ``x``. .. note:: While this specification recommends that this function only accept input arrays having a boolean data type, specification-compliant array libraries may choose to accept input arrays having numeric data types. If non-boolean data types are supported, zeros must be considered the equivalent of ``False``, while non-zeros must be considered the equivalent of ``True``. **Special cases** For this particular case, - If ``x_i`` is ``NaN``, the result is ``False``. - If ``x_i`` is ``-0``, the result is ``True``. - If ``x_i`` is ``-infinity``, the result is ``False``. - If ``x_i`` is ``+infinity``, the result is ``False``. Parameters ---------- x input array. Should have a boolean data type. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the element-wise results. The returned array must have a data type of ``bool``. This function conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.logical_not.html>`_ in the standard. 
Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments Examples -------- With :class:`ivy.Array` input: >>> x=ivy.array([1,0,1,1,0]) >>> y=ivy.logical_not(x) >>> print(y) ivy.array([False, True, False, False, True]) >>> x=ivy.array([2,0,3,5]) >>> y=ivy.logical_not(x) >>> print(y) ivy.array([False, True, False, False]) >>> x=ivy.native_array([1,0,6,5]) >>> y=ivy.logical_not(x) >>> print(y) ivy.array([False, True, False, False]) With :class:`ivy.Container` input: >>> x=ivy.Container(a=ivy.array([1,0,1,1]), b=ivy.array([1,0,8,9])) >>> y=ivy.logical_not(x) >>> print(y) { a: ivy.array([False, True, False, False]), b: ivy.array([False, True, False, False]) } >>> x=ivy.Container(a=ivy.array([1,0,1,0]), b=ivy.native_array([5,2,0,3])) >>> y=ivy.logical_not(x) >>> print(y) { a: ivy.array([False, True, False, True]), b: ivy.array([False, False, True, False]) } """ return ivy.current_backend(x).logical_not(x, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def logical_or( x1: Union[ivy.Array, ivy.NativeArray], x2: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Compute the logical OR for each element ``x1_i`` of the input array ``x1`` with the respective element ``x2_i`` of the input array ``x2``. .. note:: While this specification recommends that this function only accept input arrays having a boolean data type, specification-compliant array libraries may choose to accept input arrays having numeric data types. If non-boolean data types are supported, zeros must be considered the equivalent of ``False``, while non-zeros must be considered the equivalent of ``True``. Parameters ---------- x1 first input array. Should have a boolean data type. x2 second input array. Must be compatible with ``x1`` (see :ref:`broadcasting`). Should have a boolean data type. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the element-wise results. The returned array must have a data type of ``bool``. This function conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.logical_or.html>`_ in the standard. Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments. Examples -------- With :class:`ivy.Array` input: >>> x = ivy.array([True, False, True]) >>> y = ivy.array([True, True, False]) >>> print(ivy.logical_or(x, y)) ivy.array([ True, True, True]) >>> x = ivy.array([[False, False, True], [True, False, True]]) >>> y = ivy.array([[False, True, False], [True, True, False]]) >>> z = ivy.zeros_like(x) >>> ivy.logical_or(x, y, out=z) >>> print(z) ivy.array([[False, True, True], [ True, True, True]]) >>> x = ivy.array([False, 3, 0]) >>> y = ivy.array([2, True, False]) >>> ivy.logical_or(x, y, out=x) >>> print(x) ivy.array([1, 1, 0]) With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([False, False, True]), ... b=ivy.array([True, False, True])) >>> y = ivy.Container(a=ivy.array([False, True, False]), ... 
b=ivy.array([True, True, False])) >>> z = ivy.logical_or(x, y) >>> print(z) { a: ivy.array([False, True, True]), b: ivy.array([True, True, True]) } """ return ivy.current_backend(x1, x2).logical_or(x1, x2, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def logical_xor( x1: Union[ivy.Array, ivy.NativeArray], x2: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Compute the logical XOR for each element ``x1_i`` of the input array ``x1`` with the respective element ``x2_i`` of the input array ``x2``. Parameters ---------- x1 first input array. Should have an integer or boolean data type. x2 second input array. Must be compatible with ``x1`` (see :ref:`broadcasting`). Should have an integer or boolean data type. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. This function conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.logical_xor.html>`_ in the standard. Both the description and the type hints above assume an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments. Returns ------- ret an array containing the element-wise results. The returned array must have a data type of ``bool``. Examples -------- With :class:`ivy.Array` inputs: >>> x = ivy.array([1,0,1,1,0]) >>> y = ivy.array([1,0,1,1,0]) >>> z = ivy.logical_xor(x,y) >>> print(z) ivy.array([False, False, False, False, False]) >>> x = ivy.array([[[1], [2], [3], [4]]]) >>> y = ivy.array([[[4], [5], [6], [7]]]) >>> z = ivy.logical_xor(x,y) >>> print(z) ivy.array([[[False], [False], [False], [False]]]) >>> x = ivy.array([[[1], [2], [3], [4]]]) >>> y = ivy.array([4, 5, 6, 7]) >>> z = ivy.logical_xor(x,y) >>> print(z) ivy.array([[[False, False, False, False], [False, False, False, False], [False, False, False, False], [False, False, False, False]]]) With :class:`ivy.Container` inputs: >>> x = ivy.Container(a=ivy.array([1,0,0,1,0]), b=ivy.array([1,0,1,0,0])) >>> y = ivy.Container(a=ivy.array([0,0,1,1,0]), b=ivy.array([1,0,1,1,0])) >>> z = ivy.logical_xor(x,y) >>> print(z) { a: ivy.array([True, False, True, False, False]), b: ivy.array([False, False, False, True, False]) } With a mix of :class:`ivy.Array` and :class:`ivy.Container` inputs: >>> x = ivy.Container(a=ivy.array([1,0,0,1,0]), b=ivy.array([1,0,1,0,0])) >>> y = ivy.array([0,0,1,1,0]) >>> z = ivy.logical_xor(x,y) >>> print(z) { a: ivy.array([True, False, True, False, False]), b: ivy.array([True, False, False, True, False]) } """ return ivy.current_backend(x1, x2).logical_xor(x1, x2, out=out) @handle_backend_invalid @handle_nestable @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back @handle_device def nan_to_num( x: Union[ivy.Array, ivy.NativeArray], /, *, copy: bool = True, nan: Union[float, int] = 0.0, posinf: Optional[Union[float, int]] = None, neginf: Optional[Union[float, int]] = None, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Replace NaN with zero and infinity with large finite numbers (default behaviour) or with the numbers defined by the user using the nan, posinf and/or neginf keywords.
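For example, all three replacement values can be supplied together (an illustrative sketch; printed formatting may vary by backend): >>> x = ivy.array([float('nan'), float('inf'), -float('inf'), 2.0]) >>> ivy.nan_to_num(x, nan=0.0, posinf=999.0, neginf=-999.0) ivy.array([ 0., 999., -999., 2.])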
Parameters ---------- x Array input. copy Whether to create a copy of x (True) or to replace values in-place (False). The in-place operation only occurs if casting to an array does not require a copy. Default is True. nan Value to be used to fill NaN values. If no value is passed then NaN values will be replaced with 0.0. posinf Value to be used to fill positive infinity values. If no value is passed then positive infinity values will be replaced with a very large number. neginf Value to be used to fill negative infinity values. If no value is passed then negative infinity values will be replaced with a very small (or negative) number. out optional output array, for writing the result to. Returns ------- ret Array with the non-finite values replaced. If copy is False, this may be x itself. Examples -------- >>> x = ivy.array([1, 2, 3, float('nan')]) >>> ivy.nan_to_num(x) ivy.array([1., 2., 3., 0.]) >>> x = ivy.array([1, 2, 3, float('inf')]) >>> ivy.nan_to_num(x, posinf=5e+100) ivy.array([1., 2., 3., 5e+100]) """ return ivy.current_backend(x).nan_to_num( x, copy=copy, nan=nan, posinf=posinf, neginf=neginf, out=out ) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def negative( x: Union[float, ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Return a new array with the negative value of each element in ``x``. .. note:: For signed integer data types, the numerical negative of the minimum representable integer is implementation-dependent. .. note:: If ``x`` has a complex floating-point data type, both the real and imaginary components for each ``x_i`` must be negated (a result which follows from the rules of complex number multiplication). Parameters ---------- x Input array. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret A new array with the negative value of each element in ``x``. This function conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.negative.html>`_ in the standard. Both the description and the type hints above assume an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments. Examples -------- With :class:`ivy.Array` input: >>> x = ivy.array([0,1,1,2]) >>> y = ivy.negative(x) >>> print(y) ivy.array([ 0, -1, -1, -2]) >>> x = ivy.array([0,-1,-0.5,2,3]) >>> y = ivy.zeros(5) >>> ivy.negative(x, out=y) >>> print(y) ivy.array([-0. , 1. , 0.5, -2. , -3. ]) >>> x = ivy.array([[1.1, 2.2, 3.3], ... [-4.4, -5.5, -6.6]]) >>> ivy.negative(x,out=x) >>> print(x) ivy.array([[-1.1, -2.2, -3.3], [4.4, 5.5, 6.6]]) With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([0., 1., 2.]), ...
b=ivy.array([3., 4., -5.])) >>> y = ivy.negative(x) >>> print(y) { a: ivy.array([-0., -1., -2.]), b: ivy.array([-3., -4., 5.]) } """ return ivy.current_backend(x).negative(x, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def not_equal( x1: Union[float, ivy.Array, ivy.NativeArray, ivy.Container], x2: Union[float, ivy.Array, ivy.NativeArray, ivy.Container], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Compute the truth value of ``x1_i != x2_i`` for each element ``x1_i`` of the input array ``x1`` with the respective element ``x2_i`` of the input array ``x2``. **Special Cases** For real-valued floating-point operands, - If ``x1_i`` is ``NaN`` or ``x2_i`` is ``NaN``, the result is ``True``. - If ``x1_i`` is ``+infinity`` and ``x2_i`` is ``-infinity``, the result is ``True``. - If ``x1_i`` is ``-infinity`` and ``x2_i`` is ``+infinity``, the result is ``True``. - If ``x1_i`` is a finite number, ``x2_i`` is a finite number, and ``x1_i`` does not equal ``x2_i``, the result is ``True``. - In the remaining cases, the result is ``False``. For complex floating-point operands, let ``a = real(x1_i)``, ``b = imag(x1_i)``, ``c = real(x2_i)``, ``d = imag(x2_i)``, and - If ``a``, ``b``, ``c``, or ``d`` is ``NaN``, the result is ``True``. - In the remaining cases, the result is the logical OR of the equality comparison between the real values ``a`` and ``c`` (real components) and between the real values ``b`` and ``d`` (imaginary components), as described above for real-valued floating-point operands (i.e., ``a != c OR b != d``). Parameters ---------- x1 first input array. Should have a numeric data type. x2 second input array. Must be compatible with ``x1`` (see ref:`broadcasting`). Should have a numeric data type. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the element-wise results. The returned array must have a data type of ``bool``. This function conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.not_equal.html>`_ in the standard. Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments. 
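For complex inputs, the component-wise rule above means two values differ when either their real or their imaginary parts differ; for example (an illustrative sketch assuming a backend with complex dtype support): >>> ivy.not_equal(ivy.array([1 + 1j, 2 + 3j]), ivy.array([1 + 1j, 2 - 3j])) ivy.array([False, True])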
Examples -------- With :class:`ivy.Array` inputs: >>> x1 = ivy.array([1, 0, 1, 1]) >>> x2 = ivy.array([1, 0, 0, -1]) >>> y = ivy.not_equal(x1, x2) >>> print(y) ivy.array([False, False, True, True]) >>> x1 = ivy.array([1, 0, 1, 0]) >>> x2 = ivy.array([0, 1, 0, 1]) >>> y = ivy.not_equal(x1, x2) >>> print(y) ivy.array([True, True, True, True]) >>> x1 = ivy.array([1, -1, 1, -1]) >>> x2 = ivy.array([0, -1, 1, 0]) >>> y = ivy.zeros(4) >>> ivy.not_equal(x1, x2, out=y) >>> print(y) ivy.array([1., 0., 0., 1.]) >>> x1 = ivy.array([1, -1, 1, -1]) >>> x2 = ivy.array([0, -1, 1, 0]) >>> y = ivy.not_equal(x1, x2, out=x1) >>> print(y) ivy.array([1, 0, 0, 1]) With a mix of :class:`ivy.Array` and :class:`ivy.NativeArray` inputs: >>> x1 = ivy.native_array([1, 2]) >>> x2 = ivy.array([1, 2]) >>> y = ivy.not_equal(x1, x2) >>> print(y) ivy.array([False, False]) >>> x1 = ivy.native_array([1, -1]) >>> x2 = ivy.array([0, 1]) >>> y = ivy.not_equal(x1, x2) >>> print(y) ivy.array([True, True]) >>> x1 = ivy.native_array([1, -1, 1, -1]) >>> x2 = ivy.native_array([0, -1, 1, 0]) >>> y = ivy.zeros(4) >>> ivy.not_equal(x1, x2, out=y) >>> print(y) ivy.array([1., 0., 0., 1.]) >>> x1 = ivy.native_array([1, 2, 3, 4]) >>> x2 = ivy.native_array([0, 2, 3, 4]) >>> y = ivy.zeros(4) >>> ivy.not_equal(x1, x2, out=y) >>> print(y) ivy.array([1., 0., 0., 0.]) With :class:`ivy.Container` input: >>> x1 = ivy.Container(a=ivy.array([1, 0, 3]), ... b=ivy.array([1, 2, 3]), ... c=ivy.native_array([1, 2, 4])) >>> x2 = ivy.Container(a=ivy.array([1, 2, 3]), ... b=ivy.array([1, 2, 3]), ... c=ivy.native_array([1, 2, 4])) >>> y = ivy.not_equal(x1, x2) >>> print(y) { a: ivy.array([False, True, False]), b: ivy.array([False, False, False]), c: ivy.array([False, False, False]) } >>> x1 = ivy.Container(a=ivy.native_array([0, 1, 0]), ... b=ivy.array([1, 2, 3]), ... c=ivy.native_array([1.0, 2.0, 4.0])) >>> x2 = ivy.Container(a=ivy.array([1, 2, 3]), ... b=ivy.native_array([1.1, 2.1, 3.1]), ... c=ivy.native_array([1, 2, 4])) >>> y = ivy.not_equal(x1, x2) >>> print(y) { a: ivy.array([True, True, True]), b: ivy.array([True, True, True]), c: ivy.array([False, False, False]) } With a mix of :class:`ivy.Array` and :class:`ivy.Container` inputs: >>> x1 = ivy.Container(a=ivy.array([1, 2, 3]), ... b=ivy.array([1, 3, 5])) >>> x2 = ivy.Container(a=ivy.array([1, 2, 3]), ... b=ivy.array([1, 4, 5])) >>> y = ivy.not_equal(x1, x2) >>> print(y) { a: ivy.array([False, False, False]), b: ivy.array([False, True, False]) } >>> x1 = ivy.Container(a=ivy.array([1.0, 2.0, 3.0]), ... b=ivy.array([1, 4, 5])) >>> x2 = ivy.Container(a=ivy.array([1, 2, 3.0]), ... b=ivy.array([1.0, 4.0, 5.0])) >>> y = ivy.not_equal(x1, x2) >>> print(y) { a: ivy.array([False, False, False]), b: ivy.array([False, False, False]) } """ return ivy.current_backend(x1, x2).not_equal(x1, x2, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def positive( x: Union[float, ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Return a new array with the positive value of each element in ``x``. Parameters ---------- x Input array. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret A new array with the positive value of each element in ``x``. This function conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. 
This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.positive.html>`_ in the standard. Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments Examples -------- With :class:`ivy.Array` input: >>> x = ivy.array([2, 3 ,5, 7]) >>> y = ivy.positive(x) >>> print(y) ivy.array([2, 3, 5, 7]) >>> x = ivy.array([0, -1, -0.5, 2, 3]) >>> y = ivy.zeros(5) >>> ivy.positive(x, out=y) >>> print(y) ivy.array([0., -1., -0.5, 2., 3.]) >>> x = ivy.array([[1.1, 2.2, 3.3], ... [-4.4, -5.5, -6.6]]) >>> ivy.positive(x,out=x) >>> print(x) ivy.array([[ 1.1, 2.2, 3.3], [-4.4, -5.5, -6.6]]) With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([0., 1., 2.]), ... b=ivy.array([3., 4., -5.])) >>> y = ivy.positive(x) >>> print(y) { a: ivy.array([0., 1., 2.]), b: ivy.array([3., 4., -5.]) } """ return ivy.current_backend(x).positive(x, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def pow( x1: Union[ivy.Array, ivy.NativeArray], x2: Union[int, float, ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Calculate an implementation-dependent approximation of exponentiation by raising each element ``x1_i`` (the base) of the input array ``x1`` to the power of ``x2_i`` (the exponent), where ``x2_i`` is the corresponding element of the input array ``x2``. **Special cases** For floating-point operands, - If ``x1_i`` is not equal to ``1`` and ``x2_i`` is ``NaN``, the result is ``NaN``. - If ``x2_i`` is ``+0``, the result is ``1``, even if ``x1_i`` is ``NaN``. - If ``x2_i`` is ``-0``, the result is ``1``, even if ``x1_i`` is ``NaN``. - If ``x1_i`` is ``NaN`` and ``x2_i`` is not equal to ``0``, the result is ``NaN``. - If ``abs(x1_i)`` is greater than ``1`` and ``x2_i`` is ``+infinity``, the result is ``+infinity``. - If ``abs(x1_i)`` is greater than ``1`` and ``x2_i`` is ``-infinity``, the result is ``+0``. - If ``abs(x1_i)`` is ``1`` and ``x2_i`` is ``+infinity``, the result is ``1``. - If ``abs(x1_i)`` is ``1`` and ``x2_i`` is ``-infinity``, the result is ``1``. - If ``x1_i`` is ``1`` and ``x2_i`` is not ``NaN``, the result is ``1``. - If ``abs(x1_i)`` is less than ``1`` and ``x2_i`` is ``+infinity``, the result is ``+0``. - If ``abs(x1_i)`` is less than ``1`` and ``x2_i`` is ``-infinity``, the result is ``+infinity``. - If ``x1_i`` is ``+infinity`` and ``x2_i`` is greater than ``0``, the result is ``+infinity``. - If ``x1_i`` is ``+infinity`` and ``x2_i`` is less than ``0``, the result is ``+0``. - If ``x1_i`` is ``-infinity``, ``x2_i`` is greater than ``0``, and ``x2_i`` is an odd integer value, the result is ``-infinity``. - If ``x1_i`` is ``-infinity``, ``x2_i`` is greater than ``0``, and ``x2_i`` is not an odd integer value, the result is ``+infinity``. - If ``x1_i`` is ``-infinity``, ``x2_i`` is less than ``0``, and ``x2_i`` is an odd integer value, the result is ``-0``. - If ``x1_i`` is ``-infinity``, ``x2_i`` is less than ``0``, and ``x2_i`` is not an odd integer value, the result is ``+0``. - If ``x1_i`` is ``+0`` and ``x2_i`` is greater than ``0``, the result is ``+0``. - If ``x1_i`` is ``+0`` and ``x2_i`` is less than ``0``, the result is ``+infinity``. 
- If ``x1_i`` is ``-0``, ``x2_i`` is greater than ``0``, and ``x2_i`` is an odd integer value, the result is ``-0``. - If ``x1_i`` is ``-0``, ``x2_i`` is greater than ``0``, and ``x2_i`` is not an odd integer value, the result is ``+0``. - If ``x1_i`` is ``-0``, ``x2_i`` is less than ``0``, and ``x2_i`` is an odd integer value, the result is ``-infinity``. - If ``x1_i`` is ``-0``, ``x2_i`` is less than ``0``, and ``x2_i`` is not an odd integer value, the result is ``+infinity``. - If ``x1_i`` is less than ``0``, ``x1_i`` is a finite number, ``x2_i`` is a finite number, and ``x2_i`` is not an integer value, the result is ``NaN``. For complex floating-point operands, special cases should be handled as if the operation is implemented as ``exp(x2*log(x1))``. .. note:: Conforming implementations are allowed to treat special cases involving complex floating-point operands more carefully than as described in this specification. Parameters ---------- x1 first input array whose elements correspond to the exponentiation base. Should have a numeric data type. x2 second input array whose elements correspond to the exponentiation exponent. Must be compatible with ``x1`` (see :ref:`broadcasting`). Should have a numeric data type. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the element-wise results. The returned array must have a data type determined by :ref:`type-promotion`. This function conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.pow.html>`_ in the standard. Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments Examples -------- With :class:`ivy.Array` input: >>> x = ivy.array([1, 2, 3]) >>> y = ivy.pow(x, 3) >>> print(y) ivy.array([1, 8, 27]) >>> x = ivy.array([1.5, -0.8, 0.3]) >>> y = ivy.zeros(3) >>> ivy.pow(x, 2, out=y) >>> print(y) ivy.array([2.25, 0.64, 0.09]) >>> x = ivy.array([[1.2, 2, 3.1], [1, 2.5, 9]]) >>> ivy.pow(x, 2.3, out=x) >>> print(x) ivy.array([[ 1.52095687, 4.92457771, 13.49372482], [ 1. , 8.22738838, 156.5877228 ]]) With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([0, 1]), b=ivy.array([2, 3])) >>> y = ivy.pow(x, 3) >>> print(y) { a:ivy.array([0,1]), b:ivy.array([8,27]) } """ return ivy.current_backend(x1, x2).pow(x1, x2, out=out) pow.unsupported_gradients = {"torch": ["float16"]} def _complex_to_inf(exponent): if exponent < 0: return float("inf") + ivy.nan * 1j else: return -0 * 1j @handle_exceptions @handle_backend_invalid @handle_nestable @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back @handle_device def real( x: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Test each element ``x_i`` of the input array ``x`` to take only real part from it. Returns a float array, where it only contains . If element has complex type with zero complex part, the return value will be that element, else it only returns real part. Parameters ---------- x input array. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing test results. 
An element ``out_i`` is ``real number`` if ``x_i`` contain real number part only and if it is ``real number with complex part also`` then it returns the real number part. The returned array must have a floating-point data type with the same floating-point precision as ``x`` (e.g., if ``x`` is ``complex64``, the returned array must have the floating-point precision of ``float32``). The descriptions above assume an array input for simplicity, but the method also accepts :class:`ivy.Container` instances in place of: class:`ivy.Array` or :class:`ivy.NativeArray` instances, as shown in the type hints and also the examples below. Examples -------- With :class:`ivy.Array` inputs: >>> x = ivy.array([[[1.1], [2], [-6.3]]]) >>> z = ivy.real(x) >>> print(z) ivy.array([[[1.1], [2.], [-6.3]]]) >>> x = ivy.array([4.2-0j, 3j, 7+5j]) >>> z = ivy.real(x) >>> print(z) ivy.array([4.2, 0., 7.]) With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([-6.7-7j, 0.314+0.355j, 1.23]),\ b=ivy.array([5j, 5.32-6.55j, 3.001])) >>> z = ivy.real(x) >>> print(z) { a: ivy.array([-6.7, 0.314, 1.23]), b: ivy.array([0., 5.32, 3.001]) } """ return ivy.current_backend(x).real(x, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def remainder( x1: Union[float, ivy.Array, ivy.NativeArray], x2: Union[float, ivy.Array, ivy.NativeArray], /, *, modulus: bool = True, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Return the remainder of division for each element ``x1_i`` of the input array ``x1`` and the respective element ``x2_i`` of the input array ``x2``. .. note:: This function is equivalent to the Python modulus operator ``x1_i % x2_i``. For input arrays which promote to an integer data type, the result of division by zero is unspecified and thus implementation-defined. In general, similar to Python’s ``%`` operator, this function is not recommended for floating-point operands as semantics do not follow IEEE 754. That this function is specified to accept floating-point operands is primarily for reasons of backward compatibility. **Special Cases** For floating-point operands, - If either ``x1_i`` or ``x2_i`` is ``NaN``, the result is ``NaN``. - If ``x1_i`` is either ``+infinity`` or ``-infinity`` and ``x2_i`` is either ``+infinity`` or ``-infinity``, the result is ``NaN``. - If ``x1_i`` is either ``+0`` or ``-0`` and ``x2_i`` is either ``+0`` or ``-0``, the result is ``NaN``. - If ``x1_i`` is ``+0`` and ``x2_i`` is greater than ``0``, the result is ``+0``. - If ``x1_i`` is ``-0`` and ``x2_i`` is greater than ``0``, the result is ``+0``. - If ``x1_i`` is ``+0`` and ``x2_i`` is less than ``0``, the result is ``-0``. - If ``x1_i`` is ``-0`` and ``x2_i`` is less than ``0``, the result is ``-0``. - If ``x1_i`` is greater than ``0`` and ``x2_i`` is ``+0``, the result is ``NaN``. - If ``x1_i`` is greater than ``0`` and ``x2_i`` is ``-0``, the result is ``NaN``. - If ``x1_i`` is less than ``0`` and ``x2_i`` is ``+0``, the result is ``NaN``. - If ``x1_i`` is less than ``0`` and ``x2_i`` is ``-0``, the result is ``NaN``. - If ``x1_i`` is ``+infinity`` and ``x2_i`` is a positive (i.e., greater than ``0``) finite number, the result is ``NaN``. - If ``x1_i`` is ``+infinity`` and ``x2_i`` is a negative (i.e., less than ``0``) finite number, the result is ``NaN``. - If ``x1_i`` is ``-infinity`` and ``x2_i`` is a positive (i.e., greater than ``0``) finite number, the result is ``NaN``. 
- If ``x1_i`` is ``-infinity`` and ``x2_i`` is a negative (i.e., less than ``0``) finite number, the result is ``NaN``. - If ``x1_i`` is a positive (i.e., greater than ``0``) finite number and ``x2_i`` is ``+infinity``, the result is ``x1_i``. (note: this result matches Python behavior.) - If ``x1_i`` is a positive (i.e., greater than ``0``) finite number and ``x2_i`` is ``-infinity``, the result is ``x2_i``. (note: this result matches Python behavior.) - If ``x1_i`` is a negative (i.e., less than ``0``) finite number and ``x2_i`` is ``+infinity``, the result is ``x2_i``. (note: this results matches Python behavior.) - If ``x1_i`` is a negative (i.e., less than ``0``) finite number and ``x2_i`` is ``-infinity``, the result is ``x1_i``. (note: this result matches Python behavior.) - In the remaining cases, the result must match that of the Python ``%`` operator. Parameters ---------- x1 dividend input array. Should have a numeric data type. x2 divisor input array. Must be compatible with ``x1`` (see ref:`Broadcasting`). Should have a numeric data type. modulus whether to compute the modulus instead of the remainder. Default is ``True``. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the element-wise results. Each element-wise result must have the same sign as the respective element ``x2_i``. The returned array must have a data type determined by :ref:`Type Promotion Rules`. This function conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.remainder.html>`_ in the standard. Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments Examples -------- With :class:`ivy.Array` inputs: >>> x1 = ivy.array([2., 5., 15.]) >>> x2 = ivy.array([3., 2., 4.]) >>> y = ivy.remainder(x1, x2) >>> print(y) ivy.array([2., 1., 3.]) With mixed :class:`ivy.Array` and :class:`ivy.NativeArray` inputs: >>> x1 = ivy.array([23., 1., 6.]) >>> x2 = ivy.native_array([11., 2., 4.]) >>> y = ivy.remainder(x1, x2) >>> print(y) ivy.array([1., 1., 2.]) With :class:`ivy.Container` inputs: >>> x1 = ivy.Container(a=ivy.array([2., 3., 5.]), b=ivy.array([2., 2., 4.])) >>> x2 = ivy.Container(a=ivy.array([1., 3., 4.]), b=ivy.array([1., 3., 3.])) >>> y = ivy.remainder(x1, x2) >>> print(y) { a: ivy.array([0., 0., 1.]), b: ivy.array([0., 2., 1.]) } """ return ivy.current_backend(x1, x2).remainder(x1, x2, modulus=modulus, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def round( x: Union[ivy.Array, ivy.NativeArray], /, *, decimals: Optional[int] = 0, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Round each element ``x_i`` of the input array ``x`` to the nearest integer-valued number. .. note:: For complex floating-point operands, real and imaginary components must be independently rounded to the nearest integer-valued number. Rounded real and imaginary components must be equal to their equivalent rounded real-valued floating-point counterparts (i.e., for complex-valued ``x``, ``real(round(x))`` must equal ``round(real(x)))`` and ``imag(round(x))`` must equal ``round(imag(x))``). 
**Special cases** - If ``x_i`` is already an integer-valued, the result is ``x_i``. For floating-point operands, - If ``x_i`` is ``+infinity``, the result is ``+infinity``. - If ``x_i`` is ``-infinity``, the result is ``-infinity``. - If ``x_i`` is ``+0``, the result is ``+0``. - If ``x_i`` is ``-0``, the result is ``-0``. - If ``x_i`` is ``NaN``, the result is ``NaN``. - If two integers are equally close to ``x_i``, the result is the even integer closest to ``x_i``. .. note:: For complex floating-point operands, the following special cases apply to real and imaginary components independently (e.g., if ``real(x_i)`` is ``NaN``, the rounded real component is ``NaN``). - If ``x_i`` is already integer-valued, the result is ``x_i``. Parameters ---------- x input array containing elements to round. decimals number of decimal places to round to. Default is ``0``. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret An array of the same shape and type as x, with the elements rounded to integers. Note: PyTorch supports an additional argument :code:`decimals` for the `round function <https://pytorch.org/docs/stable/generated/torch.round.html>`_. It has been deliberately omitted here due to the imprecise nature of the argument in :code:`torch.round`. This function conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.round.html>`_ in the standard. Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments. Examples -------- With :class:`ivy.Array` input: >>> x = ivy.array([1.2, 2.4, 3.6]) >>> y = ivy.round(x) >>> print(y) ivy.array([1.,2.,4.]) >>> x = ivy.array([-0, 5, 4.5]) >>> y = ivy.round(x) >>> print(y) ivy.array([0.,5.,4.]) >>> x = ivy.array([1.5654, 2.034, 15.1, -5.0]) >>> y = ivy.zeros(4) >>> ivy.round(x, out=y) >>> print(y) ivy.array([2.,2.,15.,-5.]) >>> x = ivy.array([[0, 5.433, -343.3, 1.5], ... [-5.5, 44.2, 11.5, 12.01]]) >>> ivy.round(x, out=x) >>> print(x) ivy.array([[0.,5.,-343.,2.],[-6.,44.,12.,12.]]) With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([4.20, 8.6, 6.90, 0.0]), ... b=ivy.array([-300.9, -527.3, 4.5])) >>> y = ivy.round(x) >>> print(y) { a:ivy.array([4.,9.,7.,0.]), b:ivy.array([-301.,-527.,4.]) } """ return ivy.current_backend(x).round(x, decimals=decimals, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def sign( x: Union[ivy.Array, ivy.NativeArray], /, *, np_variant: Optional[bool] = True, out: Optional[ivy.Array] = None, ) -> ivy.Array: r"""Return an indication of the sign of a number for each element ``x_i`` of the input array ``x``. The sign function (also known as the **signum function**) of a number :math:`x_{i}` is defined as .. math:: \operatorname{sign}(x_i) = \begin{cases} 0 & \textrm{if } x_i = 0 \\ \frac{x}{|x|} & \textrm{otherwise} \end{cases} where :math:`|x_i|` is the absolute value of :math:`x_i`. **Special cases** - If ``x_i`` is less than ``0``, the result is ``-1``. - If ``x_i`` is either ``-0`` or ``+0``, the result is ``0``. - If ``x_i`` is greater than ``0``, the result is ``+1``. 
- For complex numbers ``sign(x.real) + 0j if x.real != 0 else sign(x.imag) + 0j`` For complex floating-point operands, let ``a = real(x_i)``, ``b = imag(x_i)``, and - If ``a`` is either ``-0`` or ``+0`` and ``b`` is either ``-0`` or ``+0``, the result is ``0 + 0j``. - If ``a`` is ``NaN`` or ``b`` is ``NaN``, the result is ``NaN + NaN j``. - In the remaining cases, special cases must be handled according to the rules of complex number division. Parameters ---------- x input array. Should have a numeric data type. np_variant Handles complex numbers like numpy does If ``True``, ``sign(x.real) + 0j if x.real != 0 else sign(x.imag) + 0j``. otherwise, For complex numbers, ``y = sign(x) = x / |x| if x != 0, otherwise y = 0.`` out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the evaluated result for each element in ``x``. The returned array must have the same data type as ``x``. This function conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.sign.html>`_ in the standard. Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments. Examples -------- With :class:`ivy.Array` input: >>> x = ivy.array([8.3, -0, 6.8, 0.07]) >>> y = ivy.sign(x) >>> print(y) ivy.array([1., 0., 1., 1.]) >>> x = ivy.array([[5.78, -4., -6.9, 0], ... [-.4, 0.5, 8, -0.01]]) >>> y = ivy.sign(x) >>> print(y) ivy.array([[ 1., -1., -1., 0.], [-1., 1., 1., -1.]]) With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([0., -0.]), ... b=ivy.array([1.46, 5.9, -0.0]), ... c=ivy.array([-8.23, -4.9, -2.6, 7.4])) >>> y = ivy.sign(x) >>> print(y) { a: ivy.array([0., 0.]), b: ivy.array([1., 1., 0.]), c: ivy.array([-1., -1., -1., 1.]) } """ return ivy.current_backend(x).sign(x, np_variant=np_variant, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def sin( x: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: r"""Calculate an implementation-dependent approximation to the sine, having domain ``(-infinity, +infinity)`` and codomain ``[-1, +1]``, for each element ``x_i`` of the input array ``x``. Each element ``x_i`` is assumed to be expressed in radians. .. note:: The sine is an entire function on the complex plane and has no branch cuts. **Special cases** For floating-point operands, - If ``x_i`` is ``NaN``, the result is ``NaN``. - If ``x_i`` is ``+0``, the result is ``+0``. - If ``x_i`` is ``-0``, the result is ``-0``. - If ``x_i`` is either ``+infinity`` or ``-infinity``, the result is ``NaN``. For complex floating-point operands, special cases must be handled as if the operation is implemented as ``-1j * sinh(x*1j)``. Parameters ---------- x input array whose elements are each expressed in radians. Should have a floating-point data type. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the sine of each element in ``x``. The returned array must have a floating-point data type determined by :ref:`type-promotion`. 
This function conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.sin.html>`_ in the standard. Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments Examples -------- With :class:`ivy.Array` input: >>> x = ivy.array([0., 1., 2.]) >>> y = ivy.sin(x) >>> print(y) ivy.array([0., 0.841, 0.909]) >>> x = ivy.array([0., 1.2, -2.3, 3.6]) >>> y = ivy.zeros(4) >>> ivy.sin(x, out=y) >>> print(y) ivy.array([0., 0.932, -0.746, -0.443]) >>> x = ivy.array([[1., 2., 3.], [-4., -5., -6.]]) >>> ivy.sin(x, out=x) >>> print(x) ivy.array([[0.841, 0.909, 0.141], [0.757, 0.959, 0.279]]) With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([0., 1., 2., 3.]), ... b=ivy.array([-4., -5., -6., -7.])) >>> y = ivy.sin(x) >>> print(y) { a: ivy.array([0., 0.841, 0.909, 0.141]), b: ivy.array([0.757, 0.959, 0.279, -0.657]) } """ return ivy.current_backend(x).sin(x, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def sinh( x: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: r"""Calculate an implementation-dependent approximation to the hyperbolic sine, having domain ``[-infinity, +infinity]`` and codomain ``[-infinity, +infinity]``, for each element ``x_i`` of the input array ``x``. .. math:: \operatorname{sinh}(x) = \frac{e^x - e^{-x}}{2} .. note:: The hyperbolic sine is an entire function in the complex plane and has no branch cuts. The function is periodic, with period :math:`2\pi j`, with respect to the imaginary component. **Special cases** For floating-point operands, - If ``x_i`` is ``NaN``, the result is ``NaN``. - If ``x_i`` is ``+0``, the result is ``+0``. - If ``x_i`` is ``-0``, the result is ``-0``. - If ``x_i`` is ``+infinity``, the result is ``+infinity``. - If ``x_i`` is ``-infinity``, the result is ``-infinity``. For complex floating-point operands, let ``a = real(x_i)``, ``b = imag(x_i)``, and .. note:: For complex floating-point operands, ``sinh(conj(x))`` must equal ``conj(sinh(x))``. - If ``a`` is ``+0`` and ``b`` is ``+0``, the result is ``+0 + 0j``. - If ``a`` is ``+0`` and ``b`` is ``+infinity``, the result is ``0 + NaN j`` (sign of the real component is unspecified). - If ``a`` is ``+0`` and ``b`` is ``NaN``, the result is ``0 + NaN j`` (sign of the real component is unspecified). - If ``a`` is a positive (i.e., greater than ``0``) finite number and ``b`` is ``+infinity``, the result is ``NaN + NaN j``. - If ``a`` is a positive (i.e., greater than ``0``) finite number and ``b`` is ``NaN``, the result is ``NaN + NaN j``. - If ``a`` is ``+infinity`` and ``b`` is ``+0``, the result is ``+infinity + 0j``. - If ``a`` is ``+infinity`` and ``b`` is a positive finite number, the result is ``+infinity * cis(b)``. - If ``a`` is ``+infinity`` and ``b`` is ``+infinity``, the result is ``infinity + NaN j`` (sign of the real component is unspecified). - If ``a`` is ``+infinity`` and ``b`` is ``NaN``, the result is ``infinity + NaN j`` (sign of the real component is unspecified). - If ``a`` is ``NaN`` and ``b`` is ``+0``, the result is ``NaN + 0j``. 
- If ``a`` is ``NaN`` and ``b`` is a nonzero finite number, the result is ``NaN + NaN j``. - If ``a`` is ``NaN`` and ``b`` is ``NaN``, the result is ``NaN + NaN j``. where ``cis(v)`` is ``cos(v) + sin(v)*1j``. Parameters ---------- x input array whose elements each represent a hyperbolic angle. Should have a floating-point data type. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the hyperbolic sine of each element in ``x``. The returned array must have a floating-point data type determined by :ref:`type-promotion`. This function conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.sinh.html>`_ in the standard. Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments Examples -------- With :class:`ivy.Array` input: >>> x = ivy.array([1., 2., 3.]) >>> y = ivy.sinh(x) >>> print(y) ivy.array([1.18, 3.63, 10.]) >>> x = ivy.array([0.23, 3., -1.2]) >>> ivy.sinh(x, out=x) >>> print(x) ivy.array([0.232, 10., -1.51]) With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([0.23, -0.25, 1]), b=ivy.array([3, -4, 1.26])) >>> y = ivy.sinh(x) >>> print(y) { a: ivy.array([0.232, -0.253, 1.18]), b: ivy.array([10., -27.3, 1.62]) } """ return ivy.current_backend(x).sinh(x, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def sqrt( x: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: r"""Calculate the square root, having domain ``[0, +infinity]`` and codomain ``[0, +infinity]``, for each element ``x_i`` of the input array ``x``. After rounding, each result must be indistinguishable from the infinitely precise result (as required by IEEE 754). .. note:: After rounding, each result must be indistinguishable from the infinitely precise result (as required by IEEE 754). .. note:: For complex floating-point operands, ``sqrt(conj(x))`` must equal ``conj(sqrt(x))``. .. note:: By convention, the branch cut of the square root is the negative real axis :math:`(-\infty, 0)`. The square root is a continuous function from above the branch cut, taking into account the sign of the imaginary component. Accordingly, for complex arguments, the function returns the square root in the range of the right half-plane, including the imaginary axis (i.e., the plane defined by :math:`[0, +\infty)` along the real axis and :math:`(-\infty, +\infty)` along the imaginary axis). **Special cases** For floating-point operands, - If ``x_i`` is ``NaN``, the result is ``NaN``. - If ``x_i`` is less than ``0``, the result is ``NaN``. - If ``x_i`` is ``+0``, the result is ``+0``. - If ``x_i`` is ``-0``, the result is ``-0``. - If ``x_i`` is ``+infinity``, the result is ``+infinity``. For complex floating-point operands, let ``a = real(x_i)``, ``b = imag(x_i)``, and - If ``a`` is either ``+0`` or ``-0`` and ``b`` is ``+0``, the result is ``+0 + 0j``. - If ``a`` is any value (including ``NaN``) and ``b`` is ``+infinity``, the result is ``+infinity + infinity j``. - If ``a`` is a finite number and ``b`` is ``NaN``, the result is ``NaN + NaN j``. 
- If ``a`` ``-infinity`` and ``b`` is a positive (i.e., greater than ``0``) finite number, the result is ``NaN + NaN j``. - If ``a`` is ``+infinity`` and ``b`` is a positive (i.e., greater than ``0``) finite number, the result is ``+0 + infinity j``. - If ``a`` is ``-infinity`` and ``b`` is ``NaN``, the result is ``NaN + infinity j`` (sign of the imaginary component is unspecified). - If ``a`` is ``+infinity`` and ``b`` is ``NaN``, the result is ``+infinity + NaN j``. - If ``a`` is ``NaN`` and ``b`` is any value, the result is ``NaN + NaN j``. - If ``a`` is ``NaN`` and ``b`` is ``NaN``, the result is ``NaN + NaN j``. Parameters ---------- x input array. Should have a floating-point data type. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the square root of each element in ``x``. The returned array must have a floating-point data type determined by :ref:`type-promotion`. This function conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.sqrt.html>`_ in the standard. Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments Examples -------- With :class:`ivy.Array` input: >>> x = ivy.array([0, 4., 8.]) >>> y = ivy.sqrt(x) >>> print(y) ivy.array([0., 2., 2.83]) >>> x = ivy.array([1, 2., 4.]) >>> y = ivy.zeros(3) >>> ivy.sqrt(x, out=y) ivy.array([1., 1.41, 2.]) >>> X = ivy.array([40., 24., 100.]) >>> ivy.sqrt(x, out=x) >>> ivy.array([6.32455532, 4.89897949, 10.]) With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([44., 56., 169.]), b=ivy.array([[49.,1.], [0,20.]])) # noqa >>> y = ivy.sqrt(x) >>> print(y) { a: ivy.array([6.63, 7.48, 13.]), b: ivy.array([[7., 1.], [0., 4.47]]) } """ # noqa: E501 return ivy.current_backend(x).sqrt(x, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def square( x: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Each element ``x_i`` of the input array ``x``. Parameters ---------- x Input array. Should have a numeric data type. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the evaluated result for each element in ``x``. This method conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.square.html>`_ in the standard. Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments. 
Examples -------- With :class:`ivy.Array` input: >>> x = ivy.array([1, 2, 3]) >>> y = ivy.square(x) >>> print(y) ivy.array([1, 4, 9]) >>> x = ivy.array([1.5, -0.8, 0.3]) >>> y = ivy.zeros(3) >>> ivy.square(x, out=y) >>> print(y) ivy.array([2.25, 0.64, 0.09]) >>> x = ivy.array([[1.2, 2, 3.1], [-1, -2.5, -9]]) >>> ivy.square(x, out=x) >>> print(x) ivy.array([[1.44,4.,9.61],[1.,6.25,81.]]) With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([0, 1]), b=ivy.array([2, 3])) >>> y = ivy.square(x) >>> print(y) { a:ivy.array([0,1]), b:ivy.array([4,9]) } """ return ivy.current_backend(x).square(x, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def subtract( x1: Union[float, ivy.Array, ivy.NativeArray], x2: Union[float, ivy.Array, ivy.NativeArray], /, *, alpha: Optional[Union[int, float]] = None, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Calculate the difference for each element ``x1_i`` of the input array ``x1`` with the respective element ``x2_i`` of the input array ``x2``. Parameters ---------- x1 first input array. Should have a numeric data type. x2 second input array. Must be compatible with ``x1`` (see ref:`broadcasting`). Should have a numeric data type. alpha optional scalar multiplier for ``x2``. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the element-wise differences. This method conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.subtract.html>`_ in the standard. Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments. Examples -------- >>> x = ivy.array([3, 6, 3]) >>> y = ivy.array([2, 1, 6]) >>> z = ivy.subtract(x, y) >>> print(z) ivy.array([ 1, 5, -3]) >>> x = ivy.array([3, 6, 3]) >>> y = ivy.array([2, 1, 6]) >>> z = ivy.subtract(x, y, alpha=2) >>> print(z) ivy.array([-1, 4, -9]) """ return ivy.current_backend(x1).subtract(x1, x2, alpha=alpha, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def tan( x: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: r"""Calculate an implementation-dependent approximation to the tangent, having domain ``(-infinity, +infinity)`` and codomain ``(-infinity, +infinity)``, for each element ``x_i`` of the input array ``x``. Each element ``x_i`` is assumed to be expressed in radians. .. note:: Tangent is an analytical function on the complex plane and has no branch cuts. The function is periodic, with period :math:`\pi j`, with respect to the real component and has first order poles along the real line at coordinates :math:`(\pi (\frac{1}{2} + n), 0)`. However, IEEE 754 binary floating-point representation cannot represent the value :math:`\pi / 2` exactly, and, thus, no argument value is possible for which a pole error occurs. where :math:`{tanh}` is the hyperbolic tangent. **Special cases** For floating-point operands, - If ``x_i`` is ``NaN``, the result is ``NaN``. - If ``x_i`` is ``+0``, the result is ``+0``. 
- If ``x_i`` is ``-0``, the result is ``-0``. - If ``x_i`` is either ``+infinity`` or ``-infinity``, the result is ``NaN``. For complex floating-point operands, special cases must be handled as if the operation is implemented as ``-1j * tanh(x*1j)``. Parameters ---------- x input array whose elements are expressed in radians. Should have a floating-point data type. out optional output, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the tangent of each element in ``x``. The return must have a floating-point data type determined by :ref:`type-promotion`. This function conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.tan.html>`_ in the standard. Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments. Examples -------- With :class:`ivy.Array` input: >>> x = ivy.array([0., 1., 2.]) >>> y = ivy.tan(x) >>> print(y) ivy.array([0., 1.56, -2.19]) >>> x = ivy.array([0.5, -0.7, 2.4]) >>> y = ivy.zeros(3) >>> ivy.tan(x, out=y) >>> print(y) ivy.array([0.546, -0.842, -0.916]) >>> x = ivy.array([[1.1, 2.2, 3.3], ... [-4.4, -5.5, -6.6]]) >>> ivy.tan(x, out=x) >>> print(x) ivy.array([[1.96, -1.37, 0.16], [-3.1, 0.996, -0.328]]) With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([0., 1., 2.]), b=ivy.array([3., 4., 5.])) >>> y = ivy.tan(x) >>> print(y) { a: ivy.array([0., 1.56, -2.19]), b: ivy.array([-0.143, 1.16, -3.38]) } """ return ivy.current_backend(x).tan(x, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device @handle_complex_input def tanh( x: Union[ivy.Array, ivy.NativeArray], /, *, complex_mode: Literal["split", "magnitude", "jax"] = "jax", out: Optional[ivy.Array] = None, ) -> ivy.Array: """Calculate an implementation-dependent approximation to the hyperbolic tangent, having domain ``[-infinity, +infinity]`` and codomain ``[-1, +1]``, for each element ``x_i`` of the input array ``x``. **Special cases** For floating-point operands, - If ``x_i`` is ``NaN``, the result is ``NaN``. - If ``x_i`` is ``+0``, the result is ``+0``. - If ``x_i`` is ``-0``, the result is ``-0``. - If ``x_i`` is ``+infinity``, the result is ``+1``. - If ``x_i`` is ``-infinity``, the result is ``-1``. For complex floating-point operands, let ``a = real(x_i)``, ``b = imag(x_i)``, and .. note:: For complex floating-point operands, ``tanh(conj(x))`` must equal ``conj(tanh(x))``. - If ``a`` is ``+0`` and ``b`` is ``+0``, the result is ``+0 + 0j``. - If ``a`` is a nonzero finite number and ``b`` is ``+infinity``, the result is ``NaN + NaN j``. - If ``a`` is ``+0`` and ``b`` is ``+infinity``, the result is ``+0 + NaN j``. - If ``a`` is a nonzero finite number and ``b`` is ``NaN``, the result is ``NaN + NaN j``. - If ``a`` is ``+0`` and ``b`` is ``NaN``, the result is ``+0 + NaN j``. - If ``a`` is ``+infinity`` and ``b`` is a positive (i.e., greater than ``0``) finite number, the result is ``1 + 0j``. - If ``a`` is ``+infinity`` and ``b`` is ``+infinity``, the result is ``1 + 0j`` (sign of the imaginary component is unspecified). 
- If ``a`` is ``+infinity`` and ``b`` is ``NaN``, the result is ``1 + 0j`` (sign of the imaginary component is unspecified). - If ``a`` is ``NaN`` and ``b`` is ``+0``, the result is ``NaN + 0j``. - If ``a`` is ``NaN`` and ``b`` is a nonzero number, the result is ``NaN + NaN j``. - If ``a`` is ``NaN`` and ``b`` is ``NaN``, the result is ``NaN + NaN j``. .. warning:: For historical reasons stemming from the C standard, array libraries may not return the expected result when ``a`` is ``+0`` and ``b`` is either ``+infinity`` or ``NaN``. The result should be ``+0 + NaN j`` in both cases; however, for libraries compiled against older C versions, the result may be ``NaN + NaN j``. Array libraries are not required to patch these older C versions, and, thus, users are advised that results may vary across array library implementations for these special cases. Parameters ---------- x input array whose elements each represent a hyperbolic angle. Should have a real-valued floating-point data type. complex_mode optional specifier for how to handle complex data types. See ``ivy.func_wrapper.handle_complex_input`` for more detail. out optional output, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the hyperbolic tangent of each element in ``x``. The returned array must have a real-valued floating-point data type determined by :ref:`type-promotion`. This function conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.tanh.html>`_ in the standard. Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments Examples -------- With :class:`ivy.Array` input: >>> x = ivy.array([0., 1., 2.]) >>> y = ivy.tanh(x) >>> print(y) ivy.array([0., 0.762, 0.964]) >>> x = ivy.array([0.5, -0.7, 2.4]) >>> y = ivy.zeros(3) >>> ivy.tanh(x, out=y) >>> print(y) ivy.array([0.462, -0.604, 0.984]) >>> x = ivy.array([[1.1, 2.2, 3.3], ... [-4.4, -5.5, -6.6]]) >>> ivy.tanh(x, out=x) >>> print(x) ivy.array([[0.8, 0.976, 0.997], [-1., -1., -1.]]) With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([0., 1., 2.]), ... b=ivy.array([3., 4., 5.])) >>> y = ivy.tanh(x) >>> print(y) { a: ivy.array([0., 0.762, 0.964]), b: ivy.array([0.995, 0.999, 1.]) } """ return ivy.current_backend(x).tanh(x, out=out) @handle_backend_invalid @handle_nestable @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back @handle_device def trapz( y: ivy.Array, /, *, x: Optional[ivy.Array] = None, dx: float = 1.0, axis: int = -1, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Integrate along the given axis using the composite trapezoidal rule. If x is provided, the integration happens in sequence along its elements - they are not sorted.. Parameters ---------- y The array that should be integrated. x The sample points corresponding to the input array values. If x is None, the sample points are assumed to be evenly spaced dx apart. The default is None. dx The spacing between sample points when x is None. The default is 1. axis The axis along which to integrate. out optional output array, for writing the result to. Returns ------- ret Definite integral of n-dimensional array as approximated along a single axis by the trapezoidal rule. 
If the input array is a 1-dimensional array, then the result is a float. If n is greater than 1, then the result is an n-1 dimensional array. Examples -------- >>> y = ivy.array([1, 2, 3]) >>> ivy.trapz([1,2,3]) 4.0 >>> y = ivy.array([1, 2, 3]) >>> ivy.trapz([1,2,3], x=[4, 6, 8]) 8.0 >>> y = ivy.array([1, 2, 3]) >>> ivy.trapz([1,2,3], dx=2) 8.0 """ return ivy.current_backend(y).trapz(y, x=x, dx=dx, axis=axis, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def trunc( x: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Round each element x_i of the input array x to the integer-valued number that is closest to but no greater than x_i. **Special cases** - If ``x_i`` is already an integer-valued, the result is ``x_i``. For floating-point operands, - If ``x_i`` is ``+infinity``, the result is ``+infinity``. - If ``x_i`` is ``-infinity``, the result is ``-infinity``. - If ``x_i`` is ``+0``, the result is ``+0``. - If ``x_i`` is ``-0``, the result is ``-0``. - If ``x_i`` is ``NaN``, the result is ``NaN``. Parameters ---------- x input array. Should have a numeric data type. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the rounded result for each element in ``x``. The returned array must have the same data type as ``x``. This function conforms to the `Array API Standard <https://data-apis.org/array-api/latest/>`_. This docstring is an extension of the `docstring <https://data-apis.org/array-api/latest/ API_specification/generated/array_api.trunc.html>`_ in the standard. Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments Examples -------- With :class:`ivy.Array` input: >>> x = ivy.array([-1, 0.54, 3.67, -0.025]) >>> y = ivy.trunc(x) >>> print(y) ivy.array([-1., 0., 3., -0.]) >>> x = ivy.array([0.56, 7, -23.4, -0.0375]) >>> ivy.trunc(x, out=x) >>> print(x) ivy.array([ 0., 7., -23., -0.]) >>> x = ivy.array([[0.4, -8, 0.55], [0, 0.032, 2]]) >>> y = ivy.zeros([2,3]) >>> ivy.trunc(x, out=y) >>> print(y) ivy.array([[ 0., -8., 0.], [ 0., 0., 2.]]) With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([-0.25, 4, 1.3]), b=ivy.array([12, -3.5, 1.234])) >>> y = ivy.trunc(x) >>> print(y) { a: ivy.array([-0., 4., 1.]), b: ivy.array([12., -3., 1.]) } """ return ivy.current_backend(x).trunc(x, out=out) # Extra # # ------# @handle_exceptions @handle_backend_invalid @handle_nestable @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def erf( x: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Compute the Gauss error function of ``x`` element-wise. Parameters ---------- x input array. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret The Gauss error function of x. 
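Note: the Gauss error function is defined by ``erf(x) = (2 / sqrt(pi)) * integral(exp(-t**2), t=0..x)``, so ``erf(0) = 0`` and ``erf(x)`` approaches ``1`` as ``x`` grows large; the values in the examples below follow this definition.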
Examples -------- With :class:`ivy.Array` inputs: >>> x = ivy.array([0, 0.3, 0.7]) >>> y = ivy.erf(x) >>> print(y) ivy.array([0., 0.32862675, 0.67780113]) >>> x = ivy.array([0.1, 0.3, 0.4, 0.5]) >>> ivy.erf(x, out=x) >>> print(x) ivy.array([0.11246294, 0.32862675, 0.42839241, 0.52050018]) >>> x = ivy.array([[0.15, 0.28], [0.41, 1.75]]) >>> y = ivy.zeros((2, 2)) >>> ivy.erf(x, out=y) >>> print(y) ivy.array([[0.16799599, 0.30787992], [0.43796915, 0.98667163]]) With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([0.9, 1.1, 1.2]), b=ivy.array([1.3, 1.4, 1.5])) >>> y = ivy.erf(x) >>> print(y) { a: ivy.array([0.79690808, 0.88020504, 0.91031402]), b: ivy.array([0.934008, 0.95228523, 0.96610528]) } """ return ivy.current_backend(x).erf(x, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def maximum( x1: Union[ivy.Array, ivy.NativeArray, Number], x2: Union[ivy.Array, ivy.NativeArray, Number], /, *, use_where: bool = True, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Return the max of x1 and x2 (i.e. x1 > x2 ? x1 : x2) element-wise. Parameters ---------- x1 Input array containing elements to maximum threshold. x2 Tensor containing maximum values, must be broadcastable to x1. use_where Whether to use :func:`where` to calculate the maximum. If ``False``, the maximum is calculated using the ``(x + y + |x - y|)/2`` formula. Default is ``True``. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret An array with the elements of x1, but clipped to not be lower than the x2 values. Examples -------- With :class:`ivy.Array` inputs: >>> x = ivy.array([7, 9, 5]) >>> y = ivy.array([9, 3, 2]) >>> z = ivy.maximum(x, y) >>> print(z) ivy.array([9, 9, 5]) >>> x = ivy.array([1, 5, 9, 8, 3, 7]) >>> y = ivy.array([[9], [3], [2]]) >>> z = ivy.zeros((3, 6), dtype=ivy.int32) >>> ivy.maximum(x, y, out=z) >>> print(z) ivy.array([[9, 9, 9, 9, 9, 9], [3, 5, 9, 8, 3, 7], [2, 5, 9, 8, 3, 7]]) >>> x = ivy.array([[7, 3]]) >>> y = ivy.array([0, 7]) >>> ivy.maximum(x, y, out=x) >>> print(x) ivy.array([[7, 7]]) With one :class:`ivy.Container` input: >>> x = ivy.array([[1, 3], [2, 4], [3, 7]]) >>> y = ivy.Container(a=ivy.array([1, 0,]), ... b=ivy.array([-5, 9])) >>> z = ivy.maximum(x, y) >>> print(z) { a: ivy.array([[1, 3], [2, 4], [3, 7]]), b: ivy.array([[1, 9], [2, 9], [3, 9]]) } With multiple :class:`ivy.Container` inputs: >>> x = ivy.Container(a=ivy.array([1, 3, 1]),b=ivy.array([2, 8, 5])) >>> y = ivy.Container(a=ivy.array([1, 5, 6]),b=ivy.array([5, 9, 7])) >>> z = ivy.maximum(x, y) >>> print(z) { a: ivy.array([1, 5, 6]), b: ivy.array([5, 9, 7]) } """ return ivy.current_backend(x1).maximum(x1, x2, use_where=use_where, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def minimum( x1: Union[ivy.Array, ivy.NativeArray], x2: Union[ivy.Array, ivy.NativeArray], /, *, use_where: bool = True, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Return the min of x1 and x2 (i.e. x1 < x2 ? x1 : x2) element-wise. Parameters ---------- x1 Input array containing elements to minimum threshold. x2 Tensor containing minimum values, must be broadcastable to x1. use_where Whether to use :func:`where` to calculate the minimum. If ``False``, the minimum is calculated using the ``(x + y - |x - y|)/2`` formula. Default is ``True``. 
out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret An array with the elements of x1, but clipped to not exceed the x2 values. Examples -------- With :class:`ivy.Array` inputs: >>> x = ivy.array([7, 9, 5]) >>> y = ivy.array([9, 3, 2]) >>> z = ivy.minimum(x, y) >>> print(z) ivy.array([7, 3, 2]) >>> x = ivy.array([1, 5, 9, 8, 3, 7]) >>> y = ivy.array([[9], [3], [2]]) >>> z = ivy.zeros((3, 6), dtype=ivy.int32) >>> ivy.minimum(x, y, out=z) >>> print(z) ivy.array([[1, 5, 9, 8, 3, 7], [1, 3, 3, 3, 3, 3], [1, 2, 2, 2, 2, 2]]) >>> x = ivy.array([[7, 3]]) >>> y = ivy.array([0, 7]) >>> ivy.minimum(x, y, out=x) >>> print(x) ivy.array([[0, 3]]) With one :class:`ivy.Container` input: >>> x = ivy.array([[1, 3], [2, 4], [3, 7]]) >>> y = ivy.Container(a=ivy.array([1, 0,]),b=ivy.array([-5, 9])) >>> z = ivy.minimum(x, y) >>> print(z) { a: ivy.array([[1, 0], [1, 0], [1, 0]]), b: ivy.array([[-5, 3], [-5, 4], [-5, 7]]) } With multiple :class:`ivy.Container` inputs: >>> x = ivy.Container(a=ivy.array([1, 3, 1]), ... b=ivy.array([2, 8, 5])) >>> y = ivy.Container(a=ivy.array([1, 5, 6]), ... b=ivy.array([5, 9, 7])) >>> z = ivy.minimum(x, y) >>> print(z) { a: ivy.array([1, 3, 1]), b: ivy.array([2, 8, 5]) } """ return ivy.current_backend(x1).minimum(x1, x2, use_where=use_where, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def reciprocal( x: Union[float, ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Return a new array with the reciprocal of each element in ``x``. Parameters ---------- x Input array. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret A new array with the positive value of each element in ``x``. Examples -------- >>> x = ivy.array([1, 2, 3]) >>> y = ivy.reciprocal(x) >>> print(y) ivy.array([1. , 0.5 , 0.33333333]) """ return ivy.current_backend(x).reciprocal(x, out=out) @handle_backend_invalid @handle_nestable @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def deg2rad( x: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Convert the input from degrees to radians. Parameters ---------- x input array whose elements are each expressed in degrees. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array with each element in ``x`` converted from degrees to radians. Examples -------- With :class:`ivy.Array` input: >>> x=ivy.array([0,90,180,270,360], dtype=ivy.float32) >>> y=ivy.deg2rad(x) >>> print(y) ivy.array([0., 1.57079633, 3.14159265, 4.71238898, 6.28318531]) >>> x=ivy.array([0,-1.5,-50,ivy.nan]) >>> y=ivy.zeros(4) >>> ivy.deg2rad(x,out=y) >>> print(y) ivy.array([ 0., -0.02617994, -0.87266463, nan]) >>> x = ivy.array([[1.1, 2.2, 3.3],[-4.4, -5.5, -6.6]]) >>> ivy.deg2rad(x, out=x) >>> print(x) ivy.array([[ 0.01919862, 0.03839725, 0.05759586], [-0.07679449, -0.09599311, -0.11519173]]) >>> x=ivy.native_array([-0,20.1,ivy.nan]) >>> y=ivy.zeros(3) >>> ivy.deg2rad(x,out=y) >>> print(y) ivy.array([0., 0.35081118, nan]) With :class:`ivy.Container` input: >>> x=ivy.Container(a=ivy.array([-0,20.1,-50.5,-ivy.nan]), ... 
b=ivy.array([0,90.,180,270,360], dtype=ivy.float32)) >>> y=ivy.deg2rad(x) >>> print(y) { a: ivy.array([0., 0.35081118, -0.88139129, nan]), b: ivy.array([0., 1.57079633, 3.14159265, 4.71238898, 6.28318531]) } >>> x=ivy.Container(a=ivy.array([0,90,180,270,360], dtype=ivy.float32), ... b=ivy.native_array([0,-1.5,-50,ivy.nan])) >>> y=ivy.deg2rad(x) >>> print(y) { a: ivy.array([0., 1.57079633, 3.14159265, 4.71238898, 6.28318531]), b: ivy.array([0., -0.02617994, -0.87266463, nan]) } """ return ivy.current_backend(x).deg2rad(x, out=out) @handle_backend_invalid @handle_nestable @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def rad2deg( x: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Convert the input from radians to degrees. Parameters ---------- x input array whose elements are each expressed in radians. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array with each element in ``x`` converted from radians to degrees. Examples -------- With :class:`ivy.Array` input: >>> x=ivy.array([0.,1.57,3.14,4.71,6.28]) >>> y=ivy.rad2deg(x) >>> print(y) ivy.array([ 0., 90., 180., 270., 360.]) >>> x=ivy.array([0.,-0.0262,-0.873,ivy.nan]) >>> y=ivy.zeros(4) >>> ivy.rad2deg(x,out=y) >>> print(y) ivy.array([ 0. , -1.5, -50. , nan]) >>> x = ivy.array([[1.1, 2.2, 3.3],[-4.4, -5.5, -6.6]]) >>> ivy.rad2deg(x, out=x) >>> print(x) ivy.array([[ 63., 126., 189.], [-252., -315., -378.]]) >>> x=ivy.native_array([-0,20.1,ivy.nan]) >>> y=ivy.zeros(3) >>> ivy.rad2deg(x,out=y) >>> print(y) ivy.array([ 0., 1150., nan]) With :class:`ivy.Container` input: >>> x=ivy.Container(a=ivy.array([-0., 20.1, -50.5, -ivy.nan]), ... b=ivy.array([0., 1., 2., 3., 4.])) >>> y=ivy.rad2deg(x) >>> print(y) { a: ivy.array([0., 1150., -2890., nan]), b: ivy.array([0., 57.3, 115., 172., 229.]) } >>> x=ivy.Container(a=ivy.array([0,10,180,8.5,6]), ... b=ivy.native_array([0,-1.5,0.5,ivy.nan])) >>> y=ivy.rad2deg(x) >>> print(y) { a: ivy.array([0., 573., 10300., 487., 344.]), b: ivy.array([0., -85.9, 28.6, nan]) } """ return ivy.current_backend(x).rad2deg(x, out=out) @handle_exceptions @handle_nestable @handle_array_like_without_promotion @inputs_to_ivy_arrays @handle_array_function def trunc_divide( x1: Union[float, ivy.Array, ivy.NativeArray], x2: Union[float, ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Perform element-wise integer division of the inputs rounding the results towards zero. Parameters ---------- x1 dividend input array. Should have a numeric data type. x2 divisor input array. Must be compatible with x1 (see Broadcasting). Should have a numeric data type. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing the element-wise results. The returned array must have a floating-point data type determined by Type Promotion Rules. 
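Note: as the implementation below shows, this is equivalent to ``ivy.trunc(ivy.divide(x1, x2))``, i.e. true division followed by rounding toward zero.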
Examples -------- With :class:`ivy.Array` inputs: >>> x1 = ivy.array([2., 7., 9.]) >>> x2 = ivy.array([3., -4., 0.6]) >>> y = ivy.trunc_divide(x1, x2) >>> print(y) ivy.array([ 0., -1., 14.]) """ return ivy.trunc(ivy.divide(x1, x2), out=out) trunc_divide.mixed_backend_wrappers = { "to_add": ( "handle_backend_invalid", "handle_out_argument", "inputs_to_native_arrays", "outputs_to_ivy_arrays", "handle_device", "handle_backend_invalid", ), "to_skip": ("inputs_to_ivy_arrays",), } @handle_exceptions @handle_backend_invalid @handle_nestable @handle_array_like_without_promotion @handle_out_argument @to_native_arrays_and_back @handle_array_function @handle_device def isreal( x: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Test each element ``x_i`` of the input array ``x`` to determine whether the element is real number. Returns a bool array, where True if input element is real. If element has complex type with zero complex part, the return value for that element is True. Parameters ---------- x input array. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret an array containing test results. An element ``out_i`` is ``True`` if ``x_i`` is real number and ``False`` otherwise. The returned array should have a data type of ``bool``. The descriptions above assume an array input for simplicity, but the method also accepts :class:`ivy.Container` instances in place of :class:`ivy.Array` or :class:`ivy.NativeArray` instances, as shown in the type hints and also the examples below. Examples -------- With :class:`ivy.Array` inputs: >>> x = ivy.array([[[1.1], [float('inf')], [-6.3]]]) >>> z = ivy.isreal(x) >>> print(z) ivy.array([[[True], [True], [True]]]) >>> x = ivy.array([1-0j, 3j, 7+5j]) >>> z = ivy.isreal(x) >>> print(z) ivy.array([ True, False, False]) With :class:`ivy.Container` input: >>> x = ivy.Container(a=ivy.array([-6.7-7j, -np.inf, 1.23]),\ b=ivy.array([5j, 5-6j, 3])) >>> z = ivy.isreal(x) >>> print(z) { a: ivy.array([False, True, True]), b: ivy.array([False, False, True]) } """ return ivy.current_backend(x).isreal(x, out=out) @handle_backend_invalid @handle_nestable @handle_out_argument @to_native_arrays_and_back @handle_device def fmod( x1: Union[ivy.Array, ivy.NativeArray], x2: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[Union[ivy.Array, ivy.NativeArray]] = None, ) -> Union[ivy.Array, ivy.NativeArray]: """Compute the element-wise remainder of divisions of two arrays. Parameters ---------- x1 First input array. x2 Second input array out optional output array, for writing the result to. Returns ------- ret Array with element-wise remainder of divisions. Examples -------- >>> x1 = ivy.array([2, 3, 4]) >>> x2 = ivy.array([1, 5, 2]) >>> ivy.fmod(x1, x2) ivy.array([ 0, 3, 0]) >>> x1 = ivy.array([ivy.nan, 0, ivy.nan]) >>> x2 = ivy.array([0, ivy.nan, ivy.nan]) >>> ivy.fmod(x1, x2) ivy.array([ nan, nan, nan]) """ return ivy.current_backend(x1, x2).fmod(x1, x2, out=out) @handle_backend_invalid @handle_nestable @handle_out_argument @to_native_arrays_and_back @handle_device def lcm( x1: Union[ivy.Array, ivy.NativeArray], x2: Union[ivy.Array, ivy.NativeArray], /, *, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Compute the element-wise least common multiple (LCM) of x1 and x2. Parameters ---------- x1 first input array, must be integers x2 second input array, must be integers out optional output array, for writing the result to. 
Returns ------- ret an array that includes the element-wise least common multiples of x1 and x2 Examples -------- With :class:`ivy.Array` input: >>> x1=ivy.array([2, 3, 4]) >>> x2=ivy.array([5, 7, 15]) >>> x1.lcm(x1, x2) ivy.array([10, 21, 60]) """ return ivy.current_backend(x1, x2).lcm(x1, x2, out=out)
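# A minimal, optional smoke-test sketch (not part of the public API). It assumes a
# NumPy installation so the "numpy" backend can be set, and it is guarded by
# ``__main__`` so it never runs on import; the expected values in the comments are
# taken from the doctest examples in the docstrings above.
if __name__ == "__main__":
    ivy.set_backend("numpy")
    print(ivy.fmod(ivy.array([2, 3, 4]), ivy.array([1, 5, 2])))      # ivy.array([0, 3, 0])
    print(ivy.lcm(ivy.array([2, 3, 4]), ivy.array([5, 7, 15])))      # ivy.array([10, 21, 60])
    print(ivy.maximum(ivy.array([7, 9, 5]), ivy.array([9, 3, 2])))   # ivy.array([9, 9, 5])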
from typing import Literal, Union, Optional, Tuple # local import ivy from ivy.utils.backend import current_backend from ivy.func_wrapper import ( handle_partial_mixed_function, to_native_arrays_and_back, handle_out_argument, handle_nestable, handle_array_like_without_promotion, inputs_to_ivy_arrays, handle_array_function, handle_device, handle_backend_invalid, ) from ivy.utils.exceptions import handle_exceptions @handle_exceptions @handle_backend_invalid @handle_nestable @handle_out_argument @to_native_arrays_and_back @handle_device def l1_normalize( x: Union[ivy.Array, ivy.NativeArray], /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Normalize the input array along the given axis to have L1 norm equal to 1. Parameters ---------- x Input array. axis Axis or axes along which to normalize. If ``None``, the whole array is normalized. out Optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret The normalized array. Examples -------- >>> x = ivy.array([[1., 2.], [3., 4.]]) >>> y = ivy.l1_normalize(x, axis=1) >>> print(y) ivy.array([[0.33333334, 1.33333337], [1.28571439, 2.28571439]]) """ return current_backend(x).l1_normalize(x, axis=axis, out=out) @handle_exceptions @handle_backend_invalid @handle_nestable @handle_out_argument @to_native_arrays_and_back @handle_device def l2_normalize( x: Union[ivy.Array, ivy.NativeArray], /, *, axis: Optional[int] = None, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Normalize the input array along the given axis to have L2 norm equal to 1. Parameters ---------- x Input array. axis Axis along which to normalize. If ``None``, the whole array is normalized. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret The normalized array. Examples -------- >>> x = ivy.array([[1., 2.], [3., 4.]]) >>> y = ivy.l2_normalize(x, axis=1) >>> print(y) ivy.array([[0.44721359, 0.89442718], [0.60000002, 0.80000001]]) """ return current_backend(x).l2_normalize(x, axis=axis, out=out) @handle_exceptions @handle_nestable @handle_partial_mixed_function @handle_array_like_without_promotion @inputs_to_ivy_arrays @handle_array_function def local_response_norm( x: Union[ivy.NativeArray, ivy.Array], size, /, *, bias: Optional[float] = 1.0, alpha: Optional[float] = 1.0, beta: Optional[float] = 0.5, average: bool = False, data_format: Optional[Literal["NHWC", "NCHW"]] = "NHWC", out: Optional[Tuple[ivy.Array, ivy.Array, ivy.Array]] = None, ) -> ivy.Array: """Apply local response normalization across the channels of a 4D input array. The 4-D array is treated as a 3-D array of 1-D vectors (along the channel dimension), and each vector is normalized independently. Within a given vector, each component is divided by the squared sum of the neighbouring components. Parameters ---------- x Input array of default shape (N, H, W, C), where N is the batch dimension, H and W correspond to the spatial dimensions and C corresponds to the channel dimension. size The width of the normalization window. alpha The multiplicative factor. beta The exponent. bias An additive factor. average If True, each component is divided by the **averaged** squared sum. data_format The ordering of the dimensions in the input, either "NHWC" or "NCHW". out optional output arrays, for writing the result to. Returns ------- ret The normalized array. 
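As a quick sanity check of the formula (derived from the implementation below), with ``size=1`` and the default ``bias=1.0``, ``alpha=1.0``, ``beta=0.5`` each element is mapped to ``x / sqrt(x**2 + 1)``; for larger windows the squared activations of ``size`` neighbouring channels are summed (or averaged when ``average=True``) before the same ``(alpha * s + bias) ** beta`` denominator is formed.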
""" if data_format == "NHWC": x = ivy.permute_dims(x, axes=(0, 3, 1, 2)) x_shape = x.shape alpha = alpha * size if not average else alpha ret = ivy.square(x) ret = ivy.reshape(ret, (x_shape[0], 1, x_shape[1], x_shape[2], -1)) ret = ivy.zero_pad( ret, ((0, 0), (0, 0), (size // 2, (size - 1) // 2), (0, 0), (0, 0)) ) ret = ivy.avg_pool3d( ret, (size, 1, 1), 1, "VALID", count_include_pad=True, data_format="NCDHW" ) ret = ivy.squeeze(ret, axis=1) ret = ivy.reshape(ret, x_shape) ret = ivy.pow(ivy.add(ivy.multiply(ret, alpha), bias), beta) ret = ivy.divide(x, ret) if data_format == "NHWC": ret = ivy.permute_dims(ret, axes=(0, 2, 3, 1)) return ret local_response_norm.mixed_backend_wrappers = { "to_add": ( "handle_backend_invalid", "handle_out_argument", "inputs_to_native_arrays", "outputs_to_ivy_arrays", "handle_device", ), "to_skip": ("inputs_to_ivy_arrays", "handle_partial_mixed_function"), } @handle_exceptions @handle_nestable @handle_partial_mixed_function @handle_array_like_without_promotion @inputs_to_ivy_arrays @handle_array_function def batch_norm( x: Union[ivy.NativeArray, ivy.Array], mean: Union[ivy.NativeArray, ivy.Array], variance: Union[ivy.NativeArray, ivy.Array], /, *, offset: Optional[Union[ivy.NativeArray, ivy.Array]] = None, scale: Optional[Union[ivy.NativeArray, ivy.Array]] = None, training: Optional[bool] = False, eps: Optional[float] = 1e-5, momentum: Optional[float] = 1e-1, data_format: Optional[str] = "NSC", out: Optional[Tuple[ivy.Array, ivy.Array, ivy.Array]] = None, ) -> Tuple[ivy.Array, ivy.Array, ivy.Array]: """ Apply batch normalization to the input array and returns the normalized input, running mean and running variance arrays as output. If ``training == False``, the mean and variance arrays passed as input are used for normalization and the same arrays are returned as running mean and running variance respectively. However, when ``training ==True``, this function computes the batch mean and batch variance which is then used for normalization.In this case, the function returns the running mean and running variance calculated using the following formula: running_mean = (1 - momentum) * running_mean + momentum * batch_mean running_var = (1 - momentum) * running_var + momentum * frac{n}{n-1} * batch_var Parameters ---------- x Input array of default shape (N, *S, C), where N is the batch dimension, *S corresponds to any number of spatial dimensions and C corresponds to the channel dimension. mean Mean array used for input's normalization. It can be of any shape braodcastable to (N,*S,C). variance Variance array used for input's normalization. It can be of any shape braodcastable to (N,*S,C). offset An offset array. If present, will be added to the normalized input. It can be of any shape broadcastable to (N,*S,C). scale A scale array. If present, the scale is applied to the normalized input. It can be of any shape broadcastable to (N,*S,C). training If true, calculate and use the mean and variance of `x`. Otherwise, use the provided `mean` and `variance`. eps A small float number to avoid dividing by 0. momentum the value used for the running_mean and running_var computation. Default value is 0.1. data_format The ordering of the dimensions in the input, one of "NSC" or "NCS", where N is the batch dimension, S represents any number of spatial dimensions and C is the channel dimension. Default is "NSC". out optional output arrays, for writing the result to. Returns ------- ret Tuple of arrays containing the normalized input, running_mean, and running_variance. 
""" xdims = len(x.shape) if data_format == "NCS": x = ivy.permute_dims(x, axes=(0, *range(2, xdims), 1)) runningmean = mean runningvariance = variance if training: n = x.size if xdims == 1 else x.size / x.shape[-1] dims = (0, *range(1, xdims - 1)) mean = ivy.mean(x, axis=dims) variance = ivy.var(x, axis=dims) runningmean = (1 - momentum) * runningmean + momentum * mean runningvariance = (1 - momentum) * runningvariance + momentum * variance * n / ( n - 1 ) inv = 1.0 / ivy.sqrt(variance + eps) offset = 0 if offset is None else offset if scale is not None: inv = inv * scale xnormalized = x * inv + offset - mean * inv if data_format == "NCS": xnormalized = ivy.permute_dims( xnormalized, axes=(0, xdims - 1, *range(1, xdims - 1)) ) if ivy.exists(out): xnormalized = ivy.inplace_update(out[0], xnormalized) runningmean = ivy.inplace_update(out[1], runningmean) runningvariance = ivy.inplace_update(out[2], runningvariance) return xnormalized, runningmean, runningvariance batch_norm.mixed_backend_wrappers = { "to_add": ( "handle_backend_invalid", "handle_out_argument", "inputs_to_native_arrays", "outputs_to_ivy_arrays", "handle_device", ), "to_skip": ("inputs_to_ivy_arrays", "handle_partial_mixed_function"), } @handle_exceptions @handle_nestable @handle_partial_mixed_function @handle_array_like_without_promotion @inputs_to_ivy_arrays @handle_array_function def instance_norm( x: Union[ivy.NativeArray, ivy.Array], mean: Union[ivy.NativeArray, ivy.Array], variance: Union[ivy.NativeArray, ivy.Array], /, *, offset: Optional[Union[ivy.NativeArray, ivy.Array]] = None, scale: Optional[Union[ivy.NativeArray, ivy.Array]] = None, training: Optional[bool] = False, eps: Optional[float] = 0e-5, momentum: Optional[float] = 1e-1, data_format: Optional[str] = "NSC", out: Optional[Tuple[ivy.Array, ivy.Array, ivy.Array]] = None, ) -> Tuple[ivy.Array, ivy.Array, ivy.Array]: """ Apply instance normalization to the input array and returns the normalized input, running mean and running variance arrays as output. If ``training == False``, the mean and variance arrays passed as input are used for normalization and the same arrays are returned as running mean and running variance respectively. However, when ``training ==True``, this function computes the mean and variance across the spatial dimensions which is then used for normalization.In this case, the function returns the running mean and running variance calculated using the following formula: running_mean = (1 - momentum) * running_mean + momentum * batch_mean running_var = (1 - momentum) * running_var + momentum * frac{n}{n-1} * batch_var Parameters ---------- x Input array of default shape (N, *S, C), where N is the batch dimension, *S corresponds to any number of spatial dimensions and C corresponds to the channel dimension. mean Mean array of size C used for input's normalization. variance Variance array of size C used for input's normalization. offset An offset array of size C. If present, will be added to the normalized input. scale A scale array of size C. If present, the scale is applied to the normalized input. training If true, calculate and use the mean and variance of `x`. Otherwise, use the provided `mean` and `variance`. eps A small float number to avoid dividing by 0. momentum the value used for the running_mean and running_var computation. Default value is 0.1. data_format The ordering of the dimensions in the input, one of "NSC" or "NCS", where N is the batch dimension, S represents any number of spatial dimensions and C is the channel dimension. 
Default is "NSC". out optional output arrays, for writing the result to. Returns ------- ret Tuple of arrays containing the normalized input, running_mean, and running_variance. """ xdims = len(x.shape) if data_format == "NCS": x = ivy.permute_dims(x, axes=(*range(2, xdims), 0, 1)) elif data_format == "NSC": x = ivy.permute_dims(x, axes=(*range(1, xdims - 1), 0, xdims - 1)) else: raise ValueError(f"Invalid data_format: {data_format}.") N = x.shape[-2] C = x.shape[-1] S = x.shape[0:-2] x = x.reshape((1, *S, N * C)) mean = ivy.tile(mean, N) variance = ivy.tile(variance, N) if scale is not None: scale = ivy.tile(scale, N) if offset is not None: offset = ivy.tile(offset, N) xnormalized, runningmean, runningvariance = batch_norm( x, mean, variance, scale=scale, offset=offset, training=training, eps=eps, momentum=momentum, ) xnormalized = xnormalized.reshape((*S, N, C)) if data_format == "NCS": xnormalized = ivy.permute_dims( xnormalized, axes=(xdims - 2, xdims - 1, *range(0, xdims - 2)) ) else: xnormalized = ivy.permute_dims( xnormalized, axes=(xdims - 2, *range(0, xdims - 2), xdims - 1) ) runningmean = runningmean.reshape((N, C)).mean(axis=0) runningvariance = runningvariance.reshape((N, C)).mean(axis=0) if ivy.exists(out): xnormalized = ivy.inplace_update(out[0], xnormalized) runningmean = ivy.inplace_update(out[1], runningmean) runningvariance = ivy.inplace_update(out[2], runningvariance) return (xnormalized, runningmean, runningvariance) instance_norm.mixed_backend_wrappers = { "to_add": ( "handle_backend_invalid", "handle_out_argument", "inputs_to_native_arrays", "outputs_to_ivy_arrays", "handle_device", ), "to_skip": ("inputs_to_ivy_arrays", "handle_partial_mixed_function"), } @handle_exceptions @handle_nestable @handle_array_like_without_promotion @inputs_to_ivy_arrays @handle_array_function def group_norm( x: Union[ivy.NativeArray, ivy.Array], num_groups: int = 1, /, *, offset: Optional[Union[ivy.NativeArray, ivy.Array]] = None, scale: Optional[Union[ivy.NativeArray, ivy.Array]] = None, eps: Optional[float] = 1e-5, data_format: Optional[str] = "NSC", out: Optional[ivy.Array] = None, ) -> ivy.Array: """Apply group normalization to the input array and returns the normalized input. Parameters ---------- x Input array of default shape (N, *S, C), where N is the batch dimension, *S corresponds to any number of spatial dimensions and C corresponds to the channel dimension. num_groups number of groups to separate the channels into offset An offset array of size C. If present, will be added to the normalized input. scale A scale array of size C. If present, the scale is applied to the normalized input. eps A small float number to avoid dividing by 0. data_format The ordering of the dimensions in the input, one of "NSC" or "NCS", where N is the batch dimension, S represents any number of spatial dimensions and C is the channel dimension. Default is "NSC". out optional output arrays, for writing the result to. Returns ------- ret The normalized array. 
""" xdims = ivy.get_num_dims(x) if data_format == "NSC": x = ivy.permute_dims(x, axes=(0, xdims - 1, *range(1, xdims - 1))) N = x.shape[0] C = x.shape[1] S = int(ivy.to_scalar(ivy.prod(x.shape[2:])) if xdims > 2 else 1) assert C % num_groups == 0 x_ = ivy.reshape(x, [N, num_groups, C // num_groups, S]) mean = ivy.mean(x_, axis=(2, 3), keepdims=True) var = ivy.var(x_, axis=(2, 3), keepdims=True) x_normalized = (x_ - mean) / ivy.sqrt(var + eps) x_normalized = ivy.reshape(x_normalized, x.shape) if ivy.exists(scale): scale = ivy.expand_dims(scale, axis=[0, *(range(2, xdims))]) x_normalized = x_normalized * scale if ivy.exists(offset): offset = ivy.expand_dims(offset, axis=[0, *(range(2, xdims))]) x_normalized = x_normalized + offset if data_format == "NSC": x_normalized = ivy.permute_dims(x_normalized, axes=(0, *range(2, xdims), 1)) if ivy.exists(out): x_normalized = ivy.inplace_update(out, x_normalized) return x_normalized group_norm.mixed_backend_wrappers = { "to_add": ( "handle_backend_invalid", "handle_out_argument", "inputs_to_native_arrays", "outputs_to_ivy_arrays", ), "to_skip": ("inputs_to_ivy_arrays",), } @handle_exceptions @handle_backend_invalid @handle_nestable @handle_out_argument @to_native_arrays_and_back @handle_device def lp_normalize( x: Union[ivy.Array, ivy.NativeArray], /, *, p: float = 2, axis: Optional[int] = None, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Normalize the input array along the given axis to have Lp norm equal to 1. Parameters ---------- x Input array. p The Lp norm to use for normalization. Default is L2 norm (p=2). axis Axis along which to normalize. If ``None``, the whole array is normalized. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret The normalized array. Examples -------- >>> x = ivy.array([[1., 2.], [3., 4.]]) >>> y = ivy.lp_normalize(x, p=1, axis=1) >>> print(y) ivy.array([[0.33333334, 0.66666669], [0.42857143, 0.5714286 ]]) """ return current_backend(x).lp_normalize(x, p=p, axis=axis, out=out)
ivy/ivy/functional/ivy/experimental/norms.py/0
{ "file_path": "ivy/ivy/functional/ivy/experimental/norms.py", "repo_id": "ivy", "token_count": 7417 }
48
"""Collection of Ivy normalization functions.""" # local from typing import List, Union, Optional import ivy from ivy.func_wrapper import ( handle_array_like_without_promotion, handle_nestable, handle_array_function, inputs_to_ivy_arrays, ) from ivy.utils.exceptions import handle_exceptions # Extra # # ------# @handle_exceptions @handle_nestable @handle_array_like_without_promotion @inputs_to_ivy_arrays @handle_array_function def layer_norm( x: Union[ivy.Array, ivy.NativeArray], normalized_idxs: List[int], /, *, scale: Optional[Union[ivy.Array, ivy.NativeArray]] = None, offset: Optional[Union[ivy.Array, ivy.NativeArray]] = None, eps: float = 1e-05, new_std: float = 1.0, out: Optional[ivy.Array] = None, ) -> ivy.Array: """Apply Layer Normalization over a mini-batch of inputs. Parameters ---------- x Input array normalized_idxs Indices to apply the normalization to. scale Learnable gamma variables for elementwise post-multiplication, default is ``None``. offset Learnable beta variables for elementwise post-addition, default is ``None``. eps small constant to add to the denominator. Default is ``1e-05`` new_std The standard deviation of the new normalized values. Default is ``1``. out optional output array, for writing the result to. It must have a shape that the inputs broadcast to. Returns ------- ret The layer after applying layer normalization. Examples -------- With :class:`ivy.Array` input: >>> x = ivy.array([[1.0, 2.0], [3.0, 4.0]]) >>> y = ivy.layer_norm(x, [0, 1], new_std=2.0) >>> print(y) ivy.array([[-2.68 , -0.894], [ 0.894, 2.68 ]]) >>> x = ivy.array([[1., 2., 3.], [4., 5., 6.]]) >>> y = ivy.zeros((2, 3)) >>> ivy.layer_norm(x, [0], out=y) >>> print(y) ivy.array([[-1., -1., -1.], [ 1., 1., 1.]]) >>> x = ivy.array([[0.0976, -0.3452, 1.2740], ... [0.1047, 0.5886, 1.2732], ... [0.7696, -1.7024, -2.2518]]) >>> y = ivy.layer_norm(x, [0, 1], eps=0.001, ... new_std=1.5, scale=0.5, offset=[0.5, 0.02, 0.1]) >>> print(y) ivy.array([[ 0.826, -0.178, 0.981 ], [ 0.831, 0.421, 0.981 ], [ 1.26 , -1.05 , -1.28 ]]) With a mix of :class:`ivy.Array` and :class:`ivy.Container` inputs: >>> x = ivy.array([[1., 2., 3.], [4., 5., 6.]]) >>> normalized_idxs = ivy.Container({'a': [0], 'b': [1]}) >>> y = ivy.layer_norm(x, normalized_idxs, new_std=1.25, offset=0.2) >>> print(y) { a: ivy.array([[-1.25, -1.25, -1.25], [1.25, 1.25, 1.25]]), b: ivy.array([[-1.53, 0., 1.53], [-1.53, 0., 1.53]]) } With one :class:`ivy.Container` input: >>> x = ivy.Container({'a': ivy.array([7., 10., 12.]), ... 'b': ivy.array([[1., 2., 3.], [4., 5., 6.]])}) >>> normalized_idxs = [0] >>> y = ivy.layer_norm(x, normalized_idxs, eps=1.25, scale=0.3) >>> print(y) { a: ivy.array([-0.34198591, 0.04274819, 0.29923761]), b: ivy.array([[-0.24053511, -0.24053511, -0.24053511], [0.24053511, 0.24053511, 0.24053511]]) } With multiple :class:`ivy.Container` inputs: >>> x = ivy.Container(a=ivy.array([7.0, 10.0, 12.0]), ... b=ivy.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])) >>> normalized_idxs = ivy.Container(a=[0], b=[1]) >>> new_std = ivy.Container(a=1.25, b=1.5) >>> bias = ivy.Container(a=[0.2, 0.5, 0.7], b=0.3) >>> y = ivy.layer_norm(x, normalized_idxs, new_std=new_std, offset=0.2) >>> print(y) { a: ivy.array([-1.62, 0.203, 1.42]), b: ivy.array([[-1.84, 0., 1.84], [-1.84, 0., 1.84]]) } # Both the description and the type hints above assumes an array input for simplicity, but this function is *nestable*, and therefore also accepts :class:`ivy.Container` instances in place of any of the arguments. 
""" mean = ivy.mean(x, axis=normalized_idxs, keepdims=True) var = ivy.var(x, axis=normalized_idxs, keepdims=True) x = (x - mean) / (var + eps) ** 0.5 if scale is not None: if offset is not None: return ivy.multiply( ivy.add(ivy.multiply(x, scale), offset), new_std, out=out ) return ivy.multiply(ivy.multiply(x, scale), new_std, out=out) return ivy.multiply(x, new_std, out=out) layer_norm.mixed_backend_wrappers = { "to_add": ( "handle_backend_invalid", "handle_out_argument", "inputs_to_native_arrays", "outputs_to_ivy_arrays", "handle_device", ), "to_skip": ("inputs_to_ivy_arrays",), }
ivy/ivy/functional/ivy/norms.py/0
{ "file_path": "ivy/ivy/functional/ivy/norms.py", "repo_id": "ivy", "token_count": 2438 }
49
# For Review """Collection of Ivy optimizers.""" # global import abc from typing import Union, Optional, Callable # local import ivy # Base # # -----# class Optimizer(abc.ABC): def __init__( self, lr: Union[float, Callable], inplace: bool = True, stop_gradients: bool = True, init_on_first_step: bool = False, trace_on_next_step: bool = False, fallback_to_non_traced: bool = False, device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None, ): """Construct a general Optimizer. This is an abstract class, and must be derived. Parameters ---------- lr Learning rate. inplace Whether to update the variables in-place, or to create new variable handles. This is only relevant for frameworks with stateful variables such as PyTorch. Default is ``True``, provided the backend framework supports it. stop_gradients Whether to stop the gradients of the variables after each gradient step. Default is ``True``. init_on_first_step Whether the optimizer is initialized on the first step. Default is ``False``. trace_on_next_step Whether to trace the optimizer on the next step. Default is ``False``. fallback_to_non_traced Whether to fall back to non-traced forward call in the case that an error is raised during the traced forward pass. Default is ``True``. device Device on which to create the layer's variables 'cuda:0', 'cuda:1', 'cpu' etc. (Default value = None) """ self._lr = lr self._inplace = inplace self._stop_gradients = stop_gradients self._init_on_first_step = init_on_first_step self._initialized = not init_on_first_step self._trace_on_next_step = trace_on_next_step self._fallback_to_non_traced = fallback_to_non_traced self._dev = ivy.default(device, ivy.default_device()) self._count = ivy.array([0], device=self._dev) self._traced_step_fn = None self._traced = False # Private # # --------# # Abstract # @abc.abstractmethod def _step(self, v: ivy.Container, grads: ivy.Container): """Update nested variables container v from update step, using nested grads container. Override this abstract method with child class custom implementation. Parameters ---------- v Nested variables to update. grads Nested gradients to update. Returns ------- ret The updated variables, following update step. """ raise ivy.utils.exceptions.IvyNotImplementedException # Given # def _step_fn( self, v: ivy.Container, grads: ivy.Container, ignore_missing: bool = False ): """Call the custom child step function implementation. Parameters ---------- v Nested variables to update. grads Nested gradients to update. ignore_missing Whether to ignore keys missing from the gradients which exist in the variables. Default is ``False`` """ if ignore_missing: return v.cont_set_at_keys(self._step(v.cont_at_key_chains(grads), grads)) return self._step(v, grads) # Public # # -------# # Abstract # @abc.abstractmethod def set_state(self, state: ivy.Container): """Set state of the optimizer. Parameters ---------- state Nested state to update. """ raise ivy.utils.exceptions.IvyNotImplementedException # Given # def step( self, v: ivy.Container, grads: ivy.Container, ignore_missing: bool = False ): """Update nested variables container v from overridden private self._step. Parameters ---------- v Nested variables to update. grads Nested gradients to update. ignore_missing Whether to ignore keys missing from the gradients which exist in the variables. Default is ``False``. Returns ------- ret The updated variables, following update step. 
""" self._count += 1 self._initialized = True return self._step_fn(v, grads, ignore_missing) # Optimizers # # -----------# class SGD(Optimizer): def __init__( self, lr: float = 1e-4, inplace: bool = True, stop_gradients: bool = True, trace_on_next_step: bool = False, ): """Construct a Stochastic-Gradient-Descent (SGD) optimizer. Parameters ---------- lr Learning rate, default is ``1e-4``. inplace Whether to update the variables in-place, or to create new variable handles. This is only relevant for frameworks with stateful variables such as PyTorch. Default is ``True``, provided the backend framework supports it. stop_gradients Whether to stop the gradients of the variables after each gradient step. Default is ``True``. trace_on_next_step Whether to trace the optimizer on the next step. Default is ``False``. """ Optimizer.__init__( self, lr, inplace, stop_gradients, trace_on_next_step=trace_on_next_step ) # Custom Step def _step(self, v: ivy.Container, grads: ivy.Container): """Update nested variables container v by gradient descent step, using nested gradients container. Parameters ---------- v Nested variables to update. grads Nested gradients to update. Returns ------- ret The new updated variables container, following gradient descent step. """ return ivy.gradient_descent_update( v, grads, self._lr if isinstance(self._lr, float) else self._lr(), stop_gradients=self._stop_gradients, ) def set_state(self, state: ivy.Container): """Set state of the optimizer. Parameters ---------- state Nested state to update. """ pass @property def state(self): return ivy.Container({}) class LARS(Optimizer): def __init__( self, lr: float = 1e-4, decay_lambda: float = 0, inplace: bool = True, stop_gradients: bool = True, trace_on_next_step: bool = False, ): """Construct a Layer-wise Adaptive Rate Scaling (LARS) optimizer. Parameters ---------- lr Learning rate, default is ``1e-4``. decay_lambda The factor used for weight decay. Default is ``0``. inplace Whether to update the variables in-place, or to create new variable handles. This is only relevant for frameworks with stateful variables such as PyTorch. Default is ``True``, provided the backend framework supports it. stop_gradients Whether to stop the gradients of the variables after each gradient step. Default is ``True``. trace_on_next_step Whether to trace the optimizer on the next step. Default is ``False``. """ self._decay_lambda = decay_lambda Optimizer.__init__( self, lr, inplace, stop_gradients, trace_on_next_step=trace_on_next_step ) # Custom Step def _step(self, v: ivy.Container, grads: ivy.Container): """Update nested variables container v by gradient descent step, using nested gradients container. Parameters ---------- v Nested variables to update. grads Nested gradients to update. Returns ------- ret The new updated variables container, following LARS step. """ return ivy.lars_update( v, grads, self._lr if isinstance(self._lr, float) else self._lr(), decay_lambda=self._decay_lambda, stop_gradients=self._stop_gradients, ) def set_state(self, state: ivy.Container): """Set state of the optimizer. Parameters ---------- state Nested state to update. """ pass @property def state(self): return ivy.Container({}) class Adam(Optimizer): def __init__( self, lr: float = 1e-4, beta1: float = 0.9, beta2: float = 0.999, epsilon: float = 1e-07, inplace: bool = True, stop_gradients: bool = True, trace_on_next_step: bool = False, device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None, ): """Construct an ADAM optimizer. 
Parameters ---------- lr Learning rate, default is ``1e-4``. beta1 gradient forgetting factor, default is ``0.9`` beta2 second moment of gradient forgetting factor, default is ``0.999`` epsilon divisor during adam update, preventing division by zero, default is ``1e-07`` inplace Whether to update the variables in-place, or to create new variable handles. This is only relevant for frameworks with stateful variables such as PyTorch. Default is ``True``, provided the backend framework supports it. stop_gradients Whether to stop the gradients of the variables after each gradient step. Default is ``True``. trace_on_next_step Whether to trace the optimizer on the next step. Default is ``False``. device Device on which to create the layer's variables 'cuda:0', 'cuda:1', 'cpu' etc. (Default value = None) """ self._beta1 = beta1 self._beta2 = beta2 self._epsilon = epsilon self._mw = None self._vw = None self._first_pass = True self._should_trace = False Optimizer.__init__( self, lr, inplace, stop_gradients, True, trace_on_next_step, device=device ) # Custom Step def _step(self, v: ivy.Container, grads: ivy.Container): """Update nested variables container v by Adam update step, using nested grads container. Parameters ---------- v Nested variables to update. grads Nested gradients to update. Returns ------- ret The updated variables, following Adam update step. """ if self._first_pass: self._mw = grads self._vw = grads**2 self._first_pass = False new_v, self._mw, self._vw = ivy.adam_update( v, grads, self._lr if isinstance(self._lr, float) else self._lr(), self._mw, self._vw, self._count, beta1=self._beta1, beta2=self._beta2, epsilon=self._epsilon, stop_gradients=self._stop_gradients, ) return new_v def set_state(self, state: ivy.Container): """Set state of the optimizer. Parameters ---------- state Nested state to update. """ self._mw = state.mw self._vw = state.vw @property def state(self): return ivy.Container({"mw": self._mw, "vw": self._vw}) class AdamW(Adam): def __init__( self, lr: float = 1e-4, beta1: float = 0.9, beta2: float = 0.999, epsilon: float = 1e-07, weight_decay: float = 0.0, inplace: bool = True, stop_gradients: bool = True, trace_on_next_step: bool = False, device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None, ): """Construct an ADAMW optimizer. Parameters ---------- lr Learning rate, default is ``1e-4``. beta1 gradient forgetting factor, default is ``0.9`` beta2 second moment of gradient forgetting factor, default is ``0.999`` epsilon divisor during adamw update, preventing division by zero, default is ``1e-07`` weight_decay weight decay coefficient, default is ``0.0`` inplace Whether to update the variables in-place, or to create new variable handles. This is only relevant for frameworks with stateful variables such as PyTorch. Default is ``True``, provided the backend framework supports it. stop_gradients Whether to stop the gradients of the variables after each gradient step. Default is ``True``. trace_on_next_step Whether to trace the optimizer on the next step. Default is ``False``. device Device on which to create the layer's variables 'cuda:0', 'cuda:1', 'cpu' etc. (Default value = None) """ self._weight_decay = weight_decay super().__init__( lr, beta1, beta2, epsilon, inplace, stop_gradients, trace_on_next_step, device, ) def _step(self, v: ivy.Container, grads: ivy.Container): """Update nested variables container v by AdamW update step, using nested grads container. Parameters ---------- v Nested variables to update. grads Nested gradients to update. 
Returns ------- ret The updated variables, following AdamW update step. """ # Apply L2 regularization directly to the parameters if self._weight_decay != 0: grads += self._weight_decay * v return super()._step(v, grads) class LAMB(Optimizer): def __init__( self, lr: float = 1e-4, beta1: float = 0.9, beta2: float = 0.999, epsilon: float = 1e-07, max_trust_ratio: float = 10, decay_lambda: float = 0, inplace: bool = True, stop_gradients: bool = True, trace_on_next_step: bool = False, device: Optional[Union[ivy.Device, ivy.NativeDevice]] = None, ): """Construct an LAMB optimizer. Parameters ---------- lr Learning rate, default is ``1e-4``. beta1 gradient forgetting factor, default is ``0.9`` beta2 second moment of gradient forgetting factor, default is ``0.999`` epsilon divisor during adam update, preventing division by zero, default is ``1e-07`` max_trust_ratio The max value of the trust ratio; the ratio between the norm of the layer weights and norm of gradients update. Default is ``10``. decay_lambda The factor used for weight decay. Default is ``0``. inplace Whether to update the variables in-place, or to create new variable handles. This is only relevant for frameworks with stateful variables such as PyTorch. Default is ``True``, provided the backend framework supports it. stop_gradients Whether to stop the gradients of the variables after each gradient step. Default is ``True``. trace_on_next_step Whether to trace the optimizer on the next step. Default is ``False``. device Device on which to create the layer's variables 'cuda:0', 'cuda:1', 'cpu' etc. (Default value = None) """ Optimizer.__init__( self, lr, inplace, stop_gradients, True, trace_on_next_step, device=device ) self._beta1 = beta1 self._beta2 = beta2 self._epsilon = epsilon self._mw = None self._vw = None self._max_trust_ratio = max_trust_ratio self._decay_lambda = decay_lambda self._first_pass = True # Custom Step def _step(self, v: ivy.Container, grads: ivy.Container): """Update nested variables container v by LAMB update step, using nested grads container. Parameters ---------- v Nested variables to update. grads Nested gradients to update. Returns ------- ret The updated variables, following LAMB update step. """ if self._first_pass: self._mw = grads self._vw = grads**2 self._first_pass = False new_v, self._mw, self._vw = ivy.lamb_update( v, grads, self._lr if isinstance(self._lr, float) else self._lr(), self._mw, self._vw, self._count, beta1=self._beta1, beta2=self._beta2, epsilon=self._epsilon, max_trust_ratio=self._max_trust_ratio, decay_lambda=self._decay_lambda, stop_gradients=self._stop_gradients, ) return new_v def set_state(self, state: ivy.Container): """Set state of the optimizer. Parameters ---------- state Nested state to update. """ self._mw = state.mw self._vw = state.vw @property def state(self): return ivy.Container({"mw": self._mw, "vw": self._vw})
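# --- Usage sketch (editor's addition; a hedged example of the optimizer API
# above. Variables and gradients are nested ivy.Containers and ``step`` returns
# the updated variables. The gradients here are dummy values purely to
# illustrate the call; a real training loop would obtain them from ivy's
# gradient utilities. Assumes a NumPy backend is available.)
if __name__ == "__main__":
    ivy.set_backend("numpy")

    w = ivy.Container(linear=ivy.array([1.0, 2.0, 3.0]))
    grads = ivy.Container(linear=ivy.array([0.1, -0.2, 0.3]))  # dummy gradients

    opt = Adam(lr=1e-2)
    w = opt.step(w, grads)  # the first call also initialises the Adam moments
    w = opt.step(w, grads)  # later calls keep updating the internal state
    print(w.linear)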
ivy/ivy/stateful/optimizers.py/0
{ "file_path": "ivy/ivy/stateful/optimizers.py", "repo_id": "ivy", "token_count": 8251 }
50
import cProfile import pstats import subprocess import logging from tempfile import NamedTemporaryFile from importlib.util import find_spec is_snakeviz = find_spec("snakeviz") class Profiler(cProfile.Profile): """A Profiler class that allows code profiling. Attributes ---------- print_stats (bool, optional): prints profiling statistics. viz (bool, optional): visualizes the results using `snakeviz`. Bonus args and kwargs are passed to cProfile.Profile __init__ Example ------- with Profiler(print_stats=False, viz=True): fn(x, y) """ def __init__(self, *args, **kwargs): self.print_stats = kwargs.pop("print_stats", True) self.viz = kwargs.pop("viz", False) super().__init__(*args, **kwargs) def __enter__(self, *args, **kwargs): self.pr = super().__enter__(*args, **kwargs) return self.pr def __exit__(self, *exc): super().__exit__(*exc) if exc == (None, None, None): stats = pstats.Stats(self.pr) stats.sort_stats(pstats.SortKey.TIME) if self.viz: if is_snakeviz: # creates a temp file that gets automatically # deleted when everything is done with NamedTemporaryFile(suffix=".prof") as f: stats.dump_stats(filename=f.name) subprocess.run(["snakeviz", f"{f.name}"]) else: logging.warning("snakeviz must be installed for visualization") if self.print_stats: stats.print_stats() def tensorflow_profile_start( logdir: str, host_tracer_level: int = 2, python_tracer_level: int = 0, device_tracer_level: int = 1, delay_ms: int = None, ): """Initialize and start the profiler. Parameters ---------- logdir: str Directory where the profile data will be saved to. host_tracer_level: int Adjust CPU tracing level. Values are: 1 - critical info only, 2 - info, 3 - verbose. [default value is 2] python_tracer_level: int Toggle tracing of Python function calls. Values are: 1 - enabled, 0 - disabled [default value is 0] device_tracer_level: int Adjust device (TPU/GPU) tracing level. Values are: 1 - enabled, 0 - disabled [default value is 1] delay_ms: int Requests for all hosts to start profiling at a timestamp that is delay_ms away from the current time. delay_ms is in milliseconds. If zero, each host will start profiling immediately upon receiving the request. Default value is None, allowing the profiler guess the best value. Save the weights on the Module. Returns ------- None """ # noqa: E501 from tensorflow.profiler.experimental import ProfilerOptions, start options = ProfilerOptions( host_tracer_level=host_tracer_level, python_tracer_level=python_tracer_level, device_tracer_level=device_tracer_level, delay_ms=delay_ms, ) start(logdir, options=options) def tensorflow_profile_stop(): """Stop the profiler.""" from tensorflow.profiler.experimental import stop stop() def torch_profiler_init( logdir=None, activities=None, schedule=None, on_trace_ready=None, record_shapes=False, profile_memory=False, with_stack=False, with_flops=False, with_modules=False, experimental_config=None, ): """Initialize and returns a Torch profiler instance. Parameters ---------- logdir : str Directory where the profile data will be saved to. activities : iterable list of activity groups (CPU, CUDA) to use in profiling, supported values: ``torch.profiler.ProfilerActivity.CPU``, ``torch.profiler.ProfilerActivity.CUDA``. Default value: ProfilerActivity.CPU and (when available) ProfilerActivity.CUDA. schedule : Callable callable that takes step (int) as a single parameter and returns ``ProfilerAction`` value that specifies the profiler action to perform at each step. 
    on_trace_ready : Callable
        callable that is called at each step when ``schedule`` returns
        ``ProfilerAction.RECORD_AND_SAVE`` during the profiling.
    record_shapes : bool
        save information about operator's input shapes.
    profile_memory : bool
        track tensor memory allocation/deallocation.
    with_stack : bool
        record source information (file and line number) for the ops.
    with_flops : bool
        use a formula to estimate the FLOPs (floating point operations) of specific
        operators (matrix multiplication and 2D convolution).
    with_modules : bool
        record module hierarchy (including function names) corresponding to the
        callstack of the op. e.g. if module A's forward calls module B's forward,
        which contains an aten::add op, then aten::add's module hierarchy is A.B.
        Note that this support exists, at the moment, only for TorchScript models
        and not eager mode models.
    experimental_config : _ExperimentalConfig
        A set of experimental options used for Kineto library features.
        Note, backward compatibility is not guaranteed.

    Returns
    -------
    Torch profiler instance.
    """  # noqa: E501
    from torch.profiler import profile, tensorboard_trace_handler

    profiler = profile(
        activities=activities,
        schedule=schedule,
        on_trace_ready=(
            tensorboard_trace_handler(logdir)
            if on_trace_ready is None and logdir is not None
            else on_trace_ready
        ),
        record_shapes=record_shapes,
        profile_memory=profile_memory,
        with_stack=with_stack,
        with_flops=with_flops,
        with_modules=with_modules,
        experimental_config=experimental_config,
    )
    return profiler


def torch_profiler_start(profiler):
    """Start the profiler.

    Parameters
    ----------
    profiler : torch.profiler.profile
        Torch profiler instance.

    Returns
    -------
    None
    """
    profiler.start()


def torch_profiler_stop(profiler):
    """Stop the profiler.

    Parameters
    ----------
    profiler : torch.profiler.profile
        Torch profiler instance.

    Returns
    -------
    None
    """
    from torch.autograd.profiler import KinetoStepTracker
    from torch.profiler.profiler import PROFILER_STEP_NAME

    profiler.stop()
    KinetoStepTracker.erase_step_count(PROFILER_STEP_NAME)
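# --- Usage sketch (editor's addition; a minimal example of the cProfile-based
# Profiler defined above. ``viz=True`` would additionally require snakeviz to
# be installed; here only the printed statistics are requested.)
if __name__ == "__main__":

    def _busy_work(n=100000):
        return sum(i * i for i in range(n))

    with Profiler(print_stats=True, viz=False):
        _busy_work()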
ivy/ivy/utils/profiler.py/0
{ "file_path": "ivy/ivy/utils/profiler.py", "repo_id": "ivy", "token_count": 2647 }
51
from importlib.util import find_spec
import os


# Build the list of backends that can be used for testing: a backend counts as
# available if it is importable, or if it is installed under the given
# framework path.
def _available_frameworks(path="/opt/fw/"):
    ret = []
    for backend in ["numpy", "jax", "tensorflow", "torch", "paddle"]:
        if find_spec(backend) is not None:
            ret.append(backend)
        elif os.path.exists(f"{path}{backend}"):
            ret.append(backend)
    return ret


available_frameworks = _available_frameworks()
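# --- Usage sketch (editor's addition; a hedged example of how this list is
# typically consumed: skipping tests for backends that are not installed.
# pytest is assumed here purely for illustration.)
import pytest


@pytest.mark.skipif(
    "torch" not in available_frameworks, reason="torch backend is not available"
)
def test_torch_backend_importable():
    import torch

    assert torch.tensor([1.0]).item() == 1.0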
ivy/ivy_tests/test_ivy/helpers/available_frameworks.py/0
{ "file_path": "ivy/ivy_tests/test_ivy/helpers/available_frameworks.py", "repo_id": "ivy", "token_count": 184 }
52
from .base import FrontendConfig, SupportedDeviecs, SupportedDtypes import mxnet as mx def get_config(): return MXNetFrontendConfig() class MXNetFrontendConfig(FrontendConfig): Dtype = mx.numpy.dtype Device = mx.Context valid_devices = ("cpu", "gpu") invalid_devices = ("tpu",) valid_dtypes = [ "int8", "int16", "int32", "int64", "uint8", "uint16", "uint32", "uint64", "bfloat16", "float16", "float32", "float64", "complex64", "complex128", "bool", ] invalid_dtypes = [] valid_numeric_dtypes = [ "int8", "int16", "int32", "int64", "uint8", "uint16", "uint32", "uint64", "bfloat16", "float16", "float32", "float64", "complex64", "complex128", ] invalid_numeric_dtypes = [] valid_int_dtypes = [ "int8", "int16", "int32", "int64", "uint8", "uint16", "uint32", "uint64", ] invalid_int_dtypes = [] valid_uint_dtypes = [ "uint8", "uint16", "uint32", "uint64", ] invalid_uint_dtypes = [] valid_float_dtypes = [ "bfloat16", "float16", "float32", "float64", ] invalid_float_dtypes = [] valid_complex_dtypes = [ "complex64", "complex128", ] invalid_complex_dtypes = [] @property def supported_devices(self): return SupportedDeviecs( valid_devices=self.valid_devices, invalid_devices=self.invalid_devices ) @property def supported_dtypes(self): return SupportedDtypes( valid_dtypes=self.valid_dtypes, invalid_dtypes=self.invalid_dtypes, valid_numeric_dtypes=self.valid_numeric_dtypes, invalid_numeric_dtypes=self.invalid_numeric_dtypes, valid_int_dtypes=self.valid_int_dtypes, invalid_int_dtypes=self.invalid_int_dtypes, valid_uint_dtypes=self.valid_uint_dtypes, invalid_uint_dtypes=self.invalid_uint_dtypes, valid_float_dtypes=self.valid_float_dtypes, invalid_float_dtypes=self.invalid_float_dtypes, valid_complex_dtypes=self.valid_complex_dtypes, invalid_complex_dtypes=self.invalid_complex_dtypes, ) def native_array(self, x): return mx.np.array(x) def is_native_array(self, x): return isinstance(x, (mx.np.ndarray, mx.gluon.Parameter)) def to_numpy(self, x): return x.asnumpy() def as_native_dtype(self, dtype: str): return mx.np.array([], dtype=dtype).dtype def as_native_device(self, device: str): return mx.Context(device) def isscalar(self, x): return x.ndim == 0
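# --- Usage sketch (editor's addition; a hedged sketch of how a frontend config
# like the one above is typically consumed by the test helpers: construct it,
# then use its conversion and introspection hooks. It assumes mxnet is
# installed; the printed values are indicative only.)
if __name__ == "__main__":
    cfg = get_config()

    arr = cfg.native_array([1.0, 2.0, 3.0])
    print(cfg.is_native_array(arr))        # True for mx.np.ndarray inputs
    print(cfg.to_numpy(arr).shape)         # (3,)
    print(cfg.as_native_dtype("float32"))  # the corresponding mxnet dtype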
ivy/ivy_tests/test_ivy/test_frontends/config/mxnet.py/0
{ "file_path": "ivy/ivy_tests/test_ivy/test_frontends/config/mxnet.py", "repo_id": "ivy", "token_count": 1507 }
53
# global from hypothesis import given, strategies as st, assume import numpy as np # local import ivy_tests.test_ivy.helpers as helpers from ivy_tests.test_ivy.helpers import handle_frontend_method, BackendHandler from ivy_tests.test_ivy.test_functional.test_core.test_statistical import ( _get_castable_dtype, ) from ivy_tests.test_ivy.test_frontends.test_jax.test_numpy.test_manipulations import ( _get_input_and_reshape, ) CLASS_TREE = "ivy.functional.frontends.jax.numpy.ndarray" # --- Helpers --- # # --------------- # @st.composite def _at_helper(draw): _, data, shape = draw( helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid", prune_function=False), num_arrays=2, shared_dtype=True, min_num_dims=1, ret_shape=True, ) ) axis = draw(helpers.get_axis(shape=shape, force_tuple=True)) index = () for a in axis: index = index + (draw(st.integers(min_value=0, max_value=shape[a] - 1)),) return data, index @st.composite def _get_dtype_input_and_vectors(draw): dim_size = draw(helpers.ints(min_value=2, max_value=5)) dtype = draw(helpers.get_dtypes("numeric", index=1, full=False)) vec1 = draw( helpers.array_values( dtype=dtype[0], shape=(dim_size, dim_size), min_value=2, max_value=5 ) ) vec2 = draw( helpers.array_values( dtype=dtype[0], shape=(dim_size, dim_size), min_value=2, max_value=5 ) ) return dtype, [vec1, vec2] @st.composite def _get_dtype_x_and_int(draw, *, dtype="numeric"): x_dtype, x = draw( helpers.dtype_and_values( available_dtypes=helpers.get_dtypes(dtype), large_abs_safety_factor=2, small_abs_safety_factor=2, safety_factor_scale="log", ) ) pow_dtype, x_int = draw( helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("integer"), min_value=0, max_value=10, max_num_dims=0, max_dim_size=1, small_abs_safety_factor=2, large_abs_safety_factor=2, safety_factor_scale="log", ) ) x_dtype = x_dtype + pow_dtype return x_dtype, x, x_int # shifting helper @st.composite def _get_dtype_x_and_int_shift(draw, dtype): x_dtype, x = draw( helpers.dtype_and_values( available_dtypes=helpers.get_dtypes(dtype), num_arrays=2, shared_dtype=True, ) ) x_dtype = x_dtype x[1] = np.asarray(np.clip(x[0], 0, np.iinfo(x_dtype[0]).bits - 1), dtype=x_dtype[0]) return x_dtype, x[0], x[1] # repeat @st.composite def _repeat_helper(draw): shape = draw(st.shared(helpers.get_shape(min_num_dims=1), key="value_shape")) axis = draw( st.shared( st.one_of(st.none(), helpers.get_axis(shape=shape, max_size=1)), key="axis" ) ) if not isinstance(axis, int) and axis is not None: axis = axis[0] repeat_shape = ( (draw(st.one_of(st.just(1), st.just(shape[axis]))),) if axis is not None else (1,) ) repeat = draw( helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("integer"), shape=repeat_shape, min_value=0, max_value=10, ) ) return repeat # searchsorted @st.composite def _searchsorted(draw): dtype_x, x = draw( helpers.dtype_and_values( available_dtypes=helpers.get_dtypes( "numeric", full=False, key="searchsorted" ), shape=(draw(st.integers(min_value=1, max_value=10)),), ), ) dtype_v, v = draw( helpers.dtype_and_values( available_dtypes=helpers.get_dtypes( "numeric", full=False, key="searchsorted" ), min_num_dims=1, ) ) input_dtypes = dtype_x + dtype_v xs = x + v side = draw(st.sampled_from(["left", "right"])) sorter = None xs[0] = np.sort(xs[0], axis=-1) return input_dtypes, xs, side, sorter # squeeze @st.composite def _squeeze_helper(draw): shape = draw(st.shared(helpers.get_shape(), key="shape")) valid_axes = [idx for idx in range(len(shape)) if shape[idx] == 1] + [None] return 
draw(st.sampled_from(valid_axes)) @st.composite def _transpose_helper(draw): dtype_x = draw( helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid", prune_function=False), min_num_dims=2, max_num_dims=2, min_dim_size=2, ) ) _, data = dtype_x x = data[0] xT = np.transpose(x) return x, xT # swapaxes @st.composite def dtype_x_axis(draw): dtype, x, x_shape = draw( helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), min_num_dims=1, max_num_dims=5, ret_shape=True, ) ) axis1, axis2 = draw( helpers.get_axis( shape=x_shape, sort_values=False, unique=True, min_size=2, max_size=2, force_tuple=True, ) ) return dtype, x, axis1, axis2 # --- Main --- # # ------------ # @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="__abs__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), ), ) def test_jax___abs__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="__add__", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), shared_dtype=True, num_arrays=2, ), ) def test_jax___add__( dtype_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"other": x[1]}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="__and__", dtype_and_x=helpers.dtype_and_values( available_dtypes=st.one_of(st.just(("bool",)), helpers.get_dtypes("integer")), num_arrays=2, shared_dtype=True, ), ) def test_jax___and__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"other": x[1]}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="__div__", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), shared_dtype=True, num_arrays=2, ), ) def test_jax___div__( dtype_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_x assume(not np.any(np.isclose(x[1], 0))) helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"other": x[1]}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) 
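# --- Editor's note (hedged sketch): the helpers above follow hypothesis'
# composite-strategy pattern, where ``draw`` pulls values from sub-strategies
# so that related inputs (e.g. a dtype and an array of that dtype, or a shape
# and a matching axis) stay mutually consistent. Below is a minimal standalone
# illustration of the pattern, independent of the ivy test helpers.
from hypothesis import given as _given, strategies as _st


@_st.composite
def _square_matrix(draw):
    n = draw(_st.integers(min_value=1, max_value=4))
    row = _st.lists(
        _st.floats(min_value=-10, max_value=10, allow_nan=False),
        min_size=n,
        max_size=n,
    )
    return draw(_st.lists(row, min_size=n, max_size=n))


@_given(m=_square_matrix())
def test_square_matrix_is_square(m):
    assert all(len(r) == len(m) for r in m)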
@handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="__eq__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, ), ) def test_jax___eq__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_and_x assume("bfloat16" not in input_dtype) helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="__ge__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float_and_integer"), num_arrays=2, ), ) def test_jax___ge__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_and_x assume("bfloat16" not in input_dtype) helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # __getitem__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="__getitem__", dtype_x_index=helpers.dtype_array_query( available_dtypes=helpers.get_dtypes("valid"), ).filter(lambda x: not (isinstance(x[-1], np.ndarray) and x[-1].dtype == np.bool_)), ) def test_jax___getitem__( dtype_x_index, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x, index = dtype_x_index helpers.test_frontend_method( init_input_dtypes=[input_dtype[0]], backend_to_test=backend_fw, init_all_as_kwargs_np={"object": x}, method_input_dtypes=[*input_dtype[1:]], method_all_as_kwargs_np={"idx": index}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="__gt__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float_and_integer"), num_arrays=2, ), ) def test_jax___gt__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_and_x assume("bfloat16" not in input_dtype) helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="__invert__", dtype_and_x=helpers.dtype_and_values( available_dtypes=st.one_of(st.just(("bool",)), helpers.get_dtypes("integer")), ), ) def test_jax___invert__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": x[0], }, 
method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="__le__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float_and_integer"), num_arrays=2, ), ) def test_jax___le__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_and_x assume("bfloat16" not in input_dtype) helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="__lshift__", dtype_x_shift=_get_dtype_x_and_int_shift(dtype="signed_integer"), ) def test_jax___lshift__( dtype_x_shift, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x, shift = dtype_x_shift helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": x, }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"other": shift}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="__lt__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float_and_integer"), num_arrays=2, shared_dtype=True, ), ) def test_jax___lt__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="__matmul__", dtype_x=_get_dtype_input_and_vectors(), ) def test_jax___matmul__( dtype_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"other": x[1]}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="__mod__", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), shared_dtype=True, num_arrays=2, ), ) def test_jax___mod__( dtype_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_x assume(not np.any(np.isclose(x[1], 0))) helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, 
method_all_as_kwargs_np={"other": x[1]}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="__mul__", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), shared_dtype=True, num_arrays=2, ), ) def test_jax___mul__( dtype_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"other": x[1]}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="__ne__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, ), ) def test_jax___ne__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_and_x assume("bfloat16" not in input_dtype) helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="__neg__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("signed_integer"), ), ) def test_jax___neg__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="__or__", dtype_and_x=helpers.dtype_and_values( available_dtypes=st.one_of(st.just(("bool",)), helpers.get_dtypes("integer")), num_arrays=2, shared_dtype=True, ), ) def test_jax___or__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"other": x[1]}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="__pos__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), ), ) def test_jax___pos__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, on_device=on_device, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, 
method_all_as_kwargs_np={}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="__pow__", dtype_x_pow=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), num_arrays=2, shared_dtype=True, ), ) def test_jax___pow__( dtype_x_pow, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_x_pow helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="__radd__", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), shared_dtype=True, num_arrays=2, ), ) def test_jax___radd__( dtype_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"other": x[1]}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="__rand__", dtype_and_x=helpers.dtype_and_values( available_dtypes=st.one_of(st.just(("bool",)), helpers.get_dtypes("integer")), num_arrays=2, shared_dtype=True, ), ) def test_jax___rand__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"other": x[1]}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="__rdiv__", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), shared_dtype=True, num_arrays=2, ), ) def test_jax___rdiv__( dtype_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_x assume(not np.any(np.isclose(x[0], 0))) helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"other": x[1]}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="__rlshift__", dtype_x_shift=_get_dtype_x_and_int_shift(dtype="signed_integer"), ) def test_jax___rlshift__( dtype_x_shift, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x, shift = dtype_x_shift helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": shift, }, method_input_dtypes=input_dtype, 
method_all_as_kwargs_np={"other": x}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="__rmatmul__", dtype_x=_get_dtype_input_and_vectors(), ) def test_jax___rmatmul__( dtype_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"other": x[1]}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="__rmod__", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), shared_dtype=True, num_arrays=2, ), ) def test_jax___rmod__( dtype_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_x assume(not np.any(np.isclose(x[0], 0))) helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"other": x[1]}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="__rmul__", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), shared_dtype=True, num_arrays=2, ), ) def test_jax___rmul__( dtype_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"other": x[1]}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="__ror__", dtype_and_x=helpers.dtype_and_values( available_dtypes=st.one_of(st.just(("bool",)), helpers.get_dtypes("integer")), num_arrays=2, shared_dtype=True, ), ) def test_jax___ror__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"other": x[1]}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="__rpow__", dtype_x_pow=_get_dtype_x_and_int(), ) def test_jax___rpow__( dtype_x_pow, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x, pow = dtype_x_pow helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": pow[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[0], }, frontend=frontend, 
frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="__rrshift__", dtype_x_shift=_get_dtype_x_and_int_shift(dtype="signed_integer"), ) def test_jax___rrshift__( dtype_x_shift, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x, shift = dtype_x_shift helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": shift, }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"other": x}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="__rshift__", dtype_x_shift=_get_dtype_x_and_int_shift(dtype="signed_integer"), ) def test_jax___rshift__( dtype_x_shift, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x, shift = dtype_x_shift helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": x, }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"other": shift}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="__rsub__", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), shared_dtype=True, num_arrays=2, ), ) def test_jax___rsub__( dtype_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"other": x[1]}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="__rtruediv__", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), shared_dtype=True, num_arrays=2, ), ) def test_jax___rtruediv__( dtype_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_x assume(not np.any(np.isclose(x[0], 0))) helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"other": x[1]}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="__rxor__", dtype_and_x=helpers.dtype_and_values( available_dtypes=st.one_of(st.just(("bool",)), helpers.get_dtypes("integer")), num_arrays=2, shared_dtype=True, ), ) def test_jax___rxor__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"other": x[1]}, frontend=frontend, 
frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="__sub__", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), shared_dtype=True, num_arrays=2, ), ) def test_jax___sub__( dtype_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"other": x[1]}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="__truediv__", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), shared_dtype=True, num_arrays=2, large_abs_safety_factor=2, small_abs_safety_factor=2, safety_factor_scale="log", ), ) def test_jax___truediv__( dtype_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_x assume(not np.any(np.isclose(x[1], 0))) helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"other": x[1]}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="__xor__", dtype_and_x=helpers.dtype_and_values( available_dtypes=st.one_of(st.just(("bool",)), helpers.get_dtypes("integer")), num_arrays=2, shared_dtype=True, ), ) def test_jax___xor__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"other": x[1]}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="all", dtype_x_axis=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("valid"), force_int_axis=True, valid_axis=True, min_num_dims=1, ), keepdims=st.booleans(), ) def test_jax_array_all( dtype_x_axis, keepdims, on_device, frontend, frontend_method_data, init_flags, method_flags, backend_fw, ): input_dtype, x, axis = dtype_x_axis helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "axis": axis, "keepdims": keepdims, }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="any", dtype_x_axis=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("valid"), force_int_axis=True, valid_axis=True, min_num_dims=1, ), keepdims=st.booleans(), ) def test_jax_array_any( dtype_x_axis, keepdims, on_device, frontend, backend_fw, 
frontend_method_data, init_flags, method_flags, ): input_dtype, x, axis = dtype_x_axis helpers.test_frontend_method( init_input_dtypes=input_dtype, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "axis": axis, "keepdims": keepdims, }, frontend=frontend, backend_to_test=backend_fw, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="argmax", dtype_and_x=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("float"), force_int_axis=True, min_num_dims=1, valid_axis=True, ), keepdims=st.booleans(), ) def test_jax_array_argmax( dtype_and_x, keepdims, on_device, frontend, frontend_method_data, init_flags, method_flags, backend_fw, ): input_dtype, x, axis = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "axis": axis, "keepdims": keepdims, }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="argmin", dtype_and_x=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("float"), force_int_axis=True, min_num_dims=1, valid_axis=True, ), keepdims=st.booleans(), ) def test_jax_array_argmin( dtype_and_x, keepdims, on_device, frontend, frontend_method_data, init_flags, method_flags, backend_fw, ): input_dtype, x, axis = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "axis": axis, "keepdims": keepdims, }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="argsort", dtype_x_axis=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("numeric"), min_axis=-1, max_axis=0, min_num_dims=1, force_int_axis=True, ), ) def test_jax_array_argsort( dtype_x_axis, on_device, frontend, backend_fw, frontend_method_data, init_flags, method_flags, ): input_dtype, x, axis = dtype_x_axis helpers.test_frontend_method( init_input_dtypes=input_dtype, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "axis": axis, }, frontend=frontend, backend_to_test=backend_fw, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="astype", dtype_and_x=_get_castable_dtype(), ) def test_jax_array_astype( dtype_and_x, on_device, frontend, backend_fw, frontend_method_data, init_flags, method_flags, ): input_dtype, x, _, castable_dtype = dtype_and_x helpers.test_frontend_method( backend_to_test=backend_fw, init_input_dtypes=[input_dtype], init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=[input_dtype], method_all_as_kwargs_np={ "dtype": castable_dtype, }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @given( x_y_index=_at_helper(), ) def test_jax_array_at(x_y_index, 
backend_fw): with BackendHandler.update_backend(backend_fw) as ivy_backend: jax_frontend = ivy_backend.utils.dynamic_import.import_module( "ivy.functional.frontends.jax" ) xy, idx = x_y_index x = jax_frontend.Array(xy[0]) y = jax_frontend.Array(xy[1]) idx = idx[0] x_set = x.at[idx].set(y[idx]) assert x_set[idx] == y[idx] assert x.at[idx].get() == x[idx] @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="conj", dtype_and_x=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("real_and_complex"), ), ) def test_jax_array_conj( dtype_and_x, on_device, frontend, frontend_method_data, init_flags, method_flags, backend_fw, ): input_dtype, x, axis = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="conjugate", dtype_and_x=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("real_and_complex"), ), ) def test_jax_array_conjugate( dtype_and_x, on_device, frontend, frontend_method_data, backend_fw, init_flags, method_flags, ): input_dtype, x, axis = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend=frontend, frontend_method_data=frontend_method_data, backend_to_test=backend_fw, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="copy", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), min_num_dims=1, ), ) def test_jax_array_copy( dtype_x, on_device, frontend, frontend_method_data, backend_fw, init_flags, method_flags, ): input_dtype, x = dtype_x helpers.test_frontend_method( init_input_dtypes=input_dtype, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend=frontend, backend_to_test=backend_fw, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="cumprod", dtype_and_x=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("numeric"), min_num_dims=1, max_num_dims=5, min_value=-100, max_value=100, valid_axis=True, allow_neg_axes=False, max_axes_size=1, force_int_axis=True, ), ) def test_jax_array_cumprod( dtype_and_x, on_device, frontend, frontend_method_data, init_flags, method_flags, backend_fw, ): input_dtype, x, axis = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "axis": axis, }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="cumsum", dtype_and_x=_get_castable_dtype(), ) def test_jax_array_cumsum( dtype_and_x, on_device, frontend, frontend_method_data, backend_fw, init_flags, method_flags, ): input_dtype, x, axis, dtype = dtype_and_x 
helpers.test_frontend_method( init_input_dtypes=[input_dtype], init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=[input_dtype], method_all_as_kwargs_np={ "axis": axis, "dtype": dtype, }, frontend=frontend, backend_to_test=backend_fw, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="diagonal", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), min_num_dims=2, ), ) def test_jax_array_diagonal( dtype_and_x, on_device, frontend, backend_fw, frontend_method_data, init_flags, method_flags, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( backend_to_test=backend_fw, init_input_dtypes=input_dtype, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @given( dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid", prune_function=False) ), ) def test_jax_array_dtype( dtype_x, backend_fw, ): dtype, data = dtype_x with BackendHandler.update_backend(backend_fw) as ivy_backend: jax_frontend = ivy_backend.utils.dynamic_import.import_module( "ivy.functional.frontends.jax" ) x = jax_frontend.Array(data[0]) assert x.dtype == dtype[0] # max @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="max", dtype_and_x=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("float"), force_int_axis=True, min_num_dims=1, valid_axis=True, ), keepdims=st.booleans(), ) def test_jax_array_max( dtype_and_x, keepdims, on_device, frontend, backend_fw, frontend_method_data, init_flags, method_flags, ): input_dtype, x, axis = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "axis": axis, "keepdims": keepdims, }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="mean", dtype_and_x=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("float"), large_abs_safety_factor=24, small_abs_safety_factor=24, safety_factor_scale="log", force_int_axis=True, min_num_dims=1, valid_axis=True, ), keepdims=st.booleans(), ) def test_jax_array_mean( dtype_and_x, keepdims, on_device, frontend, frontend_method_data, init_flags, method_flags, backend_fw, ): input_dtype, x, axis = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "axis": axis, "keepdims": keepdims, }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, rtol_=1e-3, atol_=1e-3, ) # min @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="min", dtype_and_x=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("float"), force_int_axis=True, min_num_dims=1, valid_axis=True, ), keepdims=st.booleans(), ) def test_jax_array_min( dtype_and_x, keepdims, on_device, frontend, backend_fw, frontend_method_data, init_flags, 
method_flags, ): input_dtype, x, axis = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "axis": axis, "keepdims": keepdims, }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @given( dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid", prune_function=False) ), ) def test_jax_array_ndim( dtype_x, backend_fw, ): dtype, data = dtype_x with BackendHandler.update_backend(backend_fw) as ivy_backend: jax_frontend = ivy_backend.utils.dynamic_import.import_module( "ivy.functional.frontends.jax" ) x = jax_frontend.Array(data[0]) assert x.ndim == data[0].ndim @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="nonzero", dtype_and_x=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("valid"), min_num_dims=1, ), ) def test_jax_array_nonzero( dtype_and_x, on_device, frontend, frontend_method_data, init_flags, method_flags, backend_fw, ): input_dtype, x, axis = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @given(x_transpose=_transpose_helper()) def test_jax_array_property_T(x_transpose, backend_fw): with BackendHandler.update_backend(backend_fw) as ivy_backend: x, xT = x_transpose jax_frontend = ivy_backend.utils.dynamic_import.import_module( "ivy.functional.frontends.jax" ) x = jax_frontend.Array(x) assert np.array_equal(x.T, xT) # ptp @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="ptp", dtype_and_x_axis_dtype=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("float"), allow_inf=False, num_arrays=1, large_abs_safety_factor=24, small_abs_safety_factor=24, safety_factor_scale="log", min_num_dims=1, valid_axis=True, ), keep_dims=st.booleans(), ) def test_jax_array_ptp( dtype_and_x_axis_dtype, keep_dims, frontend, frontend_method_data, backend_fw, init_flags, method_flags, on_device, ): input_dtypes, x, axis = dtype_and_x_axis_dtype helpers.test_frontend_method( init_input_dtypes=input_dtypes, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtypes, method_all_as_kwargs_np={ "axis": axis, "out": None, "keepdims": keep_dims, }, frontend=frontend, backend_to_test=backend_fw, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="ravel", dtype_and_x=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("valid"), min_num_dims=1, max_num_dims=5, min_dim_size=2, max_dim_size=10, shape=helpers.get_shape( min_num_dims=2, max_num_dims=5, min_dim_size=2, max_dim_size=10 ), ), order=st.sampled_from(["C", "F"]), ) def test_jax_array_ravel( dtype_and_x, order, on_device, frontend, frontend_method_data, init_flags, method_flags, backend_fw, ): input_dtype, x, axis = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "order": order, }, 
frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="reshape", dtype_and_x_shape=_get_input_and_reshape(), order=st.sampled_from(["C", "F"]), input=st.booleans(), ) def test_jax_array_reshape( dtype_and_x_shape, order, input, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x, shape = dtype_and_x_shape if input: method_flags.num_positional_args = len(shape) kwargs = {f"{i}": shape[i] for i in range(len(shape))} else: kwargs = {"shape": shape} method_flags.num_positional_args = 1 kwargs["order"] = order helpers.test_frontend_method( backend_to_test=backend_fw, init_input_dtypes=input_dtype, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np=kwargs, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="round", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), allow_inf=False, ), decimals=st.one_of( st.integers(min_value=-10, max_value=10), ), ) def test_jax_array_round( dtype_x, decimals, frontend, frontend_method_data, backend_fw, init_flags, method_flags, on_device, ): input_dtype, x = dtype_x helpers.test_frontend_method( init_input_dtypes=input_dtype, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"decimals": decimals}, frontend=frontend, backend_to_test=backend_fw, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="searchsorted", dtype_x_v_side_sorter=_searchsorted(), ) def test_jax_array_searchsorted( dtype_x_v_side_sorter, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtypes, xs, side, sorter = dtype_x_v_side_sorter helpers.test_frontend_method( init_input_dtypes=input_dtypes, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": xs[0], }, method_input_dtypes=input_dtypes, method_all_as_kwargs_np={"v": xs[0], "side": side, "sorter": sorter}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @given( dtype_x_shape=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid", prune_function=False), ret_shape=True, ), ) def test_jax_array_shape( dtype_x_shape, backend_fw, ): _, data, shape = dtype_x_shape with BackendHandler.update_backend(backend_fw) as ivy_backend: jax_frontend = ivy_backend.utils.dynamic_import.import_module( "ivy.functional.frontends.jax" ) x = jax_frontend.Array(data[0]) assert x.shape == shape @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="sort", dtype_x_axis=helpers.dtype_values_axis( available_dtypes=["int64"], force_int_axis=True, min_axis=-1, max_axis=-1, min_dim_size=2, max_dim_size=100, min_num_dims=2, ), ) def test_jax_array_sort( dtype_x_axis, on_device, frontend, frontend_method_data, backend_fw, init_flags, method_flags, ): input_dtype, x, axis = dtype_x_axis helpers.test_frontend_method( backend_to_test=backend_fw, init_input_dtypes=input_dtype, init_all_as_kwargs_np={ "object": x[0], }, 
method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "axis": axis, }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="squeeze", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), shape=st.shared(helpers.get_shape(), key="shape"), ), axis=_squeeze_helper(), ) def test_jax_array_squeeze( dtype_and_x, axis, on_device, frontend, frontend_method_data, init_flags, method_flags, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, on_device=on_device, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "axis": axis, }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="std", dtype_x_axis=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("valid") ), ddof=st.booleans(), keepdims=st.booleans(), ) def test_jax_array_std( dtype_x_axis, backend_fw, frontend, ddof, keepdims, frontend_method_data, init_flags, method_flags, on_device, ): input_dtype, x, axis = dtype_x_axis helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, init_all_as_kwargs_np={ "object": x, }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "axis": axis, "ddof": ddof, "keepdims": keepdims, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # var @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="var", dtype_and_x=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("float"), large_abs_safety_factor=24, small_abs_safety_factor=24, safety_factor_scale="log", force_int_axis=True, min_num_dims=1, valid_axis=True, ), ddof=st.booleans(), keepdims=st.booleans(), ) def test_jax_array_var( dtype_and_x, keepdims, on_device, frontend, ddof, backend_fw, frontend_method_data, init_flags, method_flags, ): input_dtype, x, axis = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "axis": axis, "ddof": ddof, # You can adjust the ddof value as needed "keepdims": keepdims, }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, rtol_=1e-3, atol_=1e-3, ) @given( dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid", prune_function=False) ), ) def test_jax_ivy_array( dtype_x, backend_fw, ): _, data = dtype_x with BackendHandler.update_backend(backend_fw) as ivy_backend: jax_frontend = ivy_backend.utils.dynamic_import.import_module( "ivy.functional.frontends.jax" ) x = jax_frontend.Array(data[0]) ret = helpers.flatten_and_to_np(ret=x.ivy_array.data, backend=backend_fw) ret_gt = helpers.flatten_and_to_np(ret=data[0], backend=backend_fw) helpers.value_test( ret_np_flat=ret, ret_np_from_gt_flat=ret_gt, backend=backend_fw, ground_truth_backend="jax", ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="prod", dtype_x_axis=helpers.dtype_values_axis( 
available_dtypes=helpers.get_dtypes("numeric"), force_int_axis=True, valid_axis=True, min_dim_size=2, max_dim_size=10, min_num_dims=2, ), ) def test_jax_prod( dtype_x_axis, on_device, frontend, frontend_method_data, init_flags, method_flags, backend_fw, ): input_dtype, x, axis = dtype_x_axis helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "axis": axis, }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, atol_=1e-04, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="repeat", dtype_value=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"), ), axis=st.shared( st.one_of( st.none(), helpers.get_axis( shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"), max_size=1, ), ), key="axis", ), repeat=st.one_of(st.integers(1, 10), _repeat_helper()), # test_with_out=st.just(False), ) def test_jax_repeat( *, dtype_value, axis, repeat, on_device, frontend, frontend_method_data, backend_fw, init_flags, method_flags, ): input_dtype, x = dtype_value if not isinstance(repeat, int): repeat_dtype, repeat_list = repeat repeat = repeat_list[0] input_dtype += repeat_dtype # Skip the test if the backend is torch and the input data type is 'Float' or 'bool' if backend_fw == "torch" and ("float" in input_dtype or "bool" in input_dtype): return if backend_fw == "jax": return if backend_fw == "paddle" and "bool" in input_dtype: return if not isinstance(axis, int) and axis is not None: axis = axis[0] helpers.test_frontend_method( init_input_dtypes=[input_dtype[0]], init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=[input_dtype[0]], method_all_as_kwargs_np={"repeats": repeat, "axis": axis}, frontend=frontend, backend_to_test=backend_fw, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="sum", dtype_and_x=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("numeric"), min_num_dims=1, max_num_dims=5, min_dim_size=2, max_dim_size=10, valid_axis=True, force_int_axis=True, ), ) def test_jax_sum( dtype_and_x, on_device, frontend, frontend_method_data, backend_fw, init_flags, method_flags, ): input_dtype, x, axis = dtype_and_x helpers.test_frontend_method( backend_to_test=backend_fw, init_input_dtypes=input_dtype, init_all_as_kwargs_np={ "object": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "axis": axis, }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, atol_=1e-04, ) # swapaxes @handle_frontend_method( class_tree=CLASS_TREE, init_tree="jax.numpy.array", method_name="swapaxes", dtype_x_axis=dtype_x_axis(), ) def test_jax_swapaxes( dtype_x_axis, frontend, frontend_method_data, backend_fw, init_flags, method_flags, on_device, ): input_dtypes, x, axis1, axis2 = dtype_x_axis helpers.test_frontend_method( init_input_dtypes=input_dtypes, backend_to_test=backend_fw, method_input_dtypes=input_dtypes, init_all_as_kwargs_np={ "object": x[0], }, method_all_as_kwargs_np={ "axis1": axis1, "axis2": axis2, }, frontend=frontend, frontend_method_data=frontend_method_data, 
init_flags=init_flags, method_flags=method_flags, on_device=on_device, )
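
# Illustrative sketch only (hypothetical helper, not generated by
# handle_frontend_method): every method test above reduces to checking that an
# operation on ivy's jax frontend Array matches the ground-truth jax result.
# Assuming the numpy backend is available, a hand-rolled version of one such
# check could look like this; helpers.test_frontend_method automates the same
# comparison across hypothesis-drawn dtypes, shapes and flag combinations.
def _manual_frontend_add_sketch():
    import numpy as np

    import ivy
    import ivy.functional.frontends.jax as jax_frontend

    ivy.set_backend("numpy")
    x = jax_frontend.Array(np.array([1.0, 2.0, 3.0]))
    y = jax_frontend.Array(np.array([4.0, 5.0, 6.0]))
    res = x + y  # dispatches to the frontend Array.__add__
    # .ivy_array exposes the wrapped ivy.Array backing the frontend object
    assert np.allclose(ivy.to_numpy(res.ivy_array), [5.0, 7.0, 9.0])
    ivy.previous_backend()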
ivy/ivy_tests/test_ivy/test_frontends/test_jax/test_array.py/0
{ "file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_jax/test_array.py", "repo_id": "ivy", "token_count": 36636 }
54
# global import sys import numpy as np from hypothesis import strategies as st, assume # local import ivy_tests.test_ivy.helpers as helpers from ivy_tests.test_ivy.helpers import ( assert_all_close, handle_frontend_test, BackendHandler, ) from ivy_tests.test_ivy.test_functional.test_core.test_linalg import ( _get_dtype_and_matrix, _matrix_rank_helper, ) from ivy_tests.test_ivy.helpers.hypothesis_helpers.general_helpers import ( matrix_is_stable, ) # --- Helpers --- # # --------------- # # tensorinv @st.composite def _get_inv_square_matrices(draw): dim_size = draw(helpers.ints(min_value=1, max_value=9)) batch_shape = draw(st.sampled_from([2, 4, 6, 8])) generated_shape = (dim_size,) * batch_shape generated_ind = int(np.floor(len(generated_shape) / 2)) handpicked_shape, handpicked_ind = draw( st.sampled_from([[(24, 6, 4), 1], [(8, 3, 6, 4), 2], [(6, 7, 8, 16, 21), 3]]) ) shape, ind = draw( st.sampled_from( [(generated_shape, generated_ind), (handpicked_shape, handpicked_ind)] ) ) input_dtype = draw( helpers.get_dtypes("float", index=1, full=False).filter( lambda x: x not in ["float16", "bfloat16"] ) ) invertible = False while not invertible: a = draw( helpers.array_values( dtype=input_dtype[0], shape=shape, large_abs_safety_factor=24, small_abs_safety_factor=24, safety_factor_scale="log", ).filter(lambda x: helpers.matrix_is_stable(x)) ) try: np.linalg.tensorinv(a, ind) invertible = True except np.linalg.LinAlgError: pass return input_dtype, a, ind # tensorsolve @st.composite def _get_solve_matrices(draw): # batch_shape, random_size, shared # float16 causes a crash when filtering out matrices # for which `np.linalg.cond` is large. input_dtype_strategy = st.shared( st.sampled_from(draw(helpers.get_dtypes("float"))).filter( lambda x: "float16" not in x ), key="shared_dtype", ) input_dtype = draw(input_dtype_strategy) dim = draw(helpers.ints(min_value=2, max_value=5)) first_matrix = draw( helpers.array_values( dtype=input_dtype, shape=(dim, dim, dim, dim), min_value=1.2, max_value=5, ).filter(lambda x: np.linalg.det(x.reshape((dim**2, dim**2))) != 0) ) second_matrix = draw( helpers.array_values( dtype=input_dtype, shape=(dim, dim), min_value=1.2, max_value=3, ) ) return input_dtype, first_matrix, second_matrix @st.composite def norm_helper(draw): dtype, x = draw( helpers.dtype_and_values( shape=st.shared(helpers.get_shape(min_num_dims=1), key="shape"), available_dtypes=helpers.get_dtypes("valid"), min_num_dims=1, safety_factor_scale="log", large_abs_safety_factor=2, ) ) axis = draw( helpers.get_axis( shape=st.shared(helpers.get_shape(min_num_dims=1), key="shape"), ) ) if type(axis) in [tuple, list]: if len(axis) == 2: ord_param = draw( st.sampled_from(["fro", "nuc", 1, 2, -1, -2, np.inf, -np.inf]) ) else: axis = axis[0] ord_param = draw(st.sampled_from([0, 1, 2, -1, -2, np.inf, -np.inf])) else: ord_param = draw(st.sampled_from([0, 1, 2, -1, -2, np.inf, -np.inf])) keepdims = draw(st.booleans()) return dtype, x, ord_param, axis, keepdims # --- Main --- # # ------------ # # cholesky @handle_frontend_test( fn_tree="jax.numpy.linalg.cholesky", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=0, max_value=10, shape=helpers.ints(min_value=2, max_value=5).map(lambda x: (x, x)), ).filter( lambda x: "float16" not in x[0] and "bfloat16" not in x[0] and np.linalg.cond(x[1][0]) < 1 / sys.float_info.epsilon and np.linalg.det(x[1][0]) != 0 ), test_with_out=st.just(False), ) def test_jax_cholesky( *, dtype_and_x, on_device, fn_tree, frontend, test_flags, backend_fw, 
): dtype, x = dtype_and_x x = np.asarray(x[0], dtype=dtype[0]) # make symmetric positive-definite x = np.matmul(x.T, x) + np.identity(x.shape[0]) * 1e-3 helpers.test_frontend_function( input_dtypes=dtype, frontend=frontend, backend_to_test=backend_fw, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, rtol=1e-02, a=x, ) @handle_frontend_test( fn_tree="jax.numpy.linalg.cond", dtype_x_p=helpers.cond_data_gen_helper(), test_with_out=st.just(False), ) def test_jax_cond( *, dtype_x_p, test_flags, on_device, fn_tree, frontend, backend_fw, ): dtype, x = dtype_x_p helpers.test_frontend_function( input_dtypes=dtype, test_flags=test_flags, rtol=1e-01, atol=1e-01, frontend=frontend, backend_to_test=backend_fw, fn_tree=fn_tree, on_device=on_device, x=x[0], p=x[1], ) # det @handle_frontend_test( fn_tree="jax.numpy.linalg.det", dtype_and_x=_get_dtype_and_matrix(), test_with_out=st.just(False), ) def test_jax_det( *, dtype_and_x, on_device, fn_tree, frontend, backend_fw, test_flags, ): dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, rtol=1e-04, atol=1e-04, a=x[0], ) # eig @handle_frontend_test( fn_tree="jax.numpy.linalg.eig", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=0, max_value=10, shape=helpers.ints(min_value=2, max_value=5).map(lambda x: (x, x)), ).filter( lambda x: "float16" not in x[0] and "bfloat16" not in x[0] and np.linalg.cond(x[1][0]) < 1 / sys.float_info.epsilon and np.linalg.det(np.asarray(x[1][0])) != 0 ), test_with_out=st.just(False), ) def test_jax_eig( *, dtype_and_x, on_device, fn_tree, frontend, backend_fw, test_flags, ): dtype, x = dtype_and_x x = np.array(x[0], dtype=dtype[0]) """Make symmetric positive-definite since ivy does not support complex data dtypes currently.""" x = np.matmul(x.T, x) + np.identity(x.shape[0]) * 1e-3 ret, frontend_ret = helpers.test_frontend_function( input_dtypes=dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, test_values=False, a=x, ) with BackendHandler.update_backend(backend_fw) as ivy_backend: ret = [ivy_backend.to_numpy(x).astype(np.float64) for x in ret] frontend_ret = [x.astype(np.float64) for x in frontend_ret] L, Q = ret frontend_L, frontend_Q = frontend_ret assert_all_close( ret_np=Q @ np.diag(L) @ Q.T, ret_from_gt_np=frontend_Q @ np.diag(frontend_L) @ frontend_Q.T, atol=1e-02, backend=backend_fw, ground_truth_backend=frontend, ) # eigh @handle_frontend_test( fn_tree="jax.numpy.linalg.eigh", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=0, max_value=10, shape=helpers.ints(min_value=2, max_value=5).map(lambda x: (x, x)), ).filter( lambda x: "float16" not in x[0] and "bfloat16" not in x[0] and np.linalg.cond(x[1][0]) < 1 / sys.float_info.epsilon and np.linalg.det(np.asarray(x[1][0])) != 0 ), UPLO=st.sampled_from(("L", "U")), symmetrize_input=st.booleans(), test_with_out=st.just(False), ) def test_jax_eigh( *, dtype_and_x, UPLO, symmetrize_input, backend_fw, on_device, fn_tree, frontend, test_flags, ): dtype, x = dtype_and_x x = np.array(x[0], dtype=dtype[0]) # make symmetric positive-definite beforehand x = np.matmul(x.T, x) + np.identity(x.shape[0]) * 1e-3 ret, frontend_ret = helpers.test_frontend_function( input_dtypes=dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, 
test_values=False, a=x, UPLO=UPLO, symmetrize_input=symmetrize_input, ) with BackendHandler.update_backend(backend_fw) as ivy_backend: ret = [ivy_backend.to_numpy(x) for x in ret] frontend_ret = [np.asarray(x) for x in frontend_ret] L, Q = ret frontend_L, frontend_Q = frontend_ret assert_all_close( ret_np=Q @ np.diag(L) @ Q.T, ret_from_gt_np=frontend_Q @ np.diag(frontend_L) @ frontend_Q.T, atol=1e-02, backend=backend_fw, ground_truth_backend=frontend, ) # eigvals @handle_frontend_test( fn_tree="jax.numpy.linalg.eigvals", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=0, max_value=10, shape=helpers.ints(min_value=2, max_value=5).map(lambda x: (x, x)), ).filter( lambda x: "float16" not in x[0] and "bfloat16" not in x[0] and np.linalg.cond(x[1][0]) < 1 / sys.float_info.epsilon and np.linalg.det(np.asarray(x[1][0])) != 0 ), ) def test_jax_eigvals( *, dtype_and_x, on_device, fn_tree, frontend, test_flags, backend_fw, ): dtypes, x = dtype_and_x x = np.array(x[0], dtype=dtypes[0]) # make symmetric positive-definite beforehand x = np.matmul(x.T, x) + np.identity(x.shape[0]) * 1e-3 ret, frontend_ret = helpers.test_frontend_function( input_dtypes=dtypes, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, test_values=False, a=x, ) with BackendHandler.update_backend(backend_fw) as ivy_backend: # Calculate the magnitude of the complex numbers then sort them for testing ret = np.sort(np.abs(ivy_backend.to_numpy(ret))).astype(np.float64) frontend_ret = np.sort(np.abs(frontend_ret)).astype(np.float64) assert_all_close( ret_np=ret, ret_from_gt_np=frontend_ret, backend=backend_fw, ground_truth_backend=frontend, atol=1e-2, rtol=1e-2, ) # eigvalsh @handle_frontend_test( fn_tree="jax.numpy.linalg.eigvalsh", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=0, max_value=10, shape=helpers.ints(min_value=2, max_value=5).map(lambda x: (x, x)), ).filter( lambda x: "float16" not in x[0] and "bfloat16" not in x[0] and np.linalg.cond(x[1][0]) < 1 / sys.float_info.epsilon and np.linalg.det(np.asarray(x[1][0])) != 0 ), UPLO=st.sampled_from(("L", "U")), test_with_out=st.just(False), ) def test_jax_eigvalsh( *, dtype_and_x, UPLO, on_device, fn_tree, frontend, test_flags, backend_fw, ): dtype, x = dtype_and_x x = np.asarray(x[0], dtype=dtype[0]) # make symmetric positive-definite beforehand x = np.matmul(x.T, x) + np.identity(x.shape[0]) * 1e-3 helpers.test_frontend_function( input_dtypes=dtype, frontend=frontend, backend_to_test=backend_fw, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, rtol=1e-02, atol=1e-02, a=x, UPLO=UPLO, ) # inv @handle_frontend_test( fn_tree="jax.numpy.linalg.inv", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=-100, max_value=100, shape=helpers.ints(min_value=1, max_value=10).map(lambda x: (x, x)), ).filter( lambda x: "float16" not in x[0] and "bfloat16" not in x[0] and np.linalg.cond(x[1][0]) < 1 / sys.float_info.epsilon and np.linalg.det(np.asarray(x[1][0])) != 0 ), test_with_out=st.just(False), ) def test_jax_inv( *, dtype_and_x, on_device, fn_tree, frontend, test_flags, backend_fw, ): dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=dtype, backend_to_test=backend_fw, rtol=1e-01, atol=1e-01, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, a=x[0], ) # least squares @handle_frontend_test( fn_tree="jax.numpy.linalg.lstsq", 
dtype_and_a=helpers.get_first_solve_matrix(adjoint=True), dtype_and_b=helpers.get_second_solve_matrix(), test_with_out=st.just(False), ) def test_jax_lstsq( *, dtype_and_a, dtype_and_b, on_device, fn_tree, frontend, test_flags, backend_fw, ): a_dtype, a, _ = dtype_and_a b_dtype, b = dtype_and_b helpers.test_frontend_function( input_dtypes=[a_dtype, b_dtype], rtol=1e-01, atol=1e-01, frontend=frontend, test_flags=test_flags, backend_to_test=backend_fw, fn_tree=fn_tree, on_device=on_device, a=a, b=b, test_values=False, ) # matrix_power @handle_frontend_test( fn_tree="jax.numpy.linalg.matrix_power", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=-100, max_value=100, shape=helpers.ints(min_value=1, max_value=10).map(lambda x: (x, x)), ).filter( lambda x: "float16" not in x[0] and "bfloat16" not in x[0] and np.linalg.cond(x[1][0]) < 1 / sys.float_info.epsilon and np.linalg.det(np.asarray(x[1][0])) != 0 ), n=helpers.ints(min_value=1, max_value=8), test_with_out=st.just(False), ) def test_jax_matrix_power( *, dtype_and_x, n, on_device, fn_tree, frontend, test_flags, backend_fw, ): dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=dtype, rtol=1e-01, atol=1e-01, frontend=frontend, test_flags=test_flags, backend_to_test=backend_fw, fn_tree=fn_tree, on_device=on_device, a=np.asarray(x[0], dtype=dtype[0]), n=n, ) # matrix_rank @handle_frontend_test( fn_tree="jax.numpy.linalg.matrix_rank", dtype_x_hermitian_atol_rtol=_matrix_rank_helper(), test_with_out=st.just(False), ) def test_jax_matrix_rank( *, dtype_x_hermitian_atol_rtol, on_device, fn_tree, frontend, test_flags, backend_fw, ): dtype, x, hermitian, atol, rtol = dtype_x_hermitian_atol_rtol assume(matrix_is_stable(x, cond_limit=10)) helpers.test_frontend_function( input_dtypes=dtype, frontend=frontend, test_flags=test_flags, backend_to_test=backend_fw, fn_tree=fn_tree, on_device=on_device, M=x, tol=atol, ) # multi_dot @handle_frontend_test( fn_tree="jax.numpy.linalg.multi_dot", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=0, max_value=10, shape=helpers.ints(min_value=2, max_value=5).map(lambda x: (x, x)), num_arrays=2, shared_dtype=True, ).filter( lambda x: "float16" not in x[0] and "bfloat16" not in x[0] and np.linalg.cond(x[1][0]) < 1 / sys.float_info.epsilon and np.linalg.det(np.asarray(x[1][0])) != 0 ), test_with_out=st.just(False), ) def test_jax_multi_dot( *, dtype_and_x, on_device, fn_tree, frontend, test_flags, backend_fw ): dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=dtype, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, arrays=(x[0], x[1]), backend_to_test=backend_fw, ) # norm @handle_frontend_test( fn_tree="jax.numpy.linalg.norm", dtype_values_axis=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("float"), min_num_dims=2, max_num_dims=3, min_dim_size=2, max_dim_size=5, min_axis=-2, max_axis=1, large_abs_safety_factor=24, small_abs_safety_factor=24, safety_factor_scale="log", ), keepdims=st.booleans(), ord=st.sampled_from([None, np.inf, -np.inf, 1, -1, 2, -2]), test_with_out=st.just(False), ) def test_jax_norm( dtype_values_axis, keepdims, ord, frontend, test_flags, fn_tree, on_device, backend_fw, ): dtype, x, axis = dtype_values_axis if len(np.shape(x)) == 1: axis = None helpers.test_frontend_function( input_dtypes=dtype, frontend=frontend, test_flags=test_flags, backend_to_test=backend_fw, fn_tree=fn_tree, on_device=on_device, x=x[0], ord=ord, axis=axis, 
keepdims=keepdims, atol=1e-1, rtol=1e-1, ) # pinv @handle_frontend_test( fn_tree="jax.numpy.linalg.pinv", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_num_dims=2, max_num_dims=5, min_dim_size=2, max_dim_size=5, large_abs_safety_factor=4, small_abs_safety_factor=4, safety_factor_scale="log", ), test_with_out=st.just(False), rcond=st.floats(1e-5, 1e-3), ) def test_jax_pinv( dtype_and_x, frontend, fn_tree, test_flags, backend_fw, rcond, ): dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=dtype, frontend=frontend, backend_to_test=backend_fw, test_flags=test_flags, fn_tree=fn_tree, a=x[0], rcond=rcond, atol=1e-1, rtol=1e-1, ) # qr @handle_frontend_test( fn_tree="jax.numpy.linalg.qr", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_num_dims=3, max_num_dims=5, min_dim_size=2, max_dim_size=5, min_value=2, max_value=5, ), mode=st.sampled_from(("reduced", "complete")), test_with_out=st.just(False), ) def test_jax_qr( *, dtype_and_x, mode, on_device, fn_tree, frontend, test_flags, backend_fw, ): dtype, x = dtype_and_x ret, frontend_ret = helpers.test_frontend_function( input_dtypes=dtype, test_values=False, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, a=np.asarray(x[0], dtype[0]), mode=mode, ) with BackendHandler.update_backend(backend_fw) as ivy_backend: ret = [ivy_backend.to_numpy(x).astype(np.float64) for x in ret] frontend_ret = [x.astype(np.float64) for x in frontend_ret] Q, R = ret frontend_Q, frontend_R = frontend_ret assert_all_close( ret_np=Q @ R, ret_from_gt_np=frontend_Q @ frontend_R, atol=1e-02, backend=backend_fw, ground_truth_backend=frontend, ) # slogdet @handle_frontend_test( fn_tree="jax.numpy.linalg.slogdet", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), max_value=100, min_value=-100, shape=st.tuples( st.shared(st.integers(1, 5), key="sq"), st.shared(st.integers(1, 5), key="sq"), ), num_arrays=1, ), test_with_out=st.just(False), ) def test_jax_slogdet( *, dtype_and_x, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, frontend=frontend, backend_to_test=backend_fw, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, atol=1e-4, rtol=1e-4, a=x[0], ) # solve @handle_frontend_test( fn_tree="jax.numpy.linalg.solve", x=helpers.get_first_solve_batch_matrix(), y=helpers.get_second_solve_batch_matrix(), test_with_out=st.just(False), ) def test_jax_solve( *, x, y, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype1, x1, _ = x input_dtype2, x2, _ = y helpers.test_frontend_function( input_dtypes=[input_dtype1, input_dtype2], frontend=frontend, test_flags=test_flags, backend_to_test=backend_fw, fn_tree=fn_tree, on_device=on_device, rtol=1e-4, atol=1e-4, a=x1, b=x2, ) # svd @handle_frontend_test( fn_tree="jax.numpy.linalg.svd", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=0, max_value=10, shape=helpers.ints(min_value=2, max_value=5).map(lambda x: (x, x)), ).filter( lambda x: "float16" not in x[0] and "bfloat16" not in x[0] and np.linalg.cond(x[1][0]) < 1 / sys.float_info.epsilon and np.linalg.det(np.asarray(x[1][0])) != 0 ), full_matrices=st.booleans(), compute_uv=st.booleans(), test_with_out=st.just(False), ) def test_jax_svd( *, dtype_and_x, full_matrices, compute_uv, on_device, fn_tree, frontend, backend_fw, test_flags, ): dtype, 
x = dtype_and_x x = np.asarray(x[0], dtype=dtype[0]) # make symmetric positive-definite beforehand x = np.matmul(x.T, x) + np.identity(x.shape[0]) * 1e-3 ret, frontend_ret = helpers.test_frontend_function( input_dtypes=dtype, frontend=frontend, backend_to_test=backend_fw, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, test_values=False, x=x, full_matrices=full_matrices, compute_uv=compute_uv, ) if compute_uv: with BackendHandler.update_backend(backend_fw) as ivy_backend: ret = [ivy_backend.to_numpy(x) for x in ret] frontend_ret = [np.asarray(x) for x in frontend_ret] u, s, vh = ret frontend_u, frontend_s, frontend_vh = frontend_ret assert_all_close( ret_np=u @ np.diag(s) @ vh, ret_from_gt_np=frontend_u @ np.diag(frontend_s) @ frontend_vh, rtol=1e-2, atol=1e-2, backend=backend_fw, ground_truth_backend=frontend, ) else: with BackendHandler.update_backend(backend_fw) as ivy_backend: ret = ivy_backend.to_numpy(ret) assert_all_close( ret_np=ret, ret_from_gt_np=np.asarray(frontend_ret[0]), rtol=1e-2, atol=1e-2, backend=backend_fw, ground_truth_backend=frontend, ) @handle_frontend_test( fn_tree="jax.numpy.linalg.tensorinv", params=_get_inv_square_matrices() ) def test_jax_tensorinv( *, params, on_device, fn_tree, frontend, test_flags, backend_fw, ): dtype, x, ind = params helpers.test_frontend_function( input_dtypes=dtype, rtol=1e-01, atol=1e-01, frontend=frontend, test_flags=test_flags, backend_to_test=backend_fw, fn_tree=fn_tree, on_device=on_device, a=x, ind=ind, ) @handle_frontend_test( fn_tree="jax.numpy.linalg.tensorsolve", a_and_b=_get_solve_matrices(), test_with_out=st.just(False), ) def test_jax_tensorsolve( *, a_and_b, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, x, y = a_and_b helpers.test_frontend_function( input_dtypes=[input_dtype], frontend=frontend, test_flags=test_flags, backend_to_test=backend_fw, fn_tree=fn_tree, on_device=on_device, a=x, b=y, atol=1e-2, rtol=1e-2, )
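
# Illustrative sketch only (hypothetical helper, not part of the suite): the
# eig/eigh/svd tests above avoid comparing eigenvectors or singular vectors
# element-wise, since those are only unique up to sign/phase and ordering.
# Instead they compare reconstructions of the input matrix, which is the same
# idea as this plain-numpy check:
def _reconstruction_check_sketch():
    import numpy as np

    rng = np.random.default_rng(0)
    a = rng.standard_normal((4, 4))
    # symmetrise and shift, as the tests above do, to get a well-behaved input
    a = a.T @ a + np.eye(4) * 1e-3
    w, q = np.linalg.eigh(a)
    # q @ diag(w) @ q.T recovers `a` regardless of eigenvector sign choices
    assert np.allclose(q @ np.diag(w) @ q.T, a, atol=1e-8)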
ivy/ivy_tests/test_ivy/test_frontends/test_jax/test_numpy/test_linalg.py/0
{ "file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_jax/test_numpy/test_linalg.py", "repo_id": "ivy", "token_count": 13223 }
55
# global import numpy as np from hypothesis import strategies as st # local import ivy import ivy_tests.test_ivy.helpers as helpers import ivy.functional.frontends.numpy as np_frontend from ivy_tests.test_ivy.helpers.pipeline_helper import ( BackendHandler, get_frontend_config, ) import ivy_tests.test_ivy.helpers.globals as test_globals # --- Helpers --- # # --------------- # @st.composite def _array_and_axes_permute_helper( draw, *, min_num_dims, max_num_dims, min_dim_size, max_dim_size, allow_none=False, ): """Return array, its dtype and either the random permutation of its axes or None. Parameters ---------- draw special function that draws data randomly (but is reproducible) from a given data-set (ex. list). min_num_dims minimum number of array dimensions max_num_dims maximum number of array dimensions min_dim_size minimum size of the dimension max_dim_size maximum size of the dimension Returns ------- A strategy that draws an array, its dtype and axes (or None). """ shape = draw( helpers.get_shape( allow_none=allow_none, min_num_dims=min_num_dims, max_num_dims=max_num_dims, min_dim_size=min_dim_size, max_dim_size=max_dim_size, ) ) dtype = draw(helpers.array_dtypes(num_arrays=1)) array = draw(helpers.array_values(dtype=dtype[0], shape=shape)) axes = draw( st.one_of( st.none(), helpers.get_axis( shape=shape, allow_neg=False, allow_none=False, sort_values=False, unique=True, min_size=len(shape), max_size=len(shape), force_tuple=True, force_int=False, ), ).filter(lambda x: x != tuple(range(len(shape)))) ) return array, dtype, axes def _flatten_frontend_return(*, ret, backend): """Flattening the returned frontend value to a list of numpy arrays.""" with BackendHandler.update_backend(backend) as ivy_backend: if not isinstance(ret, tuple): if not ivy_backend.is_ivy_array(ret): ret_np_flat = helpers.flatten_frontend_to_np(backend=backend, ret=ret) else: ret_np_flat = _flatten_fw_return(ret=ret, backend=backend) else: if any(not ivy_backend.is_ivy_array(x) for x in ret): ret_np_flat = helpers.flatten_frontend_to_np(backend=backend, ret=ret) else: ret_np_flat = _flatten_fw_return(ret=ret, backend=backend) return ret_np_flat def _flatten_fw_return(ret, backend): with BackendHandler.update_backend(backend) as ivy_backend: if not isinstance(ret, tuple): ret = (ret,) ret_idxs = ivy_backend.nested_argwhere( ret, lambda x: ivy_backend.is_ivy_array(x) or ivy_backend.is_native_array(x) ) if len(ret_idxs) == 0: ret_idxs = ivy_backend.nested_argwhere(ret, ivy_backend.isscalar) ret_flat = ivy_backend.multi_index_nest(ret, ret_idxs) ret_flat = [ ivy_backend.asarray( x, dtype=ivy_backend.Dtype(str(np.asarray(x).dtype)) ) for x in ret_flat ] else: ret_flat = ivy_backend.multi_index_nest(ret, ret_idxs) # convert the return to NumPy ret_np_flat = [ivy_backend.to_numpy(x) for x in ret_flat] return ret_np_flat @st.composite def _get_dtype_input_and_vectors(draw): dim_size = draw(helpers.ints(min_value=1, max_value=5)) dtype = draw(helpers.get_dtypes("float", index=1, full=False)) if dim_size == 1: vec1 = draw( helpers.array_values( dtype=dtype[0], shape=(dim_size,), min_value=2, max_value=5 ) ) vec2 = draw( helpers.array_values( dtype=dtype[0], shape=(dim_size,), min_value=2, max_value=5 ) ) else: vec1 = draw( helpers.array_values( dtype=dtype[0], shape=(dim_size, dim_size), min_value=2, max_value=5 ) ) vec2 = draw( helpers.array_values( dtype=dtype[0], shape=(dim_size, dim_size), min_value=2, max_value=5 ) ) return dtype, vec1, vec2 # Casting helper @st.composite def _get_safe_casting_dtype(draw, *, dtypes): 
target_dtype = dtypes[0] for dtype in dtypes[1:]: if np_frontend.can_cast(target_dtype, dtype, casting="safe"): target_dtype = dtype with BackendHandler.update_backend(test_globals.CURRENT_BACKEND) as ivy_backend: if ivy_backend.is_float_dtype(target_dtype): dtype = draw(st.sampled_from(["float64", None])) elif ivy_backend.is_uint_dtype(target_dtype): dtype = draw(st.sampled_from(["uint64", None])) elif ivy_backend.is_int_dtype(target_dtype): dtype = draw(st.sampled_from(["int64", None])) elif ivy_backend.is_complex_dtype(target_dtype): dtype = draw(st.sampled_from(["complex128", None])) else: dtype = draw(st.sampled_from(["bool", None])) # filter uint64 as not supported by torch backend if dtype == "uint64": dtype = None return dtype # noinspection PyShadowingNames def _test_frontend_function_ignoring_uninitialized(*args, **kwargs): # TODO: this is a hack to get around, but not sure if it is efficient way to do it. where = kwargs["where"] if kwargs["frontend"] == "numpy": kwargs["where"] = True else: kwargs["where"] = None kwargs["test_values"] = False values = helpers.test_frontend_function(*args, **kwargs) if values is None: return ret, frontend_ret = values # set backend to frontend to flatten the frontend array frontend_config = get_frontend_config(kwargs["frontend"]) # get flattened arrays from returned value if frontend_config.isscalar(frontend_ret): frontend_ret_np_flat = [np.asarray(frontend_ret)] else: if not isinstance(frontend_ret, tuple): frontend_ret = (frontend_ret,) frontend_ret_idxs = ivy.nested_argwhere( frontend_ret, frontend_config.is_native_array ) frontend_ret_flat = ivy.multi_index_nest(frontend_ret, frontend_ret_idxs) frontend_ret_np_flat = [frontend_config.to_numpy(x) for x in frontend_ret_flat] # get flattened arrays from returned value ret_np_flat = _flatten_frontend_return(ret=ret, backend=kwargs["backend_to_test"]) # handling where size where = np.asarray(where) if where.ndim == 0: where = np.array([where]) elif where.ndim > 1: where = where.flatten() # handling ret size first_el = ret_np_flat[0] # change where to match the shape of the first element of ret_np_flat if first_el.size == 1: where = where[:1] else: where = np.repeat(where, first_el.size) where = where[: first_el.size] where = where.reshape(first_el.shape) ret_flat = [np.where(where, x, np.zeros_like(x)) for x in ret_np_flat] frontend_ret_flat = [ np.where(where, x, np.zeros_like(x)) for x in frontend_ret_np_flat ] if "rtol" in kwargs: rtol = kwargs["rtol"] else: rtol = 1e-4 if "atol" in kwargs: atol = kwargs["atol"] else: atol = 1e-6 helpers.value_test( ret_np_flat=ret_flat, ret_np_from_gt_flat=frontend_ret_flat, rtol=rtol, atol=atol, backend=kwargs["backend_to_test"], ground_truth_backend=kwargs["frontend"], ) @st.composite def dtypes_values_casting_dtype( draw, *, arr_func, get_dtypes_none=True, special=False, ): dtypes, values = [], [] casting = draw(st.sampled_from(["no", "equiv", "safe", "same_kind", "unsafe"])) for func in arr_func: typ, val = draw(func()) dtypes += typ if isinstance(typ, list) else [typ] values += val if isinstance(val, list) else [val] if casting in ["no", "equiv"] and len(dtypes) > 0: dtypes = [dtypes[0]] * len(dtypes) if special: dtype = draw(st.sampled_from(["bool", None])) elif get_dtypes_none: dtype = draw(st.sampled_from([None])) elif casting in ["no", "equiv"]: dtype = draw(st.just(None)) elif casting in ["safe", "same_kind"]: dtype = draw(_get_safe_casting_dtype(dtypes=dtypes)) else: dtype = draw(st.sampled_from([None])) return dtypes, values, casting, dtype # ufunc 
num_positional_args helper @st.composite def get_num_positional_args_ufunc(draw, *, fn_name=None): """Draws data randomly from numbers between nin and nargs where nin and nargs are properties of the given ufunc. Parameters ---------- draw special function that draws data randomly (but is reproducible) from a given data-set (ex. list). fn_name name of the ufunc. Returns ------- A strategy that can be used in the @given hypothesis decorator. """ func = getattr(np_frontend, fn_name) nin = func.nin nargs = func.nargs return draw(st.integers(min_value=nin, max_value=nargs)) @st.composite def where(draw, *, shape=None): if shape is None: _, values = draw(helpers.dtype_and_values(dtype=["bool"])) else: _, values = draw(helpers.dtype_and_values(dtype=["bool"], shape=shape)) return draw(st.just(values) | st.just(True)) # --- Main --- # # ------------ # # noinspection PyShadowingNames def handle_where_and_array_bools(where, input_dtype, test_flags): if isinstance(where, (list, tuple)): where = where[0] test_flags.as_variable += [False] test_flags.native_arrays += [False] input_dtype += ["bool"] return where, input_dtype, test_flags return where, input_dtype, test_flags # noinspection PyShadowingNames def test_frontend_function(*args, where=None, **kwargs): if not ivy.exists(where): helpers.test_frontend_function(*args, **kwargs) else: kwargs["where"] = where if "out" in kwargs and kwargs["out"] is None: _test_frontend_function_ignoring_uninitialized(*args, **kwargs) return else: helpers.test_frontend_function(*args, **kwargs)
ivy/ivy_tests/test_ivy/test_frontends/test_numpy/helpers.py/0
{ "file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/helpers.py", "repo_id": "ivy", "token_count": 5004 }
56
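For readers following `_test_frontend_function_ignoring_uninitialized` above, here is a minimal, self-contained sketch of the masking step it performs before the value comparison. It is not part of the test suite; the arrays are hypothetical stand-ins for the flattened backend result and its ground-truth counterpart, and the point is only that entries the `where` mask marks as uninitialized are zeroed on both sides so garbage values there cannot fail the test.

import numpy as np

# hypothetical flattened results; index 1 is "uninitialized" and may hold garbage
ret = np.array([1.0, 7.3, 2.0, -4.0])
frontend_ret = np.array([1.0, 9.9, 2.0, -4.0])
where = np.array([True, False, True, True])

# zero out the positions excluded by `where` on both sides, as the helper does,
# then compare only the remaining (initialized) entries
masked_ret = np.where(where, ret, np.zeros_like(ret))
masked_gt = np.where(where, frontend_ret, np.zeros_like(frontend_ret))
assert np.allclose(masked_ret, masked_gt, rtol=1e-4, atol=1e-6)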
# global from hypothesis import given, strategies as st import platform # local import ivy import ivy_tests.test_ivy.helpers as helpers from ivy.functional.frontends.numpy.func_wrapper import ( inputs_to_ivy_arrays, outputs_to_frontend_arrays, to_ivy_arrays_and_back, handle_numpy_dtype, from_zero_dim_arrays_to_scalar, ) from ivy.functional.frontends.numpy.ndarray import ndarray import ivy.functional.frontends.numpy as np_frontend # --- Helpers --- # # --------------- # @st.composite def _dtype_helper(draw): return draw( st.sampled_from( [ draw(st.sampled_from([int, float, bool])), ivy.as_native_dtype( draw(helpers.get_dtypes("valid", full=False, prune_function=False))[ 0 ] ), np_frontend.dtype( draw(helpers.get_dtypes("valid", full=False, prune_function=False))[ 0 ] ), draw(st.sampled_from(list(np_frontend.numpy_scalar_to_dtype.keys()))), draw(st.sampled_from(list(np_frontend.numpy_str_to_type_table.keys()))), ] ) ) def _fn(*args, check_default=False, dtype=None): if ( check_default and any(not (ivy.is_array(i) or hasattr(i, "ivy_array")) for i in args) and not ivy.exists(dtype) ): ivy.utils.assertions.check_equal( ivy.default_float_dtype(), "float64", as_array=False ) if platform.system() != "Windows": ivy.utils.assertions.check_equal( ivy.default_int_dtype(), "int64", as_array=False ) else: ivy.utils.assertions.check_equal( ivy.default_int_dtype(), "int32", as_array=False ) if not ivy.exists(args[0]): return dtype return args[0] def _zero_dim_to_scalar_checks(x, ret_x): if len(x.shape) > 0: assert ivy.all(ivy.array(ret_x) == ivy.array(x)) else: assert issubclass(type(ret_x), np_frontend.generic) assert ret_x.ivy_array == ivy.array(x) @st.composite def _zero_dim_to_scalar_helper(draw): dtype = draw( helpers.get_dtypes("valid", prune_function=False, full=False).filter( lambda x: "bfloat16" not in x ) )[0] shape = draw(helpers.get_shape()) return draw( st.one_of( helpers.array_values(shape=shape, dtype=dtype), st.lists(helpers.array_values(shape=shape, dtype=dtype), min_size=1).map( tuple ), ) ) # --- Main --- # # ------------ # @given( dtype=_dtype_helper(), ) def test_handle_numpy_dtype(dtype, backend_fw): ivy.set_backend(backend_fw) ret_dtype = handle_numpy_dtype(_fn)(None, dtype=dtype) assert isinstance(ret_dtype, ivy.Dtype) ivy.previous_backend() @given(x=_zero_dim_to_scalar_helper()) def test_numpy_from_zero_dim_arrays_to_scalar(x, backend_fw): ivy.set_backend(backend_fw) ret_x = from_zero_dim_arrays_to_scalar(_fn)(x) if isinstance(x, tuple): assert isinstance(ret_x, tuple) for x_i, ret_x_i in zip(x, ret_x): _zero_dim_to_scalar_checks(x_i, ret_x_i) else: _zero_dim_to_scalar_checks(x, ret_x) ivy.previous_backend() @given( dtype_x_shape=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid", prune_function=False), ret_shape=True, ), ) def test_numpy_inputs_to_ivy_arrays(dtype_x_shape, backend_fw): ivy.set_backend(backend_fw) x_dtype, x, shape = dtype_x_shape # check for ivy array input_ivy = ivy.array(x[0], dtype=x_dtype[0]) output = inputs_to_ivy_arrays(_fn)(input_ivy) assert isinstance(output, ivy.Array) assert input_ivy.dtype == output.dtype assert ivy.all(input_ivy == output) # check for native array input_native = ivy.native_array(input_ivy) output = inputs_to_ivy_arrays(_fn)(input_native) assert isinstance(output, ivy.Array) assert ivy.as_ivy_dtype(input_native.dtype) == str(output.dtype) assert ivy.all(input_native == output.data) # check for frontend array input_frontend = ndarray(shape) input_frontend.ivy_array = input_ivy output = 
inputs_to_ivy_arrays(_fn)(input_frontend) assert isinstance(output, ivy.Array) assert input_frontend.ivy_array.dtype == str(output.dtype) assert ivy.all(input_frontend.ivy_array == output) ivy.previous_backend() @given( dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid", prune_function=False) ), dtype=helpers.get_dtypes("valid", none=True, full=False, prune_function=False), ) def test_numpy_outputs_to_frontend_arrays(dtype_and_x, dtype, backend_fw): ivy.set_backend(backend_fw) x_dtype, x = dtype_and_x # check for ivy array input_ivy = ivy.array(x[0], dtype=x_dtype[0]) if not len(input_ivy.shape): scalar_input_ivy = ivy.to_scalar(input_ivy) outputs_to_frontend_arrays(_fn)( scalar_input_ivy, scalar_input_ivy, check_default=True, dtype=dtype ) outputs_to_frontend_arrays(_fn)( scalar_input_ivy, input_ivy, check_default=True, dtype=dtype ) output = outputs_to_frontend_arrays(_fn)(input_ivy, check_default=True, dtype=dtype) assert isinstance(output, ndarray) assert input_ivy.dtype == output.ivy_array.dtype assert ivy.all(input_ivy == output.ivy_array) assert ivy.default_float_dtype_stack == ivy.default_int_dtype_stack == [] ivy.previous_backend() @given( dtype_x_shape=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid", prune_function=False), ret_shape=True, ), dtype=helpers.get_dtypes("valid", none=True, full=False, prune_function=False), ) def test_numpy_to_ivy_arrays_and_back(dtype_x_shape, dtype, backend_fw): ivy.set_backend(backend_fw) x_dtype, x, shape = dtype_x_shape # check for ivy array input_ivy = ivy.array(x[0], dtype=x_dtype[0]) if not len(input_ivy.shape): scalar_input_ivy = ivy.to_scalar(input_ivy) to_ivy_arrays_and_back(_fn)( scalar_input_ivy, scalar_input_ivy, check_default=True, dtype=dtype ) to_ivy_arrays_and_back(_fn)( scalar_input_ivy, input_ivy, check_default=True, dtype=dtype ) output = to_ivy_arrays_and_back(_fn)(input_ivy, check_default=True, dtype=dtype) assert isinstance(output, ndarray) assert input_ivy.dtype == output.ivy_array.dtype assert ivy.all(input_ivy == output.ivy_array) # check for native array input_native = ivy.native_array(input_ivy) if not len(input_native.shape): scalar_input_native = ivy.to_scalar(input_native) to_ivy_arrays_and_back(_fn)( scalar_input_native, scalar_input_native, check_default=True, dtype=dtype ) to_ivy_arrays_and_back(_fn)( scalar_input_native, input_native, check_default=True, dtype=dtype ) output = to_ivy_arrays_and_back(_fn)(input_native, check_default=True, dtype=dtype) assert isinstance(output, ndarray) assert ivy.as_ivy_dtype(input_native.dtype) == output.ivy_array.dtype assert ivy.all(input_native == output.ivy_array.data) # check for frontend array input_frontend = ndarray(shape) input_frontend.ivy_array = input_ivy if not len(input_frontend.shape): scalar_input_front = inputs_to_ivy_arrays(ivy.to_scalar)(input_frontend) to_ivy_arrays_and_back(_fn)( scalar_input_front, scalar_input_front, check_default=True, dtype=dtype ) to_ivy_arrays_and_back(_fn)( scalar_input_front, input_frontend, check_default=True, dtype=dtype ) output = to_ivy_arrays_and_back(_fn)( input_frontend, check_default=True, dtype=dtype ) assert isinstance(output, ndarray) assert input_frontend.ivy_array.dtype == output.ivy_array.dtype assert ivy.all(input_frontend.ivy_array == output.ivy_array) assert ivy.default_float_dtype_stack == ivy.default_int_dtype_stack == [] ivy.previous_backend()
ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_func_wrapper.py/0
{ "file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_func_wrapper.py", "repo_id": "ivy", "token_count": 3974 }
57
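A minimal sketch of the behavior the wrapper tests above verify, assuming the numpy backend is installed and calling the wrapper without the `check_default`/`dtype` keyword arguments the tests pass. The `_identity` helper is a hypothetical stand-in for `_fn`: a plain function returning an `ivy.Array`, which `outputs_to_frontend_arrays` converts into a NumPy-frontend `ndarray`.

import ivy
from ivy.functional.frontends.numpy.func_wrapper import outputs_to_frontend_arrays
from ivy.functional.frontends.numpy.ndarray import ndarray

ivy.set_backend("numpy")

def _identity(*args):
    # mirrors the role of `_fn` above: simply return the first argument
    return args[0]

out = outputs_to_frontend_arrays(_identity)(ivy.array([1.0, 2.0]))
assert isinstance(out, ndarray)                         # wrapped output is a frontend ndarray
assert ivy.all(out.ivy_array == ivy.array([1.0, 2.0]))  # values pass through unchanged
ivy.previous_backend()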
# global import numpy as np # local import ivy_tests.test_ivy.helpers as helpers import ivy_tests.test_ivy.test_frontends.test_numpy.helpers as np_frontend_helpers from ivy_tests.test_ivy.helpers import handle_frontend_test @handle_frontend_test( fn_tree="numpy.isfinite", dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype( arr_func=[ lambda: helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=-np.inf, max_value=np.inf, ) ], special=True, ), where=np_frontend_helpers.where(), number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc( fn_name="isfinite" ), ) def test_numpy_isfinite( dtypes_values_casting, where, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtypes, x, casting, dtype = dtypes_values_casting where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools( where=where, input_dtype=input_dtypes, test_flags=test_flags, ) np_frontend_helpers.test_frontend_function( input_dtypes=input_dtypes, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], out=None, where=where, casting=casting, order="K", dtype=dtype, subok=True, ) @handle_frontend_test( fn_tree="numpy.isinf", dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype( arr_func=[ lambda: helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=-np.inf, max_value=np.inf, ) ], special=True, ), where=np_frontend_helpers.where(), number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc( fn_name="isinf" ), ) def test_numpy_isinf( dtypes_values_casting, where, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtypes, x, casting, dtype = dtypes_values_casting where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools( where=where, input_dtype=input_dtypes, test_flags=test_flags, ) np_frontend_helpers.test_frontend_function( input_dtypes=input_dtypes, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], out=None, where=where, casting=casting, order="K", dtype=dtype, subok=True, ) @handle_frontend_test( fn_tree="numpy.isnan", dtypes_values_casting=np_frontend_helpers.dtypes_values_casting_dtype( arr_func=[ lambda: helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=-np.inf, max_value=np.inf, allow_nan=True, ) ], special=True, ), where=np_frontend_helpers.where(), number_positional_args=np_frontend_helpers.get_num_positional_args_ufunc( fn_name="isnan" ), ) def test_numpy_isnan( dtypes_values_casting, where, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtypes, x, casting, dtype = dtypes_values_casting where, input_dtypes, test_flags = np_frontend_helpers.handle_where_and_array_bools( where=where, input_dtype=input_dtypes, test_flags=test_flags, ) np_frontend_helpers.test_frontend_function( input_dtypes=input_dtypes, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], out=None, where=where, casting=casting, order="K", dtype=dtype, subok=True, )
ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_logic/test_array_type_testing.py/0
{ "file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_logic/test_array_type_testing.py", "repo_id": "ivy", "token_count": 2090 }
58
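The three tests above thread a drawn `where` mask through `handle_where_and_array_bools` because of how NumPy ufuncs treat that argument: positions where the mask is False are simply never written, so they only hold defined values when an `out` array is supplied. A plain NumPy sketch of that behavior:

import numpy as np

x = np.array([0.0, np.inf, np.nan, 1.0])
mask = np.array([True, True, False, True])
out = np.zeros(x.shape, dtype=bool)

np.isfinite(x, where=mask, out=out)
print(out)  # [ True False False  True] -- index 2 was never written and keeps the value from `out`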
# local import numpy as np import ivy_tests.test_ivy.helpers as helpers from hypothesis import strategies as st from ivy_tests.test_ivy.helpers import handle_frontend_test from ivy_tests.test_ivy.test_functional.test_core.test_dtype import dtypes_shared # --- Helpers --- # # --------------- # # squeeze @st.composite def _squeeze_helper(draw): shape = draw(st.shared(helpers.get_shape(), key="value_shape")) valid_axes = [] for index, axis in enumerate(shape): if axis == 1: valid_axes.append(index) valid_axes.insert(0, None) return draw(st.sampled_from(valid_axes)) # broadcast_arrays @st.composite def broadcastable_arrays(draw, dtypes): num_arrays = st.shared(helpers.ints(min_value=2, max_value=5), key="num_arrays") shapes = draw(num_arrays.flatmap(helpers.mutually_broadcastable_shapes)) dtypes = draw(dtypes) arrays = [] for c, (shape, dtype) in enumerate(zip(shapes, dtypes), 1): x = draw(helpers.array_values(dtype=dtype, shape=shape), label=f"x{c}").tolist() arrays.append(x) return arrays # --- Main --- # # ------------ # # atleast_1d @handle_frontend_test( fn_tree="numpy.atleast_1d", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=helpers.ints(min_value=1, max_value=10), ), test_with_out=st.just(False), ) def test_numpy_atleast_1d( *, dtype_and_x, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, arrays = dtype_and_x arys = {} for i, (array, idtype) in enumerate(zip(arrays, input_dtype)): arys[f"arrs{i}"] = np.asarray(array, dtype=idtype) test_flags.num_positional_args = len(arys) helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, **arys, ) # atleast_2d @handle_frontend_test( fn_tree="numpy.atleast_2d", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=helpers.ints(min_value=1, max_value=10), ), test_with_out=st.just(False), ) def test_numpy_atleast_2d( *, dtype_and_x, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, arrays = dtype_and_x arys = {} for i, (array, idtype) in enumerate(zip(arrays, input_dtype)): arys[f"arrs{i}"] = np.asarray(array, dtype=idtype) test_flags.num_positional_args = len(arys) helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, **arys, ) # atleast_3d @handle_frontend_test( fn_tree="numpy.atleast_3d", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=helpers.ints(min_value=1, max_value=10), ), test_with_out=st.just(False), ) def test_numpy_atleast_3d( *, dtype_and_x, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, arrays = dtype_and_x arys = {} for i, (array, idtype) in enumerate(zip(arrays, input_dtype)): arys[f"arrs{i}"] = np.asarray(array, dtype=idtype) test_flags.num_positional_args = len(arys) helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, **arys, ) @handle_frontend_test( fn_tree="numpy.broadcast_arrays", arrays=broadcastable_arrays(dtypes_shared("num_arrays")), input_dtypes=dtypes_shared("num_arrays"), test_with_out=st.just(False), ) def test_numpy_broadcast_arrays( *, arrays, input_dtypes, on_device, fn_tree, frontend, test_flags, backend_fw, ): args = {} for i, (array, dtype) in enumerate(zip(arrays, input_dtypes)): 
args[f"x{i}"] = np.asarray(array, dtype=dtype) test_flags.num_positional_args = len(args) helpers.test_frontend_function( input_dtypes=input_dtypes, backend_to_test=backend_fw, test_flags=test_flags, frontend=frontend, fn_tree=fn_tree, on_device=on_device, **args, ) @handle_frontend_test( fn_tree="numpy.expand_dims", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), shape=st.shared(helpers.get_shape(), key="value_shape"), ), axis=helpers.get_axis( shape=st.shared(helpers.get_shape(), key="value_shape"), min_size=1, max_size=1, force_int=True, ), ) def test_numpy_expand_dims( *, dtype_and_x, axis, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, a=x[0], axis=axis, ) @handle_frontend_test( fn_tree="numpy.squeeze", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), shape=st.shared(helpers.get_shape(), key="value_shape"), ), axis=_squeeze_helper(), ) def test_numpy_squeeze( *, dtype_and_x, axis, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, a=x[0], axis=axis, )
ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_manipulation_routines/test_changing_number_of_dimensions.py/0
{ "file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_numpy/test_manipulation_routines/test_changing_number_of_dimensions.py", "repo_id": "ivy", "token_count": 2926 }
59
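`_squeeze_helper` above samples only axes of length 1 (plus `None`) because squeezing any other axis is an error; a quick NumPy illustration:

import numpy as np

a = np.zeros((3, 1, 2))
print(np.squeeze(a, axis=1).shape)   # (3, 2): axis 1 has size 1, so it can be removed
try:
    np.squeeze(a, axis=0)            # axis 0 has size 3
except ValueError as err:
    print("cannot squeeze:", err)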
# global import ivy from hypothesis import strategies as st, assume import numpy as np # local import ivy_tests.test_ivy.helpers as helpers from ivy_tests.test_ivy.helpers import assert_all_close from ivy_tests.test_ivy.helpers import handle_frontend_test, matrix_is_stable from ivy_tests.test_ivy.test_functional.test_core.test_linalg import ( _get_dtype_and_matrix, ) from ivy_tests.test_ivy.test_frontends.test_tensorflow.test_linalg import ( _get_second_matrix, _get_cholesky_matrix, ) from ivy_tests.test_ivy.test_frontends.test_torch.test_blas_and_lapack_ops import ( _get_dtype_input_and_mat_vec, ) # --- Helpers --- # # --------------- # @st.composite def _dtype_values_axis(draw): dtype_and_values = draw( helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_num_dims=2, max_num_dims=5, min_dim_size=2, max_dim_size=5, min_value=0.1, max_value=1000.0, ) ) dtype, x = dtype_and_values x = x[0] r = len(x.shape) valid_axes = [None] for i in range(-r, r): valid_axes.append(i) for j in range(-r, r): if i != j and abs(i - j) != r: valid_axes.append([i, j]) axis = draw(st.sampled_from(valid_axes)) p_list = ["fro", 1, 2, ivy.inf, -ivy.inf] if isinstance(axis, list) and len(axis) == 2: p = draw( st.one_of( st.sampled_from(p_list), st.floats(min_value=1.0, max_value=10.0, allow_infinity=False), ) ) else: p = draw( st.one_of( st.sampled_from(p_list + [0]), st.floats(min_value=1.0, max_value=10.0, allow_infinity=False), ) ) return dtype, x, axis, p # cond @st.composite def _get_dtype_and_matrix_non_singular(draw, dtypes): while True: matrix = draw( helpers.dtype_and_values( available_dtypes=dtypes, min_value=-10, max_value=10, min_num_dims=2, max_num_dims=2, min_dim_size=1, max_dim_size=5, shape=st.tuples(st.integers(1, 5), st.integers(1, 5)).filter( lambda x: x[0] == x[1] ), allow_inf=False, allow_nan=False, ) ) if np.linalg.det(matrix[1][0]) != 0: break return matrix[0], matrix[1] @st.composite def _get_dtype_and_square_matrix(draw, real_and_complex_only=False): if real_and_complex_only: dtype = [ draw(st.sampled_from(["float32", "float64", "complex64", "complex128"])) ] else: dtype = draw(helpers.get_dtypes("valid")) dim_size = draw(helpers.ints(min_value=2, max_value=5)) mat = draw( helpers.array_values( dtype=dtype[0], shape=(dim_size, dim_size), min_value=0, max_value=10 ) ) return dtype, mat @st.composite def _get_dtype_input_and_vectors(draw): dim_size = draw(helpers.ints(min_value=1, max_value=2)) dtype = draw(helpers.get_dtypes("float")) if dim_size == 1: vec1 = draw( helpers.array_values( dtype=dtype[0], shape=(dim_size,), min_value=2, max_value=5 ) ) vec2 = draw( helpers.array_values( dtype=dtype[0], shape=(dim_size,), min_value=2, max_value=5 ) ) else: vec1 = draw( helpers.array_values( dtype=dtype[0], shape=(dim_size, dim_size), min_value=2, max_value=5 ) ) vec2 = draw( helpers.array_values( dtype=dtype[0], shape=(dim_size, dim_size), min_value=2, max_value=5 ) ) return dtype, vec1, vec2 # cholesky_solve @st.composite def _get_paddle_cholesky_matrix(draw): input_dtype, spd_chol = draw(_get_cholesky_matrix()) probability = draw(st.floats(min_value=0, max_value=1)) if probability > 0.5: spd_chol = spd_chol.T # randomly transpose the matrix return input_dtype, spd_chol # transpose @st.composite def _transpose_helper(draw): dtype, x, shape = draw( helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), min_num_dims=1, max_num_dims=4, min_dim_size=2, max_dim_size=3, ret_shape=True, ) ) perm = draw(st.permutations([i for i in range(len(shape))])) return 
dtype, x, perm # Helpers # # ------ # @st.composite def dtype_value1_value2_axis( draw, available_dtypes, abs_smallest_val=None, min_value=None, max_value=None, allow_inf=False, exclude_min=False, exclude_max=False, min_num_dims=1, max_num_dims=10, min_dim_size=1, max_dim_size=10, specific_dim_size=3, large_abs_safety_factor=4, small_abs_safety_factor=4, safety_factor_scale="log", ): # For cross product, a dim with size 3 is required shape = draw( helpers.get_shape( allow_none=False, min_num_dims=min_num_dims, max_num_dims=max_num_dims, min_dim_size=min_dim_size, max_dim_size=max_dim_size, ) ) axis = draw(helpers.ints(min_value=0, max_value=len(shape))) # make sure there is a dim with specific dim size shape = list(shape) shape = shape[:axis] + [specific_dim_size] + shape[axis:] shape = tuple(shape) dtype = draw(st.sampled_from(draw(available_dtypes))) values = [] for i in range(2): values.append( draw( helpers.array_values( dtype=dtype, shape=shape, abs_smallest_val=abs_smallest_val, min_value=min_value, max_value=max_value, allow_inf=allow_inf, exclude_min=exclude_min, exclude_max=exclude_max, large_abs_safety_factor=large_abs_safety_factor, small_abs_safety_factor=small_abs_safety_factor, safety_factor_scale=safety_factor_scale, ) ) ) value1, value2 = values[0], values[1] return [dtype], value1, value2, axis # --- Main --- # # ------------ # # bincount @handle_frontend_test( fn_tree="paddle.bincount", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("integer"), min_value=1, max_value=2, shape=st.shared( helpers.get_shape( min_num_dims=1, max_num_dims=1, ), key="a_s_d", ), ), test_with_out=st.just(False), ) def test_paddle_bincount( *, dtype_and_x, on_device, fn_tree, backend_fw, frontend, test_flags, ): input_dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=input_dtype, frontend=frontend, test_flags=test_flags, backend_to_test=backend_fw, fn_tree=fn_tree, on_device=on_device, x=x[0], weights=None, minlength=0, ) # bmm @handle_frontend_test( fn_tree="paddle.bmm", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), shape=(3, 3, 3), num_arrays=2, shared_dtype=True, min_value=-10, max_value=10, ), test_with_out=st.just(False), ) def test_paddle_bmm( *, dtype_x, frontend, test_flags, backend_fw, fn_tree, on_device, ): input_dtype, x = dtype_x helpers.test_frontend_function( input_dtypes=input_dtype, frontend=frontend, backend_to_test=backend_fw, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], y=x[1], ) # cholesky @handle_frontend_test( fn_tree="paddle.cholesky", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), min_value=0, max_value=10, shape=helpers.ints(min_value=2, max_value=5).map(lambda x: (x, x)), ), upper=st.booleans(), ) def test_paddle_cholesky( dtype_and_x, upper, frontend, test_flags, fn_tree, backend_fw, on_device, ): dtype, x = dtype_and_x x = x[0] x = np.matmul(x.T, x) + np.identity(x.shape[0]) # make symmetric positive-definite helpers.test_frontend_function( input_dtypes=dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x, upper=upper, ) @handle_frontend_test( fn_tree="paddle.linalg.cholesky_solve", x=_get_second_matrix(), y=_get_paddle_cholesky_matrix(), test_with_out=st.just(False), ) def test_paddle_cholesky_solve( *, x, y, frontend, backend_fw, test_flags, fn_tree, on_device, ): input_dtype1, x1 = x input_dtype2, x2 = y helpers.test_frontend_function( input_dtypes=[input_dtype1, 
input_dtype2], frontend=frontend, backend_to_test=backend_fw, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, rtol=1e-3, atol=1e-3, x=x1, y=x2, upper=np.array_equal(x2, np.triu(x2)), # check whether the matrix is upper ) @handle_frontend_test( fn_tree="paddle.linalg.cond", dtype_and_x=_get_dtype_and_matrix_non_singular(dtypes=["float32", "float64"]), p=st.sampled_from([None, "fro", "nuc", np.inf, -np.inf, 1, -1, 2, -2]), test_with_out=st.just(False), ) def test_paddle_cond( *, dtype_and_x, p, on_device, fn_tree, frontend, test_flags, backend_fw ): dtype, x = dtype_and_x assume(matrix_is_stable(x[0])) helpers.test_frontend_function( input_dtypes=dtype, frontend=frontend, test_flags=test_flags, backend_to_test=backend_fw, fn_tree=fn_tree, on_device=on_device, test_values=True, x=x[0], rtol=1e-5, atol=1e-5, p=p, ) # Tests # # ----- # # cross @handle_frontend_test( fn_tree="paddle.cross", dtype_x_y_axis=dtype_value1_value2_axis( available_dtypes=helpers.get_dtypes("valid"), min_num_dims=1, max_num_dims=5, min_dim_size=3, max_dim_size=3, min_value=-1e5, max_value=1e5, abs_smallest_val=0.01, safety_factor_scale="log", ), ) def test_paddle_cross( *, dtype_x_y_axis, frontend, test_flags, fn_tree, backend_fw, on_device, ): dtype, x, y, axis = dtype_x_y_axis helpers.test_frontend_function( input_dtypes=dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x, y=y, axis=axis, ) @handle_frontend_test( fn_tree="paddle.dist", dtype_and_input=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), num_arrays=2, shared_dtype=True, min_value=-1e04, max_value=1e04, allow_inf=False, ), p=helpers.floats(min_value=1.0, max_value=10.0), ) def test_paddle_dist( *, dtype_and_input, p, on_device, fn_tree, backend_fw, frontend, test_flags, ): input_dtype, x = dtype_and_input helpers.test_frontend_function( input_dtypes=input_dtype, frontend=frontend, test_flags=test_flags, backend_to_test=backend_fw, fn_tree=fn_tree, on_device=on_device, x=x[0], y=x[1], p=p, ) # dot @handle_frontend_test( fn_tree="paddle.dot", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, min_num_dims=1, max_num_dims=2, shared_dtype=True, ), test_with_out=st.just(False), ) def test_paddle_dot( *, dtype_x, frontend, test_flags, fn_tree, backend_fw, on_device, ): input_dtype, x = dtype_x helpers.test_frontend_function( input_dtypes=input_dtype, frontend=frontend, test_flags=test_flags, backend_to_test=backend_fw, fn_tree=fn_tree, on_device=on_device, x=x[0], y=x[1], ) # eig @handle_frontend_test( fn_tree="paddle.linalg.eig", dtype_and_input=_get_dtype_and_square_matrix(real_and_complex_only=True), test_with_out=st.just(False), ) def test_paddle_eig( *, dtype_and_input, frontend, test_flags, fn_tree, backend_fw, on_device, ): input_dtype, x = dtype_and_input x = np.matmul(x.T, x) + np.identity(x.shape[0]) * 1e-3 if x.dtype == ivy.float32: x = x.astype("float64") input_dtype = [ivy.float64] ret, frontend_ret = helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, test_values=False, atol=1e-4, x=x, ) ret = [ivy.to_numpy(x).astype("float64") for x in ret] frontend_ret = [np.asarray(x, dtype=np.float64) for x in frontend_ret] l, v = ret # noqa: E741 front_l, front_v = frontend_ret assert_all_close( ret_np=v @ np.diag(l) @ v.T, ret_from_gt_np=front_v @ np.diag(front_l) @ front_v.T, rtol=1e-2, atol=1e-2, 
backend=backend_fw, ground_truth_backend=frontend, ) # eigh @handle_frontend_test( fn_tree="paddle.linalg.eigh", dtype_and_input=_get_dtype_and_square_matrix(real_and_complex_only=True), UPLO=st.sampled_from(("L", "U")), test_with_out=st.just(False), ) def test_paddle_eigh( *, dtype_and_input, UPLO, frontend, test_flags, fn_tree, backend_fw, on_device, ): input_dtype, x = dtype_and_input x = np.matmul(x.T, x) + np.identity(x.shape[0]) * 1e-3 if x.dtype == ivy.float32: x = x.astype("float64") input_dtype = [ivy.float64] ret, frontend_ret = helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, test_values=False, atol=1e-4, x=x, UPLO=UPLO, ) ret = [ivy.to_numpy(x).astype("float64") for x in ret] frontend_ret = [np.asarray(x, dtype=np.float64) for x in frontend_ret] l, v = ret # noqa: E741 front_l, front_v = frontend_ret assert_all_close( ret_np=v @ np.diag(l) @ v.T, ret_from_gt_np=front_v @ np.diag(front_l) @ front_v.T, rtol=1e-2, atol=1e-2, backend=backend_fw, ground_truth_backend=frontend, ) # eigvals @handle_frontend_test( fn_tree="paddle.linalg.eigvals", dtype_x=_get_dtype_and_square_matrix(real_and_complex_only=True), test_with_out=st.just(False), ) def test_paddle_eigvals( *, dtype_x, on_device, fn_tree, frontend, test_flags, backend_fw, ): dtype, x = dtype_x x = np.array(x[0], dtype=dtype[0]) # make symmetric positive-definite beforehand x = np.matmul(x.T, x) + np.identity(x.shape[0]) * 1e-3 ret, frontend_ret = helpers.test_frontend_function( input_dtypes=dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, test_values=False, x=x, ) # eigvalsh @handle_frontend_test( fn_tree="paddle.eigvalsh", dtype_x=_get_dtype_and_square_matrix(real_and_complex_only=True), UPLO=st.sampled_from(("L", "U")), test_with_out=st.just(False), ) def test_paddle_eigvalsh( *, dtype_x, UPLO, on_device, fn_tree, frontend, test_flags, backend_fw, ): dtype, x = dtype_x x = np.asarray(x[0], dtype=dtype[0]) # make symmetric positive-definite beforehand x = np.matmul(x.T, x) + np.identity(x.shape[0]) * 1e-3 helpers.test_frontend_function( input_dtypes=dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, test_values=False, x=x, UPLO=UPLO, ) # diagonal @handle_frontend_test( fn_tree="paddle.diagonal", dtype_and_values=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), shape=st.shared(helpers.get_shape(min_num_dims=2), key="shape"), ), axis_and_offset=helpers.dims_and_offset( shape=st.shared(helpers.get_shape(min_num_dims=2), key="shape") ), ) def test_paddle_linalg_diagonal( dtype_and_values, axis_and_offset, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, value = dtype_and_values axis1, axis2, offset = axis_and_offset input = value[0] num_dims = len(np.shape(input)) assume(axis1 != axis2) if axis1 < 0: assume(axis1 + num_dims != axis2) if axis2 < 0: assume(axis1 != axis2 + num_dims) helpers.test_frontend_function( input_dtypes=input_dtype, on_device=on_device, frontend=frontend, backend_to_test=backend_fw, test_flags=test_flags, fn_tree=fn_tree, x=input, offset=offset, axis1=axis1, axis2=axis2, ) @handle_frontend_test( fn_tree="paddle.lu_unpack", dtype_x=_get_dtype_and_square_matrix(real_and_complex_only=True), p=st.lists(st.floats(1, 5), max_size=5), unpack_datas=st.booleans(), unpack_pivots=st.booleans(), ) def test_paddle_lu_unpack( *, 
dtype_x, p, unpack_datas, unpack_pivots, on_device, fn_tree, frontend, test_flags, backend_fw, ): dtype, x = dtype_x x = np.array(x[0], dtype=dtype[0]) helpers.test_frontend_function( input_dtypes=dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, lu_data=x, lu_pivots=p, unpack_datas=unpack_datas, unpack_pivots=unpack_pivots, rtol=1e-03, atol=1e-03, ) # matmul @handle_frontend_test( fn_tree="paddle.matmul", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), shape=(3, 3), num_arrays=2, shared_dtype=True, min_value=-10, max_value=10, ), transpose_x=st.booleans(), transpose_y=st.booleans(), test_with_out=st.just(False), ) def test_paddle_matmul( *, dtype_x, transpose_x, transpose_y, frontend, test_flags, fn_tree, backend_fw, on_device, ): input_dtype, x = dtype_x helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], y=x[1], transpose_x=transpose_x, transpose_y=transpose_y, ) # matrix_power @handle_frontend_test( fn_tree="paddle.linalg.matrix_power", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=0, max_value=50, shape=helpers.ints(min_value=2, max_value=8).map(lambda x: (x, x)), ), n=helpers.ints(min_value=1, max_value=8), test_with_out=st.just(False), ) def test_paddle_matrix_power( dtype_and_x, n, frontend, backend_fw, test_flags, fn_tree, on_device, ): dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=dtype, frontend=frontend, test_flags=test_flags, backend_to_test=backend_fw, fn_tree=fn_tree, on_device=on_device, x=x[0], n=n, ) # mv @handle_frontend_test( fn_tree="paddle.mv", dtype_mat_vec=_get_dtype_input_and_mat_vec(), test_with_out=st.just(False), ) def test_paddle_mv( dtype_mat_vec, frontend, test_flags, backend_fw, fn_tree, on_device, ): dtype, mat, vec = dtype_mat_vec helpers.test_frontend_function( input_dtypes=dtype, frontend=frontend, test_flags=test_flags, backend_to_test=backend_fw, fn_tree=fn_tree, on_device=on_device, x=mat, vec=vec, ) # norm @handle_frontend_test( fn_tree="paddle.norm", dtype_values_axis=_dtype_values_axis(), keepdims=st.booleans(), test_with_out=st.just(False), ) def test_paddle_norm( dtype_values_axis, keepdims, frontend, test_flags, fn_tree, backend_fw, on_device, ): dtype, x, axis, p = dtype_values_axis helpers.test_frontend_function( input_dtypes=dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x, p=p, axis=axis, keepdim=keepdims, atol=1e-1, rtol=1e-1, ) # pinv @handle_frontend_test( fn_tree="paddle.linalg.pinv", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_num_dims=2, max_num_dims=5, min_dim_size=2, max_dim_size=5, min_value=3, max_value=10, large_abs_safety_factor=128, safety_factor_scale="log", ), rcond=st.floats(1e-5, 1e-3), test_with_out=st.just(False), ) def test_paddle_pinv( dtype_and_x, rcond, frontend, test_flags, fn_tree, backend_fw, on_device, ): # TODO: paddle returns nan for all values if the input # matrix has the same value at all indices e.g. # [[2., 2.], [2., 2.]] would return [[nan, nan], [nan, nan]], # causing the tests to fail for other backends. 
dtype, x = dtype_and_x helpers.test_frontend_function( input_dtypes=dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, rtol=1e-3, atol=1e-3, x=x[0], rcond=rcond, ) # qr @handle_frontend_test( fn_tree="paddle.linalg.qr", dtype_and_x=_get_dtype_and_matrix(), mode=st.sampled_from(("reduced", "complete")), test_with_out=st.just(False), ) def test_paddle_qr( dtype_and_x, mode, frontend, test_flags, fn_tree, backend_fw, on_device, ): dtype, x = dtype_and_x assume(matrix_is_stable(x[0])) helpers.test_frontend_function( input_dtypes=dtype, frontend=frontend, test_flags=test_flags, backend_to_test=backend_fw, fn_tree=fn_tree, on_device=on_device, rtol=1e-01, x=x[0], mode=mode, ) # solve @handle_frontend_test( fn_tree="paddle.linalg.solve", x=helpers.get_first_solve_batch_matrix(), y=helpers.get_second_solve_batch_matrix(), test_with_out=st.just(False), ) def test_paddle_solve( *, x, y, frontend, backend_fw, test_flags, fn_tree, on_device, ): input_dtype1, x1, _ = x input_dtype2, x2, _ = y helpers.test_frontend_function( input_dtypes=[input_dtype1, input_dtype2], backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, rtol=1e-3, atol=1e-3, x=x1, y=x2, ) @handle_frontend_test( fn_tree="paddle.transpose", dtype_and_x_perm=_transpose_helper(), test_with_out=st.just(False), ) def test_paddle_transpose( dtype_and_x_perm, frontend, test_flags, backend_fw, fn_tree, on_device, ): dtype, x, perm = dtype_and_x_perm helpers.test_frontend_function( input_dtypes=dtype, frontend=frontend, test_flags=test_flags, backend_to_test=backend_fw, fn_tree=fn_tree, on_device=on_device, x=x[0], perm=perm, )
ivy/ivy_tests/test_ivy/test_frontends/test_paddle/test_linalg.py/0
{ "file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_paddle/test_linalg.py", "repo_id": "ivy", "token_count": 13201 }
60
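Several of the tests above (cholesky, eig, eigh, eigvals, eigvalsh) pre-condition their random matrix with `x.T @ x + identity`. A short sketch of why this yields a valid input: `A.T @ A` is symmetric positive semi-definite, and adding the identity shifts every eigenvalue up by one, so the result is strictly positive definite and factorizations that require SPD matrices are well defined.

import numpy as np

a = np.random.rand(4, 4)
spd = a.T @ a + np.identity(4)                # symmetric and strictly positive definite

np.linalg.cholesky(spd)                       # would raise LinAlgError for a non-SPD matrix
assert np.all(np.linalg.eigvalsh(spd) > 0.0)  # eigenvalues of A.T @ A are >= 0, shifted up by 1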
# global import numpy as np from hypothesis import assume, strategies as st # local import ivy_tests.test_ivy.helpers as helpers from ivy_tests.test_ivy.helpers import handle_frontend_test # --- Helpers --- # # --------------- # # test_where @st.composite def _broadcastable_trio(draw): shape = draw(helpers.get_shape(min_num_dims=1, min_dim_size=1)) cond = draw(helpers.array_values(dtype="bool", shape=shape)) dtypes, xs = draw( helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, shape=shape, shared_dtype=True, large_abs_safety_factor=16, small_abs_safety_factor=16, safety_factor_scale="log", ) ) return cond, xs, dtypes # masked_select @st.composite def _dtypes_input_mask(draw): _shape = draw(helpers.get_shape(min_num_dims=1, min_dim_size=1)) _mask = draw(helpers.array_values(dtype="bool", shape=_shape)) _dtype, _x = draw( helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=1, shape=_shape, ) ) return _dtype, _x, _mask # --- Main --- # # ------------ # @handle_frontend_test( fn_tree="paddle.argmax", dtype_x_and_axis=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("valid"), min_num_dims=1, valid_axis=True, force_int_axis=True, ), keepdim=st.booleans(), ) def test_paddle_argmax( dtype_x_and_axis, keepdim, frontend, backend_fw, test_flags, fn_tree, ): # Skipped dtype test due to paddle functions only accepting str and np.ndarray, # but test_frontend_function changes dtype kwargs to native dtype input_dtypes, x, axis = dtype_x_and_axis helpers.test_frontend_function( input_dtypes=input_dtypes, frontend=frontend, backend_to_test=backend_fw, test_flags=test_flags, fn_tree=fn_tree, x=x[0], axis=axis, keepdim=keepdim, ) @handle_frontend_test( fn_tree="paddle.argmin", dtype_x_and_axis=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("valid"), min_num_dims=1, valid_axis=True, force_int_axis=True, ), keepdim=st.booleans(), ) def test_paddle_argmin( dtype_x_and_axis, keepdim, frontend, backend_fw, test_flags, fn_tree, ): input_dtypes, x, axis = dtype_x_and_axis helpers.test_frontend_function( input_dtypes=input_dtypes, frontend=frontend, backend_to_test=backend_fw, test_flags=test_flags, fn_tree=fn_tree, x=x[0], axis=axis, keepdim=keepdim, ) # argsort @handle_frontend_test( fn_tree="paddle.argsort", dtype_input_axis=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("valid"), min_num_dims=1, valid_axis=True, force_int_axis=True, ), descending=st.booleans(), ) def test_paddle_argsort( dtype_input_axis, descending, on_device, fn_tree, frontend, backend_fw, test_flags, ): input_dtype, x, axis = dtype_input_axis helpers.test_frontend_function( input_dtypes=input_dtype, frontend=frontend, backend_to_test=backend_fw, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], axis=axis, descending=descending, ) @handle_frontend_test( fn_tree="paddle.index_sample", array_indices_axis=helpers.array_indices_axis( array_dtypes=helpers.get_dtypes("valid"), indices_dtypes=helpers.get_dtypes("integer"), min_num_dims=2, max_num_dims=2, disable_random_axis=True, ), ) def test_paddle_index_sample( *, array_indices_axis, frontend, test_flags, fn_tree, backend_fw, ): dtype, x, index = array_indices_axis if index.ndim == 2 and index.shape[0] == x.shape[0]: helpers.test_frontend_function( input_dtypes=dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, x=x, index=index, ) # kthvalue @handle_frontend_test( fn_tree="paddle.kthvalue", 
dtype_input_axis=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("valid"), min_num_dims=2, valid_axis=True, force_int_axis=True, ).filter(lambda v: len(np.unique(v[1][0])) == len(np.ravel(v[1][0]))), k=st.integers(min_value=1), keepdim=st.booleans(), ) def test_paddle_kthvalue( *, dtype_input_axis, k, keepdim, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, x, axis = dtype_input_axis assume(k <= x[0].shape[axis]) helpers.test_frontend_function( input_dtypes=input_dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], k=k, axis=axis, keepdim=keepdim, ) @handle_frontend_test( fn_tree="paddle.masked_select", dtype_input_mask=_dtypes_input_mask(), ) def test_paddle_masked_select( *, dtype_input_mask, on_device, fn_tree, frontend, test_flags, backend_fw, ): ( input_dtype, x, mask, ) = dtype_input_mask helpers.test_frontend_function( input_dtypes=input_dtype + ["bool"], backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, x=x[0], mask=mask, ) # nonzero @handle_frontend_test( fn_tree="paddle.nonzero", dtype_and_values=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), ), as_tuple=st.booleans(), ) def test_paddle_nonzero( *, dtype_and_values, as_tuple, on_device, fn_tree, frontend, backend_fw, test_flags, ): dtype, input = dtype_and_values helpers.test_frontend_function( input_dtypes=dtype, frontend=frontend, backend_to_test=backend_fw, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, input=input[0], as_tuple=as_tuple, ) # searchsorted @handle_frontend_test( fn_tree="paddle.searchsorted", dtype_and_values=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), shared_dtype=True, min_num_dims=1, num_arrays=2, ), out_int32=st.booleans(), right=st.booleans(), ) def test_paddle_searchsorted( *, dtype_and_values, out_int32, right, on_device, fn_tree, frontend, backend_fw, test_flags, ): dtype, input = dtype_and_values input[0] = np.sort(input[0]) helpers.test_frontend_function( input_dtypes=dtype, frontend=frontend, backend_to_test=backend_fw, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, sorted_sequence=input[0], values=input[1], out_int32=out_int32, right=right, ) # sort @handle_frontend_test( fn_tree="paddle.tensor.search.sort", dtype_input_axis=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("valid"), min_num_dims=1, valid_axis=True, force_int_axis=True, ), descending=st.booleans(), ) def test_paddle_sort( *, dtype_input_axis, descending, on_device, fn_tree, frontend, test_flags, backend_fw, ): input_dtype, x, axis = dtype_input_axis helpers.test_frontend_function( input_dtypes=input_dtype, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, backend_to_test=backend_fw, x=x[0], axis=axis, descending=descending, ) @handle_frontend_test( fn_tree="paddle.topk", dtype_x_and_axis=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("valid"), min_num_dims=1, valid_axis=True, force_int_axis=True, ), k=st.data(), sorted=st.booleans(), largest=st.booleans(), ) def test_paddle_topk( *, dtype_x_and_axis, k, sorted, largest, on_device, fn_tree, frontend, backend_fw, test_flags, ): input_dtypes, x, axis = dtype_x_and_axis k = k.draw(st.integers(min_value=1, max_value=x[0].shape[axis])) helpers.test_frontend_function( input_dtypes=input_dtypes, frontend=frontend, backend_to_test=backend_fw, test_flags=test_flags, fn_tree=fn_tree, 
on_device=on_device, x=x[0], k=k, axis=axis, largest=largest, sorted=sorted, test_values=False, ) @handle_frontend_test( fn_tree="paddle.where", broadcastables=_broadcastable_trio(), ) def test_paddle_where( *, broadcastables, test_flags, frontend, backend_fw, fn_tree, on_device ): cond, xs, dtypes = broadcastables helpers.test_frontend_function( input_dtypes=["bool"] + dtypes, test_flags=test_flags, frontend=frontend, backend_to_test=backend_fw, fn_tree=fn_tree, on_device=on_device, condition=cond, x=xs[0], y=xs[1], )
ivy/ivy_tests/test_ivy/test_frontends/test_paddle/test_search.py/0
{ "file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_paddle/test_search.py", "repo_id": "ivy", "token_count": 4836 }
61
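`test_paddle_searchsorted` above sorts the first drawn array before the call because the operation is a binary search over `sorted_sequence`; on an unsorted input the returned indices would be meaningless. A NumPy sketch of the expected result, including the `right` side option the test also draws:

import numpy as np

seq = np.sort(np.array([7, 1, 5, 3]))              # -> [1, 3, 5, 7]
print(np.searchsorted(seq, [2, 6]))                # [1 3]: insertion points that keep `seq` sorted
print(np.searchsorted(seq, [2, 6], side="right"))  # [1 3] here too, since 2 and 6 do not occur in `seq`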
from ivy.functional.frontends.sklearn.tree import DecisionTreeClassifier as ivy_DTC import ivy from hypothesis import given import ivy_tests.test_ivy.helpers as helpers # --- Helpers --- # # --------------- # # helper functions def _get_sklearn_predict(X, y, max_depth, DecisionTreeClassifier): clf = DecisionTreeClassifier(max_depth=max_depth, random_state=0) clf.fit(X, y) return clf.predict # --- Main --- # # ------------ # # todo: integrate with already existing strats and generalize @given( X=helpers.array_values( shape=(5, 2), dtype=helpers.get_dtypes("float", prune_function=False), safety_factor_scale="log", ), y=helpers.array_values( shape=(5,), dtype=helpers.get_dtypes("signed_integer", prune_function=False), safety_factor_scale="log", ), max_depth=helpers.ints(max_value=5, min_value=1), ) def test_sklearn_tree_predict(X, y, max_depth): try: from sklearn.tree import DecisionTreeClassifier as sklearn_DTC except ImportError: print("sklearn not installed, skipping test_sklearn_tree_predict") return sklearn_pred = _get_sklearn_predict(X, y, max_depth, sklearn_DTC)(X) ivy_pred = _get_sklearn_predict(ivy.array(X), ivy.array(y), max_depth, ivy_DTC)(X) helpers.assert_same_type_and_shape([sklearn_pred, ivy_pred])
ivy/ivy_tests/test_ivy/test_frontends/test_sklearn/test_tree/test_tree.py/0
{ "file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_sklearn/test_tree/test_tree.py", "repo_id": "ivy", "token_count": 549 }
62
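A concrete, minimal instance of the comparison performed by `test_sklearn_tree_predict` above, assuming scikit-learn is installed. The toy data is hypothetical; it only shows the fit/predict round trip on which the reference classifier and the ivy frontend `DecisionTreeClassifier` are expected to agree.

import numpy as np
from sklearn.tree import DecisionTreeClassifier

X = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0], [3.0, 3.0]])
y = np.array([0, 0, 1, 1])

clf = DecisionTreeClassifier(max_depth=2, random_state=0).fit(X, y)
print(clf.predict(X))   # [0 0 1 1] -- the ivy frontend classifier should produce matching predictions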
# global from hypothesis import strategies as st, given, assume import numpy as np import tensorflow as tf # local import ivy import ivy_tests.test_ivy.helpers as helpers from ivy_tests.test_ivy.helpers import handle_frontend_method, BackendHandler from ivy_tests.test_ivy.test_frontends.test_tensorflow.test_raw_ops import ( _pow_helper_shared_dtype, ) from ivy.functional.frontends.tensorflow import EagerTensor CLASS_TREE = "ivy.functional.frontends.tensorflow.EagerTensor" # --- Helpers --- # # --------------- # @st.composite def _array_and_shape( draw, *, min_num_dims=1, max_num_dims=3, min_dim_size=1, max_dim_size=10, ): if isinstance(min_dim_size, st._internal.SearchStrategy): min_dim_size = draw(min_dim_size) if isinstance(max_dim_size, st._internal.SearchStrategy): max_dim_size = draw(max_dim_size) available_dtypes = draw(helpers.get_dtypes("numeric")) dtype = draw( helpers.array_dtypes( num_arrays=1, available_dtypes=available_dtypes, ) ) dtype.append("int32") shape = draw( st.shared( helpers.get_shape( min_num_dims=min_num_dims, max_num_dims=max_num_dims, min_dim_size=min_dim_size, max_dim_size=max_dim_size, ), key="shape", ) ) array = draw( helpers.array_values( dtype=dtype[0], shape=shape, ) ) to_shape = [(None if draw(st.booleans()) else _) for _ in shape] return dtype, [array, to_shape] # same implementation as in tensorflow backend but was causing backend conflict issues def _check_query(query): return not isinstance(query, list) and ( not (ivy.is_array(query) and ivy.is_bool_dtype(query) ^ bool(query.ndim > 0)) ) # --- Main --- # # ------------ # # __add__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="tensorflow.constant", method_name="__add__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, shared_dtype=True, ), ) def test_tensorflow___add__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "value": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "y": x[1], }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # __array__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="tensorflow.constant", method_name="__array__", dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("valid")), dtype=helpers.get_dtypes("valid", full=False), ) def test_tensorflow___array__( dtype_and_x, dtype, frontend, backend_fw, ): input_dtype, x = dtype_and_x dtype[0] = np.dtype(dtype[0]) ret_gt = tf.constant(x[0]).__array__(dtype[0]) with BackendHandler.update_backend(backend_fw) as ivy_backend: local_importer = ivy_backend.utils.dynamic_import function_module = local_importer.import_module( "ivy.functional.frontends.tensorflow" ) ret = function_module.constant(x[0]).__array__(dtype[0]) helpers.value_test( ret_np_flat=ret.ravel(), ret_np_from_gt_flat=ret_gt.ravel(), ground_truth_backend="tensorflow", backend=backend_fw, ) # __bool__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="tensorflow.constant", method_name="__bool__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("integer"), max_dim_size=1, ), ) def test_tensorflow___bool__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( 
init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "value": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="tensorflow.constant", method_name="__div__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, shared_dtype=True, large_abs_safety_factor=10, small_abs_safety_factor=10, safety_factor_scale="log", ), ) def test_tensorflow___div__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_and_x assume(not np.any(np.isclose(x[0], 0))) assume(not np.any(np.isclose(x[1], 0))) helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "value": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "y": x[1], }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="tensorflow.constant", method_name="__eq__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), num_arrays=2, ), ) def test_tensorflow___eq__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "value": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="tensorflow.constant", method_name="__floordiv__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), num_arrays=2, shared_dtype=True, large_abs_safety_factor=10, small_abs_safety_factor=10, safety_factor_scale="log", ), ) def test_tensorflow___floordiv__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "value": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "y": x[1], }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # __ge__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="tensorflow.constant", method_name="__ge__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, shared_dtype=True, ), ) def test_tensorflow___ge__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "value": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "y": x[1], }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # __getitem__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="tensorflow.constant", method_name="__getitem__", 
dtype_x_index=helpers.dtype_array_query( available_dtypes=helpers.get_dtypes("valid"), ).filter( lambda x: ( all(_check_query(i) for i in x[-1]) if isinstance(x[-1], tuple) else _check_query(x[-1]) ) ), ) def test_tensorflow___getitem__( dtype_x_index, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x, index = dtype_x_index helpers.test_frontend_method( init_input_dtypes=[input_dtype[0]], backend_to_test=backend_fw, init_all_as_kwargs_np={"value": x}, method_input_dtypes=[*input_dtype[1:]], method_all_as_kwargs_np={"slice_spec": index}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # __gt__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="tensorflow.constant", method_name="__gt__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, shared_dtype=True, ), ) def test_tensorflow___gt__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "value": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "y": x[1], }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # __invert__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="tensorflow.constant", method_name="__invert__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("integer") ), ) def test_tensorflow___invert__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "value": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # __le__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="tensorflow.constant", method_name="__le__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, shared_dtype=True, ), ) def test_tensorflow___le__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "value": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "y": x[1], }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # __len__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="tensorflow.constant", method_name="__len__", dtype_and_x=_array_and_shape( min_num_dims=1, max_num_dims=5, ), ) def test_tensorflow___len__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "value": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # __lt__ 
@handle_frontend_method( class_tree=CLASS_TREE, init_tree="tensorflow.constant", method_name="__lt__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, shared_dtype=True, ), ) def test_tensorflow___lt__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "value": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "y": x[1], }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # __matmul__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="tensorflow.constant", method_name="__matmul__", dtype_and_x=helpers.dtype_and_values( available_dtypes=[ "float32", "float64", "int32", "int64", ], shape=(3, 3), num_arrays=2, shared_dtype=True, large_abs_safety_factor=10, small_abs_safety_factor=10, safety_factor_scale="log", ), ) def test_tensorflow___matmul__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "value": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "y": x[1], }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # __mod__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="tensorflow.constant", method_name="__mod__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, shared_dtype=True, ), ) def test_tensorflow___mod__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_and_x assume(not np.any(np.isclose(x[0], 0))) assume(not np.any(np.isclose(x[1], 0))) helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "value": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "y": x[1], }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="tensorflow.constant", method_name="__mul__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, shared_dtype=True, ), ) def test_tensorflow___mul__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "value": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "y": x[1], }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # __ne__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="tensorflow.constant", method_name="__ne__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, shared_dtype=True, ), ) def test_tensorflow___ne__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( 
init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "value": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # __neg__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="tensorflow.constant", method_name="__neg__", dtype_and_x=helpers.dtype_and_values( available_dtypes=[ "float32", "float64", "int8", "int16", "int32", "int64", ], ), ) def test_tensorflow___neg__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "value": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # __nonzero__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="tensorflow.constant", method_name="__nonzero__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("integer"), max_dim_size=1, ), ) def test_tensorflow___nonzero__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "value": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # __or__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="tensorflow.constant", method_name="__or__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("integer"), num_arrays=2, shared_dtype=True, ), ) def test_tensorflow___or__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "value": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "y": x[1], }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # __pow__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="tensorflow.constant", method_name="__pow__", dtype_and_x=helpers.dtype_and_values( available_dtypes=[ "float16", "float32", "float64", "int32", "int64", ], num_arrays=2, shared_dtype=True, ), ) def test_tensorflow___pow__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_and_x if x[1].dtype in ["int32", "int64"]: if x[1].ndim == 0: if x[1] < 0: x[1] *= -1 else: x[1][(x[1] < 0).nonzero()] *= -1 helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "value": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "y": x[1], }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # __radd__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="tensorflow.constant", method_name="__radd__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), 
num_arrays=2, shared_dtype=True, ), ) def test_tensorflow___radd__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "value": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "x": x[1], }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # __rand__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="tensorflow.constant", method_name="__rand__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("integer"), num_arrays=2, shared_dtype=True, ), ) def test_tensorflow___rand__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "value": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "x": x[1], }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # __rfloordiv__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="tensorflow.constant", method_name="__rfloordiv__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), num_arrays=2, shared_dtype=True, large_abs_safety_factor=10, small_abs_safety_factor=10, safety_factor_scale="log", ), ) def test_tensorflow___rfloordiv__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "value": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "x": x[1], }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # __rmatmul__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="tensorflow.constant", method_name="__rmatmul__", dtype_and_x=helpers.dtype_and_values( available_dtypes=[ "float32", "float64", "int32", "int64", ], shape=(3, 3), num_arrays=2, shared_dtype=True, large_abs_safety_factor=10, small_abs_safety_factor=10, safety_factor_scale="log", ), ) def test_tensorflow___rmatmul__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "value": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "x": x[1], }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # __rmul__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="tensorflow.constant", method_name="__rmul__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, shared_dtype=True, min_value=-100, max_value=100, ), ) def test_tensorflow___rmul__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "value": x[0], }, method_input_dtypes=input_dtype, 
method_all_as_kwargs_np={ "x": x[1], }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # __ror__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="tensorflow.constant", method_name="__ror__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("integer"), num_arrays=2, shared_dtype=True, ), ) def test_tensorflow___ror__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "value": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "x": x[1], }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # __rpow__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="tensorflow.constant", method_name="__rpow__", dtype_and_x=_pow_helper_shared_dtype(), ) def test_tensorflow___rpow__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "value": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "x": x[1], }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # __rsub__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="tensorflow.constant", method_name="__rsub__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, shared_dtype=True, ), ) def test_tensorflow___rsub__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "value": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "x": x[1], }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # __rtruediv__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="tensorflow.constant", method_name="__rtruediv__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, shared_dtype=True, large_abs_safety_factor=15, small_abs_safety_factor=15, safety_factor_scale="log", ), ) def test_tensorflow___rtruediv__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "value": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "x": x[1], }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # __rxor__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="tensorflow.constant", method_name="__rxor__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("integer"), num_arrays=2, shared_dtype=True, ), ) def test_tensorflow___rxor__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( 
init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "value": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "x": x[1], }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # __sub__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="tensorflow.constant", method_name="__sub__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, shared_dtype=True, ), ) def test_tensorflow___sub__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "value": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "y": x[1], }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # __truediv__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="tensorflow.constant", method_name="__truediv__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, shared_dtype=True, large_abs_safety_factor=15, small_abs_safety_factor=15, safety_factor_scale="log", ), ) def test_tensorflow___truediv__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_and_x assume(not np.any(np.isclose(x[1], 0))) helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "value": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "y": x[1], }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # __xor__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="tensorflow.constant", method_name="__xor__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("integer"), num_arrays=2, shared_dtype=True, ), ) def test_tensorflow___xor__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "value": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "y": x[1], }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # __and__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="tensorflow.constant", method_name="__and__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("integer"), num_arrays=2, shared_dtype=True, ), ) def test_tensorflow__and__( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "value": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "y": x[1], }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @given( dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid", prune_function=False) ), ) def test_tensorflow_device( dtype_x, backend_fw, ): 
ivy.set_backend(backend_fw) _, data = dtype_x data = ivy.native_array(data[0]) x = EagerTensor(data) ivy.utils.assertions.check_equal(x.device, ivy.dev(data), as_array=False) ivy.previous_backend() @given( dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid", prune_function=False), ), ) def test_tensorflow_dtype( dtype_x, backend_fw, ): ivy.set_backend(backend_fw) dtype, data = dtype_x x = EagerTensor(data[0]) ivy.utils.assertions.check_equal(x.dtype, ivy.Dtype(dtype[0]), as_array=False) ivy.previous_backend() @handle_frontend_method( class_tree=CLASS_TREE, init_tree="tensorflow.constant", method_name="get_shape", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), min_num_dims=1, min_dim_size=1, ), ) def test_tensorflow_get_shape( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "value": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @given( dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid", prune_function=False) ), ) def test_tensorflow_ivy_array( dtype_x, backend_fw, ): ivy.set_backend(backend_fw) _, data = dtype_x x = EagerTensor(data[0]) ret = helpers.flatten_and_to_np(ret=x.ivy_array.data, backend=backend_fw) ret_gt = helpers.flatten_and_to_np(ret=data[0], backend=backend_fw) helpers.value_test( ret_np_flat=ret, ret_np_from_gt_flat=ret_gt, ground_truth_backend="tensorflow", backend=backend_fw, ) ivy.previous_backend() @handle_frontend_method( class_tree=CLASS_TREE, init_tree="tensorflow.constant", method_name="set_shape", dtype_and_x=_array_and_shape( min_num_dims=0, max_num_dims=5, ), ) def test_tensorflow_set_shape( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, backend_fw, on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=[input_dtype[0]], backend_to_test=backend_fw, init_all_as_kwargs_np={"value": x[0]}, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"shape": x[1]}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @given( dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid", prune_function=False), ret_shape=True, ), ) def test_tensorflow_shape( dtype_x, backend_fw, ): ivy.set_backend(backend_fw) dtype, data, shape = dtype_x x = EagerTensor(data[0]) ivy.utils.assertions.check_equal( x.ivy_array.shape, ivy.Shape(shape), as_array=False ) ivy.previous_backend()
ivy/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_tensor.py
{ "file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_tensor.py", "repo_id": "ivy", "token_count": 20231 }
63
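# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite above): the frontend-method
# tests in test_tensor.py exercise dunder methods on Ivy's TensorFlow frontend
# tensor, which is constructed through ivy.functional.frontends.tensorflow
# .constant and compared against a ground-truth framework by the test helpers.
# The snippet below shows that idea directly and in isolation; the choice of
# the "numpy" backend and the concrete values are assumptions made purely for
# illustration, not part of the suite.
import numpy as np

import ivy
import ivy.functional.frontends.tensorflow as tf_frontend

ivy.set_backend("numpy")  # assumption: any installed backend would do

a = np.array([[1.0, 2.0], [3.0, 4.0]], dtype="float32")
b = np.array([[5.0, 6.0], [7.0, 8.0]], dtype="float32")

x = tf_frontend.constant(a)
y = tf_frontend.constant(b)

# __add__ on the frontend tensor should agree with the plain NumPy reference.
out = x + y
assert np.allclose(ivy.to_numpy(out.ivy_array), a + b)

ivy.previous_backend()
# ---------------------------------------------------------------------------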
# global import math from hypothesis import strategies as st, assume # local import ivy_tests.test_ivy.helpers as helpers from ivy_tests.test_ivy.helpers import handle_frontend_test from ivy_tests.test_ivy.test_functional.test_nn.test_layers import ( _assume_tf_dilation_gt_1, ) # --- Helpers --- # # --------------- # @st.composite def _fold_helper(draw, dim=2): stride, padding, dilation, kernel_size = draw(_fold_unfold_helper(dim)) strides = [stride] * dim if isinstance(stride, int) else stride paddings = [padding] * dim if isinstance(padding, int) else padding dilations = [dilation] * dim if isinstance(dilation, int) else dilation kernel_sizes = [kernel_size] * dim if isinstance(kernel_size, int) else kernel_size output_shape = () for i in range(dim): min_dim = kernel_sizes[i] + (kernel_sizes[i] - 1) * (dilations[i] - 1) output_shape = output_shape + (draw(st.integers(min_dim, 15)),) batch_size = draw(st.integers(1, 5)) n_channels = draw(st.integers(1, 3)) x_shape = [ (output_shape[i] + 2 * paddings[i] - dilations[i] * (kernel_sizes[i] - 1) - 1) // strides[i] + 1 for i in range(2) ] x_shape = (batch_size, n_channels * math.prod(kernel_sizes), math.prod(x_shape)) dtype, [vals] = draw( helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), shape=x_shape, min_value=0.0, max_value=1.0, ) ) if vals.shape[0] == 1: # un-batched inputs are also supported vals = draw(st.sampled_from([vals, vals[0]])) return dtype, vals, kernel_size, output_shape, dilation, stride, padding @st.composite def _fold_unfold_helper(draw, dim): stride = draw( st.one_of( st.lists(st.integers(min_value=1, max_value=3), min_size=dim, max_size=dim), st.integers(min_value=1, max_value=3), ) ) padding = draw( st.one_of( st.integers(min_value=1, max_value=3), st.lists(st.integers(min_value=1, max_value=2), min_size=dim, max_size=dim), ) ) dilation = draw( st.one_of( st.lists(st.integers(min_value=1, max_value=3), min_size=dim, max_size=dim), st.integers(min_value=1, max_value=3), ) ) kernel_size = draw( st.one_of( st.integers(min_value=1, max_value=5), helpers.get_shape( min_num_dims=dim, max_num_dims=dim, min_dim_size=1, max_dim_size=5 ), ) ) return stride, padding, dilation, kernel_size def _output_shape( dims, dilation, stride, padding, output_padding, input_shape, weight_shape ): padding, output_padding = map( lambda x: [x] * dims if isinstance(x, int) else x, [padding, output_padding], ) return [ (input_shape[i] - 1) * stride[i] - 2 * padding[i] + dilation[i] * (weight_shape[i] - 1) + output_padding[i] + 1 for i in range(dims) ] @st.composite def _unfold_helper(draw, dim=2): stride, padding, dilation, kernel_size = draw(_fold_unfold_helper(dim)) dilations = [dilation] * dim if isinstance(dilation, int) else dilation kernel_sizes = [kernel_size] * dim if isinstance(kernel_size, int) else kernel_size x_dim = [] for i in range(dim): min_x = kernel_sizes[i] + (kernel_sizes[i] - 1) * (dilations[i] - 1) x_dim.append(draw(st.integers(min_x, 15))) batch_size = draw(st.integers(1, 5)) input_channels = draw(st.integers(1, 3)) x_shape = (batch_size, input_channels) + tuple(x_dim) dtype, [vals] = draw( helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), shape=x_shape, min_value=0.0, max_value=1.0, ) ) return dtype, vals, kernel_size, dilation, stride, padding @st.composite def _x_and_filters(draw, dim: int = 2, transpose: bool = False, max_dilation=3): if not isinstance(dim, int): dim = draw(dim) strides = draw( st.one_of( st.lists(st.integers(min_value=1, max_value=3), min_size=dim, max_size=dim), 
st.integers(min_value=1, max_value=3), ) ) if not transpose: padding = draw( st.one_of( ( st.sampled_from(["same", "valid"]) if strides == 1 else st.just("valid") ), st.integers(min_value=1, max_value=3), st.lists( st.integers(min_value=1, max_value=2), min_size=dim, max_size=dim ), ) ) else: padding = draw( st.one_of( st.integers(min_value=1, max_value=3), st.lists( st.integers(min_value=1, max_value=2), min_size=dim, max_size=dim ), ) ) batch_size = draw(st.integers(1, 5)) filter_dim = draw( helpers.get_shape( min_num_dims=dim, max_num_dims=dim, min_dim_size=1, max_dim_size=5 ) ) dtype = draw(helpers.get_dtypes("float", full=False)) input_channels = draw(st.integers(1, 3)) output_channels = draw(st.integers(1, 3)) group_list = [i for i in range(1, 3)] if not transpose: group_list = list(filter(lambda x: (input_channels % x == 0), group_list)) else: group_list = list(filter(lambda x: (output_channels % x**2 == 0), group_list)) fc = draw(st.sampled_from(group_list)) dilations = draw( st.one_of( st.lists( st.integers(min_value=1, max_value=max_dilation), min_size=dim, max_size=dim, ), st.integers(min_value=1, max_value=max_dilation), ) ) fdilations = [dilations] * dim if isinstance(dilations, int) else dilations if transpose: x_dim = draw( helpers.get_shape( min_num_dims=dim, max_num_dims=dim, min_dim_size=2, max_dim_size=5 ) ) else: x_dim = [] for i in range(dim): min_x = filter_dim[i] + (filter_dim[i] - 1) * (fdilations[i] - 1) x_dim.append(draw(st.integers(min_x, 15))) x_dim = tuple(x_dim) if not transpose: output_channels = output_channels * fc filter_shape = (output_channels, input_channels // fc) + filter_dim else: input_channels = input_channels * fc filter_shape = (input_channels, output_channels // fc) + filter_dim x_shape = (batch_size, input_channels) + x_dim vals = draw( helpers.array_values( dtype=dtype[0], shape=x_shape, min_value=0.0, max_value=1.0, ) ) filters = draw( helpers.array_values( dtype=dtype[0], shape=filter_shape, min_value=0.0, max_value=1.0, ) ) bias = draw( helpers.array_values( dtype=dtype[0], shape=(output_channels,), min_value=0.0, max_value=1.0, ) ) if transpose: fstrides = [strides] * dim if isinstance(strides, int) else strides output_padding = draw( st.lists(st.integers(min_value=1, max_value=2), min_size=dim, max_size=dim) ) padding = [padding] * dim if isinstance(padding, int) else padding for i in range(len(output_padding)): m = min(fstrides[i], fdilations[i]) output_padding[i] = min(output_padding[i], m - 1) if draw(st.booleans()): output_padding = min(output_padding) assume( all( s > 0 for s in _output_shape( dim, fdilations, fstrides, padding, output_padding, x_dim, filter_dim, ) ) ) return ( dtype, vals, filters, bias, dilations, strides, padding, output_padding, fc, ) else: return dtype, vals, filters, bias, dilations, strides, padding, fc # --- Main --- # # ------------ # @handle_frontend_test( fn_tree="torch.nn.functional.conv1d", dtype_vals=_x_and_filters(dim=1), ) def test_torch_conv1d( *, dtype_vals, on_device, fn_tree, frontend, test_flags, backend_fw, ): dtype, vals, weight, bias, dilations, strides, padding, fc = dtype_vals helpers.test_frontend_function( input_dtypes=dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, input=vals, weight=weight, bias=bias, stride=strides, padding=padding, dilation=dilations, groups=fc, ) @handle_frontend_test( fn_tree="torch.nn.functional.conv2d", dtype_vals=_x_and_filters(dim=2), ) def test_torch_conv2d( *, dtype_vals, on_device, fn_tree, 
frontend, test_flags, backend_fw, ): dtype, vals, weight, bias, dilations, strides, padding, fc = dtype_vals helpers.test_frontend_function( input_dtypes=dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, input=vals, weight=weight, bias=bias, stride=strides, padding=padding, dilation=dilations, groups=fc, ) @handle_frontend_test( fn_tree="torch.nn.functional.conv3d", dtype_vals=_x_and_filters(dim=3), ) def test_torch_conv3d( *, dtype_vals, on_device, fn_tree, frontend, test_flags, backend_fw, ): dtype, vals, weight, bias, dilations, strides, padding, fc = dtype_vals # ToDo: Enable gradient tests for dilations > 1 when tensorflow supports it. _assume_tf_dilation_gt_1(backend_fw, on_device, dilations) helpers.test_frontend_function( input_dtypes=dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, input=vals, weight=weight, bias=bias, stride=strides, padding=padding, dilation=dilations, groups=fc, ) @handle_frontend_test( fn_tree="torch.nn.functional.conv_transpose1d", dtype_vals=_x_and_filters(dim=1, transpose=True), ) def test_torch_conv_transpose1d( *, dtype_vals, on_device, fn_tree, frontend, test_flags, backend_fw, ): dtype, vals, weight, bias, dilations, strides, padding, output_pad, fc = dtype_vals assume( backend_fw in ["torch", "tensorflow"] or all( dil == 1 for dil in ([dilations] if isinstance(dilations, int) else dilations) ) ) _assume_tf_dilation_gt_1(backend_fw, on_device, dilations) helpers.test_frontend_function( input_dtypes=dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, input=vals, weight=weight, bias=bias, stride=strides, padding=padding, output_padding=output_pad, groups=fc, dilation=dilations, ) @handle_frontend_test( fn_tree="torch.nn.functional.conv_transpose2d", dtype_vals=_x_and_filters(dim=2, transpose=True), ) def test_torch_conv_transpose2d( *, dtype_vals, on_device, fn_tree, frontend, test_flags, backend_fw, ): dtype, vals, weight, bias, dilations, strides, padding, output_pad, fc = dtype_vals assume( backend_fw in ["torch", "tensorflow"] or all( dil == 1 for dil in ([dilations] if isinstance(dilations, int) else dilations) ) ) _assume_tf_dilation_gt_1(backend_fw, on_device, dilations) helpers.test_frontend_function( input_dtypes=dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, input=vals, weight=weight, bias=bias, stride=strides, padding=padding, output_padding=output_pad, groups=fc, dilation=dilations, ) @handle_frontend_test( fn_tree="torch.nn.functional.conv_transpose3d", dtype_vals=_x_and_filters(dim=3, transpose=True), ) def test_torch_conv_transpose3d( *, dtype_vals, on_device, fn_tree, frontend, test_flags, backend_fw, ): dtype, vals, weight, bias, dilations, strides, padding, output_pad, fc = dtype_vals assume( backend_fw in ["torch", "tensorflow"] or all( dil == 1 for dil in ([dilations] if isinstance(dilations, int) else dilations) ) ) _assume_tf_dilation_gt_1(backend_fw, on_device, dilations) helpers.test_frontend_function( input_dtypes=dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, input=vals, weight=weight, bias=bias, stride=strides, padding=padding, output_padding=output_pad, groups=fc, dilation=dilations, ) @handle_frontend_test( fn_tree="torch.nn.functional.fold", dtype_vals=_fold_helper(), ) def test_torch_fold( *, 
dtype_vals, on_device, fn_tree, frontend, test_flags, backend_fw, ): dtype, vals, kernel_shape, output_shape, dilations, strides, padding = dtype_vals helpers.test_frontend_function( input_dtypes=dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, input=vals, output_size=output_shape, kernel_size=kernel_shape, dilation=dilations, padding=padding, stride=strides, ) @handle_frontend_test( fn_tree="torch.nn.functional.unfold", dtype_vals=_unfold_helper(), ) def test_torch_unfold( *, dtype_vals, on_device, fn_tree, frontend, test_flags, backend_fw, ): dtype, vals, kernel_shape, dilations, strides, padding = dtype_vals helpers.test_frontend_function( input_dtypes=dtype, backend_to_test=backend_fw, frontend=frontend, test_flags=test_flags, fn_tree=fn_tree, on_device=on_device, input=vals, kernel_size=kernel_shape, dilation=dilations, padding=padding, stride=strides, )
ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_nn/test_functional/test_convolution_functions.py
{ "file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_nn/test_functional/test_convolution_functions.py", "repo_id": "ivy", "token_count": 7750 }
64
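# ---------------------------------------------------------------------------
# Illustrative sketch (hypothetical helper, not part of the suite above): the
# convolution tests lean heavily on @st.composite strategies such as
# _fold_unfold_helper and _x_and_filters, which draw mutually consistent
# strides, dilations and kernel sizes before handing them to the frontend
# function under test. The minimal strategy below mirrors that pattern in
# isolation, with ranges chosen to match _fold_unfold_helper.
from hypothesis import given, strategies as st


@st.composite
def _stride_kernel_pair(draw, dim=2):
    # Per-dimension stride in [1, 3] and kernel size in [1, 5].
    stride = draw(st.lists(st.integers(1, 3), min_size=dim, max_size=dim))
    kernel = draw(st.lists(st.integers(1, 5), min_size=dim, max_size=dim))
    return stride, kernel


@given(params=_stride_kernel_pair())
def test_stride_kernel_pair_is_consistent(params):
    stride, kernel = params
    assert len(stride) == len(kernel) == 2
    assert all(1 <= s <= 3 for s in stride)
    assert all(1 <= k <= 5 for k in kernel)
# ---------------------------------------------------------------------------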
# global import pytest from types import SimpleNamespace import numpy as np from ivy_tests.test_ivy.test_frontends.test_torch.test_comparison_ops import ( _topk_helper, ) from ivy_tests.test_ivy.test_frontends.test_torch.test_creation_ops import ( _as_strided_helper, ) from ivy_tests.test_ivy.test_frontends.test_torch.test_indexing_slicing_joining_mutating_ops import ( # noqa: E501 _dtype_input_dim_start_length, ) from ivy_tests.test_ivy.test_frontends.test_torch.test_reduction_ops import ( _get_axis_and_p, ) import ivy from hypothesis import strategies as st, given, assume # local import ivy_tests.test_ivy.helpers as helpers from ivy_tests.test_ivy.helpers.hypothesis_helpers.general_helpers import sizes_ from ivy_tests.test_ivy.test_frontends.test_torch.test_blas_and_lapack_ops import ( _get_dtype_and_3dbatch_matrices, _get_dtype_input_and_matrices, _get_dtype_input_and_mat_vec, ) from ivy.functional.frontends.torch import Tensor from ivy_tests.test_ivy.helpers import handle_frontend_method, BackendHandler from ivy_tests.test_ivy.test_functional.test_core.test_searching import ( _broadcastable_trio, ) from ivy_tests.test_ivy.test_functional.test_core.test_manipulation import ( # noqa _get_splits, ) from ivy_tests.test_ivy.test_frontends.test_torch.test_miscellaneous_ops import ( # noqa dtype_value1_value2_axis, _get_dtype_value1_value2_cov, ) from ivy_tests.test_ivy.test_frontends.test_torch.test_linalg import ( # noqa _get_dtype_and_matrix, ) from ivy_tests.test_ivy.test_functional.test_core.test_statistical import ( _get_castable_dtype, _statistical_dtype_values, ) from ivy_tests.test_ivy.test_functional.test_experimental.test_core.test_manipulation import ( # noqa put_along_axis_helper, ) from ivy_tests.test_ivy.test_functional.test_experimental.test_core.test_statistical import ( # noqa _quantile_helper, ) try: import torch except ImportError: torch = SimpleNamespace() CLASS_TREE = "ivy.functional.frontends.torch.Tensor" # --- Helpers --- # # --------------- # @st.composite def _array_idxes_n_dtype(draw, **kwargs): num_dims = draw(helpers.ints(min_value=1, max_value=4)) dtype, x = draw( helpers.dtype_and_values( **kwargs, min_num_dims=num_dims, max_num_dims=num_dims, shared_dtype=True ) ) idxes = draw( st.lists( helpers.ints(min_value=0, max_value=num_dims - 1), min_size=num_dims, max_size=num_dims, unique=True, ) ) return x, idxes, dtype @st.composite def _arrays_dim_idx_n_dtypes(draw): num_dims = draw(st.shared(helpers.ints(min_value=1, max_value=4), key="num_dims")) num_arrays = 2 common_shape = draw( helpers.lists( x=helpers.ints(min_value=2, max_value=3), min_size=num_dims - 1, max_size=num_dims - 1, ) ) _dim = draw(helpers.ints(min_value=0, max_value=num_dims - 1)) unique_dims = draw( helpers.lists( x=helpers.ints(min_value=2, max_value=3), min_size=num_arrays, max_size=num_arrays, ) ) min_dim = min(unique_dims) max_dim = max(unique_dims) _idx = draw( helpers.array_values( shape=min_dim, dtype="int64", min_value=0, max_value=max_dim, exclude_min=False, ) ) xs = [] available_input_types = draw(helpers.get_dtypes("numeric")) input_dtypes = draw( helpers.array_dtypes( available_dtypes=available_input_types, num_arrays=num_arrays, shared_dtype=True, ) ) for ud, dt in zip(unique_dims, input_dtypes): x = draw( helpers.array_values( shape=common_shape[:_dim] + [ud] + common_shape[_dim:], dtype=dt, large_abs_safety_factor=2.5, small_abs_safety_factor=2.5, safety_factor_scale="log", ) ) xs.append(x) return xs, input_dtypes, _dim, _idx # Helper functions @st.composite def _dtypes(draw): 
return draw( st.shared( helpers.list_of_size( x=st.sampled_from( draw(helpers.get_dtypes("numeric", prune_function=False)) ), size=1, ), key="dtype", ) ) @st.composite def _expand_helper(draw): num_dims = draw(st.integers(min_value=1, max_value=10)) shape = draw( helpers.get_shape(min_num_dims=num_dims, max_num_dims=num_dims).filter( lambda x: any(i == 1 for i in x) ) ) new_shape = draw( helpers.get_shape(min_num_dims=num_dims, max_num_dims=num_dims).filter( lambda x: all(x[i] == v if v != 1 else True for i, v in enumerate(shape)) ) ) dtype, x = draw( helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), shape=shape, ) ) return dtype, x, new_shape @st.composite def _fill_value_and_size( draw, *, min_num_dims=1, max_num_dims=5, min_dim_size=1, max_dim_size=10, ): if isinstance(min_dim_size, st._internal.SearchStrategy): min_dim_size = draw(min_dim_size) if isinstance(max_dim_size, st._internal.SearchStrategy): max_dim_size = draw(max_dim_size) available_dtypes = draw(helpers.get_dtypes("numeric")) dtype = draw( helpers.array_dtypes( num_arrays=1, available_dtypes=available_dtypes, ) ) array = draw( helpers.array_values( dtype=dtype[0], shape=(1,), ) ) dtype.append("int32") size = draw( st.shared( helpers.get_shape( min_num_dims=min_num_dims, max_num_dims=max_num_dims, min_dim_size=min_dim_size, max_dim_size=max_dim_size, ), key="shape", ) ) fill_value = draw(helpers.ints()) if "int" in dtype[0] else draw(helpers.floats()) return dtype, [array, size, fill_value] @st.composite def _get_clamp_inputs(draw): shape = draw( helpers.get_shape( min_num_dims=1, max_num_dims=5, min_dim_size=2, max_dim_size=10 ) ) x_dtype, x = draw( helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), shape=shape, ) ) min = draw(st.booleans()) if min: max = draw(st.booleans()) min = draw( helpers.array_values( dtype=x_dtype[0], shape=shape, min_value=0, max_value=25 ) ) max = ( draw( helpers.array_values( dtype=x_dtype[0], shape=shape, min_value=26, max_value=50 ) ) if max else None ) else: min = None max = draw( helpers.array_values( dtype=x_dtype[0], shape=shape, min_value=26, max_value=50 ) ) return x_dtype, x, min, max @st.composite def _get_clip_min_inputs(draw): shape = draw( helpers.get_shape( min_num_dims=1, max_num_dims=5, min_dim_size=2, max_dim_size=10 ) ) x_dtype, x = draw( helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), shape=shape, ) ) min = draw( helpers.array_values(dtype=x_dtype[0], shape=shape, min_value=0, max_value=25) ) return x_dtype, x, min @st.composite def _get_dtype_and_multiplicative_matrices(draw): return draw( st.one_of( _get_dtype_input_and_matrices(), _get_dtype_and_3dbatch_matrices(), ) ) @st.composite def _get_dtype_input_and_vectors(draw, with_input=False, same_size=False): dim_size1 = draw(helpers.ints(min_value=2, max_value=5)) dim_size2 = dim_size1 if same_size else draw(helpers.ints(min_value=2, max_value=5)) dtype = draw(helpers.get_dtypes("float", full=True)) dtype = [ draw(st.sampled_from(tuple(set(dtype).difference({"bfloat16", "float16"})))) ] vec1 = draw( helpers.array_values( dtype=dtype[0], shape=(dim_size1,), min_value=2, max_value=5 ) ) vec2 = draw( helpers.array_values( dtype=dtype[0], shape=(dim_size2,), min_value=2, max_value=5 ) ) if with_input: input = draw( helpers.array_values( dtype=dtype[0], shape=(dim_size1, dim_size2), min_value=2, max_value=5 ) ) return dtype, input, vec1, vec2 return dtype, vec1, vec2 @st.composite def _masked_fill_helper(draw): cond, xs, dtypes = draw(_broadcastable_trio()) if 
ivy.is_uint_dtype(dtypes[0]): fill_value = draw(helpers.ints(min_value=0, max_value=5)) elif ivy.is_int_dtype(dtypes[0]): fill_value = draw(helpers.ints(min_value=-5, max_value=5)) else: fill_value = draw(helpers.floats(min_value=-5, max_value=5)) return dtypes[0], xs[0], cond, fill_value @st.composite def _repeat_helper(draw): shape = draw( helpers.get_shape( min_num_dims=1, max_num_dims=5, min_dim_size=2, max_dim_size=10 ) ) input_dtype, x = draw( helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), shape=shape, ) ) repeats = draw(st.lists(st.integers(min_value=1, max_value=5), min_size=len(shape))) return input_dtype, x, repeats @st.composite def _requires_grad_and_dtypes(draw): dtypes = draw(_dtypes()) dtype = dtypes[0] if ivy.is_int_dtype(dtype) or ivy.is_uint_dtype(dtype): return draw(st.just(False)), dtypes return draw(st.booleans()), dtypes @st.composite def _to_helper(draw): dtype_x = draw( helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, large_abs_safety_factor=3, ) ) input_dtype, x = dtype_x arg = draw(st.sampled_from(["tensor", "dtype", "device"])) if arg == "tensor": method_num_positional_args = 1 method_all_as_kwargs_np = {"other": x[1]} elif arg == "dtype": method_num_positional_args = 1 dtype = draw(helpers.get_dtypes("valid", full=False))[0] method_all_as_kwargs_np = {"dtype": dtype} else: method_num_positional_args = 0 device = draw(st.just("cpu")) dtype = draw(helpers.get_dtypes("valid", full=False, none=True))[0] method_all_as_kwargs_np = {"dtype": dtype, "device": device} return input_dtype, x, method_num_positional_args, method_all_as_kwargs_np @st.composite def _unfold_args(draw): values_dtype, values, axis, shape = draw( helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("float"), force_int_axis=True, shape=draw( helpers.get_shape( allow_none=False, min_num_dims=1, min_dim_size=1, ) ), ret_shape=True, ) ) size = draw( st.integers( min_value=1, max_value=max(shape[axis] - 1, 1), ) ) step = draw( st.integers( min_value=1, max_value=size, ) ) return values_dtype, values, axis, size, step # --- Main --- # # ------------ # # __add__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="__add__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), num_arrays=2, min_value=-1e04, max_value=1e04, allow_inf=False, ), ) def test_torch___add__( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # __and__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="__and__", dtype_and_x=helpers.dtype_and_values( available_dtypes=st.one_of(st.just(("bool",)), helpers.get_dtypes("integer")), num_arrays=2, min_value=-1e04, max_value=1e04, allow_inf=False, ), ) def test_torch___and__( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, 
frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="__array__", dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("valid")), dtype=helpers.get_dtypes("valid", full=False), ) def test_torch___array__( dtype_and_x, dtype, frontend, backend_fw, ): input_dtype, x = dtype_and_x if x[0].dtype == "bfloat16": return dtype[0] = np.dtype(dtype[0]) ret_gt = torch.tensor(x[0]).__array__(dtype[0]) with BackendHandler.update_backend(backend_fw) as ivy_backend: local_importer = ivy_backend.utils.dynamic_import function_module = local_importer.import_module("ivy.functional.frontends.torch") ret = function_module.tensor(x[0]).__array__(dtype[0]) helpers.value_test( ret_np_flat=ret.ravel(), ret_np_from_gt_flat=ret_gt.ravel(), ground_truth_backend="torch", backend=backend_fw, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="__array_wrap__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, ), ) def test_torch___array_wrap__( dtype_and_x, backend_fw, frontend, ): input_dtypes, x = dtype_and_x if x[1].dtype == "bfloat16": return if x[0].dtype == "bfloat16": ret_gt = torch.tensor(x[0].tolist(), dtype=torch.bfloat16).__array_wrap__(x[1]) else: ret_gt = torch.tensor(x[0]).__array_wrap__(x[1]) with BackendHandler.update_backend(backend_fw) as ivy_backend: local_importer = ivy_backend.utils.dynamic_import function_module = local_importer.import_module("ivy.functional.frontends.torch") ret = function_module.tensor(x[0]).__array_wrap__(x[1]) assert isinstance(ret, function_module.Tensor) helpers.value_test( ret_np_flat=np.array(ret.ivy_array).ravel(), ret_np_from_gt_flat=ret_gt.numpy().ravel(), ground_truth_backend="torch", backend=backend_fw, ) # __bool__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="__bool__", dtype_and_x=helpers.dtype_and_values( max_dim_size=1, min_value=-1e04, max_value=1e04, ), ) def test_torch___bool__( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=[], method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # __eq__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="__eq__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), num_arrays=2, min_value=-1e04, max_value=1e04, allow_inf=False, ), ) def test_torch___eq__( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="__floordiv__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), num_arrays=2, 
large_abs_safety_factor=2.5, small_abs_safety_factor=2.5, safety_factor_scale="log", ), ) def test_torch___floordiv__( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x assume(not np.any(np.isclose(x[1], 0))) helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, atol_=1, ) # __getitem__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="__getitem__", dtype_x_index=helpers.dtype_array_query( available_dtypes=helpers.get_dtypes("valid"), allow_neg_step=False, ), ) def test_torch___getitem__( dtype_x_index, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x, index = dtype_x_index helpers.test_frontend_method( init_input_dtypes=[input_dtype[0]], backend_to_test=backend_fw, init_all_as_kwargs_np={"data": x}, method_input_dtypes=[*input_dtype[1:]], method_all_as_kwargs_np={"query": index}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # __gt__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="__gt__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, ), ) def test_torch___gt__( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x try: helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) except RuntimeError as e: if "overflow" in e: assume(False) else: raise @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="__iand__", dtype_and_x=helpers.dtype_and_values( available_dtypes=st.one_of(st.just(("bool",)), helpers.get_dtypes("integer")), num_arrays=2, min_value=-1e04, max_value=1e04, allow_inf=False, ), test_inplace=st.just(True), ) def test_torch___iand__( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # __invert__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="__invert__", dtype_and_x=helpers.dtype_and_values(), ) def test_torch___invert__( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, 
frontend=frontend, on_device=on_device, ) # __long__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="__long__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("integer"), min_value=-1e04, max_value=1e04, allow_inf=False, ), ) def test_torch___long__( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # __lt__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="__lt__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), num_arrays=2, min_value=-1e04, max_value=1e04, allow_inf=False, ), ) def test_torch___lt__( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # __matmul__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="__matmul__", dtype_tensor1_tensor2=_get_dtype_and_multiplicative_matrices(), ) def test_torch___matmul__( dtype_tensor1_tensor2, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): dtype, tensor1, tensor2 = dtype_tensor1_tensor2 helpers.test_frontend_method( init_input_dtypes=dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": tensor1, }, method_input_dtypes=dtype, method_all_as_kwargs_np={"other": tensor2}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # __mod__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="__mod__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), num_arrays=2, ), ) def test_torch___mod__( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # __mul__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="__mul__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), num_arrays=2, min_value=-1e04, max_value=1e04, allow_inf=False, ), ) def test_torch___mul__( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, 
frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # __ne__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="__ne__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), num_arrays=2, min_value=-1e04, max_value=1e04, allow_inf=False, ), ) def test_torch___ne__( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # __neg__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="__neg__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=-1e04, max_value=1e04, allow_inf=False, ), ) def test_torch___neg__( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # __or__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="__or__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, min_value=-1e04, max_value=1e04, allow_inf=False, ), ) def test_torch___or__( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # __pow__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="__pow__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, ), ) def test_torch___pow__( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x dtype = input_dtype[0] if "int" in dtype: x[1] = ivy.abs(x[1]) helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "exponent": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # __radd__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="__radd__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), num_arrays=2, min_value=-1e04, max_value=1e04, allow_inf=False, ), ) def test_torch___radd__( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x 
helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # __rmul__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="__rmul__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, min_value=-1e04, max_value=1e04, allow_inf=False, ), ) def test_torch___rmul__( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # __rpow__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="__rpow__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, min_value=1, ), ) def test_torch___rpow__( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x dtype = input_dtype[0] if "int" in dtype: x[0] = ivy.abs(x[0]) helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # __rsub__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="__rsub__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, ), ) def test_torch___rsub__( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # __setitem__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="__setitem__", dtypes_x_index_val=helpers.dtype_array_query_val( available_dtypes=helpers.get_dtypes("valid"), allow_neg_step=False, ).filter(lambda x: x[0][0] == x[0][-1]), ) def test_torch___setitem__( dtypes_x_index_val, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x, index, val = dtypes_x_index_val helpers.test_frontend_method( init_input_dtypes=[input_dtype[0]], backend_to_test=backend_fw, init_all_as_kwargs_np={"data": x}, method_input_dtypes=[*input_dtype[1:]], method_all_as_kwargs_np={"key": index, "value": val}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # __sub__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="__sub__", dtype_and_x=helpers.dtype_and_values( 
available_dtypes=helpers.get_dtypes("float"), num_arrays=2, min_value=-1e04, max_value=1e04, allow_inf=False, ), ) def test_torch___sub__( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # __truediv__ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="__truediv__", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), shared_dtype=True, num_arrays=2, min_value=-1e04, max_value=1e04, allow_inf=False, ), ) def test_torch___truediv__( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) @given( dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid", prune_function=False), ), requires_grad=st.booleans(), ) def test_torch__requires_grad( dtype_x, requires_grad, backend_fw, ): ivy.set_backend(backend_fw) _, data = dtype_x x = Tensor(data[0]) assert not x._requires_grad x.requires_grad_() assert x._requires_grad x.requires_grad_(requires_grad) assert x._requires_grad == requires_grad ivy.previous_backend() # abs @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="abs", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), ), ) def test_torch_abs( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # abs_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="abs_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), ), test_inplace=st.just(True), ) def test_torch_abs_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # acos @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="acos", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), allow_inf=False, ), ) def test_torch_acos( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x 
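    # acos is only real-valued on [-1, 1]; unlike acos_ below, this strategy does not
    # bound the samples, so out-of-domain float inputs map to NaN in both the ivy
    # frontend and the torch ground truth.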
helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # acos_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="acos_", dtype_and_x=helpers.dtype_and_values( min_value=-1.0, max_value=1.0, available_dtypes=helpers.get_dtypes("float"), ), test_inplace=st.just(True), ) def test_torch_acos_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=[], method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # acosh @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="acosh", dtype_and_x=helpers.dtype_and_values( min_value=1.0, available_dtypes=helpers.get_dtypes("float"), ), ) def test_torch_acosh( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=[], method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # acosh_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="acosh_", dtype_and_x=helpers.dtype_and_values( min_value=1.0, available_dtypes=helpers.get_dtypes("float"), ), test_inplace=st.just(True), ) def test_torch_acosh_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=[], method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # add @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="add", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), num_arrays=2, min_value=-1e04, max_value=1e04, allow_inf=False, ), alpha=st.floats(min_value=-1e04, max_value=1e04, allow_infinity=False), ) def test_torch_add( dtype_and_x, alpha, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], "alpha": alpha, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, atol_=1e-02, on_device=on_device, ) # add_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="add_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), num_arrays=2, min_value=-1e04, max_value=1e04, allow_inf=False, ), alpha=st.floats(min_value=-1e04, 
max_value=1e04, allow_infinity=False), test_inplace=st.just(True), ) def test_torch_add_( dtype_and_x, alpha, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=[input_dtype[0]], backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], "alpha": alpha, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # addbmm @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="addbmm", dtype_and_matrices=_get_dtype_and_3dbatch_matrices(with_input=True), beta=st.floats( min_value=-5, max_value=5, allow_nan=False, allow_subnormal=False, allow_infinity=False, ), alpha=st.floats( min_value=-5, max_value=5, allow_nan=False, allow_subnormal=False, allow_infinity=False, ), ) def test_torch_addbmm( dtype_and_matrices, beta, alpha, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x, batch1, batch2 = dtype_and_matrices helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x, }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "batch1": batch1, "batch2": batch2, "beta": beta, "alpha": alpha, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, atol_=1e-02, on_device=on_device, ) # addbmm_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="addbmm_", dtype_and_matrices=_get_dtype_and_3dbatch_matrices(with_input=True), beta=st.floats( min_value=-5, max_value=5, allow_nan=False, allow_subnormal=False, allow_infinity=False, ), alpha=st.floats( min_value=-5, max_value=5, allow_nan=False, allow_subnormal=False, allow_infinity=False, ), test_inplace=st.just(True), ) def test_torch_addbmm_( dtype_and_matrices, beta, alpha, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x, batch1, batch2 = dtype_and_matrices helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x, }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "batch1": batch1, "batch2": batch2, "beta": beta, "alpha": alpha, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, atol_=1e-02, on_device=on_device, ) # addcdiv @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="addcdiv", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), num_arrays=3, large_abs_safety_factor=2.5, small_abs_safety_factor=2.5, safety_factor_scale="log", shared_dtype=True, ), value=st.floats(min_value=-100, max_value=100), ) def test_torch_addcdiv( dtype_and_x, value, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_and_x assume(not np.any(np.isclose(x[2], 0))) helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={"data": x[0]}, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "tensor1": x[1], "tensor2": x[2], "value": value, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, atol_=1e-03, ) 
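# For reference, addcdiv computes out = input + value * (tensor1 / tensor2), which is
# why the addcdiv tests above and below filter out near-zero divisors with assume().
# A minimal illustrative sketch against plain torch (not part of the test suite, kept
# here only as a comment):
#
#     >>> import torch
#     >>> t = torch.tensor([1.0, 2.0])
#     >>> t.addcdiv(torch.tensor([1.0, 1.0]), torch.tensor([2.0, 4.0]), value=2.0)
#     tensor([2.0000, 2.5000])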
@handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="addcdiv_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), num_arrays=3, large_abs_safety_factor=2.5, small_abs_safety_factor=2.5, safety_factor_scale="log", shared_dtype=True, ), value=st.floats(min_value=-100, max_value=100), test_inplace=st.just(True), ) def test_torch_addcdiv_( dtype_and_x, value, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_and_x assume(not np.any(np.isclose(x[2], 0))) helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={"data": x[0]}, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "tensor1": x[1], "tensor2": x[2], "value": value, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, atol_=1e-03, ) # addcmul @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="addcmul", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), num_arrays=3, large_abs_safety_factor=2.5, small_abs_safety_factor=2.5, safety_factor_scale="log", shared_dtype=True, ), value=st.floats(min_value=-100, max_value=100), ) def test_torch_addcmul( dtype_and_x, value, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={"data": x[0]}, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "tensor1": x[1], "tensor2": x[2], "value": value, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, atol_=1e-02, ) # addcmul_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="addcmul_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), num_arrays=3, large_abs_safety_factor=2.5, small_abs_safety_factor=2.5, safety_factor_scale="log", shared_dtype=True, ), value=st.floats(min_value=-100, max_value=100), test_inplace=st.just(True), ) def test_torch_addcmul_( dtype_and_x, value, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={"data": x[0]}, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "tensor1": x[1], "tensor2": x[2], "value": value, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, atol_=1e-02, ) # addmm @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="addmm", dtype_and_matrices=_get_dtype_input_and_matrices(with_input=True), beta=st.floats( min_value=-5, max_value=5, allow_nan=False, allow_subnormal=False, allow_infinity=False, ), alpha=st.floats( min_value=-5, max_value=5, allow_nan=False, allow_subnormal=False, allow_infinity=False, ), ) def test_torch_addmm( dtype_and_matrices, beta, alpha, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x, mat1, mat2 = dtype_and_matrices helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x, }, method_input_dtypes=input_dtype, 
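        # torch.Tensor.addmm computes beta * input + alpha * (mat1 @ mat2), hence both
        # scaling factors are forwarded alongside the two matrices below.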
method_all_as_kwargs_np={ "mat1": mat1, "mat2": mat2, "beta": beta, "alpha": alpha, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, atol_=1e-02, on_device=on_device, ) # addmm_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="addmm_", dtype_and_matrices=_get_dtype_input_and_matrices(with_input=True), beta=st.floats( min_value=-5, max_value=5, allow_nan=False, allow_subnormal=False, allow_infinity=False, ), alpha=st.floats( min_value=-5, max_value=5, allow_nan=False, allow_subnormal=False, allow_infinity=False, ), test_inplace=st.just(True), ) def test_torch_addmm_( dtype_and_matrices, beta, alpha, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x, mat1, mat2 = dtype_and_matrices helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x, }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "mat1": mat1, "mat2": mat2, "beta": beta, "alpha": alpha, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, atol_=1e-02, on_device=on_device, ) # addmv @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="addmv", dtype_and_matrices=_get_dtype_input_and_mat_vec(with_input=True), beta=st.floats( min_value=-5, max_value=5, allow_nan=False, allow_subnormal=False, allow_infinity=False, ), alpha=st.floats( min_value=-5, max_value=5, allow_nan=False, allow_subnormal=False, allow_infinity=False, ), ) def test_torch_addmv( dtype_and_matrices, beta, alpha, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x, mat, vec = dtype_and_matrices helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x, }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "mat": mat, "vec": vec, "beta": beta, "alpha": alpha, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, atol_=1e-02, on_device=on_device, ) # addmv_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="addmv_", dtype_and_matrices=_get_dtype_input_and_mat_vec(with_input=True), beta=st.floats( min_value=-5, max_value=5, allow_nan=False, allow_subnormal=False, allow_infinity=False, ), alpha=st.floats( min_value=-5, max_value=5, allow_nan=False, allow_subnormal=False, allow_infinity=False, ), test_inplace=st.just(True), ) def test_torch_addmv_( dtype_and_matrices, beta, alpha, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x, mat, vec = dtype_and_matrices helpers.test_frontend_method( init_input_dtypes=input_dtype, init_all_as_kwargs_np={ "data": x, }, method_input_dtypes=input_dtype, backend_to_test=backend_fw, method_all_as_kwargs_np={ "mat": mat, "vec": vec, "beta": beta, "alpha": alpha, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, atol_=1e-02, on_device=on_device, ) # addr @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="addr", dtype_and_vecs=_get_dtype_input_and_vectors(with_input=True), beta=st.floats( min_value=-5, max_value=5, allow_nan=False, allow_subnormal=False, allow_infinity=False, ), alpha=st.floats( min_value=-5, max_value=5, allow_nan=False, allow_subnormal=False, 
allow_infinity=False, ), ) def test_torch_addr( dtype_and_vecs, beta, alpha, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): dtype, input, vec1, vec2 = dtype_and_vecs helpers.test_frontend_method( init_input_dtypes=dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": input, }, method_input_dtypes=dtype, method_all_as_kwargs_np={ "vec1": vec1, "vec2": vec2, "beta": beta, "alpha": alpha, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, atol_=1e-02, on_device=on_device, ) # addr_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="addr_", dtype_and_vecs=_get_dtype_input_and_vectors(with_input=True), beta=st.floats( min_value=-5, max_value=5, allow_nan=False, allow_subnormal=False, allow_infinity=False, ), alpha=st.floats( min_value=-5, max_value=5, allow_nan=False, allow_subnormal=False, allow_infinity=False, ), test_inplace=st.just(True), ) def test_torch_addr_( dtype_and_vecs, beta, alpha, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): dtype, input, vec1, vec2 = dtype_and_vecs helpers.test_frontend_method( init_input_dtypes=dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": input, }, method_input_dtypes=dtype, method_all_as_kwargs_np={ "vec1": vec1, "vec2": vec2, "beta": beta, "alpha": alpha, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, atol_=1e-02, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="adjoint", dtype_and_values=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("real_and_complex"), min_num_dims=2, min_dim_size=2, ), ) def test_torch_adjoint( dtype_and_values, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, values = dtype_and_values helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": values[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, init_flags=init_flags, method_flags=method_flags, frontend_method_data=frontend_method_data, frontend=frontend, on_device=on_device, ) # all @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="all", dtype_input_axis=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("numeric"), min_num_dims=1, min_value=-1e04, max_value=1e04, valid_axis=True, force_int_axis=True, ), keepdim=st.booleans(), ) def test_torch_all( dtype_input_axis, keepdim, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x, axis = dtype_input_axis helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "dim": axis, "keepdim": keepdim, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # amax @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="amax", dtype_x_axis=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("numeric"), valid_axis=True, force_int_axis=True, ), keepdim=st.booleans(), ) def test_torch_amax( dtype_x_axis, keepdim, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x, axis = dtype_x_axis 
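    # amax reduces the tensor along `dim`; with keepdim=True the reduced axis is kept
    # as size 1, matching torch.Tensor.amax (amin below mirrors this behaviour).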
helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "dim": axis, "keepdim": keepdim, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # amin @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="amin", dtype_x_axis=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("numeric"), valid_axis=True, force_int_axis=True, ), keepdim=st.booleans(), ) def test_torch_amin( dtype_x_axis, keepdim, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x, axis = dtype_x_axis helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "dim": axis, "keepdim": keepdim, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # aminmax @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="aminmax", dtype_input_axis=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), ), ) def test_torch_aminmax( dtype_input_axis, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_input_axis helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # angle @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="angle", dtype_and_values=helpers.dtype_and_values( available_dtypes=["float64", "complex64", "complex128"], ), ) def test_torch_angle( dtype_and_values, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, values = dtype_and_values helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": values[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, init_flags=init_flags, method_flags=method_flags, frontend_method_data=frontend_method_data, frontend=frontend, on_device=on_device, ) # any @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="any", dtype_input_axis=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("numeric"), min_num_dims=1, min_value=-1e04, max_value=1e04, valid_axis=True, force_int_axis=True, ), keepdim=st.booleans(), ) def test_torch_any( dtype_input_axis, keepdim, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x, axis = dtype_input_axis helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "dim": axis, "keepdim": keepdim, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # write test for torch instance apply_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="apply_", dtype_and_values=helpers.dtype_and_values( 
available_dtypes=helpers.get_dtypes("float"), num_arrays=1, ), test_inplace=st.just(True), ) def test_torch_apply_( dtype_and_values, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): def func(x): return x + 1 input_dtype, values = dtype_and_values helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": values[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "callable": func, }, init_flags=init_flags, method_flags=method_flags, frontend_method_data=frontend_method_data, frontend=frontend, on_device=on_device, ) # arccos @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="arccos", dtype_and_x=helpers.dtype_and_values( min_value=-1.0, max_value=1.0, available_dtypes=helpers.get_dtypes("float"), ), ) def test_torch_arccos( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=[], method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # arccos_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="arccos_", dtype_and_x=helpers.dtype_and_values( min_value=-1.0, max_value=1.0, available_dtypes=helpers.get_dtypes("float"), ), test_inplace=st.just(True), ) def test_torch_arccos_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=[], method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # arccosh @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="arccosh", dtype_and_x=helpers.dtype_and_values( min_value=-1.0, max_value=1.0, available_dtypes=helpers.get_dtypes("float"), ), ) def test_torch_arccosh( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=[], method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # arccosh_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="arccosh_", dtype_and_x=helpers.dtype_and_values( min_value=-1.0, max_value=1.0, available_dtypes=helpers.get_dtypes("float"), ), test_inplace=st.just(True), ) def test_torch_arccosh_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=[], method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # arcsin @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", 
method_name="arcsin", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), allow_inf=False, ), ) def test_torch_arcsin( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # arcsin_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="arcsin_", dtype_and_x=helpers.dtype_and_values( min_value=-1.0, max_value=1.0, available_dtypes=helpers.get_dtypes("float"), ), test_inplace=st.just(True), ) def test_torch_arcsin_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=[], method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # arcsinh @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="arcsinh", dtype_and_x=helpers.dtype_and_values( min_value=-1.0, max_value=1.0, available_dtypes=helpers.get_dtypes("float"), ), ) def test_torch_arcsinh( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=[], method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # arcsinh_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="arcsinh_", dtype_and_x=helpers.dtype_and_values( min_value=-1.0, max_value=1.0, available_dtypes=helpers.get_dtypes("float"), ), test_inplace=st.just(True), ) def test_torch_arcsinh_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=[], method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # arctan @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="arctan", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), allow_inf=False, ), ) def test_torch_arctan( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # arctan2 @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="arctan2", 
dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), num_arrays=2, ), ) def test_torch_arctan2( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # arctan2_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="arctan2_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), num_arrays=2, ), ) def test_torch_arctan2_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # arctan_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="arctan_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), allow_inf=False, ), test_inplace=st.just(True), ) def test_torch_arctan_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # arctanh @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="arctanh", dtype_and_x=helpers.dtype_and_values( min_value=-1.0, max_value=1.0, available_dtypes=helpers.get_dtypes("float"), ), ) def test_torch_arctanh( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=[], method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # arctanh_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="arctanh_", dtype_and_x=helpers.dtype_and_values( min_value=-1.0, max_value=1.0, available_dtypes=helpers.get_dtypes("float"), ), test_inplace=st.just(True), ) def test_torch_arctanh_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=[], method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # argmax @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="argmax", 
dtype_input_axis=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("valid"), force_int_axis=True, valid_axis=True, ), keepdim=st.booleans(), ) def test_torch_argmax( dtype_input_axis, keepdim, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x, axis = dtype_input_axis helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "dim": axis, "keepdim": keepdim, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # argmin @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="argmin", dtype_input_axis=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("numeric"), force_int_axis=True, min_num_dims=1, max_num_dims=3, min_dim_size=1, max_dim_size=3, min_value=1, max_value=5, valid_axis=True, allow_neg_axes=True, ), keepdim=st.booleans(), ) def test_torch_argmin( dtype_input_axis, keepdim, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x, axis = dtype_input_axis helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "dim": axis, "keepdim": keepdim, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # argsort @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="argsort", dtype_input_axis=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("numeric"), force_int_axis=True, min_num_dims=1, max_num_dims=3, min_dim_size=1, max_dim_size=3, min_value=1, max_value=5, valid_axis=True, allow_neg_axes=True, ), descending=st.booleans(), ) def test_torch_argsort( dtype_input_axis, descending, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x, axis = dtype_input_axis helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "dim": axis, "descending": descending, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # argwhere @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="argwhere", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), ), ) def test_torch_argwhere( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="as_strided", dtype_x_and_other=_as_strided_helper(), ) def test_torch_as_strided( dtype_x_and_other, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x, size, stride, offset = dtype_x_and_other helpers.test_frontend_method( 
init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={"data": x[0]}, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "size": size, "stride": stride, "storage_offset": offset, }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # asin @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="asin", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), allow_inf=False, ), ) def test_torch_asin( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # asin_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="asin_", dtype_and_x=helpers.dtype_and_values( min_value=-1.0, max_value=1.0, available_dtypes=helpers.get_dtypes("float"), ), ) def test_torch_asin_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=[], method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # asinh @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="asinh", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), allow_inf=False, ), ) def test_torch_asinh( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, rtol_=1e-2, atol_=1e-2, on_device=on_device, ) # asinh_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="asinh_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), allow_inf=False, ), test_inplace=st.just(True), ) def test_torch_asinh_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, rtol_=1e-2, atol_=1e-2, on_device=on_device, ) # atan @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="atan", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), allow_inf=False, ), ) def test_torch_atan( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( 
init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # atan2 @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="atan2", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), num_arrays=2, ), ) def test_torch_atan2( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # atan2_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="atan2_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), num_arrays=2, ), test_inplace=st.just(True), ) def test_torch_atan2_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # atan_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="atan_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), allow_inf=False, ), test_inplace=st.just(True), ) def test_torch_atan_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=[], method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # atanh @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="atanh", dtype_and_x=helpers.dtype_and_values( min_value=-1.0, max_value=1.0, available_dtypes=helpers.get_dtypes("float"), ), ) def test_torch_atanh( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=[], method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # atanh_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="atanh_", dtype_and_x=helpers.dtype_and_values( min_value=-1.0, max_value=1.0, available_dtypes=helpers.get_dtypes("float"), ), test_inplace=st.just(True), ) def test_torch_atanh_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( 
init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=[], method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) @given( dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float", prune_function=False), num_arrays=3, min_value=-1e3, max_value=1e3, ).filter(lambda x: all(dt == "float32" for dt in x[0])), ) def test_torch_backward( dtype_x, backend_fw, ): ivy.set_backend(backend_fw) if ivy.current_backend_str() == "numpy": ivy.warnings.warn("Gradient calculation unavailable for numpy backend") return if ivy.current_backend_str() == "paddle": ivy.warnings.warn("torch.Tensor.backward() unavailable for paddle backend") return _, values = dtype_x x = Tensor(values[0], requires_grad=True) y = Tensor(values[1], requires_grad=True) z = Tensor(values[2], requires_grad=True) a = x + y.pow(2) b = z * a c = b.sum() c.backward() x_torch = torch.tensor(values[0], requires_grad=True, dtype=torch.float32) y_torch = torch.tensor(values[1], requires_grad=True, dtype=torch.float32) z_torch = torch.tensor(values[2], requires_grad=True, dtype=torch.float32) a_torch = x_torch + y_torch.pow(2) b_torch = z_torch * a_torch c_torch = b_torch.sum() c_torch.backward() helpers.assertions.value_test( ret_np_flat=helpers.flatten_and_to_np( ret=x._grads.ivy_array, backend=backend_fw ), ret_np_from_gt_flat=helpers.flatten_and_to_np( ret=ivy.to_ivy(x_torch.grad.numpy()), backend=backend_fw ), rtol=1e-3, atol=1e-3, backend="torch", ) helpers.assertions.value_test( ret_np_flat=helpers.flatten_and_to_np( ret=y._grads.ivy_array, backend=backend_fw ), ret_np_from_gt_flat=helpers.flatten_and_to_np( ret=ivy.to_ivy(y_torch.grad.numpy()), backend=backend_fw ), rtol=1e-3, atol=1e-3, backend="torch", ) helpers.assertions.value_test( ret_np_flat=helpers.flatten_and_to_np( ret=z._grads.ivy_array, backend=backend_fw ), ret_np_from_gt_flat=helpers.flatten_and_to_np( ret=ivy.to_ivy(z_torch.grad.numpy()), backend=backend_fw ), rtol=1e-3, atol=1e-3, backend="torch", ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="baddbmm", dtype_and_matrices=_get_dtype_and_3dbatch_matrices(with_input=True, input_3d=True), beta=st.floats( min_value=-5, max_value=5, allow_nan=False, allow_subnormal=False, allow_infinity=False, ), alpha=st.floats( min_value=-5, max_value=5, allow_nan=False, allow_subnormal=False, allow_infinity=False, ), ) def test_torch_baddbmm( dtype_and_matrices, beta, alpha, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x, batch1, batch2 = dtype_and_matrices helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={"data": x[0]}, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "batch1": batch1, "batch2": batch2, "beta": beta, "alpha": alpha, }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="baddbmm_", dtype_and_matrices=_get_dtype_and_3dbatch_matrices(with_input=True, input_3d=True), beta=st.floats( min_value=-5, max_value=5, allow_nan=False, allow_subnormal=False, allow_infinity=False, ), alpha=st.floats( min_value=-5, max_value=5, allow_nan=False, allow_subnormal=False, allow_infinity=False, ), test_inplace=st.just(True), 
) def test_torch_baddbmm_( dtype_and_matrices, beta, alpha, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x, batch1, batch2 = dtype_and_matrices helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={"data": x[0]}, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "batch1": batch1, "batch2": batch2, "beta": beta, "alpha": alpha, }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # bernoulli @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="bernoulli", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), ), test_with_out=st.just(True), ) def test_torch_bernoulli( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "input": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"generator": x[1], "out": x[2]}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # bitwise_and @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="bitwise_and", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("integer"), num_arrays=2, ), ) def test_torch_bitwise_and( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # bitwise_and_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="bitwise_and_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("integer"), num_arrays=2, ), test_inplace=st.just(True), ) def test_torch_bitwise_and_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # bitwise_left_shift @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="bitwise_left_shift", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("integer"), num_arrays=2, ), ) def test_torch_bitwise_left_shift( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # bitwise_not 
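# (bitwise_not is ~x: it flips every bit of an integer tensor and acts as logical
# negation on bool tensors; the method takes no arguments, so the test below passes
# an empty kwargs dict.)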
@handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="bitwise_not", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("integer"), num_arrays=2, ), ) def test_torch_bitwise_not( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, method_all_as_kwargs_np={}, frontend=frontend, on_device=on_device, ) # bitwise_not_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="bitwise_not_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("integer"), num_arrays=2, ), test_inplace=st.just(True), ) def test_torch_bitwise_not_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, method_all_as_kwargs_np={}, frontend=frontend, on_device=on_device, ) # bitwise_or @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="bitwise_or", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, ), ) def test_torch_bitwise_or( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # bitwise_or_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="bitwise_or_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, ), test_inplace=st.just(True), ) def test_torch_bitwise_or_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # bitwise right shift @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="bitwise_right_shift", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, shared_dtype=True, ), ) def test_torch_bitwise_right_shift( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x # negative shifts will throw an exception # shifts >= dtype width produce backend-defined behavior x[1] = np.asarray( np.clip(x[1], 0, np.iinfo(input_dtype[1]).bits - 1), dtype=input_dtype[1] ) helpers.test_frontend_method( init_input_dtypes=input_dtype, 
backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # bitwise_right_shift_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="bitwise_right_shift_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, shared_dtype=True, ), ) def test_torch_bitwise_right_shift_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x # negative shifts will throw an exception # shifts >= dtype width produce backend-defined behavior x[1] = np.asarray( np.clip(x[1], 0, np.iinfo(input_dtype[1]).bits - 1), dtype=input_dtype[1] ) helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # bitwise_xor @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="bitwise_xor", dtype_and_x=helpers.dtype_and_values( available_dtypes=st.one_of(st.just(("bool",)), helpers.get_dtypes("integer")), num_arrays=2, ), ) def test_torch_bitwise_xor( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # bitwise_xor_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="bitwise_xor_", dtype_and_x=helpers.dtype_and_values( available_dtypes=st.one_of(st.just(("bool",)), helpers.get_dtypes("integer")), num_arrays=2, ), test_inplace=st.just(True), ) def test_torch_bitwise_xor_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="bmm", dtype_and_matrices=_get_dtype_and_3dbatch_matrices(with_input=True, input_3d=True), ) def test_torch_bmm( dtype_and_matrices, backend_fw, frontend, frontend_method_data, init_flags, method_flags, on_device, ): input_dtype, _, x, mat2 = dtype_and_matrices helpers.test_frontend_method( init_input_dtypes=input_dtype, init_all_as_kwargs_np={"data": x}, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"mat2": mat2}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, backend_to_test=backend_fw, ) # bool @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="bool", 
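# Tensor.bool() casts the underlying data to the boolean dtype; inputs here are drawn from
# integer dtypes only.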
dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("integer"), ), ) def test_torch_bool( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # byte @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="byte", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), ), ) def test_torch_byte( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # ceil @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="ceil", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), ), ) def test_torch_ceil( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # ceil_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="ceil_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), ), test_inplace=st.just(True), ) def test_torch_ceil_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # char @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="char", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), min_value=-128, max_value=127, ), ) def test_torch_char( dtype_x, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={"data": x[0]}, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="cholesky", dtype_and_x=_get_dtype_and_matrix(square=True), upper=st.booleans(), ) def test_torch_cholesky( dtype_and_x, upper, frontend, frontend_method_data, init_flags, method_flags, on_device, 
backend_fw, ): input_dtype, x = dtype_and_x x = x[0] # make symmetric positive-definite x = np.matmul(x.swapaxes(-1, -2), x) + np.identity(x.shape[-1]) * 1e-3 helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x, }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "upper": upper, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, rtol_=1e-2, ) # chunk @pytest.mark.skip("Testing takes a lot of time") @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="chunk", dtype_x_dim=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("float"), min_num_dims=1, min_value=-1e04, max_value=1e04, force_int_axis=True, valid_axis=True, ), chunks=st.integers( min_value=1, max_value=5, ), ) def test_torch_chunk( dtype_x_dim, chunks, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x, dim = dtype_x_dim helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "chunks": chunks, "dim": dim, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # clamp @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="clamp", dtype_and_x_min_max=_get_clamp_inputs(), ) def test_torch_clamp( dtype_and_x_min_max, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x, min, max = dtype_and_x_min_max helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"min": min, "max": max}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # clamp_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="clamp_", dtype_and_x_min_max=_get_clamp_inputs(), test_inplace=st.just(True), ) def test_torch_clamp_( dtype_and_x_min_max, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x, min, max = dtype_and_x_min_max helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"min": min, "max": max}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="clamp_min", input_and_ranges=_get_clip_min_inputs(), ) def test_torch_clamp_min( input_and_ranges, frontend_method_data, init_flags, backend_fw, frontend, on_device, method_flags, ): x_dtype, x, min = input_and_ranges helpers.test_frontend_method( init_input_dtypes=x_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=x_dtype, method_all_as_kwargs_np={ "min": min, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="clamp_min_", input_and_ranges=_get_clip_min_inputs(), 
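# clamp_min_ is the in-place counterpart of clamp_min; test_inplace=True below tells the test
# harness to exercise the in-place update path.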
test_inplace=st.just(True), ) def test_torch_clamp_min_( input_and_ranges, frontend_method_data, init_flags, backend_fw, frontend, on_device, method_flags, ): x_dtype, x, min = input_and_ranges helpers.test_frontend_method( init_input_dtypes=x_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=x_dtype, method_all_as_kwargs_np={ "min": min, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # clip @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="clip", input_and_ranges=_get_clamp_inputs(), ) def test_torch_clip( input_and_ranges, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x, min, max = input_and_ranges helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"min": min, "max": max}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # clip_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="clip_", input_and_ranges=_get_clamp_inputs(), ) def test_torch_clip_( input_and_ranges, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x, min, max = input_and_ranges helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"min": min, "max": max}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # clone @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="clone", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=1, ), ) def test_torch_clone( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="conj", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float_and_complex") ), ) def test_torch_conj( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # contiguous @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="contiguous", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), allow_inf=False, ), ) def test_torch_contiguous( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = 
dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # copy_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="copy_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, ), test_inplace=st.just(True), ) def test_torch_copy_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # copysign @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="copysign", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_num_dims=1, num_arrays=2, ), ) def test_torch_copysign( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # copysign_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="copysign_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), min_num_dims=1, num_arrays=2, ), test_inplace=st.just(True), ) def test_torch_copysign_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # cos @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="cos", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), allow_inf=False, ), ) def test_torch_cos( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # cos_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="cos_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), allow_inf=False, ), test_inplace=st.just(True), ) def test_torch_cos_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, 
backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": list(x[0]) if isinstance(x[0], int) else x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # cosh @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="cosh", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), allow_inf=False, ), ) def test_torch_cosh( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # cosh_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="cosh_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), allow_inf=False, ), test_inplace=st.just(True), ) def test_torch_cosh_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, rtol_=1e-2, atol_=1e-2, ) # count_nonzero @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="count_nonzero", dtype_value=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), shape=st.shared(helpers.get_shape(min_num_dims=1), key="shape"), ), dim=helpers.get_axis( shape=st.shared(helpers.get_shape(), key="shape"), allow_neg=True, force_int=True, ), ) def test_torch_count_nonzero( dtype_value, dim, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_value helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"dim": dim}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # cov @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="cov", dtype_and_x=_get_dtype_value1_value2_cov( available_dtypes=helpers.get_dtypes("float"), min_num_dims=2, max_num_dims=2, min_dim_size=2, max_dim_size=5, min_value=1, max_value=1e10, abs_smallest_val=0.01, large_abs_safety_factor=2, safety_factor_scale="log", ), ) def test_torch_cov( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x, correction, fweights, aweights = dtype_and_x helpers.test_frontend_method( init_input_dtypes=["float64", "int64", "float64"], backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x, }, method_input_dtypes=["int64", "float64"], method_all_as_kwargs_np={ "correction": correction, "fweights": fweights, "aweights": aweights, }, 
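# dtypes for cov are pinned (float64 for the data and aweights, int64 for fweights) rather than
# taken from the drawn strategy, presumably because fweights must be integral and float64 keeps
# the covariance comparison numerically stable; the loose rtol_/atol_ below reflect that.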
frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, rtol_=1e-2, atol_=1e-2, ) # cross @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="cross", dtype_input_other_dim=dtype_value1_value2_axis( available_dtypes=helpers.get_dtypes("numeric"), min_num_dims=1, max_num_dims=10, min_dim_size=3, max_dim_size=3, min_value=-1e10, max_value=1e10, abs_smallest_val=0.01, large_abs_safety_factor=2, safety_factor_scale="log", ), ) def test_torch_cross( dtype_input_other_dim, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): dtype, input, other, dim = dtype_input_other_dim helpers.test_frontend_method( init_input_dtypes=dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": input, }, method_input_dtypes=dtype, method_all_as_kwargs_np={ "other": other, "dim": dim, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, rtol_=1e-2, atol_=1e-2, ) @given( dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid", prune_function=False), ).filter( lambda x: "bfloat16" not in x[0] and "uint16" not in x[0] and "uint32" not in x[0] and "uint64" not in x[0] ), ) def test_torch_cuda(dtype_x, backend_fw): ivy.set_backend(backend_fw) _, data = dtype_x x = Tensor(data[0], device="gpu:0") device = "gpu:0" ivy.utils.assertions.check_equal(x.cuda, device, as_array=False) ivy.previous_backend() # cummax @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="cummax", dtype_value=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), shape=st.shared(helpers.get_shape(min_num_dims=1), key="shape"), ), dim=helpers.get_axis( shape=st.shared(helpers.get_shape(), key="shape"), allow_neg=False, force_int=True, ), ) def test_torch_cummax( dtype_value, dim, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_value helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"dim": dim}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # cumprod @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="cumprod", dtype_value=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), shape=st.shared(helpers.get_shape(min_num_dims=1), key="shape"), ), dim=helpers.get_axis( shape=st.shared(helpers.get_shape(), key="shape"), allow_neg=True, force_int=True, ), dtypes=_dtypes(), ) def test_torch_cumprod( dtype_value, dim, dtypes, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_value helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=dtypes, method_all_as_kwargs_np={ "dim": dim, "dtype": dtypes[0], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # cumsum @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="cumsum", dtype_value=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), shape=st.shared(helpers.get_shape(min_num_dims=1), 
key="shape"), ), dim=helpers.get_axis( shape=st.shared(helpers.get_shape(), key="shape"), allow_neg=True, force_int=True, ), dtypes=_dtypes(), ) def test_torch_cumsum( dtype_value, dim, dtypes, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_value helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=dtypes, method_all_as_kwargs_np={ "dim": dim, "dtype": dtypes[0], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # cumsum_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="cumsum_", dtype_value=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), shape=st.shared(helpers.get_shape(min_num_dims=1), key="shape"), ), dim=helpers.get_axis( shape=st.shared(helpers.get_shape(), key="shape"), allow_neg=True, force_int=True, ), test_inplace=st.just(True), ) def test_torch_cumsum_( dtype_value, dim, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_value helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "dim": dim, "dtype": input_dtype[0], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # det @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="det", dtype_and_x=_get_dtype_and_matrix(square=True, batch=True), ) def test_torch_det( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x, }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # detach @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="detach", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), ), ) def test_torch_detach( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # detach_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="detach_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), ), test_inplace=st.just(True), ) def test_torch_detach_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, 
on_device=on_device, ) @given( dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid", prune_function=False) ).filter(lambda x: "bfloat16" not in x[0]), ) def test_torch_device( dtype_x, backend_fw, ): ivy.set_backend(backend_fw) _, data = dtype_x x = Tensor(data[0]) x.ivy_array = data[0] ivy.utils.assertions.check_equal( x.device, ivy.dev(ivy.array(data[0])), as_array=False ) ivy.previous_backend() @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="diag", dtype_and_values=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), shape=st.shared(helpers.get_shape(min_num_dims=1, max_num_dims=2), key="shape"), ), diagonal=st.integers(min_value=-100, max_value=100), ) def test_torch_diag( dtype_and_values, diagonal, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, values = dtype_and_values helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": values[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "diagonal": diagonal, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="diagonal", dtype_and_values=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), shape=st.shared(helpers.get_shape(min_num_dims=2), key="shape"), ), dims_and_offset=helpers.dims_and_offset( shape=st.shared(helpers.get_shape(min_num_dims=2), key="shape") ), ) def test_torch_diagonal( dtype_and_values, dims_and_offset, frontend, frontend_method_data, backend_fw, init_flags, method_flags, on_device, ): input_dtype, value = dtype_and_values dim1, dim2, offset = dims_and_offset input = value[0] num_dims = len(np.shape(input)) assume(dim1 != dim2) if dim1 < 0: assume(dim1 + num_dims != dim2) if dim2 < 0: assume(dim1 != dim2 + num_dims) helpers.test_frontend_method( init_input_dtypes=[input_dtype[0]], init_all_as_kwargs_np={"data": input}, method_input_dtypes=[input_dtype[0]], method_all_as_kwargs_np={ "offset": offset, "dim1": dim1, "dim2": dim2, }, frontend=frontend, frontend_method_data=frontend_method_data, backend_to_test=backend_fw, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # diff @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="diff", dtype_n_x_n_axis=helpers.dtype_values_axis( available_dtypes=st.shared(helpers.get_dtypes("valid"), key="dtype"), min_num_dims=1, min_value=-1e09, max_value=1e09, valid_axis=True, force_int_axis=True, ), n=st.integers(min_value=0, max_value=5), dtype_prepend=helpers.dtype_and_values( available_dtypes=st.shared(helpers.get_dtypes("numeric"), key="dtype"), min_num_dims=1, max_num_dims=1, ), dtype_append=helpers.dtype_and_values( available_dtypes=st.shared(helpers.get_dtypes("numeric"), key="dtype"), min_num_dims=1, max_num_dims=1, ), ) def test_torch_diff( dtype_n_x_n_axis, n, dtype_prepend, dtype_append, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x, axis = dtype_n_x_n_axis _, prepend = dtype_prepend _, append = dtype_append helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "n": n, "dim": axis, "prepend": prepend[0], "append": append[0], }, 
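# prepend and append are drawn with the same shared dtype key as x, so all three tensors in the
# diff call agree on dtype.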
frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # dim @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="dim", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), ), ) def test_torch_dim( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=[], method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # div @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="div", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, large_abs_safety_factor=2.5, small_abs_safety_factor=2.5, safety_factor_scale="log", ), rounding_mode=st.sampled_from(["floor", "trunc"]) | st.none(), ) def test_torch_div( dtype_and_x, rounding_mode, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_and_x assume(not np.any(np.isclose(x[1], 0))) helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={"data": x[0]}, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], "rounding_mode": rounding_mode, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # div_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="div_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, large_abs_safety_factor=2.5, small_abs_safety_factor=2.5, safety_factor_scale="log", ), rounding_mode=st.sampled_from(["floor", "trunc"]) | st.none(), test_inplace=st.just(True), ) def test_torch_div_( dtype_and_x, rounding_mode, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_and_x assume(not np.any(np.isclose(x[1], 0))) helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={"data": x[0]}, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], "rounding_mode": rounding_mode, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # divide @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="divide", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, min_value=-1e04, max_value=1e04, allow_inf=False, ), ) def test_torch_divide( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, atol_=1e-02, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="dot", 
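# torch.Tensor.dot expects two 1-D tensors with the same number of elements; shape=(1,) below
# keeps both drawn operands one-dimensional and equal-length.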
dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), num_arrays=2, shape=(1,), ), ) def test_torch_dot( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "tensor": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="double", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), ), ) def test_torch_double( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, backend_fw, on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, backend_to_test=backend_fw, on_device=on_device, ) # dsplit @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="dsplit", dtype_value=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), shape=st.shared(helpers.get_shape(min_num_dims=3), key="value_shape"), ), indices_or_sections=_get_splits( min_num_dims=3, axis=2, allow_none=False, allow_array_indices=False, is_mod_split=True, ), ) def test_torch_dsplit( dtype_value, indices_or_sections, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_value helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=[], method_all_as_kwargs_np={"indices_or_sections": indices_or_sections}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) @given( dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid", prune_function=False) ).filter(lambda x: "bfloat16" not in x[0]), ) def test_torch_dtype(dtype_x, backend_fw): ivy.set_backend(backend_fw) dtype, data = dtype_x x = Tensor(data[0]) x.ivy_array = data[0] ivy.utils.assertions.check_equal(x.dtype, dtype[0], as_array=False) ivy.previous_backend() @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="eq_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), num_arrays=2, min_value=-1e04, max_value=1e04, allow_inf=False, ), test_inplace=st.just(True), ) def test_torch_eq_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # equal @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="equal", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, shared_dtype=True, 
min_num_dims=1, min_value=-1e04, max_value=1e04, ), ) def test_torch_equal( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, atol_=1e-04, rtol_=1e-04, on_device=on_device, ) # erf @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="erf", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), ), ) def test_torch_erf( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # erf_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="erf_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), ), test_inplace=st.just(True), ) def test_torch_erf_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # erfinv_ tests @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="erfinv_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=-1, max_value=1, abs_smallest_val=1e-05, ), ) def test_torch_erfinv( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # erfinv_ tests @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="erfinv_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=-1, max_value=1, abs_smallest_val=1e-05, ), test_inplace=st.just(True), ) def test_torch_erfinv_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # exp @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="exp", dtype_and_x=helpers.dtype_and_values( 
available_dtypes=helpers.get_dtypes("numeric"), ), ) def test_torch_exp( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # exp_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="exp_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), ), test_inplace=st.just(True), ) def test_torch_exp_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="expand", dtype_x_shape=_expand_helper(), unpack_shape=st.booleans(), ) def test_torch_expand( dtype_x_shape, unpack_shape, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x, shape = dtype_x_shape if backend_fw == "paddle": assume( input_dtype[0] in ["int32", "int64", "float32", "float64", "bool"] and len(shape) < 7 ) if unpack_shape: method_flags.num_positional_args = len(shape) + 1 size = {} i = 0 for x_ in shape: size[f"x{i}"] = x_ i += 1 else: size = { "size": shape, } helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np=size, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # expand_as @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="expand_as", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2 ), ) def test_torch_expand_as( dtype_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # expm1 @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="expm1", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), ), ) def test_torch_expm1( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # expm1_ @handle_frontend_method( 
class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="expm1_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), ), test_inplace=st.just(True), ) def test_torch_expm1_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # fill_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="fill_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), ), value=helpers.floats(min_value=1, max_value=10), test_inplace=st.just(True), ) def test_torch_fill_( dtype_and_x, value, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "value": value, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # fix @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="fix", dtype_value=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), shape=st.shared(helpers.get_shape(min_num_dims=1), key="shape"), ), ) def test_torch_fix( dtype_value, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_value helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # fix_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="fix_", dtype_value=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), shape=st.shared(helpers.get_shape(min_num_dims=1), key="shape"), ), test_inplace=st.just(True), ) def test_torch_fix_( dtype_value, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_value helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # flatten @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="flatten", dtype_value=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), shape=st.shared(helpers.get_shape(), key="shape"), ), axes=helpers.get_axis( shape=st.shared(helpers.get_shape(), key="shape"), min_size=2, max_size=2, unique=False, force_tuple=True, ), ) def test_torch_flatten( dtype_value, axes, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_value helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, 
init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "start_dim": axes[0], "end_dim": axes[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # flip @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="flip", dtype_values_axis=_array_idxes_n_dtype( available_dtypes=helpers.get_dtypes("float"), ), ) def test_torch_flip( dtype_values_axis, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): x, idxes, dtype = dtype_values_axis helpers.test_frontend_method( init_input_dtypes=dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=dtype, method_all_as_kwargs_np={ "dims": idxes, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # fliplr @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="fliplr", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_num_dims=2, ), ) def test_torch_fliplr( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="float", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), ), ) def test_torch_float( dtype_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # floor @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="floor", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), ), ) def test_torch_floor( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="floor_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), ), test_inplace=st.just(True), ) def test_torch_floor_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, 
init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # fmax @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="fmax", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), num_arrays=2, ), ) def test_torch_fmax( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # fmin @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="fmin", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), num_arrays=2, ), ) def test_torch_fmin( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # fmod @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="fmod", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), num_arrays=2, shared_dtype=True, min_num_dims=1, min_value=-100, max_value=100, ), ) def test_torch_fmod( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={"data": x[0]}, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"other": x[1]}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # fmod_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="fmod_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), num_arrays=2, shared_dtype=True, min_num_dims=1, min_value=-100, max_value=100, ), test_inplace=st.just(True), ) def test_torch_fmod_( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={"data": x[0]}, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"other": x[1]}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # frac @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="frac", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes(kind="valid"), num_arrays=1, max_value=1e6, min_value=-1e6, ), ) def test_torch_frac( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, 
method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="gather", params_indices_others=helpers.array_indices_axis( array_dtypes=helpers.get_dtypes("valid"), indices_dtypes=["int64"], indices_same_dims=True, ), ) def test_torch_gather( params_indices_others, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtypes, x, indices, axis, batch_dims = params_indices_others helpers.test_frontend_method( init_input_dtypes=[input_dtypes[0]], backend_to_test=backend_fw, init_all_as_kwargs_np={"data": x}, method_input_dtypes=[input_dtypes[1]], method_all_as_kwargs_np={ "dim": axis, "index": indices, }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # gcd @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="gcd", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("integer"), min_value=-100, max_value=100, min_num_dims=1, max_num_dims=3, min_dim_size=1, max_dim_size=3, num_arrays=2, shared_dtype=True, ), ) def test_torch_gcd( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @given( dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid", prune_function=False) ).filter( lambda x: "bfloat16" not in x[0] and "uint16" not in x[0] and "uint32" not in x[0] and "uint64" not in x[0] ), ) def test_torch_get_device( dtype_x, backend_fw, ): ivy.set_backend(backend_fw) _, data = dtype_x x = Tensor(data[0]) ivy.utils.assertions.check_equal(x.get_device, -1, as_array=False) x = Tensor(data[0], "gpu:0") ivy.utils.assertions.check_equal(x.get_device, 0, as_array=False) x = Tensor(data[0], "tpu:3") ivy.utils.assertions.check_equal(x.get_device, 3, as_array=False) ivy.previous_backend() def test_torch_grad(backend_fw): ivy.set_backend(backend_fw) x = Tensor(ivy.array([1.0, 2.0, 3.0])) grads = ivy.array([1.0, 2.0, 3.0]) x._grads = grads assert ivy.array_equal(x.grad, grads) ivy.previous_backend() def test_torch_grad_fn(backend_fw): ivy.set_backend(backend_fw) x = Tensor(ivy.array([3.0]), requires_grad=True) ivy.utils.assertions.check_equal(x.grad_fn, None, as_array=False) y = x.pow(2) ivy.utils.assertions.check_equal(y.grad_fn, "PowBackward", as_array=False) ivy.utils.assertions.check_equal( y.grad_fn.next_functions[0], "AccumulateGrad", as_array=False ) z = y.detach() ivy.utils.assertions.check_equal(z.grad_fn, None, as_array=False) ivy.previous_backend() # greater @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="greater", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, min_value=-1e04, max_value=1e04, allow_inf=False, ), ) def test_torch_greater( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x 
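# x[0] seeds the tensor under test and x[1] is passed as `other`; greater returns a boolean
# tensor, so no tolerance arguments are needed in the call below.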
helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # greater_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="greater_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, min_value=-1e04, max_value=1e04, allow_inf=False, ), test_inplace=st.just(True), ) def test_torch_greater_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # greater_equal @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="greater_equal", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, min_value=-1e04, max_value=1e04, allow_inf=False, ), ) def test_torch_greater_equal( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # greater_equal_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="greater_equal_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, min_value=-1e04, max_value=1e04, allow_inf=False, ), test_inplace=st.just(True), ) def test_torch_greater_equal_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # half @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="half", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), ), ) def test_torch_half( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="heaviside", dtype_and_values=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), 
num_arrays=2, ), ) def test_torch_heaviside( dtype_and_values, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, values = dtype_and_values helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": values[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "values": values[1], }, init_flags=init_flags, method_flags=method_flags, frontend_method_data=frontend_method_data, frontend=frontend, on_device=on_device, ) # hsplit @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="hsplit", dtype_value=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), shape=st.shared(helpers.get_shape(min_num_dims=2), key="value_shape"), ), indices_or_sections=_get_splits( min_num_dims=1, axis=1, allow_none=False, allow_array_indices=False, is_mod_split=True, ), ) def test_torch_hsplit( dtype_value, indices_or_sections, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_value helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=[], method_all_as_kwargs_np={"indices_or_sections": indices_or_sections}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) @given( dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("complex", prune_function=False) ), ) def test_torch_imag(dtype_x, backend_fw): ivy.set_backend(backend_fw) _, data = dtype_x x = Tensor(data[0]) x.ivy_array = data[0] ivy.utils.assertions.check_equal(x.imag, ivy.imag(data[0])) ivy.previous_backend() @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="index_add", xs_dtypes_dim_idx=_arrays_dim_idx_n_dtypes(), alpha=st.integers(min_value=1, max_value=2), ) def test_torch_index_add( *, xs_dtypes_dim_idx, alpha, frontend_method_data, init_flags, method_flags, on_device, frontend, backend_fw, ): xs, input_dtypes, axis, indices = xs_dtypes_dim_idx if xs[0].shape[axis] < xs[1].shape[axis]: source, input = xs else: input, source = xs helpers.test_frontend_method( init_input_dtypes=[input_dtypes[0]], backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": input, }, method_input_dtypes=["int64", input_dtypes[1]], method_all_as_kwargs_np={ "dim": axis, "index": indices, "source": source, "alpha": alpha, }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, rtol_=1e-03, ) # index_add @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="index_add_", xs_dtypes_dim_idx=_arrays_dim_idx_n_dtypes(), alpha=st.integers(min_value=1, max_value=2), test_inplace=st.just(True), ) def test_torch_index_add_( *, xs_dtypes_dim_idx, alpha, frontend_method_data, init_flags, method_flags, on_device, frontend, backend_fw, ): xs, input_dtypes, axis, indices = xs_dtypes_dim_idx if xs[0].shape[axis] < xs[1].shape[axis]: source, input = xs else: input, source = xs helpers.test_frontend_method( init_input_dtypes=[input_dtypes[0]], backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": input, }, method_input_dtypes=["int64", input_dtypes[1]], method_all_as_kwargs_np={ "dim": axis, "index": indices, "source": source, "alpha": alpha, }, frontend=frontend, frontend_method_data=frontend_method_data, 
init_flags=init_flags, method_flags=method_flags, on_device=on_device, rtol_=1e-03, ) # index_fill @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="index_fill", dtype_indices_axis=helpers.array_indices_axis( array_dtypes=helpers.get_dtypes("numeric"), indices_dtypes=["int64"], min_num_dims=1, max_num_dims=5, min_dim_size=1, max_dim_size=10, first_dimension_only=True, indices_same_dims=False, ), value=st.floats(min_value=-100, max_value=100), ) def test_torch_index_fill( dtype_indices_axis, value, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtypes, x, indices, axis, _ = dtype_indices_axis if indices.ndim != 1: indices = ivy.flatten(indices) helpers.test_frontend_method( init_input_dtypes=[input_dtypes[0]], backend_to_test=backend_fw, init_all_as_kwargs_np={"data": x}, method_input_dtypes=[input_dtypes[1]], method_all_as_kwargs_np={ "dim": axis, "index": indices, "value": value, }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # todo: remove dtype specifications @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="index_put", x_and_indices=helpers.array_indices_axis( array_dtypes=st.just(("float32",)), indices_dtypes=st.just(("int64",)), ), values=helpers.dtype_and_values( available_dtypes=st.just(("float32",)), max_num_dims=1, max_dim_size=1 ), accumulate=st.booleans(), ) def test_torch_index_put( x_and_indices, values, accumulate, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x, indices, *_ = x_and_indices values_dtype, values = values init_dtypes = [input_dtype[0]] method_dtypes = [input_dtype[1], values_dtype[0]] helpers.test_frontend_method( init_input_dtypes=init_dtypes, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x, }, method_input_dtypes=method_dtypes, method_all_as_kwargs_np={ "indices": (indices,), "values": values[0], "accumulate": accumulate, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="index_put_", x_and_indices=helpers.array_indices_axis( array_dtypes=st.just(("float32",)), indices_dtypes=st.just(("int64",)), ), values=helpers.dtype_and_values( available_dtypes=st.just(("float32",)), max_num_dims=1, max_dim_size=1 ), accumulate=st.booleans(), test_inplace=st.just(True), ) def test_torch_index_put_( x_and_indices, values, accumulate, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x, indices, *_ = x_and_indices values_dtype, values = values init_dtypes = [input_dtype[0]] method_dtypes = [input_dtype[1], values_dtype[0]] helpers.test_frontend_method( init_input_dtypes=init_dtypes, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x, }, method_input_dtypes=method_dtypes, method_all_as_kwargs_np={ "indices": (indices,), "values": values[0], "accumulate": accumulate, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # index_select @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="index_select", params_indices_others=helpers.array_indices_axis( array_dtypes=helpers.get_dtypes("valid"), indices_dtypes=["int64"], max_num_dims=1, indices_same_dims=True, ), ) 
def test_torch_index_select( params_indices_others, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtypes, input, indices, axis, batch_dims = params_indices_others helpers.test_frontend_method( init_input_dtypes=[input_dtypes[0]], backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": input, }, method_input_dtypes=[input_dtypes[1]], method_all_as_kwargs_np={ "dim": axis, "index": indices, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # int @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="int", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("integer"), ), ) def test_torch_int( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # inverse @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="inverse", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_num_dims=2, ).filter(lambda s: s[1][0].shape[-1] == s[1][0].shape[-2]), ) def test_torch_inverse( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # is_complex @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="is_complex", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), ), ) def test_torch_is_complex( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={"data": x[0]}, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) @given( dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid", prune_function=False) ).filter(lambda x: "bfloat16" not in x[0]), ) def test_torch_is_cuda( dtype_x, backend_fw, ): ivy.set_backend(backend_fw) _, data = dtype_x x = Tensor(data[0]) x.ivy_array = data[0] ivy.utils.assertions.check_equal( x.is_cuda, "gpu" in ivy.dev(ivy.array(data[0])), as_array=False ) ivy.previous_backend() # is_floating_point @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="is_floating_point", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), ), ) def test_torch_is_floating_point( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, 
init_all_as_kwargs_np={"data": x[0]}, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) @given( requires_grad=st.booleans(), ) def test_torch_is_leaf(requires_grad, backend_fw): ivy.set_backend(backend_fw) x = Tensor(ivy.array([3.0]), requires_grad=requires_grad) ivy.utils.assertions.check_equal(x.is_leaf, True, as_array=False) y = x.pow(2) ivy.utils.assertions.check_equal(y.is_leaf, not requires_grad, as_array=False) z = y.detach() ivy.utils.assertions.check_equal(z.is_leaf, True, as_array=False) ivy.previous_backend() @given( dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid", prune_function=False) ).filter(lambda x: "bfloat16" not in x[0]), ) def test_torch_is_meta( dtype_x, backend_fw, ): ivy.set_backend(backend_fw) _, data = dtype_x x = Tensor(data[0]) x.ivy_array = data[0] ivy.utils.assertions.check_equal( x.is_meta, "meta" in ivy.dev(ivy.array(data[0])), as_array=False ) ivy.previous_backend() @given( dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid", prune_function=False) ).filter(lambda x: "bfloat16" not in x[0]), ) def test_torch_is_quantized( dtype_x, backend_fw, ): ivy.set_backend(backend_fw) _, data = dtype_x x = Tensor(data[0]) x.ivy_array = data[0] ivy.utils.assertions.check_equal( x.is_quantized, "q" in ivy.dtype(ivy.array(data[0])), as_array=False ) ivy.previous_backend() # isfinite @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="isfinite", dtype_and_input=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), min_value=-np.inf, max_value=np.inf, ), ) def test_torch_isfinite( *, dtype_and_input, on_device, frontend, backend_fw, frontend_method_data, init_flags, method_flags, ): input_dtype, x = dtype_and_input helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # isinf @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="isinf", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), ), ) def test_torch_isinf( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={"data": x[0]}, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # isnan @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="isnan", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), ), ) def test_torch_isnan( dtype_x, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={"data": x[0]}, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) 
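# NOTE: a brief orientation comment for readers of this module (descriptive only,
# based on the tests above and below): each `test_torch_*` method test draws its
# input dtypes and values from hypothesis strategies such as
# `helpers.dtype_and_values`, constructs the frontend Tensor through the
# `torch.tensor` init tree via `init_all_as_kwargs_np`, passes the method's own
# arguments through `method_all_as_kwargs_np`, and then relies on
# `helpers.test_frontend_method` to run the method on the selected ivy backend
# (`backend_fw`) and check the result against the ground-truth torch behaviour.
# In-place variants (method names ending in `_`) additionally set
# `test_inplace=st.just(True)`.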
# isreal @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="isreal", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), ), ) def test_torch_isreal( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={"data": x[0]}, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) @given( dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid", prune_function=False) ).filter(lambda x: "bfloat16" not in x[0]), ) def test_torch_ivy_array( dtype_x, backend_fw, ): _, data = dtype_x ivy.set_backend(backend_fw) x = Tensor(data[0]) x.ivy_array = data[0] ret = helpers.flatten_and_to_np(ret=x.ivy_array.data, backend=backend_fw) ret_gt = helpers.flatten_and_to_np(ret=data[0], backend=backend_fw) helpers.value_test( ret_np_flat=ret, ret_np_from_gt_flat=ret_gt, backend="torch", ) # lcm @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="lcm", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("integer"), num_arrays=2, min_value=-100, max_value=100, min_num_dims=1, max_num_dims=3, min_dim_size=1, max_dim_size=3, shared_dtype=True, ), ) def test_torch_lcm( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # lcm_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="lcm_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("integer"), num_arrays=2, min_value=-100, max_value=100, min_num_dims=1, max_num_dims=3, min_dim_size=1, max_dim_size=3, shared_dtype=True, ), test_inplace=st.just(True), ) def test_torch_lcm_( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # less @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="less", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, min_value=-1e04, max_value=1e04, allow_inf=False, ), ) def test_torch_less( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, 
on_device=on_device, ) # less_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="less_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, min_value=-1e04, max_value=1e04, allow_inf=False, ), test_inplace=st.just(True), ) def test_torch_less_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # less_equal @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="less_equal", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, min_value=-1e04, max_value=1e04, allow_inf=False, ), ) def test_torch_less_equal( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # less_equal_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="less_equal_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, min_value=-1e04, max_value=1e04, allow_inf=False, ), test_inplace=st.just(True), ) def test_torch_less_equal_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # log @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="log", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), allow_inf=False, ), ) def test_torch_log( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # log10 @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="log10", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), allow_inf=False, ), ) def test_torch_log10( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, 
method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # log10_ tests @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="log10_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), allow_inf=False, ), test_inplace=st.just(True), ) def test_torch_log10_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="log1p", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), max_value=1e37, ), ) def test_torch_log1p( dtype_x, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={"data": x[0]}, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # log1p_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="log1p_", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), max_value=1e37, ), test_inplace=st.just(True), ) def test_torch_log1p_( dtype_x, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={"data": x[0]}, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # log2 @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="log2", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), allow_inf=False, ), ) def test_torch_log2( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # log2_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="log2_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), allow_inf=False, ), test_inplace=st.just(True), ) def test_torch_log2_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, 
init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # log_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="log_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), allow_inf=False, ), test_inplace=st.just(True), ) def test_torch_log_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="log_softmax", dtype_x_and_axis=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("float"), min_num_dims=1, max_axes_size=1, force_int_axis=True, valid_axis=True, ), dtypes=helpers.get_dtypes("float", none=False, full=False), ) def test_torch_log_softmax( *, dtype_x_and_axis, dtypes, on_device, frontend, backend_fw, frontend_method_data, init_flags, method_flags, ): input_dtype, x, axis = dtype_x_and_axis helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "dim": axis, "dtype": dtypes[0], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # logaddexp @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="logaddexp", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), num_arrays=2, min_num_dims=1, min_value=-100, max_value=100, shared_dtype=True, ), ) def test_torch_logaddexp( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # logdet @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="logdet", dtype_and_x=_get_dtype_and_matrix(square=True, batch=True), ) def test_torch_logdet( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x dtype, x = dtype_and_x x = np.matmul(x.T, x) + np.identity(x.shape[0]) helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x, }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # logical_and @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="logical_and", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, ), ) def test_torch_logical_and( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( 
init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # logical_not @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="logical_not", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=1 ), ) def test_torch_logical_not( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # logical_not_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="logical_not_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=1, large_abs_safety_factor=12, ), test_inplace=st.just(True), ) def test_torch_logical_not_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # logical_or @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="logical_or", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, ), ) def test_torch_logical_or( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # logical_xor @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="logical_xor", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, ), ) def test_torch_logical_xor( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # logit @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="logit", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), allow_inf=False, min_num_dims=1, min_dim_size=1, ), ) def test_torch_logit( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): 
input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # long @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="long", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("integer"), ), ) def test_torch_long( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # masked_fill @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="masked_fill", x_mask_val=_masked_fill_helper(), ) def test_torch_masked_fill( x_mask_val, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): dtype, x, mask, val = x_mask_val helpers.test_frontend_method( init_input_dtypes=[dtype], backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x, }, method_input_dtypes=["bool", dtype], method_all_as_kwargs_np={ "mask": mask, "value": val, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # matmul @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="matmul", dtype_tensor1_tensor2=_get_dtype_and_multiplicative_matrices(), ) def test_torch_matmul( dtype_tensor1_tensor2, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): dtype, tensor1, tensor2 = dtype_tensor1_tensor2 helpers.test_frontend_method( init_input_dtypes=dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": tensor1, }, method_input_dtypes=dtype, method_all_as_kwargs_np={"other": tensor2}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # matrix_power @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="matrix_power", dtype_x=_get_dtype_and_matrix(square=True, invertible=True), n=helpers.ints(min_value=2, max_value=5), ) def test_torch_matrix_power( dtype_x, n, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "n": n, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # max @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="max", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), ), ) def test_torch_max( dtype_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, 
method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # maximum @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="maximum", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, ), ) def test_torch_maximum( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # mean @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="mean", dtype_and_x=_statistical_dtype_values( function="mean", min_value=-1e04, max_value=1e04, ), keepdims=st.booleans(), ) def test_torch_mean( dtype_and_x, keepdims, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x, axis = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "dim": axis, "keepdim": keepdims, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # median @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="median", dtype_input_axis=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("float"), min_num_dims=1, valid_axis=True, force_int_axis=True, ), keepdim=st.booleans(), ) def test_torch_median( dtype_input_axis, keepdim, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x, axis = dtype_input_axis helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "dim": axis, "keepdim": keepdim, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # min @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="min", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), ), ) def test_torch_min( dtype_x, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # minimum @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="minimum", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, ), ) def test_torch_minimum( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], 
}, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # mm @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="mm", dtype_xy=_get_dtype_input_and_matrices(), ) def test_torch_mm( dtype_xy, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): dtype, x, y = dtype_xy helpers.test_frontend_method( init_input_dtypes=dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x, }, method_input_dtypes=dtype, method_all_as_kwargs_np={ "mat2": y, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="movedim", dtype_and_input=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=-100, max_value=100, shape=st.shared( helpers.get_shape( min_num_dims=1, max_num_dims=3, min_dim_size=1, max_dim_size=3, ), key="a_s_d", ), ), source=helpers.get_axis( allow_none=False, unique=True, shape=st.shared( helpers.get_shape( min_num_dims=1, max_num_dims=3, min_dim_size=1, max_dim_size=3, ), key="a_s_d", ), min_size=1, force_int=True, ), destination=helpers.get_axis( allow_none=False, unique=True, shape=st.shared( helpers.get_shape( min_num_dims=1, max_num_dims=3, min_dim_size=1, max_dim_size=3, ), key="a_s_d", ), min_size=1, force_int=True, ), ) def test_torch_movedim( dtype_and_input, source, destination, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, value = dtype_and_input helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={"data": value[0]}, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "source": source, "destination": destination, }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # msort @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="msort", dtype_value=helpers.dtype_and_values( available_dtypes=["float32", "float64", "int32", "int64"], shape=st.shared(helpers.get_shape(min_num_dims=1), key="shape"), ), ) def test_torch_msort( dtype_value, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_value helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # mul @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="mul", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, ), ) def test_torch_mul( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, 
on_device=on_device, ) # mul_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="mul_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, shared_dtype=True, ), test_inplace=st.just(True), ) def test_torch_mul_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # multiply @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="multiply", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, ), ) def test_torch_multiply( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # multiply_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="multiply_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, ), test_inplace=st.just(True), ) def test_torch_multiply_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # nanmean @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="nanmean", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=-1e04, max_value=1e04, ), ) def test_torch_nanmean( dtype_x, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # nansum @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="nansum", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=-1e04, max_value=1e04, ), ) def test_torch_nansum( dtype_x, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, 
frontend=frontend, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="narrow", dtype_input_dim_start_length=_dtype_input_dim_start_length(), ) def test_torch_narrow( dtype_input_dim_start_length, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): (input_dtype, x, dim, start, length) = dtype_input_dim_start_length helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={"data": x[0]}, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "dim": dim, "start": start, "length": length, }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @given( dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid", prune_function=False), ret_shape=True, ).filter(lambda x: "bfloat16" not in x[0]), ) def test_torch_ndim(dtype_x, backend_fw): ivy.set_backend(backend_fw) dtype, data, shape = dtype_x x = Tensor(data[0]) ivy.utils.assertions.check_equal(x.ndim, data[0].ndim, as_array=False) ivy.previous_backend() # ndimension @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="ndimension", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), ), ) def test_torch_ndimension( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=[], method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # ne @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="ne", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), num_arrays=2, min_value=-1e04, max_value=1e04, allow_inf=False, ), ) def test_torch_ne( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="ne_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), num_arrays=2, min_value=-1e04, max_value=1e04, allow_inf=False, ), ) def test_torch_ne_( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, atol_=1e-02, on_device=on_device, ) # neg @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="neg", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=-1e04, max_value=1e04, allow_inf=False, 
), ) def test_torch_neg( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # neg_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="neg_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), min_value=-1e04, max_value=1e04, allow_inf=False, ), test_inplace=st.just(True), ) def test_torch_neg_( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # negative @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="negative", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=-1e04, max_value=1e04, allow_inf=False, ), ) def test_torch_negative( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # new @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="new", dtype_and_x=helpers.dtype_and_values(), ) def test_torch_new_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # new_empty (not actually intuitive for testing) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="new_empty", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), ), size=helpers.get_shape( min_num_dims=1, max_num_dims=3, ), ) def test_torch_new_empty( dtype_and_x, size, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=[input_dtype[0]], backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x, }, method_input_dtypes=[ivy.int32], method_all_as_kwargs_np={ "size": size, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # new_full @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="new_full", dtype_and_x=_fill_value_and_size(max_num_dims=3), ) def test_torch_new_full( dtype_and_x, 
frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=[input_dtype[0]], backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=[input_dtype[1]], method_all_as_kwargs_np={ "size": x[1], "fill_value": x[2], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # new_ones @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="new_ones", dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("float")), size=helpers.get_shape( allow_none=False, min_num_dims=1, max_num_dims=5, min_dim_size=1, max_dim_size=10, ), requires_grad_and_dtypes=_requires_grad_and_dtypes(), ) def test_torch_new_ones( dtype_and_x, size, requires_grad_and_dtypes, on_device, frontend_method_data, init_flags, method_flags, frontend, backend_fw, ): input_dtype, x = dtype_and_x requires_grad, dtypes = requires_grad_and_dtypes helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=dtypes, method_all_as_kwargs_np={ "size": size, "dtype": dtypes[0], "requires_grad": requires_grad, "device": on_device, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # new_tensor @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="new_tensor", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, ), ) def test_torch_new_tensor( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=[input_dtype[0]], backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=[input_dtype[1]], method_all_as_kwargs_np={ "data": x[1], "dtype": input_dtype[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # new_zeros @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="new_zeros", dtype_and_x=helpers.dtype_and_values(available_dtypes=helpers.get_dtypes("valid")), size=helpers.get_shape( allow_none=False, min_num_dims=1, max_num_dims=5, min_dim_size=1, max_dim_size=10, ), requires_grad_and_dtypes=_requires_grad_and_dtypes(), ) def test_torch_new_zeros( dtype_and_x, size, requires_grad_and_dtypes, on_device, frontend_method_data, init_flags, method_flags, frontend, backend_fw, ): input_dtype, x = dtype_and_x requires_grad, dtypes = requires_grad_and_dtypes helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=dtypes, method_all_as_kwargs_np={ "size": size, "dtype": dtypes[0], "requires_grad": requires_grad, "device": on_device, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # nonzero @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="nonzero", dtype_and_values=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), ), ) def test_torch_nonzero( dtype_and_values, frontend_method_data, init_flags, method_flags, frontend, on_device, 
backend_fw, ): input_dtype, x = dtype_and_values helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # norm @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="norm", p_dtype_x_axis=_get_axis_and_p(), keepdim=st.booleans(), dtype=helpers.get_dtypes("valid", full=False), ) def test_torch_norm( p_dtype_x_axis, keepdim, dtype, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): p, values = p_dtype_x_axis input_dtype, x, axis = values helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={"data": x[0]}, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "p": p, "dim": axis, "keepdim": keepdim, "dtype": dtype[0], }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # normal_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="normal_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), ), mean=helpers.floats(min_value=-1, max_value=1), std=helpers.floats(min_value=0, max_value=1), ) def test_torch_normal_( dtype_and_x, mean, std, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): dtype, x = dtype_and_x def call(): return helpers.test_frontend_method( init_input_dtypes=dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={"data": x[0]}, method_input_dtypes=dtype, method_all_as_kwargs_np={ "mean": mean, "std": std, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, test_values=False, ) ret = call() if not ivy.exists(ret): return ret_np, ret_from_np = ret ret_np = helpers.flatten_and_to_np(ret=ret_np, backend=backend_fw) ret_from_np = helpers.flatten_and_to_np(ret=ret_from_np, backend=backend_fw) for u, v in zip(ret_np, ret_from_np): assert u.dtype == v.dtype assert u.shape == v.shape # not_equal @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="not_equal", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), num_arrays=2, min_value=-1e04, max_value=1e04, allow_inf=False, ), ) def test_torch_not_equal( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, atol_=1e-02, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="not_equal_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), num_arrays=2, min_value=-1e04, max_value=1e04, allow_inf=False, ), ) def test_torch_not_equal_( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, 
init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, atol_=1e-02, on_device=on_device, ) # numpy @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="numpy", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), ), ) def test_torch_numpy( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x ret, frontend_ret = helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=[], method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, test_values=False, ) # manual testing required as function return is numpy frontend helpers.value_test( ret_np_flat=helpers.flatten_and_to_np(ret=ret, backend=backend_fw), ret_np_from_gt_flat=frontend_ret[0], ground_truth_backend="torch", backend=backend_fw, ) # permute @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="permute", dtype_values_axis=_array_idxes_n_dtype( available_dtypes=helpers.get_dtypes("float"), ), ) def test_torch_permute( dtype_values_axis, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): x, idxes, dtype = dtype_values_axis unpack_dims = True if unpack_dims: method_flags.num_positional_args = len(idxes) + 1 dims = {} i = 0 for x_ in idxes: dims[f"x{i}"] = x_ i += 1 else: dims = { "dims": tuple(idxes), } helpers.test_frontend_method( init_input_dtypes=dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=dtype, method_all_as_kwargs_np=dims, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # pow @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="pow", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, min_value=-1e04, max_value=1e04, allow_inf=False, ), ) def test_torch_pow( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x dtype = input_dtype[0] if "int" in dtype: x[1] = ivy.abs(x[1]) helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "exponent": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # pow_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="pow_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, ), test_inplace=st.just(True), ) def test_torch_pow_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x dtype = input_dtype[0] if "int" in dtype: x[1] = ivy.abs(x[1]) helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "exponent": x[1], }, 
frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # prod @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="prod", dtype_x_axis=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("numeric"), min_num_dims=1, max_num_dims=5, valid_axis=True, allow_neg_axes=False, max_axes_size=1, force_int_axis=True, large_abs_safety_factor=10, small_abs_safety_factor=10, safety_factor_scale="log", ), dtype=helpers.get_dtypes("float", none=True, full=False), keepdims=st.booleans(), ) def test_torch_prod( dtype_x_axis, dtype, keepdims, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x, axis = dtype_x_axis if ivy.current_backend_str() == "torch": init_flags.as_variable = [False] method_flags.as_variable = [False] helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={"data": x[0]}, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "dim": axis, "keepdim": keepdims, "dtype": dtype[0], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="quantile", dtype_and_x=_quantile_helper().filter(lambda x: "bfloat16" not in x[0]), keepdims=st.booleans(), ) def test_torch_quantile( dtype_and_x, keepdims, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x, axis, interpolation, q = dtype_and_x if type(axis) is tuple: axis = axis[0] helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "q": q, "dim": axis, "keepdim": keepdims, "interpolation": interpolation[0], }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # rad2deg @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="rad2deg", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), ), ) def test_torch_rad2deg( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # random_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="random_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float_and_integer"), min_value=1, max_value=5, min_num_dims=1, max_num_dims=5, ), to=helpers.ints(min_value=1, max_value=100), test_inplace=st.just(True), ) def test_torch_random_( dtype_and_x, to, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, method_input_dtypes=input_dtype, frontend_method_data=frontend_method_data, init_all_as_kwargs_np={ "data": x[0], }, method_all_as_kwargs_np={ "to": to, }, init_flags=init_flags, method_flags=method_flags, 
frontend=frontend, on_device=on_device, test_values=False, ) # ravel @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="ravel", dtype_value=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), shape=st.shared(helpers.get_shape(min_num_dims=1), key="shape"), ), ) def test_torch_ravel( dtype_value, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_value helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) @given( dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("complex", prune_function=False) ).filter(lambda x: "bfloat16" not in x[0]), ) def test_torch_real(dtype_x, backend_fw): ivy.set_backend(backend_fw) _, data = dtype_x x = Tensor(data[0]) x.ivy_array = data[0] ivy.utils.assertions.check_equal(x.real, ivy.real(data[0])) ivy.previous_backend() # reciprocal @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="reciprocal", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=1, ), ) def test_torch_reciprocal( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # reciprocal_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="reciprocal_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), min_value=1, ), test_inplace=st.just(True), ) def test_torch_reciprocal_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # relu @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="relu", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), allow_inf=False, ), ) def test_torch_relu( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # remainder @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="remainder", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), large_abs_safety_factor=2.5, small_abs_safety_factor=2.5, shared_dtype=True, num_arrays=2, ), ) 
def test_torch_remainder( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # remainder_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="remainder_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), min_value=-1e04, max_value=1e04, large_abs_safety_factor=2.5, small_abs_safety_factor=2.5, shared_dtype=True, num_arrays=2, ), test_inplace=st.just(True), ) def test_torch_remainder_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # repeat @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="repeat", dtype_x_repeats=_repeat_helper(), unpack_repeat=st.booleans(), ) def test_torch_repeat( dtype_x_repeats, unpack_repeat, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x, repeats = dtype_x_repeats repeat = { "repeats": repeats, } if unpack_repeat: method_flags.num_positional_args = len(repeat["repeats"]) + 1 for i, x_ in enumerate(repeat["repeats"]): repeat[f"x{i}"] = x_ helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np=repeat, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) @given( dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid", prune_function=False), ), requires_grad=st.booleans(), ) def test_torch_requires_grad(dtype_x, requires_grad, backend_fw): ivy.set_backend(backend_fw) _, data = dtype_x x = Tensor(data[0], requires_grad=requires_grad) ivy.utils.assertions.check_equal(x.requires_grad, requires_grad, as_array=False) x.requires_grad = not requires_grad ivy.utils.assertions.check_equal(x.requires_grad, not requires_grad, as_array=False) ivy.previous_backend() @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="reshape", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), shape=st.shared(helpers.get_shape(), key="value_shape"), ), shape=helpers.reshape_shapes( shape=st.shared(helpers.get_shape(), key="value_shape") ), unpack_shape=st.booleans(), ) def test_torch_reshape( dtype_x, shape, unpack_shape, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_x shape = { "shape": shape, } if unpack_shape: method_flags.num_positional_args = len(shape["shape"]) + 1 i = 0 for x_ in shape["shape"]: shape[f"x{i}"] = x_ i += 1 helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, 
method_input_dtypes=input_dtype, method_all_as_kwargs_np=shape, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # reshape_as @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="reshape_as", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2 ), ) def test_torch_reshape_as( dtype_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # round @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="round", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), ), decimals=st.integers(min_value=0, max_value=5), ) def test_torch_round( dtype_and_x, decimals, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "decimals": decimals, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # round_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="round_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), ), decimals=st.integers(min_value=0, max_value=5), test_inplace=st.just(True), ) def test_torch_round_( dtype_and_x, decimals, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "decimals": decimals, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # rsqrt @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="rsqrt", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), ), ) def test_torch_rsqrt( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # rsqrt_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="rsqrt_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), ), test_inplace=st.just(True), ) def test_torch_rsqrt_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, 
init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # scatter @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="scatter", args=put_along_axis_helper(), ) def test_torch_scatter( args, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtypes, x, indices, values, axis = args helpers.test_frontend_method( init_input_dtypes=[input_dtypes[0]], backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x, }, method_input_dtypes=["int64", input_dtypes[0]], method_all_as_kwargs_np={ "dim": axis, "index": indices, "src": values, }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # scatter_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="scatter_", args=put_along_axis_helper(), reduce=st.sampled_from(["add", "multiply"]), ) def test_torch_scatter_( args, reduce, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtypes, x, indices, values, axis = args helpers.test_frontend_method( init_input_dtypes=[input_dtypes[0]], backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x, }, method_input_dtypes=["int64", input_dtypes[0]], method_all_as_kwargs_np={ "dim": axis, "index": indices, "src": values, "reduce": reduce, }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # scatter_add @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="scatter_add", args=put_along_axis_helper(), ) def test_torch_scatter_add( args, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtypes, x, indices, values, axis = args helpers.test_frontend_method( init_input_dtypes=[input_dtypes[0]], backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x, }, method_input_dtypes=["int64", input_dtypes[0]], method_all_as_kwargs_np={ "dim": axis, "index": indices, "src": values, }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # scatter_add_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="scatter_add_", args=put_along_axis_helper(), ) def test_torch_scatter_add_( args, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtypes, x, indices, values, axis = args helpers.test_frontend_method( init_input_dtypes=[input_dtypes[0]], backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x, }, method_input_dtypes=["int64", input_dtypes[0]], method_all_as_kwargs_np={ "dim": axis, "index": indices, "src": values, }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # scatter_reduce @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="scatter_reduce", args=put_along_axis_helper(), mode=st.sampled_from(["sum", "prod", "amin", "amax"]), ) def test_torch_scatter_reduce( args, mode, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtypes, x, indices, values, axis = args helpers.test_frontend_method( init_input_dtypes=[input_dtypes[0]], 
backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x, }, method_input_dtypes=["int64", input_dtypes[0]], method_all_as_kwargs_np={ "dim": axis, "index": indices, "src": values, "reduce": mode, }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # scatter_reduce_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="scatter_reduce_", args=put_along_axis_helper(), mode=st.sampled_from(["sum", "prod", "amin", "amax"]), ) def test_torch_scatter_reduce_( args, mode, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtypes, x, indices, values, axis = args helpers.test_frontend_method( init_input_dtypes=[input_dtypes[0]], backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x, }, method_input_dtypes=["int64", input_dtypes[0]], method_all_as_kwargs_np={ "dim": axis, "index": indices, "src": values, "reduce": mode, }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) @given( dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid", prune_function=False), ret_shape=True, ).filter(lambda x: "bfloat16" not in x[0]), ) def test_torch_shape(dtype_x, backend_fw): ivy.set_backend(backend_fw) dtype, data, shape = dtype_x x = Tensor(data[0]) ivy.utils.assertions.check_equal( x.ivy_array.shape, ivy.Shape(shape), as_array=False ) ivy.previous_backend() # short @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="short", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), min_value=-1e04, max_value=1e04, allow_inf=False, ), ) def test_torch_short( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # sigmoid @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="sigmoid", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), ), ) def test_torch_sigmoid( dtype_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # sigmoid @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="sigmoid_", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), ), test_inplace=st.just(True), ) def test_torch_sigmoid_( dtype_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, 
        method_flags=method_flags,
        frontend=frontend,
        on_device=on_device,
    )


# sign
@handle_frontend_method(
    class_tree=CLASS_TREE,
    init_tree="torch.tensor",
    method_name="sign",
    dtype_x=helpers.dtype_and_values(
        available_dtypes=helpers.get_dtypes("valid"),
    ),
)
def test_torch_sign(
    dtype_x,
    frontend,
    frontend_method_data,
    init_flags,
    method_flags,
    on_device,
    backend_fw,
):
    input_dtype, x = dtype_x
    helpers.test_frontend_method(
        init_input_dtypes=input_dtype,
        backend_to_test=backend_fw,
        init_all_as_kwargs_np={"data": x[0]},
        method_input_dtypes=input_dtype,
        method_all_as_kwargs_np={},
        frontend_method_data=frontend_method_data,
        init_flags=init_flags,
        method_flags=method_flags,
        frontend=frontend,
        on_device=on_device,
    )


# sign_
@handle_frontend_method(
    class_tree=CLASS_TREE,
    init_tree="torch.tensor",
    method_name="sign_",
    dtype_x=helpers.dtype_and_values(
        available_dtypes=helpers.get_dtypes("valid"),
    ),
    test_inplace=st.just(True),
)
def test_torch_sign_(
    dtype_x,
    frontend,
    frontend_method_data,
    init_flags,
    method_flags,
    on_device,
    backend_fw,
):
    input_dtype, x = dtype_x
    helpers.test_frontend_method(
        init_input_dtypes=input_dtype,
        backend_to_test=backend_fw,
        init_all_as_kwargs_np={"data": x[0]},
        method_input_dtypes=input_dtype,
        method_all_as_kwargs_np={},
        frontend_method_data=frontend_method_data,
        init_flags=init_flags,
        method_flags=method_flags,
        on_device=on_device,
        frontend=frontend,
    )


# sin
@handle_frontend_method(
    class_tree=CLASS_TREE,
    init_tree="torch.tensor",
    method_name="sin",
    dtype_and_x=helpers.dtype_and_values(
        available_dtypes=helpers.get_dtypes("float"),
        allow_inf=False,
    ),
)
def test_torch_sin(
    dtype_and_x,
    frontend_method_data,
    init_flags,
    method_flags,
    frontend,
    on_device,
    backend_fw,
):
    input_dtype, x = dtype_and_x
    helpers.test_frontend_method(
        init_input_dtypes=input_dtype,
        backend_to_test=backend_fw,
        init_all_as_kwargs_np={
            "data": x[0],
        },
        method_input_dtypes=input_dtype,
        method_all_as_kwargs_np={},
        frontend_method_data=frontend_method_data,
        init_flags=init_flags,
        method_flags=method_flags,
        frontend=frontend,
        on_device=on_device,
    )


# sin_
@handle_frontend_method(
    class_tree=CLASS_TREE,
    init_tree="torch.tensor",
    method_name="sin_",
    dtype_and_x=helpers.dtype_and_values(
        available_dtypes=helpers.get_dtypes("float"),
        allow_inf=False,
    ),
    test_inplace=st.just(True),
)
def test_torch_sin_(
    dtype_and_x,
    frontend_method_data,
    init_flags,
    method_flags,
    frontend,
    on_device,
    backend_fw,
):
    input_dtype, x = dtype_and_x
    helpers.test_frontend_method(
        init_input_dtypes=input_dtype,
        backend_to_test=backend_fw,
        init_all_as_kwargs_np={
            "data": x[0],
        },
        method_input_dtypes=input_dtype,
        method_all_as_kwargs_np={},
        frontend_method_data=frontend_method_data,
        init_flags=init_flags,
        method_flags=method_flags,
        frontend=frontend,
        on_device=on_device,
    )


@handle_frontend_method(
    class_tree=CLASS_TREE,
    init_tree="torch.tensor",
    method_name="sinc",
    dtype_and_x=helpers.dtype_and_values(
        available_dtypes=helpers.get_dtypes("float"),
    ),
)
def test_torch_sinc(
    *,
    dtype_and_x,
    frontend,
    backend_fw,
    frontend_method_data,
    init_flags,
    method_flags,
    on_device,
):
    input_dtype, x = dtype_and_x
    helpers.test_frontend_method(
        init_input_dtypes=input_dtype,
        init_all_as_kwargs_np={
            "data": x[0],
        },
        method_input_dtypes=input_dtype,
        method_all_as_kwargs_np={},
        frontend_method_data=frontend_method_data,
        init_flags=init_flags,
        method_flags=method_flags,
        frontend=frontend,
        backend_to_test=backend_fw,
        on_device=on_device,
    )


# sinc_
@handle_frontend_method(
    class_tree=CLASS_TREE,
    init_tree="torch.tensor",
    method_name="sinc_",
dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), ), test_inplace=st.just(True), ) def test_torch_sinc_( *, dtype_and_x, frontend, backend_fw, frontend_method_data, init_flags, method_flags, on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, backend_to_test=backend_fw, on_device=on_device, ) # sinh @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="sinh", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), allow_inf=False, ), ) def test_torch_sinh( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # sinh_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="sinh_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), allow_inf=False, ), test_inplace=st.just(True), ) def test_torch_sinh_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # size @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="size", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), shape=st.shared(helpers.get_shape(min_num_dims=1), key="shape"), ), dim=helpers.get_axis( shape=st.shared(helpers.get_shape(min_num_dims=1), key="shape"), force_int=True, ), ) def test_torch_size( dtype_and_x, dim, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "dim": dim, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # softmax @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="softmax", dtype_x_and_axis=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("float"), min_num_dims=1, max_axes_size=1, force_int_axis=True, valid_axis=True, ), dtype=helpers.get_dtypes("float", full=False), ) def test_torch_softmax( dtype_x_and_axis, dtype, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x, axis = dtype_x_and_axis helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "dim": axis, "dtype": dtype[0], }, 
frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # sort @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="sort", dtype_value=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), shape=st.shared(helpers.get_shape(min_num_dims=1), key="shape"), ), dim=helpers.get_axis( shape=st.shared(helpers.get_shape(), key="shape"), allow_neg=True, force_int=True, ), descending=st.booleans(), ) def test_torch_sort( dtype_value, dim, descending, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_value helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "dim": dim, "descending": descending, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # split @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="split", dtype_value=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"), ), split_size=_get_splits(allow_none=False, min_num_dims=1, allow_array_indices=False), dim=st.shared( helpers.get_axis( shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"), force_int=True, ), key="target_axis", ), ) def test_torch_split( dtype_value, split_size, dim, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_value helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "split_size": split_size, "dim": dim, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # sqrt @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="sqrt", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), ), ) def test_torch_sqrt( dtype_x, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={"data": x[0]}, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # sqrt_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="sqrt_", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), ), test_inplace=st.just(True), ) def test_torch_sqrt_( dtype_x, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={"data": x[0]}, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # square @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="square", dtype_x=helpers.dtype_and_values( 
available_dtypes=helpers.get_dtypes("float"), ), ) def test_torch_square( dtype_x, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={"data": x[0]}, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # square_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="square_", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), allow_inf=False, max_value=1e04, min_value=-1e04, ), ) def test_torch_square_( dtype_x, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={"data": x[0]}, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # squeeze @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="squeeze", dtype_value_axis=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("valid"), min_num_dims=1, valid_axis=True, force_int_axis=True, shape=st.shared(helpers.get_shape(min_num_dims=1), key="shape"), ), ) def test_torch_squeeze( dtype_value_axis, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x, axis = dtype_value_axis helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "dim": axis, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # squeeze_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="squeeze_", dtype_value_axis=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("valid"), min_num_dims=1, valid_axis=True, force_int_axis=True, shape=st.shared(helpers.get_shape(min_num_dims=1), key="shape"), ), test_inplace=st.just(True), ) def test_torch_squeeze_( dtype_value_axis, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x, axis = dtype_value_axis helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "dim": axis, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # std @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="std", dtype_and_x=_statistical_dtype_values(function="std"), ) def test_torch_std( dtype_and_x, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x, _, _ = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) 
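
# NOTE: illustrative sketch only. `test_torch_std` above discards the axis and
# correction values drawn by `_statistical_dtype_values` and exercises `std()`
# with its default arguments. A fuller call could forward them, assuming the
# frontend method mirrors `torch.Tensor.std(dim, correction=..., keepdim=...)`
# and that the last two elements of the drawn tuple are the axis and the
# correction (the unpacking names below are hypothetical):
#
#     input_dtype, x, axis, correction = dtype_and_x
#     helpers.test_frontend_method(
#         init_input_dtypes=input_dtype,
#         backend_to_test=backend_fw,
#         init_all_as_kwargs_np={"data": x[0]},
#         method_input_dtypes=input_dtype,
#         method_all_as_kwargs_np={
#             "dim": axis,
#             "correction": correction,
#             "keepdim": False,
#         },
#         frontend_method_data=frontend_method_data,
#         init_flags=init_flags,
#         method_flags=method_flags,
#         frontend=frontend,
#         on_device=on_device,
#     )
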
@handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="stride", dtype_value_axis=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("valid"), min_num_dims=1, valid_axis=True, force_int_axis=True, ), ) def test_torch_stride( dtype_value_axis, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x, axis = dtype_value_axis helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={"data": x[0]}, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"dim": axis}, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # sub @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="sub", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), num_arrays=2, min_value=-1e04, max_value=1e04, allow_inf=False, ), alpha=st.floats(min_value=-1e04, max_value=1e04, allow_infinity=False), ) def test_torch_sub( dtype_and_x, alpha, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], "alpha": alpha, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, atol_=1e-02, on_device=on_device, ) # subtract_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="subtract_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), num_arrays=2, ), test_inplace=st.just(True), ) def test_torch_subtract_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=[input_dtype[0]], backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # sum @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="sum", dtype_x_dim=_get_castable_dtype( min_value=-1e04, max_value=1e04, ), keepdim=st.booleans(), ) def test_torch_sum( dtype_x_dim, keepdim, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x, dim, castable_dtype = dtype_x_dim if method_flags.as_variable: castable_dtype = input_dtype input_dtype = [input_dtype] helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "dim": dim, "keepdim": keepdim, "dtype": castable_dtype, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="svd", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=0, max_value=10, shape=helpers.ints(min_value=2, max_value=5).map(lambda x: (x, x)), ), some=st.booleans(), compute_uv=st.booleans(), ) def test_torch_svd( dtype_and_x, some, compute_uv, 
frontend, backend_fw, frontend_method_data, init_flags, method_flags, on_device, ): input_dtype, x = dtype_and_x x = np.asarray(x[0], dtype=input_dtype[0]) ret, frontend_ret = helpers.test_frontend_method( init_input_dtypes=input_dtype, init_all_as_kwargs_np={ "data": x, }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "some": some, "compute_uv": compute_uv, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, backend_to_test=backend_fw, on_device=on_device, test_values=False, ) with helpers.update_backend(backend_fw) as ivy_backend: ret = [ivy_backend.to_numpy(x) for x in ret] frontend_ret = [np.asarray(x) for x in frontend_ret] u, s, vh = ret frontend_u, frontend_s, frontend_vh = frontend_ret if compute_uv: helpers.assert_all_close( ret_np=frontend_u @ np.diag(frontend_s) @ frontend_vh.T, ret_from_gt_np=u @ np.diag(s) @ vh, rtol=1e-2, atol=1e-2, backend=backend_fw, ground_truth_backend=frontend, ) else: helpers.assert_all_close( ret_np=frontend_s, ret_from_gt_np=s, rtol=1e-2, atol=1e-2, backend=backend_fw, ground_truth_backend=frontend, ) # t @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="t", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), shape=helpers.get_shape(min_num_dims=2, max_num_dims=2), ), ) def test_torch_t( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="take_along_dim", dtype_indices_axis=helpers.array_indices_axis( array_dtypes=helpers.get_dtypes("numeric"), indices_dtypes=["int64"], min_num_dims=1, max_num_dims=5, min_dim_size=1, max_dim_size=10, indices_same_dims=True, ), ) def test_torch_take_along_dim( dtype_indices_axis, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtypes, value, indices, axis, _ = dtype_indices_axis helpers.test_frontend_method( init_input_dtypes=[input_dtypes[0]], backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": value, }, method_input_dtypes=[input_dtypes[1]], method_all_as_kwargs_np={ "indices": indices, "dim": axis, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # tan @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="tan", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), allow_inf=False, ), ) def test_torch_tan( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # tan_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="tan_", dtype_and_x=helpers.dtype_and_values( 
available_dtypes=helpers.get_dtypes("float"), allow_inf=False, ), test_inplace=st.just(True), ) def test_torch_tan_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=[], method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # tanh @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="tanh", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), allow_inf=False, ), ) def test_torch_tanh( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # tanh_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="tanh_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), allow_inf=False, ), test_inplace=st.just(True), ) def test_torch_tanh_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # corrcoef @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="corrcoef", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), ), ) def test_torch_tensor_corrcoef( dtype_and_x, frontend, backend_fw, frontend_method_data, init_flags, method_flags, on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, backend_to_test=backend_fw, on_device=on_device, ) # erfc_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="erfc_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), ), ) def test_torch_tensor_erfc_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, rtol_=1e-2, atol_=1e-2, ) # logaddexp2 @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="logaddexp2", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), num_arrays=2, 
min_num_dims=1, min_value=-100, max_value=100, shared_dtype=True, ), ) def test_torch_tensor_logaddexp2( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # positive @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="positive", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=-1e04, max_value=1e04, allow_inf=False, ), ) def test_torch_tensor_positive( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # tensor_split @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="tensor_split", dtype_value=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("integer"), shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"), ), indices_or_sections=_get_splits( min_num_dims=1, allow_none=False, allow_array_indices=False ), dim=st.shared( helpers.get_axis( shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape"), force_int=True, ), key="target_axis", ), method_num_positional_args=st.just(1), ) def test_torch_tensor_split( dtype_value, indices_or_sections, dim, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_value helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=[], method_all_as_kwargs_np={ "indices_or_sections": indices_or_sections, "dim": dim, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="tile", dtype_and_values=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), shape=st.shared(helpers.get_shape(), key="shape"), ), reps=helpers.get_axis( shape=st.shared(helpers.get_shape(), key="shape"), allow_neg=False, ), ) def test_torch_tile( dtype_and_values, reps, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, values = dtype_and_values if isinstance(reps, tuple): method_flags.num_positional_args = len(reps) else: method_flags.num_positional_args = 1 helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": values[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "reps": reps, }, init_flags=init_flags, method_flags=method_flags, frontend_method_data=frontend_method_data, frontend=frontend, on_device=on_device, ) # to @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="to", args_kwargs=_to_helper(), ) def test_torch_to( args_kwargs, 
frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x, method_num_positional_args, method_all_as_kwargs_np = args_kwargs method_flags.num_positional_args = method_num_positional_args helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np=method_all_as_kwargs_np, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # topk # TODO: add value test after the stable sorting is added to torch # https://github.com/pytorch/pytorch/issues/88184 @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="topk", dtype_x_axis_k=_topk_helper(), largest=st.booleans(), sorted=st.booleans(), ) def test_torch_topk( dtype_x_axis_k, largest, sorted, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, input, axis, k = dtype_x_axis_k helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={"data": input[0]}, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "k": k, "dim": axis, "largest": largest, "sorted": sorted, }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, test_values=False, ) # transpose @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="transpose", dtype_value=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), shape=st.shared(helpers.get_shape(min_num_dims=1), key="shape"), ), dim0=helpers.get_axis( shape=st.shared(helpers.get_shape(), key="shape"), allow_neg=True, force_int=True, ), dim1=helpers.get_axis( shape=st.shared(helpers.get_shape(), key="shape"), allow_neg=True, force_int=True, ), ) def test_torch_transpose( dtype_value, dim0, dim1, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_value helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"dim0": dim0, "dim1": dim1}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # transpose_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="transpose_", dtype_value=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), shape=st.shared(helpers.get_shape(min_num_dims=1), key="shape"), ), dim0=helpers.get_axis( shape=st.shared(helpers.get_shape(), key="shape"), allow_neg=True, force_int=True, ), dim1=helpers.get_axis( shape=st.shared(helpers.get_shape(), key="shape"), allow_neg=True, force_int=True, ), ) def test_torch_transpose_( dtype_value, dim0, dim1, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_value helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "dim0": dim0, "dim1": dim1, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # tril @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", 
method_name="tril", dtype_and_values=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), min_num_dims=2, # Torch requires this. ), diagonal=st.integers(min_value=-100, max_value=100), ) def test_torch_tril( dtype_and_values, diagonal, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_values helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "diagonal": diagonal, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # tril_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="tril_", dtype_and_values=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), min_num_dims=2, # Torch requires this. ), diagonal=st.integers(min_value=-100, max_value=100), test_inplace=st.just(True), ) def test_torch_tril_( dtype_and_values, diagonal, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_values helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "diagonal": diagonal, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # triu @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="triu", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), min_num_dims=2, max_num_dims=5, min_dim_size=1, max_dim_size=5, ), diagonal=st.integers( min_value=-4, max_value=4, ), ) def test_torch_triu( dtype_x, diagonal, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={"data": x[0]}, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"diagonal": diagonal}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # triu_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="triu_", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), min_num_dims=2, max_num_dims=5, min_dim_size=1, max_dim_size=5, ), diagonal=st.integers( min_value=-4, max_value=4, ), ) def test_torch_triu_( dtype_x, diagonal, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={"data": x[0]}, method_input_dtypes=input_dtype, method_all_as_kwargs_np={"diagonal": diagonal}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # true_divide_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="true_divide_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, large_abs_safety_factor=2.5, small_abs_safety_factor=2.5, safety_factor_scale="log", ), test_inplace=st.just(True), ) def test_torch_true_divide_( dtype_and_x, frontend, 
frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x = dtype_and_x assume(not np.any(np.isclose(x[1], 0))) helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={"data": x[0]}, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # trunc @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="trunc", dtype_value=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), shape=st.shared(helpers.get_shape(min_num_dims=1), key="shape"), ), ) def test_torch_trunc( dtype_value, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_value helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # trunc_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="trunc_", dtype_value=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), shape=st.shared(helpers.get_shape(min_num_dims=1), key="shape"), ), test_inplace=st.just(True), ) def test_torch_trunc_( dtype_value, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_value helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # type @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="type", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), ), dtype=helpers.get_dtypes("valid", full=False), ) def test_torch_type( dtype_and_x, dtype, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "dtype": dtype[0], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # type_as @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="type_as", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), num_arrays=2, ), ) def test_torch_type_as( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # unbind @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="unbind", 
    dtype_value_axis=helpers.dtype_values_axis(
        available_dtypes=helpers.get_dtypes("numeric"),
        min_num_dims=1,
        valid_axis=True,
        force_int_axis=True,
    ),
)
def test_torch_unbind(
    dtype_value_axis,
    frontend_method_data,
    init_flags,
    method_flags,
    frontend,
    on_device,
    backend_fw,
):
    input_dtypes, x, axis = dtype_value_axis
    helpers.test_frontend_method(
        init_input_dtypes=input_dtypes,
        backend_to_test=backend_fw,
        init_all_as_kwargs_np={
            "data": x[0],
        },
        method_input_dtypes=input_dtypes,
        method_all_as_kwargs_np={
            "dim": axis,
        },
        frontend_method_data=frontend_method_data,
        init_flags=init_flags,
        method_flags=method_flags,
        frontend=frontend,
        on_device=on_device,
    )


@handle_frontend_method(
    class_tree=CLASS_TREE,
    init_tree="torch.tensor",
    method_name="unflatten",
    shape=st.shared(helpers.get_shape(min_num_dims=1), key="shape"),
    dtype_and_values=helpers.dtype_and_values(
        available_dtypes=helpers.get_dtypes("valid"),
        min_num_dims=1,
        shape_key="shape",
    ),
    axis=helpers.get_axis(
        shape=st.shared(helpers.get_shape(min_num_dims=1), key="shape"),
        force_int=True,
    ),
)
def test_torch_unflatten(
    *,
    dtype_and_values,
    on_device,
    frontend,
    backend_fw,
    shape,
    axis,
    frontend_method_data,
    init_flags,
    method_flags,
):
    dtype, x = dtype_and_values
    sizes = sizes_(shape, axis)
    helpers.test_frontend_method(
        init_input_dtypes=dtype,
        backend_to_test=backend_fw,
        init_all_as_kwargs_np={
            "data": x[0],
        },
        method_input_dtypes=dtype,
        method_all_as_kwargs_np={
            "dim": axis,
            "sizes": sizes,
        },
        frontend_method_data=frontend_method_data,
        init_flags=init_flags,
        method_flags=method_flags,
        frontend=frontend,
        on_device=on_device,
    )


# unfold
@handle_frontend_method(
    class_tree=CLASS_TREE,
    init_tree="torch.tensor",
    method_name="unfold",
    dtype_values_args=_unfold_args(),
)
def test_torch_unfold(
    dtype_values_args,
    frontend_method_data,
    init_flags,
    method_flags,
    frontend,
    on_device,
    backend_fw,
):
    input_dtype, x, axis, size, step = dtype_values_args
    helpers.test_frontend_method(
        init_input_dtypes=input_dtype,
        backend_to_test=backend_fw,
        init_all_as_kwargs_np={
            "data": x,
        },
        method_input_dtypes=input_dtype,
        method_all_as_kwargs_np={
            "dimension": axis,
            "size": size,
            "step": step,
        },
        frontend_method_data=frontend_method_data,
        init_flags=init_flags,
        method_flags=method_flags,
        frontend=frontend,
        on_device=on_device,
    )


# uniform_
@handle_frontend_method(
    class_tree=CLASS_TREE,
    init_tree="torch.tensor",
    method_name="uniform_",
    dtype_and_x=helpers.dtype_and_values(
        available_dtypes=helpers.get_dtypes("valid"),
        min_value=1,
        max_value=5,
        min_num_dims=1,
        max_num_dims=5,
    ),
    from_=helpers.floats(min_value=-1000, max_value=0),
    to=helpers.floats(min_value=1, max_value=1000),
    test_inplace=st.just(True),
)
def test_torch_uniform_(
    dtype_and_x,
    from_,
    to,
    frontend_method_data,
    init_flags,
    method_flags,
    frontend,
    on_device,
    backend_fw,
):
    input_dtype, x = dtype_and_x
    method_flags.num_positional_args = 3
    helpers.test_frontend_method(
        init_input_dtypes=input_dtype,
        backend_to_test=backend_fw,
        init_all_as_kwargs_np={
            "data": x[0],
        },
        method_input_dtypes=input_dtype,
        method_all_as_kwargs_np={
            "from_": from_,
            "to": to,
        },
        frontend_method_data=frontend_method_data,
        init_flags=init_flags,
        method_flags=method_flags,
        frontend=frontend,
        on_device=on_device,
        test_values=False,
    )


# unique
@handle_frontend_method(
    class_tree=CLASS_TREE,
    init_tree="torch.tensor",
    method_name="unique",
    dtype_x_axis=helpers.dtype_values_axis(
        available_dtypes=helpers.get_dtypes("valid"),
        valid_axis=True,
        force_int_axis=True,
    ),
    sorted=st.booleans(),
return_inverse=st.booleans(), return_counts=st.booleans(), ) def test_torch_unique( dtype_x_axis, sorted, return_inverse, return_counts, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x, axis = dtype_x_axis helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={"data": x[0]}, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "sorted": sorted, "return_inverse": return_inverse, "return_counts": return_counts, "dim": axis, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # unique_consecutive @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="unique_consecutive", dtype_x_axis=helpers.dtype_values_axis( available_dtypes=helpers.get_dtypes("valid"), min_num_dims=2, min_dim_size=2, force_int_axis=True, valid_axis=True, ), return_inverse=st.booleans(), return_counts=st.booleans(), ) def test_torch_unique_consecutive( dtype_x_axis, return_inverse, return_counts, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x, axis = dtype_x_axis helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={"data": x[0]}, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "return_inverse": return_inverse, "return_counts": return_counts, "dim": axis, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # unsqueeze @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="unsqueeze", dtype_value=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), shape=st.shared(helpers.get_shape(), key="shape"), ), dim=helpers.get_axis( shape=st.shared(helpers.get_shape(), key="shape"), allow_neg=True, force_int=True, ), ) def test_torch_unsqueeze( dtype_value, dim, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_value helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "dim": dim, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # unsqueeze_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="unsqueeze_", dtype_value=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), shape=st.shared(helpers.get_shape(), key="shape"), ), dim=helpers.get_axis( shape=st.shared(helpers.get_shape(), key="shape"), allow_neg=True, force_int=True, ), test_inplace=st.just(True), ) def test_torch_unsqueeze_( dtype_value, dim, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_value helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "dim": dim, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="var", dtype_and_x=_statistical_dtype_values( function="var", min_value=-1e04, max_value=1e04, ), 
keepdim=st.booleans(), ) def test_torch_var( dtype_and_x, keepdim, frontend, frontend_method_data, init_flags, method_flags, on_device, backend_fw, ): input_dtype, x, axis, correction = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={"data": x[0]}, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "dim": axis, "correction": int(correction), "keepdim": keepdim, }, frontend=frontend, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, on_device=on_device, ) # view @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="view", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), shape=st.shared(helpers.get_shape(), key="value_shape"), ), shape=helpers.reshape_shapes( shape=st.shared(helpers.get_shape(min_num_dims=1), key="value_shape") ), ) def test_torch_view( dtype_x, shape, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "size": shape, }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # view_as @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="view_as", dtype_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric"), shape=st.shared(helpers.get_shape(), key="value_shape"), num_arrays=2, ), ) def test_torch_view_as( dtype_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # vsplit @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="vsplit", dtype_value=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), shape=st.shared(helpers.get_shape(min_num_dims=2), key="value_shape"), ), indices_or_sections=_get_splits( min_num_dims=2, axis=0, allow_none=False, allow_array_indices=False, is_mod_split=True, ), ) def test_torch_vsplit( dtype_value, indices_or_sections, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_value helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=[], method_all_as_kwargs_np={"indices_or_sections": indices_or_sections}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # where @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="where", broadcastables=_broadcastable_trio(), ) def test_torch_where( broadcastables, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): cond, xs, dtypes = broadcastables helpers.test_frontend_method( init_input_dtypes=dtypes, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": xs[0], }, method_input_dtypes=["bool", dtypes[1]], 
method_all_as_kwargs_np={ "condition": cond, "other": xs[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, ) # xlogy @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="xlogy", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), num_arrays=2, min_num_dims=1, min_value=-100, max_value=100, shared_dtype=True, ), ) def test_torch_xlogy( dtype_and_x, frontend, backend_fw, frontend_method_data, init_flags, method_flags, on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, backend_to_test=backend_fw, on_device=on_device, ) # xlogy_ @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="xlogy_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), num_arrays=2, min_num_dims=1, min_value=-100, max_value=100, shared_dtype=True, ), test_inplace=st.just(True), ) def test_torch_xlogy_( dtype_and_x, frontend, backend_fw, frontend_method_data, init_flags, method_flags, on_device, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={ "other": x[1], }, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, backend_to_test=backend_fw, on_device=on_device, ) # zero_ tests @handle_frontend_method( class_tree=CLASS_TREE, init_tree="torch.tensor", method_name="zero_", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), allow_inf=False, ), test_inplace=st.just(True), ) def test_torch_zero_( dtype_and_x, frontend_method_data, init_flags, method_flags, frontend, on_device, backend_fw, ): input_dtype, x = dtype_and_x helpers.test_frontend_method( init_input_dtypes=input_dtype, backend_to_test=backend_fw, init_all_as_kwargs_np={ "data": x[0], }, method_input_dtypes=input_dtype, method_all_as_kwargs_np={}, frontend_method_data=frontend_method_data, init_flags=init_flags, method_flags=method_flags, frontend=frontend, on_device=on_device, )
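
# Note: the tests above all follow the same pattern -- a hypothesis strategy
# draws the dtypes/values/extra arguments, and `helpers.test_frontend_method`
# broadly builds the frontend tensor from the drawn numpy data and checks the
# method's output against the native torch result. The sketch below is
# illustrative only (underscored so pytest does not collect it) and relies
# solely on plain numpy/torch; it shows the kind of equivalence that a test
# such as `test_torch_tril` asserts.
def _tril_reference_sketch():
    import numpy as np
    import torch

    x = np.arange(9, dtype=np.float32).reshape(3, 3)
    diagonal = 1
    # native torch result, which plays the role of the ground truth above
    expected = torch.from_numpy(x).tril(diagonal=diagonal).numpy()
    # the frontend method is expected to reproduce this; here checked via numpy
    assert np.allclose(expected, np.tril(x, k=diagonal))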
ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py/0
{ "file_path": "ivy/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py", "repo_id": "ivy", "token_count": 184925 }
65
"""Collection of tests for unified gradient functions.""" # global from hypothesis import strategies as st import pytest import numpy as np # local import ivy import ivy_tests.test_ivy.helpers as helpers from ivy_tests.test_ivy.helpers import handle_test, BackendHandler # --- Helpers --- # # --------------- # @st.composite def get_gradient_arguments_with_lr( draw, *, min_value=-1e20, max_value=1e20, abs_smallest_val=None, large_abs_safety_factor=2, small_abs_safety_factor=16, num_arrays=1, float_lr=False, no_lr=False, ): dtypes, arrays, shape = draw( helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), num_arrays=num_arrays, min_value=min_value, max_value=max_value, abs_smallest_val=abs_smallest_val, large_abs_safety_factor=large_abs_safety_factor, small_abs_safety_factor=small_abs_safety_factor, safety_factor_scale="log", min_num_dims=1, shared_dtype=True, ret_shape=True, ) ) dtype = dtypes[0] if no_lr: return dtypes, arrays if float_lr: lr = draw( helpers.floats( min_value=1e-2, max_value=1.0, ) ) else: lr = draw( st.one_of( helpers.floats( min_value=1e-2, max_value=1.0, ), helpers.array_values( dtype=dtype, shape=shape, min_value=1e-2, max_value=1.0, ), ) ) if isinstance(lr, list): dtypes += [dtype] return dtypes, arrays, lr # adam_step @handle_test( fn_tree="functional.ivy.adam_step", dtype_n_dcdw_n_mw_n_vw=get_gradient_arguments_with_lr( num_arrays=3, no_lr=True, min_value=1e-05, max_value=1e08, large_abs_safety_factor=2, small_abs_safety_factor=2, ), step=helpers.ints(min_value=1, max_value=3), beta1_n_beta2_n_epsilon=helpers.list_of_size( x=helpers.floats(min_value=1e-1, max_value=1), size=3, ), ) def test_adam_step( *, dtype_n_dcdw_n_mw_n_vw, step, beta1_n_beta2_n_epsilon, test_flags, backend_fw, fn_name, on_device, ): input_dtypes, [dcdw, mw, vw] = dtype_n_dcdw_n_mw_n_vw ( beta1, beta2, epsilon, ) = beta1_n_beta2_n_epsilon helpers.test_function( input_dtypes=input_dtypes, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, rtol_=1e-1, atol_=1e-1, dcdw=dcdw, mw=mw, vw=vw, step=step, beta1=beta1, beta2=beta2, epsilon=epsilon, ) # adam_update @handle_test( fn_tree="functional.ivy.adam_update", dtype_n_ws_n_dcdw_n_mwtm1_n_vwtm1_n_lr=get_gradient_arguments_with_lr( num_arrays=4, min_value=1e-05, max_value=1e08, large_abs_safety_factor=2.0, small_abs_safety_factor=2.0, ), step=st.integers(min_value=1, max_value=10), beta1_n_beta2_n_epsilon=helpers.list_of_size( x=helpers.floats(min_value=1e-2, max_value=1), size=3, ), stopgrad=st.booleans(), ) def test_adam_update( *, dtype_n_ws_n_dcdw_n_mwtm1_n_vwtm1_n_lr, step, beta1_n_beta2_n_epsilon, stopgrad, test_flags, backend_fw, fn_name, on_device, ): input_dtypes, [w, dcdw, mw_tm1, vw_tm1], lr = dtype_n_ws_n_dcdw_n_mwtm1_n_vwtm1_n_lr beta1, beta2, epsilon = beta1_n_beta2_n_epsilon stop_gradients = stopgrad helpers.test_function( input_dtypes=input_dtypes, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, rtol_=1e-2, atol_=1e-2, w=w, dcdw=dcdw, lr=lr, mw_tm1=mw_tm1, vw_tm1=vw_tm1, step=step, beta1=beta1, beta2=beta2, epsilon=epsilon, stop_gradients=stop_gradients, ) # execute_with_gradients @handle_test( fn_tree="functional.ivy.execute_with_gradients", dtype_and_xs=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_num_dims=1, min_dim_size=1, min_value=0, max_value=100, ), retain_grads=st.booleans(), test_instance_method=st.just(False), test_with_out=st.just(False), test_gradients=st.just(False), ) def 
test_execute_with_gradients( *, dtype_and_xs, retain_grads, test_flags, backend_fw, fn_name, on_device ): if backend_fw == "numpy": return def func(xs): with BackendHandler.update_backend( ivy.current_backend(xs.to_native()).backend ) as ivy_backend: if isinstance(xs, ivy_backend.Container): array_idxs = ivy_backend.nested_argwhere(xs, ivy_backend.is_array) array_vals = ivy_backend.multi_index_nest(xs, array_idxs) if len(array_vals) == 0: final_array = None else: final_array = ivy_backend.stack(array_vals) else: final_array = xs ret = ivy_backend.mean(final_array) return ret dtype, xs = dtype_and_xs helpers.test_function( input_dtypes=dtype, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, func=func, rtol_=1e-1, atol_=1e-1, on_device=on_device, xs=xs[0], retain_grads=retain_grads, ) # grad @pytest.mark.parametrize( "x", [[[4.6, 2.1, 5], [2.8, 1.3, 6.2]], [[4.6, 2.1], [5, 2.8], [1.3, 6.2]]] ) @pytest.mark.parametrize("dtype", ["float32", "float64"]) @pytest.mark.parametrize( "func", [lambda x: ivy.mean(ivy.square(x)), lambda x: ivy.mean(ivy.cos(x))] ) @pytest.mark.parametrize("nth", [1, 2, 3]) def test_grad(x, dtype, func, backend_fw, nth): # ToDo: Remove skipping for paddle and jax for nth > 1 if backend_fw == "numpy" or (backend_fw in ["paddle", "jax"] and nth > 1): return with BackendHandler.update_backend(backend_fw) as ivy_backend: _variable_fn = ivy_backend.ivy.functional.ivy.gradients._variable var = _variable_fn(ivy_backend.array(x, dtype=dtype)) fn = ivy_backend.grad(func) if nth > 1: for _ in range(1, nth): fn = ivy_backend.grad(fn) grad = fn(var) grad_np = helpers.flatten_and_to_np(ret=grad, backend=backend_fw) with BackendHandler.update_backend("tensorflow") as gt_backend: _variable_fn = gt_backend.ivy.functional.ivy.gradients._variable var = _variable_fn(ivy.array(x, dtype=dtype)) fn = gt_backend.grad(func) if nth > 1: for _ in range(1, nth): fn = gt_backend.grad(fn) grad_gt = fn(var) grad_np_from_gt = helpers.flatten_and_to_np(ret=grad_gt, backend="tensorflow") for grad, grad_from_gt in zip(grad_np, grad_np_from_gt): assert grad.shape == grad_from_gt.shape assert np.allclose(grad, grad_from_gt) # gradient_descent_update @handle_test( fn_tree="functional.ivy.gradient_descent_update", dtype_n_ws_n_dcdw_n_lr=get_gradient_arguments_with_lr(num_arrays=2), stop_gradients=st.booleans(), ) def test_gradient_descent_update( *, dtype_n_ws_n_dcdw_n_lr, stop_gradients, test_flags, backend_fw, fn_name, on_device, ): input_dtypes, [w, dcdw], lr = dtype_n_ws_n_dcdw_n_lr helpers.test_function( input_dtypes=input_dtypes, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, rtol_=1e-2, atol_=1e-2, w=w, dcdw=dcdw, lr=lr, stop_gradients=stop_gradients, ) # jac @pytest.mark.parametrize( "x", [[[4.6, 2.1, 5], [2.8, 1.3, 6.2]], [[4.6, 2.1], [5, 2.8], [1.3, 6.2]]] ) @pytest.mark.parametrize("dtype", ["float32", "float64"]) @pytest.mark.parametrize("func_str", ["square", "cos"]) def test_jac(x, dtype, func_str, backend_fw): if backend_fw == "numpy": pytest.skip() with BackendHandler.update_backend(backend_fw) as ivy_backend: f = ivy_backend.__dict__[func_str] def func(x): return ivy_backend.mean(f(x)) _variable_fn = ivy_backend.ivy.functional.ivy.gradients._variable var = _variable_fn(ivy_backend.array(x, dtype=dtype)) fn = ivy_backend.jac(func) jacobian = fn(var) jacobian_np = helpers.flatten_and_to_np(ret=jacobian, backend=backend_fw) assert jacobian_np != [] with BackendHandler.update_backend("tensorflow") as gt_backend: f = 
gt_backend.__dict__[func_str] def func(x): return gt_backend.mean(f(x)) _variable_fn = gt_backend.ivy.functional.ivy.gradients._variable var = _variable_fn(gt_backend.array(x, dtype=dtype)) fn = gt_backend.jac(func) jacobian_gt = fn(var) jacobian_np_from_gt = helpers.flatten_and_to_np( ret=jacobian_gt, backend="tensorflow" ) for jacobian, jacobian_from_gt in zip(jacobian_np, jacobian_np_from_gt): assert jacobian.shape == jacobian_from_gt.shape assert np.allclose(jacobian, jacobian_from_gt) # Test nested input def func(xs): return 2 * xs[1]["x2"], xs[0] with BackendHandler.update_backend(backend_fw) as ivy_backend: _variable_fn = ivy_backend.ivy.functional.ivy.gradients._variable var1 = _variable_fn(ivy_backend.array(x[0], dtype=dtype)) var2 = _variable_fn(ivy_backend.array(x[1], dtype=dtype)) var = [var1, {"x2": var2}] fn = ivy_backend.jac(func) jacobian = fn(var) jacobian_np = helpers.flatten_and_to_np(ret=jacobian, backend=backend_fw) with BackendHandler.update_backend("tensorflow") as gt_backend: _variable_fn = gt_backend.ivy.functional.ivy.gradients._variable var1 = _variable_fn(gt_backend.array(x[0], dtype=dtype)) var2 = _variable_fn(gt_backend.array(x[1], dtype=dtype)) var = [var1, {"x2": var2}] fn = gt_backend.jac(func) jacobian_gt = fn(var) jacobian_np_from_gt = helpers.flatten_and_to_np( ret=jacobian_gt, backend="tensorflow" ) for jacobian, jacobian_from_gt in zip(jacobian_np, jacobian_np_from_gt): assert jacobian.shape == jacobian_from_gt.shape assert np.allclose(jacobian, jacobian_from_gt) # Test func with non 0-dim output with BackendHandler.update_backend(backend_fw) as ivy_backend: func = ivy_backend.__dict__[func_str] _variable_fn = ivy_backend.ivy.functional.ivy.gradients._variable var = _variable_fn(ivy_backend.array(x, dtype=dtype)) fn = ivy_backend.jac(func) jacobian = fn(var) jacobian_np = helpers.flatten_and_to_np(ret=jacobian, backend=backend_fw) with BackendHandler.update_backend("tensorflow") as gt_backend: func = gt_backend.__dict__[func_str] _variable_fn = gt_backend.ivy.functional.ivy.gradients._variable var = _variable_fn(gt_backend.array(x, dtype=dtype)) fn = gt_backend.jac(func) jacobian_gt = fn(var) jacobian_np_from_gt = helpers.flatten_and_to_np( ret=jacobian_gt, backend="tensorflow" ) for jacobian, jacobian_from_gt in zip(jacobian_np, jacobian_np_from_gt): assert jacobian.shape == jacobian_from_gt.shape assert np.allclose(jacobian, jacobian_from_gt) # lamb_update @handle_test( fn_tree="functional.ivy.lamb_update", dtype_n_ws_n_dcdw_n_mwtm1_n_vwtm1_n_lr=get_gradient_arguments_with_lr( min_value=-1e5, max_value=1e5, num_arrays=4, ), step=helpers.ints(min_value=1, max_value=100), beta1_n_beta2_n_epsilon_n_lambda=helpers.list_of_size( x=helpers.floats( min_value=1e-2, max_value=1.0, ), size=4, ), mtr=st.one_of( helpers.ints(min_value=1, max_value=10), st.floats(min_value=1e-2, max_value=10, exclude_min=True), ), stopgrad=st.booleans(), ) def test_lamb_update( *, dtype_n_ws_n_dcdw_n_mwtm1_n_vwtm1_n_lr, step, beta1_n_beta2_n_epsilon_n_lambda, mtr, stopgrad, test_flags, backend_fw, fn_name, on_device, ): input_dtypes, [w, dcdw, mw_tm1, vw_tm1], lr = dtype_n_ws_n_dcdw_n_mwtm1_n_vwtm1_n_lr ( beta1, beta2, epsilon, decay_lambda, ) = beta1_n_beta2_n_epsilon_n_lambda max_trust_ratio, stop_gradients = mtr, stopgrad # ToDo: enable gradient tests for jax once the issue with jacrev is resolved if backend_fw == "jax": test_flags.test_gradients = False helpers.test_function( input_dtypes=input_dtypes, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, 
on_device=on_device, rtol_=1e-1, atol_=1e-1, w=w, dcdw=dcdw, lr=lr, mw_tm1=mw_tm1, vw_tm1=vw_tm1, step=step, beta1=beta1, beta2=beta2, epsilon=epsilon, max_trust_ratio=max_trust_ratio, decay_lambda=decay_lambda, stop_gradients=stop_gradients, ) # lars_update @handle_test( fn_tree="functional.ivy.lars_update", dtype_n_ws_n_dcdw_n_lr=get_gradient_arguments_with_lr( num_arrays=2, ), decay_lambda=helpers.floats(min_value=1e-2, max_value=1), stop_gradients=st.booleans(), ) def test_lars_update( *, dtype_n_ws_n_dcdw_n_lr, decay_lambda, stop_gradients, test_flags, backend_fw, fn_name, on_device, ): input_dtypes, [w, dcdw], lr = dtype_n_ws_n_dcdw_n_lr # ToDo: Add testing for bfloat16 back when it returns consistent gradients for jax if "bfloat16" in input_dtypes: return helpers.test_function( input_dtypes=input_dtypes, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, rtol_=1e-1, atol_=1e-1, w=w, dcdw=dcdw, lr=lr, decay_lambda=decay_lambda, stop_gradients=stop_gradients, ) # optimizer_update @handle_test( fn_tree="functional.ivy.optimizer_update", dtype_n_ws_n_effgrad_n_lr=get_gradient_arguments_with_lr(num_arrays=2), stop_gradients=st.booleans(), ) def test_optimizer_update( *, dtype_n_ws_n_effgrad_n_lr, stop_gradients, test_flags, backend_fw, fn_name, on_device, ): input_dtypes, [w, effective_grad], lr = dtype_n_ws_n_effgrad_n_lr helpers.test_function( input_dtypes=input_dtypes, backend_to_test=backend_fw, test_flags=test_flags, fn_name=fn_name, on_device=on_device, rtol_=1e-2, atol_=1e-2, w=w, effective_grad=effective_grad, lr=lr, stop_gradients=stop_gradients, ) # stop_gradient @handle_test( fn_tree="functional.ivy.stop_gradient", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("numeric") ), preserve_type=st.booleans(), test_instance_method=st.just(False), test_gradients=st.just(False), ) def test_stop_gradient( *, dtype_and_x, preserve_type, test_flags, backend_fw, fn_name, on_device ): dtype, x = dtype_and_x helpers.test_function( input_dtypes=dtype, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, x=x[0], preserve_type=preserve_type, ) # value_and_grad @pytest.mark.parametrize( "x", [[[4.6, 2.1, 5], [2.8, 1.3, 6.2]], [[4.6, 2.1], [5, 2.8], [1.3, 6.2]]] ) @pytest.mark.parametrize("dtype", ["float32", "float64"]) @pytest.mark.parametrize( "func", [lambda x: ivy.mean(ivy.square(x)), lambda x: ivy.mean(ivy.cos(x))] ) def test_value_and_grad(x, dtype, func, backend_fw): if backend_fw == "numpy": return with BackendHandler.update_backend(backend_fw) as ivy_backend: var = ivy_backend.ivy.functional.ivy.gradients._variable( ivy_backend.array(x, dtype=dtype) ) fn = ivy_backend.value_and_grad(func) value, grad = fn(var) value_np, grad_np = helpers.flatten_and_to_np( ret=value, backend=backend_fw ), helpers.flatten_and_to_np(ret=grad, backend=backend_fw) with BackendHandler.update_backend("tensorflow") as gt_backend: var = gt_backend.ivy.functional.ivy.gradients._variable( gt_backend.array(x, dtype=dtype) ) fn = gt_backend.value_and_grad(func) value_gt, grad_gt = fn(var) value_np_from_gt, grad_np_from_gt = helpers.flatten_and_to_np( ret=value_gt, backend="tensorflow" ), helpers.flatten_and_to_np(ret=grad_gt, backend="tensorflow") for value, value_from_gt in zip(value_np, value_np_from_gt): assert value.shape == value_from_gt.shape assert np.allclose(value, value_from_gt) for grad, grad_from_gt in zip(grad_np, grad_np_from_gt): assert grad.shape == grad_from_gt.shape assert np.allclose(grad, 
grad_from_gt)
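
# Reference sketch (not a collected test) of the quantity `adam_step` is
# expected to produce, assuming the conventional bias-corrected Adam moments.
# It is included only as a reading aid for the tolerance-based checks above;
# the exact return convention of `ivy.adam_step` is not relied upon here.
def _adam_step_reference_sketch(dcdw, mw, vw, step, beta1, beta2, epsilon):
    # running first and second moment estimates
    mw = beta1 * mw + (1 - beta1) * dcdw
    vw = beta2 * vw + (1 - beta2) * np.square(dcdw)
    # bias correction, then the effective (preconditioned) gradient
    mw_hat = mw / (1 - beta1**step)
    vw_hat = vw / (1 - beta2**step)
    effective_grad = mw_hat / (np.sqrt(vw_hat) + epsilon)
    return effective_grad, mw, vw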
ivy/ivy_tests/test_ivy/test_functional/test_core/test_gradients.py/0
{ "file_path": "ivy/ivy_tests/test_ivy/test_functional/test_core/test_gradients.py", "repo_id": "ivy", "token_count": 9390 }
66
# global from hypothesis import strategies as st # local import ivy import ivy_tests.test_ivy.helpers as helpers from ivy_tests.test_ivy.helpers import handle_test # --- Helpers --- # # --------------- # @st.composite def _reduce_helper(draw): # ToDo: remove the filtering when supported dtypes are fixed for mixed functions dtype = draw( helpers.get_dtypes("valid", full=False).filter(lambda x: "complex" not in x[0]) ) if dtype[0] == "bool": func = draw(st.sampled_from([ivy.logical_and, ivy.logical_or])) else: func = draw(st.sampled_from([ivy.add, ivy.maximum, ivy.minimum, ivy.multiply])) init_value = draw( helpers.dtype_and_values( dtype=dtype, shape=(), allow_inf=True, ) )[1] dtype, operand, shape = draw( helpers.dtype_and_values( min_num_dims=1, dtype=dtype, ret_shape=True, ) ) axes = draw(helpers.get_axis(shape=shape)) return dtype, operand[0], init_value[0], func, axes # --- Main --- # # ------------ # # reduce @handle_test( fn_tree="functional.ivy.experimental.reduce", args=_reduce_helper(), keepdims=st.booleans(), test_with_out=st.just(False), test_gradients=st.just(False), ) def test_reduce(*, args, keepdims, test_flags, backend_fw, fn_name, on_device): dtype, operand, init_value, func, axes = args helpers.test_function( input_dtypes=dtype, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, operand=operand, init_value=init_value, computation=func, axes=axes, keepdims=keepdims, )
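
# Reference sketch (not a collected test) of the fold contract exercised by
# `test_reduce` above, under the usual assumption that `init_value` seeds the
# accumulator and `computation` is applied pairwise along `axes`. With
# `ivy.add` and an init value of 0 the reduction is simply a sum over the
# chosen axes, which is an easy way to sanity-check the behaviour by hand.
def _reduce_reference_sketch():
    import numpy as np

    operand = np.arange(6.0).reshape(2, 3)
    init_value = 0.0
    # reducing with addition over axis 1, seeded with init_value
    folded = init_value + operand.sum(axis=1)
    assert np.allclose(folded, np.asarray([3.0, 12.0]))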
ivy/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_general.py/0
{ "file_path": "ivy/ivy_tests/test_ivy/test_functional/test_experimental/test_core/test_general.py", "repo_id": "ivy", "token_count": 808 }
67
# global import numpy as np import torch from hypothesis import strategies as st, assume # local import ivy import ivy_tests.test_ivy.helpers as helpers from ivy_tests.test_ivy.helpers import handle_test, BackendHandler # --- Helpers --- # # --------------- # def _get_reduce_func(dtype): if dtype == "bool": return st.sampled_from([ivy.logical_and, ivy.logical_or]) else: return st.sampled_from([ivy.add, ivy.maximum, ivy.minimum, ivy.multiply]) @st.composite def _interp_args(draw, mode=None, mode_list=None): mixed_fn_compos = draw(st.booleans()) curr_backend = ivy.current_backend_str() torch_modes = [ "linear", "bilinear", "trilinear", "nearest", "nearest-exact", "area", "bicubic", ] tf_modes = [ "linear", "bilinear", "trilinear", "nearest-exact", "tf_area", "tf_bicubic", "lanczos3", "lanczos5", "mitchellcubic", "gaussian", ] jax_modes = [ "linear", "bilinear", "trilinear", "nearest-exact", "tf_bicubic", "lanczos3", "lanczos5", ] if mode_list == "torch": mode_list = torch_modes if not mode and not mode_list: if curr_backend == "torch" and not mixed_fn_compos: mode = draw(st.sampled_from(torch_modes)) elif curr_backend == "tensorflow" and not mixed_fn_compos: mode = draw(st.sampled_from(tf_modes)) elif curr_backend == "jax" and not mixed_fn_compos: mode = draw(st.sampled_from(jax_modes)) else: mode = draw( st.sampled_from( [ "linear", "bilinear", "trilinear", "nearest", "nearest-exact", "area", "tf_area", "tf_bicubic", "lanczos3", "lanczos5", "mitchellcubic", "gaussian", ] ) ) elif mode_list: mode = draw(st.sampled_from(mode_list)) if mode == "linear": num_dims = 3 elif mode in [ "bilinear", "tf_bicubic", "bicubic", "mitchellcubic", "gaussian", ]: num_dims = 4 elif mode == "trilinear": num_dims = 5 elif mode in [ "nearest", "area", "tf_area", "lanczos3", "lanczos5", "nearest-exact", ]: num_dims = ( draw( helpers.ints(min_value=1, max_value=3, mixed_fn_compos=mixed_fn_compos) ) + 2 ) if curr_backend == "tensorflow" and not mixed_fn_compos: num_dims = 3 dtype, x = draw( helpers.dtype_and_values( available_dtypes=helpers.get_dtypes( "float", mixed_fn_compos=mixed_fn_compos ), min_num_dims=num_dims, max_num_dims=num_dims, min_dim_size=2, max_dim_size=5, max_value=1e04, min_value=-1e04, abs_smallest_val=1e-04, ) ) align_corners = draw(st.booleans()) if draw(st.booleans()): if draw(st.booleans()): scale_factor = draw( st.floats(min_value=max([1 / d for d in x[0].shape[2:]]), max_value=3) ) else: scale_factor = [] for s in x[0].shape[2:]: scale_factor += [draw(st.floats(min_value=1 / s, max_value=3))] recompute_scale_factor = draw(st.booleans()) size = None else: size = draw( st.one_of( st.lists( st.integers(min_value=1, max_value=3 * max(x[0].shape)), min_size=num_dims - 2, max_size=num_dims - 2, ), st.integers(min_value=1, max_value=3 * max(x[0].shape)), ) ) recompute_scale_factor = None scale_factor = None return (dtype, x, mode, size, align_corners, scale_factor, recompute_scale_factor) @st.composite def _lstm_helper(draw): input_size = draw(helpers.ints(min_value=2, max_value=5)) hidden_size = 4 * input_size input_length = draw(helpers.ints(min_value=2, max_value=5)) batch_size = draw(helpers.ints(min_value=1, max_value=4)) * 2 dtype = draw(helpers.get_dtypes("float", full=False)) (time_major, go_backwards, unroll, zero_output_for_mask, return_all_outputs) = draw( helpers.array_bools(size=5) ) shape = [batch_size, input_length, input_size] if time_major: shape = [input_length, batch_size, input_size] inputs = draw( helpers.dtype_and_values( available_dtypes=dtype, shape=shape, min_value=-1, 
max_value=1, abs_smallest_val=1e-5, safety_factor_scale="log", ) )[1][0] mask = draw( st.just([None, [None]]) | helpers.dtype_and_values( available_dtypes=["bool"], shape=[*shape[:2], 1], ) )[1][0] kernel, recurrent_kernel = draw( helpers.dtype_and_values( available_dtypes=dtype, num_arrays=2, shape=(input_size, hidden_size), min_value=-1, max_value=1, abs_smallest_val=1e-5, safety_factor_scale="log", ) )[1] bias, recurrent_bias = draw( helpers.dtype_and_values( available_dtypes=dtype, num_arrays=2, shape=(1, hidden_size), min_value=-1, max_value=1, abs_smallest_val=1e-5, safety_factor_scale="log", ) )[1] init_h, init_c = draw( helpers.dtype_and_values( available_dtypes=dtype, num_arrays=2, shape=(batch_size, input_size), min_value=-1, max_value=1, abs_smallest_val=1e-5, safety_factor_scale="log", ) )[1] dtypes = [dtype[0] for _ in range(7)] if mask is not None: dtypes.append("bool") # ToDo : zero_output_for_mask doesn't work if we don't return_all_outputs # in tensorflow zero_output_for_mask = zero_output_for_mask and return_all_outputs return ( dtypes, inputs, kernel, recurrent_kernel, bias, recurrent_bias, [init_h, init_c], go_backwards, mask, unroll, input_length, time_major, zero_output_for_mask, return_all_outputs, ) @st.composite def _reduce_window_helper(draw, get_func_st): dtype = draw(helpers.get_dtypes("valid", full=False, index=2)) py_func = draw(get_func_st(dtype[0])) init_value = draw( helpers.dtype_and_values( dtype=dtype, shape=(), allow_inf=True, ) )[1] ndim = draw(st.integers(min_value=1, max_value=4)) _, others = draw( helpers.dtype_and_values( num_arrays=4, dtype=["int64"] * 4, shape=(ndim,), min_value=1, max_value=3, small_abs_safety_factor=1, large_abs_safety_factor=1, ) ) others = [other.tolist() for other in others] window, dilation = others[0], others[2] op_shape = [] for i in range(ndim): min_x = window[i] + (window[i] - 1) * (dilation[i] - 1) op_shape.append(draw(st.integers(min_x, min_x + 1))) dtype, operand = draw( helpers.dtype_and_values( dtype=dtype, shape=op_shape, ) ) padding = draw( st.one_of( st.lists( st.tuples( st.integers(min_value=0, max_value=3), st.integers(min_value=0, max_value=3), ), min_size=ndim, max_size=ndim, ), st.sampled_from(["SAME", "VALID"]), ) ) for i, arg in enumerate(others): if len(np.unique(arg)) == 1 and draw(st.booleans()): others[i] = arg[0] return dtype * 2, operand, init_value, py_func, others, padding @st.composite def _valid_dct(draw): dtype, x = draw( helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), max_value=65280, min_value=-65280, min_num_dims=1, max_num_dims=5, min_dim_size=2, max_dim_size=10, shared_dtype=True, ) ) dims_len = len(x[0].shape) n = draw(st.sampled_from([None, "int"])) axis = draw(helpers.ints(min_value=-dims_len, max_value=dims_len - 1)) norm = draw(st.sampled_from([None, "ortho"])) type = draw(helpers.ints(min_value=1, max_value=4)) if n == "int": n = draw(helpers.ints(min_value=1, max_value=20)) if n <= 1 and type == 1: n = 2 if norm == "ortho" and type == 1: norm = None return dtype, x, type, n, axis, norm @st.composite def _valid_stft(draw): dtype, x = draw( helpers.dtype_and_values( available_dtypes=["float32", "float64"], max_value=65280, min_value=-65280, min_num_dims=1, min_dim_size=2, shared_dtype=True, ) ) frame_length = draw(helpers.ints(min_value=16, max_value=100)) frame_step = draw(helpers.ints(min_value=1, max_value=50)) return dtype, x, frame_length, frame_step @st.composite def _x_and_fft(draw): min_fft_points = 2 dtype = draw(helpers.get_dtypes("valid", 
full=False)) x_dim = draw( helpers.get_shape( min_dim_size=2, max_dim_size=100, min_num_dims=1, max_num_dims=4 ) ) x = draw( helpers.array_values( dtype=dtype[0], shape=tuple(x_dim), min_value=-1e5, max_value=1e5, allow_inf=False, large_abs_safety_factor=2.5, small_abs_safety_factor=2.5, safety_factor_scale="log", ) ) dim = draw(helpers.get_axis(shape=x_dim, allow_neg=True, force_int=True)) norm = draw(st.sampled_from(["backward", "forward", "ortho"])) n = draw(st.integers(min_fft_points, 256)) return dtype, x, dim, norm, n @st.composite def _x_and_fft2(draw): min_fft2_points = 2 dtype = draw(helpers.get_dtypes("float_and_complex", full=False)) x_dim = draw( helpers.get_shape( min_dim_size=2, max_dim_size=100, min_num_dims=2, max_num_dims=4 ) ) x = draw( helpers.array_values( dtype=dtype[0], shape=tuple(x_dim), min_value=-1e5, max_value=1e5, allow_inf=False, large_abs_safety_factor=2.5, small_abs_safety_factor=2.5, safety_factor_scale="log", ) ) s = ( draw(st.integers(min_fft2_points, 256)), draw(st.integers(min_fft2_points, 256)), ) dim = draw(st.sampled_from([(0, 1), (-1, -2), (1, 0)])) norm = draw(st.sampled_from(["backward", "forward", "ortho"])) return dtype, x, s, dim, norm @st.composite def _x_and_ifft(draw): min_fft_points = 2 dtype = draw(helpers.get_dtypes("complex")) x_dim = draw( helpers.get_shape( min_dim_size=2, max_dim_size=100, min_num_dims=1, max_num_dims=4 ) ) x = draw( helpers.array_values( dtype=dtype[0], shape=tuple(x_dim), min_value=-1e-10, max_value=1e10, ) ) dim = draw(st.integers(1 - len(list(x_dim)), len(list(x_dim)) - 1)) norm = draw(st.sampled_from(["backward", "forward", "ortho"])) n = draw(st.integers(min_fft_points, 256)) return dtype, x, dim, norm, n @st.composite def _x_and_ifftn(draw): min_fft_points = 2 dtype = draw(helpers.get_dtypes("complex")) x_dim = draw( helpers.get_shape( min_dim_size=2, max_dim_size=100, min_num_dims=1, max_num_dims=4 ) ) x = draw( helpers.array_values( dtype=dtype[0], shape=tuple(x_dim), min_value=-1e-10, max_value=1e10, ) ) axes = draw( st.lists( st.integers(0, len(x_dim) - 1), min_size=1, max_size=len(x_dim), unique=True ) ) norm = draw(st.sampled_from(["forward", "ortho", "backward"])) # Shape for s can be larger, smaller or equal to the size of the input # along the axes specified by axes. # Here, we're generating a list of integers corresponding to each axis in axes. 
s = draw( st.lists( st.integers(min_fft_points, 256), min_size=len(axes), max_size=len(axes) ) ) return dtype, x, s, axes, norm @st.composite def _x_and_rfft(draw): min_fft_points = 2 dtype = draw(helpers.get_dtypes("numeric")) x_dim = draw( helpers.get_shape( min_dim_size=2, max_dim_size=100, min_num_dims=1, max_num_dims=4 ) ) x = draw( helpers.array_values( dtype=dtype[0], shape=tuple(x_dim), min_value=-1e-10, max_value=1e10, ) ) axis = draw(st.integers(1 - len(list(x_dim)), len(list(x_dim)) - 1)) norm = draw(st.sampled_from(["backward", "forward", "ortho"])) n = draw(st.integers(min_fft_points, 256)) return dtype, x, axis, norm, n @st.composite def _x_and_rfftn(draw): min_rfftn_points = 2 dtype = draw(helpers.get_dtypes("float")) x_dim = draw( helpers.get_shape( min_dim_size=2, max_dim_size=100, min_num_dims=1, max_num_dims=3 ) ) x = draw( helpers.array_values( dtype=dtype[0], shape=tuple(x_dim), min_value=-1e10, max_value=1e10, large_abs_safety_factor=2.5, small_abs_safety_factor=2.5, safety_factor_scale="log", ) ) axes = draw( st.lists( st.integers(0, len(x_dim) - 1), min_size=1, max_size=len(x_dim), unique=True ) ) s = draw( st.lists( st.integers(min_rfftn_points, 256), min_size=len(axes), max_size=len(axes) ) ) norm = draw(st.sampled_from(["backward", "forward", "ortho"])) return dtype, x, s, axes, norm @st.composite def max_unpool1d_helper( draw, **data_gen_kwargs, ): dts, values, kernel_size, strides, _ = draw( helpers.arrays_for_pooling( min_dims=3, max_dims=3, data_format="channel_first", **data_gen_kwargs, ) ) dts.extend(["int64"]) values = values[0] if dts[0] in ["float16", "bfloat16"]: values = values.astype(np.float32) dts[0] = "float32" padding = draw(helpers.ints(min_value=0, max_value=2)) if padding > (kernel_size[0] // 2): padding = 0 values, indices = torch.nn.functional.max_pool1d( torch.tensor(values.astype(np.float32)), kernel_size, strides, padding, return_indices=True, ) indices = indices.numpy().astype(np.int64) max_idx = values.shape[-1] - 1 indices = np.where(indices > max_idx, max_idx, indices) values = values.numpy().astype(dts[0]) return dts, values, indices, kernel_size, strides, padding # --- Main --- # # ------------ # @handle_test( fn_tree="functional.ivy.experimental.adaptive_avg_pool1d", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_num_dims=2, max_num_dims=3, min_dim_size=1, max_value=100, min_value=-100, ), output_size=helpers.ints(min_value=1, max_value=5), test_with_out=st.just(False), ground_truth_backend="torch", ) def test_adaptive_avg_pool1d( *, dtype_and_x, output_size, test_flags, backend_fw, fn_name, on_device ): input_dtype, x = dtype_and_x helpers.test_function( input_dtypes=input_dtype, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, input=x[0], output_size=output_size, ) @handle_test( fn_tree="functional.ivy.experimental.adaptive_avg_pool2d", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_num_dims=3, max_num_dims=4, min_dim_size=2, max_value=100, min_value=-100, ), output_size=st.one_of( st.tuples( helpers.ints(min_value=1, max_value=5), helpers.ints(min_value=1, max_value=5), ), helpers.ints(min_value=1, max_value=5), ), data_format=st.sampled_from(["NCHW", "NHWC"]), test_with_out=st.just(False), ground_truth_backend="torch", ) def test_adaptive_avg_pool2d( *, dtype_and_x, output_size, data_format, test_flags, backend_fw, fn_name, on_device ): input_dtype, x = dtype_and_x helpers.test_function( 
input_dtypes=input_dtype, test_flags=test_flags, backend_to_test=backend_fw, on_device=on_device, fn_name=fn_name, input=x[0], output_size=output_size, data_format=data_format, ) @handle_test( fn_tree="functional.ivy.experimental.adaptive_max_pool2d", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_num_dims=3, max_num_dims=4, min_dim_size=1, # Setting max and min value because this operation in paddle is not # numerically stable max_value=100, min_value=-100, ), output_size=st.one_of( st.tuples( helpers.ints(min_value=1, max_value=5), helpers.ints(min_value=1, max_value=5), ), helpers.ints(min_value=1, max_value=5), ), test_with_out=st.just(False), ground_truth_backend="torch", ) def test_adaptive_max_pool2d( *, dtype_and_x, output_size, test_flags, backend_fw, fn_name, on_device ): input_dtype, x = dtype_and_x helpers.test_function( input_dtypes=input_dtype, test_flags=test_flags, backend_to_test=backend_fw, on_device=on_device, fn_name=fn_name, input=x[0], output_size=output_size, ) @handle_test( fn_tree="functional.ivy.experimental.adaptive_max_pool3d", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_num_dims=4, max_num_dims=5, min_dim_size=1, max_value=100, min_value=-100, ), output_size=st.one_of( st.tuples( helpers.ints(min_value=1, max_value=5), helpers.ints(min_value=1, max_value=5), helpers.ints(min_value=1, max_value=5), ), helpers.ints(min_value=1, max_value=5), ), test_with_out=st.just(False), ground_truth_backend="torch", ) def test_adaptive_max_pool3d( *, dtype_and_x, output_size, test_flags, backend_fw, fn_name, on_device ): input_dtype, x = dtype_and_x helpers.test_function( input_dtypes=input_dtype, test_flags=test_flags, backend_to_test=backend_fw, on_device=on_device, fn_name=fn_name, input=x[0], output_size=output_size, ) @handle_test( fn_tree="functional.ivy.experimental.avg_pool1d", x_k_s_p=helpers.arrays_for_pooling(min_dims=3, max_dims=3, min_side=1, max_side=4), count_include_pad=st.booleans(), ceil_mode=st.booleans(), ground_truth_backend="jax", test_gradients=st.just(False), ) def test_avg_pool1d( *, x_k_s_p, count_include_pad, ceil_mode, test_flags, backend_fw, on_device, ): dtype, x, kernel, stride, pad = x_k_s_p helpers.test_function( input_dtypes=dtype, test_flags=test_flags, backend_to_test=backend_fw, fn_name="avg_pool1d", rtol_=1e-2, atol_=1e-2, on_device=on_device, x=x[0], kernel=kernel, strides=stride, padding=pad, count_include_pad=count_include_pad, ceil_mode=ceil_mode, ) # avg_pool2d @handle_test( fn_tree="functional.ivy.experimental.avg_pool2d", x_k_s_p=helpers.arrays_for_pooling(min_dims=4, max_dims=4, min_side=1, max_side=4), count_include_pad=st.booleans(), ceil_mode=st.booleans(), divisor_override=st.one_of(st.none(), st.integers(min_value=1, max_value=4)), data_format=st.sampled_from(["NCHW", "NHWC"]), ground_truth_backend="jax", test_gradients=st.just(False), ) def test_avg_pool2d( *, x_k_s_p, count_include_pad, ceil_mode, divisor_override, data_format, test_flags, backend_fw, on_device, fn_name, ): dtype, x, kernel, stride, pad = x_k_s_p if data_format == "NCHW": x[0] = x[0].reshape( (x[0].shape[0], x[0].shape[3], x[0].shape[1], x[0].shape[2]) ) helpers.test_function( input_dtypes=dtype, test_flags=test_flags, backend_to_test=backend_fw, on_device=on_device, fn_name=fn_name, rtol_=1e-2, atol_=1e-2, x=x[0], kernel=kernel, strides=stride, padding=pad, data_format=data_format, count_include_pad=count_include_pad, ceil_mode=ceil_mode, divisor_override=divisor_override, 
) @handle_test( fn_tree="functional.ivy.experimental.avg_pool3d", x_k_s_p=helpers.arrays_for_pooling(min_dims=5, max_dims=5, min_side=1, max_side=4), count_include_pad=st.booleans(), ceil_mode=st.booleans(), divisor_override=st.one_of(st.none(), st.integers(min_value=1, max_value=4)), ground_truth_backend="jax", test_gradients=st.just(False), ) def test_avg_pool3d( *, x_k_s_p, count_include_pad, ceil_mode, divisor_override, test_flags, backend_fw, fn_name, on_device, ): dtype, x, kernel, stride, pad = x_k_s_p helpers.test_function( input_dtypes=dtype, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, rtol_=1e-1, atol_=1e-1, x=x[0], kernel=kernel, strides=stride, padding=pad, count_include_pad=count_include_pad, ceil_mode=ceil_mode, divisor_override=divisor_override, ) @handle_test( fn_tree="dct", dtype_x_and_args=_valid_dct(), test_gradients=st.just(False), ) def test_dct(*, dtype_x_and_args, test_flags, backend_fw, fn_name, on_device): input_dtype, x, type, n, axis, norm = dtype_x_and_args helpers.test_function( input_dtypes=input_dtype, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, x=x[0], type=type, n=n, axis=axis, norm=norm, rtol_=1e-3, atol_=1e-1, ) @handle_test( fn_tree="dft", d_xfft_axis_n_length=_x_and_fft(), d_xifft_axis_n_length=_x_and_ifft(), inverse=st.booleans(), onesided=st.booleans(), ) def test_dft( *, d_xfft_axis_n_length, d_xifft_axis_n_length, inverse, onesided, test_flags, backend_fw, fn_name, on_device, ): if inverse: dtype, x, axis, norm, dft_length = d_xifft_axis_n_length else: dtype, x, axis, norm, dft_length = d_xfft_axis_n_length helpers.test_function( input_dtypes=dtype, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, x=x, axis=axis, inverse=inverse, onesided=onesided, dft_length=dft_length, norm=norm, ) # dropout1d @handle_test( fn_tree="functional.ivy.experimental.dropout1d", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=0, max_value=50, allow_inf=False, min_num_dims=2, max_num_dims=3, min_dim_size=1, max_dim_size=5, ), prob=helpers.floats(min_value=0, max_value=0.9), training=st.booleans(), data_format=st.sampled_from(["NWC", "NCW"]), test_gradients=st.just(False), test_with_out=st.just(True), ) def test_dropout1d( *, dtype_and_x, prob, training, data_format, test_flags, backend_fw, on_device, fn_name, ): dtype, x = dtype_and_x ret, gt_ret = helpers.test_function( input_dtypes=dtype, test_flags=test_flags, test_values=False, backend_to_test=backend_fw, on_device=on_device, fn_name=fn_name, x=x[0], prob=prob, training=training, data_format=data_format, return_flat_np_arrays=True, ) ret = helpers.flatten_and_to_np(backend=backend_fw, ret=ret) gt_ret = helpers.flatten_and_to_np( backend=test_flags.ground_truth_backend, ret=gt_ret ) for u, v, w in zip(ret, gt_ret, x): # cardinality test assert u.shape == v.shape == w.shape @handle_test( fn_tree="functional.ivy.experimental.dropout2d", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=0, max_value=50, allow_inf=False, min_num_dims=3, max_num_dims=4, min_dim_size=1, max_dim_size=5, ), prob=helpers.floats(min_value=0, max_value=0.9), training=st.booleans(), data_format=st.sampled_from(["NCHW", "NHWC"]), test_gradients=st.just(False), test_with_out=st.just(True), ) def test_dropout2d( *, dtype_and_x, prob, training, data_format, test_flags, backend_fw, on_device, fn_name, ): dtype, x = dtype_and_x ret, 
gt_ret = helpers.test_function( input_dtypes=dtype, test_flags=test_flags, test_values=False, on_device=on_device, backend_to_test=backend_fw, fn_name=fn_name, x=x[0], prob=prob, training=training, data_format=data_format, return_flat_np_arrays=True, ) ret = helpers.flatten_and_to_np(backend=backend_fw, ret=ret) gt_ret = helpers.flatten_and_to_np( backend=test_flags.ground_truth_backend, ret=gt_ret ) for u, v, w in zip(ret, gt_ret, x): # cardinality test assert u.shape == v.shape == w.shape # dropout3d @handle_test( fn_tree="functional.ivy.experimental.dropout3d", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("float"), min_value=0, max_value=50, allow_inf=False, min_num_dims=4, max_num_dims=5, min_dim_size=1, max_dim_size=5, ), prob=helpers.floats(min_value=0, max_value=0.9), training=st.booleans(), data_format=st.sampled_from(["NCDHW", "NDHWC"]), test_gradients=st.just(False), test_with_out=st.just(True), ) def test_dropout3d( *, dtype_and_x, prob, training, data_format, test_flags, backend_fw, on_device, fn_name, ): dtype, x = dtype_and_x ret, gt_ret = helpers.test_function( input_dtypes=dtype, test_flags=test_flags, test_values=False, on_device=on_device, backend_to_test=backend_fw, fn_name=fn_name, x=x[0], prob=prob, training=training, data_format=data_format, return_flat_np_arrays=True, ) ret = helpers.flatten_and_to_np(backend=backend_fw, ret=ret) gt_ret = helpers.flatten_and_to_np( backend=test_flags.ground_truth_backend, ret=gt_ret ) for u, v, w in zip(ret, gt_ret, x): # cardinality test assert u.shape == v.shape == w.shape # embedding @handle_test( fn_tree="functional.ivy.experimental.embedding", dtypes_indices_weights=helpers.embedding_helper(), max_norm=st.one_of(st.none(), st.floats(min_value=1, max_value=5)), number_positional_args=st.just(2), ) def test_embedding( *, dtypes_indices_weights, max_norm, test_flags, backend_fw, on_device, fn_name ): dtypes, indices, weights, _ = dtypes_indices_weights dtypes = [dtypes[1], dtypes[0]] helpers.test_function( input_dtypes=dtypes, test_flags=test_flags, xs_grad_idxs=[[0, 0]], backend_to_test=backend_fw, on_device=on_device, fn_name=fn_name, weights=weights, indices=indices, max_norm=max_norm, ) @handle_test( fn_tree="functional.ivy.experimental.fft", d_x_d_n_n=_x_and_fft(), ground_truth_backend="jax", test_gradients=st.just(False), ) def test_fft(*, d_x_d_n_n, test_flags, backend_fw, on_device, fn_name): dtype, x, dim, norm, n = d_x_d_n_n helpers.test_function( input_dtypes=dtype, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, rtol_=1e-2, atol_=1e-2, on_device=on_device, x=x, dim=dim, norm=norm, n=n, ) @handle_test( fn_tree="functional.ivy.experimental.fft2", d_x_d_s_n=_x_and_fft2(), ground_truth_backend="numpy", container_flags=st.just([False]), test_gradients=st.just(False), ) def test_fft2(*, d_x_d_s_n, test_flags, backend_fw, fn_name, on_device): dtype, x, s, dim, norm = d_x_d_s_n helpers.test_function( input_dtypes=dtype, test_flags=test_flags, backend_to_test=backend_fw, on_device=on_device, fn_name=fn_name, rtol_=1e-2, atol_=1e-2, x=x, s=s, dim=dim, norm=norm, ) @handle_test( fn_tree="idct", dtype_x_and_args=_valid_dct(), test_gradients=st.just(False), ) def test_idct(dtype_x_and_args, test_flags, backend_fw, fn_name, on_device): input_dtype, x, type, n, axis, norm = dtype_x_and_args helpers.test_function( input_dtypes=input_dtype, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, x=x[0], type=type, n=n, axis=axis, norm=norm, rtol_=1e-3, atol_=1e-1, 
on_device=on_device, ) @handle_test( fn_tree="functional.ivy.experimental.ifft", d_x_d_n_n=_x_and_ifft(), test_gradients=st.just(False), ) def test_ifft(*, d_x_d_n_n, test_flags, backend_fw, fn_name): dtype, x, dim, norm, n = d_x_d_n_n helpers.test_function( input_dtypes=dtype, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, rtol_=1e-2, atol_=1e-2, x=x, dim=dim, norm=norm, n=n, ) @handle_test( fn_tree="functional.ivy.experimental.ifftn", d_x_d_s_n=_x_and_ifftn(), ground_truth_backend="numpy", test_gradients=st.just(False), ) def test_ifftn( *, d_x_d_s_n, test_flags, backend_fw, fn_name, on_device, ): dtype, x, s, axes, norm = d_x_d_s_n helpers.test_function( input_dtypes=dtype, test_flags=test_flags, backend_to_test=backend_fw, on_device=on_device, fn_name=fn_name, x=x, s=s, axes=axes, norm=norm, ) @handle_test( fn_tree="functional.ivy.experimental.interpolate", dtype_x_mode=_interp_args(), test_gradients=st.just(False), number_positional_args=st.just(2), ) def test_interpolate(dtype_x_mode, test_flags, backend_fw, fn_name, on_device): ( input_dtype, x, mode, size, align_corners, scale_factor, recompute_scale_factor, ) = dtype_x_mode helpers.test_function( input_dtypes=input_dtype, test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, rtol_=1e-01, atol_=1e-03, x=x[0], size=size, mode=mode, align_corners=align_corners, scale_factor=scale_factor, recompute_scale_factor=recompute_scale_factor, ) @handle_test( fn_tree="functional.ivy.experimental.max_pool1d", x_k_s_p=helpers.arrays_for_pooling( min_dims=3, max_dims=3, min_side=1, max_side=4, explicit_or_str_padding=True, return_dilation=True, data_format=st.sampled_from(["channel_first", "channel_last"]), return_data_format=True, ), ceil_mode=st.sampled_from([True, False]), test_gradients=st.just(False), ground_truth_backend="torch", ) def test_max_pool1d( *, x_k_s_p, ceil_mode, test_flags, backend_fw, fn_name, on_device, ): dtype, x, kernel, stride, pad, dilation, data_format = x_k_s_p data_format = "NCW" if data_format == "channel_first" else "NWC" assume(not (isinstance(pad, str) and (pad.upper() == "VALID") and ceil_mode)) # TODO: Remove this once the paddle backend supports dilation assume(backend_fw != "paddle" or max(list(dilation)) <= 1) helpers.test_function( input_dtypes=dtype, test_flags=test_flags, backend_to_test=backend_fw, on_device=on_device, fn_name=fn_name, rtol_=1e-2, atol_=1e-2, x=x[0], kernel=kernel, strides=stride, padding=pad, dilation=dilation, data_format=data_format, ceil_mode=ceil_mode, ) @handle_test( fn_tree="functional.ivy.experimental.max_pool2d", x_k_s_p=helpers.arrays_for_pooling( min_dims=4, max_dims=4, min_side=2, max_side=4, explicit_or_str_padding=True, return_dilation=True, data_format=st.sampled_from(["channel_first", "channel_last"]), return_data_format=True, ), ceil_mode=st.sampled_from([True, False]), test_gradients=st.just(False), ground_truth_backend="jax", ) def test_max_pool2d( *, x_k_s_p, ceil_mode, test_flags, backend_fw, fn_name, on_device, ): dtype, x, kernel, stride, pad, dilation, data_format = x_k_s_p assume( not ( backend_fw == "tensorflow" and all( stride[i] > kernel[i] or (stride[i] > 1 and dilation[i] > 1) for i in range(2) ) ) ) data_format = "NCHW" if data_format == "channel_first" else "NHWC" assume(not (isinstance(pad, str) and (pad.upper() == "VALID") and ceil_mode)) # TODO: Remove this once the paddle backend supports dilation assume(backend_fw != "paddle" or max(list(dilation)) <= 1) helpers.test_function( input_dtypes=dtype, 
test_flags=test_flags, backend_to_test=backend_fw, fn_name=fn_name, on_device=on_device, rtol_=1e-2, atol_=1e-2, x=x[0], kernel=kernel, strides=stride, padding=pad, dilation=dilation, ceil_mode=ceil_mode, data_format=data_format, ) @handle_test( fn_tree="functional.ivy.experimental.max_pool3d", x_k_s_p=helpers.arrays_for_pooling( min_dims=5, max_dims=5, min_side=1, max_side=4, explicit_or_str_padding=True, return_dilation=True, data_format=st.sampled_from(["channel_first", "channel_last"]), return_data_format=True, ), ceil_mode=st.sampled_from([True, False]), test_gradients=st.just(False), ground_truth_backend="torch", ) def test_max_pool3d( *, x_k_s_p, ceil_mode, test_flags, backend_fw, fn_name, on_device, ): dtype, x, kernel, stride, pad, dilation, data_format = x_k_s_p assume( not ( backend_fw == "tensorflow" and isinstance(pad, str) and pad == "SAME" and any(dil > 1 for dil in dilation) ) ) data_format = "NCDHW" if data_format == "channel_first" else "NDHWC" assume(not (isinstance(pad, str) and (pad.upper() == "VALID") and ceil_mode)) # TODO: Remove this once the paddle backend supports dilation assume(backend_fw != "paddle" or max(list(dilation)) <= 1) helpers.test_function( input_dtypes=dtype, test_flags=test_flags, backend_to_test=backend_fw, on_device=on_device, fn_name=fn_name, rtol_=1e-2, atol_=1e-2, x=x[0], kernel=kernel, strides=stride, padding=pad, data_format=data_format, dilation=dilation, ceil_mode=ceil_mode, ) @handle_test( fn_tree="functional.ivy.experimental.max_unpool1d", x_k_s_p=max_unpool1d_helper(min_side=2, max_side=5), ground_truth_backend="jax", test_gradients=st.just(False), test_with_out=st.just(False), ) def test_max_unpool1d( *, x_k_s_p, test_flags, backend_fw, fn_name, on_device, ): dtype, x, ind, kernel, stride, pad = x_k_s_p helpers.test_function( input_dtypes=dtype, test_flags=test_flags, backend_to_test=backend_fw, on_device=on_device, fn_name=fn_name, rtol_=1e-2, atol_=1e-2, input=x, indices=ind, kernel_size=kernel, strides=stride, padding=pad, ) @handle_test( fn_tree="functional.ivy.experimental.reduce_window", all_args=_reduce_window_helper(_get_reduce_func), test_with_out=st.just(False), ground_truth_backend="jax", ) def test_reduce_window(*, all_args, test_flags, backend_fw, fn_name, on_device): dtypes, operand, init_value, computation, others, padding = all_args helpers.test_function( input_dtypes=dtypes, test_flags=test_flags, backend_to_test=backend_fw, on_device=on_device, fn_name=fn_name, operand=operand[0], init_value=init_value[0], computation=computation, window_dimensions=others[0], window_strides=others[1], padding=padding, base_dilation=others[2], window_dilation=None, ) @handle_test( fn_tree="functional.ivy.experimental.rfft", dtype_x_axis_norm_n=_x_and_rfft(), ground_truth_backend="numpy", ) def test_rfft( *, dtype_x_axis_norm_n, test_flags, backend_fw, fn_name, on_device, ): dtype, x, axis, norm, n = dtype_x_axis_norm_n helpers.test_function( input_dtypes=dtype, test_flags=test_flags, backend_to_test=backend_fw, on_device=on_device, fn_name=fn_name, rtol_=1e-2, atol_=1e-2, x=x, n=n, axis=axis, norm=norm, ) @handle_test( fn_tree="functional.ivy.experimental.rfftn", d_x_d_s_n=_x_and_rfftn(), ground_truth_backend="numpy", test_gradients=st.just(False), ) def test_rfftn( *, d_x_d_s_n, test_flags, backend_fw, fn_name, on_device, ): dtype, x, s, axes, norm = d_x_d_s_n helpers.test_function( input_dtypes=dtype, test_flags=test_flags, backend_to_test=backend_fw, on_device=on_device, fn_name=fn_name, rtol_=1e-2, atol_=1e-2, x=x, s=s, axes=axes, 
norm=norm, ) # test_rnn @handle_test( fn_tree="functional.ivy.experimental.rnn", rnn_args=_lstm_helper(), test_with_out=st.just(False), test_instance_method=st.just(False), ) def test_rnn( *, rnn_args, test_flags, backend_fw, fn_name, on_device, ): # ToDo : Get the tests passing with paddle ( input_dtypes, inputs, kernel_orig, recurrent_kernel_orig, bias_orig, recurrent_bias_orig, initial_states, go_backwards, mask, unroll, input_length, time_major, zero_output_for_mask, return_all_outputs, ) = rnn_args # unsupported dtype of float16 is in our _lstm_step function # so can't be inferred through ivy.function_unsupported_devices_and_dtypes assume(not (backend_fw == "torch" and input_dtypes[0] == "float16")) def _lstm_step(cell_inputs, cell_states): with BackendHandler.update_backend( ivy.current_backend( cell_inputs.to_native() if "ivy" in str(type(cell_inputs)) else cell_inputs ).backend ) as ivy_backend: nonlocal kernel_orig, recurrent_kernel_orig, bias_orig, recurrent_bias_orig kernel = ivy_backend.array(kernel_orig) recurrent_kernel = ivy_backend.array(recurrent_kernel_orig) bias = ivy_backend.array(bias_orig) recurrent_bias = ivy_backend.array(recurrent_bias_orig) h_tm1 = cell_states[0] # previous memory state c_tm1 = cell_states[1] # previous carry state z = ivy_backend.dot(cell_inputs, kernel) + bias z += ivy_backend.dot(h_tm1, recurrent_kernel) + recurrent_bias z0, z1, z2, z3 = ivy_backend.split(z, num_or_size_splits=4, axis=-1) i = ivy_backend.sigmoid(z0) # input f = ivy_backend.sigmoid(z1) # forget c = f * c_tm1 + i * ivy_backend.tanh(z2) o = ivy_backend.sigmoid(z3) # output h = o * ivy_backend.tanh(c) return h, [h, c] helpers.test_function( input_dtypes=input_dtypes, test_flags=test_flags, backend_to_test=backend_fw, on_device=on_device, fn_name=fn_name, rtol_=1e-1, atol_=1e-1, step_function=_lstm_step, inputs=inputs, initial_states=initial_states, go_backwards=go_backwards, mask=mask, constants=None, unroll=unroll, input_length=input_length, time_major=time_major, zero_output_for_mask=zero_output_for_mask, return_all_outputs=return_all_outputs, ) @handle_test( fn_tree="functional.ivy.experimental.sliding_window", all_args=helpers.arrays_for_pooling(3, 3, 1, 2, return_dilation=True), test_with_out=st.just(False), ground_truth_backend="jax", ) def test_sliding_window(*, all_args, test_flags, backend_fw, fn_name, on_device): dtypes, input, k, stride, padding, dilation = all_args helpers.test_function( input_dtypes=dtypes, test_flags=test_flags, backend_to_test=backend_fw, on_device=on_device, fn_name=fn_name, input=input, window_size=k, stride=stride[0], dilation=dilation[0], padding=padding, ) # test_stft @handle_test( fn_tree="functional.ivy.experimental.stft", dtype_x_and_args=_valid_stft(), ground_truth_backend="tensorflow", test_gradients=st.just(False), ) def test_stft( *, dtype_x_and_args, test_flags, backend_fw, fn_name, on_device, ): dtype, x, frame_length, frame_step = dtype_x_and_args helpers.test_function( input_dtypes=dtype, test_flags=test_flags, backend_to_test=backend_fw, on_device=on_device, fn_name=fn_name, rtol_=1e-2, atol_=1e-2, signals=x[0], frame_length=frame_length, frame_step=frame_step, fft_length=None, window_fn=None, pad_end=True, )
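# --- Illustrative sketch (not part of the test suite) -------------------------
# The `_lstm_step` callback passed into the rnn test above implements the
# standard LSTM cell update. Below is a minimal NumPy reproduction of that
# gate arithmetic; the array shapes and names are assumptions chosen only for
# illustration — the real test drives the step through each ivy backend via
# helpers.test_function.
import numpy as np


def _sigmoid(a):
    return 1.0 / (1.0 + np.exp(-a))


def lstm_step_sketch(x_t, h_prev, c_prev, kernel, recurrent_kernel, bias, recurrent_bias):
    # joint projection of the current input and the previous hidden state
    z = x_t @ kernel + bias + h_prev @ recurrent_kernel + recurrent_bias
    z0, z1, z2, z3 = np.split(z, 4, axis=-1)
    i = _sigmoid(z0)                    # input gate
    f = _sigmoid(z1)                    # forget gate
    c = f * c_prev + i * np.tanh(z2)    # updated carry state
    o = _sigmoid(z3)                    # output gate
    h = o * np.tanh(c)                  # updated hidden state
    return h, [h, c]


# hypothetical sizes: batch of 2, 3 input features, 4 hidden units (the kernels
# therefore have 4 gates * 4 units = 16 output columns)
_rng = np.random.default_rng(0)
_h, _ = lstm_step_sketch(
    _rng.standard_normal((2, 3)),
    np.zeros((2, 4)),
    np.zeros((2, 4)),
    _rng.standard_normal((3, 16)),
    _rng.standard_normal((4, 16)),
    np.zeros(16),
    np.zeros(16),
)
assert _h.shape == (2, 4)
# ------------------------------------------------------------------------------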
ivy/ivy_tests/test_ivy/test_functional/test_experimental/test_nn/test_layers.py/0
{ "file_path": "ivy/ivy_tests/test_ivy/test_functional/test_experimental/test_nn/test_layers.py", "repo_id": "ivy", "token_count": 23507 }
68
# global from hypothesis import strategies as st import ivy # local import ivy_tests.test_ivy.helpers as helpers from ivy_tests.test_ivy.helpers import handle_method @handle_method( method_tree="Constant.create_variables", ground_truth_backend="numpy", var_shape=helpers.get_shape(), constant=helpers.floats( large_abs_safety_factor=4, small_abs_safety_factor=4, safety_factor_scale="log" ), init_with_v=st.booleans(), method_with_v=st.booleans(), init_as_variable_flags=st.just([False]), init_num_positional_args=st.just(0), init_native_arrays=st.just([False]), method_as_variable_flags=st.just([False]), method_num_positional_args=st.just(0), method_native_arrays=st.just([False]), method_container_flags=st.just([False]), ) def test_constant( var_shape, constant, init_with_v, method_with_v, class_name, method_name, ground_truth_backend, backend_fw, init_flags, method_flags, on_device, ): ret_ivy, ret_gt = helpers.test_method( backend_to_test=backend_fw, ground_truth_backend=ground_truth_backend, init_flags=init_flags, method_flags=method_flags, init_input_dtypes=[], init_all_as_kwargs_np={"constant": constant}, method_input_dtypes=[], method_all_as_kwargs_np={ "var_shape": var_shape, "device": "cpu", }, class_name=class_name, method_name=method_name, init_with_v=init_with_v, method_with_v=method_with_v, test_values=False, on_device=on_device, ) assert ret_ivy.shape == ret_gt.shape assert ret_ivy.dtype == ret_gt.dtype assert ivy.all(ivy.equal(ret_ivy, ivy.array(constant))) @handle_method( method_tree="FirstLayerSiren.create_variables", ground_truth_backend="jax", var_shape=helpers.get_shape(), fan_in=helpers.ints( min_value=1, safety_factor=4, safety_factor_scale="log", ), init_with_v=st.booleans(), method_with_v=st.booleans(), init_as_variable_flags=st.just([False]), init_num_positional_args=st.just(0), init_native_arrays=st.just([False]), method_as_variable_flags=st.just([False]), method_num_positional_args=st.just(0), method_native_arrays=st.just([False]), method_container_flags=st.just([False]), ) def test_first_layer_siren( var_shape, fan_in, init_with_v, method_with_v, class_name, method_name, backend_fw, ground_truth_backend, init_flags, method_flags, on_device, ): ret_ivy, ret_gt = helpers.test_method( backend_to_test=backend_fw, ground_truth_backend=ground_truth_backend, init_flags=init_flags, method_flags=method_flags, init_input_dtypes=[], init_all_as_kwargs_np={}, method_input_dtypes=[], method_all_as_kwargs_np={ "var_shape": var_shape, "device": "cpu", "fan_in": fan_in, }, class_name=class_name, method_name=method_name, init_with_v=init_with_v, method_with_v=method_with_v, test_values=False, on_device=on_device, ) bound = fan_in assert ret_ivy.shape == ret_gt.shape assert ret_ivy.dtype == ret_gt.dtype assert ivy.all(ivy.less(ivy.abs(ret_ivy), ivy.array(bound))) @handle_method( method_tree="GlorotUniform.create_variables", ground_truth_backend="numpy", var_shape=helpers.get_shape(), fan_in=helpers.ints(min_value=1, max_value=100), fan_out=helpers.ints(min_value=1, max_value=100), init_with_v=st.booleans(), method_with_v=st.booleans(), init_as_variable_flags=st.just([False]), init_num_positional_args=st.just(0), init_native_arrays=st.just([False]), method_as_variable_flags=st.just([False]), method_num_positional_args=st.just(0), method_native_arrays=st.just([False]), method_container_flags=st.just([False]), ) def test_glorot_uniform( var_shape, fan_in, fan_out, init_with_v, method_with_v, class_name, method_name, backend_fw, ground_truth_backend, init_flags, method_flags, on_device, ): 
ret_ivy, ret_gt = helpers.test_method( backend_to_test=backend_fw, ground_truth_backend=ground_truth_backend, init_flags=init_flags, method_flags=method_flags, init_input_dtypes=[], init_all_as_kwargs_np={}, method_input_dtypes=[], method_all_as_kwargs_np={ "var_shape": var_shape, "device": "cpu", "fan_in": fan_in, "fan_out": fan_out, }, class_name=class_name, method_name=method_name, init_with_v=init_with_v, method_with_v=method_with_v, test_values=False, on_device=on_device, ) bound = (6 / (fan_in + fan_out)) ** 0.5 assert ret_ivy.shape == ret_gt.shape assert ret_ivy.dtype == ret_gt.dtype assert ivy.all(ivy.less(ivy.abs(ret_ivy), ivy.array(bound))) @handle_method( method_tree="KaimingNormal.create_variables", mean=helpers.floats( min_value=-1e5, max_value=1e5, small_abs_safety_factor=8, safety_factor_scale="log", ), fan_mode=st.sampled_from(["fan_in", "fan_out", "fan_sum", "fan_avg"]), var_shape=helpers.get_shape(), fan_in=helpers.ints(min_value=1, safety_factor=8, safety_factor_scale="log"), fan_out=helpers.ints(min_value=1, safety_factor=8, safety_factor_scale="log"), negative_slope=helpers.floats( min_value=1e-5, max_value=5.0, ), # should be replaced with helpers.get_dtypes() but somehow it causes inconsistent data generation # noqa dtype=st.sampled_from([None, "float64", "float32", "float16"]), init_with_v=st.booleans(), method_with_v=st.booleans(), ground_truth_backend="numpy", init_as_variable_flags=st.just([False]), init_num_positional_args=st.just(0), init_native_arrays=st.just([False]), method_as_variable_flags=st.just([False]), method_num_positional_args=st.just(0), method_native_arrays=st.just([False]), method_container_flags=st.just([False]), ) def test_kaiming_normal( mean, fan_mode, var_shape, fan_in, fan_out, negative_slope, dtype, init_with_v, method_with_v, class_name, method_name, backend_fw, ground_truth_backend, init_flags, method_flags, on_device, ): ret_ivy, ret_gt = helpers.test_method( backend_to_test=backend_fw, ground_truth_backend=ground_truth_backend, init_flags=init_flags, method_flags=method_flags, init_input_dtypes=[], init_all_as_kwargs_np={ "mean": mean, "fan_mode": fan_mode, }, method_input_dtypes=[], method_all_as_kwargs_np={ "var_shape": var_shape, "device": "cpu", "fan_in": fan_in, "fan_out": fan_out, "negative_slope": negative_slope, "dtype": dtype, }, class_name=class_name, method_name=method_name, init_with_v=init_with_v, method_with_v=method_with_v, test_values=False, on_device=on_device, ) assert ret_ivy.shape == ret_gt.shape assert ret_ivy.dtype == ret_gt.dtype @handle_method( method_tree="Ones.create_variables", ground_truth_backend="numpy", var_shape=helpers.get_shape(), init_with_v=st.booleans(), method_with_v=st.booleans(), init_as_variable_flags=st.just([False]), init_num_positional_args=st.just(0), init_native_arrays=st.just([False]), method_as_variable_flags=st.just([False]), method_num_positional_args=st.just(0), method_native_arrays=st.just([False]), method_container_flags=st.just([False]), ) def test_ones( var_shape, init_with_v, method_with_v, class_name, method_name, backend_fw, ground_truth_backend, init_flags, method_flags, on_device, ): ret_ivy, ret_gt = helpers.test_method( backend_to_test=backend_fw, ground_truth_backend=ground_truth_backend, init_flags=init_flags, method_flags=method_flags, init_input_dtypes=[], init_all_as_kwargs_np={}, method_input_dtypes=[], method_all_as_kwargs_np={ "var_shape": var_shape, "device": "cpu", }, class_name=class_name, method_name=method_name, init_with_v=init_with_v, 
method_with_v=method_with_v, test_values=False, on_device=on_device, ) assert ret_ivy.shape == ret_gt.shape assert ret_ivy.dtype == ret_gt.dtype assert ivy.all(ivy.equal(ret_ivy, ivy.array(1.0))) @handle_method( method_tree="RandomNormal.create_variables", mean=helpers.floats( min_value=-1e5, max_value=1e5, small_abs_safety_factor=8, safety_factor_scale="log", ), stddev=helpers.floats( min_value=0, max_value=1e5, small_abs_safety_factor=8, safety_factor_scale="log", ), shape=helpers.get_shape(), # should be replaced with helpers.get_dtypes() but somehow it causes inconsistent data generation # noqa dtype=st.sampled_from([None, "float64", "float32", "float16"]), init_with_v=st.booleans(), method_with_v=st.booleans(), ground_truth_backend="numpy", init_as_variable_flags=st.just([False]), init_num_positional_args=st.just(0), init_native_arrays=st.just([False]), method_as_variable_flags=st.just([False]), method_num_positional_args=st.just(0), method_native_arrays=st.just([False]), method_container_flags=st.just([False]), ) def test_random_normal( mean, stddev, shape, dtype, init_with_v, method_with_v, class_name, method_name, ground_truth_backend, init_flags, method_flags, on_device, backend_fw, ): ret_ivy, ret_gt = helpers.test_method( backend_to_test=backend_fw, ground_truth_backend=ground_truth_backend, init_flags=init_flags, method_flags=method_flags, init_input_dtypes=[], init_all_as_kwargs_np={ "mean": mean, "stddev": stddev, }, method_input_dtypes=[], method_all_as_kwargs_np={ "var_shape": shape, "device": "cpu", "dtype": dtype, }, class_name=class_name, method_name=method_name, init_with_v=init_with_v, method_with_v=method_with_v, test_values=False, on_device=on_device, ) assert ret_ivy.shape == ret_gt.shape assert ret_ivy.dtype == ret_gt.dtype @handle_method( method_tree="Siren.create_variables", ground_truth_backend="numpy", var_shape=helpers.get_shape(), w0=helpers.floats(min_value=1.0, max_value=100.0), fan_in=st.integers(min_value=1), init_with_v=st.booleans(), method_with_v=st.booleans(), init_as_variable_flags=st.just([False]), init_native_arrays=st.just([False]), init_num_positional_args=st.just(0), method_as_variable_flags=st.just([False]), method_native_arrays=st.just([False]), method_container_flags=st.just([False]), method_num_positional_args=st.just(0), ) def test_siren( var_shape, w0, fan_in, init_with_v, method_with_v, class_name, method_name, backend_fw, ground_truth_backend, init_flags, method_flags, on_device, ): ret_ivy, ret_gt = helpers.test_method( backend_to_test=backend_fw, ground_truth_backend=ground_truth_backend, init_flags=init_flags, method_flags=method_flags, init_input_dtypes=[], init_all_as_kwargs_np={"w0": w0}, method_input_dtypes=[], method_all_as_kwargs_np={ "var_shape": var_shape, "device": "cpu", "fan_in": fan_in, }, class_name=class_name, method_name=method_name, init_with_v=init_with_v, method_with_v=method_with_v, test_values=False, on_device=on_device, ) bound = ((6 / fan_in) ** 0.5) / w0 assert ret_ivy.shape == ret_gt.shape assert ret_ivy.dtype == ret_gt.dtype assert ivy.all(ivy.less(ivy.abs(ret_ivy), ivy.array(bound))) @handle_method( method_tree="Uniform.create_variables", ground_truth_backend="numpy", numerator=helpers.floats(min_value=1.0, max_value=10.0), fan_mode=st.sampled_from(["fan_in", "fan_out", "fan_sum", "fan_avg"]), power=helpers.floats(min_value=1.0, max_value=3.0), gain=helpers.floats(min_value=1.0, max_value=10.0), var_shape=helpers.get_shape(), fan_in=helpers.ints(min_value=1, max_value=100), fan_out=helpers.ints(min_value=1, 
max_value=100), init_with_v=st.booleans(), method_with_v=st.booleans(), init_as_variable_flags=st.just([False]), init_num_positional_args=st.just(0), init_native_arrays=st.just([False]), method_as_variable_flags=st.just([False]), method_num_positional_args=st.just(0), method_native_arrays=st.just([False]), method_container_flags=st.just([False]), ) def test_uniform( numerator, fan_mode, power, gain, var_shape, fan_in, fan_out, init_with_v, method_with_v, class_name, method_name, backend_fw, ground_truth_backend, init_flags, method_flags, on_device, ): ret_ivy, ret_gt = helpers.test_method( backend_to_test=backend_fw, ground_truth_backend=ground_truth_backend, init_flags=init_flags, method_flags=method_flags, init_input_dtypes=[], init_all_as_kwargs_np={ "numerator": numerator, "fan_mode": fan_mode, "power": power, "gain": gain, }, method_input_dtypes=[], method_all_as_kwargs_np={ "var_shape": var_shape, "device": "cpu", "fan_in": fan_in, "fan_out": fan_out, }, class_name=class_name, method_name=method_name, init_with_v=init_with_v, method_with_v=method_with_v, test_values=False, on_device=on_device, ) if fan_mode == "fan_in": fan = fan_in elif fan_mode == "fan_out": fan = fan_out elif fan_mode == "fan_sum": fan = fan_in + fan_out elif fan_mode == "fan_avg": fan = (fan_in + fan_out) / 2 bound = gain * (numerator / fan) ** power assert ret_ivy.shape == ret_gt.shape assert ret_ivy.dtype == ret_gt.dtype assert ivy.all(ivy.less(ivy.abs(ret_ivy), ivy.array(bound))) @handle_method( method_tree="Zeros.create_variables", ground_truth_backend="numpy", var_shape=helpers.get_shape(), init_with_v=st.booleans(), method_with_v=st.booleans(), init_as_variable_flags=st.just([False]), init_num_positional_args=st.just(0), init_native_arrays=st.just([False]), method_as_variable_flags=st.just([False]), method_num_positional_args=st.just(0), method_native_arrays=st.just([False]), method_container_flags=st.just([False]), ) def test_zeros( var_shape, init_with_v, method_with_v, class_name, method_name, backend_fw, ground_truth_backend, init_flags, method_flags, on_device, ): ret_ivy, ret_gt = helpers.test_method( backend_to_test=backend_fw, ground_truth_backend=ground_truth_backend, init_flags=init_flags, method_flags=method_flags, init_input_dtypes=[], init_all_as_kwargs_np={}, method_input_dtypes=[], method_all_as_kwargs_np={ "var_shape": var_shape, "device": "cpu", }, class_name=class_name, method_name=method_name, init_with_v=init_with_v, method_with_v=method_with_v, test_values=False, on_device=on_device, ) assert ret_ivy.shape == ret_gt.shape assert ret_ivy.dtype == ret_gt.dtype assert ivy.all(ivy.equal(ret_ivy, ivy.array(0.0)))
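# --- Illustrative sketch (not part of the test file) --------------------------
# The assertions above check that every sampled weight lies inside an
# analytically known bound, e.g. the Glorot-uniform limit
# sqrt(6 / (fan_in + fan_out)). A standalone NumPy illustration of that
# property follows; the layer sizes are hypothetical.
import numpy as np

fan_in, fan_out = 64, 32
limit = (6.0 / (fan_in + fan_out)) ** 0.5          # 0.25 for these sizes
weights = np.random.uniform(-limit, limit, size=(fan_in, fan_out))
assert np.all(np.abs(weights) <= limit)
# The Uniform initializer test generalises the same idea to
# gain * (numerator / fan) ** power, where `fan` depends on fan_mode
# (fan_in, fan_out, their sum, or their average).
# ------------------------------------------------------------------------------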
ivy/ivy_tests/test_ivy/test_stateful/test_initializers.py/0
{ "file_path": "ivy/ivy_tests/test_ivy/test_stateful/test_initializers.py", "repo_id": "ivy", "token_count": 7881 }
69
import sys import subprocess import pprint import inspect import json from colorama import Fore, Style, init from importlib import import_module from importlib.util import find_spec from tree_generation import generate as generate_backend from shared import BackendNativeObject from dataclasses import asdict all_devices = ("cpu", "gpu", "tpu") all_ivy_dtypes = ( "int8", "int16", "int32", "int64", "uint8", "uint16", "uint32", "uint64", "bfloat16", "float16", "float32", "float64", "complex64", "complex128", "bool", ) all_int_dtypes = ( "int8", "int16", "int32", "int64", "uint8", "uint16", "uint32", "uint64", ) all_uint_dtypes = ( "uint8", "uint16", "uint32", "uint64", ) all_float_dtypes = ( "bfloat16", "float16", "float32", "float64", ) all_complex_dtypes = ( "complex64", "complex128", ) _imported_backend = None _backend_is_installed = False backend = {"name": None, "alias": None} config_natives = { "NativeArray": asdict(BackendNativeObject(name="None", namespace="")), "NativeVariable": asdict(BackendNativeObject(name="None", namespace="")), "NativeDevice": asdict(BackendNativeObject(name="None", namespace="")), "NativeDtype": asdict(BackendNativeObject(name="None", namespace="")), "NativeShape": asdict(BackendNativeObject(name="None", namespace="")), "NativeSparseArray": asdict(BackendNativeObject(name="None", namespace="")), } config_valids = { "valid_devices": list(all_devices), "valid_int_dtypes": list(all_int_dtypes), "valid_float_dtypes": list(all_float_dtypes), "valid_complex_dtypes": list(all_complex_dtypes), } config_flags = { "native_inplace_support": False, "supports_gradients": False, } def _get_user_input(fn, *args, **kwargs): # A basic loop to get user input and handle keyboard interrupt while True: try: ret = fn(*args, **kwargs) if ret: break except KeyboardInterrupt: print("Aborted.") sys.exit() def _update_native_config_value(key): # Handle the logic for updating native config ret = input( "\nPress ENTER to skip, use full namespace\n" f"Enter a value for {Style.BRIGHT + key + Style.NORMAL} " "(case sensitive) " f"default: '{Style.BRIGHT}{config_natives[key]['name']}{Style.NORMAL}': " ) if ret != "" and _imported_backend is not None: parsed = ret.strip().rpartition(".") try: if parsed[1] == "": # primitive type try: obj = __builtins__.__dict__[parsed[-1]] except KeyError: print(f"{Fore.RED}{parsed[-1]} is not a primitive object.") return False else: try: mod = import_module(parsed[0]) except ModuleNotFoundError: print(f"{Fore.RED}failed to import {parsed[0]}") return False try: obj = getattr(mod, parsed[-1]) except AttributeError: print(f"{Fore.RED}{parsed[-1]} is not found in module.") return False if not inspect.isclass(obj): print(f"{Fore.RED}{obj} is not a class.") return False print(f"{Fore.GREEN}Found class: {obj}") # Use alias if exists if backend["alias"] is not None: modified_namespace = parsed[0].replace( backend["name"], backend["alias"], 1 ) config_natives[key] = asdict( BackendNativeObject(name=parsed[-1], namespace=modified_namespace) ) return True except KeyError: print(f"{Fore.RED}Couldn't find {ret}") return False return True def _should_install_backend(package_name): # Check if backend is installed, otherwise install it locally for type hints ret = input( f"Backend {package_name} isn't installed locally, " "would you like to install it? 
[Y/n]\n" ) if ret.lower() == "y": try: # Install backend subprocess.check_call( [sys.executable, "-m", "pip", "install", package_name] ) global _backend_is_installed _backend_is_installed = True with open("../../requirements/optional.txt", "a") as reqr_file: reqr_file.write("\n" + package_name + "\n") except subprocess.CalledProcessError as e: raise RuntimeError( f"{Fore.RED}Installing {package_name} failed. {e}" ) from e elif ret.lower() == "n": print( Fore.YELLOW + "Will continue without backend installed, " "type checking won't be available.\n" ) else: print(f"{Fore.RED}{ret} not understood.") return False return True def _get_backend(): # Main function to query backend package_name = input( "Enter backend name (same as Python package name, case sensitive): " ) package_name = package_name.strip(" ") if package_name.strip(" ") == "": return False backend_spec = find_spec(package_name) if backend_spec is None: try: _get_user_input(_should_install_backend, package_name) except Exception as e: print(e) return False else: global _backend_is_installed _backend_is_installed = True print(f"{Fore.GREEN}Backend {package_name} found.", end=" ") print(f"Installed at {backend_spec.origin}\n") _get_user_input(_add_alias_for_backend) if _backend_is_installed: def _import_name(): ret = ( input( f"Enter Import name for package {package_name}, " f"Press Enter to use {package_name}: " ) .strip() .lower() ) if ret == "": backend["name"] = package_name else: backend["name"] = ret return True _get_user_input(_import_name) global _imported_backend print(f"{Style.BRIGHT}Importing {backend['name']} for type checking...") try: _imported_backend = import_module(backend["name"]) return True except Exception as e: print(f"{Fore.RED}Failed to import {backend['name']}:{e}") return False return True def _add_alias_for_backend(): # Handle adding an alias for backend import ret = input("Enter alias for Python import (Press ENTER to skip): ") ret = ret.strip(" ") if ret == "": return True backend["alias"] = ret return True def _update_flag_config_value(key): # Handle flag input and update it's value ret = input( f"\nToggle flag {Style.BRIGHT}{key}{Style.NORMAL} [Y/n]? " f"default: {Fore.RED}'{config_flags[key]}'" f"{Style.RESET_ALL}: " ) ret = ret.strip(" ").lower() if ret == "y": config_flags[key] = not config_flags[key] return True elif ret in ["n", ""]: return True print(f"{Fore.RED}{ret} not understood.") return False def _update_valid_config_value(key): # Handle valids selection print(f"Select items to remove from list {Style.BRIGHT}{key}:\n") for i, item in enumerate(config_valids[key]): print(f"{i}. {item}") ret = input("\nPress ENTER to skip. Enter numbers (space separated): ") ret = ret.strip("") if ret == "": return True indices = ret.split(" ") indices = [int(item.strip(" ")) for item in indices] for i in sorted(indices, reverse=True): del config_valids[key][i] return True def _call_generate_tree(config_name: str): ret = input(Style.BRIGHT + "\n:: Procced with generation? 
[Y/n]\n").strip().lower() if ret == "y": generate_backend(config_name) return True elif ret == "n": return True return False if __name__ == "__main__": init(autoreset=True) _get_user_input(_get_backend) for key in config_natives: _get_user_input(_update_native_config_value, key) for key in config_flags: _get_user_input(_update_flag_config_value, key) for key in config_valids: _get_user_input(_update_valid_config_value, key) # Add uint dtypes int_dtypes = set(all_int_dtypes).difference(all_uint_dtypes) config_valids["valid_uint_dtypes"] = ( set(config_valids["valid_int_dtypes"]) - int_dtypes ) # Add numeric dtypes and valid dtypes config_valids["valid_numeric_dtypes"] = ( config_valids["valid_int_dtypes"] + config_valids["valid_float_dtypes"] + config_valids["valid_complex_dtypes"] ) config_valids["valid_dtypes"] = config_valids["valid_numeric_dtypes"] + ["bool"] # Create Invalid dict fullset_mapping = { "valid_dtypes": all_ivy_dtypes, "valid_numeric_dtypes": all_int_dtypes + all_float_dtypes + all_complex_dtypes, "valid_int_dtypes": all_int_dtypes, "valid_uint_dtypes": all_uint_dtypes, "valid_float_dtypes": all_float_dtypes, "valid_complex_dtypes": all_complex_dtypes, "valid_devices": all_devices, } for key, value in config_valids.copy().items(): all_items = fullset_mapping[key] invalid_items = list(set(all_items).difference(value)) config_valids[f"in{key}"] = invalid_items for key in config_valids["valid_dtypes"]: new_key = f"native_{key}" config_natives[new_key] = asdict(BackendNativeObject(name="None", namespace="")) _get_user_input(_update_native_config_value, new_key) for key in config_valids["invalid_dtypes"]: new_key = f"native_{key}" config_natives[new_key] = asdict(BackendNativeObject(name="None", namespace="")) print("\n:: Backend\n") pprint.pprint(backend, sort_dicts=False) print("\n:: Config\n") pprint.pprint(config_natives, sort_dicts=False) # Print valids for key, valid_itesm in config_valids.items(): if not key.startswith("in"): valid_items = config_valids[key] invalid_items = config_valids[f"in{key}"] print("\n:: " + key.partition("_")[-1]) print(f"{Fore.GREEN}valid > {valid_items.__str__()}") print(f"{Fore.RED}invalid > {invalid_items.__str__()}") # Print flags for key, value in config_flags.items(): flag_color = Fore.GREEN if value else Fore.RED print(f"\n:: {key}: {flag_color}{value}") json_config = {**backend, **config_flags, **config_natives} for k, v in config_valids.items(): json_config[k] = list(v) file_path = None with open("config.json", "w") as file: json.dump(json_config, file, indent=4) file_path = file.name print(f"Config saved to {file_path}.") _get_user_input(_call_generate_tree, file_path)
ivy/scripts/backend_generation/generate.py/0
{ "file_path": "ivy/scripts/backend_generation/generate.py", "repo_id": "ivy", "token_count": 5140 }
70
# Label failing tests with their pass/fail status on the main-branch dashboard
import sys
from pymongo import MongoClient
from helpers import (
    get_latest_package_version,
    get_submodule_and_function_name,
)

if __name__ == "__main__":
    failed = False
    cluster = MongoClient(
        "mongodb+srv://readonly-user:[email protected]"
    )
    db = cluster["ci_dashboard"]
    with open(sys.argv[2], "w") as f_write:
        with open(sys.argv[1], "r") as f:
            for line in f:
                test_path, backend = line.strip().split(",")
                is_frontend_test = "test_frontends" in test_path
                collection = (
                    db["frontend_tests"] if is_frontend_test else db["ivy_tests"]
                )
                submodule, function_name = get_submodule_and_function_name(
                    test_path, is_frontend_test
                )
                version = get_latest_package_version(backend).replace(".", "_")
                document = collection.find_one({"_id": function_name})
                if document:
                    try:
                        if document[backend][version]["status"]["cpu"]:
                            line = line.strip("\n") + " (main: pass)\n"
                    except KeyError:
                        print(
                            f"Could not find {backend}.{version}.status.cpu for"
                            " document"
                        )
                f_write.write(line)
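# --- Illustrative sketch (hypothetical input, not part of the script) ---------
# Each input line read above is "<test_path>,<backend>"; when the latest
# package version of that backend reports a passing CPU status on the
# dashboard, the line is written back with " (main: pass)" appended.
example_line = "ivy_tests/test_ivy/test_functional/test_core/test_general.py::test_abs,numpy\n"
passed_on_main = True  # stand-in for the MongoDB lookup performed above
if passed_on_main:
    example_line = example_line.strip("\n") + " (main: pass)\n"
assert example_line.endswith("(main: pass)\n")
# ------------------------------------------------------------------------------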
ivy/scripts/run_tests/label_failures.py/0
{ "file_path": "ivy/scripts/run_tests/label_failures.py", "repo_id": "ivy", "token_count": 801 }
71
USER_EMAIL="[email protected]"
USER_NAME="ivy-branch"
TARGET_BRANCH=$1
CLONE_DIR=$(mktemp -d)
GITHUB_SERVER="github.com"

mkdir --parents "$HOME/.ssh"
DEPLOY_KEY_FILE="$HOME/.ssh/deploy_key"
echo "${SSH_DEPLOY_KEY}" > "$DEPLOY_KEY_FILE"
chmod 600 "$DEPLOY_KEY_FILE"

SSH_KNOWN_HOSTS_FILE="$HOME/.ssh/known_hosts"
ssh-keyscan -H "$GITHUB_SERVER" > "$SSH_KNOWN_HOSTS_FILE"

# Quote the key and known-hosts paths so the command survives paths with spaces
export GIT_SSH_COMMAND="ssh -i \"$DEPLOY_KEY_FILE\" -o UserKnownHostsFile=\"$SSH_KNOWN_HOSTS_FILE\""

# Setup git
git config --global user.email "$USER_EMAIL"
git config --global user.name "$USER_NAME"

git clone --single-branch --depth 1 --branch "$TARGET_BRANCH" [email protected]:unifyai/Mapping.git
ivy/scripts/shell/clone_mapping.sh/0
{ "file_path": "ivy/scripts/shell/clone_mapping.sh", "repo_id": "ivy", "token_count": 292 }
72
# Assert All Dependencies are Importable and Correctly Versioned # # ---------------------------------------------------------------# import os import argparse import termcolor import importlib import faulthandler from packaging import version faulthandler.enable() ERROR = False ERROR_MSG = "\n" WARN = False WARN_MSG = "\n" PRINT_MSG = "\n" def parse(str_in): str_in = str_in.replace("\n", "") import_ops = ["==", "<", "<=", ">", ">="] if "mod_name=" in str_in: mod_name = str_in.split("mod_name=")[-1].split(" ")[0].split(",")[0] else: mod_name = str_in.split("=")[0].split(" ")[0] expected_version, expected_op = None, None for import_op in import_ops: if import_op in str_in: lib_name, expected_version = str_in.split(import_op) if "mod_name=" not in str_in: mod_name = lib_name expected_version = expected_version.split(" ")[0].split(",")[0] expected_op = import_op return mod_name, expected_version, expected_op def compare(version1, version2, operator): version1 = version.parse(version1) version2 = version.parse(version2) if operator == "==": return version1 == version2 elif "<" in operator: if operator == "<=": return version1 <= version2 return version1 < version2 else: if operator == ">=": return version1 >= version2 return version1 > version2 def test_imports(fname, assert_version, update_versions): global ERROR global ERROR_MSG global WARN global WARN_MSG global PRINT_MSG versions_to_update = {} msg = f"\nasserting imports work for: {fname}\n\n" PRINT_MSG += msg ERROR_MSG += msg WARN_MSG += msg with open(fname, "r") as f: file_lines = f.readlines() mod_names_n_versions = [parse(req) for req in file_lines] for line_num, (mod_name, expected_version, expected_op) in enumerate( mod_names_n_versions ): # noinspection PyBroadException try: mod = importlib.import_module(mod_name) except Exception as e: ERROR = True msg = f"{mod_name} could not be imported: {e}\n" ERROR_MSG += msg PRINT_MSG += msg continue # noinspection PyBroadException try: # noinspection PyUnresolvedReferences detected_version = mod.__version__ except AttributeError: try: detected_version = ".".join([str(n) for n in mod.VERSION]) except AttributeError: continue except Exception: detected_version = None if detected_version and expected_version: if compare(detected_version, expected_version, expected_op): msg = f"{mod_name} detected correct version: {detected_version}\n" else: msg = ( f"expected version {expected_version} for module {mod_name}, but" f" detected version {detected_version}\n" ) versions_to_update[line_num] = { "expected": expected_version, "detected": detected_version, } if assert_version: ERROR = True ERROR_MSG += msg else: WARN = True WARN_MSG += msg PRINT_MSG += msg else: if detected_version: msg = ( f"{mod_name} detected version: {detected_version}, but no expected" " version provided\n" ) elif expected_version: msg = ( f"{mod_name} expected version: {expected_version}, but unable to" " detect version\n" ) else: msg = ( "no expected version provided, and unable to detect version for" f" {mod_name}\n" ) WARN = True PRINT_MSG += msg WARN_MSG += msg if not update_versions: return for line_num, versions in versions_to_update.items(): orig_str = file_lines[line_num] new_str = orig_str.replace(versions["expected"], versions["detected"]) file_lines[line_num] = new_str with open(fname, "w") as f: f.writelines(file_lines) def main(filepaths, assert_matching_versions, update_versions): for filepath in filepaths.replace(" ", "").split(","): assert os.path.isfile(filepath) test_imports( filepath, 
assert_version=assert_matching_versions, update_versions=update_versions, ) print(PRINT_MSG) if WARN: print(termcolor.colored("WARNING\n" + WARN_MSG, "red")) if ERROR: raise Exception(ERROR_MSG) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "-fp", "--filepaths", type=str, required=True, help=( "Comma separated filepaths of all text files to check. Spaces are ignored." ), ) parser.add_argument( "-amv", "--assert_matching_versions", action="store_true", help=( "Whether to assert that all module versions match those lists in the " "requirements.txt and optional.txt files." ), ) parser.add_argument( "-uv", "--update_versions", action="store_true", help="Whether to update the versions in the installation files.", ) parsed_args = parser.parse_args() main( parsed_args.filepaths, parsed_args.assert_matching_versions, parsed_args.update_versions, )
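# --- Illustrative sketch (values assumed, not read from a requirements file) --
# compare() above reduces to packaging.version comparisons, e.g. a requirement
# demanding ">=4.5.0" is satisfied by a detected 4.8.1:
from packaging import version as _version_sketch

_detected, _expected = "4.8.1", "4.5.0"
assert _version_sketch.parse(_detected) >= _version_sketch.parse(_expected)
# ------------------------------------------------------------------------------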
ivy/scripts/test_dependencies.py/0
{ "file_path": "ivy/scripts/test_dependencies.py", "repo_id": "ivy", "token_count": 2810 }
73