| column | dtype | stats |
| --- | --- | --- |
| repo | stringclasses | 856 values |
| pull_number | int64 | 3 to 127k |
| instance_id | stringlengths | 12 to 58 |
| issue_numbers | listlengths | 1 to 5 |
| base_commit | stringlengths | 40 to 40 |
| patch | stringlengths | 67 to 1.54M |
| test_patch | stringlengths | 0 to 107M |
| problem_statement | stringlengths | 3 to 307k |
| hints_text | stringlengths | 0 to 908k |
| created_at | timestamp[s] | |
google/jax
9,923
google__jax-9923
[ "9567", "9567" ]
4cdc25f1f7969c5b4439766d297cfe5c977d9356
diff --git a/jax/_src/custom_derivatives.py b/jax/_src/custom_derivatives.py --- a/jax/_src/custom_derivatives.py +++ b/jax/_src/custom_derivatives.py @@ -25,6 +25,7 @@ tree_multimap, treedef_is_leaf, treedef_tuple, register_pytree_node_class) from jax._src import custom_api_util +from jax._src import dtypes from jax._src.util import cache, safe_zip, safe_map, split_list, Unhashable from jax._src.api_util import flatten_fun_nokwargs, argnums_partial from jax.core import raise_to_shaped @@ -943,13 +944,25 @@ def rev(objective_fn, res, g): else: return _closure_convert_for_avals(fun, in_tree, in_avals) -def _is_perturbed(x: Any) -> bool: - if isinstance(x, ad.JVPTracer): - return True - elif isinstance(x, core.Tracer): - return any(_is_perturbed(attr) for name, attr in x._contents()) - else: +def _maybe_perturbed(x: Any) -> bool: + # False if x can't represent an AD-perturbed value (i.e. a value + # with a nontrivial tangent attached), up to heuristics, and True otherwise. + # See https://github.com/google/jax/issues/6415 for motivation. + x = core.full_lower(x) + if not isinstance(x, core.Tracer): + # If x is not a Tracer, it can't be perturbed. return False + elif isinstance(x, pe.DynamicJaxprTracer): + # If x is a DynamicJaxprTracer then we're staging out; differentiation could + # happen later, but some types always have trivial tangents. + vspace = x.aval.at_least_vspace() + return not (vspace is core.abstract_unit or vspace is core.abstract_token or + vspace is dtypes.float0) + elif not isinstance(x, ad.JVPTracer): + # If x is not a JVPTracer, recursively check its contents. + return any(_maybe_perturbed(attr) for name, attr in x._contents()) + else: + return True # We can't be sure! @cache() def _closure_convert_for_avals(fun, in_tree, in_avals): @@ -957,7 +970,7 @@ def _closure_convert_for_avals(fun, in_tree, in_avals): jaxpr, out_pvals, consts = pe.trace_to_jaxpr_dynamic(wrapped_fun, in_avals) out_tree = out_tree() - (closure_consts, hoisted_consts), merge = partition_list(_is_perturbed, consts) + (closure_consts, hoisted_consts), merge = partition_list(_maybe_perturbed, consts) num_consts = len(hoisted_consts) def converted_fun(*args_hconsts):
diff --git a/tests/api_test.py b/tests/api_test.py --- a/tests/api_test.py +++ b/tests/api_test.py @@ -5420,6 +5420,21 @@ def f_jvp(primals, tangents): shape = grad(lambda x: jnp.sum(f(x)))(jnp.array(1.)).shape self.assertEqual(shape, ()) + def test_maybe_perturbed_internal_helper_function(self): + # This is a unit test for an internal API. We include it so as not to + # regress https://github.com/google/jax/issues/9567. For an explanation of + # this helper function, see https://github.com/google/jax/issues/6415. + from jax._src.custom_derivatives import _maybe_perturbed + def f(x): + def g(y, _): + z = y * x + self.assertTrue(_maybe_perturbed(z)) + return y, None + g(1, None) + return lax.scan(g, 1, xs=None, length=1)[0] + + jax.jvp(f, (1.0,), (1.0,)) # assertions inside f + class CustomVJPTest(jtu.JaxTestCase):
[Incorrect gradients] `_is_perturbed` produces the wrong value when used inside `lax.scan` etc. This: ```python import jax import jax.lax as lax from jax._src.custom_derivatives import _is_perturbed def f(x): def g(y, _): z = y * x print(_is_perturbed(z)) return y, None g(1, None) return lax.scan(g, 1, xs=None, length=1)[0] jax.jvp(f, (1.0,), (1.0,)) ``` produces: ``` True False ``` The correct behaviour is for both to be `True`. I imagine this is silently setting a few gradients to zero, somewhere out there.
2022-03-16T22:48:06
google/jax
9,924
google__jax-9924
[ "9823" ]
4cdc25f1f7969c5b4439766d297cfe5c977d9356
diff --git a/jax/core.py b/jax/core.py --- a/jax/core.py +++ b/jax/core.py @@ -868,7 +868,8 @@ def f(x): with jax.ensure_compile_time_eval(): y = jnp.sin(3.0) z = jnp.sin(y) - if z > 0: # the value of z is availble and can be used in control flow + z_positive = z > 0 + if z_positive: # z_positive is usable in Python control flow return jnp.sin(x) else: return jnp.cos(x)
ensure_compile_time_eval doc example fails The example of [ensure_compile_time_eval](https://jax.readthedocs.io/en/latest/_autosummary/jax.ensure_compile_time_eval.html) ```python import jax import jax.numpy as jnp print(jax.__version__) # 0.3.1 @jax.jit def f(x): with jax.ensure_compile_time_eval(): y = jnp.sin(3.0) z = jnp.sin(y) #z = z.item() # adding this works if z > 0: # the value of z is availble and can be used in control flow return jnp.sin(x) else: return jnp.cos(x) f(0) ``` gives ConcretizationTypeError. Converting z to python float works.
Hah, oops, thanks! That makes sense. I'm embarrassed that I apparently didn't test this... Also putting the `z > 0` inside the context manager works, i.e. having a line like `positive = z > 0` and then testing `if positive`.
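For reference, a runnable version of the corrected docstring example (a sketch assuming a JAX version that provides `jax.ensure_compile_time_eval`): the comparison is computed inside the context manager, so it is already a concrete boolean by the time Python's `if` sees it.

```python
import jax
import jax.numpy as jnp

@jax.jit
def f(x):
  with jax.ensure_compile_time_eval():
    y = jnp.sin(3.0)
    z = jnp.sin(y)
    z_positive = z > 0  # evaluated eagerly at trace time, so it is concrete
  if z_positive:  # plain Python control flow now works
    return jnp.sin(x)
  else:
    return jnp.cos(x)

f(0.0)  # no ConcretizationTypeError
```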
2022-03-16T23:09:44
google/jax
9,938
google__jax-9938
[ "9936" ]
1d5833d2f15fe81d8866f4b5481e364262a6cb04
diff --git a/jax/_src/api.py b/jax/_src/api.py --- a/jax/_src/api.py +++ b/jax/_src/api.py @@ -544,7 +544,12 @@ def disable_jit(): """Context manager that disables :py:func:`jit` behavior under its dynamic context. For debugging it is useful to have a mechanism that disables :py:func:`jit` - everywhere in a dynamic context. + everywhere in a dynamic context. Note that this not only disables explicit uses + of `jit` by the user, but will also remove any implicit JIT compilation used by the + JAX library: this includes implicit JIT computation of `body` and `cond` + functions passed to higher-level primitives like :func:`scan` and :func:`while_loop`, + JIT used in implementations of :mod:`jax.numpy` functions, and any other case where + `jit` is used within an API's implementation. Values that have a data dependence on the arguments to a jitted function are traced and abstracted. For example, an abstract value may be a
DOC: `disable_jit` scope of influence Maybe the documentation of `disable_jit` should be updated. > For debugging it is useful to have a mechanism that disables [jit()](https://jax.readthedocs.io/en/latest/_autosummary/jax.jit.html#jax.jit) everywhere in a dynamic context. The documentation says that `disable_jit` will disable `jit()`, but does not mention that it will also affect the behavior of control flow operators such as `lax.scan`. _Originally posted by @YouJiacheng in https://github.com/google/jax/discussions/9913#discussioncomment-2373983_
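To illustrate the broadened wording in the patch above, here is a small sketch (standard JAX behavior, not code from this PR): under `disable_jit`, the `body` passed to `lax.scan` executes eagerly on concrete values, which is what makes the context manager useful for debugging.

```python
import jax
import jax.numpy as jnp
from jax import lax

def cumsum(xs):
  def body(carry, x):
    print(x)  # a tracer when the body is staged out; a concrete value under disable_jit
    return carry + x, carry + x
  return lax.scan(body, jnp.array(0.0), xs)[1]

with jax.disable_jit():
  cumsum(jnp.arange(3.0))  # the body runs as a plain Python loop over concrete values
```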
2022-03-17T17:18:30
google/jax
9,942
google__jax-9942
[ "9941" ]
c3a4a6e63da11246611247feac7ff4c00750ae21
diff --git a/jax/_src/nn/functions.py b/jax/_src/nn/functions.py --- a/jax/_src/nn/functions.py +++ b/jax/_src/nn/functions.py @@ -258,7 +258,7 @@ def gelu(x: Array, approximate: bool = True) -> Array: else: return jnp.array(x * (lax.erf(x / np.sqrt(2)) + 1) / 2, dtype=x.dtype) -@partial(jax.jit, static_argnames=("glu",)) +@partial(jax.jit, static_argnames=("axis",)) def glu(x: Array, axis: int = -1) -> Array: """Gated linear unit activation function.
diff --git a/tests/nn_test.py b/tests/nn_test.py --- a/tests/nn_test.py +++ b/tests/nn_test.py @@ -81,7 +81,7 @@ def testEluValue(self): self.assertAllClose(val, 1e4, check_dtypes=False) def testGluValue(self): - val = nn.glu(jnp.array([1.0, 0.0])) + val = nn.glu(jnp.array([1.0, 0.0]), axis=0) self.assertAllClose(val, jnp.array([0.5])) @parameterized.parameters(False, True)
bug in jax.nn.glu There is an issue with `jax.nn.glu` in the latest release https://github.com/google/jax/blob/728e4fd3fad334e551dcd1a13b47b8d51aae5a9f/jax/_src/nn/functions.py#L261 Here the `static_argnames` should be `axis`. Right now calling `glu` results in a `TracerIntegerConversionError` ```python import jax import jax.numpy as jnp jax.nn.glu(jnp.ones((1, 4)), axis=1) ``` full error messages/tracebacks. ``` TracerIntegerConversionError Traceback (most recent call last) Input In [8], in <module> ----> 1 jax.nn.glu(jnp.ones((1, 4)), 1) [... skipping hidden 14 frame] File ~/transformers-env/lib/python3.8/site-packages/jax/_src/nn/functions.py:269, in glu(x, axis) 261 @partial(jax.jit, static_argnames=("glu",)) 262 def glu(x: Array, axis: int = -1) -> Array: 263 """Gated linear unit activation function. 264 265 Args: 266 x : input array 267 axis: the axis along which the split should be computed (default: -1) 268 """ --> 269 size = x.shape[axis] 270 assert size % 2 == 0, "axis size must be divisible by 2" 271 x1, x2 = jnp.split(x, 2, axis) File ~/transformers-env/lib/python3.8/site-packages/jax/core.py:473, in Tracer.__index__(self) 472 def __index__(self): --> 473 raise TracerIntegerConversionError(self) TracerIntegerConversionError: The __index__() method was called on the JAX Tracer object Traced<ShapedArray(int32[], weak_type=True)>with<DynamicJaxprTrace(level=0/1)> See https://jax.readthedocs.io/en/latest/errors.html#jax.errors.TracerIntegerConversionError ```
Thanks for the report - it looks like a typo. The fact that this made it into a release makes me worried that we don't have good test coverage for the function. I'll make a fix
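The one-character fix above marks `axis` as static. For context, a minimal sketch of why that matters (illustrative code, not the library's `glu` implementation): a static argument reaches the traced function as a concrete Python int, so `x.shape[axis]` and `jnp.split` work under `jit`.

```python
from functools import partial
import jax
import jax.numpy as jnp

@partial(jax.jit, static_argnames=("axis",))
def split_halves(x, axis=-1):
  size = x.shape[axis]  # axis is a concrete int because it is marked static
  assert size % 2 == 0, "axis size must be divisible by 2"
  return jnp.split(x, 2, axis)

split_halves(jnp.ones((1, 4)), axis=1)  # works; tracing axis instead would raise
                                        # TracerIntegerConversionError as in the report
```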
2022-03-17T18:38:36
google/jax
9,955
google__jax-9955
[ "9593" ]
6cd9804163203e4da13b33171c5889b6d17e5f43
diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py --- a/jax/_src/numpy/lax_numpy.py +++ b/jax/_src/numpy/lax_numpy.py @@ -5159,6 +5159,10 @@ def _nbytes(arr): return size(arr) * _dtype(arr).itemsize +def _itemsize(arr): + return _dtype(arr).itemsize + + def _clip(number, min=None, max=None, out=None, *, a_min=None, a_max=None): # ndarray.clip has a slightly different API from clip (min -> a_min, max -> a_max) # TODO: remove after deprecation window @@ -5655,6 +5659,7 @@ def _set_shaped_array_attributes(shaped_array): setattr(shaped_array, "astype", core.aval_method(_astype)) setattr(shaped_array, "view", core.aval_method(_view)) setattr(shaped_array, "nbytes", core.aval_property(_nbytes)) + setattr(shaped_array, "itemsize", core.aval_property(_itemsize)) setattr(shaped_array, "clip", core.aval_method(_clip)) setattr(shaped_array, "_array_module", staticmethod(__array_module__)) @@ -5685,6 +5690,7 @@ def _set_device_array_base_attributes(device_array): setattr(device_array, "astype", _astype) setattr(device_array, "view", _view) setattr(device_array, "nbytes", property(_nbytes)) + setattr(device_array, "itemsize", property(_itemsize)) setattr(device_array, "clip", _clip) _set_device_array_base_attributes(device_array.DeviceArray)
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -4203,6 +4203,20 @@ def testNbytes(self, shape, dtype): self._CheckAgainstNumpy(np_op, jnp_op, args_maker) self._CompileAndCheck(jnp_op, args_maker) + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": "_{}".format( + jtu.format_shape_dtype_string(shape, dtype)), + "shape": shape, "dtype": dtype} + for shape in array_shapes + for dtype in all_dtypes)) + def testItemsize(self, shape, dtype): + rng = jtu.rand_default(self.rng()) + np_op = lambda x: np.asarray(x).itemsize + jnp_op = lambda x: jnp.asarray(x).itemsize + args_maker = lambda: [rng(shape, dtype)] + self._CheckAgainstNumpy(np_op, jnp_op, args_maker) + self._CompileAndCheck(jnp_op, args_maker) + @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_{}_dtype={}".format( jtu.format_shape_dtype_string(shape, a_dtype), dtype),
JAX arrays should expose itemsize property (bug?) Similar to issue #3985, in which JAX arrays were made to mimic NumPy's `numpy.ndarray.nbytes` property, they should also mimic the `numpy.ndarray.itemsize` property. Note that JAX **does** already expose `itemsize` in its dtypes. This unexpected lack of parity with NumPy might imply this is more like a bug than an enhancement. ```python import numpy as onp import jax.numpy as jnp a = onp.ones(0, dtype='ushort') ja = jnp.ones_like(a) print(f'{a.itemsize = }') print(f'{a.dtype.itemsize = }') if hasattr(ja, 'itemsize'): print(f'{ja.itemsize = }') else: print("jax array does not define itemsize.") print(f'{ja.dtype.itemsize = }') ``` currently outputs ``` a.itemsize = 2 a.dtype.itemsize = 2 jax array does not define itemsize. ja.dtype.itemsize = 2 ``` Thanks in advance!
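With the patch above applied, the new property simply forwards to the dtype's `itemsize`, so the parity check from the report passes; a quick sketch:

```python
import jax.numpy as jnp

ja = jnp.ones(3, dtype='uint16')
print(ja.itemsize)        # 2
print(ja.dtype.itemsize)  # 2
```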
2022-03-18T11:42:36
google/jax
9,968
google__jax-9968
[ "9899" ]
c3581a221842c09dc1b2f301012c3a01734f6b43
diff --git a/jax/_src/nn/functions.py b/jax/_src/nn/functions.py --- a/jax/_src/nn/functions.py +++ b/jax/_src/nn/functions.py @@ -16,6 +16,7 @@ from functools import partial import operator +import warnings import numpy as np from typing import Any, Optional, Tuple, Union @@ -333,7 +334,7 @@ def softmax(x: Array, return unnormalized / jnp.sum(unnormalized, axis, where=where, keepdims=True) @partial(jax.jit, static_argnames=("axis",)) -def normalize(x: Array, +def standardize(x: Array, axis: Optional[Union[int, Tuple[int, ...]]] = -1, mean: Optional[Array] = None, variance: Optional[Array] = None, @@ -351,6 +352,15 @@ def normalize(x: Array, jnp.square(x), axis, keepdims=True, where=where) - jnp.square(mean) return (x - mean) * lax.rsqrt(variance + epsilon) +def normalize(x: Array, + axis: Optional[Union[int, Tuple[int, ...]]] = -1, + mean: Optional[Array] = None, + variance: Optional[Array] = None, + epsilon: Array = 1e-5, + where: Optional[Array] = None) -> Array: + r"""Normalizes an array by subtracting ``mean`` and dividing by :math:`\sqrt{\mathrm{variance}}`.""" + warnings.warn("jax.nn.normalize will be deprecated. Use jax.nn.standardize instead.", DeprecationWarning) + return standardize(x, axis, mean, variance, epsilon, where) @partial(jax.jit, static_argnames=("num_classes", "dtype", "axis")) def _one_hot(x: Array, num_classes: int, *, diff --git a/jax/example_libraries/stax.py b/jax/example_libraries/stax.py --- a/jax/example_libraries/stax.py +++ b/jax/example_libraries/stax.py @@ -34,7 +34,7 @@ import jax.numpy as jnp from jax.nn import (relu, log_softmax, softmax, softplus, sigmoid, elu, - leaky_relu, selu, gelu, normalize) + leaky_relu, selu, gelu, standardize) from jax.nn.initializers import glorot_normal, normal, ones, zeros # aliases for backwards compatibility @@ -137,7 +137,7 @@ def apply_fun(params, x, **kwargs): # TODO(phawkins): jnp.expand_dims should accept an axis tuple. # (https://github.com/numpy/numpy/issues/12290) ed = tuple(None if i in axis else slice(None) for i in range(jnp.ndim(x))) - z = normalize(x, axis, epsilon=epsilon) + z = standardize(x, axis, epsilon=epsilon) if center and scale: return gamma[ed] * z + beta[ed] if center: return z + beta[ed] if scale: return gamma[ed] * z diff --git a/jax/nn/__init__.py b/jax/nn/__init__.py --- a/jax/nn/__init__.py +++ b/jax/nn/__init__.py @@ -32,6 +32,7 @@ log_softmax as log_softmax, logsumexp as logsumexp, normalize as normalize, + standardize as standardize, one_hot as one_hot, relu as relu, relu6 as relu6,
diff --git a/tests/nn_test.py b/tests/nn_test.py --- a/tests/nn_test.py +++ b/tests/nn_test.py @@ -126,13 +126,13 @@ def testSoftmaxWhereMask(self, fn): self.assertAllClose(out_masked, out_filtered) - def testNormalizeWhereMask(self): + def testStandardizeWhereMask(self): x = jnp.array([5.5, 1.3, -4.2, 0.9]) m = jnp.array([True, False, True, True]) x_filtered = jnp.take(x, jnp.array([0, 2, 3])) - out_masked = jnp.take(nn.normalize(x, where=m), jnp.array([0, 2, 3])) - out_filtered = nn.normalize(x_filtered) + out_masked = jnp.take(nn.standardize(x, where=m), jnp.array([0, 2, 3])) + out_filtered = nn.standardize(x_filtered) self.assertAllClose(out_masked, out_filtered)
Rename jax.nn.normalize to jax.nn.standardize As discussed: the jax.nn.normalize function actually standardizes by default, so we should name it accordingly. Nobody reading `jax.nn.normalize(x)` expects the result not to have norm-1, and [PyTorch agrees with this too](https://pytorch.org/docs/stable/generated/torch.nn.functional.normalize.html). The proposal here is just to rename to avoid user bugs. This would also open up the name `jax.nn.normalize` for a PyTorch-like implementation, but that should be as a follow-up, if desired.
Hrm, I guess we'd need to do a deprecation cycle for this. The steps may be: 1. in the first PR, rename `jax.nn.normalize` -> `jax.nn.standardize`, but also define `jax.nn.normalize` as an alias; 2. then, add a DeprecationWarning to `jax.nn.normalize` which points to the new API, and in the same CL update all Google-internal users to call `jax.nn.standardize`; 3. make a jax release; 4. wait [three months](https://jax.readthedocs.io/en/latest/api_compatibility.html); 5. either make `jax.nn.normalize` raise an error (instead of a warning) and wait some time, or just delete it; 6. optionally, repurpose `jax.nn.normalize`. The first two steps could be done in rapid succession. Anyone want to take it on? 😄 I had a go at implementing this in https://github.com/google/jax/pull/9968. Let me know if I got it wrong or any suggested changes.
2022-03-19T08:41:28
google/jax
9,971
google__jax-9971
[ "9969" ]
9ce77eb10ad37b3ce13f598a1bd568b707e88907
diff --git a/jax/_src/api.py b/jax/_src/api.py --- a/jax/_src/api.py +++ b/jax/_src/api.py @@ -1158,7 +1158,7 @@ def jacfun(*args, **kwargs): def hessian(fun: Callable, argnums: Union[int, Sequence[int]] = 0, - holomorphic: bool = False) -> Callable: + has_aux: bool = False, holomorphic: bool = False) -> Callable: """Hessian of ``fun`` as a dense array. Args: @@ -1168,6 +1168,9 @@ def hessian(fun: Callable, argnums: Union[int, Sequence[int]] = 0, containers thereof. argnums: Optional, integer or sequence of integers. Specifies which positional argument(s) to differentiate with respect to (default ``0``). + has_aux: Optional, bool. Indicates whether ``fun`` returns a pair where the + first element is considered the output of the mathematical function to be + differentiated and the second element is auxiliary data. Default False. holomorphic: Optional, bool. Indicates whether ``fun`` is promised to be holomorphic. Default False. @@ -1218,7 +1221,8 @@ def hessian(fun: Callable, argnums: Union[int, Sequence[int]] = 0, ``(out1, out2, ..., in1, in2, ..., in1, in2, ...)``. To flatten pytrees into 1D vectors, consider using :py:func:`jax.flatten_util.flatten_pytree`. """ - return jacfwd(jacrev(fun, argnums, holomorphic), argnums, holomorphic) + return jacfwd(jacrev(fun, argnums, has_aux=has_aux, holomorphic=holomorphic), + argnums, has_aux=has_aux, holomorphic=holomorphic) def _std_basis(pytree): leaves, _ = tree_flatten(pytree)
diff --git a/tests/api_test.py b/tests/api_test.py --- a/tests/api_test.py +++ b/tests/api_test.py @@ -1217,6 +1217,26 @@ def test_hessian(self): f = lambda x: jnp.dot(x, jnp.dot(A, x)) assert np.allclose(hessian(f)(x), A + A.T) + @jtu.skip_on_devices("tpu") + def test_hessian_holomorphic(self): + R = self.rng().randn + A = R(4, 4) + x = R(4) * (1 + 2j) + + f = lambda x: jnp.dot(x, jnp.dot(A, x)) + assert np.allclose(hessian(f, holomorphic=True)(x), A + A.T) + + @jtu.skip_on_devices("tpu") + def test_hessian_aux(self): + R = self.rng().randn + A = R(4, 4) + x = R(4) + + f = lambda x: (jnp.dot(x, jnp.dot(A, x)), x) + h, aux = hessian(f, has_aux=True)(x) + assert np.allclose(h, A + A.T) + assert np.allclose(aux, x) + def test_std_basis(self): basis = api._std_basis(jnp.zeros(3)) assert getattr(basis, "shape", None) == (3, 3)
jax.hessian uses positional argument `holomorphic` where it should be `has_aux` EDIT: Fixed a Typo. Hi, this is a bit of a minor issue, but the jax source implementation of `hessian` has a slight mixup (or misses) in the function arguments. It uses `holomorphic` in place of `has_aux`, which is slightly confusing. As shown below (see https://jax.readthedocs.io/en/latest/_modules/jax/_src/api.html): ```python def hessian(fun: Callable, argnums: Union[int, Sequence[int]] = 0, holomorphic: bool = False) -> Callable: """Doc ... """ return jacfwd(jacrev(fun, argnums, holomorphic), argnums, holomorphic) ``` However, the argument `holomorphic` is used in the positional arg of `has_aux` for both jacfwd and jacrev: ```python def jacfwd(fun: Callable, argnums: Union[int, Sequence[int]] = 0, has_aux: bool = False, holomorphic: bool = False) -> Callable: ... ``` Current behaviour: ```python import jax def f(x): return jax.numpy.sin(x) def f_aux(x): return f(x), x print(jax.hessian(f)(10.)) # 0.5440211 print(jax.hessian(f_aux, holomorphic=True)(10.)) # (DeviceArray(0.5440211, dtype=float32, weak_type=True), 10.0) print(jax.hessian(f, holomorphic=True)(10.)) # TypeError: expected function with aux output to return a two-element tuple, but got type ... ``` Should be: ```python import jax def f(x): return jax.numpy.sin(x) def f_aux(x): return f(x), x print(jax.hessian(f)(10.)) # 0.5440211 print(jax.hessian(f_aux, has_aux=True)(10.)) # (DeviceArray(0.5440211, dtype=float32, weak_type=True), 10.0) print(jax.hessian(f, has_aux=True)(10.)) # TypeError: expected function with aux output to return a two-element tuple, but got type ... ``` Fix: ```python def hessian( fun: Callable, argnums: Union[int, Sequence[int]] = 0, has_aux: bool = False, holomorphic: bool = False ) -> Callable: """Doc ...""" return jacrev(jacfwd(fun, argnums, has_aux=has_aux, holomorphic=holomorphic), argnums, has_aux=has_aux, holomorphic=holomorphic) ```
2022-03-20T05:55:27
google/jax
9,992
google__jax-9992
[ "9991" ]
f1f1af9c0d2817aab1635d2efb0fb481d7b4d340
diff --git a/jax/interpreters/mlir.py b/jax/interpreters/mlir.py --- a/jax/interpreters/mlir.py +++ b/jax/interpreters/mlir.py @@ -46,6 +46,15 @@ import jax.interpreters.xla as xla import numpy as np +# TODO(jakevdp): remove this when minimum_jaxlib_version >= 0.3.3 +if jax._src.lib.mlir_api_version >= 4: + FuncOp = func_dialect.FuncOp +else: + from jax._src.lib.mlir.dialects import builtin + FuncOp = builtin.FuncOp +# mypy gets confused by conditional imports, so alias to Any for now. +FuncOpType = Any + map, unsafe_map = util.safe_map, map zip, unsafe_zip = util.safe_zip, zip @@ -341,7 +350,7 @@ class ModuleContext: name_stack: NameStack # Cached primitive lowerings. - cached_primitive_lowerings: Dict[Any, func_dialect.FuncOp] + cached_primitive_lowerings: Dict[Any, FuncOpType] @property def axis_env(self) -> xla.AxisEnv: @@ -356,8 +365,7 @@ def __init__( module: Optional[ir.Module] = None, ip: Optional[ir.InsertionPoint] = None, symbol_table: Optional[ir.SymbolTable] = None, - cached_primitive_lowerings: Optional[Dict[Any, - func_dialect.FuncOp]] = None): + cached_primitive_lowerings: Optional[Dict[Any, FuncOpType]] = None): assert platform is not None self.context = context or make_ir_context() self.module = module or ir.Module.create(loc=ir.Location.unknown(self.context)) @@ -539,7 +547,7 @@ def lower_jaxpr_to_fun( arg_shardings: Optional[Sequence[Optional[xc.OpSharding]]] = None, result_shardings: Optional[Sequence[Optional[xc.OpSharding]]] = None, input_output_aliases: Optional[Sequence[Optional[int]]] = None -) -> func_dialect.FuncOp: +) -> FuncOpType: """Lowers jaxpr and its callees to an IR function. Assumes that an MLIR context, location, and insertion point are set. @@ -573,7 +581,7 @@ def aval_to_types(aval): flat_input_types = util.flatten(input_types) flat_output_types = util.flatten(output_types) ftype = ir.FunctionType.get(flat_input_types, flat_output_types) - func_op = func_dialect.FuncOp(name, ftype, ip=ctx.ip) + func_op = FuncOp(name, ftype, ip=ctx.ip) func_op.attributes["sym_visibility"] = ir.StringAttr.get( "public" if public else "private") ctx.symbol_table.insert(func_op) @@ -655,7 +663,7 @@ def aval_to_types(aval): return func_op def _emit_lowering_rule_as_fun(lowering_rule, - ctx: LoweringRuleContext) -> func_dialect.FuncOp: + ctx: LoweringRuleContext) -> FuncOpType: """Emits the contents of a lowering rule as a private function.""" input_types = map(aval_to_ir_types, ctx.avals_in) output_types = map(aval_to_ir_types, ctx.avals_out) @@ -663,8 +671,7 @@ def _emit_lowering_rule_as_fun(lowering_rule, flat_output_types = util.flatten(output_types) ftype = ir.FunctionType.get(flat_input_types, flat_output_types) assert ctx.primitive is not None - func_op = func_dialect.FuncOp( - ctx.primitive.name, ftype, ip=ctx.module_context.ip) + func_op = FuncOp(ctx.primitive.name, ftype, ip=ctx.module_context.ip) func_op.attributes["sym_visibility"] = ir.StringAttr.get("private") ctx.module_context.symbol_table.insert(func_op) entry_block = func_op.add_entry_block()
LLVM update broke tests on main f1f1af9c0d2817aab1635d2efb0fb481d7b4d340 broke our tests. I think it's a version skew issue: the current version should probably work correctly with the next jaxlib release, but we need to gate some things so that it will remain compatible with the current minimum jaxlib.
2022-03-22T18:09:07
google/jax
9,996
google__jax-9996
[ "9985" ]
622107cde7620052ad0d7503e727d8ca7af469d8
diff --git a/jax/_src/lax/control_flow.py b/jax/_src/lax/control_flow.py --- a/jax/_src/lax/control_flow.py +++ b/jax/_src/lax/control_flow.py @@ -971,10 +971,15 @@ def _cond_batching_rule(axis_size, axis_name, main_type, args, dims, branches, l # for the select we broadcast the input operands for simplicity and leave # optimizations to XLA. # TODO(mattjj,frostig): assumes branches are side-effect-free, revise! - index, *ops = [batching.bdim_at_front(x, d, axis_size) for x, d in zip(args, dims)] + index, *ops = [ + batching.bdim_at_front(x, d, axis_size) for x, d in zip(args, dims)] + + in_batched = [a is not core.abstract_unit for a in branches[0].in_avals] + out_batched = [a is not core.abstract_unit for a in branches[0].out_avals] branches_batched = [ - batching.batch_jaxpr(jaxpr, axis_size, [True] * len(ops), True, axis_name, main_type)[0] + batching.batch_jaxpr( + jaxpr, axis_size, in_batched, out_batched, axis_name, main_type)[0] for jaxpr in branches] branch_outs = [] @@ -987,7 +992,7 @@ def _cond_batching_rule(axis_size, axis_name, main_type, args, dims, branches, l branch_outs.append(core.jaxpr_as_fun(jaxpr)(*ops_)) out = [_bcast_select_n(index, *outs) if outs[0] is not core.unit else outs[0] for outs in zip(*branch_outs)] - return out, [0] * len(branch_outs[0]) + return out, [0 if b else None for b in out_batched] else: ops_bat = [d is not batching.not_mapped for d in op_dims] ops = [batching.moveaxis(x, d, 0) if b else x diff --git a/jax/interpreters/batching.py b/jax/interpreters/batching.py --- a/jax/interpreters/batching.py +++ b/jax/interpreters/batching.py @@ -118,6 +118,7 @@ def __init__(self, trace, val, batch_dim: Optional[int], if type(batch_dim) is int: aval = raise_to_shaped(core.get_aval(val)) assert aval is core.abstract_unit or 0 <= batch_dim < len(aval.shape) # type: ignore + assert aval is not core.abstract_unit or batch_dim is not_mapped self._trace = trace self.val = val self.batch_dim = batch_dim
diff --git a/tests/lax_control_flow_test.py b/tests/lax_control_flow_test.py --- a/tests/lax_control_flow_test.py +++ b/tests/lax_control_flow_test.py @@ -2393,5 +2393,11 @@ def body(carry): return lax.while_loop(cond, body, (i, jnp.ones(3)))[1] jax.vmap(f, in_axes=(0, 1))(jnp.arange(4), jnp.ones((3, 4))) + def test_cond_ad_batched_unit(self): + # see issue #9985 + def cond_id(x): + return lax.cond(x < 0., lambda x: x, lambda x: x, x) + jax.vmap(jax.jacrev(lambda x: cond_id(cond_id(x))))(jnp.ones(1)) + if __name__ == '__main__': absltest.main(testLoader=jtu.JaxTestLoader())
AttributeError: 'Unit' object has no attribute 'shape' while tracing function. What does it mean?? ### Discussed in https://github.com/google/jax/discussions/9979 <div type='discussions-op-text'> <sup>Originally posted by **RadostW** March 21, 2022</sup> Inside a nested scan when using `jax.lax.cond` I get error message that is hard to understand. Original code was very complex, sadly minimal example I could make is still quite complex. Bizzarly, when changing small details code runs fine (see commented lines). Working fine: `x * d_www[0,0,0] # Ok` `x * f_www(x)[0,0,0,0] # Ok` Throwing hard to understand error `jnp.array([1.0]) * f_www(x)[0, 0, 0, 0] * d_www[0, 0, 0] # AttributeError` Which makes me think that both `d_www` creation and `f_www` creation work fine. I've found no similar issues when searching. ~~R ```python import jax.numpy as jnp import jax def upper(y): return jnp.exp(y) def lower(y): return jnp.exp(2 * y) def branch_fun(q): x = q[0] ret = jax.lax.cond(x > 1.0, upper, lower, x) return jnp.array([[ret]]) def L_w(f): def wrapped(x): return jax.numpy.tensordot(jax.jacobian(f)(x), branch_fun(x), axes=1) return wrapped def solve_many(): t0 = 0.0 w0 = jax.numpy.zeros(1) id_ = lambda x: x f_www = L_w(L_w(L_w(id_))) def step( x, d_www, ): new_x = ( # x * d_www[0,0,0] # Ok # x * f_www(x)[0,0,0,0] # Ok jnp.array([1.0]) * f_www(x)[0, 0, 0, 0] * d_www[0, 0, 0] # AttributeError # x * f_www(x)[0,0,0,0] * d_www[0,0,0] # AttributeError # contract_all(f_www(x), d_www) # AttributeError # f_www(x)[:,0,0,0] # AttributeError ) return new_x key = jax.random.PRNGKey(0) def scan_func(x, y): xp = step(x, y) return (xp, xp) def chunk_function(x, y): z = jax.lax.scan(scan_func, jnp.array([0.1]), y)[0] return z, z def get_solution_fragment(starting_state, key): wiener_integrals = jax.random.normal(key, shape=(4, 1, 1, 1)) last_state, solution_values = jax.lax.scan( chunk_function, starting_state, jnp.reshape(wiener_integrals, (2, 2, 1, 1, 1)), ) return (last_state, last_state) @jax.vmap def get_solution(key): _, chunked_solution = jax.lax.scan( lambda state, key: get_solution_fragment(state, key), (jnp.array([0.1])), jax.random.split(key, 1), ) return chunked_solution keys = jax.random.split(key, 1) solutions = get_solution(keys) return solutions def test_branched_coefficients(): trajectories = solve_many() if __name__ == "__main__": trajectories = solve_many() ``` ``` The above exception was the direct cause of the following exception: Traceback (most recent call last): File "test_branched_coefficients.py", line 91, in <module> trajectories = solve_many() File "test_branched_coefficients.py", line 81, in solve_many solutions = get_solution(keys) File "test_branched_coefficients.py", line 72, in get_solution _, chunked_solution = jax.lax.scan( AttributeError: 'Unit' object has no attribute 'shape' ``` </div>
minimal repro: ```python import jax.numpy as jnp import jax id_f = lambda x: x def b(x): return jax.lax.cond(x[0], id_f, id_f, x) # jnp.where(x[0], id_f(x), id_f(x)) is ok f = jax.jacrev(lambda x: b(b(x))) # jacfwd is ok def repro(): jax.vmap(f)(jnp.ones((1,1))) # without vmap is ok if __name__ == "__main__": with jax.disable_jit(): repro() ```
2022-03-23T00:46:16
google/jax
10,027
google__jax-10027
[ "10025" ]
a04b777c545bb7ef0bb1e07bb14e845b0a1852bd
diff --git a/jax/_src/lax/control_flow.py b/jax/_src/lax/control_flow.py --- a/jax/_src/lax/control_flow.py +++ b/jax/_src/lax/control_flow.py @@ -1854,8 +1854,7 @@ def _scan_partial_eval(trace, *tracers, reverse, length, num_consts, num_carry, out_extensive = [next(out_extensive_iter) if i is None else _maybe_device_put(tracers[i].pval[1]) if tracers[i].is_known() else tracers[i] for i in fwd_extensive] - assert all(a.strip_named_shape() == core.raise_to_shaped( - core.get_aval(out)).strip_named_shape() + assert all(core.typematch(a, core.get_aval(out)) for a, out in zip(extensive_avals, out_extensive)) out_flat = out_carry + out_extensive diff --git a/jax/core.py b/jax/core.py --- a/jax/core.py +++ b/jax/core.py @@ -1880,7 +1880,7 @@ def _map_shaped_array(size: int, axis: Optional[int], aval: ShapedArray # TODO: Extend the named shape if axis is None: return aval return ShapedArray(tuple_delete(aval.shape, axis), aval.dtype, - named_shape=aval.named_shape) + named_shape=aval.named_shape, weak_type=aval.weak_type) def _unmap_shaped_array(size: int, axis_name, axis: Optional[int], aval: ShapedArray) -> ShapedArray: @@ -1889,7 +1889,7 @@ def _unmap_shaped_array(size: int, axis_name, axis: Optional[int], named_shape.pop(axis_name, None) if axis is None: return aval.replace(named_shape=named_shape) return ShapedArray(tuple_insert(aval.shape, axis, size), aval.dtype, - named_shape=named_shape) + named_shape=named_shape, weak_type=aval.weak_type) AvalMapHandlerPair = Tuple[Callable, Callable] aval_mapping_handlers: Dict[Type, AvalMapHandlerPair] = {
diff --git a/tests/batching_test.py b/tests/batching_test.py --- a/tests/batching_test.py +++ b/tests/batching_test.py @@ -24,6 +24,7 @@ import jax import jax.numpy as jnp import jax.scipy as jsp +from jax._src import dtypes from jax._src import test_util as jtu from jax import lax from jax._src.lax import parallel @@ -1280,6 +1281,17 @@ def ppermute(input): ans = vmapped_gradients_fn(vector) # doesn't crash self.assertAllClose(ans, jnp.ones(2), check_dtypes=False) + def testBatchingPreservesWeakType(self): + # Regression test for https://github.com/google/jax/issues/10025 + x = jnp.ravel(1) + self.assertTrue(dtypes.is_weakly_typed(x)) + @vmap + def f(x): + self.assertTrue(dtypes.is_weakly_typed(x), f"{x} is not weakly-typed") + return x + y = f(x) + self.assertTrue(dtypes.is_weakly_typed(y)) + Array = Any ArrayElt = Any
Batch tracers strip weak type ```python from jax import vmap import jax.numpy as jnp x = jnp.arange(1, dtype='uint8') # strongly-typed uint8 y = jnp.ravel(1) # weakly-typed int32 print(jnp.add(x, y).dtype) # uint8 print(vmap(jnp.add)(x, y).dtype) # int32 ``` This is causing the test failure in #9529 Edit: showing this more directly: ```python def print_avals(y): print(f"{y.aval=}") print_avals(y) # y.aval=ShapedArray(int32[1], weak_type=True) vmap(print_avals)(y) # y.aval=ShapedArray(int32[]) ```
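Background for the dtype change above, a sketch relying on JAX's standard promotion rules (not code from the fix): weakly-typed values, i.e. those originating from Python scalars, defer to the other operand's dtype, so silently dropping `weak_type` under `vmap` changes the promotion result.

```python
import jax.numpy as jnp

x = jnp.arange(1, dtype='uint8')  # strongly-typed uint8
y = jnp.ravel(1)                  # weakly-typed int32, born from a Python int
print((x + y).dtype)              # uint8: the weak int32 defers to uint8
print((x + jnp.int32(1)).dtype)   # int32: a strong int32 wins the promotion
```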
2022-03-24T18:50:52
google/jax
10,037
google__jax-10037
[ "9661" ]
563e0c6ba883d23788f058cd5fb714e4c1cfb813
diff --git a/jax/interpreters/ad.py b/jax/interpreters/ad.py --- a/jax/interpreters/ad.py +++ b/jax/interpreters/ad.py @@ -28,7 +28,7 @@ from jax._src.ad_util import (add_jaxvals, add_jaxvals_p, zeros_like_jaxval, zeros_like_aval, zeros_like_p, Zero) from jax._src.util import (unzip2, safe_map, safe_zip, split_list, wrap_name, - as_hashable_function, cache) + as_hashable_function, weakref_lru_cache) from jax.tree_util import register_pytree_node from jax import linear_util as lu from jax._src.api_util import flatten_fun, flatten_fun_nokwargs @@ -586,7 +586,8 @@ def traceable(num_primals, in_tree_def, *primals_and_tangents): def call_transpose(primitive, params, call_jaxpr, args, ct, _, reduce_axes): all_args, in_tree_def = tree_flatten(((), args, ct)) # empty consts - fun = lu.hashable_partial(lu.wrap_init(backward_pass), call_jaxpr, reduce_axes, False) + fun = lu.hashable_partial(lu.wrap_init(backward_pass), call_jaxpr, + reduce_axes, False) fun, out_tree = flatten_fun_nokwargs(fun, in_tree_def) if config.jax_experimental_name_stack: new_params = params @@ -603,31 +604,29 @@ def call_transpose(primitive, params, call_jaxpr, args, ct, _, reduce_axes): def remat_transpose(params, call_jaxpr, primals_in, cotangents_in, cotangent_in_avals, reduce_axes): - # backward_pass can only transpose linear computations, but the call_jaxpr embedded in - # remat contains primal (non-linear) equations too. Hence, we have to eliminate those - # (in this case via partial_eval) before we call into backward_pass again. - typed_call_jaxpr = core.ClosedJaxpr(call_jaxpr, []) + call_jaxpr = _close_jaxpr(call_jaxpr) unknowns = map(is_undefined_primal, primals_in) - primal_jaxpr, tangent_jaxpr, out_unknowns = \ - pe.partial_eval_jaxpr(typed_call_jaxpr, unknowns=unknowns, instantiate=True) # type: ignore - - def do_transpose(primals_in, cotangents_in): - # NOTE: This is passing in undefined primals in place of tangent arguments, but it - # should all work out, because we're only computing the primal part here. - residuals = core.jaxpr_as_fun(primal_jaxpr)(*primals_in)[len(cotangents_in):] - # Now that we have a purely linear jaxpr, we can transpose it - cotangents_out = backward_pass( - tangent_jaxpr.jaxpr, reduce_axes, False, (), primals_in + residuals, cotangents_in) - # backward_pass will return cotangents computed for all invars, but some of them - # are residuals appended by partial eval, so we need to skip those before we return. 
- return cotangents_out[:len(primals_in)] - - flat_args, in_tree_def = tree_flatten((primals_in, cotangents_in)) - flat_do_transpose, out_tree = flatten_fun_nokwargs(lu.wrap_init(do_transpose), in_tree_def) - flat_cotangents_out = pe.remat_call_p.bind(flat_do_transpose, *flat_args, **params) + primal_jaxpr, tangent_jaxpr, _ = \ + pe.partial_eval_jaxpr(call_jaxpr, unknowns=unknowns, instantiate=True) # type: ignore + args, in_tree_def = tree_flatten((primals_in, cotangents_in)) + transpose = lu.hashable_partial(lu.wrap_init(_remat_transpose), primal_jaxpr, + tangent_jaxpr, reduce_axes) + flat_transpose, out_tree = flatten_fun_nokwargs(transpose, in_tree_def) + flat_cotangents_out = pe.remat_call_p.bind(flat_transpose, *args, **params) return tree_unflatten(out_tree(), flat_cotangents_out) primitive_transposes[pe.remat_call_p] = remat_transpose +def _remat_transpose(primal_jaxpr, tangent_jaxpr, reduce_axes, + primals_in, cotangents_in): + res = core.jaxpr_as_fun(primal_jaxpr)(*primals_in)[len(cotangents_in):] + cotangents_out = backward_pass(tangent_jaxpr.jaxpr, reduce_axes, False, (), + (*primals_in, *res), cotangents_in) + return cotangents_out[:len(primals_in)] + +@weakref_lru_cache +def _close_jaxpr(jaxpr: core.Jaxpr) -> core.ClosedJaxpr: + return core.ClosedJaxpr(jaxpr, []) + @lu.transformation_with_aux def nonzero_outputs(*args, **kwargs): results = yield args, kwargs @@ -680,7 +679,7 @@ def jvp_jaxpr(jaxpr, nonzeros, instantiate): inst = tuple(instantiate) if isinstance(instantiate, list) else instantiate return _jvp_jaxpr(jaxpr, tuple(nonzeros), inst) -@cache() +@weakref_lru_cache def _jvp_jaxpr(jaxpr, nonzeros, instantiate): assert len(jaxpr.in_avals) == len(nonzeros) f = lu.wrap_init(core.jaxpr_as_fun(jaxpr))
diff --git a/tests/api_test.py b/tests/api_test.py --- a/tests/api_test.py +++ b/tests/api_test.py @@ -4328,10 +4328,19 @@ def test_linearize_caching(self): identity = jax.checkpoint(jax.jit(lambda x: 2 * x)) _, f_lin = jax.linearize(identity, 1.) with jtu.count_jit_and_pmap_compiles() as count: # noqa: F841 - for _ in range(10): + for _ in range(20): f_lin(1.).block_until_ready() self.assertEqual(count[0], 1) # cached after first execution + def test_vjp_caching(self): + # https://github.com/google/jax/issues/9661 + identity = jax.checkpoint(jax.jit(lambda x: 2 * x)) + _, f_vjp = jax.vjp(identity, 1.) + with jtu.count_jit_and_pmap_compiles() as count: # noqa: F841 + for _ in range(20): + f_vjp(1.)[0].block_until_ready() + self.assertEqual(count[0], 2) # eval_jaxpr on fwd, backward_pass on bwd + class JaxprTest(jtu.JaxTestCase):
Using `jax.jit` inside a function decorated by `jax.checkpoint` causes recompilation every time Using a jitted function inside a function decorated by `jax.checkpoint` causes a lot of extra compilations even if the arguments still have the same shape. Calculating the gradient for such a function causes a memory leak in the long rung since all the compiled jitted functions seem to be stored in the memory. This can be observed by the high memory footprint of `backend_compile` which cannot be seen if the checkpointing is disabled. A self-consistent example would be: ```python import jax import jax.numpy as jnp @jax.jit def f(a): return jnp.sum(a) @jax.checkpoint def g(a): return f(a) arr = jnp.array([[1,2,3,4,5],[6,7,8,9,10]], dtype=float) g_v_and_grad = jax.value_and_grad(g) for i in range(3): working_arr = arr + i print(g_v_and_grad(working_arr)) ``` Running the script with `env JAX_LOG_COMPILES=1` enabled one can observe: ``` WARNING:absl:Finished tracing + transforming prim_fun for jit in 0.0002334117889404297 sec WARNING:absl:Finished tracing + transforming fn for jit in 0.0003993511199951172 sec WARNING:absl:Compiling fn (139703279463296 for args (ShapedArray(float32[2,5]), ShapedArray(int32[], weak_type=True)). WARNING:absl:Finished XLA compilation of fn in 0.04700160026550293 sec WARNING:absl:Finished tracing + transforming f for jit in 0.0010411739349365234 sec WARNING:absl:Finished tracing + transforming <unnamed wrapped function> for jit in 0.00015473365783691406 sec WARNING:absl:Compiling <unnamed wrapped function> (139703209762752 for args (ShapedArray(float32[2,5]),). WARNING:absl:Finished XLA compilation of jvp(f) in 0.04526233673095703 sec WARNING:absl:Finished tracing + transforming prim_fun for jit in 0.00016546249389648438 sec WARNING:absl:Finished tracing + transforming <unnamed wrapped function> for jit in 0.00014591217041015625 sec WARNING:absl:Compiling <unnamed wrapped function> (139703209798976 for args (ShapedArray(float32[2,5]),). WARNING:absl:Finished XLA compilation of jvp(f) in 0.00750732421875 sec WARNING:absl:Finished tracing + transforming backward_pass for jit in 0.0011491775512695312 sec WARNING:absl:Compiling backward_pass (139703209802560 for args (ShapedArray(float32[]),). WARNING:absl:Finished XLA compilation of transpose(jvp(f)) in 0.041948556900024414 sec (DeviceArray(55., dtype=float32), DeviceArray([[1., 1., 1., 1., 1.], [1., 1., 1., 1., 1.]], dtype=float32)) WARNING:absl:Finished tracing + transforming <unnamed wrapped function> for jit in 0.00014543533325195312 sec WARNING:absl:Compiling <unnamed wrapped function> (139703209800384 for args (ShapedArray(float32[2,5]),). WARNING:absl:Finished XLA compilation of jvp(f) in 0.007508516311645508 sec WARNING:absl:Finished tracing + transforming <unnamed wrapped function> for jit in 0.0001461505889892578 sec WARNING:absl:Compiling <unnamed wrapped function> (139703209863232 for args (ShapedArray(float32[2,5]),). WARNING:absl:Finished XLA compilation of jvp(f) in 0.007668972015380859 sec WARNING:absl:Finished tracing + transforming backward_pass for jit in 0.0005974769592285156 sec WARNING:absl:Compiling backward_pass (139703209362624 for args (ShapedArray(float32[]),). 
WARNING:absl:Finished XLA compilation of transpose(jvp(f)) in 0.005425214767456055 sec (DeviceArray(65., dtype=float32), DeviceArray([[1., 1., 1., 1., 1.], [1., 1., 1., 1., 1.]], dtype=float32)) WARNING:absl:Finished tracing + transforming <unnamed wrapped function> for jit in 0.00014638900756835938 sec WARNING:absl:Compiling <unnamed wrapped function> (139703209350720 for args (ShapedArray(float32[2,5]),). WARNING:absl:Finished XLA compilation of jvp(f) in 0.007513523101806641 sec WARNING:absl:Finished tracing + transforming <unnamed wrapped function> for jit in 0.00015473365783691406 sec WARNING:absl:Compiling <unnamed wrapped function> (139703209372160 for args (ShapedArray(float32[2,5]),). WARNING:absl:Finished XLA compilation of jvp(f) in 0.007587909698486328 sec WARNING:absl:Finished tracing + transforming backward_pass for jit in 0.0005350112915039062 sec WARNING:absl:Compiling backward_pass (139703209370048 for args (ShapedArray(float32[]),). WARNING:absl:Finished XLA compilation of transpose(jvp(f)) in 0.0054433345794677734 sec (DeviceArray(75., dtype=float32), DeviceArray([[1., 1., 1., 1., 1.], [1., 1., 1., 1., 1.]], dtype=float32)) ``` Comment out the checkpoint decorator leads to the wanted behavior: ``` WARNING:absl:Finished tracing + transforming prim_fun for jit in 0.0002498626708984375 sec WARNING:absl:Finished tracing + transforming fn for jit in 0.00040721893310546875 sec WARNING:absl:Compiling fn (140693235752000 for args (ShapedArray(float32[2,5]), ShapedArray(int32[], weak_type=True)). WARNING:absl:Finished XLA compilation of fn in 0.04748940467834473 sec WARNING:absl:Finished tracing + transforming f for jit in 0.0010097026824951172 sec WARNING:absl:Compiling f (140692730754112 for args (ShapedArray(float32[2,5]),). WARNING:absl:Finished XLA compilation of jvp(f) in 0.04457998275756836 sec WARNING:absl:Finished tracing + transforming prim_fun for jit in 0.0001583099365234375 sec WARNING:absl:Finished tracing + transforming backward_pass for jit in 0.0004944801330566406 sec WARNING:absl:Compiling backward_pass (140692730730304 for args (ShapedArray(float32[]),). WARNING:absl:Finished XLA compilation of transpose(jvp(f)) in 0.041858673095703125 sec (DeviceArray(55., dtype=float32), DeviceArray([[1., 1., 1., 1., 1.], [1., 1., 1., 1., 1.]], dtype=float32)) (DeviceArray(65., dtype=float32), DeviceArray([[1., 1., 1., 1., 1.], [1., 1., 1., 1., 1.]], dtype=float32)) (DeviceArray(75., dtype=float32), DeviceArray([[1., 1., 1., 1., 1.], [1., 1., 1., 1., 1.]], dtype=float32)) ```
Hey, since this is a big blocker for our project at the moment, I would be very thankful if some of the jax-internals experts could help with this issue. I tried to dig into the code to find out why this is happening but found nothing yet :( Thanks for pinging this and highlighting that it's a blocker. Sorry for not getting to it sooner!
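The fix above swaps the plain `@cache()` for `@weakref_lru_cache` and routes the transpose through hashable partials, so repeated backward passes reuse the same cache entries instead of retracing and recompiling. As a rough illustration of the caching idea (a simplified sketch, not JAX's actual `weakref_lru_cache`, which additionally evicts entries when the referenced object is garbage collected): the cache is keyed on a weak reference to its first argument, so hits occur whenever the same jaxpr object recurs, while the cache itself does not keep that object alive.

```python
import functools
import weakref

def weakref_lru_cache(fn, maxsize=128):
  # Key the cache on a weakref to the first argument: weakrefs to the same live
  # object compare and hash equal, so repeated calls with that object hit the
  # cache, and caching the weakref does not extend the object's lifetime.
  @functools.lru_cache(maxsize=maxsize)
  def cached(ref, *rest):
    return fn(ref(), *rest)
  def wrapper(first, *rest):
    return cached(weakref.ref(first), *rest)
  return wrapper
```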
2022-03-25T23:29:09
google/jax
10,049
google__jax-10049
[ "10045" ]
a68b0f3a0a99d319fbfd7ddb7ff6942bf46f667e
diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py --- a/jax/_src/numpy/lax_numpy.py +++ b/jax/_src/numpy/lax_numpy.py @@ -1966,6 +1966,27 @@ def array_equiv(a1, a2): empty = zeros +# General np.from* style functions mostly delegate to numpy. + +@_wraps(np.frombuffer) +def frombuffer(buffer, dtype=float, count=-1, offset=0): + return asarray(np.frombuffer(buffer=buffer, dtype=dtype, count=count, offset=offset)) + + +@_wraps(np.fromfunction) +def fromfunction(function, shape, *, dtype=float, **kwargs): + shape = core.canonicalize_shape(shape, context="shape argument of jnp.fromfunction()") + for i in range(len(shape)): + in_axes = [0 if i == j else None for j in range(len(shape))] + function = jax.vmap(function, in_axes=tuple(in_axes[::-1])) + return function(*(arange(s, dtype=dtype) for s in shape), **kwargs) + + +@_wraps(np.fromstring) +def fromstring(string, dtype=float, count=-1, *, sep): + return asarray(np.fromstring(string=string, dtype=dtype, count=count, sep=sep)) + + @_wraps(np.eye) def eye(N, M=None, k=0, dtype=None): lax_internal._check_user_dtype_supported(dtype, "eye") diff --git a/jax/numpy/__init__.py b/jax/numpy/__init__.py --- a/jax/numpy/__init__.py +++ b/jax/numpy/__init__.py @@ -122,6 +122,9 @@ floating as floating, fmax as fmax, fmin as fmin, + frombuffer as frombuffer, + fromfunction as fromfunction, + fromstring as fromstring, full as full, full_like as full_like, gcd as gcd,
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -5922,6 +5922,27 @@ def testDefaultDtypes(self): self.assertEqual(jnp.float_, np.float32 if precision == '32' else np.float64) self.assertEqual(jnp.complex_, np.complex64 if precision == '32' else np.complex128) + def testFromBuffer(self): + buf = b'\x01\x02\x03' + expected = np.frombuffer(buf, dtype='uint8') + actual = jnp.frombuffer(buf, dtype='uint8') + self.assertArraysEqual(expected, actual) + + def testFromFunction(self): + def f(x, y, z): + return x + 2 * y + 3 * z + shape = (3, 4, 5) + expected = np.fromfunction(f, shape=shape) + actual = jnp.fromfunction(f, shape=shape) + self.assertArraysEqual(expected, actual) + + def testFromString(self): + s = "1,2,3" + expected = np.fromstring(s, sep=',', dtype=int) + actual = jnp.fromstring(s, sep=',', dtype=int) + self.assertArraysEqual(expected, actual) + + # Most grad tests are at the lax level (see lax_test.py), but we add some here # as needed for e.g. particular compound ops of interest. @@ -6093,6 +6114,7 @@ def testWrappedSignaturesMatch(self): 'identity': ['like'], 'full': ['order', 'like'], 'full_like': ['subok', 'order'], + 'fromfunction': ['like'], 'histogram': ['normed'], 'histogram2d': ['normed'], 'histogramdd': ['normed'],
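A quick usage sketch for the `jnp.fromfunction` added above, a smaller variant of the new test (the expected values are what NumPy's `fromfunction` produces for the same callable): the callable is `vmap`-ed over one axis per dimension, so it must be written with JAX-compatible operations.

```python
import jax.numpy as jnp

x = jnp.fromfunction(lambda i, j: i + 10 * j, shape=(2, 3), dtype=jnp.float32)
# [[ 0. 10. 20.]
#  [ 1. 11. 21.]]
```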
Support for `numpy.fromiter` As far as I can tell there is not an existing issue or implementation for `jax.numpy.fromiter`, which currently raises: ``` [NotImplementedError: Numpy function <built-in function fromiter> not yet implemented]() ```
Thanks for the issue! We haven't implemented this because there's not really any way to do better than `jnp.asarray(np.fromiter(x))`, though we could certainly add that function as a convenience. What do you think? That's exactly what I ended up doing – I think it would be neat to add it just to get more function parity with `numpy`, even if it doesn't really do anything clever. @jakevdp Jax could actually do something 'clever' for `fromiter(x)`, as `jnp.asarray(np.fromiter(x))` materelize everything eagerly. That said, the staging behaviour can be achieved with something like `jnp.array(list(x))`. Initial implementations of `fromiter` and related functions in #10049. Thanks! Wow! Very impressive turn-around time on these issues! Thank you so much @jakevdp! Some of the functions in this family—namely `fromiter` and `fromfile`—are inherently side-effecting and thus incompatible with JAX staging (`jit` etc.). I think we should avoid introducing those into `jax.numpy`. Parity with standard numpy is only one goal; others include behavior invariance under `jit` and avoiding impure functions in our core libraries. Other functions seem like a plus. For instance, in the context of staging, `fromstring` could be a convenient way to introduce a constant. Thanks Roy - good points! I'll make the modifications to #10049
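The workaround suggested in the thread, as a short sketch: materialize the iterator with NumPy outside of any traced code and hand the result to JAX. Because the iterator is consumed, this is impure and should stay out of `jit`-transformed functions.

```python
import numpy as np
import jax.numpy as jnp

it = (i * i for i in range(5))
x = jnp.asarray(np.fromiter(it, dtype=np.int32))  # materializes eagerly, outside jit
```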
2022-03-28T19:01:32
google/jax
10,072
google__jax-10072
[ "10045" ]
e08bc27bf0bfd5cfeb8c1bdc52386561c0296e6d
diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py --- a/jax/_src/numpy/lax_numpy.py +++ b/jax/_src/numpy/lax_numpy.py @@ -1973,6 +1973,42 @@ def frombuffer(buffer, dtype=float, count=-1, offset=0): return asarray(np.frombuffer(buffer=buffer, dtype=dtype, count=count, offset=offset)) +def fromfile(*args, **kwargs): + """Unimplemented JAX wrapper for jnp.fromfile. + + This function is left deliberately unimplemented because it may be non-pure and thus + unsafe for use with JIT and other JAX transformations. Consider using + ``jnp.asarray(np.fromfile(...))`` instead, although care should be taken if ``np.fromfile`` + is used within jax transformations because of its potential side-effect of consuming the + file object; for more information see `Common Gotchas: Pure Functions + <https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#pure-functions>`_. + """ + raise NotImplementedError( + "jnp.fromfile() is not implemented because it may be non-pure and thus unsafe for use " + "with JIT and other JAX transformations. Consider using jnp.asarray(np.fromfile(...)) " + "instead, although care should be taken if np.fromfile is used within a jax transformations " + "because of its potential side-effect of consuming the file object; for more information see " + "https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#pure-functions") + + +def fromiter(*args, **kwargs): + """Unimplemented JAX wrapper for jnp.fromiter. + + This function is left deliberately unimplemented because it may be non-pure and thus + unsafe for use with JIT and other JAX transformations. Consider using + ``jnp.asarray(np.fromiter(...))`` instead, although care should be taken if ``np.fromiter`` + is used within jax transformations because of its potential side-effect of consuming the + iterable object; for more information see `Common Gotchas: Pure Functions + <https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#pure-functions>`_. + """ + raise NotImplementedError( + "jnp.fromiter() is not implemented because it may be non-pure and thus unsafe for use " + "with JIT and other JAX transformations. Consider using jnp.asarray(np.fromiter(...)) " + "instead, although care should be taken if np.fromiter is used within a jax transformations " + "because of its potential side-effect of consuming the iterable object; for more information see " + "https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#pure-functions") + + @_wraps(np.fromfunction) def fromfunction(function, shape, *, dtype=float, **kwargs): shape = core.canonicalize_shape(shape, context="shape argument of jnp.fromfunction()") diff --git a/jax/numpy/__init__.py b/jax/numpy/__init__.py --- a/jax/numpy/__init__.py +++ b/jax/numpy/__init__.py @@ -123,7 +123,9 @@ fmax as fmax, fmin as fmin, frombuffer as frombuffer, + fromfile as fromfile, fromfunction as fromfunction, + fromiter as fromiter, fromstring as fromstring, full as full, full_like as full_like,
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -6245,7 +6245,7 @@ def test_lax_numpy_docstrings(self): # Test that docstring wrapping & transformation didn't fail. # Functions that have their own docstrings & don't wrap numpy. - known_exceptions = {'broadcast_arrays', 'vectorize'} + known_exceptions = {'broadcast_arrays', 'fromfile', 'fromiter', 'vectorize'} for name in dir(jnp): if name in known_exceptions or name.startswith('_'):
Support for `numpy.fromiter` As far as I can tell there is not an existing issue or implementation for `jax.numpy.fromiter`, which currently raises: ``` [NotImplementedError: Numpy function <built-in function fromiter> not yet implemented]() ```
Thanks for the issue! We haven't implemented this because there's not really any way to do better than `jnp.asarray(np.fromiter(x))`, though we could certainly add that function as a convenience. What do you think? That's exactly what I ended up doing – I think it would be neat to add it just to get more function parity with `numpy`, even if it doesn't really do anything clever. @jakevdp Jax could actually do something 'clever' for `fromiter(x)`, as `jnp.asarray(np.fromiter(x))` materelize everything eagerly. That said, the staging behaviour can be achieved with something like `jnp.array(list(x))`. Initial implementations of `fromiter` and related functions in #10049. Thanks! Wow! Very impressive turn-around time on these issues! Thank you so much @jakevdp! Some of the functions in this family—namely `fromiter` and `fromfile`—are inherently side-effecting and thus incompatible with JAX staging (`jit` etc.). I think we should avoid introducing those into `jax.numpy`. Parity with standard numpy is only one goal; others include behavior invariance under `jit` and avoiding impure functions in our core libraries. Other functions seem like a plus. For instance, in the context of staging, `fromstring` could be a convenient way to introduce a constant. Thanks Roy - good points! I'll make the modifications to #10049 So the resolution of the original question is that we don't want to support `jnp.fromiter` in JAX because it's a potentially side-effecting operation that will possibly cause surprising behavior with JIT. But we were able to implement a few other `jnp.from*` operations. Thanks for the request! Might it be worthwhile to add a description to the `jnp.fromiter` function such that it explains the reasoning @froystig mentioned in the error instead of `Numpy function <built-in function fromiter> not yet implemented]()` in addition to a suggested solution (`jnp.asarray(np.fromiter(x))` along with impure + jit caveat)? One thing I really like about JAX is when the errors have references to solutions and rationale, which the library does excellently in many other places! That's a great idea!
2022-03-29T20:49:20
google/jax
10,198
google__jax-10198
[ "10163" ]
8b6b736ef311e9ac471acd2d14b19501bccfece4
diff --git a/jax/experimental/sparse/bcoo.py b/jax/experimental/sparse/bcoo.py --- a/jax/experimental/sparse/bcoo.py +++ b/jax/experimental/sparse/bcoo.py @@ -84,6 +84,8 @@ def _bcoo_nse(mat, n_batch=0, n_dense=0): mask = mask.sum(list(range(n_batch, mask.ndim))) return mask.max() +# TODO(jakevdp): add a custom autodiff rule that errors if remove_zeros=True, because +# it produces wrong values. See https://github.com/google/jax/issues/10163 def _bcoo_sum_duplicates(data, indices, shape, nse=None, remove_zeros=True): if nse is None and isinstance(jnp.array(0), core.Tracer): raise ValueError("When used with JIT, vmap, or another transform, sum_duplicates() " @@ -1229,7 +1231,8 @@ def _bcoo_spdot_general_unbatched(lhs_data, lhs_indices, rhs_data, rhs_indices, out_indices = out_indices.at[:, :, lhs_j.shape[-1]:].set(rhs_j[None, :]) out_indices = out_indices.reshape(len(out_data), out_indices.shape[-1]) out_nse = (lhs.nse if lhs_j.shape[1] else 1) * (rhs.nse if rhs_j.shape[1] else 1) - return _bcoo_sum_duplicates(out_data, out_indices, out_shape, nse=out_nse) + # Note: remove_zeros=True is incompatible with autodiff. + return _bcoo_sum_duplicates(out_data, out_indices, out_shape, nse=out_nse, remove_zeros=False) @bcoo_spdot_general_p.def_impl def _bcoo_spdot_general_impl(lhs_data, lhs_indices, rhs_data, rhs_indices, *, lhs_spinfo: BCOOInfo, rhs_spinfo: BCOOInfo, dimension_numbers): @@ -1770,7 +1773,8 @@ def sum_duplicates(self, nse=None, remove_zeros=True): If it is smaller than the number required, data will be silently discarded. remove_zeros : bool (default=True). If True, remove explicit zeros from the data as part of summing duplicates. If False, then explicit zeros at unique indices - will remain among the specified elements. + will remain among the specified elements. Note: remove_zeros=True is incompatible + with autodiff. """ data, indices = _bcoo_sum_duplicates(self.data, self.indices, self.shape, nse=nse, remove_zeros=remove_zeros)
diff --git a/tests/sparse_test.py b/tests/sparse_test.py --- a/tests/sparse_test.py +++ b/tests/sparse_test.py @@ -1454,6 +1454,38 @@ def f_sparse(lhs_data, rhs_data): self.assertAllClose(jf_dense_0, jf_sparse_0, rtol=tol) self.assertAllClose(jf_dense_1, jf_sparse_1, rtol=tol) + def test_bcoo_spdot_general_ad_bug(self): + # Regression test for https://github.com/google/jax/issues/10163 + A_indices = jnp.array([[0, 1], [0, 2], [1, 1], [1, 2], [1, 0]]) + A_values = jnp.array([-2.0, 1.0, -1.0, 0.5, 2.0]) + A_shape = (2, 3) + + B_indices = jnp.array([[0, 2], [2, 1], [0, 3], [1, 3], [1, 0], [0, 0]]) + B_values = jnp.array([10.0, 100.0, 1000.0, -5.0, -50.0, -500.0]) + B_shape = (3, 4) + + def sp_sp_product(v1, v2): + A = sparse.BCOO((v1, A_indices), shape=A_shape) + B = sparse.BCOO((v2, B_indices), shape=B_shape) + return (A @ B).todense() + + def sp_de_product(v1, v2): + A = sparse.BCOO((v1, A_indices), shape=A_shape) + B = sparse.BCOO((v2, B_indices), shape=B_shape).todense() + return A @ B + + def de_de_product(v1, v2): + sparse1 = sparse.BCOO((v1, A_indices), shape=A_shape).todense() + dense2 = sparse.BCOO((v2, B_indices), shape=B_shape).todense() + return sparse1 @ dense2 + + sp_sp_jac = jax.jacfwd(sp_sp_product, argnums=1)(A_values, B_values) + sp_de_jac = jax.jacfwd(sp_de_product, argnums=1)(A_values, B_values) + de_de_jac = jax.jacfwd(de_de_product, argnums=1)(A_values, B_values) + + self.assertAllClose(sp_sp_jac, de_de_jac) + self.assertAllClose(sp_de_jac, de_de_jac) + @unittest.skipIf(jtu.device_under_test() == "tpu", "TPU has insufficient precision") @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_{}[n_batch={}]_{}[n_batch={}]_in_axes={}".format(
[sparse] Wrong Jacobian computation for bcoo_spdot_general It seems that the Jacobian computed by `bcoo_spdot_general`, invoked through the operator `@`, does not organize the values in the correct order. In order to show the bug, I have written the following code.
```python
import jax
import jax.numpy as jnp
from jax.experimental.sparse.bcoo import BCOO

A_indices = jnp.array([[0, 1], [0, 2], [1, 1], [1, 2], [1, 0]])
A_values = jnp.array([-2.0, 1.0, -1.0, 0.5, 2.0])
A_shape = (2, 3)

B_indices = jnp.array([[0, 2], [2, 1], [0, 3], [1, 3], [1, 0], [0, 0]])
B_values = jnp.array([10.0, 100.0, 1000.0, -5.0, -50.0, -500.0])
B_shape = (3, 4)

def sp_sp_product(v1, v2):
    sparse1 = BCOO((v1, A_indices), shape=A_shape)
    sparse2 = BCOO((v2, B_indices), shape=B_shape)
    return (sparse1 @ sparse2).todense()

def sp_dense_product(v1, v2):
    sparse1 = BCOO((v1, A_indices), shape=A_shape)
    dense2 = BCOO((v2, B_indices), shape=B_shape).todense()
    return sparse1 @ dense2

sp_sp_product_jac = jax.jacfwd(sp_sp_product, argnums=1)(A_values, B_values)
sparse_dense_product_jac = jax.jacfwd(sp_dense_product, argnums=1)(A_values, B_values)

assert jnp.array_equal(
    sp_sp_product_jac, sparse_dense_product_jac
), "The two Jacobian should be the same"
```
`sparse_dense_product_jac` is correct (as can be verified analytically), while `sp_sp_product_jac` is incorrect: it should be identical to `sparse_dense_product_jac`, but it is not. The shape of `sp_sp_product_jac` is the expected one, and the values in `sp_sp_product_jac` seem correct, but they are not placed in the expected locations. Probably there is a problem in the indices of the sparse Jacobian matrix.
@jakevdp As the author of https://github.com/google/jax/pull/8848, have you any thought about it? I'll take a look Still working on debugging this... it's a tricky one. Inspecting the code it seems that the problem is that the indices relative to the output JVP are assumed to be the indices of the output of the result of the product `bcoo_spdot_general`. I have tried to return these indices modifying the function like that. ```python def _bcoo_spdot_general_jvp(primals, tangents, **kwds): lhs_data, lhs_indices, rhs_data, rhs_indices = primals lhs_data_dot, lhs_indices_dot, rhs_data_dot, rhs_indices_dot = tangents primals_out = _bcoo_spdot_general(*primals, **kwds) assert type(lhs_indices_dot) is ad.Zero assert type(rhs_indices_dot) is ad.Zero if type(lhs_data_dot) is not ad.Zero: data_dot_out, indices_dot_out = _bcoo_spdot_general( lhs_data_dot, lhs_indices, rhs_data, rhs_indices, **kwds ) if type(rhs_data_dot) is not ad.Zero: data_dot_out, indices_dot_out = _bcoo_spdot_general( lhs_data, lhs_indices, rhs_data_dot, rhs_indices, **kwds ) return primals_out, [data_dot_out, indices_dot_out] ``` This code does not crash, but the tangent indices `indices_dot_out` are not passed to the JVP function of `bcoo_todense` (that makes the same assumption that tangent and primal data vectors share the same indices) https://github.com/google/jax/blob/3184dd65a222354bffa2466d9a375162f5649132/jax/experimental/sparse/bcoo.py#L256 so the final result is identical. The assumption that the tangent and primal vectors share the same indices is fundamental to how autodiff of sparse objects works (I'm not sure how you could have nonzero gradients in non-existent values!). I'm not sure that your modified function is the correct approach, because the gradient of integer indices is not those indices themselves, but must be a symbolic zero. My suspicion at the moment is that this has something to do with the interaction between batching and autodiff that happens within a jacobian... we end up with a sparse array with batched data and unbatched indices, and I think our test coverage is not all that extensive for that case. OK, I think the issue comes down to this line in the spdot_general implementation: https://github.com/google/jax/blob/5522ed1702de3e806ecb41b7adc00f9841e27a62/jax/experimental/sparse/bcoo.py#L1090 `sum_duplicates` sorts the indices and moves explicit zero-values to the end - since explicit zeros are different in each batch, it has to return a different set of indices for each batch, such that the output data no longer shares indices. We need a version of `sum_duplicates` that works correctly when data is batched and indices are not. Put another way, this issue has brought up a more general concern that I hadn't considered before: sparse autodiff is fundamentally incompatible with any operation in which the output indices depend on the values in the data, because that may break the property that primals and tangents must share indices. Here's a shorter example showing how grad of `sum_duplicates` is incorrect if there are zeros in the data. I believe this is another effect of the root cause of this bug: ```python from functools import partial import jax import jax.numpy as jnp from jax.experimental import sparse indices = jnp.array([[1], [2]]) data = jnp.array([0.0, 1.0]) def f(data, sum_duplicates = True): mat = sparse.BCOO((data, indices), shape=(3,)) if sum_duplicates: mat = mat.sum_duplicates() return mat.todense().sum() print(jax.grad(partial(f, sum_duplicates=True))(data)) # [0. 1.] 
print(jax.grad(partial(f, sum_duplicates=False))(data)) # [1. 1.] ``` It seems to me that you changed your mind with respect to the cause in the last comment, but I reply to your previous comment sharing my current knowledge. > My suspicion at the moment is that this has something to do with the interaction between batching and autodiff that happens within a jacobian... we end up with a sparse array with batched data and unbatched indices, and I think our test coverage is not all that extensive for that case. The error can be reproduced for the JVP only, without the batching involved in the computation of the Jacobian. ```python import jax import jax.numpy as jnp from jax.experimental.sparse.bcoo import BCOO A_indices = jnp.array([[0, 1], [0, 2], [1, 1], [1, 2], [1, 0]]) A_values = jnp.array([-2.0, 1.0, -1.0, 0.5, 2.0]) A_shape = (2, 3) B_indices = jnp.array([[0, 2], [2, 1], [0, 3], [1, 3], [1, 0], [0, 0]]) B_values = jnp.array([10.0, 100.0, 1000.0, -5.0, -50.0, -500.0]) B_shape = (3, 4) def sp_sp_product(b_v): sparse1 = BCOO((A_values, A_indices), shape=A_shape) sparse2 = BCOO((b_v, B_indices), shape=B_shape) return (sparse1 @ sparse2).todense() def sp_dense_product(b_v): sparse1 = BCOO((A_values, A_indices), shape=A_shape) dense2 = BCOO((b_v, B_indices), shape=B_shape).todense() return sparse1 @ dense2 first_row_vector = jnp.eye(B_values.size)[0] _, sp_sp_product_jvp = jax.jvp(sp_sp_product, (B_values,), (first_row_vector,)) _, sp_dense_product_jvp = jax.jvp(sp_dense_product, (B_values,), (first_row_vector,)) assert jnp.array_equal( sp_sp_product_jvp, sp_dense_product_jvp ), "The two JVP should be the same" ``` > Put another way, this issue has brought up a more general concern that I hadn't considered before: sparse autodiff is fundamentally incompatible with any operation in which the output indices depend on the values in the data, because that may break the property that primals and tangents must share indices. I totally agree with this! In my opinion, a workaround is to remove the constraint that the tangent vector of indices is Zero and use it to store the indices. I know that it is not mathematically correct, but if this choice stays hidden in the code maybe it causes no harm. I'm looking into it to see if that could work in practice without a major refactor of the API. > I'm not sure that your modified function is the correct approach, because the gradient of integer indices is not those indices themselves, but must be a symbolic zero. You are right, this constraint is hard to circumvent because it is deeply integrated into JAX. I have written a version of the code that does not causes the bug for the examples I posted, but the code breaks in other tests linked to sparse matrices because when the tangent of the indices isn't zero, then an assertion fails in https://github.com/google/jax/blob/0c02f7935aa90d1d895f34882186e889e2b8923e/jax/interpreters/ad.py#L454 I'm willing to help fix the problem, but I do not have sufficient knowledge of JAX to come up with a solution that is consistent with the established inner workings of the library. I don't think this will require any deep fix, we just need to not use a problematic version of `sum_duplicates` in the `bcoo_spdot_general` implementation. I think the solution will be to split the current `sum_duplicates` utility into a couple utilities, or perhaps one with several options: `sum_duplicates`, `sort_indices`, and `eliminate_zeros`. The first two are fine for autodiff, the third should error on autodiff. 
Then in functions that we want to be differentiable, we don't call the third.
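For reference, the patch above takes essentially that route: `bcoo_spdot_general` now calls the duplicate-summing helper with `remove_zeros=False`, and the `sum_duplicates` docstring notes that `remove_zeros=True` is incompatible with autodiff. A minimal sketch of the user-facing knob, using the signature shown in the diff, looks like this:

```python
import jax.numpy as jnp
from jax.experimental import sparse

data = jnp.array([0.0, 1.0])     # note the explicit zero
indices = jnp.array([[1], [2]])
mat = sparse.BCOO((data, indices), shape=(3,))

# remove_zeros=False keeps the explicit zero as a specified element, so the
# index set does not depend on the data values and primals/tangents keep
# sharing the same indices under autodiff.
deduped = mat.sum_duplicates(nse=2, remove_zeros=False)
```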
2022-04-08T18:05:30
google/jax
10,260
google__jax-10260
[ "10209" ]
86c8446c008db65ed2cf0797a7196c240bb6fc5b
diff --git a/jax/_src/lax/linalg.py b/jax/_src/lax/linalg.py --- a/jax/_src/lax/linalg.py +++ b/jax/_src/lax/linalg.py @@ -1816,8 +1816,7 @@ def _schur_impl(operand, *, compute_schur_vectors, sort_eig_vals, select_callable=select_callable) -def _schur_translation_rule(ctx, avals_in, avals_out, operand, *, - compute_schur_vectors, sort_eig_vals): +def _schur_translation_rule(ctx, *args, **kwargs): raise NotImplementedError( "Schur decomposition is only implemented on the CPU backend.")
[GPU] jax.scipy.linalg.sqrtm --> TypeError: _schur_translation_rule() got an unexpected keyword argument 'select_callable' - [x] Check for duplicate issues. - [x] Provide a complete example of how to reproduce the bug, wrapped in triple backticks like this: When run on colab w/ GPU runtime the following code ([notebook](https://colab.research.google.com/drive/1zFK5h_6Jnzw5kPRKpR9Dko4kVpQa2qxL)), ``` import jax import numpy as np print(jax.__version__) jax.scipy.linalg.sqrtm(np.random.uniform(size=(8,8))) ``` results in, - [x] Include stack trace ``` 0.3.4 --------------------------------------------------------------------------- JaxStackTraceBeforeTransformation Traceback (most recent call last) [/usr/lib/python3.7/runpy.py](https://localhost:8080/#) in _run_module_as_main(***failed resolving arguments***) 192 return _run_code(code, main_globals, None, --> 193 "__main__", mod_spec) 194 53 frames [/usr/lib/python3.7/runpy.py](https://localhost:8080/#) in _run_code(***failed resolving arguments***) 84 __spec__ = mod_spec) ---> 85 exec(code, run_globals) 86 return run_globals [/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py](https://localhost:8080/#) in <module>() 15 from ipykernel import kernelapp as app ---> 16 app.launch_new_instance() [/usr/local/lib/python3.7/dist-packages/traitlets/config/application.py](https://localhost:8080/#) in launch_instance(***failed resolving arguments***) 845 app.initialize(argv) --> 846 app.start() 847 [/usr/local/lib/python3.7/dist-packages/ipykernel/kernelapp.py](https://localhost:8080/#) in start(***failed resolving arguments***) 498 try: --> 499 self.io_loop.start() 500 except KeyboardInterrupt: [/usr/local/lib/python3.7/dist-packages/tornado/platform/asyncio.py](https://localhost:8080/#) in start(***failed resolving arguments***) 131 asyncio.set_event_loop(self.asyncio_loop) --> 132 self.asyncio_loop.run_forever() 133 finally: [/usr/lib/python3.7/asyncio/base_events.py](https://localhost:8080/#) in run_forever(***failed resolving arguments***) 540 while True: --> 541 self._run_once() 542 if self._stopping: [/usr/lib/python3.7/asyncio/base_events.py](https://localhost:8080/#) in _run_once(***failed resolving arguments***) 1785 else: -> 1786 handle._run() 1787 handle = None # Needed to break cycles when an exception occurs. 
[/usr/lib/python3.7/asyncio/events.py](https://localhost:8080/#) in _run(***failed resolving arguments***) 87 try: ---> 88 self._context.run(self._callback, *self._args) 89 except Exception as exc: [/usr/local/lib/python3.7/dist-packages/tornado/platform/asyncio.py](https://localhost:8080/#) in _handle_events(***failed resolving arguments***) 121 fileobj, handler_func = self.handlers[fd] --> 122 handler_func(fileobj, events) 123 [/usr/local/lib/python3.7/dist-packages/tornado/stack_context.py](https://localhost:8080/#) in null_wrapper(***failed resolving arguments***) 299 _state.contexts = cap_contexts[0] --> 300 return fn(*args, **kwargs) 301 finally: [/usr/local/lib/python3.7/dist-packages/zmq/eventloop/zmqstream.py](https://localhost:8080/#) in _handle_events(***failed resolving arguments***) 451 if zmq_events & zmq.POLLIN and self.receiving(): --> 452 self._handle_recv() 453 if not self.socket: [/usr/local/lib/python3.7/dist-packages/zmq/eventloop/zmqstream.py](https://localhost:8080/#) in _handle_recv(***failed resolving arguments***) 480 callback = self._recv_callback --> 481 self._run_callback(callback, msg) 482 [/usr/local/lib/python3.7/dist-packages/zmq/eventloop/zmqstream.py](https://localhost:8080/#) in _run_callback(***failed resolving arguments***) 430 # inside our blanket exception handler rather than outside. --> 431 callback(*args, **kwargs) 432 except Exception: [/usr/local/lib/python3.7/dist-packages/tornado/stack_context.py](https://localhost:8080/#) in null_wrapper(***failed resolving arguments***) 299 _state.contexts = cap_contexts[0] --> 300 return fn(*args, **kwargs) 301 finally: [/usr/local/lib/python3.7/dist-packages/ipykernel/kernelbase.py](https://localhost:8080/#) in dispatcher(***failed resolving arguments***) 282 def dispatcher(msg): --> 283 return self.dispatch_shell(stream, msg) 284 return dispatcher [/usr/local/lib/python3.7/dist-packages/ipykernel/kernelbase.py](https://localhost:8080/#) in dispatch_shell(***failed resolving arguments***) 232 try: --> 233 handler(stream, idents, msg) 234 except Exception: [/usr/local/lib/python3.7/dist-packages/ipykernel/kernelbase.py](https://localhost:8080/#) in execute_request(***failed resolving arguments***) 398 reply_content = self.do_execute(code, silent, store_history, --> 399 user_expressions, allow_stdin) 400 [/usr/local/lib/python3.7/dist-packages/ipykernel/ipkernel.py](https://localhost:8080/#) in do_execute(***failed resolving arguments***) 207 try: --> 208 res = shell.run_cell(code, store_history=store_history, silent=silent) 209 finally: [/usr/local/lib/python3.7/dist-packages/ipykernel/zmqshell.py](https://localhost:8080/#) in run_cell(***failed resolving arguments***) 536 self._last_traceback = None --> 537 return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs) 538 [/usr/local/lib/python3.7/dist-packages/IPython/core/interactiveshell.py](https://localhost:8080/#) in run_cell(***failed resolving arguments***) 2717 has_raised = self.run_ast_nodes(code_ast.body, cell_name, -> 2718 interactivity=interactivity, compiler=compiler, result=result) 2719 [/usr/local/lib/python3.7/dist-packages/IPython/core/interactiveshell.py](https://localhost:8080/#) in run_ast_nodes(***failed resolving arguments***) 2827 code = compiler(mod, cell_name, "single") -> 2828 if self.run_code(code, result): 2829 return True [/usr/local/lib/python3.7/dist-packages/IPython/core/interactiveshell.py](https://localhost:8080/#) in run_code(***failed resolving arguments***) 2881 #rprint('Running code', repr(code_obj)) # dbg -> 2882 
exec(code_obj, self.user_global_ns, self.user_ns) 2883 finally: [<ipython-input-1-f00b9a3dc36a>](https://localhost:8080/#) in <module>() 5 ----> 6 jax.scipy.linalg.sqrtm(np.random.uniform(size=(16,16))) [/usr/local/lib/python3.7/dist-packages/jax/_src/scipy/linalg.py](https://localhost:8080/#) in sqrtm(***failed resolving arguments***) 657 raise NotImplementedError("Blocked version is not implemented yet.") --> 658 return _sqrtm(A) [/usr/local/lib/python3.7/dist-packages/jax/_src/scipy/linalg.py](https://localhost:8080/#) in _sqrtm(***failed resolving arguments***) 637 def _sqrtm(A): --> 638 T, Z = schur(A, output='complex') 639 sqrt_T = _sqrtm_triu(T) [/usr/local/lib/python3.7/dist-packages/jax/_src/scipy/linalg.py](https://localhost:8080/#) in schur(***failed resolving arguments***) 116 "Expected 'output' to be either 'real' or 'complex', got output={}.".format(output)) --> 117 return _schur(a, output) 118 [/usr/local/lib/python3.7/dist-packages/jax/_src/scipy/linalg.py](https://localhost:8080/#) in _schur(***failed resolving arguments***) 109 a = a.astype(jnp.result_type(a.dtype, 0j)) --> 110 return lax_linalg.schur(a) 111 JaxStackTraceBeforeTransformation: TypeError: _schur_translation_rule() got an unexpected keyword argument 'select_callable' The preceding stack trace is the source of the JAX operation that, once transformed by JAX, triggered the following exception. -------------------- The above exception was the direct cause of the following exception: UnfilteredStackTrace Traceback (most recent call last) [<ipython-input-1-f00b9a3dc36a>](https://localhost:8080/#) in <module>() 5 ----> 6 jax.scipy.linalg.sqrtm(np.random.uniform(size=(16,16))) [/usr/local/lib/python3.7/dist-packages/jax/_src/scipy/linalg.py](https://localhost:8080/#) in sqrtm(A, blocksize) 657 raise NotImplementedError("Blocked version is not implemented yet.") --> 658 return _sqrtm(A) [/usr/local/lib/python3.7/dist-packages/jax/_src/traceback_util.py](https://localhost:8080/#) in reraise_with_filtered_traceback(*args, **kwargs) 161 try: --> 162 return fun(*args, **kwargs) 163 except Exception as e: [/usr/local/lib/python3.7/dist-packages/jax/_src/api.py](https://localhost:8080/#) in cache_miss(*args, **kwargs) 434 device=device, backend=backend, name=flat_fun.__name__, --> 435 donated_invars=donated_invars, inline=inline) 436 out_pytree_def = out_tree() [/usr/local/lib/python3.7/dist-packages/jax/core.py](https://localhost:8080/#) in bind(self, fun, *args, **params) 1708 def bind(self, fun, *args, **params): -> 1709 return call_bind(self, fun, *args, **params) 1710 [/usr/local/lib/python3.7/dist-packages/jax/core.py](https://localhost:8080/#) in call_bind(primitive, fun, *args, **params) 1720 tracers = map(top_trace.full_raise, args) -> 1721 outs = top_trace.process_call(primitive, fun, tracers, params) 1722 return map(full_lower, apply_todos(env_trace_todo(), outs)) [/usr/local/lib/python3.7/dist-packages/jax/core.py](https://localhost:8080/#) in process_call(self, primitive, f, tracers, params) 613 def process_call(self, primitive, f, tracers, params): --> 614 return primitive.impl(f, *tracers, **params) 615 process_map = process_call [/usr/local/lib/python3.7/dist-packages/jax/_src/dispatch.py](https://localhost:8080/#) in _xla_call_impl(***failed resolving arguments***) 142 compiled_fun = _xla_callable(fun, device, backend, name, donated_invars, --> 143 *unsafe_map(arg_spec, args)) 144 try: [/usr/local/lib/python3.7/dist-packages/jax/linear_util.py](https://localhost:8080/#) in memoized_fun(fun, *args) 271 
else: --> 272 ans = call(fun, *args) 273 cache[key] = (ans, fun.stores) [/usr/local/lib/python3.7/dist-packages/jax/_src/dispatch.py](https://localhost:8080/#) in _xla_callable_uncached(fun, device, backend, name, donated_invars, *arg_specs) 169 return lower_xla_callable(fun, device, backend, name, donated_invars, --> 170 *arg_specs).compile().unsafe_call 171 [/usr/local/lib/python3.7/dist-packages/jax/_src/profiler.py](https://localhost:8080/#) in wrapper(*args, **kwargs) 205 with TraceAnnotation(name, **decorator_kwargs): --> 206 return func(*args, **kwargs) 207 return wrapper [/usr/local/lib/python3.7/dist-packages/jax/_src/dispatch.py](https://localhost:8080/#) in lower_xla_callable(fun, device, backend, name, donated_invars, *arg_specs) 259 module_name, closed_jaxpr, backend.platform, --> 260 mlir.ReplicaAxisContext(axis_env), name_stack, donated_invars) 261 else: [/usr/local/lib/python3.7/dist-packages/jax/interpreters/mlir.py](https://localhost:8080/#) in lower_jaxpr_to_module(module_name, jaxpr, platform, axis_context, name_stack, donated_args, replicated_args, arg_shardings, result_shardings) 493 arg_shardings=arg_shardings, result_shardings=result_shardings, --> 494 input_output_aliases=input_output_aliases) 495 [/usr/local/lib/python3.7/dist-packages/jax/interpreters/mlir.py](https://localhost:8080/#) in lower_jaxpr_to_fun(ctx, name, jaxpr, public, replace_units_with_dummy, replace_tokens_with_dummy, replicated_args, arg_shardings, result_shardings, input_output_aliases) 636 jaxpr.jaxpr, map(ir_constants, jaxpr.consts), --> 637 *args) 638 outs = [] [/usr/local/lib/python3.7/dist-packages/jax/interpreters/mlir.py](https://localhost:8080/#) in jaxpr_subcomp(ctx, jaxpr, consts, *args) 722 ans = rule(rule_ctx, *map(_unwrap_singleton_ir_values, in_nodes), --> 723 **eqn.params) 724 [/usr/local/lib/python3.7/dist-packages/jax/interpreters/mlir.py](https://localhost:8080/#) in _xla_call_lower(***failed resolving arguments***) 783 backend, ctx.module_context, ctx.avals_in, ctx.avals_out, --> 784 *args) 785 [/usr/local/lib/python3.7/dist-packages/jax/interpreters/mlir.py](https://localhost:8080/#) in _call_lowering(fn_name, stack_name, call_jaxpr, backend, ctx, avals_in, avals_out, *args) 771 symbol_name = lower_jaxpr_to_fun(sub_ctx, fn_name, --> 772 core.ClosedJaxpr(call_jaxpr, ())).name.value 773 call = func_dialect.CallOp(flat_output_types, [/usr/local/lib/python3.7/dist-packages/jax/interpreters/mlir.py](https://localhost:8080/#) in lower_jaxpr_to_fun(ctx, name, jaxpr, public, replace_units_with_dummy, replace_tokens_with_dummy, replicated_args, arg_shardings, result_shardings, input_output_aliases) 636 jaxpr.jaxpr, map(ir_constants, jaxpr.consts), --> 637 *args) 638 outs = [] [/usr/local/lib/python3.7/dist-packages/jax/interpreters/mlir.py](https://localhost:8080/#) in jaxpr_subcomp(ctx, jaxpr, consts, *args) 722 ans = rule(rule_ctx, *map(_unwrap_singleton_ir_values, in_nodes), --> 723 **eqn.params) 724 [/usr/local/lib/python3.7/dist-packages/jax/interpreters/mlir.py](https://localhost:8080/#) in cached_lowering(ctx, *args, **params) 935 if func is None: --> 936 func = _emit_lowering_rule_as_fun(partial(f, **params), ctx) 937 ctx.module_context.cached_primitive_lowerings[key] = func [/usr/local/lib/python3.7/dist-packages/jax/interpreters/mlir.py](https://localhost:8080/#) in _emit_lowering_rule_as_fun(lowering_rule, ctx) 665 map(len, input_types)) --> 666 outs = lowering_rule(ctx, *_unwrap_singleton_ir_values(unflattened_args)) 667 
func_dialect.ReturnOp(util.flatten(map(wrap_singleton_ir_values, outs))) [/usr/local/lib/python3.7/dist-packages/jax/interpreters/mlir.py](https://localhost:8080/#) in fallback(ctx, *args, **params) 952 xla_computation = xla.primitive_subcomputation( --> 953 module_ctx.platform, module_ctx.axis_env, prim, *ctx.avals_in, **params) 954 submodule_str = xc._xla.mlir.xla_computation_to_mlir_module(xla_computation) [/usr/local/lib/python3.7/dist-packages/jax/interpreters/xla.py](https://localhost:8080/#) in primitive_subcomputation(platform, axis_env, prim, *avals, **params) 445 name_stack=new_name_stack()) --> 446 ans = f(ctx.replace(builder=c), avals, None, *xla_args, **params) 447 if prim.multiple_results: [/usr/local/lib/python3.7/dist-packages/jax/interpreters/xla.py](https://localhost:8080/#) in f_new(ctx, avals_in, avals_out, *xla_args, **params) 1035 return jaxpr_subcomp(ctx, jaxpr, _xla_consts(ctx.builder, consts), -> 1036 *xla_args) 1037 return f_new [/usr/local/lib/python3.7/dist-packages/jax/interpreters/xla.py](https://localhost:8080/#) in jaxpr_subcomp(ctx, jaxpr, consts, *args) 611 ans = rule(eqn_ctx, map(aval, eqn.invars), map(aval, eqn.outvars), --> 612 *in_nodes, **eqn.params) 613 UnfilteredStackTrace: TypeError: _schur_translation_rule() got an unexpected keyword argument 'select_callable' The stack trace below excludes JAX-internal frames. The preceding is the original exception that occurred, unmodified. -------------------- The above exception was the direct cause of the following exception: TypeError Traceback (most recent call last) [<ipython-input-1-f00b9a3dc36a>](https://localhost:8080/#) in <module>() 4 print(jax.__version__) 5 ----> 6 jax.scipy.linalg.sqrtm(np.random.uniform(size=(16,16))) [/usr/local/lib/python3.7/dist-packages/jax/_src/scipy/linalg.py](https://localhost:8080/#) in sqrtm(A, blocksize) 656 if blocksize > 1: 657 raise NotImplementedError("Blocked version is not implemented yet.") --> 658 return _sqrtm(A) TypeError: _schur_translation_rule() got an unexpected keyword argument 'select_callable' ```
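The underlying limitation (which the patch above only turns into a cleaner error) is that the Schur decomposition used by `sqrtm` is implemented only on the CPU backend. One possible workaround, sketched here as an assumption rather than an officially documented recipe, is to pin that computation to the CPU backend:

```python
import numpy as np
import jax
import jax.scipy.linalg

A = np.random.uniform(size=(8, 8))

# Lower sqrtm (and the Schur decomposition inside it) for the CPU backend,
# even when the default backend is GPU/TPU; the result can be moved back to
# an accelerator afterwards with jax.device_put if needed.
sqrtm_cpu = jax.jit(jax.scipy.linalg.sqrtm, backend="cpu")
B = sqrtm_cpu(A)
```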
2022-04-13T16:50:59
google/jax
10,286
google__jax-10286
[ "10281" ]
7ad1120da0b4afe450a751aff9faf81a3d95d34d
diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py --- a/jax/_src/numpy/lax_numpy.py +++ b/jax/_src/numpy/lax_numpy.py @@ -3425,6 +3425,10 @@ def _normalize_index(index, axis_size): @partial(jit, static_argnames=('axis',)) def take_along_axis(arr, indices, axis: Optional[int]): _check_arraylike("take_along_axis", arr, indices) + index_dtype = dtypes.dtype(indices) + if not dtypes.issubdtype(index_dtype, integer): + raise TypeError("take_along_axis indices must be of integer type, got " + f"{str(index_dtype)}") if axis is None: if ndim(indices) != 1: msg = "take_along_axis indices must be 1D if axis=None, got shape {}" @@ -3442,13 +3446,9 @@ def replace(tup, val): return tuple(lst) use_64bit_index = _any([not core.is_constant_dim(d) or d >= (1 << 31) for d in arr.shape]) - index_dtype = int64 if use_64bit_index else int32 + index_dtype = dtype(int64 if use_64bit_index else int32) indices = lax.convert_element_type(indices, index_dtype) - bcast_shape = lax.broadcast_shapes(replace(arr.shape, 1), replace(indices.shape, 1)) - indices = broadcast_to(indices, replace(bcast_shape, indices.shape[axis])) - arr = broadcast_to(arr, replace(bcast_shape, arr.shape[axis])) - axis_size = arr.shape[axis] arr_shape = replace(arr.shape, 1) idx_shape = indices.shape @@ -3471,26 +3471,38 @@ def replace(tup, val): start_index_map.append(i) collapsed_slice_dims.append(i) j += 1 - elif not core.symbolic_equal_dim(idx_shape[i], 1): + elif core.symbolic_equal_dim(idx_shape[i], 1): + # If idx_shape[i] == 1, we can just take the entirety of the arr's axis + # and avoid forming an iota index. + offset_dims.append(i) + slice_sizes.append(arr_shape[i]) + elif core.symbolic_equal_dim(arr_shape[i], 1): + # If the array dimension is 1 but the index dimension is not, we + # broadcast the array dimension to the index dimension by repeatedly + # gathering the first element. + gather_indices.append(zeros(gather_index_shape, dtype=index_dtype)) + slice_sizes.append(1) + start_index_map.append(i) + collapsed_slice_dims.append(i) + j += 1 + else: + # Otherwise, idx_shape[i] == arr_shape[i]. Use an iota index so + # corresponding elements of array and index are gathered. # TODO(mattjj): next line needs updating for dynamic shapes - iota = lax.iota(_dtype(indices), out_shape[i]) # type: ignore - iota = lax.broadcast_in_dim(iota, gather_index_shape, (j,)) + iota = lax.broadcasted_iota(index_dtype, gather_index_shape, j) gather_indices.append(iota) slice_sizes.append(1) start_index_map.append(i) collapsed_slice_dims.append(i) j += 1 - else: - # If idx_shape[i] == 1, we can just take the entirety of the arr's axis - # and avoid forming an iota index. - offset_dims.append(i) - slice_sizes.append(arr_shape[i]) + gather_indices = lax.concatenate(gather_indices, dimension=j) dnums = lax.GatherDimensionNumbers( offset_dims=tuple(offset_dims), collapsed_slice_dims=tuple(collapsed_slice_dims), start_index_map=tuple(start_index_map)) + # TODO(phawkins): change the mode to "fill". return lax.gather(arr, gather_indices, dnums, tuple(slice_sizes)) ### Indexing
JIT fails to optimise batched array indexing: huge performance difference Batched array indexing is the batched version of this kind operation: ```python x = np.array([[0, 2], [1, 4], [2, 3]]) y = 2 print(x[y]) # [2 3] ``` --- I implemented batched array indexing in two ways. However, after JIT compilation, the performance is very different: Method 1: `jax.vmap` ```python @jax.jit def f1(x, y): return jax.vmap(getitem)(x, y) ``` Method 2: `np.take_along_axis` ```python @jax.jit def f2(x, y): return np.take_along_axis(x, y[..., None, None], axis=1)[:, 0] ``` --- Test: I am testing on the TPU platform. ```python import jax import jax.numpy as np import jax.random as rand from operator import getitem key = rand.PRNGKey(42) x = rand.randint(key, (32, 128, 1024), 0, 100) y = rand.randint(key, (32,), 0, 128) @jax.jit def f1(x, y): return jax.vmap(getitem)(x, y) @jax.jit def f2(x, y): return np.take_along_axis(x, y[..., None, None], axis=1)[:, 0] a = f1(x, y) # shape: (32, 1024) b = f2(x, y) assert np.array_equal(a, b) import timeit print(timeit.timeit('f1(x, y).block_until_ready()', globals=globals(), number=5000)) print(timeit.timeit('f2(x, y).block_until_ready()', globals=globals(), number=5000)) ``` Output: ``` 0.7506967119406909 10.882259733974934 ``` This means that the `jax.vmap` method is much faster than the `np.take_along_axis` method after JIT compilation. --- And the HLO of the `np.take_along_axis` method is also much longer. Method 1: `jax.vmap` ```llvm HloModule xla_computation_f1.21, is_scheduled=true %fused_computation (param_0.2: s32[32,128,1024], param_1.2: s32[64]) -> s32[32,1024] { %param_0.2 = s32[32,128,1024]{2,1,0:T(8,128)} parameter(0) %param_1.2 = s32[64]{0:T(256)} parameter(1) %reshape.6 = s32[32,2]{1,0:T(8,128)} reshape(s32[64]{0:T(256)} %param_1.2) %transpose.5 = s32[32,2]{1,0:T(8,128)} transpose(s32[32,2]{1,0:T(8,128)} %reshape.6), dimensions={0,1} %gather.1 = s32[32,1024]{1,0:T(8,128)} gather(s32[32,128,1024]{2,1,0:T(8,128)} %param_0.2, s32[32,2]{1,0:T(8,128)} %transpose.5), offset_dims={1}, collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=1, slice_sizes={1,1,1024}, indices_are_sorted=true, metadata={op_type="gather" op_name="xla_computation(f1)/jit(f1)/gather[dimension_numbers=GatherDimensionNumbers(offset_dims=(1,), collapsed_slice_dims=(0, 1), start_index_map=(0, 1)) slice_sizes=(1, 1, 1024) unique_indices=True indices_are_sorted=True mode=GatherScatterMode.PROMISE_IN_BOUNDS fill_value=None]" source_file="/home/ayaka/main.py" source_line=18} %transpose.4 = s32[32,1024]{1,0:T(8,128)} transpose(s32[32,1024]{1,0:T(8,128)} %gather.1), dimensions={0,1} ROOT %reshape.5 = s32[32,1024]{1,0:T(8,128)} reshape(s32[32,1024]{1,0:T(8,128)} %transpose.4) } %fused_computation.2 (param_0.9: s32[32]) -> s32[32,1] { %param_0.9 = s32[32]{0:T(256)} parameter(0) %constant.9 = s32[]{:T(256)} constant(0), metadata={op_type="lt" op_name="xla_computation(f1)/jit(f1)/lt" source_file="/home/ayaka/main.py" source_line=18} %broadcast.4 = s32[32]{0:T(256)} broadcast(s32[]{:T(256)} %constant.9), dimensions={}, metadata={op_type="lt" op_name="xla_computation(f1)/jit(f1)/lt" source_file="/home/ayaka/main.py" source_line=18} %compare.3 = pred[32]{0:T(1024)(128)(4,1)} compare(s32[32]{0:T(256)} %param_0.9, s32[32]{0:T(256)} %broadcast.4), direction=LT, metadata={op_type="lt" op_name="xla_computation(f1)/jit(f1)/lt" source_file="/home/ayaka/main.py" source_line=18} %constant.6 = s32[]{:T(256)} constant(128), metadata={op_type="add" op_name="xla_computation(f1)/jit(f1)/add" 
source_file="/home/ayaka/main.py" source_line=18} %broadcast.3 = s32[32]{0:T(256)} broadcast(s32[]{:T(256)} %constant.6), dimensions={}, metadata={op_type="add" op_name="xla_computation(f1)/jit(f1)/add" source_file="/home/ayaka/main.py" source_line=18} %add.3 = s32[32]{0:T(256)} add(s32[32]{0:T(256)} %param_0.9, s32[32]{0:T(256)} %broadcast.3), metadata={op_type="add" op_name="xla_computation(f1)/jit(f1)/add" source_file="/home/ayaka/main.py" source_line=18} %select.3 = s32[32]{0:T(256)} select(pred[32]{0:T(1024)(128)(4,1)} %compare.3, s32[32]{0:T(256)} %add.3, s32[32]{0:T(256)} %param_0.9), metadata={op_type="select_n" op_name="xla_computation(f1)/jit(f1)/select_n" source_file="/home/ayaka/main.py" source_line=18} ROOT %bitcast.1 = s32[32,1]{0,1:T(2,128)} bitcast(s32[32]{0:T(256)} %select.3), metadata={op_type="broadcast_in_dim" op_name="xla_computation(f1)/jit(f1)/broadcast_in_dim[shape=(32, 1) broadcast_dimensions=(0,)]" source_file="/home/ayaka/main.py" source_line=18} } %fused_computation.3 (param_0.11: s32[32,1], param_1.15: s32[32,1]) -> s32[32,2] { %param_1.15 = s32[32,1]{0,1:T(2,128)} parameter(1) %constant.5 = s32[]{:T(256)} constant(-2147483648) %pad.3 = s32[32,2]{0,1:T(2,128)} pad(s32[32,1]{0,1:T(2,128)} %param_1.15, s32[]{:T(256)} %constant.5), padding=0_0x0_1, metadata={op_type="concatenate" op_name="xla_computation(f1)/jit(f1)/concatenate[dimension=1]" source_file="/home/ayaka/main.py" source_line=18} %param_0.11 = s32[32,1]{0,1:T(2,128)} parameter(0) %pad.2 = s32[32,2]{0,1:T(2,128)} pad(s32[32,1]{0,1:T(2,128)} %param_0.11, s32[]{:T(256)} %constant.5), padding=0_0x1_0, metadata={op_type="concatenate" op_name="xla_computation(f1)/jit(f1)/concatenate[dimension=1]" source_file="/home/ayaka/main.py" source_line=18} ROOT %maximum.1 = s32[32,2]{0,1:T(2,128)} maximum(s32[32,2]{0,1:T(2,128)} %pad.3, s32[32,2]{0,1:T(2,128)} %pad.2), metadata={op_type="concatenate" op_name="xla_computation(f1)/jit(f1)/concatenate[dimension=1]" source_file="/home/ayaka/main.py" source_line=18} } ENTRY %xla_computation_f1.21 (parameter.1: s32[32,128,1024], parameter.2: s32[32]) -> (s32[32,1024]) { %parameter.2 = s32[32]{0:T(256)} parameter(1) %parameter.1 = s32[32,128,1024]{2,1,0:T(8,128)} parameter(0) %fusion.2 = s32[32,1]{0,1:T(2,128)} fusion(s32[32]{0:T(256)} %parameter.2), kind=kLoop, calls=%fused_computation.2, metadata={op_type="broadcast_in_dim" op_name="xla_computation(f1)/jit(f1)/broadcast_in_dim[shape=(32, 1) broadcast_dimensions=(0,)]" source_file="/home/ayaka/main.py" source_line=18} %iota.0 = s32[32,1]{0,1:T(2,128)} iota(), iota_dimension=0, metadata={op_type="iota" op_name="xla_computation(f1)/jit(f1)/iota[dtype=int32 shape=(32, 1) dimension=0]" source_file="/home/ayaka/main.py" source_line=18} %fusion.3 = s32[32,2]{0,1:T(2,128)} fusion(s32[32,1]{0,1:T(2,128)} %fusion.2, s32[32,1]{0,1:T(2,128)} %iota.0), kind=kLoop, calls=%fused_computation.3, metadata={op_type="concatenate" op_name="xla_computation(f1)/jit(f1)/concatenate[dimension=1]" source_file="/home/ayaka/main.py" source_line=18} %copy = s32[32,2]{1,0:T(8,128)} copy(s32[32,2]{0,1:T(2,128)} %fusion.3), metadata={op_type="concatenate" op_name="xla_computation(f1)/jit(f1)/concatenate[dimension=1]" source_file="/home/ayaka/main.py" source_line=18} %reshape.1 = s32[64]{0:T(256)} reshape(s32[32,2]{1,0:T(8,128)} %copy) %fusion = s32[32,1024]{1,0:T(8,128)} fusion(s32[32,128,1024]{2,1,0:T(8,128)} %parameter.1, s32[64]{0:T(256)} %reshape.1), kind=kCustom, calls=%fused_computation, metadata={op_type="gather" 
op_name="xla_computation(f1)/jit(f1)/gather[dimension_numbers=GatherDimensionNumbers(offset_dims=(1,), collapsed_slice_dims=(0, 1), start_index_map=(0, 1)) slice_sizes=(1, 1, 1024) unique_indices=True indices_are_sorted=True mode=GatherScatterMode.PROMISE_IN_BOUNDS fill_value=None]" source_file="/home/ayaka/main.py" source_line=18} ROOT %tuple.20 = (s32[32,1024]{1,0:T(8,128)}) tuple(s32[32,1024]{1,0:T(8,128)} %fusion) } ``` Method 2: `np.take_along_axis` ```llvm HloModule xla_computation_f2.43, is_scheduled=true %fused_computation (param_0.2: s32[32,128,1024], param_1.3: s32[131072]) -> s32[32768] { %param_0.2 = s32[32,128,1024]{2,1,0:T(8,128)} parameter(0) %param_1.3 = s32[131072]{0:T(1024)} parameter(1) %reshape.17 = s32[32,1,1024,4]{3,2,1,0:T(8,128)} reshape(s32[131072]{0:T(1024)} %param_1.3) %slice.1 = s32[32,1,1024,3]{3,2,1,0:T(8,128)} slice(s32[32,1,1024,4]{3,2,1,0:T(8,128)} %reshape.17), slice={[0:32], [0:1], [0:1024], [0:3]} %transpose.6 = s32[32,1,1024,3]{3,2,1,0:T(8,128)} transpose(s32[32,1,1024,3]{3,2,1,0:T(8,128)} %slice.1), dimensions={0,1,2,3} %gather.3 = s32[32,1,1024]{2,1,0:T(2,128)} gather(s32[32,128,1024]{2,1,0:T(8,128)} %param_0.2, s32[32,1,1024,3]{3,2,1,0:T(8,128)} %transpose.6), offset_dims={}, collapsed_slice_dims={0,1,2}, start_index_map={0,1,2}, index_vector_dim=3, slice_sizes={1,1,1}, metadata={op_type="gather" op_name="xla_computation(f2)/jit(f2)/jit(take_along_axis)/gather[dimension_numbers=GatherDimensionNumbers(offset_dims=(), collapsed_slice_dims=(0, 1, 2), start_index_map=(0, 1, 2)) slice_sizes=(1, 1, 1) unique_indices=False indices_are_sorted=False mode=GatherScatterMode.PROMISE_IN_BOUNDS fill_value=None]" source_file="/home/ayaka/main.py" source_line=22} %transpose.5 = s32[32,1,1024]{2,1,0:T(2,128)} transpose(s32[32,1,1024]{2,1,0:T(2,128)} %gather.3), dimensions={0,1,2} ROOT %reshape.16 = s32[32768]{0:T(1024)} reshape(s32[32,1,1024]{2,1,0:T(2,128)} %transpose.5) } %fused_computation.1 (param_0.4: s32[32,1,1024,1], param_1.14: s32[32,1,1024,1], param_2.7: s32[32,1,1024,1]) -> s32[32,1,1024,3] { %param_2.7 = s32[32,1,1024,1]{2,0,3,1:T(8,128)} parameter(2) %constant.14 = s32[]{:T(256)} constant(-2147483648) %pad.6 = s32[32,1,1024,3]{2,0,3,1:T(8,128)} pad(s32[32,1,1024,1]{2,0,3,1:T(8,128)} %param_2.7, s32[]{:T(256)} %constant.14), padding=0_0x0_0x0_0x0_2, metadata={op_type="concatenate" op_name="xla_computation(f2)/jit(f2)/jit(take_along_axis)/concatenate[dimension=3]" source_file="/home/ayaka/main.py" source_line=22} %param_1.14 = s32[32,1,1024,1]{2,0,3,1:T(8,128)} parameter(1) %pad.5 = s32[32,1,1024,3]{2,0,3,1:T(8,128)} pad(s32[32,1,1024,1]{2,0,3,1:T(8,128)} %param_1.14, s32[]{:T(256)} %constant.14), padding=0_0x0_0x0_0x1_1, metadata={op_type="concatenate" op_name="xla_computation(f2)/jit(f2)/jit(take_along_axis)/concatenate[dimension=3]" source_file="/home/ayaka/main.py" source_line=22} %maximum.3 = s32[32,1,1024,3]{2,0,3,1:T(8,128)} maximum(s32[32,1,1024,3]{2,0,3,1:T(8,128)} %pad.6, s32[32,1,1024,3]{2,0,3,1:T(8,128)} %pad.5), metadata={op_type="concatenate" op_name="xla_computation(f2)/jit(f2)/jit(take_along_axis)/concatenate[dimension=3]" source_file="/home/ayaka/main.py" source_line=22} %param_0.4 = s32[32,1,1024,1]{2,0,3,1:T(8,128)} parameter(0) %pad.4 = s32[32,1,1024,3]{2,0,3,1:T(8,128)} pad(s32[32,1,1024,1]{2,0,3,1:T(8,128)} %param_0.4, s32[]{:T(256)} %constant.14), padding=0_0x0_0x0_0x2_0, metadata={op_type="concatenate" op_name="xla_computation(f2)/jit(f2)/jit(take_along_axis)/concatenate[dimension=3]" source_file="/home/ayaka/main.py" 
source_line=22} ROOT %maximum.2 = s32[32,1,1024,3]{2,0,3,1:T(8,128)} maximum(s32[32,1,1024,3]{2,0,3,1:T(8,128)} %maximum.3, s32[32,1,1024,3]{2,0,3,1:T(8,128)} %pad.4), metadata={op_type="concatenate" op_name="xla_computation(f2)/jit(f2)/jit(take_along_axis)/concatenate[dimension=3]" source_file="/home/ayaka/main.py" source_line=22} } %fused_computation.2 (param_0.6: s32[32], param_1.9: s32[32], param_2.6: pred[32]) -> s32[32,1,1024,1] { %param_2.6 = pred[32]{0:T(1024)(128)(4,1)} parameter(2) %broadcast.29 = pred[32,1,1024,1]{2,0,3,1:T(8,128)(4,1)} broadcast(pred[32]{0:T(1024)(128)(4,1)} %param_2.6), dimensions={0}, metadata={op_type="broadcast_in_dim" op_name="xla_computation(f2)/jit(f2)/jit(take_along_axis)/broadcast_in_dim[shape=(32, 1, 1024) broadcast_dimensions=(0, 1)]" source_file="/home/ayaka/main.py" source_line=22} %param_1.9 = s32[32]{0:T(256)} parameter(1) %broadcast.27 = s32[32,1,1024,1]{2,0,3,1:T(8,128)} broadcast(s32[32]{0:T(256)} %param_1.9), dimensions={0}, metadata={op_type="broadcast_in_dim" op_name="xla_computation(f2)/jit(f2)/jit(take_along_axis)/broadcast_in_dim[shape=(32, 1, 1024) broadcast_dimensions=(0, 1)]" source_file="/home/ayaka/main.py" source_line=22} %param_0.6 = s32[32]{0:T(256)} parameter(0) %broadcast.26 = s32[32,1,1024,1]{2,0,3,1:T(8,128)} broadcast(s32[32]{0:T(256)} %param_0.6), dimensions={0}, metadata={op_type="broadcast_in_dim" op_name="xla_computation(f2)/jit(f2)/jit(take_along_axis)/broadcast_in_dim[shape=(32, 1, 1024) broadcast_dimensions=(0, 1)]" source_file="/home/ayaka/main.py" source_line=22} ROOT %select.4 = s32[32,1,1024,1]{2,0,3,1:T(8,128)} select(pred[32,1,1024,1]{2,0,3,1:T(8,128)(4,1)} %broadcast.29, s32[32,1,1024,1]{2,0,3,1:T(8,128)} %broadcast.27, s32[32,1,1024,1]{2,0,3,1:T(8,128)} %broadcast.26), metadata={op_type="select_n" op_name="xla_computation(f2)/jit(f2)/jit(take_along_axis)/select_n" source_file="/home/ayaka/main.py" source_line=22} } %fused_computation.3 (param_0.7: s32[32]) -> s32[32] { %param_0.7 = s32[32]{0:T(256)} parameter(0) %constant.16 = s32[]{:T(256)} constant(128), metadata={op_type="add" op_name="xla_computation(f2)/jit(f2)/jit(take_along_axis)/add" source_file="/home/ayaka/main.py" source_line=22} %broadcast.30 = s32[32]{0:T(256)} broadcast(s32[]{:T(256)} %constant.16), dimensions={}, metadata={op_type="add" op_name="xla_computation(f2)/jit(f2)/jit(take_along_axis)/add" source_file="/home/ayaka/main.py" source_line=22} ROOT %add.4 = s32[32]{0:T(256)} add(s32[32]{0:T(256)} %param_0.7, s32[32]{0:T(256)} %broadcast.30), metadata={op_type="add" op_name="xla_computation(f2)/jit(f2)/jit(take_along_axis)/add" source_file="/home/ayaka/main.py" source_line=22} } %fused_computation.4 (param_0.8: s32[32]) -> pred[32] { %param_0.8 = s32[32]{0:T(256)} parameter(0) %constant.17 = s32[]{:T(256)} constant(0), metadata={op_type="lt" op_name="xla_computation(f2)/jit(f2)/jit(take_along_axis)/lt" source_file="/home/ayaka/main.py" source_line=22} %broadcast.31 = s32[32]{0:T(256)} broadcast(s32[]{:T(256)} %constant.17), dimensions={}, metadata={op_type="lt" op_name="xla_computation(f2)/jit(f2)/jit(take_along_axis)/lt" source_file="/home/ayaka/main.py" source_line=22} ROOT %compare.4 = pred[32]{0:T(1024)(128)(4,1)} compare(s32[32]{0:T(256)} %param_0.8, s32[32]{0:T(256)} %broadcast.31), direction=LT, metadata={op_type="lt" op_name="xla_computation(f2)/jit(f2)/jit(take_along_axis)/lt" source_file="/home/ayaka/main.py" source_line=22} } ENTRY %xla_computation_f2.43 (parameter.1: s32[32,128,1024], parameter.2: s32[32]) -> (s32[32,1024]) { 
%constant.2 = s32[]{:T(256)} constant(0), metadata={op_type="lt" op_name="xla_computation(f2)/jit(f2)/jit(take_along_axis)/lt" source_file="/home/ayaka/main.py" source_line=22} %parameter.2 = s32[32]{0:T(256)} parameter(1), metadata={op_type="broadcast_in_dim" op_name="xla_computation(f2)/jit(f2)/jit(take_along_axis)/broadcast_in_dim[shape=(32, 1, 1024) broadcast_dimensions=(0, 1)]" source_file="/home/ayaka/main.py" source_line=22} %parameter.1 = s32[32,128,1024]{2,1,0:T(8,128)} parameter(0) %fusion.3 = s32[32]{0:T(256)} fusion(s32[32]{0:T(256)} %parameter.2), kind=kLoop, calls=%fused_computation.3, metadata={op_type="add" op_name="xla_computation(f2)/jit(f2)/jit(take_along_axis)/add" source_file="/home/ayaka/main.py" source_line=22} %fusion.4 = pred[32]{0:T(1024)(128)(4,1)} fusion(s32[32]{0:T(256)} %parameter.2), kind=kLoop, calls=%fused_computation.4, metadata={op_type="lt" op_name="xla_computation(f2)/jit(f2)/jit(take_along_axis)/lt" source_file="/home/ayaka/main.py" source_line=22} %fusion.2 = s32[32,1,1024,1]{2,0,3,1:T(8,128)} fusion(s32[32]{0:T(256)} %parameter.2, s32[32]{0:T(256)} %fusion.3, pred[32]{0:T(1024)(128)(4,1)} %fusion.4), kind=kLoop, calls=%fused_computation.2, metadata={op_type="select_n" op_name="xla_computation(f2)/jit(f2)/jit(take_along_axis)/select_n" source_file="/home/ayaka/main.py" source_line=22} %iota.5 = s32[32,1,1024,1]{2,0,3,1:T(8,128)} iota(), iota_dimension=2, metadata={op_type="broadcast_in_dim" op_name="xla_computation(f2)/jit(f2)/jit(take_along_axis)/broadcast_in_dim[shape=(32, 1, 1024, 1) broadcast_dimensions=(2,)]" source_file="/home/ayaka/main.py" source_line=22} %iota.4 = s32[32,1,1024,1]{2,0,3,1:T(8,128)} iota(), iota_dimension=0, metadata={op_type="broadcast_in_dim" op_name="xla_computation(f2)/jit(f2)/jit(take_along_axis)/broadcast_in_dim[shape=(32, 1, 1024, 1) broadcast_dimensions=(0,)]" source_file="/home/ayaka/main.py" source_line=22} %fusion.1 = s32[32,1,1024,3]{2,0,3,1:T(8,128)} fusion(s32[32,1,1024,1]{2,0,3,1:T(8,128)} %iota.5, s32[32,1,1024,1]{2,0,3,1:T(8,128)} %fusion.2, s32[32,1,1024,1]{2,0,3,1:T(8,128)} %iota.4), kind=kLoop, calls=%fused_computation.1, metadata={op_type="concatenate" op_name="xla_computation(f2)/jit(f2)/jit(take_along_axis)/concatenate[dimension=3]" source_file="/home/ayaka/main.py" source_line=22} %copy.1 = s32[32,1,1024,3]{3,2,0,1:T(8,128)} copy(s32[32,1,1024,3]{2,0,3,1:T(8,128)} %fusion.1) %pad.clone = s32[32,1,1024,4]{3,2,0,1:T(8,128)} pad(s32[32,1,1024,3]{3,2,0,1:T(8,128)} %copy.1, s32[]{:T(256)} %constant.2), padding=0_0x0_0x0_0x0_1 %reshape.12 = s32[131072]{0:T(1024)} reshape(s32[32,1,1024,4]{3,2,0,1:T(8,128)} %pad.clone) %fusion = s32[32768]{0:T(1024)} fusion(s32[32,128,1024]{2,1,0:T(8,128)} %parameter.1, s32[131072]{0:T(1024)} %reshape.12), kind=kCustom, calls=%fused_computation, metadata={op_type="gather" op_name="xla_computation(f2)/jit(f2)/jit(take_along_axis)/gather[dimension_numbers=GatherDimensionNumbers(offset_dims=(), collapsed_slice_dims=(0, 1, 2), start_index_map=(0, 1, 2)) slice_sizes=(1, 1, 1) unique_indices=False indices_are_sorted=False mode=GatherScatterMode.PROMISE_IN_BOUNDS fill_value=None]" source_file="/home/ayaka/main.py" source_line=22} %reshape.7 = s32[32,1024]{1,0:T(8,128)} reshape(s32[32768]{0:T(1024)} %fusion), metadata={op_type="gather" op_name="xla_computation(f2)/jit(f2)/jit(take_along_axis)/gather[dimension_numbers=GatherDimensionNumbers(offset_dims=(), collapsed_slice_dims=(0, 1, 2), start_index_map=(0, 1, 2)) slice_sizes=(1, 1, 1) unique_indices=False indices_are_sorted=False 
mode=GatherScatterMode.PROMISE_IN_BOUNDS fill_value=None]" source_file="/home/ayaka/main.py" source_line=22} ROOT %tuple.42 = (s32[32,1024]{1,0:T(8,128)}) tuple(s32[32,1024]{1,0:T(8,128)} %reshape.7) } ```
Thanks for the report. I agree with your conclusions. I filed a bug with the TPU compiler team (Google bug b/229234664).

The plot thickens. On reflection this is sort of an XLA bug and sort of not an XLA bug. It's an XLA bug in the sense that XLA perhaps should be able to optimize the HLO we are generating, but JAX perhaps could generate more optimal HLO. In particular, we broadcast the input shapes in the implementation of `jnp.take_along_axis` and that does bad things to performance. Maybe we can avoid that broadcasting on the JAX side. However, we added that broadcasting to fix [#1521](https://github.com/google/jax/issues/1521). So to remove the broadcasting I'll need to remember what went wrong for that issue.
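To reproduce this kind of comparison, one option (a sketch using `jax.xla_computation`, the same API that produced the module names in the dumps above) is to print the HLO that each formulation hands to the compiler and compare the broadcasts feeding the gather:

```python
import jax
import jax.numpy as jnp
from operator import getitem

def f1(x, y):
    return jax.vmap(getitem)(x, y)

def f2(x, y):
    return jnp.take_along_axis(x, y[..., None, None], axis=1)[:, 0]

x = jnp.zeros((32, 128, 1024), dtype=jnp.int32)
y = jnp.zeros((32,), dtype=jnp.int32)

# Pre-optimization HLO for both versions; the take_along_axis one contains
# the extra broadcasts of the operand/index shapes discussed above.
print(jax.xla_computation(f1)(x, y).as_hlo_text())
print(jax.xla_computation(f2)(x, y).as_hlo_text())
```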
2022-04-14T19:59:54
google/jax
10,308
google__jax-10308
[ "10300" ]
0c1021ad4b8200e946275e0c97a45d64a8f7f208
diff --git a/jax/_src/nn/functions.py b/jax/_src/nn/functions.py --- a/jax/_src/nn/functions.py +++ b/jax/_src/nn/functions.py @@ -379,7 +379,7 @@ def _one_hot(x: Array, num_classes: int, *, f"but {num_classes} != {axis_size}") from None axis_idx = lax.axis_index(axis) return jnp.asarray(x == axis_idx, dtype=dtype) - axis = operator.index(axis) + axis = operator.index(axis) # type: ignore[arg-type] lhs = lax.expand_dims(x, (axis,)) rhs_shape = [1] * x.ndim rhs_shape.insert(output_pos_axis, num_classes) diff --git a/jax/interpreters/xla.py b/jax/interpreters/xla.py --- a/jax/interpreters/xla.py +++ b/jax/interpreters/xla.py @@ -157,7 +157,7 @@ def sharding_to_proto(sharding: SpatialSharding): proto.type = xc.OpSharding.Type.REPLICATED else: proto.type = xc.OpSharding.Type.OTHER - proto.tile_assignment_dimensions = list(sharding) + proto.tile_assignment_dimensions = list(sharding) # type: ignore proto.tile_assignment_devices = list(range(np.product(sharding))) # type: ignore return proto
pre-commit mypy check seems outdated https://github.com/google/jax/blob/375777f43c919dd4133c4b438c31f920e0dad6a6/.pre-commit-config.yaml#L16-L21 It seems that it uses `jaxlib==0.1.74`. In addition, the `mypy` version can be updated to `v0.942`.
I also can't pass the mypy typecheck locally on the main branch:
```
jax/_src/lib/__init__.py:111: error: Skipping analyzing "jaxlib.cpu_feature_guard": module is installed, but missing library stubs or py.typed marker [import]
jax/_src/lib/__init__.py:111: note: See https://mypy.readthedocs.io/en/stable/running_mypy.html#missing-imports
jax/_src/lib/__init__.py:124: error: Skipping analyzing "jaxlib.cusolver": module is installed, but missing library stubs or py.typed marker [import]
jax/_src/lib/__init__.py:129: error: Skipping analyzing "jaxlib.hipsolver": module is installed, but missing library stubs or py.typed marker [import]
jax/_src/lib/__init__.py:134: error: Skipping analyzing "jaxlib.cusparse": module is installed, but missing library stubs or py.typed marker [import]
jax/_src/lib/__init__.py:139: error: Skipping analyzing "jaxlib.hipsparse": module is installed, but missing library stubs or py.typed marker [import]
jax/_src/lib/__init__.py:147: error: Skipping analyzing "jaxlib.cuda_prng": module is installed, but missing library stubs or py.typed marker [import]
jax/_src/lib/__init__.py:152: error: Skipping analyzing "jaxlib.hip_prng": module is installed, but missing library stubs or py.typed marker [import]
jax/_src/lib/__init__.py:157: error: Skipping analyzing "jaxlib.cuda_linalg": module is installed, but missing library stubs or py.typed marker [import]
jax/_src/lib/__init__.py:162: error: Skipping analyzing "jaxlib.hip_linalg": module is installed, but missing library stubs or py.typed marker [import]
jax/interpreters/xla.py:160: error: Argument 1 to "list" has incompatible type "Union[Tuple[int, ...], Tuple[Optional[Tuple[int, ...]], ...]]"; expected "Iterable[int]" [arg-type]
jax/_src/nn/functions.py:382: error: Argument 1 to "index" has incompatible type "Union[int, Hashable]"; expected "SupportsIndex" [arg-type]
jax/experimental/jax2tf/converters_eval/examples.py:19: error: Cannot find implementation or library stub for module named "flax" [import]
jax/experimental/gda_serialization/serialization.py:26: error: Cannot find implementation or library stub for module named "tensorstore" [import]
jax/tools/jax_to_ir.py:86: error: Cannot find implementation or library stub for module named "tensorflow" [import]
jax/experimental/jax2tf/converters_eval/converters.py:25: error: Cannot find implementation or library stub for module named "tensorflow" [import]
jax/experimental/jax2tf/converters_eval/converters.py:26: error: Cannot find implementation or library stub for module named "tensorflowjs.converters" [import]
Found 16 errors in 7 files (checked 257 source files)
```
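The two jax-internal errors in that list are resolved in the patch above with error-code-scoped ignores. As a generic illustration of that pattern (the function below is hypothetical, not JAX code):

```python
import operator
from typing import Hashable, Union

def normalize_axis(axis: Union[int, Hashable]) -> int:
    # mypy cannot narrow this Union to SupportsIndex, so suppress only the
    # specific error code rather than every diagnostic on the line.
    return operator.index(axis)  # type: ignore[arg-type]
```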
2022-04-15T15:55:41
google/jax
10,341
google__jax-10341
[ "10333" ]
a48752a5786e5debda219cfaccb8aa694266e6d5
diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py --- a/jax/_src/numpy/lax_numpy.py +++ b/jax/_src/numpy/lax_numpy.py @@ -3344,7 +3344,11 @@ def unpackbits(a, axis: Optional[int] = None, count=None, bitorder='big'): return swapaxes(unpacked, axis, -1) -@_wraps(np.take, skip_params=['out']) +@_wraps(np.take, skip_params=['out'], lax_description="""\ +In the JAX version, the ``mode`` argument defaults to a special version of ``"clip"`` +that handles negative indices. See :attr:`jax.numpy.ndarray.at` for more discussion +of out-of-bounds indexing in JAX. +""") def take(a, indices, axis: Optional[int] = None, out=None, mode=None): return _take(a, indices, None if axis is None else operator.index(axis), out, mode)
`jnp.take` out-of-bounds documentation incorrect Please:
- [x] Check for duplicate issues.
- [x] Provide a complete example of how to reproduce the bug, wrapped in triple backticks like this:
```
jnp.take(
    jnp.arange(5),
    jnp.arange(5 * 3),
    axis=0
)  # DeviceArray([0, 1, 2, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4], dtype=int32)

jnp.take(
    jnp.arange(5),
    jnp.arange(5 * 3),
    axis=0,
    mode="raise",
)  # NotImplementedError
```
According to the [documentation](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.take.html), the default out-of-bounds behavior is "raise". However, this is not the case in practice.
Thanks for the report - I'll work on a documentation fix.
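A small illustration of the behavior the updated docstring describes (the expected output follows from the clip-with-negative-indices semantics noted in the patch and the example in the report):

```python
import jax.numpy as jnp

x = jnp.arange(5)

# The default JAX mode is a variant of "clip" that first resolves negative
# indices Python-style and then clamps anything still out of bounds.
print(jnp.take(x, jnp.array([-1, 2, 99])))  # [4 2 4]
```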
2022-04-18T17:13:06
google/jax
10,367
google__jax-10367
[ "10366" ]
7008b3213291e672015ba3c2caad7583033c04df
diff --git a/jax/_src/custom_derivatives.py b/jax/_src/custom_derivatives.py --- a/jax/_src/custom_derivatives.py +++ b/jax/_src/custom_derivatives.py @@ -22,8 +22,9 @@ from jax import core from jax import linear_util as lu from jax.custom_transpose import custom_transpose -from jax.tree_util import (tree_flatten, tree_unflatten, tree_map, treedef_is_leaf, - treedef_tuple, register_pytree_node_class) +from jax.tree_util import (tree_flatten, tree_unflatten, tree_map, + treedef_is_leaf, treedef_tuple, + register_pytree_node_class, tree_leaves) from jax._src import custom_api_util from jax._src import dtypes from jax._src.util import cache, safe_zip, safe_map, split_list, Unhashable @@ -564,21 +565,17 @@ def __call__(self, *args: Any, **kwargs: Any) -> ReturnValue: # pytype: disable out_tree = aux if fst else aux[0] return tree_unflatten(out_tree, out_flat) -@partial(partial, tree_map) def _check_for_tracers(x): - if isinstance(x, core.Tracer): - msg = ("Found a JAX Tracer object passed as an argument to a custom_vjp " - "function in a position indicated by nondiff_argnums as " - "non-differentiable. Tracers cannot be passed as non-differentiable " - "arguments to custom_vjp functions; instead, nondiff_argnums should " - "only be used for arguments that can't be or contain JAX tracers, " - "e.g. function-valued arguments. In particular, array-valued " - "arguments should typically not be indicated as nondiff_argnums. " - "\n\n" - "This behavior recently changed in JAX. " - "See https://github.com/google/jax/blob/main/docs/custom_vjp_update.md " - "for more information.") - raise UnexpectedTracerError(msg) + for leaf in tree_leaves(x): + if isinstance(x, core.Tracer): + msg = ("Found a JAX Tracer object passed as an argument to a custom_vjp " + "function in a position indicated by nondiff_argnums as " + "non-differentiable. Tracers cannot be passed as non-differentiable " + "arguments to custom_vjp functions; instead, nondiff_argnums should " + "only be used for arguments that can't be or contain JAX tracers, " + "e.g. function-valued arguments. In particular, array-valued " + "arguments should typically not be indicated as nondiff_argnums.") + raise UnexpectedTracerError(msg) @lu.transformation_with_aux def _flatten_fwd(in_tree, *args): @@ -610,11 +607,11 @@ def _flatten_bwd(in_tree, in_avals, out_trees, *args): zero = object() # non-pytree sentinel to replace Nones in py_cts_in dummy = tree_unflatten(in_tree, [object()] * in_tree.num_leaves) cts_in_flat = [] - append_cts = lambda x, d: cts_in_flat.extend([x] * len(tree_flatten(d)[0])) + append = lambda x, d: cts_in_flat.extend([x] * len(tree_flatten(d)[0])) or x try: if not isinstance(py_cts_in, tuple): raise ValueError - tree_map(append_cts, + tree_map(append, tuple(zero if ct is None else ct for ct in py_cts_in), dummy) except ValueError: _, in_tree2 = tree_flatten(py_cts_in)
diff --git a/tests/api_test.py b/tests/api_test.py --- a/tests/api_test.py +++ b/tests/api_test.py @@ -6442,6 +6442,26 @@ def g_rev(res, w_bar): jtu.check_grads(h, (jnp.float32(3.14),), order=1, modes=['rev']) + def test_pytrees_not_required_to_contain_nones(self): + class A(list): + pass + + def unflatten(_, children): + assert children[0] is not None + return A(children) + + tree_util.register_pytree_node(A, lambda x: (x, None), unflatten) + + @jax.custom_vjp + def f(x): + return x[0] + def f_fwd(x): + return x[0], None + def f_bwd(_, g): + return A([g]), + f.defvjp(f_fwd, f_bwd) + + jax.grad(f)(A([1.])) # doesn't crash def transpose_unary(f, x_example): def transposed(y):
`tree_flatten` gets `None` when used in `defvjp` Please: - [x] Check for duplicate issues. - [x] Provide a complete example of how to reproduce the bug, wrapped in triple backticks like this: I wanted to define a `custom_vjp` for a function that takes a string to indicate what operations are to be conducted. Since `str` is not a validate type for JAX whereas any objects that can be flatten and unflatten as a `pytree` is, I thought of this hacky way to define my own string object: ```python import jax import jax.numpy as jnp from functools import partial OP_TO_IDX, IDX_TO_OP = {"add": 0, "mul": 1}, {0: "add", 1: "mul"} @jax.tree_util.register_pytree_node_class class OpStr(object): def __init__(self, op): assert op in OP_TO_IDX, "can only be one of the ops" self.op = op def __eq__(self, other): return self.op == other def __hash__(self): return hash(self.op) def tree_flatten(self): return ([OP_TO_IDX[self.op]]), None @classmethod def tree_unflatten(cls, aux_data, children): idx = int(children[0]) return cls(op=IDX_TO_OP[idx]) @partial(jax.custom_vjp, nondiff_argnums=(2,)) def op(x, y, op): if op == "add": z = x + y if op == "mul": z = x * y return z def op_fwd(x, y, op): cache = (x, y, op) if op == "add": z = x + y if op == "mul": z = x * y return z, cache def op_bwd(cache, dz): x, y, op = cache if op == "add": dz_dx = dz_dy = 1.0 if op == "mul": dz_dx, dz_dy = y, x return dz_dx, dz_dy op.defvjp(op_fwd, op_bwd) grad_op = jax.grad(op) grad_op(1.0, 1.0, OpStr("add")) ``` - [x] If applicable, include full error messages/tracebacks. But this gives me ``` Traceback (most recent call last): File "/Users/wangy1/Documents/GitHub/dgl/tests/jax/test_op_str.py", line 47, in <module> grad_op(1.0, 1.0, OpStr("add")) File "/Users/wangy1/Documents/GitHub/dgl/tests/jax/test_op_str.py", line 24, in tree_unflatten idx = int(children[0]) TypeError: int() argument must be a string, a bytes-like object or a real number, not 'NoneType' ``` Which is not expected since the flattened tree shouldn't be `None`.
It sounds like you're running into the issues discussed here: https://jax.readthedocs.io/en/latest/pytrees.html#custom-pytrees-and-initialization Do the recommendations there answer your question? Indeed, to underscore Jake's point, pytrees are technically required to be able to contain any Python object, just like Python tuples can. That is, pytrees are defined in terms of isomorphisms to tuples. Some bits of JAX internals code rely on that property. In this case, we're tree-mapping an assertion function which returns None (which is how Python represents 'no return value'); there's still a result pytree being built, with Nones substituted in place for what were previously leaves. But since this custom pytree node class doesn't handle containing nones, it breaks. That said, while this bit of JAX internals code is relying on something that technically is part of the pytree contract, usually it's possible for us to rewrite the code not to demand this property hold true (i.e. not to require a custom pytree type to contain Nones). I think we can do that in this case too...
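To make the pytree-initialization point above concrete, here is a minimal sketch (not the change that was merged, and only loosely based on the `OpStr` class from the report) in which the string is stored as static `aux_data`, so `tree_unflatten` never inspects its children and keeps working when JAX substitutes placeholder objects such as `None` for the leaves:

```python
import jax

@jax.tree_util.register_pytree_node_class
class OpStr:
  """Illustrative op-name wrapper: carries no leaves at all."""

  def __init__(self, op):
    self.op = op

  def tree_flatten(self):
    # No children; the string travels in the hashable, static aux_data.
    return (), self.op

  @classmethod
  def tree_unflatten(cls, aux_data, children):
    del children  # may be any placeholder sequence; it is never interpreted
    return cls(aux_data)

leaves, treedef = jax.tree_util.tree_flatten(OpStr("add"))
print(leaves)                                            # [] -- nothing to trace
print(jax.tree_util.tree_unflatten(treedef, leaves).op)  # 'add'
```

Because the wrapper has no leaves, the `None` substitution described above never reaches user code; whether that is the right design for a given use case is a separate question.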
2022-04-19T19:57:08
google/jax
10,495
google__jax-10495
[ "10485" ]
b81f57b88cdced5cb59cd6c60bcaafc01f1190ac
diff --git a/jax/_src/lax/linalg.py b/jax/_src/lax/linalg.py --- a/jax/_src/lax/linalg.py +++ b/jax/_src/lax/linalg.py @@ -1150,18 +1150,22 @@ def qr_impl(operand, full_matrices): return q, r def _qr_translation_rule(ctx, avals_in, avals_out, operand, *, full_matrices): + operand_aval, = avals_in + shape = operand_aval.shape + m, n = shape[-2:] + if m == 0 or n == 0: + return [_eye_like_xla(ctx.builder, avals_out[0]), + _zeros_like_xla(ctx.builder, avals_out[1])] return xops.QR(operand, full_matrices) def qr_abstract_eval(operand, full_matrices): if isinstance(operand, ShapedArray): if operand.ndim < 2: raise ValueError("Argument to QR decomposition must have ndims >= 2") - batch_dims = operand.shape[:-2] - m = operand.shape[-2] - n = operand.shape[-1] + *batch_dims, m, n = operand.shape k = m if full_matrices else min(m, n) - q = operand.update(shape=batch_dims + (m, k)) - r = operand.update(shape=batch_dims + (k, n)) + q = operand.update(shape=(*batch_dims, m, k)) + r = operand.update(shape=(*batch_dims, k, n)) else: q = operand r = operand @@ -1193,6 +1197,13 @@ def qr_batching_rule(batched_args, batch_dims, full_matrices): x = batching.moveaxis(x, bd, 0) return qr_p.bind(x, full_matrices=full_matrices), (0, 0) +def _empty_qr(a, *, full_matrices): + *batch_shape, m, n = a.shape + k = m if full_matrices else min(m, n) + q = jnp.broadcast_to(jnp.eye(m, k, dtype=a.dtype), (*batch_shape, m, k)) + r = jnp.empty((*batch_shape, k, n), dtype=a.dtype) + return [q, r] + def _qr_cpu_gpu_lowering(geqrf_impl, orgqr_impl, ctx, operand, *, full_matrices): operand_aval, = ctx.avals_in @@ -1200,6 +1211,11 @@ def _qr_cpu_gpu_lowering(geqrf_impl, orgqr_impl, ctx, operand, *, dims = operand_aval.shape m, n = dims[-2:] batch_dims = dims[:-2] + + if m == 0 or n == 0: + return mlir.lower_fun(_empty_qr, multiple_results=True)( + ctx, operand, full_matrices=full_matrices) + r, tau, info_geqrf = geqrf_impl(operand_aval.dtype, operand) if m < n: q = mhlo.SliceOp(r,
diff --git a/tests/linalg_test.py b/tests/linalg_test.py --- a/tests/linalg_test.py +++ b/tests/linalg_test.py @@ -654,22 +654,7 @@ def testJspSVDBasic(self): {"testcase_name": "_shape={}_mode={}".format( jtu.format_shape_dtype_string(shape, dtype), mode), "shape": shape, "dtype": dtype, "mode": mode} - for shape in [(3, 4), (3, 3), (4, 3)] - for dtype in [np.float32] - for mode in ["full", "r", "economic"])) - def testScipyQrModes(self, shape, dtype, mode): - rng = jtu.rand_default(self.rng()) - jsp_func = partial(jax.scipy.linalg.qr, mode=mode) - sp_func = partial(scipy.linalg.qr, mode=mode) - args_maker = lambda: [rng(shape, dtype)] - self._CheckAgainstNumpy(sp_func, jsp_func, args_maker, rtol=1E-5, atol=1E-5) - self._CompileAndCheck(jsp_func, args_maker) - - @parameterized.named_parameters(jtu.cases_from_list( - {"testcase_name": "_shape={}_mode={}".format( - jtu.format_shape_dtype_string(shape, dtype), mode), - "shape": shape, "dtype": dtype, "mode": mode} - for shape in [(3, 4), (3, 3), (4, 3)] + for shape in [(0, 2), (2, 0), (3, 4), (3, 3), (4, 3)] for dtype in [np.float32] for mode in ["reduced", "r", "full", "complete"])) def testNumpyQrModes(self, shape, dtype, mode): @@ -686,7 +671,7 @@ def testNumpyQrModes(self, shape, dtype, mode): {"testcase_name": "_shape={}_fullmatrices={}".format( jtu.format_shape_dtype_string(shape, dtype), full_matrices), "shape": shape, "dtype": dtype, "full_matrices": full_matrices} - for shape in [(1, 1), (3, 3), (3, 4), (2, 10, 5), (2, 200, 100)] + for shape in [(0, 0), (2, 0), (0, 2), (3, 3), (3, 4), (2, 10, 5), (2, 200, 100)] for dtype in float_types + complex_types for full_matrices in [False, True])) def testQr(self, shape, dtype, full_matrices): @@ -713,11 +698,12 @@ def testQr(self, shape, dtype, full_matrices): # Norm, adjusted for dimension and type. def norm(x): n = np.linalg.norm(x, axis=(-2, -1)) - return n / (max_rank * jnp.finfo(dtype).eps) + return n / (max(1, max_rank) * jnp.finfo(dtype).eps) def compare_orthogonal(q1, q2): # Q is unique up to sign, so normalize the sign first. - sum_of_ratios = np.sum(np.divide(q1, q2), axis=-2, keepdims=True) + ratio = np.divide(np.where(q2 == 0, 0, q1), np.where(q2 == 0, 1, q2)) + sum_of_ratios = ratio.sum(axis=-2, keepdims=True) phases = np.divide(sum_of_ratios, np.abs(sum_of_ratios)) q1 *= phases self.assertTrue(np.all(norm(q1 - q2) < 30)) @@ -1334,6 +1320,22 @@ def testExpm(self, n, dtype): self._CheckAgainstNumpy(osp_fun, jsp_fun_triu, args_maker_triu) self._CompileAndCheck(jsp_fun_triu, args_maker_triu) + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": "_shape={}_mode={}".format( + jtu.format_shape_dtype_string(shape, dtype), mode), + "shape": shape, "dtype": dtype, "mode": mode} + # Skip empty shapes because scipy fails: https://github.com/scipy/scipy/issues/1532 + for shape in [(3, 4), (3, 3), (4, 3)] + for dtype in [np.float32] + for mode in ["full", "r", "economic"])) + def testScipyQrModes(self, shape, dtype, mode): + rng = jtu.rand_default(self.rng()) + jsp_func = partial(jax.scipy.linalg.qr, mode=mode) + sp_func = partial(scipy.linalg.qr, mode=mode) + args_maker = lambda: [rng(shape, dtype)] + self._CheckAgainstNumpy(sp_func, jsp_func, args_maker, rtol=1E-5, atol=1E-5) + self._CompileAndCheck(jsp_func, args_maker) + @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_n={}".format(jtu.format_shape_dtype_string((n,n), dtype)),
lax.linalg.qr fails for empty matrices It should instead return empty `q` and `r` matrices. This arose in #10458 ```python In [1]: import jax.numpy as jnp In [2]: from jax import lax In [3]: lax.linalg.qr(jnp.empty((0, 0))) WARNING:absl:No GPU/TPU found, falling back to CPU. (Set TF_CPP_MIN_LOG_LEVEL=0 and rerun for more info.) ** On entry to SGEQRF parameter number 4 had an illegal value ** On entry to SORGQR parameter number 5 had an illegal value --------------------------------------------------------------------------- RuntimeError Traceback (most recent call last) <ipython-input-4-ed0ed6237394> in <module> ----> 1 lax.linalg.qr(jnp.empty((0, 0))) <snip> ~/github/google/jax/jax/_src/dispatch.py in backend_compile(backend, built_c, options) 646 # we use a separate function call to ensure that XLA compilation appears 647 # separately in Python profiling results --> 648 return backend.compile(built_c, compile_options=options) 649 650 # TODO(phawkins): update users. RuntimeError: INVALID_ARGUMENT: shape has invalid element type: ```
It looks like the problem is the LDA argument to GEQRF is being set to `M`, when it must be of `size >= max(1, M)`. So when `M` is zero, the argument is invalid. I'm trying to figure out exactly where this is set... Maybe @hawkinsp has ideas? I'd probably just avoid calling the LAPACK implementation if `m == 0`, if it is unhappy when that happens. We do something similar for SVD already: https://github.com/google/jax/blob/cdd11670959b930e01d23199fadb32b183f03ca5/jax/_src/lax/linalg.py#L1392 Thanks for the tip - I'll work on a fix.
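For readers who just want the behaviour described above without patching JAX, here is a rough standalone sketch of the "skip LAPACK when a dimension is zero" idea; it mirrors the `_empty_qr` helper in the patch, but the wrapper name and the choice of zeros for `r` are mine:

```python
import jax.numpy as jnp
from jax import lax

def qr_with_empty_support(a, full_matrices=False):
  # Shapes are static, so this Python branch is resolved at trace time.
  *batch, m, n = a.shape
  if m == 0 or n == 0:
    k = m if full_matrices else min(m, n)
    q = jnp.broadcast_to(jnp.eye(m, k, dtype=a.dtype), (*batch, m, k))
    r = jnp.zeros((*batch, k, n), dtype=a.dtype)
    return q, r
  return lax.linalg.qr(a, full_matrices=full_matrices)

q, r = qr_with_empty_support(jnp.empty((0, 0)))
print(q.shape, r.shape)   # (0, 0) (0, 0)
```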
2022-04-29T17:02:56
google/jax
10,500
google__jax-10500
[ "10491" ]
dc6282c424e7becd5ed0cdb7ee9bcb1e0edf0c72
diff --git a/jax/_src/dtypes.py b/jax/_src/dtypes.py --- a/jax/_src/dtypes.py +++ b/jax/_src/dtypes.py @@ -375,11 +375,20 @@ def _lattice_result_type(*args): result_type = _least_upper_bound(*{_jax_type(d, w) for d, w in zip(dtypes, weak_types)}) return dtype(result_type), any(result_type is t for t in _weak_types) -def result_type(*args): - """Convenience function to apply JAX argument dtype promotion.""" +def result_type(*args, return_weak_type_flag=False): + """Convenience function to apply JAX argument dtype promotion. + + Args: + return_weak_type_flag : if True, then return a ``(dtype, weak_type)`` tuple. + If False, just return `dtype` + + Returns: + dtype or (dtype, weak_type) depending on the value of the ``return_weak_type`` argument. + """ if len(args) == 0: raise ValueError("at least one array or dtype is required") dtype, weak_type = _lattice_result_type(*(float_ if arg is None else arg for arg in args)) if weak_type: dtype = _default_types['f' if dtype == _bfloat16_dtype else dtype.kind] - return canonicalize_dtype(dtype) + dtype = canonicalize_dtype(dtype) + return (dtype, weak_type) if return_weak_type_flag else dtype
diff --git a/tests/dtypes_test.py b/tests/dtypes_test.py --- a/tests/dtypes_test.py +++ b/tests/dtypes_test.py @@ -265,6 +265,15 @@ def testResultTypeNone(self): # This matches the behavior of np.result_type(None) => np.float_ self.assertEqual(dtypes.result_type(None), dtypes.canonicalize_dtype(dtypes.float_)) + def testResultTypeWeakFlag(self): + float_ = dtypes.canonicalize_dtype(dtypes.float_) + x_weak = jnp.array(1.) + x_strong = x_weak.astype(float_) + self.assertEqual(dtypes.result_type(x_weak), float_) + self.assertEqual(dtypes.result_type(x_weak, return_weak_type_flag=True), (float_, True)) + self.assertEqual(dtypes.result_type(x_strong), float_) + self.assertEqual(dtypes.result_type(x_strong, return_weak_type_flag=True), (float_, False)) + @jtu.ignore_warning(category=UserWarning, message="Explicitly requested dtype.*") def testObservedPromotionTable(self):
jnp.promote_dtype does not preserve weak types Assuming the type promotion lattice presented in the [great documentation](https://jax.readthedocs.io/en/latest/type_promotion.html) by @jakevdp, I'd assume that the promotion of two python-native `float*` would be a new `float*`. This is confirmed by the table in the same page. However, ``` >>> jnp.promote_types(float, float) dtype('float64') # I expected float* ``` I do understand that there might be internal reasons why this is useful. I guess that internally jax uses this API to do some type promotion before calling lax primitives. However, if someone tries to use this API to build objects that respect the same type-promotion semantics as Jax's arrays (like I need to do in NetKet for some lazy objects representing sums of matrices), I cannot use `jnp.promote_types` because it discards the `weak` attribute and promotes everything to non-weak types. I acknowledge it's not a mainstream use-case, but it would be useful if Jax exposed an API that performs the same type promotion as jax does internally.
Thanks for the question - this is due to an implementation detail. `jnp.promote_types` is JAX's implementation of numpy's `np.promote_types`, which returns a dtype. A dtype cannot be weak – `weak_type` is a property of JAX arrays, not of dtypes, and so there's no way for `jnp.promote_types` to return a weak type. It's not allowed by the API, which is defined by numpy, not by JAX. Nevertheless, in binary operations between weak types, you'll see that weakness is preserved:: ```python >>> jnp.add(1.0, 1.0) DeviceArray(2., dtype=float32, weak_type=True) ``` Ah, I see. That's unfortunate. I'm essentially trying to implement something like: ```python @pytree class MatrixSum: def __init__(self, mat1: ArrayLike, mat2: ArrayLike, dtype=None ): if dtype is None: dtype = jnp.promote_dtypes(mat1, mat2) self._dtype = dtype self._mat1 = jnp.asarray(mat1, dtype=dtype) self._mat2 = jnp.asarray(mat2, dtype=dtype) @property def dtype(self): return self._dtype def compute_something(self) -> Array[self.dtype]: # return a Jax array that might be weakly typed if mat1 and mat2 where. ``` And I wish this behaved (at least within the boundaries of my API) as a jax array, propagating weak and strong dtypes of the input `mat1` and `mat2`. But I see that it's impossible to do that within the realms of only using `dtype`. I'll have to find a new solution... There are APIs within `jax.dtypes` that return `dtype, weak_type` pairs for type promotion. That's essentially what JAX uses to implement binary type promotion. thanks... I think what I'd need would be to use `jax._src.dtypes._lattice_result_type` (which is not exported in `jax.dtypes`) and to be able to declare my types as weakly typed, for example by adding a property `MyType.weak_type` to an object Right now, if an object has a `dtype` property, numpy correctly recognises it. That's not the same for jax. ```python >>> class Test: ... def __init__(self, dtype, weak): ... self.dtype = dtype ... self.weak_type = weak ... >>> t = Test(np.float32, True) >>> np.result_type(t, np.float64) dtype('float64') >>> jnp.result_type(t, np.float64) dtype('float64') >>> jax._src.dtypes.is_weakly_typed(t) False ``` you can trick jax by adding an `MyType.aval.weak_type` field but that's a bit convoluted ```python >>> t.aval = t >>> jax._src.dtypes.is_weakly_typed(t) True ``` In short, what I am saying is that it would be useful if jax stabilised the `weak_dtype` interface by - recognising a field `.weak_type` in objects much like it recognises `.dtype` to the extent of type promotion - exposeed `jax._src.dtypes._lattice_result_type` as a public API, or a similar function that returns both the resulting dtype and the `weak_dtype` boolean flag. Thanks, I agree we should have some pulic API for dtype promotion that preserves weak types. What would you think about adding keyword to `jax.dtypes.promote_types` that would allow it to return a `(dtype, weak_type)` pair? Regarding the second request ("recognizing a field `.weak_type` in objects") can you say more? I'm not sure I understand exactly what you have in mind. What functions should recognize this attribute, and in what context? >Thanks, I agree we should have some pulic API for dtype promotion that preserves weak types. What would you think about adding keyword to `jax.dtypes.promote_types` that would allow it to return a (dtype, weak_type) pair? I would very much like that, yes. > Regarding the second request ("recognizing a field .weak_type in objects") can you say more? I'm not sure I understand exactly what you have in mind. 
What functions should recognize this attribute, and in what context? Right now jax and numpy `(j)np.result_type` works with arbitrary types, as long as they expose a `CustomType.dtype` attribute (see the example above, with the custom type `Test`. I would propose that `jax.dtypes.promote_types` should define a similar protocol, where it will check for the attribute `CustomType.weak_type` to determine if an object has a weak type, as well as the `.dtype` attribute that it already checks as part of numpy api. Note that what I propose would be necessary to make `jax.dtypes.promote_types` extensible and compatible with user-defined types. For example: ```python >>> class Test: ... def __init__(self, dtype, weak): ... self.dtype = dtype ... self.weak_type = weak ... >>> t = Test(np.float64, True, return_weak_flag=True) >>> jax.dtypes.promote_types(t, jnp.array(3.0, dtype=jnp.float32)) (jnp.float32, False) >>> jax.dtypes.promote_types(t, jnp.array(3.0), return_weak_flag=True) (jnp.float64, True) ``` Hmm, at first glance, I'm not sure I like the idea of cementing a `weak_type` attribute into the public JAX API. While it's true that jax arrays have a `weak_type` attribute at the moment, that is an implementation detail rather than something that is intended to be a load-bearing API. For example, we've talked about moving JAX to its own dtype system where weak dtypes are actual dtypes, and the `weak_type` attribute can go away. A few other options that might work and be more future-proof: - we could use something like the existing `__jax_array__` method within type promotion of custom objects. Then you could have it return an appropriate jax array - we could let custom objects export an `aval` attribute that will return a jax abstract value, and pre-empt the `dtype` attribute - we could provide some sort of hook that would allow `dtypes.is_weakly_typed` to work correctly with custom objects; perhaps delegating to a method like `_is_weakly_typed` What do you think? >we could provide some sort of hook that would allow dtypes.is_weakly_typed to work correctly with custom objects; perhaps delegating to a method like _is_weakly_typed I think that's the best option. You can add some API that can be used to customise how `_is_weakly_typed` behaves with custom types for now. Eventually you can even deprecate it if you move to tax-specific dtypes (which I agree would be the most elegant solution). One more comment here: in my opinion this is a misuse of the `promote_dtypes` API. The docstring specifies that the inputs should be a "dtype or dtype specifier". The fact that this happens to accept arbitrary objects that have a `dtype` attribute is unintended, I think, and you probably shouldn't rely on that. Perhaps `result_type` would be a better API to use in this case, and it would make more sense I think to add a `return_weak_flag` there, because the inputs can be arbitrary objects rather than more narrow "dtype or dtype specifier" (and it already calls `is_weakly_typed` on the inputs, so that would be a better place to hook-in any customization of this behavior). I do agree with you. Since Dtypes cannot have Weak dtype attached at the moment, I think that whatever API Jax exposes now should accept objects, not dtypes. Just to be explicit, since I'm not sure from your reply if you saw it: JAX does already expose `jnp.result_type`, which is, I think, the correct API for type promotion of arbitrary objects, and calls `dtypes.is_weakly_typed` on those objects. 
We'd just need to add a way for custom objects to tell `is_weakly_typed` what to return.
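Since the discussion converged on extending `result_type`, a short usage sketch of the keyword this PR adds may help; it mirrors the new test, and the `jax._src.dtypes` import path is the internal module the patch touches rather than a promise about the public API:

```python
import jax.numpy as jnp
from jax._src import dtypes   # internal module modified by this PR

x_weak = jnp.array(1.0)                 # built from a Python scalar -> weak
x_strong = x_weak.astype(jnp.float32)   # explicit dtype -> not weak

# Default behaviour is unchanged: a plain dtype comes back.
print(dtypes.result_type(x_weak))                                 # float32*
# With the new flag, the weak-type information is returned as well.
print(dtypes.result_type(x_weak, return_weak_type_flag=True))     # (float32*, True)
print(dtypes.result_type(x_strong, return_weak_type_flag=True))   # (float32*, False)
# (*) float32 under the default configuration with 64-bit mode disabled.
```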
2022-04-29T19:59:55
google/jax
10,546
google__jax-10546
[ "10540" ]
a8c6742881ae368882835b799fe6909dff867fb8
diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py --- a/jax/_src/numpy/lax_numpy.py +++ b/jax/_src/numpy/lax_numpy.py @@ -808,22 +808,22 @@ def ravel_multi_index(multi_index, dims, mode='raise', order='C'): _UNRAVEL_INDEX_DOC = """\ Unlike numpy's implementation of unravel_index, negative indices are accepted -and out-of-bounds indices are clipped. +and out-of-bounds indices are clipped into the valid range. """ @_wraps(np.unravel_index, lax_description=_UNRAVEL_INDEX_DOC) def unravel_index(indices, shape): _check_arraylike("unravel_index", indices) - sizes = append(array(shape), 1) - cumulative_sizes = cumprod(sizes[::-1])[::-1] - total_size = cumulative_sizes[0] - # Clip so raveling and unraveling an oob index will not change the behavior - clipped_indices = clip(indices, -total_size, total_size - 1) - # Add enough trailing dims to avoid conflict with clipped_indices - cumulative_sizes = expand_dims(cumulative_sizes, range(1, 1 + _ndim(indices))) - clipped_indices = expand_dims(clipped_indices, axis=0) - idx = clipped_indices % cumulative_sizes[:-1] // cumulative_sizes[1:] - return tuple(idx) + shape = atleast_1d(shape) + if shape.ndim != 1: + raise ValueError("unravel_index: shape should be a scalar or 1D sequence.") + out_indices = [None] * len(shape) + for i, s in reversed(list(enumerate(shape))): + indices, out_indices[i] = divmod(indices, s) + oob_pos = indices > 0 + oob_neg = indices < -1 + return tuple(where(oob_pos, s - 1, where(oob_neg, 0, i)) + for s, i in zip(shape, out_indices)) @_wraps(np.resize) @partial(jit, static_argnames=('new_shape',))
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -4216,22 +4216,26 @@ def jnp_fun(a, c): else: self._CompileAndCheck(jnp_fun, args_maker) - @parameterized.parameters( - (0, (2, 1, 3)), - (5, (2, 1, 3)), - (0, ()), - (np.array([0, 1, 2]), (2, 2)), - (np.array([[[0, 1], [2, 3]]]), (2, 2))) - def testUnravelIndex(self, flat_index, shape): - args_maker = lambda: (flat_index, shape) - np_fun = jtu.with_jax_dtype_defaults(np.unravel_index, use_defaults=not hasattr(flat_index, 'dtype')) - self._CheckAgainstNumpy(np_fun, jnp.unravel_index, args_maker) - self._CompileAndCheck(jnp.unravel_index, args_maker) - - def testUnravelIndexOOB(self): - self.assertEqual(jnp.unravel_index(2, (2,)), (1,)) - self.assertEqual(jnp.unravel_index(-2, (2, 1, 3,)), (1, 0, 1)) - self.assertEqual(jnp.unravel_index(-3, (2,)), (0,)) + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": "_shape={}_idx={}".format(shape, + jtu.format_shape_dtype_string(idx_shape, dtype)), + "shape": shape, "idx_shape": idx_shape, "dtype": dtype} + for shape in nonempty_nonscalar_array_shapes + for dtype in int_dtypes + for idx_shape in all_shapes)) + def testUnravelIndex(self, shape, idx_shape, dtype): + size = prod(shape) + rng = jtu.rand_int(self.rng(), low=-((2 * size) // 3), high=(2 * size) // 3) + + def np_fun(index, shape): + # Adjust out-of-bounds behavior to match jax's documented behavior. + index = np.clip(index, -size, size - 1) + index = np.where(index < 0, index + size, index) + return np.unravel_index(index, shape) + jnp_fun = jnp.unravel_index + args_maker = lambda: [rng(idx_shape, dtype), shape] + self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker) + self._CompileAndCheck(jnp_fun, args_maker) def testAstype(self): rng = self.rng()
Unexpected behaviour in `jnp.unravel_index` I'm getting erroneous values for `jnp.unravel_index`. For an index of `0`, some shapes give different results than the expected `(0,0)`. Some examples: ```python import jax.numpy as jnp side = 10 jnp.unravel_index(0, (side, side)) # (DeviceArray(0, dtype=int32), DeviceArray(0, dtype=int32)) side = 201_995 jnp.unravel_index(0, (side, side)) # (DeviceArray(0, dtype=int32), DeviceArray(0, dtype=int32)) side = 201_996 jnp.unravel_index(0, (side, side)) # (DeviceArray(-1, dtype=int32), DeviceArray(130531, dtype=int32)) side = 205_000 jnp.unravel_index(0, (side, side)) # (DeviceArray(-1, dtype=int32), DeviceArray(82039, dtype=int32)) side = 500_000 jnp.unravel_index(0, (side, side)) # (DeviceArray(0, dtype=int32), DeviceArray(0, dtype=int32)) ``` I'm using Jax version `0.3.6`.
This is due to int32 arithmetic overflowing for arrays this large. In your incorrect example the total size, i.e. square of `side`, is greater than the maximal value representable by a 32-bit integer. If you want to do those calculations on the host, then it's better to stick to regular `numpy` (then you also won't unnecessarily do scalar computations on the device). If you really need to perform those operations on the device, please try [running with X64 mode](https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision) and perform this compute with larger integer types. Agreed with that synopsis, but I wonder if we could make the implementation safer with respect to potential integer overflow? After all, neither the inputs nor the expected outputs in this case are out of the representable range. Another potential improvement: in the common case of a static `shape` argument, we should be able to warn or error at trace time if there is not enough range to represent the output.
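As a reading aid for the fix, here is a stripped-down version of the divmod-based loop the patch switches to; it never forms the `side * side` product that overflows int32, though it omits the out-of-bounds clipping the real implementation also does (the helper name is made up):

```python
import jax.numpy as jnp

def unravel_index_divmod(index, shape):
  # Peel off one dimension at a time; every intermediate stays within the
  # range of the original flat index, so no cumulative product is formed.
  out = []
  for s in reversed(shape):
    index, rem = jnp.divmod(index, s)
    out.append(rem)
  return tuple(reversed(out))

side = 201_996
print(unravel_index_divmod(0, (side, side)))   # (0, 0), as expected
```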
2022-05-03T19:12:30
google/jax
10,679
google__jax-10679
[ "10661" ]
41e1635ac11fc5515b27a79a5eaf77eafeefdb7a
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -184,6 +184,7 @@ # -- Options for myst ---------------------------------------------- myst_heading_anchors = 3 # auto-generate 3 levels of heading anchors +myst_enable_extensions = ['dollarmath'] nb_execution_mode = "force" nb_execution_allow_errors = False
documentation format not correct https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#:~:text=int32%2C%20weak_type%3DTrue)-,Summary,-%23 summary format not correct. ![3598734593485](https://user-images.githubusercontent.com/37238933/167804363-8ee6fec3-0bcf-4bc4-be0c-76bb7750a33e.png)
Thanks for the report – looks like some issue with myst-nb conversion. You can see the properly formatted version in the original notebook: https://colab.sandbox.google.com/github/google/jax/blob/main/docs/notebooks/Common_Gotchas_in_JAX.ipynb#scrollTo=SipXS5qiqk8e
2022-05-12T17:38:27
google/jax
10,700
google__jax-10700
[ "8355" ]
4d22d228b6ca021ce663d5d8aaf21ef73b198844
diff --git a/docs/autodidax.py b/docs/autodidax.py --- a/docs/autodidax.py +++ b/docs/autodidax.py @@ -1884,7 +1884,9 @@ def pprint_xla_call(names: DefaultDict[Var, str], eqn: JaxprEqn) -> PPrint: # ### `linearize` # # In the case of `linearize`, we want to stage out the linear part of a `jvp` -# computation. That is, if we have `jvp : (a -> b) -> (a, T a) -> (b, T b)`, +# computation. That is, in terms of +# [Haskell-like type signatures](https://wiki.haskell.org/Type_signature), +# if we have `jvp : (a -> b) -> (a, T a) -> (b, T b)`, # then we write `linearize : (a -> b) -> a -> (b, T a -o T b)`, using `T a` to # mean "the tangent type of `a`" and using the "lollipop" `-o` rather than the # arrow `->` to indicate a _linear_ function. We define the semantics of diff --git a/jax/_src/custom_derivatives.py b/jax/_src/custom_derivatives.py --- a/jax/_src/custom_derivatives.py +++ b/jax/_src/custom_derivatives.py @@ -781,8 +781,8 @@ def custom_gradient(fun): and the VJP (gradient) function. See https://www.tensorflow.org/api_docs/python/tf/custom_gradient. - If the mathematical function to be differentiated has type signature ``a -> - b``, then the Python callable ``fun`` should have signature + If the mathematical function to be differentiated has Haskell-like signature + ``a -> b``, then the Python callable ``fun`` should have the signature ``a -> (b, CT b --o CT a)`` where we use ``CT x`` to denote a cotangent type for ``x`` and the ``--o`` arrow to denote a linear function. See the example below. That is, ``fun`` should return a pair where the first element @@ -1001,7 +1001,7 @@ def linear_call(fun: Callable, fun_transpose: Callable, residual_args, linear_args): """Call a linear function, with a custom implementation for its transpose. - The type signatures of ``fun`` and ``fun_transpose`` are: + The `Haskell-like type signatures`_ of ``fun`` and ``fun_transpose`` are: .. code-block:: haskell @@ -1081,6 +1081,7 @@ def linear_call(fun: Callable, fun_transpose: Callable, residual_args, Returns: The call result, i.e. ``fun(residual_args, linear_args)``. + .. _Haskell-like type signatures: https://wiki.haskell.org/Type_signature """ operands_res, res_tree = tree_flatten(residual_args) operands_lin, lin_tree = tree_flatten(linear_args) diff --git a/jax/_src/lax/control_flow.py b/jax/_src/lax/control_flow.py --- a/jax/_src/lax/control_flow.py +++ b/jax/_src/lax/control_flow.py @@ -153,7 +153,7 @@ def scanned_fun(loop_carry, _): def fori_loop(lower, upper, body_fun, init_val): """Loop from ``lower`` to ``upper`` by reduction to :func:`jax.lax.while_loop`. - The type signature in brief is + The `Haskell-like type signature`_ in brief is .. code-block:: haskell @@ -191,6 +191,8 @@ def fori_loop(lower, upper, body_fun, init_val): Returns: Loop value from the final iteration, of type ``a``. + + .. _Haskell-like type signature: https://wiki.haskell.org/Type_signature """ if not callable(body_fun): raise TypeError("lax.fori_loop: body_fun argument should be callable.") @@ -235,7 +237,7 @@ def while_loop(cond_fun: Callable[[T], BooleanNumeric], init_val: T) -> T: """Call ``body_fun`` repeatedly in a loop while ``cond_fun`` is True. - The type signature in brief is + The `Haskell-like type signature`_ in brief is .. code-block:: haskell @@ -275,6 +277,8 @@ def while_loop(cond_fun, body_fun, init_val): Returns: The output from the final iteration of body_fun, of type ``a``. + + .. 
_Haskell-like type signature: https://wiki.haskell.org/Type_signature """ if not (callable(body_fun) and callable(cond_fun)): raise TypeError("lax.while_loop: body_fun and cond_fun arguments should be callable.") @@ -1354,7 +1358,7 @@ def scan(f: Callable[[Carry, X], Tuple[Carry, Y]], unroll: int = 1) -> Tuple[Carry, Y]: """Scan a function over leading array axes while carrying along state. - The type signature in brief is + The `Haskell-like type signature`_ in brief is .. code-block:: haskell @@ -1422,6 +1426,8 @@ def scan(f, init, xs, length=None): A pair of type ``(c, [b])`` where the first element represents the final loop carry value and the second element represents the stacked outputs of the second output of ``f`` when scanned over the leading axis of the inputs. + + .. _Haskell-like type signature: https://wiki.haskell.org/Type_signature """ if not callable(f): raise TypeError("lax.scan: f argument should be a callable.")
non-Python type signatures in docs are confusing for a Python package The docs include confusing type signatures, using what I assume is Haskell syntax (please correct me if I'm wrong---I never learned Haskell). Two examples I found are: https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.scan.html#jax.lax.scan https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.fori_loop.html#jax.lax.fori_loop PS: sorry, I didn't see an option to report an issue with the docs, so just used the "bugs" issue template.
I think the choice is made consciously, it's more 'math like notation' than just 'haskell syntax'. That may be so, but I'd be surprised if I'm the only reader that can't parse this. I would argue that it depends on the reader. I personally find the syntax of the documentation much more readable than the convention Python uses for `Callable`s, so I'd err on the side of keeping it. Perhaps a good compromise would be to link to some resource each time we use haskell signatures? Yes, that sounds reasonable. At the moment the docs I linked to don't even mention that these are haskell signatures, leaving the reader completely in the dark if they don't happen to be familiar with them already. Does anyone know a good reference for this haskell signature syntax? I think the problem people have is not "type signature syntax" per se, but automatic currying. Which is also why Haskell-like syntax is so much better, as it provides a lot more affordance. So a reference might be [this](https://wiki.haskell.org/Currying).
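One way to bridge the two notations for readers coming from Python: the signature `scan :: (c -> a -> (c, b)) -> c -> [a] -> (c, [b])` from the `lax.scan` docstring can be read as the Python function below. The loop is only a reading aid (JAX's docs give essentially the same reference implementation); `lax.scan` itself operates on arrays stacked along a leading axis rather than Python lists.

```python
from typing import Callable, List, Tuple, TypeVar

C = TypeVar("C")   # c: the carry
X = TypeVar("X")   # a: one slice of the scanned-over input
Y = TypeVar("Y")   # b: one slice of the stacked output

def scan(f: Callable[[C, X], Tuple[C, Y]],
         init: C,
         xs: List[X]) -> Tuple[C, List[Y]]:
  """Pure-Python reading of the Haskell signature, not how lax.scan runs."""
  carry, ys = init, []
  for x in xs:
    carry, y = f(carry, x)
    ys.append(y)
  return carry, ys

print(scan(lambda c, x: (c + x, c), 0, [1, 2, 3]))   # (6, [0, 1, 3])
```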
2022-05-13T19:30:23
google/jax
10,731
google__jax-10731
[ "10729" ]
268b4be21b7f3b9a92c45e4b77adda749f6e18cc
diff --git a/jax/_src/lax/convolution.py b/jax/_src/lax/convolution.py --- a/jax/_src/lax/convolution.py +++ b/jax/_src/lax/convolution.py @@ -14,6 +14,7 @@ import builtins from functools import partial +import operator from typing import Any, List, NamedTuple, Optional, Sequence, Tuple, Union import numpy as np @@ -141,6 +142,15 @@ def conv_general_dilated( padding = lax.padtype_to_pads( np.take(lhs.shape, lhs_perm)[2:], effective_rhs_shape, # type: ignore[index] window_strides, padding) + else: + try: + padding = tuple((operator.index(lo), operator.index(hi)) + for lo, hi in padding) + except (ValueError, TypeError) as e: + raise ValueError( + "padding argument to conv_general_dilated should be a string or a " + f"sequence of (low, high) pairs, got {padding}") from e + preferred_element_type = ( None if preferred_element_type is None else dtypes.canonicalize_dtype(np.dtype(preferred_element_type)))
diff --git a/tests/lax_test.py b/tests/lax_test.py --- a/tests/lax_test.py +++ b/tests/lax_test.py @@ -1060,6 +1060,13 @@ def testConvTransposePaddingList(self): c = lax.conv_general_dilated(a[None, None], b[None, None], (1,1), [(0,0),(0,0)], (1,1)) self.assertAllClose(c, 9 * jnp.ones((1, 1, 26, 26))) + def testConvInvalidPadding(self): + x = jnp.ones((1, 10, 10, 5), dtype=jnp.bfloat16) + with self.assertRaisesRegex(ValueError, + r"padding argument.*, got \(3, 3\)"): + jax.lax.conv_general_dilated_patches(x, (5, 5), window_strides=(1, 1), + padding=(3, 3)) + @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_lhs_shape={}_rhs_shape={}_precision={}".format( jtu.format_shape_dtype_string(lhs_shape, dtype),
Suspected bug in jax.lax.conv_general_dilated_patches Trying to implement the solution [here](https://github.com/google/jax/discussions/5968) but ran into the following bug: ```import jax import jax.numpy as jnp dev = jax.local_devices()[0] ftmap_fn = jax.device_put(jnp.ones((1, 384, 384, 256), dtype=jnp.bfloat16), device=dev) W = 5 stride, padding = (4, 4), (W // 2, W // 2) jax.lax.conv_general_dilated_patches(ftmap_fn, (5, 5), stride, padding).shape ``` but i found the following error: ``` ... File ~/.local/lib/python3.8/site-packages/jax/_src/dispatch.py:609, in XlaCompiledComputation.from_xla_computation(name, xla_computation, nreps, device, backend, tuple_args, in_avals, out_avals, kept_var_idx) 606 options.parameter_is_tupled_arguments = tuple_args 607 with log_elapsed_time(f"Finished XLA compilation of {name} " 608 "in {elapsed_time} sec"): --> 609 compiled = compile_or_get_cached(backend, xla_computation, options) 610 buffer_counts = (None if len(out_avals) == 1 else 611 [aval_to_num_buffers(aval) for aval in out_avals]) 612 execute = _execute_compiled if nreps == 1 else _execute_replicated File ~/.local/lib/python3.8/site-packages/jax/_src/dispatch.py:578, in compile_or_get_cached(backend, computation, compile_options) 575 ir_str = (computation if isinstance(computation, str) 576 else computation.as_hlo_text()) 577 _dump_ir_to_file(module_name, ir_str) --> 578 return backend_compile(backend, computation, compile_options) File ~/.local/lib/python3.8/site-packages/jax/_src/profiler.py:206, in annotate_function.<locals>.wrapper(*args, **kwargs) 203 @wraps(func) 204 def wrapper(*args, **kwargs): 205 with TraceAnnotation(name, **decorator_kwargs): --> 206 return func(*args, **kwargs) 207 return wrapper File ~/.local/lib/python3.8/site-packages/jax/_src/dispatch.py:532, in backend_compile(backend, built_c, options) 528 @profiler.annotate_function 529 def backend_compile(backend, built_c, options): 530 # we use a separate function call to ensure that XLA compilation appears 531 # separately in Python profiling results --> 532 return backend.compile(built_c, compile_options=options) RuntimeError: UNKNOWN: -:4:130: error: expected '[' ``` The error looks very cryptic cannot figure out what it means. Is it an XLA / Jax bug? Thanks! Environment: TPU-VM v3-8 with software version tf.2.8.0 ``` jax==0.3.5 jaxlib==0.3.5 libtpu-nightly==0.1.dev20220407 cloud-tpu-client==0.10 ```
The issue is that `padding` should be a list of pairs, one for each dimension. e.g., this works: ``` stride, padding = (4, 4), [(W // 2, W // 2), (W // 2, W //2)] ``` We are apparently missing some input validation that would have caught this sooner. I'll fix that!
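Putting the reply together with the original snippet, a complete corrected call looks roughly like this; the array is shrunk from the report's 384x384 bfloat16 feature map to keep the example light, and the output shape is left unasserted because it depends on the default `dimension_numbers` interpretation:

```python
import jax
import jax.numpy as jnp

x = jnp.ones((1, 32, 32, 8), dtype=jnp.float32)   # scaled-down stand-in

W = 5
stride = (4, 4)
padding = [(W // 2, W // 2), (W // 2, W // 2)]    # one (low, high) pair per spatial dim

patches = jax.lax.conv_general_dilated_patches(
    x, (W, W), window_strides=stride, padding=padding)
print(patches.shape)
```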
2022-05-16T20:08:14
google/jax
10,885
google__jax-10885
[ "10884" ]
d43cb36dae7e2f4cf734de29431cc371a5efeac5
diff --git a/build/build_wheel.py b/build/build_wheel.py --- a/build/build_wheel.py +++ b/build/build_wheel.py @@ -278,7 +278,7 @@ def build_wheel(sources_path, output_path, cpu): ("Linux", "x86_64"): ("manylinux2014", "x86_64"), ("Linux", "aarch64"): ("manylinux2014", "aarch64"), ("Linux", "ppc64le"): ("manylinux2014", "ppc64le"), - ("Darwin", "x86_64"): ("macosx_10_9", "x86_64"), + ("Darwin", "x86_64"): ("macosx_10_12", "x86_64"), ("Darwin", "arm64"): ("macosx_11_0", "arm64"), ("Windows", "AMD64"): ("win", "amd64"), }[(platform.system(), cpu)]
Build failure on macOS M1 M1 wheel builds are broken at HEAD again, but nothing too serious this time: ``` ➜ python build/build.py _ _ __ __ | | / \ \ \/ / _ | |/ _ \ \ / | |_| / ___ \/ \ \___/_/ \/_/\_\ Bazel binary path: /opt/homebrew/bin/bazel Bazel version: 5.1.1 Python binary path: /Users/nicholasjunge/Workspaces/python/jax/venv/bin/python Python version: 3.9 NumPy version: 1.22.3 MKL-DNN enabled: yes Target CPU: arm64 Target CPU features: release CUDA enabled: no TPU enabled: no ROCm enabled: no Building XLA and installing it in the jaxlib source tree... /opt/homebrew/bin/bazel run --verbose_failures=true --config=mkl_open_source_only :build_wheel -- --output_path=/Users/nicholasjunge/Workspaces/python/jax/dist --cpu=arm64 Extracting Bazel installation... Starting local Bazel server and connecting to it... INFO: Options provided by the client: Inherited 'common' options: --isatty=0 --terminal_columns=80 INFO: Reading rc options for 'run' from /Users/nicholasjunge/Workspaces/python/jax/.bazelrc: Inherited 'common' options: --experimental_repo_remote_exec INFO: Reading rc options for 'run' from /Users/nicholasjunge/Workspaces/python/jax/.bazelrc: Inherited 'build' options: --apple_platform_type=macos --macos_minimum_os=10.9 --announce_rc --define open_source_build=true --spawn_strategy=standalone --enable_platform_specific_config --experimental_cc_shared_library --define=no_aws_support=true --define=no_gcp_support=true --define=no_hdfs_support=true --define=no_kafka_support=true --define=no_ignite_support=true --define=grpc_no_ares=true -c opt --config=short_logs --copt=-DMLIR_PYTHON_PACKAGE_PREFIX=jaxlib.mlir. --@org_tensorflow//tensorflow/compiler/xla/python:enable_gpu=false --@org_tensorflow//tensorflow/compiler/xla/python:enable_tpu=false INFO: Reading rc options for 'run' from /Users/nicholasjunge/Workspaces/python/jax/.jax_configure.bazelrc: Inherited 'build' options: --strategy=Genrule=standalone --repo_env PYTHON_BIN_PATH=/Users/nicholasjunge/Workspaces/python/jax/venv/bin/python --action_env=PYENV_ROOT --python_path=/Users/nicholasjunge/Workspaces/python/jax/venv/bin/python --distinct_host_configuration=false INFO: Found applicable config definition build:short_logs in file /Users/nicholasjunge/Workspaces/python/jax/.bazelrc: --output_filter=DONT_MATCH_ANYTHING INFO: Found applicable config definition build:mkl_open_source_only in file /Users/nicholasjunge/Workspaces/python/jax/.bazelrc: --define=tensorflow_mkldnn_contraction_kernel=1 INFO: Found applicable config definition build:macos in file /Users/nicholasjunge/Workspaces/python/jax/.bazelrc: --config=posix INFO: Found applicable config definition build:posix in file /Users/nicholasjunge/Workspaces/python/jax/.bazelrc: --copt=-fvisibility=hidden --copt=-Wno-sign-compare --cxxopt=-std=c++17 --host_cxxopt=-std=c++17 WARNING: Download from https://storage.googleapis.com/mirror.tensorflow.org/github.com/tensorflow/runtime/archive/e4b355cf794b4df50a8d8150c7f44fe76c8a12d5.tar.gz failed: class java.io.FileNotFoundException GET returned 404 Not Found Loading: Loading: 1 packages loaded Analyzing: target //build:build_wheel (2 packages loaded, 0 targets configured) WARNING: Download from https://mirror.bazel.build/github.com/bazelbuild/rules_cc/archive/081771d4a0e9d7d3aa0eed2ef389fa4700dfb23e.tar.gz failed: class java.io.FileNotFoundException GET returned 404 Not Found Analyzing: target //build:build_wheel (226 packages loaded, 14251 targets configured) INFO: Analyzed target //build:build_wheel (227 packages loaded, 15127 targets 
configured). INFO: Found 1 target... [0 / 31] [Prepa] Expanding template build/build_wheel [578 / 3,068] Compiling llvm/lib/MC/MCParser/MasmParser.cpp; 2s local ... (10 actions, 9 running) ERROR: /private/var/tmp/_bazel_nicholasjunge/270a4a78734ae0f3124fa7265b8a65ef/external/llvm-project/mlir/BUILD.bazel:3153:11: Compiling mlir/lib/Support/Timing.cpp failed: (Aborted): wrapped_clang_pp failed: error executing command (cd /private/var/tmp/_bazel_nicholasjunge/270a4a78734ae0f3124fa7265b8a65ef/execroot/__main__ && \ exec env - \ APPLE_SDK_PLATFORM=MacOSX \ APPLE_SDK_VERSION_OVERRIDE=12.3 \ PATH=/Users/nicholasjunge/Workspaces/python/jax/venv/bin:/opt/homebrew/Caskroom/google-cloud-sdk/latest/google-cloud-sdk/bin:/opt/homebrew/bin:/opt/homebrew/sbin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:/Library/Apple/usr/bin:/opt/homebrew/opt/fzf/bin \ XCODE_VERSION_OVERRIDE=13.4.0.13F17a \ ZERO_AR_DATE=1 \ external/local_config_cc/wrapped_clang_pp '-D_FORTIFY_SOURCE=1' -fstack-protector -fcolor-diagnostics -Wall -Wthread-safety -Wself-assign -fno-omit-frame-pointer -g0 -O2 -DNDEBUG '-DNS_BLOCK_ASSERTIONS=1' '-std=c++11' 'DEBUG_PREFIX_MAP_PWD=.' -iquote external/llvm-project -iquote bazel-out/darwin_arm64-opt/bin/external/llvm-project -iquote external/llvm_terminfo -iquote bazel-out/darwin_arm64-opt/bin/external/llvm_terminfo -iquote external/llvm_zlib -iquote bazel-out/darwin_arm64-opt/bin/external/llvm_zlib -isystem external/llvm-project/mlir/include -isystem bazel-out/darwin_arm64-opt/bin/external/llvm-project/mlir/include -isystem external/llvm-project/llvm/include -isystem bazel-out/darwin_arm64-opt/bin/external/llvm-project/llvm/include -MD -MF bazel-out/darwin_arm64-opt/bin/external/llvm-project/mlir/_objs/Support/Timing.d '-DLLVM_ON_UNIX=1' '-DHAVE_BACKTRACE=1' '-DBACKTRACE_HEADER=<execinfo.h>' '-DLTDL_SHLIB_EXT=".so"' '-DLLVM_PLUGIN_EXT=".so"' '-DLLVM_ENABLE_THREADS=1' '-DHAVE_DEREGISTER_FRAME=1' '-DHAVE_LIBPTHREAD=1' '-DHAVE_PTHREAD_GETNAME_NP=1' '-DHAVE_PTHREAD_H=1' '-DHAVE_PTHREAD_SETNAME_NP=1' '-DHAVE_REGISTER_FRAME=1' '-DHAVE_SETENV_R=1' '-DHAVE_STRERROR_R=1' '-DHAVE_SYSEXITS_H=1' '-DHAVE_UNISTD_H=1' '-DHAVE_MACH_MACH_H=1' '-DHAVE_MALLOC_MALLOC_H=1' '-DHAVE_MALLOC_ZONE_STATISTICS=1' '-DHAVE_PROC_PID_RUSAGE=1' '-DHAVE_UNW_ADD_DYNAMIC_FDE=1' '-DLLVM_NATIVE_ARCH="AArch64"' '-DLLVM_NATIVE_ASMPARSER=LLVMInitializeAArch64AsmParser' '-DLLVM_NATIVE_ASMPRINTER=LLVMInitializeAArch64AsmPrinter' '-DLLVM_NATIVE_DISASSEMBLER=LLVMInitializeAArch64Disassembler' '-DLLVM_NATIVE_TARGET=LLVMInitializeAArch64Target' '-DLLVM_NATIVE_TARGETINFO=LLVMInitializeAArch64TargetInfo' '-DLLVM_NATIVE_TARGETMC=LLVMInitializeAArch64TargetMC' '-DLLVM_NATIVE_TARGETMCA=LLVMInitializeAArch64TargetMCA' '-DLLVM_HOST_TRIPLE="arm64-apple-darwin"' '-DLLVM_DEFAULT_TARGET_TRIPLE="arm64-apple-darwin"' -D__STDC_LIMIT_MACROS -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -DBLAKE3_NO_AVX2 -DBLAKE3_NO_AVX512 -DBLAKE3_NO_SSE2 -DBLAKE3_NO_SSE41 '-DBLAKE3_USE_NEON=0' '-frandom-seed=bazel-out/darwin_arm64-opt/bin/external/llvm-project/mlir/_objs/Support/Timing.o' -isysroot __BAZEL_XCODE_SDKROOT__ -F__BAZEL_XCODE_SDKROOT__/System/Library/Frameworks -F__BAZEL_XCODE_DEVELOPER_DIR__/Platforms/MacOSX.platform/Developer/Library/Frameworks '-mmacosx-version-min=10.9' -no-canonical-prefixes -pthread '-fvisibility=hidden' -Wno-sign-compare '-DMLIR_PYTHON_PACKAGE_PREFIX=jaxlib.mlir.' 
'-std=c++17' -no-canonical-prefixes -Wno-builtin-macro-redefined '-D__DATE__="redacted"' '-D__TIMESTAMP__="redacted"' '-D__TIME__="redacted"' -target arm64-apple-macosx -c external/llvm-project/mlir/lib/Support/Timing.cpp -o bazel-out/darwin_arm64-opt/bin/external/llvm-project/mlir/_objs/Support/Timing.o) # Configuration: 32b36e8e2b8768da789a893fb050b01470cd5e09932078831a2959b0fe5ef2b0 # Execution platform: @local_execution_config_platform//:platform In file included from external/llvm-project/mlir/lib/Support/Timing.cpp:24: external/llvm-project/llvm/include/llvm/Support/RWMutex.h:98:8: error: 'shared_mutex' is unavailable: introduced in macOS 10.12 std::shared_mutex impl; ^ /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX12.3.sdk/usr/include/c++/v1/shared_mutex:180:58: note: 'shared_mutex' has been explicitly marked unavailable here class _LIBCPP_TYPE_VIS _LIBCPP_AVAILABILITY_SHARED_MUTEX shared_mutex ^ 1 error generated. Error in child process '/usr/bin/xcrun'. 1 Target //build:build_wheel failed to build INFO: Elapsed time: 149.959s, Critical Path: 5.80s INFO: 611 processes: 355 internal, 256 local. FAILED: Build did NOT complete successfully ERROR: Build failed. Not running target FAILED: Build did NOT complete successfully b'' Traceback (most recent call last): File "/Users/nicholasjunge/Workspaces/python/jax/build/build.py", line 528, in <module> main() File "/Users/nicholasjunge/Workspaces/python/jax/build/build.py", line 523, in main shell(command) File "/Users/nicholasjunge/Workspaces/python/jax/build/build.py", line 53, in shell output = subprocess.check_output(cmd) File "/opt/homebrew/Cellar/[email protected]/3.9.13_1/Frameworks/Python.framework/Versions/3.9/lib/python3.9/subprocess.py", line 424, in check_output return run(*popenargs, stdout=PIPE, timeout=timeout, check=True, File "/opt/homebrew/Cellar/[email protected]/3.9.13_1/Frameworks/Python.framework/Versions/3.9/lib/python3.9/subprocess.py", line 528, in run raise CalledProcessError(retcode, process.args, subprocess.CalledProcessError: Command '['/opt/homebrew/bin/bazel', 'run', '--verbose_failures=true', '--config=mkl_open_source_only', ':build_wheel', '--', '--output_path=/Users/nicholasjunge/Workspaces/python/jax/dist', '--cpu=arm64']' returned non-zero exit status 1. ``` The message is very helpful, and indeed, setting the minimum macOS version in the `.bazelrc` to 10.12 fixes the build.
2022-05-31T12:07:57
google/jax
10,925
google__jax-10925
[ "10833" ]
4bc2234db3de19661dd71a86cb5c1cc176051693
diff --git a/jax/_src/lax/convolution.py b/jax/_src/lax/convolution.py --- a/jax/_src/lax/convolution.py +++ b/jax/_src/lax/convolution.py @@ -534,6 +534,29 @@ def _conv_general_dilated_batch_rule( lhs_bdim, rhs_bdim = batch_dims lhs_spec, rhs_spec, out_spec = dimension_numbers + # Some of the cases that reshape into batch or feature dimensions do not work + # with size 0 batch dimensions. The best fix would be to extend HLO to support + # multiple batch dimensions. + if ((lhs_bdim is not None and lhs.shape[lhs_bdim] == 0) or + (rhs_bdim is not None and rhs.shape[rhs_bdim] == 0)): + lhs_shape_unbatched, rhs_shape_unbatched = list(lhs.shape), list(rhs.shape) + if lhs_bdim is not None: + lhs_shape_unbatched.pop(lhs_bdim) + if rhs_bdim is not None: + rhs_shape_unbatched.pop(rhs_bdim) + shape = _conv_general_dilated_shape_rule( + core.ShapedArray(lhs_shape_unbatched, lhs.dtype), + core.ShapedArray(rhs_shape_unbatched, rhs.dtype), + window_strides=window_strides, padding=padding, lhs_dilation=lhs_dilation, + rhs_dilation=rhs_dilation, dimension_numbers=dimension_numbers, + feature_group_count=feature_group_count, + batch_group_count=batch_group_count) + return lax.full( + (0,) + shape, 0, + dtype=lhs.dtype if preferred_element_type is None + else preferred_element_type), 0 + + if lhs_bdim is not None and rhs_bdim is not None: assert lhs.shape[lhs_bdim] == rhs.shape[rhs_bdim] if batch_group_count > 1: @@ -596,8 +619,7 @@ def _conv_general_dilated_batch_rule( new_rhs = _reshape_axis_out_of(rhs_spec[0] + int(rhs_bdim <= rhs_spec[0]), group_count, rhs) new_rhs = _reshape_axis_into(rhs_bdim + int(rhs_spec[0] < rhs_bdim), - rhs_spec[0] + 1, - new_rhs) + rhs_spec[0] + 1, new_rhs) new_rhs = _reshape_axis_into(rhs_spec[0], rhs_spec[0], new_rhs) out = conv_general_dilated(lhs, new_rhs, window_strides, padding, lhs_dilation, rhs_dilation, dimension_numbers, @@ -737,6 +759,10 @@ def _conv_general_dilated_lower( def _reshape_axis_into(src, dst, x): + # NB: `dst` is the number of the dimension that we should reshape into + # *after* `src` is removed from `x`'s list of dimensions. For example, if + # `src` is an added batch dimension, `dst` might name a target dimension in + # the unbatched list of dimensions. perm = [i for i in range(x.ndim) if i != src] perm.insert(dst, src) new_shape = list(np.delete(x.shape, src))
diff --git a/tests/lax_vmap_test.py b/tests/lax_vmap_test.py --- a/tests/lax_vmap_test.py +++ b/tests/lax_vmap_test.py @@ -114,17 +114,18 @@ def testOp(self, op_name, rng_factory, shapes, dtype, bdims, tol): "testcase_name": "_lhs_shape={}_rhs_shape={}_strides={}_padding={}_lhs_dilation={}_" "rhs_dilation={}_dims={}_feature_group_count={}_batch_group_count={}" - "_lhs_bdim={}_rhs_bdim={}" + "_lhs_bdim={}_rhs_bdim={}_bdim_size={}" .format(jtu.format_shape_dtype_string(lhs_shape, dtype), jtu.format_shape_dtype_string(rhs_shape, dtype), strides, padding, lhs_dil, rhs_dil, ",".join(dim_nums), - feature_group_count, batch_group_count, lhs_bdim, rhs_bdim), + feature_group_count, batch_group_count, lhs_bdim, rhs_bdim, + bdim_size), "lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype, "strides": strides, "padding": padding, "lhs_dil": lhs_dil, "rhs_dil": rhs_dil, "dimension_numbers": dim_nums, "perms": perms, "lhs_bdim": lhs_bdim, "rhs_bdim": rhs_bdim, "feature_group_count": feature_group_count, - "batch_group_count": batch_group_count, + "batch_group_count": batch_group_count, "bdim_size": bdim_size, } for batch_group_count, feature_group_count in s([(1, 1), (2, 1), (1, 2)]) for lhs_shape, rhs_shape, all_strides, all_pads, lhs_dils, rhs_dils in s([ ((b * batch_group_count, i * feature_group_count, 6, 7), # lhs_shape @@ -142,7 +143,10 @@ def testOp(self, op_name, rng_factory, shapes, dtype, bdims, tol): for dim_nums, perms in s([ (("NCHW", "OIHW", "NCHW"), ([0, 1, 2, 3], [0, 1, 2, 3])), (("NHWC", "HWIO", "NHWC"), ([0, 2, 3, 1], [2, 3, 1, 0])), - (("NHWC", "OIHW", "NCHW"), ([0, 2, 3, 1], [0, 1, 2, 3]))]) + (("NHWC", "OIHW", "NCHW"), ([0, 2, 3, 1], [0, 1, 2, 3])), + (("HWCN", "HWIO", "HWCN"), ([2, 3, 1, 0], [2, 3, 1, 0])), + ]) + for bdim_size in s([0, 5]) for lhs_bdim in s(itertools.chain([cast(Optional[int], None)], range(len(lhs_shape) + 1))) for rhs_bdim in s(itertools.chain([cast(Optional[int], None)], @@ -152,7 +156,7 @@ def testOp(self, op_name, rng_factory, shapes, dtype, bdims, tol): def testConvGeneralDilatedBatching( self, lhs_shape, rhs_shape, dtype, strides, padding, lhs_dil, rhs_dil, dimension_numbers, perms, feature_group_count, batch_group_count, - lhs_bdim, rhs_bdim): + lhs_bdim, rhs_bdim, bdim_size): rng = jtu.rand_default(self.rng()) tol = 1e-1 if dtypes.finfo(dtype).bits <= 32 else 1e-3 @@ -167,8 +171,9 @@ def testConvGeneralDilatedBatching( feature_group_count=feature_group_count, batch_group_count=batch_group_count, precision=lax.Precision.HIGHEST) - self._CheckBatching(conv, 5, (lhs_bdim, rhs_bdim), (lhs_shape, rhs_shape), - (dtype, dtype), rng, rtol=tol, atol=tol) + self._CheckBatching(conv, bdim_size, (lhs_bdim, rhs_bdim), + (lhs_shape, rhs_shape), (dtype, dtype), rng, rtol=tol, + atol=tol) @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_shape={}_from_dtype={}_to_dtype={}_bdims={}".format( @@ -467,12 +472,11 @@ def testTranspose(self, shape, dtype, perm, bdims): for init_val, op, dtypes in [ (0, lax.add, default_dtypes), (1, lax.mul, default_dtypes), - (0, lax.max, all_dtypes), # non-monoidal + # non-monoidal for everything except unsigned integers + (0, lax.max, all_dtypes), (-np.inf, lax.max, float_dtypes), (dtypes.iinfo(np.int32).min, lax.max, [np.int32]), (dtypes.iinfo(np.int64).min, lax.max, [np.int64]), - (dtypes.iinfo(np.uint32).min, lax.max, [np.uint32]), - (dtypes.iinfo(np.uint64).min, lax.max, [np.uint64]), (np.inf, lax.min, float_dtypes), (dtypes.iinfo(np.int32).max, lax.min, [np.int32]), (dtypes.iinfo(np.int64).max, lax.min, 
[np.int64]),
`ZeroDivisionError` in `lax.conv_general_dilated_local` with empty output https://colab.research.google.com/gist/romanngg/943f464a76aa9fd4b93ac8fcac99d746/reverse_mode_conv_fail.ipynb ```python import jax lhs = jax.numpy.ones((1, 1, 1)) rhs = jax.numpy.ones((1, 1, 2)) def f(lhs, rhs): return jax.lax.conv_general_dilated(lhs=lhs, rhs=rhs, window_strides=(1,), padding=((0, 0),), ) ``` Gives an error in `jacrev`: ```python jax.jacrev(f)(lhs, rhs) ``` raises ```python --------------------------------------------------------------------------- JaxStackTraceBeforeTransformation Traceback (most recent call last) [/usr/lib/python3.7/runpy.py](https://localhost:8080/#) in _run_module_as_main(***failed resolving arguments***) 192 return _run_code(code, main_globals, None, --> 193 "__main__", mod_spec) 194 42 frames JaxStackTraceBeforeTransformation: ZeroDivisionError: integer division or modulo by zero The preceding stack trace is the source of the JAX operation that, once transformed by JAX, triggered the following exception. -------------------- The above exception was the direct cause of the following exception: UnfilteredStackTrace Traceback (most recent call last) UnfilteredStackTrace: ZeroDivisionError: integer division or modulo by zero The stack trace below excludes JAX-internal frames. The preceding is the original exception that occurred, unmodified. -------------------- The above exception was the direct cause of the following exception: ZeroDivisionError Traceback (most recent call last) [/usr/local/lib/python3.7/dist-packages/jax/_src/api.py](https://localhost:8080/#) in jacfun(*args, **kwargs) 1170 y, pullback, aux = _vjp(f_partial, *dyn_args, has_aux=True) 1171 tree_map(partial(_check_output_dtype_jacrev, holomorphic), y) -> 1172 jac = vmap(pullback)(_std_basis(y)) 1173 jac = jac[0] if isinstance(argnums, int) else jac 1174 example_args = dyn_args[0] if isinstance(argnums, int) else dyn_args ZeroDivisionError: integer division or modulo by zero ```
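A small shape check makes the failure mode easier to see; `jax.eval_shape` is used so nothing has to execute, and the remark about `jacrev` just restates what the traceback shows (the failure comes from `vmap(pullback)(_std_basis(y))`):

```python
import jax
import jax.numpy as jnp

lhs = jnp.ones((1, 1, 1))   # spatial length 1
rhs = jnp.ones((1, 1, 2))   # kernel length 2, no padding

conv = lambda l, r: jax.lax.conv_general_dilated(
    l, r, window_strides=(1,), padding=((0, 0),))

# The primal output has a zero-sized spatial dimension:
print(jax.eval_shape(conv, lhs, rhs).shape)   # (1, 1, 0)

# jacrev builds one cotangent per output element and vmaps the pullback over
# them; with zero output elements that vmap carries a size-0 batch dimension,
# which is what the conv batching rule needs to handle.
```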
2022-06-01T17:09:27
google/jax
10,930
google__jax-10930
[ "10927" ]
b80d7195f695b728a91f63ab2d2b6b09692f2064
diff --git a/jax/_src/lax/lax.py b/jax/_src/lax/lax.py --- a/jax/_src/lax/lax.py +++ b/jax/_src/lax/lax.py @@ -2893,7 +2893,7 @@ def _pad_transpose(t, operand, padding_value, *, padding_config): t_operand = ad_util.Zero(operand.aval) if ad.is_undefined_primal(operand) else None t_padv = ad_util.Zero(padding_value.aval) if ad.is_undefined_primal(padding_value) else None else: - lo, hi, interior = zip(*padding_config) + lo, hi, interior = util.unzip3(padding_config) total = lambda x: _reduce_sum(x, list(range(t.ndim))) def t_op(): diff --git a/jax/_src/lax/windowed_reductions.py b/jax/_src/lax/windowed_reductions.py --- a/jax/_src/lax/windowed_reductions.py +++ b/jax/_src/lax/windowed_reductions.py @@ -418,7 +418,7 @@ def reduce_window_shape_tuple(operand_shape, window_dimensions, window_strides, operand_shape = lax._dilate_shape(operand_shape, base_dilation) if window_dilation is not None: window_dimensions = lax._dilate_shape(window_dimensions, window_dilation) - pads_lo, pads_hi = [(), ()] if len(padding) == 0 else zip(*padding) + pads_lo, pads_hi = util.unzip2(padding) operand_padded = core.sum_shapes(operand_shape, pads_lo, pads_hi) return core.stride_shape(operand_padded, window_dimensions, window_strides)
diff --git a/tests/lax_autodiff_test.py b/tests/lax_autodiff_test.py --- a/tests/lax_autodiff_test.py +++ b/tests/lax_autodiff_test.py @@ -502,9 +502,12 @@ def testReshapeGrad(self, arg_shape, out_shape, permutation, dtype): {"testcase_name": "_inshape={}_pads={}" .format(jtu.format_shape_dtype_string(shape, dtype), pads), "shape": shape, "dtype": dtype, "pads": pads} - for shape in [(2, 3)] for dtype in float_dtypes - for pads in [[(1, 2, 1), (0, 1, 0)], [(-1, 0, 0), (-1, 0, 2)]])) + for shape, paddings in [ + [(), [()]], + ((2, 3), [[(1, 2, 1), (0, 1, 0)], [(-1, 0, 0), (-1, 0, 2)]]), + ] + for pads in paddings)) def testPadGrad(self, shape, dtype, pads): rng = jtu.rand_small(self.rng()) operand = rng(shape, dtype)
`lax.pad` reverse-mode AD fails on scalar inputs https://colab.research.google.com/gist/romanngg/8284adda14131eca8d63d69929044277/lax_pad_reverse_mode_fail.ipynb ```python def f(x): return lax.pad(x, 0., ()) jacrev(f)(1.) ``` gives ```python --------------------------------------------------------------------------- JaxStackTraceBeforeTransformation Traceback (most recent call last) [/usr/lib/python3.7/runpy.py](https://localhost:8080/#) in _run_module_as_main(***failed resolving arguments***) 192 return _run_code(code, main_globals, None, --> 193 "__main__", mod_spec) 194 36 frames JaxStackTraceBeforeTransformation: ValueError: not enough values to unpack (expected 3, got 0) The preceding stack trace is the source of the JAX operation that, once transformed by JAX, triggered the following exception. -------------------- The above exception was the direct cause of the following exception: UnfilteredStackTrace Traceback (most recent call last) UnfilteredStackTrace: ValueError: not enough values to unpack (expected 3, got 0) The stack trace below excludes JAX-internal frames. The preceding is the original exception that occurred, unmodified. -------------------- The above exception was the direct cause of the following exception: ValueError Traceback (most recent call last) [/usr/local/lib/python3.7/dist-packages/jax/_src/api.py](https://localhost:8080/#) in jacfun(*args, **kwargs) 1170 y, pullback, aux = _vjp(f_partial, *dyn_args, has_aux=True) 1171 tree_map(partial(_check_output_dtype_jacrev, holomorphic), y) -> 1172 jac = vmap(pullback)(_std_basis(y)) 1173 jac = jac[0] if isinstance(argnums, int) else jac 1174 example_args = dyn_args[0] if isinstance(argnums, int) else dyn_args ValueError: not enough values to unpack (expected 3, got 0) ```
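For illustration, a minimal sketch of the failure mode that the patch above addresses: a scalar operand has an empty `padding_config`, so the tuple unpacking in the transpose rule has nothing to unpack, while `util.unzip3` (the helper the fix switches to) returns three empty tuples. The `jax._src.util` import path matches the module referenced in the diff.

```python
from jax._src.util import unzip3

padding_config = ()  # scalar operand: no dimensions to pad

try:
    lo, hi, interior = zip(*padding_config)
except ValueError as e:
    print(e)  # not enough values to unpack (expected 3, got 0)

lo, hi, interior = unzip3(padding_config)
print(lo, hi, interior)  # () () ()
```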
2022-06-01T19:27:39
google/jax
10,934
google__jax-10934
[ "10932" ]
e9542bb61d36475df64e6f7107b45991e4dcb6f0
diff --git a/jax/_src/lax/lax.py b/jax/_src/lax/lax.py --- a/jax/_src/lax/lax.py +++ b/jax/_src/lax/lax.py @@ -1552,6 +1552,7 @@ def _nary_lower_mhlo(op: Callable, ctx, _num = _int | _float | _complex _any = _int | _float | _complex | _bool _bool_or_int = _int | _bool +_ordered = _int | _float | _bool neg_p = standard_unop(_num, 'neg') ad.deflinear2(neg_p, lambda t, operand: [neg(t)]) @@ -2106,7 +2107,7 @@ def _div_transpose_rule(cotangent, x, y): ad.primitive_transposes[div_p] = _div_transpose_rule mlir.register_lowering(div_p, partial(_nary_lower_mhlo, mhlo.DivOp)) -rem_p = standard_naryop([_num, _num], 'rem') +rem_p = standard_naryop([_int | _float, _int | _float], 'rem') ad.defjvp( rem_p, lambda g, x, y: _maybe_broadcast(broadcast_shapes(np.shape(x), np.shape(y)), g), @@ -2171,19 +2172,19 @@ def _compare_lower_mhlo(direction: str, ctx, x, y): ad.defjvp_zero(ne_p) mlir.register_lowering(ne_p, partial(_compare_lower_mhlo, "NE")) -ge_p = naryop(_fixed_dtype(np.bool_), [_any, _any], 'ge') +ge_p = naryop(_fixed_dtype(np.bool_), [_ordered, _ordered], 'ge') ad.defjvp_zero(ge_p) mlir.register_lowering(ge_p, partial(_compare_lower_mhlo, "GE")) -gt_p = naryop(_fixed_dtype(np.bool_), [_any, _any], 'gt') +gt_p = naryop(_fixed_dtype(np.bool_), [_ordered, _ordered], 'gt') ad.defjvp_zero(gt_p) mlir.register_lowering(gt_p, partial(_compare_lower_mhlo, "GT")) -le_p = naryop(_fixed_dtype(np.bool_), [_any, _any], 'le') +le_p = naryop(_fixed_dtype(np.bool_), [_ordered, _ordered], 'le') ad.defjvp_zero(le_p) mlir.register_lowering(le_p, partial(_compare_lower_mhlo, "LE")) -lt_p = naryop(_fixed_dtype(np.bool_), [_any, _any], 'lt') +lt_p = naryop(_fixed_dtype(np.bool_), [_ordered, _ordered], 'lt') ad.defjvp_zero(lt_p) mlir.register_lowering(lt_p, partial(_compare_lower_mhlo, "LT"))
diff --git a/tests/lax_test.py b/tests/lax_test.py --- a/tests/lax_test.py +++ b/tests/lax_test.py @@ -2533,6 +2533,11 @@ def test_reduction_with_repeated_axes_error(self): with self.assertRaisesRegex(ValueError, "duplicate value in 'axes' .*"): lax.reduce(np.arange(3), 0, lax.add, (0, 0)) + @parameterized.parameters([lax.rem, lax.lt, lax.gt, lax.ge, lax.le]) + def test_ops_do_not_accept_complex_dtypes(self, op): + with self.assertRaisesRegex(TypeError, ".*does not accept dtype complex.*"): + op(2+3j, 4+5j) + def test_population_count_booleans_not_supported(self): # https://github.com/google/jax/issues/3886 msg = "population_count does not accept dtype bool"
Abort trap in jax.numpy code ```python import jax import jaxlib import jax.numpy as jnp print(f"{jax.__version__=}") print(f"{jaxlib.__version__=}") x = jnp.arange(4) y = jnp.arange(20) z = jnp.ones(20, dtype='complex64') print(jnp.interp(x, y, z, period=0.59)) ``` output: ``` jax.__version__='0.3.14' jaxlib.__version__='0.3.10' 2022-06-01 13:06:02.826868: F external/org_tensorflow/tensorflow/compiler/xla/service/hlo_evaluator.cc:137] unhandled direction for conversion to Comparison: LT Abort trap: 6 ```
I think it may have to do with a complex remainder operation Certainly we should have rejected the `remainder` for complex types in several places before it made it to the compiler: ``` In [1]: import numpy as np, jax.numpy as jnp In [2]: np.remainder(1+2j, 3) --------------------------------------------------------------------------- TypeError Traceback (most recent call last) Input In [2], in <cell line: 1>() ----> 1 np.remainder(1+2j, 3) TypeError: ufunc 'remainder' not supported for the input types, and the inputs could not be safely coerced to any supported types according to the casting rule ''safe'' In [3]: jnp.remainder(1+2j, 3) --------------------------------------------------------------------------- XlaRuntimeError Traceback (most recent call last) Input In [3], in <cell line: 1>() ----> 1 jnp.remainder(1+2j, 3) [... skipping hidden 12 frame] File ~/p/jax/jax/_src/dispatch.py:801, in backend_compile(backend, built_c, options) 797 @profiler.annotate_function 798 def backend_compile(backend, built_c, options): 799 # we use a separate function call to ensure that XLA compilation appears 800 # separately in Python profiling results --> 801 return backend.compile(built_c, compile_options=options) XlaRuntimeError: UNIMPLEMENTED: binary complex op 'remainder' ``` Here's a more compact repro: ```python from jax import jit import jax.numpy as jnp from functools import partial @partial(jit, static_argnums=1) def f(x, a): return x % a f(jnp.complex64(1), 0.5) ``` If the second argument is not annotated as static, we reject it before it makes it to the compiler.
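For illustration, a minimal sketch of the behavior the patch above introduces (mirroring the added test): `rem` and the ordered comparisons reject complex operands with a `TypeError` before anything reaches the compiler.

```python
from jax import lax

for op in (lax.rem, lax.lt, lax.le, lax.gt, lax.ge):
    try:
        op(2 + 3j, 4 + 5j)
    except TypeError as e:
        # expected, per the new test: "... does not accept dtype complex ..."
        print(f"{op.__name__}: {e}")
```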
2022-06-01T20:36:05
google/jax
11,030
google__jax-11030
[ "10231" ]
21b225001f7f018a08d4d29ebef729d6fa11d3df
diff --git a/jax/_src/numpy/setops.py b/jax/_src/numpy/setops.py --- a/jax/_src/numpy/setops.py +++ b/jax/_src/numpy/setops.py @@ -206,6 +206,11 @@ def isin(element, test_elements, assume_unique=False, invert=False): # noqa: F8 ### SetOps + +UNIQUE_SIZE_HINT = ( + "To make jnp.unique() compatible with JIT and other transforms, you can specify " + "a concrete value for the size argument, which will determine the output size.") + @partial(jit, static_argnums=1) def _unique_sorted_mask(ar, axis): aux = moveaxis(ar, axis, 0) @@ -243,7 +248,11 @@ def _unique(ar, axis, return_index=False, return_inverse=False, return_counts=Fa "jnp.unique: for zero-sized input with nonzero size argument, fill_value must be specified") aux, mask, perm = _unique_sorted_mask(ar, axis) - ind = mask if size is None else nonzero(mask, size=size)[0] + if size is None: + ind = core.concrete_or_error(None, mask, + "The error arose in jnp.unique(). " + UNIQUE_SIZE_HINT) + else: + ind = nonzero(mask, size=size)[0] result = aux[ind] if aux.size else aux if fill_value is not None: fill_value = asarray(fill_value, dtype=result.dtype) @@ -304,9 +313,11 @@ def unique(ar, return_index=False, return_inverse=False, return_counts=False, axis: Optional[int] = None, *, size=None, fill_value=None): _check_arraylike("unique", ar) if size is None: - ar = core.concrete_or_error(None, ar, "The error arose for the first argument of jnp.unique()") + ar = core.concrete_or_error(None, ar, + "The error arose for the first argument of jnp.unique(). " + UNIQUE_SIZE_HINT) else: - size = core.concrete_or_error(operator.index, size, "The error arose for the size argument of jnp.unique()") + size = core.concrete_or_error(operator.index, size, + "The error arose for the size argument of jnp.unique(). " + UNIQUE_SIZE_HINT) ar = asarray(ar) if axis is None: axis = 0
NonConcreteBooleanIndexError on call to jnp.unique I'm getting the following error:
```
/usr/local/lib/python3.8/site-packages/jax/_src/numpy/lax_numpy.py in _expand_bool_indices(idx, shape)
   3875     if not type(abstract_i) is ConcreteArray:
   3876       # TODO(mattjj): improve this error by tracking _why_ the indices are not concrete
-> 3877       raise errors.NonConcreteBooleanIndexError(abstract_i)
   3878     elif _ndim(i) == 0:
   3879       raise TypeError("JAX arrays do not support boolean scalar indices")

NonConcreteBooleanIndexError: Array boolean indices must be concrete; got ShapedArray(bool[4897])
```
as the result of a call to `jnp.unique` (in /var/repos/pie_live/research/projections/pitchers/stuff_proj.py):
```
    202      """
    203
--> 204      ages_pred = jnp.unique(age_idx)
```
where `age_idx` is an int DeviceArray:
```python
DeviceArray([24, 25, 26, ..., 6, 14, 10], dtype=int32)
```
It's not immediately clear why this error would be propagated in this context. Any ideas appreciated.
JAX JIT currently only supports static shapes, while the return value of `unique` has a shape that depends on the input value.
https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.unique.html
> Because the size of the output of unique is data-dependent, the function is not typically compatible with JIT. The JAX version adds the optional size argument which must be specified statically for jnp.unique to be used within some of JAX’s transformations.

This is working as intended, but we should definitely improve this error to more directly point to the documentation of the issue. Can you share the code that led to a `NonConcreteBooleanIndexError`? When I try this, I find a `ConcretizationTypeError`:
```python
from jax import jit
import jax.numpy as jnp

jit(jnp.unique)(jnp.arange(5))
# ConcretizationTypeError: Abstract tracer value encountered where concrete value is expected: Traced<ShapedArray(int32[5])>with<DynamicJaxprTrace(level=0/1)>
# The error arose for the first argument of jnp.unique()
# While tracing the function unique at /usr/local/lib/python3.7/dist-packages/jax/_src/numpy/lax_numpy.py:4215 for jit, this concrete value was not available in Python because it depends on the value of the argument 'ar'.
```
@jakevdp `mask` can be abstract while `ar` is concrete. https://github.com/google/jax/blob/902fc0c3d2b3ec9b6034c66074984386ec35606f/jax/_src/numpy/setops.py#L236-L247
The code is part of a much bigger model that I can't share, but I will try to create a smaller, reproducible example.
Thanks - don't worry about the reproduction, I think I understand where it's coming from now, and I can work on improving the error message. The root cause is attempting to JIT-compile `jnp.unique`, which returns an array with data-dependent shape. If you want to use this within JIT, you'll need to statically specify the `size` argument to `jnp.unique`.
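For illustration, a minimal sketch of the workaround described above: giving `jnp.unique` a static `size` (optionally with `fill_value`) fixes its output shape, so it can be used under `jit`. The sample values here are made up.

```python
from functools import partial
import jax
import jax.numpy as jnp

# size must be a static Python int; fill_value pads the result when there
# are fewer unique values than `size`.
unique3 = jax.jit(partial(jnp.unique, size=3, fill_value=-1))

age_idx = jnp.array([24, 25, 26, 24, 25])
print(unique3(age_idx))  # [24 25 26]
```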
2022-06-08T22:04:33
google/jax
11,043
google__jax-11043
[ "11005" ]
acc7dc094ec1f5ff63359e530285a25e40c2c2ec
diff --git a/jax/_src/lax/lax.py b/jax/_src/lax/lax.py --- a/jax/_src/lax/lax.py +++ b/jax/_src/lax/lax.py @@ -983,6 +983,8 @@ def _get_monoid_reducer(monoid_op: Callable, return np.equal(aval.val, _get_max_identity(dtype)) and _reduce_or elif monoid_op is bitwise_and and dtype == np.bool_: return np.equal(aval.val, _get_min_identity(dtype)) and _reduce_and + elif monoid_op is bitwise_xor and dtype == np.bool_: + return np.equal(aval.val, _get_max_identity(dtype)) and _reduce_xor elif monoid_op is max: return np.equal(aval.val, _get_max_identity(dtype)) and _reduce_max elif monoid_op is min: @@ -1023,6 +1025,8 @@ def _reduce_or(operand: Array, axes: Sequence[int]) -> Array: def _reduce_and(operand: Array, axes: Sequence[int]) -> Array: return reduce_and_p.bind(operand, axes=tuple(axes)) +def _reduce_xor(operand: Array, axes: Sequence[int]) -> Array: + return reduce_xor_p.bind(operand, axes=tuple(axes)) def sort(operand: Union[Array, Sequence[Array]], dimension: int = -1, is_stable: bool = True, num_keys: int = 1) -> Union[Array, Tuple[Array, ...]]: @@ -3618,6 +3622,12 @@ def _reduce_logical_shape_rule(operand, *, axes): batching.defreducer(reduce_and_p) +reduce_xor_p = standard_primitive( + _reduce_logical_shape_rule, _fixed_dtype(np.bool_), 'reduce_xor', + weak_type_rule=_strip_weak_type) +batching.defreducer(reduce_xor_p) + + def _unary_reduce_lower(reducer, unit_factory, ctx, x, *, axes): aval_out, = ctx.avals_out dtype = aval_out.dtype @@ -3639,6 +3649,8 @@ def _unary_reduce_lower(reducer, unit_factory, ctx, x, *, axes): lambda dtype: np.array(False, dtype))) mlir.register_lowering(reduce_and_p, partial(_unary_reduce_lower, mhlo.AndOp, lambda dtype: np.array(True, dtype))) +mlir.register_lowering(reduce_xor_p, partial(_unary_reduce_lower, mhlo.XorOp, + lambda dtype: np.array(False, dtype))) mlir.register_lowering(reduce_min_p, partial(_unary_reduce_lower, mlir.min_mhlo, _get_min_identity)) mlir.register_lowering(reduce_max_p, partial(_unary_reduce_lower, mlir.max_mhlo, diff --git a/jax/experimental/jax2tf/jax2tf.py b/jax/experimental/jax2tf/jax2tf.py --- a/jax/experimental/jax2tf/jax2tf.py +++ b/jax/experimental/jax2tf/jax2tf.py @@ -981,6 +981,7 @@ def _unexpected_primitive(p: core.Primitive, *args, **kwargs): "igamma_grad_a", "random_gamma_grad", "reduce_precision", + "reduce_xor", "schur", "closed_call", "unreachable", diff --git a/jax/lax/__init__.py b/jax/lax/__init__.py --- a/jax/lax/__init__.py +++ b/jax/lax/__init__.py @@ -182,6 +182,7 @@ reduce_precision_p as reduce_precision_p, reduce_prod_p as reduce_prod_p, reduce_sum_p as reduce_sum_p, + reduce_xor_p as reduce_xor_p, regularized_incomplete_beta_p as regularized_incomplete_beta_p, rem as rem, rem_p as rem_p,
diff --git a/tests/lax_test.py b/tests/lax_test.py --- a/tests/lax_test.py +++ b/tests/lax_test.py @@ -1748,6 +1748,44 @@ def testReduce(self, op, init_val, shape, dtype, dims): args_maker = lambda: [rng(shape, dtype)] self._CompileAndCheck(fun, args_maker) + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": "_op={}_shape={}_reducedims={}_initval={}_prim={}" + .format(op.__name__, shape, dims, init_val, prim), + "op": op, "init_val": init_val, "shape": shape, "dims": dims, "prim": prim} + for init_val, op , prim in [ + (True, lax.bitwise_and, jax.lax.reduce_and_p), + (False, lax.bitwise_or, jax.lax.reduce_or_p), + (False, lax.bitwise_xor, jax.lax.reduce_xor_p), + ] + for shape, dims in [ + [(3, 4, 5), (0,)], [(3, 4, 5), (1, 2)], + [(3, 4, 5), (0, 2)], [(3, 4, 5), (0, 1, 2)] + ])) + def testReduceBoolean(self, op, init_val, shape, dims, prim): + def reference_fun(operand, init_value): + np_op = getattr(np, op.__name__) + return np_op.reduce(operand, axis=dims, initial=init_val) + + dtype = np.bool_ + rng = jtu.rand_bool(self.rng()) + init_val = np.asarray(init_val, dtype=dtype) + fun = lambda operand, init_val: lax.reduce(operand, init_val, op, dims) + args_maker = lambda: [rng(shape, dtype), init_val] + self._CompileAndCheck(fun, args_maker) + self._CheckAgainstNumpy(reference_fun, fun, args_maker) + + # recheck with a static init_val + fun = lambda operand: lax.reduce(operand, init_val, op, dims) + reference_fun = partial(reference_fun, init_value=init_val) + args_maker = lambda: [rng(shape, dtype)] + self._CompileAndCheck(fun, args_maker) + self._CheckAgainstNumpy(reference_fun, fun, args_maker) + + # check that the correct monoid reducer primitive is used inside the + # jaxpr. This requires the init_val (monoid identity element) to be static + jaxpr = jax.make_jaxpr(fun)(rng(shape, dtype)) + self.assertEqual(jaxpr.eqns[0].primitive, prim) + @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_op={}.{}_arr_weak_type={}_init_weak_type={}" .format(op_namespace.__name__, op, arr_weak_type, init_weak_type),
Bitwise XOR missing as a `lax.reduce` operation The `lax.reduce` function includes bitwise AND and OR as possible reduction operations, but does not include XOR. Likewise, there is no specialized XOR-reduction function in JAX.
Thanks for the report – I think it would be reasonable to add a `reduce_xor` primitive, modeled after `reduce_or` and `reduce_and`. Are you interested in contributing?
No -- I don't know how to add that. Are there bitwise AND and OR reductions already? Or are there only those reductions for logical operations?
Yes, there are bitwise AND and OR reductions currently. I may be confused, because I thought you had referred to the existing bitwise AND and OR reductions in your initial comment. Is there something I'm not understanding about your request?
I was looking at _get_monoid_reducer in https://jax.readthedocs.io/en/latest/_modules/jax/_src/lax/lax.html#reduce; is that relevant to what reduction ops are available?
Yes, that's relevant here. And you'll see the list of currently available specialized reducers here: https://github.com/google/jax/blob/879a94a05405a84d677d0ab049cfefe0169dc22b/jax/_src/lax/lax.py#L970-L990
Note that even without the specialization, you can currently compute an xor reduction manually:
```python
from jax import lax
import jax.numpy as jnp

result = lax.reduce(jnp.arange(6, dtype='uint8'), jnp.uint8(0), lax.bitwise_xor, (0,))
print(result)
# 1
```
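For illustration, a minimal sketch assuming the patch above is applied: a boolean XOR reduction written with `lax.reduce` and the identity element `False` should now be recognized as the new `reduce_xor` primitive, which the added test checks by inspecting the jaxpr.

```python
import numpy as np
import jax
import jax.numpy as jnp
from jax import lax

x = jnp.array([True, False, True, True])
# False is the identity element for XOR, so the monoid-reducer fast path applies.
xor_reduce = lambda v: lax.reduce(v, np.asarray(False), lax.bitwise_xor, (0,))

print(xor_reduce(x))                  # True -- odd number of True entries
print(jax.make_jaxpr(xor_reduce)(x))  # should contain reduce_xor with the patch applied
```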
2022-06-09T18:46:23
google/jax
11,077
google__jax-11077
[ "10922" ]
2c30b0758f3e6ada868217dcac9219a5405a30aa
diff --git a/jax/experimental/ode.py b/jax/experimental/ode.py --- a/jax/experimental/ode.py +++ b/jax/experimental/ode.py @@ -142,7 +142,7 @@ def optimal_step_size(last_step, mean_error_ratio, safety=0.9, ifactor=10.0, jnp.maximum(mean_error_ratio**(-1.0 / order) * safety, dfactor)) return jnp.where(mean_error_ratio == 0, last_step * ifactor, last_step * factor) -def odeint(func, y0, t, *args, rtol=1.4e-8, atol=1.4e-8, mxstep=jnp.inf): +def odeint(func, y0, t, *args, rtol=1.4e-8, atol=1.4e-8, mxstep=jnp.inf, hmax=jnp.inf): """Adaptive stepsize (Dormand-Prince) Runge-Kutta odeint implementation. Args: @@ -157,6 +157,7 @@ def odeint(func, y0, t, *args, rtol=1.4e-8, atol=1.4e-8, mxstep=jnp.inf): rtol: float, relative local error tolerance for solver (optional). atol: float, absolute local error tolerance for solver (optional). mxstep: int, maximum number of steps to take for each timepoint (optional). + hmax: float, maximum step size allowed (optional). Returns: Values of the solution `y` (i.e. integrated system values) at each time @@ -171,17 +172,17 @@ def odeint(func, y0, t, *args, rtol=1.4e-8, atol=1.4e-8, mxstep=jnp.inf): raise TypeError(f"t must be an array of floats, but got {t}.") converted, consts = custom_derivatives.closure_convert(func, y0, t[0], *args) - return _odeint_wrapper(converted, rtol, atol, mxstep, y0, t, *args, *consts) + return _odeint_wrapper(converted, rtol, atol, mxstep, hmax, y0, t, *args, *consts) -@partial(jax.jit, static_argnums=(0, 1, 2, 3)) -def _odeint_wrapper(func, rtol, atol, mxstep, y0, ts, *args): +@partial(jax.jit, static_argnums=(0, 1, 2, 3, 4)) +def _odeint_wrapper(func, rtol, atol, mxstep, hmax, y0, ts, *args): y0, unravel = ravel_pytree(y0) func = ravel_first_arg(func, unravel) - out = _odeint(func, rtol, atol, mxstep, y0, ts, *args) + out = _odeint(func, rtol, atol, mxstep, hmax, y0, ts, *args) return jax.vmap(unravel)(out) -@partial(jax.custom_vjp, nondiff_argnums=(0, 1, 2, 3)) -def _odeint(func, rtol, atol, mxstep, y0, ts, *args): +@partial(jax.custom_vjp, nondiff_argnums=(0, 1, 2, 3, 4)) +def _odeint(func, rtol, atol, mxstep, hmax, y0, ts, *args): func_ = lambda y, t: func(y, t, *args) def scan_fun(carry, target_t): @@ -196,7 +197,7 @@ def body_fun(state): next_t = t + dt error_ratio = mean_error_ratio(next_y_error, rtol, atol, y, next_y) new_interp_coeff = interp_fit_dopri(y, next_y, k, dt) - dt = optimal_step_size(dt, error_ratio) + dt = jnp.clip(optimal_step_size(dt, error_ratio), a_min=0., a_max=hmax) new = [i + 1, next_y, next_f, next_t, dt, t, new_interp_coeff] old = [i + 1, y, f, t, dt, last_t, interp_coeff] @@ -209,17 +210,17 @@ def body_fun(state): return carry, y_target f0 = func_(y0, ts[0]) - dt = initial_step_size(func_, ts[0], y0, 4, rtol, atol, f0) + dt = jnp.clip(initial_step_size(func_, ts[0], y0, 4, rtol, atol, f0), a_min=0., a_max=hmax) interp_coeff = jnp.array([y0] * 5) init_carry = [y0, f0, ts[0], dt, ts[0], interp_coeff] _, ys = lax.scan(scan_fun, init_carry, ts[1:]) return jnp.concatenate((y0[None], ys)) -def _odeint_fwd(func, rtol, atol, mxstep, y0, ts, *args): - ys = _odeint(func, rtol, atol, mxstep, y0, ts, *args) +def _odeint_fwd(func, rtol, atol, mxstep, hmax, y0, ts, *args): + ys = _odeint(func, rtol, atol, mxstep, hmax, y0, ts, *args) return ys, (ys, ts, args) -def _odeint_rev(func, rtol, atol, mxstep, res, g): +def _odeint_rev(func, rtol, atol, mxstep, hmax, res, g): ys, ts, args = res def aug_dynamics(augmented_state, t, *args): @@ -244,7 +245,7 @@ def scan_fun(carry, i): _, y_bar, t0_bar, args_bar = odeint( 
aug_dynamics, (ys[i], y_bar, t0_bar, args_bar), jnp.array([-ts[i], -ts[i - 1]]), - *args, rtol=rtol, atol=atol, mxstep=mxstep) + *args, rtol=rtol, atol=atol, mxstep=mxstep, hmax=hmax) y_bar, t0_bar, args_bar = tree_map(op.itemgetter(1), (y_bar, t0_bar, args_bar)) # Add gradient from current output y_bar = y_bar + g[i - 1]
diff --git a/tests/ode_test.py b/tests/ode_test.py --- a/tests/ode_test.py +++ b/tests/ode_test.py @@ -250,6 +250,21 @@ def f(y0, ts, alpha): jtu.check_grads(f, (y0, ts, alpha), modes=["rev"], order=2, atol=tol, rtol=tol) + @jtu.skip_on_devices("tpu", "gpu") + def test_hmax(self): + """Test max step size control.""" + + def rhs(y, t): + return jnp.piecewise( + t, + [t <= 2., (t >= 5.) & (t <= 7.)], + [lambda s: jnp.array(1.), lambda s: jnp.array(-1.), lambda s: jnp.array(0.)] + ) + ys = odeint(func=rhs, y0=jnp.array(0.), t=jnp.array([0., 5., 10.]), hmax=1.) + + self.assertTrue(jnp.abs(ys[1] - 2.) < 1e-4) + self.assertTrue(jnp.abs(ys[2]) < 1e-4) + if __name__ == '__main__': absltest.main(testLoader=jtu.JaxTestLoader())
Add odeint max step size optional argument It'd be nice if `odeint` had an optional argument analogous to scipy `solve_ivp`'s `max_step` argument, which controls the maximum step size. I'll copy a code example below, but I'm running into an issue with `odeint` in which it steps over a feature in the ODE `rhs` after a long-ish period for which `rhs=0.` One minor issue is that `max_step` unfortunately has a naming collision with `odeint`'s optional arg `mxstep`, which is an integer controlling the total number of iterations `odeint` can take before exiting, so this name may not be usable without changing `mxstep` which may not be desirable. Aside from choosing the argument name, I think implementing this is nearly trivial: just need to add the argument, and I believe change [line 199](https://github.com/google/jax/blob/bab8520d0c6c40aa3799b897a7f2629ba746c2ad/jax/experimental/ode.py#L199) to take the minimum of the max step size and the computed optimal step. The example I post below could be rolled into a unittest validating this behaviour (perhaps with comparison directly with scipy). Thoughts? Given how simple it is I'm happy to implement this myself, but would definitely defer to the core JAX developers for what argument name to use (or for any insight into why this might be an issue with other parts of `odeint`/JAX control flow). ## Motivating example ``` import jax jax.config.update("jax_enable_x64", True) jax.config.update('jax_platform_name', 'cpu') import jax.numpy as jnp from jax.experimental.ode import odeint dt = jnp.array(1.) k = 5 def deriv(t): t = jnp.array(t) return jnp.piecewise( t, [t <= 2 * dt, (t >= k * dt) & (t <= (k + 2) * dt)], [lambda s: jnp.array(1.), lambda s: jnp.array(-1.), lambda s: jnp.array(0.)] ) def rhs(y, t): return deriv(t) odeint( func=rhs, y0=jnp.array(0.), t=jnp.array([0., k*dt, 2*k*dt]), atol=1e-8, rtol=1e-8 ) ``` Using `odeint`, the above code is essentially integrating a function which is `1.` over the interval `[0., 2*dt]`, `-1.` over the interval `[k*dt, (k+1)*dt]`, and zero elsewhere. If integrated over an interval containing both non-zero intervals, the correct result should be `0.`. However , the above code outputs: ``` DeviceArray([0. , 1.99999565, 1.99999565], dtype=float64) ``` and changing the tolerance doesn't change this (it just yields better or worse approximations of `[0., 2., 2.]`) The analogous code in scipy using `solve_ivp` with `method='RK45'` yields the same results, but with `solve_ivp` this can be corrected by setting `max_step=dt`. Shortening the intermediate `0.` interval by either reducing `dt` or `k` both solve the problem as well, so I'm pretty confident it's an issue of step size overshoot. ### Slightly more context The above is a minimal example, but the similarities to the situation in which I'm using `odeint` and the above are: - I don't know ahead of time if a period of `0.` will be present or for how long. - I do however have something analogous to `dt` - basically a natural time scale over which to restrict the step so that features aren't missed, but that also isn't so short that it will totally mess with the efficiency of the solver.
Have a look at [Diffrax](https://github.com/patrick-kidger/diffrax), which supports this as:
```python
diffeqsolve(..., stepsize_controller=PIDController(..., dtmax=...))
```
@patrick-kidger we are actually in the process of adding `diffrax` as a dependency to the project in which this came up :). In any case it would still be a nice feature to have in `odeint`.
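For illustration, a usage sketch of the `hmax` keyword added by the patch above, adapted from the new test (this only works with the patch applied): clipping the adaptive step keeps the solver from stepping over the short nonzero intervals.

```python
import jax.numpy as jnp
from jax.experimental.ode import odeint

def rhs(y, t):
    # +1 on [0, 2], -1 on [5, 7], zero elsewhere
    return jnp.piecewise(
        t,
        [t <= 2., (t >= 5.) & (t <= 7.)],
        [lambda s: jnp.array(1.), lambda s: jnp.array(-1.), lambda s: jnp.array(0.)])

ys = odeint(rhs, jnp.array(0.), jnp.array([0., 5., 10.]), hmax=1.)
print(ys)  # expected to be close to [0., 2., 0.]
```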
2022-06-13T00:21:30
google/jax
11,189
google__jax-11189
[ "1311" ]
cd11aeca8318d3e7c4eeecdf5e8e46ddadcdb60b
diff --git a/jax/_src/api.py b/jax/_src/api.py --- a/jax/_src/api.py +++ b/jax/_src/api.py @@ -2279,10 +2279,10 @@ def jvp( >>> import jax >>> - >>> y, v = jax.jvp(jax.numpy.sin, (0.1,), (0.2,)) - >>> print(y) + >>> primals, tangents = jax.jvp(jax.numpy.sin, (0.1,), (0.2,)) + >>> print(primals) 0.09983342 - >>> print(v) + >>> print(tangents) 0.19900084 """ _check_callable(fun)
jvp docs name the output v, which is easily confused with the v in jvp
[The docs for `jvp`](https://jax.readthedocs.io/en/latest/jax.html?highlight=jvp#jax.jvp) currently have an example where the output is called `y, v`:
```
>>> y, v = jax.jvp(jax.numpy.sin, (0.1,), (0.2,))
>>> print(y)
0.09983342
>>> print(v)
0.19900084
```
This is confusing as it collides with the `v` in `jvp`. The output should be renamed to something useful.
What's wrong with `primals, tangents_out`?
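For illustration, the renamed outputs from the patch, which read much more clearly: the two return values are the primal output and the tangent output of the function.

```python
import jax

primals_out, tangents_out = jax.jvp(jax.numpy.sin, (0.1,), (0.2,))
print(primals_out)   # 0.09983342 == sin(0.1)
print(tangents_out)  # 0.19900084 == cos(0.1) * 0.2
```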
2022-06-21T20:21:29
google/jax
11,215
google__jax-11215
[ "11164" ]
2744404809a85fb25ae3adaa621ee51bc6d2bf17
diff --git a/jax/_src/numpy/polynomial.py b/jax/_src/numpy/polynomial.py --- a/jax/_src/numpy/polynomial.py +++ b/jax/_src/numpy/polynomial.py @@ -19,86 +19,81 @@ from jax import core from jax import jit from jax import lax +from jax._src import dtypes from jax._src.numpy.lax_numpy import ( - all, arange, argmin, array, asarray, atleast_1d, concatenate, convolve, diag, dot, finfo, - full, hstack, maximum, ones, outer, sqrt, trim_zeros, trim_zeros_tol, true_divide, vander, zeros) + all, arange, argmin, array, asarray, atleast_1d, concatenate, convolve, diag, dot, + finfo, full, maximum, ones, outer, roll, sqrt, trim_zeros, trim_zeros_tol, true_divide, + vander, zeros) from jax._src.numpy import linalg -from jax._src.numpy.util import _check_arraylike, _promote_dtypes, _promote_dtypes_inexact, _wraps +from jax._src.numpy.util import _check_arraylike, _promote_dtypes, _promote_dtypes_inexact, _where, _wraps import numpy as np @jit def _roots_no_zeros(p): - # assume: p does not have leading zeros and has length > 1 - p, = _promote_dtypes_inexact(p) - # build companion matrix and find its eigenvalues (the roots) + if p.size < 2: + return array([], dtype=dtypes._to_complex_dtype(p.dtype)) A = diag(ones((p.size - 2,), p.dtype), -1) A = A.at[0, :].set(-p[1:] / p[0]) - roots = linalg.eigvals(A) - return roots + return linalg.eigvals(A) @jit -def _nonzero_range(arr): - # return start and end s.t. arr[:start] = 0 = arr[end:] padding zeros - is_zero = arr == 0 - start = argmin(is_zero) - end = is_zero.size - argmin(is_zero[::-1]) - return start, end +def _roots_with_zeros(p, num_leading_zeros): + # Avoid lapack errors when p is all zero + p = _where(len(p) == num_leading_zeros, 1.0, p) + # Roll any leading zeros to the end & compute the roots + roots = _roots_no_zeros(roll(p, -num_leading_zeros)) + # Sort zero roots to the end. + roots = lax.sort_key_val(roots == 0, roots)[1] + # Set roots associated with num_leading_zeros to NaN + return _where(arange(roots.size) < roots.size - num_leading_zeros, roots, complex(np.nan, np.nan)) @_wraps(np.roots, lax_description="""\ -If the input polynomial coefficients of length n do not start with zero, -the polynomial is of degree n - 1 leading to n - 1 roots. -If the coefficients do have leading zeros, the polynomial they define -has a smaller degree and the number of roots (and thus the output shape) -is value dependent. - -The general implementation can therefore not be transformed with jit. -If the coefficients are guaranteed to have no leading zeros, use the -keyword argument `strip_zeros=False` to get a jit-compatible variant: - ->>> from functools import partial ->>> roots_unsafe = jax.jit(partial(jnp.roots, strip_zeros=False)) ->>> roots_unsafe([1, 2]) # ok -DeviceArray([-2.+0.j], dtype=complex64) ->>> roots_unsafe([0, 1, 2]) # problem -DeviceArray([nan+nanj, nan+nanj], dtype=complex64) ->>> jnp.roots([0, 1, 2]) # use the no-jit version instead +Unlike the numpy version of this function, the JAX version returns the roots in +a complex array regardless of the values of the roots. Additionally, the jax +version of this function adds the ``strip_zeros`` function which must be set to +False for the function to be compatible with JIT and other JAX transformations. 
+With ``strip_zeros=False``, if your coefficients have leading zeros, the +roots will be padded with NaN values: + +>>> coeffs = jnp.array([0, 1, 2]) + +# The default behavior matches numpy and strips leading zeros: +>>> jnp.roots(coeffs) DeviceArray([-2.+0.j], dtype=complex64) + +# With strip_zeros=False, extra roots are set to NaN: +>>> jnp.roots(coeffs, strip_zeros=False) +DeviceArray([-2. +0.j, nan+nanj], dtype=complex64) +""", +extra_params=""" +strip_zeros : bool, default=True + If set to True, then leading zeros in the coefficients will be stripped, similar + to :func:`numpy.roots`. If set to False, leading zeros will not be stripped, and + undefined roots will be represented by NaN values in the function output. + ``strip_zeros`` must be set to ``False`` for the function to be compatible with + :func:`jax.jit` and other JAX transformations. """) def roots(p, *, strip_zeros=True): - # ported from https://github.com/numpy/numpy/blob/v1.17.0/numpy/lib/polynomial.py#L168-L251 - p = atleast_1d(p) + _check_arraylike("roots", p) + p = atleast_1d(*_promote_dtypes_inexact(p)) if p.ndim != 1: raise ValueError("Input must be a rank-1 array.") - - # strip_zeros=False is unsafe because leading zeros aren't removed - if not strip_zeros: - if p.size > 1: - return _roots_no_zeros(p) - else: - return array([]) - - if all(p == 0): - return array([]) - - # factor out trivial roots - start, end = _nonzero_range(p) - # number of trailing zeros = number of roots at 0 - trailing_zeros = p.size - end - - # strip leading and trailing zeros - p = p[start:end] - if p.size < 2: - return zeros(trailing_zeros, p.dtype) + return array([], dtype=dtypes._to_complex_dtype(p.dtype)) + num_leading_zeros = _where(all(p == 0), len(p), argmin(p == 0)) + + if strip_zeros: + num_leading_zeros = core.concrete_or_error(int, num_leading_zeros, + "The error occurred in the jnp.roots() function. To use this within a " + "JIT-compiled context, pass strip_zeros=False, but be aware that leading zeros " + "will be result in some returned roots being set to NaN.") + return _roots_no_zeros(p[num_leading_zeros:]) else: - roots = _roots_no_zeros(p) - # combine roots and zero roots - roots = hstack((roots, zeros(trailing_zeros, roots.dtype))) - return roots + return _roots_with_zeros(p, num_leading_zeros) _POLYFIT_DOC = """\
diff --git a/tests/polynomial_test.py b/tests/polynomial_test.py --- a/tests/polynomial_test.py +++ b/tests/polynomial_test.py @@ -13,13 +13,14 @@ # limitations under the License. from functools import partial + import numpy as np -import unittest +from scipy.sparse import csgraph, csr_matrix from absl.testing import absltest from absl.testing import parameterized -from jax import jit +from jax._src import dtypes from jax import numpy as jnp from jax._src import test_util as jtu @@ -35,114 +36,98 @@ class TestPolynomial(jtu.JaxTestCase): + def assertSetsAllClose(self, x, y, rtol=None, atol=None, check_dtypes=True): + """Assert that x and y contain permutations of the same approximate set of values. + + For non-complex inputs, this is accomplished by comparing the sorted inputs. + For complex, such an approach can be confounded by numerical errors. In this case, + we compute the structural rank of the pairwise comparison matrix: if the structural + rank is full, it implies that the matrix can be permuted so that the diagonal is + non-zero, which implies a one-to-one approximate match between the permuted sets. + """ + x = np.asarray(x).ravel() + y = np.asarray(y).ravel() + + atol = max(jtu.tolerance(x.dtype, atol), jtu.tolerance(y.dtype, atol)) + rtol = max(jtu.tolerance(x.dtype, rtol), jtu.tolerance(y.dtype, rtol)) + + if not (np.issubdtype(x.dtype, np.complexfloating) or + np.issubdtype(y.dtype, np.complexfloating)): + return self.assertAllClose(np.sort(x), np.sort(y), atol=atol, rtol=rtol, + check_dtypes=check_dtypes) + + if check_dtypes: + self.assertEqual(x.dtype, y.dtype) + self.assertEqual(x.size, y.size) + + pairwise = np.isclose(x[:, None], x[None, :], + atol=atol, rtol=rtol, equal_nan=True) + rank = csgraph.structural_rank(csr_matrix(pairwise)) + self.assertEqual(rank, x.size) + + @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_dtype={}_leading={}_trailing={}".format( jtu.format_shape_dtype_string((length+leading+trailing,), dtype), leading, trailing), "dtype": dtype, "length": length, "leading": leading, "trailing": trailing} for dtype in all_dtypes - for length in [0, 3, 9, 10, 17] - for leading in [0, 1, 2, 3, 5, 7, 10] - for trailing in [0, 1, 2, 3, 5, 7, 10])) + for length in [0, 3, 5] + for leading in [0, 2] + for trailing in [0, 2])) # TODO(phawkins): no nonsymmetric eigendecomposition implementation on GPU. @jtu.skip_on_devices("gpu", "tpu") def testRoots(self, dtype, length, leading, trailing): - # rng = jtu.rand_default(self.rng()) - # This test is very fragile and breaks unless a "good" random seed is chosen. - rng = jtu.rand_default(self.rng()) + rng = jtu.rand_some_zero(self.rng()) def args_maker(): p = rng((length,), dtype) - return jnp.concatenate( - [jnp.zeros(leading, p.dtype), p, jnp.zeros(trailing, p.dtype)]), - - jnp_fn = lambda arg: jnp.sort(jnp.roots(arg)) - np_fn = lambda arg: np.sort(np.roots(arg)) - self._CheckAgainstNumpy(np_fn, jnp_fn, args_maker, check_dtypes=False, - tol=3e-6) - - @parameterized.named_parameters(jtu.cases_from_list( - {"testcase_name": "_dtype={}_trailing={}".format( - jtu.format_shape_dtype_string((length+trailing,), dtype), trailing), - "dtype": dtype, "length": length, "trailing": trailing} - for dtype in all_dtypes - for length in [0, 1, 3, 10] - for trailing in [0, 1, 3, 7])) - # TODO(phawkins): no nonsymmetric eigendecomposition implementation on GPU. 
- @jtu.skip_on_devices("gpu", "tpu") - def testRootsNostrip(self, length, dtype, trailing): - # rng = jtu.rand_default(self.rng()) - # This test is very fragile and breaks unless a "good" random seed is chosen. - rng = jtu.rand_default(np.random.RandomState(0)) + return [jnp.concatenate( + [jnp.zeros(leading, p.dtype), p, jnp.zeros(trailing, p.dtype)])] - def args_maker(): - p = rng((length,), dtype) - if length != 0: - return jnp.concatenate([p, jnp.zeros(trailing, p.dtype)]), - else: - # adding trailing would make input invalid (start with zeros) - return p, + jnp_fun = jnp.roots + def np_fun(arg): + return np.roots(arg).astype(dtypes._to_complex_dtype(arg.dtype)) - jnp_fn = lambda arg: jnp.sort(jnp.roots(arg, strip_zeros=False)) - np_fn = lambda arg: np.sort(np.roots(arg)) - self._CheckAgainstNumpy(np_fn, jnp_fn, args_maker, - check_dtypes=False, tol=1e-6) + # Note: outputs have no defined order, so we need to use a special comparator. + args = args_maker() + np_roots = np_fun(*args) + jnp_roots = jnp_fun(*args) + self.assertSetsAllClose(np_roots, jnp_roots) @parameterized.named_parameters(jtu.cases_from_list( - {"testcase_name": "_dtype={}_trailing={}".format( - jtu.format_shape_dtype_string((length + trailing,), dtype), trailing), - "dtype": dtype, "length": length, "trailing": trailing} + {"testcase_name": "_dtype={}_leading={}_trailing={}".format( + jtu.format_shape_dtype_string((length+leading+trailing,), dtype), + leading, trailing), + "dtype": dtype, "length": length, "leading": leading, "trailing": trailing} for dtype in all_dtypes - for length in [0, 1, 3, 10] - for trailing in [0, 1, 3, 7])) - # TODO: enable when there is an eigendecomposition implementation - # for GPU/TPU. + for length in [0, 3, 5] + for leading in [0, 2] + for trailing in [0, 2])) + # TODO(phawkins): no nonsymmetric eigendecomposition implementation on GPU. @jtu.skip_on_devices("gpu", "tpu") - def testRootsJit(self, length, dtype, trailing): - # rng = jtu.rand_default(self.rng()) - # This test is very fragile and breaks unless a "good" random seed is chosen. - rng = jtu.rand_default(np.random.RandomState(0)) + def testRootsNoStrip(self, dtype, length, leading, trailing): + rng = jtu.rand_some_zero(self.rng()) def args_maker(): p = rng((length,), dtype) - if length != 0: - return jnp.concatenate([p, jnp.zeros(trailing, p.dtype)]), - else: - # adding trailing would make input invalid (start with zeros) - return p, - - roots_compiled = jit(partial(jnp.roots, strip_zeros=False)) - jnp_fn = lambda arg: jnp.sort(roots_compiled(arg)) - np_fn = lambda arg: np.sort(np.roots(arg)) - # Using strip_zeros=False makes the algorithm less efficient - # and leads to slightly different values compared ot numpy - self._CheckAgainstNumpy(np_fn, jnp_fn, args_maker, - check_dtypes=False, tol=1e-6) - - @parameterized.named_parameters(jtu.cases_from_list( - {"testcase_name": "_dtype={}_zeros={}_nonzeros={}".format( - jtu.format_shape_dtype_string((zeros+nonzeros,), dtype), - zeros, nonzeros), - "zeros": zeros, "nonzeros": nonzeros, "dtype": dtype} - for dtype in all_dtypes - for zeros in [1, 2, 5] - for nonzeros in [0, 3])) - @jtu.skip_on_devices("gpu") - @unittest.skip("getting segfaults on MKL") # TODO(#3711) - def testRootsInvalid(self, zeros, nonzeros, dtype): - rng = jtu.rand_default(self.rng()) - - # The polynomial coefficients here start with zero and would have to - # be stripped before computing eigenvalues of the companion matrix. 
- # Setting strip_zeros=False skips this check, - # allowing jit transformation but yielding nan's for these inputs. - p = jnp.concatenate([jnp.zeros(zeros, dtype), rng((nonzeros,), dtype)]) - - if p.size == 1: - # polynomial = const has no roots - self.assertTrue(jnp.roots(p, strip_zeros=False).size == 0) - else: - self.assertTrue(jnp.any(jnp.isnan(jnp.roots(p, strip_zeros=False)))) + return [jnp.concatenate( + [jnp.zeros(leading, p.dtype), p, jnp.zeros(trailing, p.dtype)])] + + jnp_fun = partial(jnp.roots, strip_zeros=False) + def np_fun(arg): + roots = np.roots(arg).astype(dtypes._to_complex_dtype(arg.dtype)) + if len(roots) < len(arg) - 1: + roots = np.pad(roots, (0, len(arg) - len(roots) - 1), + constant_values=complex(np.nan, np.nan)) + return roots + + # Note: outputs have no defined order, so we need to use a special comparator. + args = args_maker() + np_roots = np_fun(*args) + jnp_roots = jnp_fun(*args) + self.assertSetsAllClose(np_roots, jnp_roots) + self._CompileAndCheck(jnp_fun, args_maker) if __name__ == "__main__":
jnp.roots and other polynomial functions return inconsistent dtypes For example, here we get either complex outputs or default float depending on the size of the input: https://github.com/google/jax/blob/9b7cad1db96a06e28e9a95261bea10da45914bd3/jax/_src/numpy/polynomial.py#L79-L82 Here we get either the input dtype or complex output: https://github.com/google/jax/blob/9b7cad1db96a06e28e9a95261bea10da45914bd3/jax/_src/numpy/polynomial.py#L95-L101
It turns out numpy has the same dtype discrepancy here, although in numpy's case it's consistent with its behavior of casting outputs to float when the imaginary parts are all zero.
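For illustration, a sketch of the NumPy behavior described above (the result dtype depends on whether any root has a nonzero imaginary part); the patch sidesteps this on the JAX side by always returning a complex array.

```python
import numpy as np

print(np.roots([1., -3., 2.]).dtype)  # float64    -- roots 1 and 2 are purely real
print(np.roots([1., 0., 1.]).dtype)   # complex128 -- roots are +1j and -1j
```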
2022-06-22T19:46:36
google/jax
11,234
google__jax-11234
[ "2406" ]
7011de56ef6925f74720b5a30c520385b6af668d
diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py --- a/jax/_src/numpy/lax_numpy.py +++ b/jax/_src/numpy/lax_numpy.py @@ -4575,20 +4575,22 @@ def wrapped(*args, **kwargs): _scalar_types = (int, float, complex, np.generic) _accepted_binop_types = (int, float, complex, np.generic, np.ndarray, ndarray) +_rejected_binop_types = (list, tuple, set, dict) -def _defer_to_unrecognized_arg(binary_op): +def _defer_to_unrecognized_arg(opchar, binary_op, swap=False): # Ensure that other array types have the chance to override arithmetic. def deferring_binary_op(self, other): if hasattr(other, '__jax_array__'): other = other.__jax_array__() - if not isinstance(other, _accepted_binop_types): - return NotImplemented - return binary_op(self, other) + args = (other, self) if swap else (self, other) + if isinstance(other, _accepted_binop_types): + return binary_op(*args) + if isinstance(other, _rejected_binop_types): + raise TypeError(f"unsupported operand type(s) for {opchar}: " + f"{type(args[0]).__name__!r} and {type(args[1]).__name__!r}") + return NotImplemented return deferring_binary_op -def _swap_args(f): - return lambda x, y: f(y, x) - def _unimplemented_setitem(self, i, x): msg = ("'{}' object does not support item assignment. JAX arrays are " "immutable. Instead of ``x[idx] = y``, use ``x = x.at[idx].set(y)`` " @@ -4615,44 +4617,44 @@ def _deepcopy(self, memo): "deepcopy": _deepcopy, "neg": negative, "pos": positive, - "eq": _defer_to_unrecognized_arg(equal), - "ne": _defer_to_unrecognized_arg(not_equal), - "lt": _defer_to_unrecognized_arg(less), - "le": _defer_to_unrecognized_arg(less_equal), - "gt": _defer_to_unrecognized_arg(greater), - "ge": _defer_to_unrecognized_arg(greater_equal), + "eq": _defer_to_unrecognized_arg("==", equal), + "ne": _defer_to_unrecognized_arg("!=", not_equal), + "lt": _defer_to_unrecognized_arg("<", less), + "le": _defer_to_unrecognized_arg("<=", less_equal), + "gt": _defer_to_unrecognized_arg(">", greater), + "ge": _defer_to_unrecognized_arg(">=", greater_equal), "abs": abs, - "add": _defer_to_unrecognized_arg(add), - "radd": _defer_to_unrecognized_arg(add), - "sub": _defer_to_unrecognized_arg(subtract), - "rsub": _defer_to_unrecognized_arg(_swap_args(subtract)), - "mul": _defer_to_unrecognized_arg(multiply), - "rmul": _defer_to_unrecognized_arg(multiply), - "div": _defer_to_unrecognized_arg(divide), - "rdiv": _defer_to_unrecognized_arg(_swap_args(divide)), - "truediv": _defer_to_unrecognized_arg(true_divide), - "rtruediv": _defer_to_unrecognized_arg(_swap_args(true_divide)), - "floordiv": _defer_to_unrecognized_arg(floor_divide), - "rfloordiv": _defer_to_unrecognized_arg(_swap_args(floor_divide)), - "divmod": _defer_to_unrecognized_arg(divmod), - "rdivmod": _defer_to_unrecognized_arg(_swap_args(divmod)), - "mod": _defer_to_unrecognized_arg(mod), - "rmod": _defer_to_unrecognized_arg(_swap_args(mod)), - "pow": _defer_to_unrecognized_arg(power), - "rpow": _defer_to_unrecognized_arg(_swap_args(power)), - "matmul": _defer_to_unrecognized_arg(matmul), - "rmatmul": _defer_to_unrecognized_arg(_swap_args(matmul)), - "and": _defer_to_unrecognized_arg(bitwise_and), - "rand": _defer_to_unrecognized_arg(bitwise_and), - "or": _defer_to_unrecognized_arg(bitwise_or), - "ror": _defer_to_unrecognized_arg(bitwise_or), - "xor": _defer_to_unrecognized_arg(bitwise_xor), - "rxor": _defer_to_unrecognized_arg(bitwise_xor), + "add": _defer_to_unrecognized_arg("+", add), + "radd": _defer_to_unrecognized_arg("+", add, swap=True), + "sub": _defer_to_unrecognized_arg("-", 
subtract), + "rsub": _defer_to_unrecognized_arg("-", subtract, swap=True), + "mul": _defer_to_unrecognized_arg("*", multiply), + "rmul": _defer_to_unrecognized_arg("*", multiply, swap=True), + "div": _defer_to_unrecognized_arg("/", divide), + "rdiv": _defer_to_unrecognized_arg("/", divide, swap=True), + "truediv": _defer_to_unrecognized_arg("/", true_divide), + "rtruediv": _defer_to_unrecognized_arg("/", true_divide, swap=True), + "floordiv": _defer_to_unrecognized_arg("//", floor_divide), + "rfloordiv": _defer_to_unrecognized_arg("//", floor_divide, swap=True), + "divmod": _defer_to_unrecognized_arg("divmod", divmod), + "rdivmod": _defer_to_unrecognized_arg("divmod", divmod, swap=True), + "mod": _defer_to_unrecognized_arg("%", mod), + "rmod": _defer_to_unrecognized_arg("%", mod, swap=True), + "pow": _defer_to_unrecognized_arg("**", power), + "rpow": _defer_to_unrecognized_arg("**", power, swap=True), + "matmul": _defer_to_unrecognized_arg("@", matmul), + "rmatmul": _defer_to_unrecognized_arg("@", matmul, swap=True), + "and": _defer_to_unrecognized_arg("&", bitwise_and), + "rand": _defer_to_unrecognized_arg("&", bitwise_and, swap=True), + "or": _defer_to_unrecognized_arg("|", bitwise_or), + "ror": _defer_to_unrecognized_arg("|", bitwise_or, swap=True), + "xor": _defer_to_unrecognized_arg("^", bitwise_xor), + "rxor": _defer_to_unrecognized_arg("^", bitwise_xor, swap=True), "invert": bitwise_not, - "lshift": _defer_to_unrecognized_arg(left_shift), - "rshift": _defer_to_unrecognized_arg(right_shift), - "rlshift": _defer_to_unrecognized_arg(_swap_args(left_shift)), - "rrshift": _defer_to_unrecognized_arg(_swap_args(right_shift)), + "lshift": _defer_to_unrecognized_arg("<<", left_shift), + "rshift": _defer_to_unrecognized_arg(">>", right_shift), + "rlshift": _defer_to_unrecognized_arg("<<", left_shift, swap=True), + "rrshift": _defer_to_unrecognized_arg(">>", right_shift, swap=True), "round": _operator_round, }
diff --git a/tests/host_callback_test.py b/tests/host_callback_test.py --- a/tests/host_callback_test.py +++ b/tests/host_callback_test.py @@ -941,8 +941,8 @@ def func(x): identity=True transforms=() ] b - _:f32[] = mul c 2.00 - d:f32[] = mul 1.00 2.00 + _:f32[] = mul 2.00 c + d:f32[] = mul 2.00 1.00 e:f32[] = outside_call[ arg_treedef={treedef} callback=... @@ -960,8 +960,8 @@ def func(x): callback=... identity=True ] b - _:f32[] = mul c 2.00 - d:f32[] = mul 1.00 2.00 + _:f32[] = mul 2.00 c + d:f32[] = mul 2.00 1.00 e:f32[] = mul d 3.00 in (e,) }}""", jaxpr) assertMultiLineStrippedEqual(self, "", testing_stream.output) diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -641,6 +641,36 @@ def testRightOperatorOverload(self, name, rng_factory, shapes, dtypes, with jtu.strict_promotion_if_dtypes_match(dtypes): self._CompileAndCheck( fun, args_maker, atol=tol, rtol=tol) + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": f"{rec.test_name}_{othertype}", "name": rec.name, "othertype": othertype} + for rec in JAX_OPERATOR_OVERLOADS if rec.nargs == 2 + for othertype in [dict, list, tuple, set])) + def testOperatorOverloadErrors(self, name, othertype): + # Test that binary operators with builtin collections raise a TypeError + # and report the types in the correct order. + data = [(1, 2), (2, 3)] + arr = jnp.array(data) + other = othertype(data) + + msg = f"unsupported operand type.* 'DeviceArray' and '{othertype.__name__}'" + with self.assertRaisesRegex(TypeError, msg): + getattr(arr, name)(other) + + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": f"{rec.test_name}_{othertype}", "name": rec.name, "othertype": othertype} + for rec in JAX_RIGHT_OPERATOR_OVERLOADS if rec.nargs == 2 + for othertype in [dict, list, tuple, set])) + def testRightOperatorOverloadErrors(self, name, othertype): + # Test that binary operators with builtin collections raise a TypeError + # and report the types in the correct order. + data = [(1, 2), (2, 3)] + arr = jnp.array(data) + other = othertype(data) + + msg = f"unsupported operand type.* '{othertype.__name__}' and 'DeviceArray'" + with self.assertRaisesRegex(TypeError, msg): + getattr(arr, name)(other) + @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": rec.test_name + f"_{dtype}", "rng_factory": rec.rng_factory,
Behavior of `!=` operator is confusing when rhs is a list Compare: ```python for np in jnp, onp: print(np) print(np.array([1,2]) != [1,2]) print(np.array([1,2]) != [3,2]) print(np.array([1,2]) != np.array([1,2])) print(np.array([1,2]) != np.array([3,2])) ``` Ouptut: ``` <module 'jax.numpy' from '/usr/local/lib/python3.6/dist-packages/jax/numpy/__init__.py'> True True [False False] [ True False] <module 'numpy' from '/usr/local/lib/python3.6/dist-packages/numpy/__init__.py'> [False False] [ True False] [False False] [ True False] ``` The strange dynamically-shaped result results in some strange downstream exceptions when dropping in jax.numpy in place of numpy, such as: ```python print(any(onp.array([1,2]) != [1,2])) print(any(jnp.array([1,2]) != [1,2])) ``` Output: ``` False --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-18-e89704e72507> in <module>() 1 print(any(onp.array([1,2]) != [1,2])) ----> 2 print(any(jnp.array([1,2]) != [1,2])) TypeError: 'bool' object is not iterable ``` (Of course the real use-case is `some_fn_returns_array(x) != some_fn_returns_list(y)`)
I think this is an artifact of jax.numpy not promoting lists to arrays automatically like numpy does. We're okay with that behavior in general, but perhaps the comparison functions like `equal`, `not_equal`, `less`, etc. could promote here. The other alternative would be to at least raise rather than quietly returning `False`.
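For illustration, a minimal sketch of the behavior the patch above introduces (per the new tests): binary operators between a JAX array and a builtin collection now raise a clear `TypeError` instead of falling back to the quiet scalar result shown in the report.

```python
import jax.numpy as jnp

arr = jnp.array([1, 2])
try:
    arr != [1, 2]
except TypeError as e:
    print(e)  # unsupported operand type(s) for !=: 'DeviceArray' and 'list'
```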
2022-06-24T00:00:47
google/jax
11,237
google__jax-11237
[ "8954" ]
6835dc18e3c2edd02df81db8d4b6f817ad636c3a
diff --git a/jax/_src/scipy/stats/kde.py b/jax/_src/scipy/stats/kde.py new file mode 100644 --- /dev/null +++ b/jax/_src/scipy/stats/kde.py @@ -0,0 +1,270 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass +from functools import partial +from typing import Any + +import numpy as np +import scipy.stats as osp_stats + +import jax.numpy as jnp +from jax import jit, lax, random, vmap +from jax._src.numpy.lax_numpy import _check_arraylike, _promote_dtypes_inexact +from jax._src.numpy.util import _wraps +from jax._src.tree_util import register_pytree_node_class +from jax.scipy import linalg, special + + +@_wraps(osp_stats.gaussian_kde, update_doc=False) +@register_pytree_node_class +@dataclass(frozen=True, init=False) +class gaussian_kde: + neff: Any + dataset: Any + weights: Any + covariance: Any + inv_cov: Any + + def __init__(self, dataset, bw_method=None, weights=None): + _check_arraylike("gaussian_kde", dataset) + dataset = jnp.atleast_2d(dataset) + if jnp.issubdtype(lax.dtype(dataset), jnp.complexfloating): + raise NotImplementedError("gaussian_kde does not support complex data") + if not dataset.size > 1: + raise ValueError("`dataset` input should have multiple elements.") + + d, n = dataset.shape + if weights is not None: + _check_arraylike("gaussian_kde", weights) + dataset, weights = _promote_dtypes_inexact(dataset, weights) + weights = jnp.atleast_1d(weights) + weights /= jnp.sum(weights) + if weights.ndim != 1: + raise ValueError("`weights` input should be one-dimensional.") + if len(weights) != n: + raise ValueError("`weights` input should be of length n") + else: + dataset, = _promote_dtypes_inexact(dataset) + weights = jnp.full(n, 1.0 / n, dtype=dataset.dtype) + + self._setattr("dataset", dataset) + self._setattr("weights", weights) + neff = self._setattr("neff", 1 / jnp.sum(weights**2)) + + bw_method = "scott" if bw_method is None else bw_method + if bw_method == "scott": + factor = jnp.power(neff, -1. / (d + 4)) + elif bw_method == "silverman": + factor = jnp.power(neff * (d + 2) / 4.0, -1. / (d + 4)) + elif jnp.isscalar(bw_method) and not isinstance(bw_method, str): + factor = bw_method + elif callable(bw_method): + factor = bw_method(self) + else: + raise ValueError( + "`bw_method` should be 'scott', 'silverman', a scalar, or a callable." 
+ ) + + data_covariance = jnp.atleast_2d( + jnp.cov(dataset, rowvar=1, bias=False, aweights=weights)) + data_inv_cov = jnp.linalg.inv(data_covariance) + covariance = data_covariance * factor**2 + inv_cov = data_inv_cov / factor**2 + self._setattr("covariance", covariance) + self._setattr("inv_cov", inv_cov) + + def _setattr(self, name, value): + # Frozen dataclasses don't support setting attributes so we have to + # overload that operation here as they do in the dataclass implementation + object.__setattr__(self, name, value) + return value + + def tree_flatten(self): + return ((self.neff, self.dataset, self.weights, self.covariance, + self.inv_cov), None) + + @classmethod + def tree_unflatten(cls, aux_data, children): + del aux_data + kde = cls.__new__(cls) + kde._setattr("neff", children[0]) + kde._setattr("dataset", children[1]) + kde._setattr("weights", children[2]) + kde._setattr("covariance", children[3]) + kde._setattr("inv_cov", children[4]) + return kde + + @property + def d(self): + return self.dataset.shape[0] + + @property + def n(self): + return self.dataset.shape[1] + + @_wraps(osp_stats.gaussian_kde.evaluate, update_doc=False) + def evaluate(self, points): + _check_arraylike("evaluate", points) + points = self._reshape_points(points) + result = _gaussian_kernel_eval(False, self.dataset.T, self.weights[:, None], + points.T, self.inv_cov) + return result[:, 0] + + @_wraps(osp_stats.gaussian_kde.__call__, update_doc=False) + def __call__(self, points): + return self.evaluate(points) + + @_wraps(osp_stats.gaussian_kde.integrate_gaussian, update_doc=False) + def integrate_gaussian(self, mean, cov): + mean = jnp.atleast_1d(jnp.squeeze(mean)) + cov = jnp.atleast_2d(cov) + + if mean.shape != (self.d,): + raise ValueError("mean does not have dimension {}".format(self.d)) + if cov.shape != (self.d, self.d): + raise ValueError("covariance does not have dimension {}".format(self.d)) + + chol = linalg.cho_factor(self.covariance + cov) + norm = jnp.sqrt(2 * np.pi)**self.d * jnp.prod(jnp.diag(chol[0])) + norm = 1.0 / norm + return _gaussian_kernel_convolve(chol, norm, self.dataset, self.weights, + mean) + + @_wraps(osp_stats.gaussian_kde.integrate_box_1d, update_doc=False) + def integrate_box_1d(self, low, high): + if self.d != 1: + raise ValueError("integrate_box_1d() only handles 1D pdfs") + if jnp.ndim(low) != 0 or jnp.ndim(high) != 0: + raise ValueError( + "the limits of integration in integrate_box_1d must be scalars") + sigma = jnp.squeeze(jnp.sqrt(self.covariance)) + low = jnp.squeeze((low - self.dataset) / sigma) + high = jnp.squeeze((high - self.dataset) / sigma) + return jnp.sum(self.weights * (special.ndtr(high) - special.ndtr(low))) + + @_wraps(osp_stats.gaussian_kde.integrate_kde, update_doc=False) + def integrate_kde(self, other): + if other.d != self.d: + raise ValueError("KDEs are not the same dimensionality") + + chol = linalg.cho_factor(self.covariance + other.covariance) + norm = jnp.sqrt(2 * np.pi)**self.d * jnp.prod(jnp.diag(chol[0])) + norm = 1.0 / norm + + sm, lg = (self, other) if self.n < other.n else (other, self) + result = vmap(partial(_gaussian_kernel_convolve, chol, norm, lg.dataset, + lg.weights), + in_axes=1)(sm.dataset) + return jnp.sum(result * sm.weights) + + def resample(self, key, shape=()): + r"""Randomly sample a dataset from the estimated pdf + + Args: + key: a PRNG key used as the random key. + shape: optional, a tuple of nonnegative integers specifying the result + batch shape; that is, the prefix of the result shape excluding the last + axis. 
+ + Returns: + The resampled dataset as an array with shape `(d,) + shape`. + """ + ind_key, eps_key = random.split(key) + ind = random.choice(ind_key, self.n, shape=shape, p=self.weights) + eps = random.multivariate_normal(eps_key, + jnp.zeros(self.d, self.covariance.dtype), + self.covariance, + shape=shape, + dtype=self.dataset.dtype).T + return self.dataset[:, ind] + eps + + @_wraps(osp_stats.gaussian_kde.pdf, update_doc=False) + def pdf(self, x): + return self.evaluate(x) + + @_wraps(osp_stats.gaussian_kde.logpdf, update_doc=False) + def logpdf(self, x): + _check_arraylike("logpdf", x) + x = self._reshape_points(x) + result = _gaussian_kernel_eval(True, self.dataset.T, self.weights[:, None], + x.T, self.inv_cov) + return result[:, 0] + + def integrate_box(self, low_bounds, high_bounds, maxpts=None): + """This method is not implemented in the JAX interface.""" + del low_bounds, high_bounds, maxpts + raise NotImplementedError( + "only 1D box integrations are supported; use `integrate_box_1d`") + + def set_bandwidth(self, bw_method=None): + """This method is not implemented in the JAX interface.""" + del bw_method + raise NotImplementedError( + "dynamically changing the bandwidth method is not supported") + + def _reshape_points(self, points): + if jnp.issubdtype(lax.dtype(points), jnp.complexfloating): + raise NotImplementedError( + "gaussian_kde does not support complex coordinates") + points = jnp.atleast_2d(points) + d, m = points.shape + if d != self.d: + if d == 1 and m == self.d: + points = jnp.reshape(points, (self.d, 1)) + else: + raise ValueError( + "points have dimension {}, dataset has dimension {}".format( + d, self.d)) + return points + + +def _gaussian_kernel_convolve(chol, norm, target, weights, mean): + diff = target - mean[:, None] + alpha = linalg.cho_solve(chol, diff) + arg = 0.5 * jnp.sum(diff * alpha, axis=0) + return norm * jnp.sum(jnp.exp(-arg) * weights) + + +@partial(jit, static_argnums=0) +def _gaussian_kernel_eval(in_log, points, values, xi, precision): + points, values, xi, precision = _promote_dtypes_inexact( + points, values, xi, precision) + d = points.shape[1] + + if xi.shape[1] != d: + raise ValueError("points and xi must have same trailing dim") + if precision.shape != (d, d): + raise ValueError("precision matrix must match data dims") + + whitening = linalg.cholesky(precision, lower=True) + points = jnp.dot(points, whitening) + xi = jnp.dot(xi, whitening) + log_norm = jnp.sum(jnp.log( + jnp.diag(whitening))) - 0.5 * d * jnp.log(2 * np.pi) + + def kernel(x_test, x_train, y_train): + arg = log_norm - 0.5 * jnp.sum(jnp.square(x_train - x_test)) + if in_log: + return jnp.log(y_train) + arg + else: + return y_train * jnp.exp(arg) + + reduce = special.logsumexp if in_log else jnp.sum + reduced_kernel = lambda x: reduce(vmap(kernel, in_axes=(None, 0, 0)) + (x, points, values), + axis=0) + mapped_kernel = vmap(reduced_kernel) + + return mapped_kernel(xi) diff --git a/jax/scipy/stats/__init__.py b/jax/scipy/stats/__init__.py --- a/jax/scipy/stats/__init__.py +++ b/jax/scipy/stats/__init__.py @@ -31,3 +31,4 @@ from jax.scipy.stats import chi2 as chi2 from jax.scipy.stats import betabinom as betabinom from jax.scipy.stats import gennorm as gennorm +from jax._src.scipy.stats.kde import gaussian_kde as gaussian_kde
diff --git a/tests/scipy_stats_test.py b/tests/scipy_stats_test.py --- a/tests/scipy_stats_test.py +++ b/tests/scipy_stats_test.py @@ -13,6 +13,7 @@ # limitations under the License. +from functools import partial import itertools from absl.testing import absltest, parameterized @@ -21,7 +22,7 @@ import scipy.stats as osp_stats import jax -from jax._src import test_util as jtu +from jax._src import test_util as jtu, tree_util from jax.scipy import stats as lsp_stats from jax.scipy.special import expit @@ -33,12 +34,12 @@ def genNamedParametersNArgs(n): - return parameterized.named_parameters( - jtu.cases_from_list( - {"testcase_name": jtu.format_test_name_suffix("", shapes, dtypes), - "shapes": shapes, "dtypes": dtypes} - for shapes in itertools.combinations_with_replacement(all_shapes, n) - for dtypes in itertools.combinations_with_replacement(jtu.dtypes.floating, n))) + return parameterized.named_parameters( + jtu.cases_from_list( + {"testcase_name": jtu.format_test_name_suffix("", shapes, dtypes), + "shapes": shapes, "dtypes": dtypes} + for shapes in itertools.combinations_with_replacement(all_shapes, n) + for dtypes in itertools.combinations_with_replacement(jtu.dtypes.floating, n))) # Allow implicit rank promotion in these tests, as virtually every test exercises it. @@ -684,6 +685,209 @@ def testMultivariateNormalLogpdfBatch(self, ndim, nbatch, dtype): result2 = jax.vmap(lsp_stats.multivariate_normal.logpdf)(x, mean, cov) self.assertArraysEqual(result1, result2, check_dtypes=False) + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": + "_inshape={}_outsize={}_weights={}_method={}_func={}".format( + jtu.format_shape_dtype_string(inshape, dtype), + outsize, weights, method, func), + "dtype": dtype, + "inshape": inshape, + "outsize": outsize, + "weights": weights, + "method": method, + "func": func} + for inshape in [(50,), (3, 50), (2, 12)] + for dtype in jtu.dtypes.floating + for outsize in [None, 10] + for weights in [False, True] + for method in [None, "scott", "silverman", 1.5, "callable"] + for func in [None, "evaluate", "logpdf", "pdf"])) + def testKde(self, inshape, dtype, outsize, weights, method, func): + if method == "callable": + method = lambda kde: jax.numpy.power(kde.neff, -1./(kde.d+4)) + + def scipy_fun(dataset, points, w): + w = np.abs(w) if weights else None + kde = osp_stats.gaussian_kde(dataset, bw_method=method, weights=w) + if func is None: + result = kde(points) + else: + result = getattr(kde, func)(points) + # Note: the scipy implementation _always_ returns float64 + return result.astype(dtype) + + def lax_fun(dataset, points, w): + w = jax.numpy.abs(w) if weights else None + kde = lsp_stats.gaussian_kde(dataset, bw_method=method, weights=w) + if func is None: + result = kde(points) + else: + result = getattr(kde, func)(points) + return result + + if outsize is None: + outshape = inshape + else: + outshape = inshape[:-1] + (outsize,) + rng = jtu.rand_default(self.rng()) + args_maker = lambda: [ + rng(inshape, dtype), rng(outshape, dtype), rng(inshape[-1:], dtype)] + self._CheckAgainstNumpy( + scipy_fun, lax_fun, args_maker, tol={ + np.float32: 1e-2 if jtu.device_under_test() == "tpu" else 1e-3, + np.float64: 1e-14 + }) + self._CompileAndCheck( + lax_fun, args_maker, rtol={np.float32: 3e-07, np.float64: 4e-15}) + + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": jtu.format_test_name_suffix("", [shape], [dtype]), + "dtype": dtype, + "shape": shape} + for shape in [(15,), (3, 15), (1, 12)] + for dtype in 
jtu.dtypes.floating)) + def testKdeIntegrateGaussian(self, shape, dtype): + def scipy_fun(dataset, weights): + kde = osp_stats.gaussian_kde(dataset, weights=np.abs(weights)) + # Note: the scipy implementation _always_ returns float64 + return kde.integrate_gaussian(mean, covariance).astype(dtype) + + def lax_fun(dataset, weights): + kde = lsp_stats.gaussian_kde(dataset, weights=jax.numpy.abs(weights)) + return kde.integrate_gaussian(mean, covariance) + + # Construct a random mean and positive definite covariance matrix + rng = jtu.rand_default(self.rng()) + ndim = shape[0] if len(shape) > 1 else 1 + mean = rng(ndim, dtype) + L = rng((ndim, ndim), dtype) + L[np.triu_indices(ndim, 1)] = 0.0 + L[np.diag_indices(ndim)] = np.exp(np.diag(L)) + 0.01 + covariance = L @ L.T + + args_maker = lambda: [ + rng(shape, dtype), rng(shape[-1:], dtype)] + self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, + tol={np.float32: 1e-3, np.float64: 1e-14}) + self._CompileAndCheck( + lax_fun, args_maker, rtol={np.float32: 3e-07, np.float64: 4e-15}) + + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": jtu.format_test_name_suffix("", [shape], [dtype]), + "dtype": dtype, + "shape": shape} + for shape in [(15,), (12,)] + for dtype in jtu.dtypes.floating)) + def testKdeIntegrateBox1d(self, shape, dtype): + def scipy_fun(dataset, weights): + kde = osp_stats.gaussian_kde(dataset, weights=np.abs(weights)) + # Note: the scipy implementation _always_ returns float64 + return kde.integrate_box_1d(-0.5, 1.5).astype(dtype) + + def lax_fun(dataset, weights): + kde = lsp_stats.gaussian_kde(dataset, weights=jax.numpy.abs(weights)) + return kde.integrate_box_1d(-0.5, 1.5) + + rng = jtu.rand_default(self.rng()) + args_maker = lambda: [ + rng(shape, dtype), rng(shape[-1:], dtype)] + self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, + tol={np.float32: 1e-3, np.float64: 1e-14}) + self._CompileAndCheck( + lax_fun, args_maker, rtol={np.float32: 3e-07, np.float64: 4e-15}) + + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": jtu.format_test_name_suffix("", [shape], [dtype]), + "dtype": dtype, + "shape": shape} + for shape in [(15,), (3, 15), (1, 12)] + for dtype in jtu.dtypes.floating)) + def testKdeIntegrateKde(self, shape, dtype): + def scipy_fun(dataset, weights): + kde = osp_stats.gaussian_kde(dataset, weights=np.abs(weights)) + other = osp_stats.gaussian_kde( + dataset[..., :-3] + 0.1, weights=np.abs(weights[:-3])) + # Note: the scipy implementation _always_ returns float64 + return kde.integrate_kde(other).astype(dtype) + + def lax_fun(dataset, weights): + kde = lsp_stats.gaussian_kde(dataset, weights=jax.numpy.abs(weights)) + other = lsp_stats.gaussian_kde( + dataset[..., :-3] + 0.1, weights=jax.numpy.abs(weights[:-3])) + return kde.integrate_kde(other) + + rng = jtu.rand_default(self.rng()) + args_maker = lambda: [ + rng(shape, dtype), rng(shape[-1:], dtype)] + self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, + tol={np.float32: 1e-3, np.float64: 1e-14}) + self._CompileAndCheck( + lax_fun, args_maker, rtol={np.float32: 3e-07, np.float64: 4e-15}) + + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": jtu.format_test_name_suffix("", [shape], [dtype]), + "dtype": dtype, + "shape": shape} + for shape in [(15,), (3, 15), (1, 12)] + for dtype in jtu.dtypes.floating)) + def testKdeResampleShape(self, shape, dtype): + def resample(key, dataset, weights, *, shape): + kde = lsp_stats.gaussian_kde(dataset, weights=jax.numpy.abs(weights)) + return 
kde.resample(key, shape=shape) + + rng = jtu.rand_default(self.rng()) + args_maker = lambda: [ + jax.random.PRNGKey(0), rng(shape, dtype), rng(shape[-1:], dtype)] + + ndim = shape[0] if len(shape) > 1 else 1 + + args = args_maker() + func = partial(resample, shape=()) + self._CompileAndCheck( + func, args_maker, rtol={np.float32: 3e-07, np.float64: 4e-15}) + result = func(*args) + assert result.shape == (ndim,) + + func = partial(resample, shape=(4,)) + self._CompileAndCheck( + func, args_maker, rtol={np.float32: 3e-07, np.float64: 4e-15}) + result = func(*args) + assert result.shape == (ndim, 4) + + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": jtu.format_test_name_suffix("", [shape], [dtype]), + "dtype": dtype, + "shape": shape} + for shape in [(15,), (1, 12)] + for dtype in jtu.dtypes.floating)) + def testKdeResample1d(self, shape, dtype): + rng = jtu.rand_default(self.rng()) + dataset = rng(shape, dtype) + weights = jax.numpy.abs(rng(shape[-1:], dtype)) + kde = lsp_stats.gaussian_kde(dataset, weights=weights) + samples = jax.numpy.squeeze(kde.resample(jax.random.PRNGKey(5), shape=(1000,))) + + def cdf(x): + result = jax.vmap(partial(kde.integrate_box_1d, -np.inf))(x) + # Manually casting to numpy in order to avoid type promotion error + return np.array(result) + + self.assertGreater(osp_stats.kstest(samples, cdf).pvalue, 0.01) + + def testKdePyTree(self): + @jax.jit + def evaluate_kde(kde, x): + return kde.evaluate(x) + + dtype = np.float32 + rng = jtu.rand_default(self.rng()) + dataset = rng((3, 15), dtype) + x = rng((3, 12), dtype) + kde = lsp_stats.gaussian_kde(dataset) + leaves, treedef = tree_util.tree_flatten(kde) + kde2 = tree_util.tree_unflatten(treedef, leaves) + tree_util.tree_map(lambda a, b: self.assertAllClose(a, b), kde, kde2) + self.assertAllClose(evaluate_kde(kde, x), kde.evaluate(x)) if __name__ == "__main__": - absltest.main(testLoader=jtu.JaxTestLoader()) + absltest.main(testLoader=jtu.JaxTestLoader())
Please consider implementing scipy.stats.gaussian_kde Would it be possible to add [`scipy.stats.gaussian_kde`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.gaussian_kde.html) to Jax? I use this function to generate plots, but it's very slow. If I could apply Jax's awesome jit to it, it could run much faster. Looking at the code https://github.com/scipy/scipy/blob/master/scipy/stats/_kde.py and https://github.com/scipy/scipy/blob/master/scipy/stats/_stats.pyx#L693, it doesn't seem too crazy as most of the peripheral functions are already in Jax.
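For reference, a minimal usage sketch of the `jax.scipy.stats.gaussian_kde` API added by the patch above (the estimator is registered as a pytree, so it can be passed through `jit`); the data and shapes here are illustrative only:

```python
import jax
import jax.numpy as jnp
from jax.scipy import stats as jsp_stats

key = jax.random.PRNGKey(0)
dataset = jax.random.normal(key, (1000,))   # 1-D samples
grid = jnp.linspace(-3.0, 3.0, 200)         # points at which to evaluate the density

kde = jsp_stats.gaussian_kde(dataset)

@jax.jit
def evaluate(kde, x):
    # The KDE object flows through jit as a pytree.
    return kde.evaluate(x)

density = evaluate(kde, grid)               # shape (200,)
```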
It would definitely be possible. The tricky part of making it efficient in JAX would be figuring out how to take advantage of XLA operations for the convolution-like step (the doubly-nested for-loop in the Cython implementation). It doesn't map to an XLA convolution because the points are not grid-aligned.

@jakevdp Yup, I was looking at that and thinking I don't know how to make that efficient in JAX :smile:

I implemented one possible approach, namely mapping over the elements of the input array (using `vmap`). That may be sufficient if one also implements a simple heuristic to switch to mapping over the data points instead, cf. [SciPy's `gaussian_kde.logpdf`](https://github.com/scipy/scipy/blob/25b3166c62909bb9d220665c43f960dd11d74967/scipy/stats/_kde.py#L603-L620). Here is a link to my implementation in case it is of interest: https://gist.github.com/Edenhofer/0f4947e4bf02cb2bcb3af7204ed2e626. With these helper functions it should be trivial to implement SciPy's API. If there is general interest in tidying this up in a merge request, I also have some tests lying around which I am happy to share. Compared to SciPy it should be pretty fast, and most importantly for me it is differentiable.

P.S. Feel free to use the code under whatever license you want.
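As a concrete illustration of the vmap-over-points strategy described above, here is a minimal 1-D weighted Gaussian-KDE sketch (function and variable names are illustrative, and the scalar `bandwidth` is assumed to be precomputed, e.g. via Scott's rule):

```python
import jax
import jax.numpy as jnp

def kde_evaluate(dataset, weights, bandwidth, points):
    # Evaluate a weighted 1-D Gaussian KDE at each entry of `points` by
    # mapping over the evaluation points with vmap.
    def one_point(x):
        z = (x - dataset) / bandwidth
        kernel = jnp.exp(-0.5 * z**2) / (bandwidth * jnp.sqrt(2 * jnp.pi))
        return jnp.sum(weights * kernel)
    return jax.vmap(one_point)(points)

dataset = jnp.array([0.0, 0.5, 1.0, 2.0])
weights = jnp.full(4, 0.25)
print(kde_evaluate(dataset, weights, 0.5, jnp.linspace(-1.0, 3.0, 9)))
```

Mapping over the data points instead (the heuristic mentioned above) would simply move the `vmap` to the other axis of the kernel computation.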
2022-06-24T02:00:53
google/jax
11,255
google__jax-11255
[ "11254" ]
989a3304bfb046d8d2652ccb6f743d2015194ee0
diff --git a/jax/_src/config.py b/jax/_src/config.py --- a/jax/_src/config.py +++ b/jax/_src/config.py @@ -175,18 +175,18 @@ def parse_flags_with_absl(self): if not FLAGS.jax_omnistaging: raise Exception( "Disabling of omnistaging is no longer supported in JAX version 0.2.12 and higher: " - "see https://github.com/google/jax/blob/main/design_notes/omnistaging.md.\n" + "see https://github.com/google/jax/blob/main/docs/design_notes/omnistaging.md.\n" "To remove this warning, unset the JAX_OMNISTAGING environment variable.") def enable_omnistaging(self): warnings.warn( "enable_omnistaging() is a no-op in JAX versions 0.2.12 and higher;\n" - "see https://github.com/google/jax/blob/main/design_notes/omnistaging.md") + "see https://github.com/google/jax/blob/main/docs/design_notes/omnistaging.md") def disable_omnistaging(self): raise Exception( "Disabling of omnistaging is no longer supported in JAX version 0.2.12 and higher: " - "see https://github.com/google/jax/blob/main/design_notes/omnistaging.md.") + "see https://github.com/google/jax/blob/main/docs/design_notes/omnistaging.md.") def define_bool_state( self, name: str, default: bool, help: str, *,
Broken links after moved design_notes folder

Looks like the `design_notes` folder was moved from the top level into `docs/` some time ago, but there are still a handful of broken links left over from this change. Example of one: https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#JAX-PRNG, where the link to the design of JAX's PRNG is broken.
2022-06-24T19:35:14
google/jax
11,287
google__jax-11287
[ "11285" ]
4fd6733049fe76472f46e356a802238924b8eb98
diff --git a/jax/_src/random.py b/jax/_src/random.py --- a/jax/_src/random.py +++ b/jax/_src/random.py @@ -30,7 +30,7 @@ from jax._src.api import jit, vmap from jax._src.lax import lax as lax_internal from jax._src.lib import xla_bridge -from jax._src.numpy.lax_numpy import _arraylike, _check_arraylike, _convert_and_clip_integer +from jax._src.numpy.lax_numpy import _arraylike, _check_arraylike, _convert_and_clip_integer, _promote_dtypes_inexact from jax.numpy.linalg import cholesky, svd, eigh from jax.interpreters import ad from jax.interpreters import batching @@ -492,15 +492,17 @@ def choice(key: KeyArray, slices = (slice(None),) * axis + (slice(n_draws),) result = permutation(key, a, axis)[slices] else: + _check_arraylike("choice", p) + p, = _promote_dtypes_inexact(p) if p.shape != (n_inputs,): raise ValueError("p must be None or match the shape of a") if replace: p_cuml = jnp.cumsum(p) - r = p_cuml[-1] * (1 - uniform(key, shape)) + r = p_cuml[-1] * (1 - uniform(key, shape, dtype=p_cuml.dtype)) ind = jnp.searchsorted(p_cuml, r) else: # Gumbel top-k trick: https://timvieira.github.io/blog/post/2019/09/16/algorithms-for-sampling-without-replacement/ - g = -gumbel(key, (n_inputs,)) - jnp.log(p) + g = -gumbel(key, (n_inputs,), dtype=p.dtype) - jnp.log(p) ind = jnp.argsort(g)[:n_draws] result = ind if np.ndim(a) == 0 else jnp.take(a, ind, axis)
diff --git a/tests/random_test.py b/tests/random_test.py --- a/tests/random_test.py +++ b/tests/random_test.py @@ -28,12 +28,12 @@ import jax from jax import core -from jax import dtypes from jax import grad from jax import lax from jax import numpy as jnp from jax import prng from jax import random +from jax._src import dtypes from jax._src import test_util as jtu from jax import vmap from jax.interpreters import xla @@ -639,6 +639,7 @@ def testShuffle(self, dtype): def testChoice(self, dtype, input_range_or_shape, shape, replace, weighted, axis): # This is the function API that we test against (note that self.rng().choice differs) np_choice = np.random.default_rng(0).choice + p_dtype = dtypes._to_inexact_dtype(dtype) key = self.seed_prng(0) is_range = type(input_range_or_shape) is int @@ -646,7 +647,11 @@ def testChoice(self, dtype, input_range_or_shape, shape, replace, weighted, axis self.rng().permutation(np.arange(np.prod( input_range_or_shape), dtype=dtype)).reshape(input_range_or_shape)) N = x if is_range else x.shape[axis] - p = None if not weighted else (np.arange(N) + 1) / np.sum(np.arange(N) + 1) + if weighted: + p = np.arange(N, dtype=p_dtype) + 1 + p /= p.sum() + else: + p = None rand = lambda key, x: random.choice(key, x, shape, replace, p, axis) sample = rand(key, x) if not is_range:
Incorrect type promotion in random.choice As discovered in #11237, `random.choice` throws a type promotion error when x64 is enabled and `p` is `float32`. The following demonstrates the issue. Executing: ```python import jax import jax.numpy as jnp jax.config.update("jax_enable_x64", True) with jax.numpy_dtype_promotion("strict"): jax.random.choice(jax.random.PRNGKey(0), 5, p=jnp.ones(5, dtype=jnp.float32)) ``` results in the following type promotion error: <details> <summary>Traceback</summary> ```python Traceback (most recent call last): File "<stdin>", line 2, in <module> File "/Users/dforemanmackey/src/google/jax/jax/_src/random.py", line 499, in choice r = p_cuml[-1] * (1 - uniform(key, shape)) File "/Users/dforemanmackey/src/google/jax/jax/_src/numpy/lax_numpy.py", line 4586, in deferring_binary_op return binary_op(self, other) File "/Users/dforemanmackey/src/google/jax/jax/_src/numpy/ufuncs.py", line 80, in fn x1, x2 = _promote_args(numpy_fn.__name__, x1, x2) File "/Users/dforemanmackey/src/google/jax/jax/_src/numpy/util.py", line 332, in _promote_args return _promote_shapes(fun_name, *_promote_dtypes(*args)) File "/Users/dforemanmackey/src/google/jax/jax/_src/numpy/util.py", line 262, in _promote_dtypes to_dtype, weak_type = dtypes._lattice_result_type(*args) jax._src.dtypes.TypePromotionError: Input dtypes ('float32', 'float64') have no available implicit dtype promotion path when jax_numpy_dtype_promotion=strict. Try explicitly casting inputs to the desired output type, or set jax_numpy_dtype_promotion=standard. ``` </details> The offending line is: https://github.com/google/jax/blob/de464fcf22c8bf7a2931182f8095bc01530df9fe/jax/_src/random.py#L499 And I think the fix is as simple as: ```diff - r = p_cuml[-1] * (1 - uniform(key, shape)) + r = p_cuml[-1] * (1 - uniform(key, shape, dtype=p_cuml.dtype)) ``` But I haven't checked in detail!
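As a side note, the without-replacement path has the same dtype mixing (`-gumbel(key, (n,)) - jnp.log(p)`), which is why the patch above also threads `p.dtype` through the Gumbel draw. A quick check that exercises both paths (a sketch; whether it raises depends on the JAX version and on the x64/strict-promotion settings shown in the report):

```python
import jax
import jax.numpy as jnp

jax.config.update("jax_enable_x64", True)

key = jax.random.PRNGKey(0)
p = jnp.ones(5, dtype=jnp.float32) / 5

with jax.numpy_dtype_promotion("strict"):
    print(jax.random.choice(key, 5, shape=(3,), p=p))                 # replace=True path
    print(jax.random.choice(key, 5, shape=(3,), replace=False, p=p))  # replace=False path
```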
Good catch - thanks!
2022-06-28T15:42:12
google/jax
11,307
google__jax-11307
[ "6286" ]
637bb619155b0e501f306eb1fe4fd7728d986b5f
diff --git a/jax/_src/scipy/optimize/bfgs.py b/jax/_src/scipy/optimize/bfgs.py --- a/jax/_src/scipy/optimize/bfgs.py +++ b/jax/_src/scipy/optimize/bfgs.py @@ -144,7 +144,7 @@ def body_fun(state): rho_k = jnp.reciprocal(_dot(y_k, s_k)) sy_k = s_k[:, jnp.newaxis] * y_k[jnp.newaxis, :] - w = jnp.eye(d) - rho_k * sy_k + w = jnp.eye(d, dtype=rho_k.dtype) - rho_k * sy_k H_kp1 = (_einsum('ij,jk,lk', w, state.H_k, w) + rho_k * s_k[:, jnp.newaxis] * s_k[jnp.newaxis, :]) H_kp1 = jnp.where(jnp.isfinite(rho_k), H_kp1, state.H_k)
diff --git a/tests/scipy_optimize_test.py b/tests/scipy_optimize_test.py --- a/tests/scipy_optimize_test.py +++ b/tests/scipy_optimize_test.py @@ -71,8 +71,8 @@ class TestBFGS(jtu.JaxTestCase): {"testcase_name": f"_func={func_and_init[0].__name__}_maxiter={maxiter}", "maxiter": maxiter, "func_and_init": func_and_init} for maxiter in [None] - for func_and_init in [(rosenbrock, np.zeros(2)), - (himmelblau, np.ones(2)), + for func_and_init in [(rosenbrock, np.zeros(2, dtype='float32')), + (himmelblau, np.ones(2, dtype='float32')), (matyas, np.ones(2) * 6.), (eggholder, np.ones(2) * 100.)])) def test_minimize(self, maxiter, func_and_init):
Line-search x64 type promotion bug There seems to be a bug in the line-search when enabling x64 mode but optimizing a purely float32 function. ```python import jax.numpy as jnp import jax.scipy.optimize jax.config.update("jax_enable_x64", True) def f(x): return jnp.sum(x ** 2) x0 = jnp.zeros(2, dtype=jnp.float32) jax.scipy.optimize.minimize(f, x0, method='BFGS') ``` ``` TypeError: body_fun output and input must have identical types, got _ZoomState(done=ShapedArray(bool[]), failed=ShapedArray(bool[]), j=ShapedArray(int64[], weak_type=True), a_lo=ShapedArray(float64[]), phi_lo=ShapedArray(float64[]), dphi_lo=ShapedArray(float64[]), a_hi=ShapedArray(float64[]), phi_hi=ShapedArray(float64[]), dphi_hi=ShapedArray(float64[]), a_rec=ShapedArray(float64[]), phi_rec=ShapedArray(float64[]), a_star=ShapedArray(float64[]), phi_star=ShapedArray(float64[]), dphi_star=ShapedArray(float64[]), g_star=ShapedArray(float64[2]), nfev=ShapedArray(int64[], weak_type=True), ngev=ShapedArray(int64[], weak_type=True)) and _ZoomState(done=ShapedArray(bool[], weak_type=True), failed=ShapedArray(bool[], weak_type=True), j=ShapedArray(int64[], weak_type=True), a_lo=ShapedArray(float64[], weak_type=True), phi_lo=ShapedArray(float32[]), dphi_lo=ShapedArray(float64[]), a_hi=ShapedArray(float64[], weak_type=True), phi_hi=ShapedArray(float64[]), dphi_hi=ShapedArray(float64[]), a_rec=ShapedArray(float64[], weak_type=True), phi_rec=ShapedArray(float64[]), a_star=ShapedArray(float64[], weak_type=True), phi_star=ShapedArray(float32[]), dphi_star=ShapedArray(float64[]), g_star=ShapedArray(float32[2]), nfev=ShapedArray(int64[], weak_type=True), ngev=ShapedArray(int64[], weak_type=True)). ``` -> `g_star` type differs Is this expected behavior or a bug?
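A small illustration of the dtype promotion at the root of this (the fix in the patch above constructs the identity with `dtype=rho_k.dtype`, so the BFGS state stays float32):

```python
import jax
import jax.numpy as jnp

jax.config.update("jax_enable_x64", True)

# With x64 enabled, jnp.eye defaults to float64, so mixing it with float32
# state silently promotes the result and the carried loop types stop matching.
eye = jnp.eye(2)
v = jnp.ones(2, dtype=jnp.float32)
print(eye.dtype, (eye @ v).dtype)   # float64 float64
```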
This looks like a bug - thanks for the report!
2022-06-29T16:48:17
google/jax
11,308
google__jax-11308
[ "8046" ]
637bb619155b0e501f306eb1fe4fd7728d986b5f
diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py --- a/jax/_src/numpy/lax_numpy.py +++ b/jax/_src/numpy/lax_numpy.py @@ -4553,6 +4553,10 @@ def _view(arr, dtype=None, type=None): return lax.bitcast_convert_type(arr_bytes, uint8).astype(dtype) return lax.bitcast_convert_type(arr_bytes, dtype) +def _notimplemented_flat(self): + raise NotImplementedError("JAX DeviceArrays do not implement the arr.flat property: " + "consider arr.flatten() instead.") + ### track unimplemented functions _NOT_IMPLEMENTED_DESC = """ @@ -5010,6 +5014,7 @@ def _set_shaped_array_attributes(shaped_array): setattr(shaped_array, "reshape", core.aval_method(_reshape)) setattr(shaped_array, "transpose", core.aval_method(_transpose)) setattr(shaped_array, "flatten", core.aval_method(ravel)) + setattr(shaped_array, "flat", core.aval_property(_notimplemented_flat)) setattr(shaped_array, "T", core.aval_property(transpose)) setattr(shaped_array, "real", core.aval_property(real)) setattr(shaped_array, "imag", core.aval_property(imag)) @@ -5043,6 +5048,7 @@ def _set_device_array_base_attributes(device_array): setattr(device_array, "reshape", _reshape) setattr(device_array, "transpose", _transpose) setattr(device_array, "flatten", ravel) + setattr(device_array, "flat", property(_notimplemented_flat)) setattr(device_array, "T", property(transpose)) setattr(device_array, "real", property(real)) setattr(device_array, "imag", property(imag))
Implement `.flat` on JAX arrays JAX arrays are missing the `.flat` property: https://numpy.org/doc/stable/reference/generated/numpy.ndarray.flat.html We should add it!
Just a note on this: the full `np.flatiter` API is more than just an iterator, it also supports advanced indexing operations. For example: ```python In [1]: import numpy as np In [2]: x = np.arange(5) In [3]: mask = x > 2 In [4]: x.flat[mask] Out[4]: array([3, 4]) In [5]: x.flat[[1, 2, 3]] Out[5]: array([1, 2, 3]) ``` I think the implementation of this is going to have some subtleties, particularly with regard to JIT lowering and batching. The easiest way to tick all the boxes might be to do `flat = property(ravel)` +1 for `flat = property(ravel)`. I don't think we want to make any attempt at implementing NumPy's iterator API.
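For the record, the behavior that landed (see the patch above) is a `flat` property that raises `NotImplementedError` and points users at `arr.flatten()`. The indexing use cases from the example can be emulated on the raveled array; a small sketch on the JAX side:

```python
import jax.numpy as jnp

x = jnp.arange(5)
mask = x > 2

# NumPy equivalents: x.flat[mask] and x.flat[[1, 2, 3]]
print(x.ravel()[mask.ravel()])            # [3 4]
print(x.ravel()[jnp.array([1, 2, 3])])    # [1 2 3]
```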
2022-06-29T17:12:08
google/jax
11,311
google__jax-11311
[ "8883" ]
7d637d15e40dac827d776e526f8c8e86f10097f5
diff --git a/jax/_src/api.py b/jax/_src/api.py --- a/jax/_src/api.py +++ b/jax/_src/api.py @@ -1593,10 +1593,15 @@ def _get_axis_size(name: str, shape: Tuple[int, ...], axis: int): # in which case we can produce an error message based on argument indices, # or if it has nested containers. if kws: - # if keyword arguments are included in the tree, we make adapt the error + position_only_tree, leaf = treedef_children(tree) + if not treedef_is_leaf(leaf): + sizes = [x.shape[d] if d is not None else None for x, d in zip(vals, dims)] + sizes = tree_unflatten(tree, sizes) + raise ValueError(msg.format(f"the tree of axis sizes is:\n{sizes}")) from None + # if keyword arguments are included in the tree, we adapt the error # message only to be about the positional arguments - tree, leaf = treedef_children(tree) - assert treedef_is_leaf(leaf) + tree = position_only_tree + # TODO(mattjj,phawkins): add a way to inspect pytree kind more directly if tree == tree_flatten((0,) * tree.num_leaves)[1]: lines1 = [f"arg {i} has shape {np.shape(x)} and axis {d} is to be mapped"
diff --git a/tests/api_test.py b/tests/api_test.py --- a/tests/api_test.py +++ b/tests/api_test.py @@ -2586,6 +2586,18 @@ def test_vmap_unbatched_object_passthrough_issue_183(self): ans = vfun(lambda x: x + 1, jnp.arange(3)) self.assertAllClose(ans, np.arange(1, 4), check_dtypes=False) + def test_vmap_mismatched_keyword(self): + # https://github.com/google/jax/issues/10193 + @jax.vmap + def f(x, y): + return x + y + + with self.assertRaisesRegex( + ValueError, "vmap got inconsistent sizes for array axes to be mapped:\n" + "the tree of axis sizes is:\n" + r"\(\(1,\), \{'y': 2\}\)"): + f(jnp.array([1]), y=jnp.array([1, 2])) + def test_vmap_mismatched_axis_sizes_error_message_issue_705(self): # https://github.com/google/jax/issues/705 def h(a, b):
hit assertion error in api._get_axis_size A user hit [this assertion](https://github.com/google/jax/blob/f6f6b4f04cb1cfb64d531eb41172e1783c57fe47/jax/_src/api.py#L1562). We haven't been able to make a minimal repro yet.
2022-06-29T20:57:57
google/jax
11,397
google__jax-11397
[ "11396" ]
7da733f94bc76bcc8ba8aeecec6f0651d5982763
diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py --- a/jax/_src/numpy/lax_numpy.py +++ b/jax/_src/numpy/lax_numpy.py @@ -2516,6 +2516,8 @@ def diag_indices_from(arr): def diagonal(a, offset=0, axis1: int = 0, axis2: int = 1): _check_arraylike("diagonal", a) a_shape = shape(a) + if ndim(a) < 2: + raise ValueError("diagonal requires an array of at least two dimensions.") offset = core.concrete_or_error(operator.index, offset, "'offset' argument of jnp.diagonal()") a = moveaxis(a, (axis1, axis2), (-2, -1))
improve error message for jnp.diagonal ```python import jax.numpy as jnp import numpy as np M = jnp.ones(5) ``` Then, `np.diagonal(M)` returns a clear error message `ValueError: diag requires an array of at least two dimensions` whereas `jnp.diagonal(M)` returns a still-informative-but-less-clear `ValueError: axis 1 is out of bounds for array of dimension 1`. Could we possibly change this behavior?
Thanks for the report - we should be able to improve that message.
2022-07-07T16:12:42
google/jax
11,469
google__jax-11469
[ "11466" ]
3eff9d11d2210e213c9e205773485acf19c8fc57
diff --git a/jax/_src/lax/lax.py b/jax/_src/lax/lax.py --- a/jax/_src/lax/lax.py +++ b/jax/_src/lax/lax.py @@ -2161,7 +2161,7 @@ def _div_transpose_rule(cotangent, x, y): ad.defjvp( rem_p, lambda g, x, y: _maybe_broadcast(broadcast_shapes(np.shape(x), np.shape(y)), g), - lambda g, x, y: mul(neg(g), floor(div(x, y)))) + lambda g, x, y: mul(neg(g), mul(sign(div(x, y)), floor(abs(div(x, y)))))) mlir.register_lowering(rem_p, partial(_nary_lower_mhlo, mhlo.RemOp)) def _minmax_complex_lowering(x, y, *, lax_cmp_pick_x):
diff --git a/tests/lax_autodiff_test.py b/tests/lax_autodiff_test.py --- a/tests/lax_autodiff_test.py +++ b/tests/lax_autodiff_test.py @@ -1060,19 +1060,24 @@ def f2(x, y): # TODO(mattjj): make this a more systematic test def testRemainder(self): + def gen_x(rng, size): + return rng.uniform(-9, 9, size=size) + + def gen_y(rng, size): + # avoid values near zero because gradients diverge + return rng.uniform(0.1, 5, size=size) * rng.choice([-1, 1], size=size) + rng = self.rng() - x = rng.uniform(-0.9, 9, size=(3, 4)) - y = rng.uniform(0.7, 1.9, size=(3, 1)) + x = gen_x(rng, (5, 8)) + y = gen_y(rng, (1, 8)) assert not set(np.unique(x)) & set(np.unique(y)) - # TODO(jakevdp) try to make these tolerances tighter. - tol = 1e-1 - check_grads(lax.rem, (x, y), 2, ["fwd", "rev"], tol, tol) + check_grads(lax.rem, (x, y), 2, ["fwd", "rev"]) rng = self.rng() - x = rng.uniform(-0.9, 9, size=(1, 4)) - y = rng.uniform(0.7, 1.9, size=(3, 4)) + x = gen_x(rng, (1, 8)) + y = gen_y(rng, (5, 8)) assert not set(np.unique(x)) & set(np.unique(y)) - check_grads(lax.rem, (x, y), 2, ["fwd", "rev"], tol, tol) + check_grads(lax.rem, (x, y), 2, ["fwd", "rev"]) def testHigherOrderGradientOfReciprocal(self): # Regression test for https://github.com/google/jax/issues/3136
`lax.rem` has the wrong gradient for `y` when `x < 0`

`lax.rem` has the wrong gradient with respect to `y` when `x < 0`. For example,

```py
import jax

def fn(y):
    x = jax.numpy.array(-19., jax.numpy.float64)
    return jax.lax.rem(x, y)

y = jax.numpy.array(3., jax.numpy.float64)
res = fn(y)
print(res)  # -1.0
a = jax.jacfwd(fn)(y)
print(a)  # 7.0
```

The gradient should actually be 6 instead of 7, because `-19 = (-6)*3 + (-1)`, i.e. the truncated quotient is -6. Perturbing the input confirms this: when `y` is `3.1`, the result is `-0.4`, so the finite-difference slope is `(-0.4 - (-1)) / 0.1 = 6`. Thus, the gradient should be 6.

By the way, two similar APIs, `jax.numpy.remainder` and `jax.numpy.mod`, also have this issue:

```py
def fn(y):
    x = jax.numpy.array(-19., jax.numpy.float64)
    return jax.numpy.remainder(x, y)

y = jax.numpy.array(3., jax.numpy.float64)
res = fn(y)
print(res)  # 2.0
a = jax.jacfwd(fn)(y)
print(a)  # 8.0
```

For these two APIs, `-19 = (-7)*3 + 2` (the floored quotient is -7), so their gradient should be 7 instead of 8.
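A quick numerical sanity check of the claim (the derivative of `rem(x, y)` with respect to `y` should be `-trunc(x / y)`, which is 6 here; the printed gradient is 7 before the change in the patch above and 6 after):

```python
import jax
import jax.numpy as jnp

jax.config.update("jax_enable_x64", True)

x, y, eps = jnp.float64(-19.0), jnp.float64(3.0), 1e-4
fd = (jax.lax.rem(x, y + eps) - jax.lax.rem(x, y - eps)) / (2 * eps)
print(fd)                                      # ~6.0 via central differences
print(jax.grad(jax.lax.rem, argnums=1)(x, y))  # 7.0 before the fix, 6.0 after
```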
Thanks for the report – I have the fix in #11469
2022-07-12T16:44:28
google/jax
11,507
google__jax-11507
[ "11505" ]
10720258ea7fb5bde997dfa2f3f71135ab7a6733
diff --git a/jax/_src/tree_util.py b/jax/_src/tree_util.py --- a/jax/_src/tree_util.py +++ b/jax/_src/tree_util.py @@ -527,7 +527,8 @@ def _deprecate(f): @functools.wraps(f) def wrapped(*args, **kwargs): warnings.warn(f"jax.{f.__name__} is deprecated, and will be removed in a future release. " - f"Use jax.tree_util.{f.__name__} instead.") + f"Use jax.tree_util.{f.__name__} instead.", + category=FutureWarning, stacklevel=2) return f(*args, **kwargs) return wrapped
tree_util deprecation warnings should set warning class and stacklevel

If I try using deprecated functions like `jax.tree_map` with the development version of JAX, I see messages like:

```
>>> jax.tree_map(lambda x: x, [1, 2, 3])
/usr/local/lib/python3.7/dist-packages/jax/_src/tree_util.py:529: UserWarning: jax.tree_map is deprecated, and will be removed in a future release. Use jax.tree_util.tree_map instead.
  warnings.warn(f"jax.{f.__name__} is deprecated, and will be removed in a future release. "
[1, 2, 3]
```

This is OK, but a better warning would set `category` (so it can be filtered appropriately) and `stacklevel` (so it's clear to users where it's coming from). Either `DeprecationWarning` or `FutureWarning` would be appropriate for the category, depending on how "noisy" you want the warnings to be. I would probably lean towards `FutureWarning` for JAX, since JAX is typically used by developers rather than end users.
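A short sketch of what the suggested change buys downstream users (assuming the warning is emitted with `category=FutureWarning` and `stacklevel=2`, as in the patch above): the warning can be silenced selectively and is attributed to the caller's line rather than to `tree_util.py`:

```python
import warnings
import jax

with warnings.catch_warnings():
    # Filter only the deprecation-style warnings, leaving other UserWarnings intact.
    warnings.filterwarnings("ignore", category=FutureWarning)
    jax.tree_map(lambda x: x, [1, 2, 3])   # deprecated alias, but no warning is printed here
```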
Thanks - that was an oversight on my part. Fixed in #11507
2022-07-15T16:08:56
google/jax
11,542
google__jax-11542
[ "11537" ]
388733b5334665b91b6d61ed20ab3acaa5866de7
diff --git a/examples/resnet50.py b/examples/resnet50.py deleted file mode 100644 --- a/examples/resnet50.py +++ /dev/null @@ -1,125 +0,0 @@ -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""A mock-up showing a ResNet50 network with training on synthetic data. - -This file uses the stax neural network definition library and the optimizers -optimization library. -""" - -import numpy.random as npr - -import jax.numpy as jnp -from jax import jit, grad, random -from jax.example_libraries import optimizers -from jax.example_libraries import stax -from jax.example_libraries.stax import (AvgPool, BatchNorm, Conv, Dense, - FanInSum, FanOut, Flatten, GeneralConv, - Identity, MaxPool, Relu, LogSoftmax) - - -# ResNet blocks compose other layers - -def ConvBlock(kernel_size, filters, strides=(2, 2)): - ks = kernel_size - filters1, filters2, filters3 = filters - Main = stax.serial( - Conv(filters1, (1, 1), strides), BatchNorm(), Relu, - Conv(filters2, (ks, ks), padding='SAME'), BatchNorm(), Relu, - Conv(filters3, (1, 1)), BatchNorm()) - Shortcut = stax.serial(Conv(filters3, (1, 1), strides), BatchNorm()) - return stax.serial(FanOut(2), stax.parallel(Main, Shortcut), FanInSum, Relu) - - -def IdentityBlock(kernel_size, filters): - ks = kernel_size - filters1, filters2 = filters - def make_main(input_shape): - # the number of output channels depends on the number of input channels - return stax.serial( - Conv(filters1, (1, 1)), BatchNorm(), Relu, - Conv(filters2, (ks, ks), padding='SAME'), BatchNorm(), Relu, - Conv(input_shape[3], (1, 1)), BatchNorm()) - Main = stax.shape_dependent(make_main) - return stax.serial(FanOut(2), stax.parallel(Main, Identity), FanInSum, Relu) - - -# ResNet architectures compose layers and ResNet blocks - -def ResNet50(num_classes): - return stax.serial( - GeneralConv(('HWCN', 'OIHW', 'NHWC'), 64, (7, 7), (2, 2), 'SAME'), - BatchNorm(), Relu, MaxPool((3, 3), strides=(2, 2)), - ConvBlock(3, [64, 64, 256], strides=(1, 1)), - IdentityBlock(3, [64, 64]), - IdentityBlock(3, [64, 64]), - ConvBlock(3, [128, 128, 512]), - IdentityBlock(3, [128, 128]), - IdentityBlock(3, [128, 128]), - IdentityBlock(3, [128, 128]), - ConvBlock(3, [256, 256, 1024]), - IdentityBlock(3, [256, 256]), - IdentityBlock(3, [256, 256]), - IdentityBlock(3, [256, 256]), - IdentityBlock(3, [256, 256]), - IdentityBlock(3, [256, 256]), - ConvBlock(3, [512, 512, 2048]), - IdentityBlock(3, [512, 512]), - IdentityBlock(3, [512, 512]), - AvgPool((7, 7)), Flatten, Dense(num_classes), LogSoftmax) - - -if __name__ == "__main__": - rng_key = random.PRNGKey(0) - - batch_size = 8 - num_classes = 1001 - input_shape = (224, 224, 3, batch_size) - step_size = 0.1 - num_steps = 10 - - init_fun, predict_fun = ResNet50(num_classes) - _, init_params = init_fun(rng_key, input_shape) - - def loss(params, batch): - inputs, targets = batch - logits = predict_fun(params, inputs) - return -jnp.sum(logits * targets) - - def accuracy(params, batch): - inputs, targets = batch - target_class = jnp.argmax(targets, 
axis=-1) - predicted_class = jnp.argmax(predict_fun(params, inputs), axis=-1) - return jnp.mean(predicted_class == target_class) - - def synth_batches(): - rng = npr.RandomState(0) - while True: - images = rng.rand(*input_shape).astype('float32') - labels = rng.randint(num_classes, size=(batch_size, 1)) - onehot_labels = labels == jnp.arange(num_classes) - yield images, onehot_labels - - opt_init, opt_update, get_params = optimizers.momentum(step_size, mass=0.9) - batches = synth_batches() - - @jit - def update(i, opt_state, batch): - params = get_params(opt_state) - return opt_update(i, grad(loss)(params, batch), opt_state) - - opt_state = opt_init(init_params) - for i in range(num_steps): - opt_state = update(i, opt_state, next(batches)) - trained_params = get_params(opt_state)
diff --git a/examples/examples_test.py b/examples/examples_test.py --- a/examples/examples_test.py +++ b/examples/examples_test.py @@ -15,7 +15,6 @@ import os import sys -import unittest import zlib from absl.testing import absltest @@ -23,14 +22,12 @@ import numpy as np -import jax from jax import lax from jax import random import jax.numpy as jnp sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from examples import kernel_lsq -from examples import resnet50 sys.path.pop() from jax.config import config @@ -49,36 +46,6 @@ class ExamplesTest(parameterized.TestCase): def setUp(self): self.rng = np.random.default_rng(zlib.adler32(self.__class__.__name__.encode())) - @parameterized.named_parameters( - {"testcase_name": f"_input_shape={input_shape}", - "input_shape": input_shape} - for input_shape in [(2, 20, 25, 2)]) - @unittest.skipIf(config.x64_enabled, "skip in x64 mode") - def testIdentityBlockShape(self, input_shape): - init_fun, apply_fun = resnet50.IdentityBlock(2, (4, 3)) - _CheckShapeAgreement(self, init_fun, apply_fun, input_shape) - - @parameterized.named_parameters( - {"testcase_name": f"_input_shape={input_shape}", - "input_shape": input_shape} - for input_shape in [(2, 20, 25, 3)]) - @unittest.skipIf(config.x64_enabled, "skip in x64 mode") - def testConvBlockShape(self, input_shape): - init_fun, apply_fun = resnet50.ConvBlock(3, (2, 3, 4)) - _CheckShapeAgreement(self, init_fun, apply_fun, input_shape) - - @parameterized.named_parameters( - {"testcase_name": "_num_classes={}_input_shape={}" - .format(num_classes, input_shape), - "num_classes": num_classes, "input_shape": input_shape} - for num_classes in [5, 10] - for input_shape in [(224, 224, 3, 2)]) - @unittest.skipIf(config.x64_enabled, "skip in x64 mode") - @jax.numpy_rank_promotion("allow") # Uses stax, which exercises implicit rank promotion. - def testResNet50Shape(self, num_classes, input_shape): - init_fun, apply_fun = resnet50.ResNet50(num_classes) - _CheckShapeAgreement(self, init_fun, apply_fun, input_shape) - def testKernelRegressionGram(self): n, d = 100, 20 xs = self.rng.normal(size=(n, d))
examples/resnet50 Has anyone trained resnet50 in example? In a batch, I always get the same prediction value: ``` batch_size = 100 [ 56 56 56 56 56 56 153 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 323 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 695 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56 56] ```
Thanks for raising the issue! I've been meaning to delete that example file. It's old and likely not a useful reference. This [Flax resnet50](https://github.com/google/flax/tree/main/examples/imagenet) seems like a much better reference. I'm not sure if it's buggy though. If you want to help debug it, can you share code to run to reproduce the result you're seeing? But if we're going to delete it anyway, maybe it's not worth investigating?
2022-07-19T15:55:49
google/jax
11,544
google__jax-11544
[ "11517" ]
ac731bb4ccaaa25cb20f3e2438f351e0b621784d
diff --git a/jax/_src/lax/convolution.py b/jax/_src/lax/convolution.py --- a/jax/_src/lax/convolution.py +++ b/jax/_src/lax/convolution.py @@ -812,6 +812,9 @@ def conv_shape_tuple(lhs_shape, rhs_shape, strides, pads, batch_group_count=1): lhs_padded = np.add(lhs_shape[2:], np.sum(np.array(pads).reshape(-1, 2), axis=1)) + if np.any(lhs_padded < 0): + raise ValueError("Negative padding is larger than the size of the corresponding dimension: " + f"got padding={pads} for lhs_shape[2:]={lhs_shape[2:]}") out_space = core.stride_shape(lhs_padded, rhs_shape[2:], strides) out_space = np.maximum(0, out_space) if batch_group_count > 1:
`lax.conv_general_dilated` will crash when `padding` has some negative values

`lax.conv_general_dilated` will crash when `padding` has some negative values:

```py
import jax

def fn(lhs, rhs):
    window_strides = [1, 1]
    padding = [[5, 16], [-5, 1]]
    return jax.lax.conv_general_dilated(lhs, rhs, window_strides, padding)

mykey = jax.random.PRNGKey(83725540)
lhs = jax.numpy.array([[[[-7.0950737, 20.612083, 53.864067],
                         [-12.116554, -16.742317, -7.050851]]]])
rhs = jax.numpy.array([[[[29.074692, 40.518192],
                         [21.685156, 41.867775]]]])
fn(lhs, rhs)
```
```
F external/org_tensorflow/tensorflow/compiler/xla/window_util.cc:250] Check failed: bound >= 0 (0 vs. -1)
[1] abort (core dumped)
```

Besides, `jax.lax.conv_general_dilated_patches` and `jax.lax.conv_with_general_padding` also have this issue:

```py
import jax

def fn(lhs):
    filter_shape = [1, 1]
    window_strides = [1, 1]
    padding = [[-4, -2], [-3, -4]]
    return jax.lax.conv_general_dilated_patches(lhs=lhs, filter_shape=filter_shape,
                                                window_strides=window_strides, padding=padding)

mykey = jax.random.PRNGKey(19446744)
lhs = jax.random.uniform(mykey, [1, 2, 3, 4], jax.numpy.float32)
fn(lhs)
```
```
F external/org_tensorflow/tensorflow/compiler/xla/window_util.cc:250] Check failed: bound >= 0 (0 vs. -3)
[1] abort (core dumped)
```

```py
import jax

def fn(lhs, rhs):
    window_strides = [1, 1]
    padding = [[-5, 0], [-16, -2]]
    lhs_dilation = [1, 1]
    rhs_dilation = [1, 1]
    return jax.lax.conv_with_general_padding(lhs, rhs, window_strides=window_strides,
                                             padding=padding, lhs_dilation=lhs_dilation,
                                             rhs_dilation=rhs_dilation)

mykey = jax.random.PRNGKey(30297051)
lhs = jax.random.uniform(mykey, [3, 2, 3, 1], jax.numpy.float32)
mykey = jax.random.PRNGKey(34585797)
rhs = jax.random.uniform(mykey, [2, 2, 1, 2], jax.numpy.float32)
fn(lhs, rhs)
```
```
F external/org_tensorflow/tensorflow/compiler/xla/window_util.cc:250] Check failed: bound >= 0 (0 vs. -2)
[1] abort (core dumped)
```
Hi @zhangqiaorjc, is this [comment](https://github.com/google/jax/issues/11521#issuecomment-1188254569) in the wrong place? If so, I agree that a check in Python is much better than a core dump in C++!

https://github.com/google/jax/pull/11544 should fix the issue

Actually, it looks like negative padding is valid after all; from https://www.tensorflow.org/xla/operation_semantics#convwithgeneralpadding_convolution:

> The padding argument specifies the amount of zero padding to be applied to the base area. The amount of padding can be negative -- the absolute value of negative padding indicates the number of elements to remove from the specified dimension before doing the convolution.

I think the issue in your example is that the negative value is too large for the size of the specified axis. This is going to be a little more involved to check correctly on the Python side...
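In other words, the Python-side check only needs the padded spatial sizes, which are already computed in `conv_shape_tuple`; a standalone sketch of the validation added in the patch above (the helper name here is made up):

```python
import numpy as np

def check_conv_padding(lhs_spatial_shape, pads):
    # Padded size of each spatial dimension: size + low_pad + high_pad.
    lhs_padded = np.add(lhs_spatial_shape, np.sum(np.array(pads).reshape(-1, 2), axis=1))
    if np.any(lhs_padded < 0):
        raise ValueError(
            "Negative padding is larger than the size of the corresponding dimension: "
            f"got padding={pads} for lhs_shape[2:]={lhs_spatial_shape}")

check_conv_padding((2, 3), [[5, 16], [-5, 1]])   # raises: 3 + (-5) + 1 = -1 < 0
```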
2022-07-19T16:48:04
google/jax
11,546
google__jax-11546
[ "11538" ]
9f914a93d6fe7b6667135581ed5f2b1ba59b9d69
diff --git a/jax/_src/scipy/linalg.py b/jax/_src/scipy/linalg.py --- a/jax/_src/scipy/linalg.py +++ b/jax/_src/scipy/linalg.py @@ -205,9 +205,9 @@ def qr(a, overwrite_a=False, lwork=None, mode="full", pivoting=False, return _qr(a, mode, pivoting) -@partial(jit, static_argnames=('sym_pos', 'lower')) -def _solve(a, b, sym_pos, lower): - if not sym_pos: +@partial(jit, static_argnames=('assume_a', 'lower')) +def _solve(a, b, assume_a, lower): + if assume_a != 'pos': return np_linalg.solve(a, b) a, b = _promote_dtypes_inexact(jnp.asarray(a), jnp.asarray(b)) @@ -232,9 +232,18 @@ def _solve(a, b, sym_pos, lower): @_wraps(scipy.linalg.solve, lax_description=_no_overwrite_and_chkfinite_doc, skip_params=('overwrite_a', 'overwrite_b', 'debug', 'check_finite')) def solve(a, b, sym_pos=False, lower=False, overwrite_a=False, overwrite_b=False, - debug=False, check_finite=True): + debug=False, check_finite=True, assume_a='gen'): + # TODO(jakevdp) remove sym_pos argument after October 2022 del overwrite_a, overwrite_b, debug, check_finite - return _solve(a, b, sym_pos, lower) + valid_assume_a = ['gen', 'sym', 'her', 'pos'] + if assume_a not in valid_assume_a: + raise ValueError("Expected assume_a to be one of {valid_assume_a}; got {assume_a!r}") + if sym_pos: + warnings.warn("The sym_pos argument to solve() is deprecated and will be removed " + "in a future JAX release. Use assume_a='pos' instead.", + category=FutureWarning, stacklevel=2) + assume_a = 'pos' + return _solve(a, b, assume_a, lower) @partial(jit, static_argnames=('trans', 'lower', 'unit_diagonal')) def _solve_triangular(a, b, trans, lower, unit_diagonal): diff --git a/jax/_src/scipy/sparse/linalg.py b/jax/_src/scipy/sparse/linalg.py --- a/jax/_src/scipy/sparse/linalg.py +++ b/jax/_src/scipy/sparse/linalg.py @@ -512,7 +512,7 @@ def _lstsq(a, b): # faster than jsp.linalg.lstsq a2 = _dot(a.T.conj(), a) b2 = _dot(a.T.conj(), b) - return jsp.linalg.solve(a2, b2, sym_pos=True) + return jsp.linalg.solve(a2, b2, assume_a='pos') def _gmres_batched(A, b, x0, unit_residual, residual_norm, ptol, restart, M):
diff --git a/tests/linalg_test.py b/tests/linalg_test.py --- a/tests/linalg_test.py +++ b/tests/linalg_test.py @@ -1153,31 +1153,31 @@ def args_maker(): @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": - "_lhs={}_rhs={}_sym_pos={}_lower={}".format( + "_lhs={}_rhs={}_assume_a={}_lower={}".format( jtu.format_shape_dtype_string(lhs_shape, dtype), jtu.format_shape_dtype_string(rhs_shape, dtype), - sym_pos, lower), + assume_a, lower), "lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype, - "sym_pos": sym_pos, "lower": lower} + "assume_a": assume_a, "lower": lower} for lhs_shape, rhs_shape in [ ((1, 1), (1, 1)), ((4, 4), (4,)), ((8, 8), (8, 4)), ] - for sym_pos, lower in [ - (False, False), - (True, False), - (True, True), + for assume_a, lower in [ + ('gen', False), + ('pos', False), + ('pos', True), ] for dtype in float_types + complex_types)) - def testSolve(self, lhs_shape, rhs_shape, dtype, sym_pos, lower): + def testSolve(self, lhs_shape, rhs_shape, dtype, assume_a, lower): rng = jtu.rand_default(self.rng()) - osp_fun = lambda lhs, rhs: osp.linalg.solve(lhs, rhs, sym_pos=sym_pos, lower=lower) - jsp_fun = lambda lhs, rhs: jsp.linalg.solve(lhs, rhs, sym_pos=sym_pos, lower=lower) + osp_fun = lambda lhs, rhs: osp.linalg.solve(lhs, rhs, assume_a=assume_a, lower=lower) + jsp_fun = lambda lhs, rhs: jsp.linalg.solve(lhs, rhs, assume_a=assume_a, lower=lower) def args_maker(): a = rng(lhs_shape, dtype) - if sym_pos: + if assume_a == 'pos': a = np.matmul(a, np.conj(T(a))) a = np.tril(a) if lower else np.triu(a) return [a, rng(rhs_shape, dtype)]
⚠️ Nightly upstream-dev CI failed ⚠️ [Workflow Run URL](https://github.com/google/jax/actions/runs/2697503795) <details><summary>Summary of Failures</summary> ``` tests/linalg_test.py::ScipyLinalgTest::testSolve_lhs=complex128[8,8]_rhs=complex128[8,4]_sym_pos=True_lower=True: DeprecationWarning: The 'sym_pos' keyword is deprecated and should be replaced by using 'assume_a = "pos"'. 'sym_pos' will be removed in SciPy 1.11.0. ``` </details>
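For reference, the migration is just a keyword swap; a sketch using the `assume_a` keyword that the patch above adds to `jax.scipy.linalg.solve` (mirroring `scipy.linalg.solve`):

```python
import jax.numpy as jnp
from jax.scipy import linalg as jsp_linalg

a = jnp.array([[2.0, 1.0],
               [1.0, 2.0]])   # symmetric positive definite
b = jnp.array([1.0, 0.0])

x = jsp_linalg.solve(a, b, assume_a='pos')   # instead of the deprecated sym_pos=True
```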
2022-07-19T20:58:54
google/jax
11,581
google__jax-11581
[ "9165" ]
8a67734e7b073445d475ba27dcc74be1ba728100
diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py --- a/jax/_src/numpy/lax_numpy.py +++ b/jax/_src/numpy/lax_numpy.py @@ -132,7 +132,13 @@ def canonicalize_shape( printoptions = np.printoptions set_printoptions = np.set_printoptions -iscomplexobj = np.iscomplexobj +@_wraps(np.iscomplexobj) +def iscomplexobj(x): + try: + typ = x.dtype.type + except AttributeError: + typ = asarray(x).dtype.type + return issubdtype(typ, complexfloating) shape = _shape = np.shape ndim = _ndim = np.ndim
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -4134,6 +4134,14 @@ def testMemoryView(self): np.array(bytearray(b'\x2a\xf3'), ndmin=2) ) + @parameterized.named_parameters( + {'testcase_name': f"_val={val}", 'val': val} + for val in [1+1j, [1+1j], jnp.pi, np.arange(2)]) + def testIsComplexObj(self, val): + args_maker = lambda: [val] + self._CheckAgainstNumpy(np.iscomplexobj, jnp.iscomplexobj, args_maker) + self._CompileAndCheck(jnp.iscomplexobj, args_maker) + def testIsClose(self): c_isclose = jax.jit(jnp.isclose) c_isclose_nan = jax.jit(partial(jnp.isclose, equal_nan=True))
jnp.iscomplexobj calls __array__ on tracer objects TLDR: In some exotic case `jnp.iscomplexobj(x)` calls `x.__array__()`, and if `x` happens to be a Tracer object then this errors out. This should never happen because the `dtype` of an array is a jit-time/constant information and so `array` should never be hit. I noticed because in some complex NetKet code I attempted to add a check with `jnp.iscomplexobj` to validate the input dtype. This works fine, but when this code is called within `jax.lax.custom_linear_solve` this crashes. I suspect this is happening because `jnp.iscomplexobj` is simply calling `np.iscomplexobj`, which probably has some fast-code path for some types (maybe those that define dtype), but if some condition is not met it calls `__array__` to convert it to a numpy array. See the stack trace below, which you can also see [here](https://github.com/netket/netket/runs/4763942096?check_suite_focus=true#step:8:15062). I don't have a simple reproducer yet, unfortunately, but will attempt to derive one in the next days, unless some of you guys already have an idea of what is going wrong. The key point is when we hit some internal numpy frame ```python test/optimizer/test_qgt_itersolve.py:119: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ netket/optimizer/linear_operator.py:100: in solve return self._solve(jax.tree_util.Partial(solve_fun), y, x0=x0, **kwargs) netket/optimizer/qgt/qgt_jacobian_pytree.py:172: in _solve return _solve(self, solve_fun, y, x0=x0) netket/optimizer/qgt/qgt_jacobian_pytree.py:251: in _solve out, info = solve_fun(unscaled_self, y, x0=x0) ../../../../../Documents/pythonenvs/netket_env/lib/python3.9/site-packages/jax/_src/scipy/sparse/linalg.py:686: in gmres x = lax.custom_linear_solve(A, b, solve=_solve, transpose_solve=_solve) netket/optimizer/linear_operator.py:114: in __call__ return self @ vec netket/optimizer/qgt/qgt_jacobian_pytree.py:157: in __matmul__ return _matmul(self, vec) netket/optimizer/qgt/qgt_jacobian_pytree.py:203: in _matmul check_valid_vector_type(self.params, vec) netket/optimizer/qgt/common.py:44: in check_valid_vector_type jax.tree_multimap(check, x, target) netket/optimizer/qgt/common.py:32: in check #<---- x is a Tracer object. Maybe it has a weak dtype? if jnp.iscomplexobj(target) and not jnp.iscomplexobj(x): <__array_function__ internals>:5: in iscomplexobj ??? 
../../../../../Documents/pythonenvs/netket_env/lib/python3.9/site-packages/numpy/lib/type_check.py:316: in iscomplexobj type_ = asarray(x).dtype.type ../../../../../Documents/pythonenvs/netket_env/lib/python3.9/site-packages/numpy/core/_asarray.py:102: in asarray return array(a, dtype, copy=False, order=order) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = TracerArrayConversionError(Traced<ShapedArray(float64[5])>with<DynamicJaxprTrace(level=1/2)>), tracer = Traced<ShapedArray(float64[5])>with<DynamicJaxprTrace(level=1/2)> def __init__(self, tracer: "core.Tracer"): super().__init__( "The numpy.ndarray conversion method __array__() was called on " > f"the JAX Tracer object {tracer}{tracer._origin_msg()}") E IndexError: list assignment index out of range ``` The dtypes of `x` and `target` on that highlighted line are ```python x=Traced<ShapedArray(complex128[5])>with<DynamicJaxprTrace(level=1/2)> target=(Traced<ShapedArray(float64[5])>with<DynamicJaxprTrace(level=1/2)>, Traced<ShapedArray(float64[5])>with<DynamicJaxprTrace(level=1/2)>) ```
When was this fixed? I'm still having this issue. I'm not aware of any fix related to this bug, because it was closed shortly after being reported. Looking into it now, it seems that this may come up in cases where collections like tuples or lists are passed to `jnp.iscomplexobj` within a `jit` context. For example: ```python import jax @jax.jit def f(x): return jnp.iscomplexobj(x) f([1]) ``` ```pytb --------------------------------------------------------------------------- AttributeError Traceback (most recent call last) [/usr/local/lib/python3.7/dist-packages/numpy/lib/type_check.py](https://localhost:8080/#) in iscomplexobj(x) 336 try: --> 337 dtype = x.dtype 338 type_ = dtype.type AttributeError: 'list' object has no attribute 'dtype' During handling of the above exception, another exception occurred: UnfilteredStackTrace Traceback (most recent call last) 20 frames <__array_function__ internals> in iscomplexobj(*args, **kwargs) UnfilteredStackTrace: jax._src.errors.TracerArrayConversionError: The numpy.ndarray conversion method __array__() was called on the JAX Tracer object Traced<ShapedArray(int32[], weak_type=True)>with<DynamicJaxprTrace(level=0/1)> While tracing the function f at <ipython-input-17-86c68a464777>:3 for jit, this concrete value was not available in Python because it depends on the value of the argument 'x'. See https://jax.readthedocs.io/en/latest/errors.html#jax.errors.TracerArrayConversionError The stack trace below excludes JAX-internal frames. The preceding is the original exception that occurred, unmodified. -------------------- The above exception was the direct cause of the following exception: TracerArrayConversionError Traceback (most recent call last) <__array_function__ internals> in iscomplexobj(*args, **kwargs) [/usr/local/lib/python3.7/dist-packages/numpy/lib/type_check.py](https://localhost:8080/#) in iscomplexobj(x) 338 type_ = dtype.type 339 except AttributeError: --> 340 type_ = asarray(x).dtype.type 341 return issubclass(type_, _nx.complexfloating) 342 TracerArrayConversionError: The numpy.ndarray conversion method __array__() was called on the JAX Tracer object Traced<ShapedArray(int32[], weak_type=True)>with<DynamicJaxprTrace(level=0/1)> While tracing the function f at <ipython-input-17-86c68a464777>:3 for jit, this concrete value was not available in Python because it depends on the value of the argument 'x'. See https://jax.readthedocs.io/en/latest/errors.html#jax.errors.TracerArrayConversionError ``` Does that seem similar to the issue you're having? Reopening, because I think it's worth fixing `jnp.iscomplexobj` to handle this case. I'll try to create a MWE to recreate the error I'm seeing, I noticed it when trying to add support for complex numbers to diffrax - https://github.com/patrick-kidger/diffrax/pull/112 Looking back at this – it looks like similar issues arise for other `jax.numpy` functions that are aliased to numpy equivalents. For example: ```python import jax.numpy as jnp from jax import jit jit(jnp.iscomplexobj)([1, 2, 3]) # TracerArrayConversionError jit(jnp.shape)([1, 2, 3]) # TracerArrayConversionError jit(jnp.size)([1, 2, 3]) # TracerArrayConversionError jit(jnp.ndim)([1, 2, 3]) # TracerArrayConversionError ``` We should probably make these match other JAX APIs and wrap these in such a way that `_check_arraylike` is called on the inputs.
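A minimal dtype-based version in the spirit of the fix above, which consults `.dtype` (something tracers carry) instead of materializing the value, so it also works on lists of tracers under `jit`:

```python
import jax
import jax.numpy as jnp

def iscomplexobj(x):
    try:
        typ = x.dtype.type
    except AttributeError:
        # Fall back to asarray for lists/tuples; dtype inspection never
        # requires concrete values, so this traces fine.
        typ = jnp.asarray(x).dtype.type
    return jnp.issubdtype(typ, jnp.complexfloating)

print(jax.jit(iscomplexobj)(jnp.ones(3, jnp.complex64)))   # True
print(jax.jit(iscomplexobj)([1, 2, 3]))                    # False
```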
2022-07-21T23:57:03
google/jax
11627
google__jax-11627
[ "11574" ]
9f96a0474e7626cdd8b043dc34e83552e696978e
diff --git a/jax/_src/lax/control_flow/conditionals.py b/jax/_src/lax/control_flow/conditionals.py --- a/jax/_src/lax/control_flow/conditionals.py +++ b/jax/_src/lax/control_flow/conditionals.py @@ -148,7 +148,8 @@ def _cond(pred, true_fun: Callable, false_fun: Callable, *operands, operand=_no_operand_sentinel, linear=None): """Conditionally apply ``true_fun`` or ``false_fun``. - ``cond()`` has equivalent semantics to this Python implementation:: + Provided arguments are correctly typed, ``cond()`` has equivalent + semantics to this Python implementation:: def cond(pred, true_fun, false_fun, *operands): if pred: @@ -181,6 +182,8 @@ def cond(pred, true_fun, false_fun, *operands): operands = (operand,) del operand + if pred is None: + raise TypeError("cond predicate is None") if isinstance(pred, Sequence) or np.ndim(pred) != 0: raise TypeError( f"Pred must be a scalar, got {pred} of " +
diff --git a/tests/lax_control_flow_test.py b/tests/lax_control_flow_test.py --- a/tests/lax_control_flow_test.py +++ b/tests/lax_control_flow_test.py @@ -628,6 +628,16 @@ def false_fun(x): self.assertEqual(fun(4), cfun(4)) self.assertEqual(fun(4), (8, 16)) + def testCondPredIsNone(self): + # see https://github.com/google/jax/issues/11574 + def f(pred, x): + return lax.cond(pred, lambda x: x + 1, lambda x: x + 2, x) + + self.assertRaisesRegex(TypeError, "cond predicate is None", + lambda: f(None, 1.)) + self.assertRaisesRegex(TypeError, "cond predicate is None", + lambda: jax.jit(f)(None, 1.)) + def testCondTwoOperands(self): # see https://github.com/google/jax/issues/8469 add, mul = lax.add, lax.mul
jax.lax.cond evaluates None as True When using `jax.lax.cond`, passing `None` as the first argument causes the `true_fun` argument to get called. This is contrary to the behavior one would expect for Python if statements. For example, running the following ```python import jax true_fun = lambda x: True false_fun = lambda x: False for pred in [True, False, None]: print(pred, jax.lax.cond(pred, true_fun, false_fun, None)) ``` gives the following output: ``` True True False False None True ``` I am using the newest PyPI versions of `jax` and `jaxlib` (0.3.14).
For what it's worth, the reason `None` evaluates to `True` is because of this: ```python >>> jnp.array(None) DeviceArray(nan, dtype=float32) ``` And `NaN`, when converted to `bool`, is not False because it's not equal to zero: ```python >>> jnp.array(None).astype(bool) DeviceArray(True, dtype=bool) ``` I suppose a better behavior here would be to raise an error when `None` is passed in a place where a JAX array is expected. That would work. The only thing to keep an eye on is the documentation of `jax.lax.cond`, which currently says: ``` cond() has equivalent semantics to this Python implementation: def cond(pred, true_fun, false_fun, *operands): if pred: return true_fun(*operands) else: return false_fun(*operands) ``` Since `bool(None)` is `False`, some caveat would be helpful to warn users about unexpected errors that wouldn't arise in the Python equivalent. Regardless, throwing an error is still better than the present situation, since the branch that is selected is currently the opposite of what the documentation implies.
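For reference, the patch above makes this fail loudly instead of silently taking the true branch; a small sketch of the expected behavior, mirroring the added test:
```python
import jax
from jax import lax

def f(pred, x):
  return lax.cond(pred, lambda x: x + 1, lambda x: x + 2, x)

# Both of these now raise TypeError("cond predicate is None"):
# f(None, 1.)
# jax.jit(f)(None, 1.)
```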
2022-07-26T20:14:10
google/jax
11797
google__jax-11797
[ "11795" ]
25dd62c3882985f96e87f3fb7dd7b8b11a7a6e6d
diff --git a/jax/_src/numpy/linalg.py b/jax/_src/numpy/linalg.py --- a/jax/_src/numpy/linalg.py +++ b/jax/_src/numpy/linalg.py @@ -111,14 +111,13 @@ def matrix_power(a, n): @jit def matrix_rank(M, tol=None): M, = _promote_dtypes_inexact(jnp.asarray(M)) - if M.ndim > 2: - raise TypeError("array should have 2 or fewer dimensions") if M.ndim < 2: return jnp.any(M != 0).astype(jnp.int32) S = svd(M, full_matrices=False, compute_uv=False) if tol is None: - tol = S.max() * np.max(M.shape).astype(S.dtype) * jnp.finfo(S.dtype).eps - return jnp.sum(S > tol) + tol = S.max(-1) * np.max(M.shape[-2:]).astype(S.dtype) * jnp.finfo(S.dtype).eps + tol = jnp.expand_dims(tol, np.ndim(tol)) + return jnp.sum(S > tol, axis=-1) @custom_jvp
diff --git a/tests/linalg_test.py b/tests/linalg_test.py --- a/tests/linalg_test.py +++ b/tests/linalg_test.py @@ -924,7 +924,8 @@ def testMatrixPower(self, shape, dtype, n): {"testcase_name": "_shape={}".format( jtu.format_shape_dtype_string(shape, dtype)), "shape": shape, "dtype": dtype} - for shape in [(3, ), (1, 2), (8, 5), (4, 4), (5, 5), (50, 50)] + for shape in [(3, ), (1, 2), (8, 5), (4, 4), (5, 5), (50, 50), + (3, 4, 5), (2, 3, 4, 5)] for dtype in float_types + complex_types)) def testMatrixRank(self, shape, dtype): rng = jtu.rand_default(self.rng())
BUG: jnp.linalg.matrix_rank does not operate on stacks of matrices ### Description ``` myInput = np.random.normal(size=(9,10,10)) myInput.shape # (9,10,10) np.linalg.matrix_rank(myInput) # array([10, 10, 10, 10, 10, 10, 10, 10, 10]) ``` jax fails on this same stack: ``` jax.numpy.linalg.matrix_rank(jnp.array(myInput)) ### TypeError: array should have 2 or fewer dimensions ### Expected: DeviceArray([10, 10, 10, 10, 10, 10, 10, 10, 10]) ``` ### What jax/jaxlib version are you using? jax 0.3.13, jaxlib 0.3.10 ### Which accelerator(s) are you using? - [ ] CPU - [X] GPU - [ ] TPU ### Additional System Info _No response_
I haven't tested, but I suspect this might apply to the jax version of `numpy.linalg.svd` as well. Thanks for the report - I'll take a look.
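On versions without the fix above, a `vmap`-based workaround should give the same per-matrix ranks (a rough sketch, not extensively tested):
```python
import numpy as np
import jax
import jax.numpy as jnp

myInput = np.random.normal(size=(9, 10, 10))
ranks = jax.vmap(jnp.linalg.matrix_rank)(jnp.asarray(myInput))
# Expected: an array of nine 10s, matching np.linalg.matrix_rank(myInput)
```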
2022-08-08T18:17:10
google/jax
11803
google__jax-11803
[ "2731" ]
ce80a5480520dde98f0d21bb79064e91da2a36fd
diff --git a/jax/_src/distributed.py b/jax/_src/distributed.py --- a/jax/_src/distributed.py +++ b/jax/_src/distributed.py @@ -102,45 +102,53 @@ def initialize_preemption_sync_manager(self): def initialize(coordinator_address: Optional[str] = None, num_processes: Optional[int] = None, process_id: Optional[int] = None): - """Initialize distributed system for topology discovery. + """Initializes the JAX distributed system. - Currently, calling ``initialize`` sets up the multi-host GPU backend and Cloud - TPU backend. + Calling :func:`~jax.distributed.initialize` prepares JAX for execution on + multi-host GPU and Cloud TPU. :func:`~jax.distributed.initialize` must be + called before performing any JAX computations. - If you are on GPU platform, you will have to provide the coordinator_address - and other args to the `initialize` API. + The JAX distributed system serves a number of roles: - If you are on TPU platform, the coordinator_address and other args will be - auto detected but you have the option to provide it too. + * it allows JAX processes to discover each other and share topology information, + * it performs health checking, ensuring that all processes shut down if any process dies, and + * it is used for distributed checkpointing. + + If you are using GPU, you must provide the ``coordinator_address``, + ``num_processes``, and ``process_id`` arguments to :func:`~jax.distributed.initialize`. + + If you are using TPU, all arguments are optional: if omitted, they + will be chosen automatically from the Cloud TPU metadata. Args: - coordinator_address: IP address and port of the coordinator. The choice of + coordinator_address: the IP address of process `0` and a port on which that + process should launch a coordinator service. The choice of port does not matter, so long as the port is available on the coordinator and all processes agree on the port. - Can be None only for TPU platform. If coordinator_address is None on TPU, - then it will be auto detected. - num_processes: Number of processes. Can be None only for TPU platform and - if None will be determined from the TPU slice metadata. - process_id: Id of the current process. Can be None only for TPU platform and - if None will default to the current TPU worker id determined via the TPU - slice metadata. + May be ``None`` only on TPU, in which case it will be chosen automatically. + num_processes: Number of processes. May be ``None`` only on TPU, in + which case it will be chosen automatically based on the TPU slice. + process_id: The ID number of the current process. The ``process_id`` values across + the cluster must be a dense range ``0``, ``1``, ..., ``num_processes - 1``. + May be ``None`` only on TPU; if ``None`` it will be chosen from the TPU slice + metadata. Raises: - RuntimeError: If `distributed.initialize` is called more than once. + RuntimeError: If :func:`~jax.distributed.initialize` is called more than once. Example: - Suppose there are two GPU hosts, and host 0 is the designated coordinator + Suppose there are two GPU processs, and process 0 is the designated coordinator with address ``10.0.0.1:1234``. To initialize the GPU cluster, run the following commands before anything else. 
- On host 0: + On process 0: - >>> jax.distributed.initialize('10.0.0.1:1234', 2, 0) # doctest: +SKIP + >>> jax.distributed.initialize(coordinator_address='10.0.0.1:1234', num_processes=2, process_id=0) # doctest: +SKIP - On host 1: + On process 1: - >>> jax.distributed.initialize('10.0.0.1:1234', 2, 1) # doctest: +SKIP + >>> jax.distributed.initialize(coordinator_address='10.0.0.1:1234', num_processes=2, process_id=1) # doctest: +SKIP """ global_state.initialize(coordinator_address, num_processes, process_id) atexit.register(shutdown)
Support multinode training on GPU
I don't have a node with 8 GPUs; I have two nodes, each with 4 GPUs. Is it possible to train a model on multiple nodes?
This *is* actually something that does work right now but it's still experimental. There's also no real public-facing API for it yet; you have to type in some obscure and fairly magical things to set it all up correctly. We should polish it off and document it! Can you say a bit more about your model, though? Would gradient all-reductions across multiple nodes suffice? @hawkinsp Technically, I'm training a reformer model using Trax library. And I assume you're just looking for data parallelism, i.e., partitioning a minibatch across GPUs, not partitioning in any other way (e.g., model parallelism)? @hawkinsp yeah my concern is data parallelism @hawkinsp Can you please share your notes on this (don't need a stable api) ? We are trying some hybrid data/model/pipeline parallelism so it is a little different from @py4 but would love to get started with data parallelism Data parallelism would of value to other projects that use XLA as well (eg https://www.tensorflow.org/swift). Exposing this functionality in a standardized way would help drive progress in the broader ecosystem! > I don't have a node with 8 gpus. I have two nodes each with 4 gpus. So is it possible to train a model on multiple nodes? Hello py4, I am meeting the same problem, have you found some solutions? > This _is_ actually something that does work right now but it's still experimental. There's also no real public-facing API for it yet; you have to type in some obscure and fairly magical things to set it all up correctly. > > We should polish it off and document it! Hello hawkinsp, Could you please provide more details about how to run data parallel with multi node GPUs? @hawkinsp We are also interested in running JAX code on multiple nodes. Anything (hacky or not) that you can share would be appreciated. Thanks! I really enjoyed Jax during my DM internship and wanted to use it on my university SLURM cluster, but the lack of a clear (official) data parallel (multi-node) solution is a huge blocker to increasing Jax adoption outside of Google where you cant just grab a TPU pod and `pmap` across the pod. A single 8 (GPU) replica setup can barely train a Resnet50 imagenet classifier. Training SimCLR or any other large SOTA model is currently impossible without multi-node data parallelism. I would love this feature! I enjoy Jax, but I've been largely using DeepSpeed due to its ability to distribute across clusters. Any progress on this issue ? Using JAX to train a model on multi-node, multi-GPU is becoming a very important features for us. @hawkinsp This is a significant bottleneck for scaling on multi-node GPU clusters. Is there any update on this issue? Also, there was a recent `pjit` [tutorial](https://jax.readthedocs.io/en/latest/jax-101/08-pjit.html) that explains multi-node TPU scaling but doesn't mention about GPUs. Is that planned to be updated in the future? @sudhakarsingh27 I constantly monitoring the jax releases, and there is something WIP that you might be interested in https://github.com/google/jax/pull/8364 See also: #9582 Yes indeed. We haven't advertised it that much yet, but (a) you need to initialize the cluster using that API, and (b) you need to follow the same rules of multi-host programming that also apply on TPU, documented here: https://jax.readthedocs.io/en/latest/multi_process.html I suspect we can consider this issue closed when we've documented (a) in the document (b). 
@hawkinsp @zhangqiaorjc Multinode (or multiprocess) doesn't seem to work with the following jax(lib) versions: ``` jax 0.3.13 jaxlib 0.3.10+cuda11.cudnn82 ``` Ran [the attached](https://github.com/google/jax/files/8744863/jax_multi_node_experiment.zip) minimal code on single node with 8 V100 GPUs as follows (2 processes with 4 GPUs each): ``` CUDA_VISIBLE_DEVICES="0,1,2,3" python jax_multi_node_experiment.py 0 & CUDA_VISIBLE_DEVICES="4,5,6,7" python jax_multi_node_experiment.py 1 ``` --- I could check that multi process(host/node) first fails with `jax[cuda]==0.3.12` installed with following command ``` pip install jax[cuda]==0.3.12 -f https://storage.googleapis.com/jax-releases/jax_releases.html ``` I get the following error when I run the multi-process jax commands above: ``` 127.0.0.1:65432 2 1 I0525 00:05:16.228919 139978761119552 distributed.py:59] Connecting to JAX distributed service on 127.0.0.1:65432 I0525 00:05:16.245648 139978761119552 xla_bridge.py:330] Unable to initialize backend 'tpu_driver': NOT_FOUND: Unable to find driver in registry given worker: I0525 00:05:16.246569 139742444975936 xla_bridge.py:330] Unable to initialize backend 'tpu_driver': NOT_FOUND: Unable to find driver in registry given worker: I0525 00:05:18.227763 139978761119552 xla_bridge.py:330] Unable to initialize backend 'tpu': INVALID_ARGUMENT: TpuPlatform is not available. I0525 00:05:18.228022 139978761119552 xla_bridge.py:330] Unable to initialize backend 'cuda': make_gpu_client() got an unexpected keyword argument 'platform_name' I0525 00:05:18.228085 139978761119552 xla_bridge.py:330] Unable to initialize backend 'rocm': make_gpu_client() got an unexpected keyword argument 'platform_name' global devices= [GpuDevice(id=0, process_index=0), GpuDevice(id=1, process_index=0), GpuDevice(id=2, process_index=0), GpuDevice(id=3, process_index=0)] local devices= [GpuDevice(id=0, process_index=0), GpuDevice(id=1, process_index=0), GpuDevice(id=2, process_index=0), GpuDevice(id=3, process_index=0)] I0525 00:05:18.246024 139742444975936 xla_bridge.py:330] Unable to initialize backend 'tpu': INVALID_ARGUMENT: TpuPlatform is not available. I0525 00:05:18.246273 139742444975936 xla_bridge.py:330] Unable to initialize backend 'cuda': make_gpu_client() got an unexpected keyword argument 'platform_name' I0525 00:05:18.246334 139742444975936 xla_bridge.py:330] Unable to initialize backend 'rocm': make_gpu_client() got an unexpected keyword argument 'platform_name' global devices= [GpuDevice(id=0, process_index=0), GpuDevice(id=1, process_index=0), GpuDevice(id=2, process_index=0), GpuDevice(id=3, process_index=0)] local devices= [GpuDevice(id=0, process_index=0), GpuDevice(id=1, process_index=0), GpuDevice(id=2, process_index=0), GpuDevice(id=3, process_index=0)] ``` For reference, here's the ouput from `jax[cuda]==0.3.10` where multi-process seems to be working okay: ``` 127.0.0.1:65432 2 1 I0525 00:09:03.394093 140366043674432 distributed.py:59] Connecting to JAX distributed service on 127.0.0.1:65432 I0525 00:09:03.410755 140366043674432 xla_bridge.py:263] Unable to initialize backend 'tpu_driver': NOT_FOUND: Unable to find driver in registry given worker: I0525 00:09:03.410994 140588577851200 xla_bridge.py:263] Unable to initialize backend 'tpu_driver': NOT_FOUND: Unable to find driver in registry given worker: I0525 00:09:05.517608 140366043674432 xla_bridge.py:263] Unable to initialize backend 'tpu': INVALID_ARGUMENT: TpuPlatform is not available. 
global devices= [GpuDevice(id=0, process_index=0), GpuDevice(id=1, process_index=0), GpuDevice(id=2, process_index=0), GpuDevice(id=3, process_index=0), GpuDevice(id=4, process_index=1), GpuDevice(id=5, process_index=1), GpuDevice(id=6, process_index=1), GpuDevice(id=7, process_index=1)] I0525 00:09:05.517817 140588577851200 xla_bridge.py:263] Unable to initialize backend 'tpu': INVALID_ARGUMENT: TpuPlatform is not available. local devices= [GpuDevice(id=4, process_index=1), GpuDevice(id=5, process_index=1), GpuDevice(id=6, process_index=1), GpuDevice(id=7, process_index=1)] global devices= [GpuDevice(id=0, process_index=0), GpuDevice(id=1, process_index=0), GpuDevice(id=2, process_index=0), GpuDevice(id=3, process_index=0), GpuDevice(id=4, process_index=1), GpuDevice(id=5, process_index=1), GpuDevice(id=6, process_index=1), GpuDevice(id=7, process_index=1)] local devices= [GpuDevice(id=0, process_index=0), GpuDevice(id=1, process_index=0), GpuDevice(id=2, process_index=0), GpuDevice(id=3, process_index=0)] ```
2022-08-08T21:48:51
google/jax
11906
google__jax-11906
[ "11501" ]
6ae46c3d696444c840763c78b38788306227a182
diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py --- a/jax/_src/numpy/lax_numpy.py +++ b/jax/_src/numpy/lax_numpy.py @@ -1616,14 +1616,14 @@ def pad(array, pad_width, mode="constant", **kwargs): @_wraps(np.stack, skip_params=['out']) -def stack(arrays, axis: int = 0, out=None): +def stack(arrays, axis: int = 0, out=None, dtype=None): if not len(arrays): raise ValueError("Need at least one array to stack.") if out is not None: raise NotImplementedError("The 'out' argument to jnp.stack is not supported.") if isinstance(arrays, (np.ndarray, ndarray)): axis = _canonicalize_axis(axis, arrays.ndim) - return concatenate(expand_dims(arrays, axis + 1), axis=axis) + return concatenate(expand_dims(arrays, axis + 1), axis=axis, dtype=dtype) else: _stackable(*arrays) or _check_arraylike("stack", *arrays) shape0 = shape(arrays[0]) @@ -1633,7 +1633,7 @@ def stack(arrays, axis: int = 0, out=None): if shape(a) != shape0: raise ValueError("All input arrays must have the same shape.") new_arrays.append(expand_dims(a, axis)) - return concatenate(new_arrays, axis=axis) + return concatenate(new_arrays, axis=axis, dtype=dtype) @_wraps(np.tile) def tile(A, reps): @@ -1696,33 +1696,33 @@ def concatenate(arrays, axis: int = 0, dtype=None): @_wraps(np.vstack) -def vstack(tup): +def vstack(tup, dtype=None): if isinstance(tup, (np.ndarray, ndarray)): arrs = jax.vmap(atleast_2d)(tup) else: arrs = [atleast_2d(m) for m in tup] - return concatenate(arrs, axis=0) + return concatenate(arrs, axis=0, dtype=dtype) row_stack = vstack @_wraps(np.hstack) -def hstack(tup): +def hstack(tup, dtype=None): if isinstance(tup, (np.ndarray, ndarray)): arrs = jax.vmap(atleast_1d)(tup) arr0_ndim = arrs.ndim - 1 else: arrs = [atleast_1d(m) for m in tup] arr0_ndim = arrs[0].ndim - return concatenate(arrs, axis=0 if arr0_ndim == 1 else 1) + return concatenate(arrs, axis=0 if arr0_ndim == 1 else 1, dtype=dtype) @_wraps(np.dstack) -def dstack(tup): +def dstack(tup, dtype=None): if isinstance(tup, (np.ndarray, ndarray)): arrs = jax.vmap(atleast_3d)(tup) else: arrs = [atleast_3d(m) for m in tup] - return concatenate(arrs, axis=2) + return concatenate(arrs, axis=2, dtype=dtype) @_wraps(np.column_stack)
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -3311,9 +3311,11 @@ def testColumnStack(self, shape, dtypes, array_input): self._CompileAndCheck(jnp_fun, args_maker) @parameterized.named_parameters(jtu.cases_from_list( - {"testcase_name": "{}_axis={}_array={}".format( - jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes), axis, array_input), - "shape": shape, "axis": axis, "dtypes": dtypes, "array_input": array_input} + {"testcase_name": "{}_axis={}_array={}_out={}".format( + jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes), axis, array_input, + np.dtype(out_dtype).name), + "shape": shape, "axis": axis, "dtypes": dtypes, "array_input": array_input, + "out_dtype": out_dtype} for dtypes in [ [np.float32], [np.float32, np.float32], @@ -3323,23 +3325,30 @@ def testColumnStack(self, shape, dtypes, array_input): ] for shape in [(), (2,), (3, 4), (1, 100)] for axis in range(-len(shape), len(shape) + 1) - for array_input in [True, False])) - def testStack(self, shape, axis, dtypes, array_input): + for array_input in [True, False] + for out_dtype in [np.float32, np.int32])) + def testStack(self, shape, axis, dtypes, array_input, out_dtype): rng = jtu.rand_default(self.rng()) if array_input: args_maker = lambda: [np.array([rng(shape, dtype) for dtype in dtypes])] else: args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]] - np_fun = _promote_like_jnp(partial(np.stack, axis=axis)) - jnp_fun = partial(jnp.stack, axis=axis) + + if numpy_version < (1, 24): + np_fun = _promote_like_jnp(lambda *args: np.stack(*args, axis=axis).astype(out_dtype)) + else: + np_fun = _promote_like_jnp(partial(np.stack, axis=axis, dtype=out_dtype)) + + jnp_fun = partial(jnp.stack, axis=axis, dtype=out_dtype) with jtu.strict_promotion_if_dtypes_match(dtypes): self._CheckAgainstNumpy(jnp_fun, np_fun, args_maker) self._CompileAndCheck(jnp_fun, args_maker) @parameterized.named_parameters(jtu.cases_from_list( - {"testcase_name": "_op={}_{}_array={}".format( - op, jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes), array_input), - "shape": shape, "op": op, "dtypes": dtypes, "array_input": array_input} + {"testcase_name": "_op={}_{}_array={}_out={}".format( + op, jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes), array_input, + np.dtype(out_dtype).name), + "shape": shape, "op": op, "dtypes": dtypes, "array_input": array_input, "out_dtype": out_dtype} for op in ["hstack", "vstack", "dstack"] for dtypes in [ [np.float32], @@ -3349,15 +3358,21 @@ def testStack(self, shape, axis, dtypes, array_input): [np.float32, np.int32, np.float64], ] for shape in [(), (2,), (3, 4), (1, 100), (2, 3, 4)] - for array_input in [True, False])) - def testHVDStack(self, shape, op, dtypes, array_input): + for array_input in [True, False] + for out_dtype in [np.float32, np.int32])) + def testHVDStack(self, shape, op, dtypes, array_input, out_dtype): rng = jtu.rand_default(self.rng()) if array_input: args_maker = lambda: [np.array([rng(shape, dtype) for dtype in dtypes])] else: args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]] - np_fun = _promote_like_jnp(getattr(np, op)) - jnp_fun = getattr(jnp, op) + + if numpy_version < (1, 24) or op == "dstack": + np_fun = _promote_like_jnp(lambda *args: getattr(np, op)(*args).astype(out_dtype)) + else: + np_fun = partial(_promote_like_jnp(getattr(np, op)), dtype=out_dtype) + + jnp_fun = partial(getattr(jnp, op), dtype=out_dtype) with 
jtu.strict_promotion_if_dtypes_match(dtypes): self._CheckAgainstNumpy(jnp_fun, np_fun, args_maker) self._CompileAndCheck(jnp_fun, args_maker) @@ -6388,7 +6403,7 @@ def testWrappedSignaturesMatch(self): 'einsum': ['kwargs'], 'einsum_path': ['einsum_call'], 'eye': ['order', 'like'], - 'hstack': ['dtype', 'casting'], + 'hstack': ['casting'], 'identity': ['like'], 'in1d': ['kind'], 'isin': ['kind'], @@ -6400,11 +6415,11 @@ def testWrappedSignaturesMatch(self): 'histogramdd': ['normed'], 'ones': ['order', 'like'], 'ones_like': ['subok', 'order'], - 'row_stack': ['dtype', 'casting'], - 'stack': ['dtype', 'casting'], + 'row_stack': ['casting'], + 'stack': ['casting'], 'tri': ['like'], 'unique': ['equal_nan'], - 'vstack': ['dtype', 'casting'], + 'vstack': ['casting'], 'zeros_like': ['subok', 'order'] }
Add `dtype` argument to `jax.numpy.concatenate` and friends
`np.concatenate` got a `dtype` argument in numpy 1.20; the other functions will get it in numpy 1.24.
- [ ] `np.concatenate`
- [ ] `np.stack`
- [ ] `np.vstack`
- [ ] `np.hstack`
- [ ] `np.row_stack`

There is also a `casting` argument that goes with this; for now we can probably ignore it because JAX's type promotion system does not have well-defined casting modes.
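For concreteness, a small sketch of the requested surface area, assuming it mirrors the NumPy signatures (`jnp.concatenate` already accepts `dtype`):
```python
import jax.numpy as jnp

a = jnp.ones(3, dtype=jnp.float32)
b = jnp.zeros(3, dtype=jnp.float32)

jnp.concatenate([a, b], dtype=jnp.int32)  # result is cast to int32
jnp.stack([a, b], dtype=jnp.int32)        # same keyword on stack/vstack/row_stack
```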
2022-08-14T06:39:49
google/jax
11,981
google__jax-11981
[ "11892" ]
6340952e2a90195c7507345e4eb00df951db058a
diff --git a/jax/_src/lax/control_flow/conditionals.py b/jax/_src/lax/control_flow/conditionals.py --- a/jax/_src/lax/control_flow/conditionals.py +++ b/jax/_src/lax/control_flow/conditionals.py @@ -671,6 +671,12 @@ def _cond_transpose(reduce_axes, cts, *args, branches, linear): assert next(out_iter, None) is None return [None] + out +def _cond_axis_substitution(params, subst, traverse): + if not traverse: + return params + branches = tuple(core.subst_axis_names_jaxpr(jaxpr, subst) for jaxpr in params['branches']) + return dict(params, branches=branches) + def _cond_typecheck(*in_atoms, branches, linear): avals = [x.aval for x in in_atoms] tc = partial(_typecheck_param, 'cond') @@ -753,6 +759,7 @@ def cond_bind(*args, branches, linear): batching.axis_primitive_batchers[cond_p] = _cond_batching_rule xla.register_initial_style_primitive(cond_p) core.custom_typechecks[cond_p] = _cond_typecheck +core.axis_substitution_rules[cond_p] = _cond_axis_substitution pe.partial_eval_jaxpr_custom_rules[cond_p] = _cond_partial_eval_custom pe.dce_rules[cond_p] = _cond_dce_rule
BUG: Unbound axis error with xmap + scan + cond. ### Description I've been getting the error `NameError: unbound axis name: b. The following axis names (e.g. defined by pmap) are available to collective operations: ['x', 'x']` when running the following code (a simplified version of my full code): ```python import jax import jax.numpy as jnp import numpy as onp from jax.experimental.maps import xmap, Mesh import tensorflow_probability.substrates.jax as tfp tfd = tfp.distributions def loss(key): init_z = tfd.Normal(0., 1.).sample(seed=key) def scan_fn(prev_z, t): new_z = jax.lax.cond(t == 0, lambda _: prev_z, lambda _: prev_z, None) return new_z, None out, _ = jax.lax.scan(scan_fn, init_z, jnp.arange(10)) return 0. def step(key): x = loss(key) return jnp.mean(x, axis=('b')) xm_step = xmap(step, in_axes=['b',...], out_axes=[...], axis_resources={'b':'x'}) devices = onp.array(jax.local_devices()) key = jax.random.PRNGKey(0) keys = jax.random.split(key, num=jax.local_device_count()) with Mesh(devices, ('x',)): xm_step(keys) ``` This is strange to me for a few reasons: 1. I am actually binding `b` to `x` using `axis_resources`, I think. 2. `x` appears twice in the available axis names. 3. All of the code in `loss` appears to be necessary to trigger the error. If I remove the sampling of `init_z` or set it to a constant, the error disappears. Similarly if I remove the `scan`, the error disappears. Similarly, if I remove the `cond`, the error disappears. 4. Note that `loss` is returning 0 -- the outcome doesn't depend on the computation in the body. Any thoughts on why this might occur? @sharadmv Full error: ``` Traceback (most recent call last): File "/Users/dlaw/dev/jax_bug/run.py", line 34, in <module> xm_step(keys) File "/Users/dlaw/dev/jax_bug/.env/lib/python3.10/site-packages/jax/_src/traceback_util.py", line 162, in reraise_with_filtered_traceback return fun(*args, **kwargs) File "/Users/dlaw/dev/jax_bug/.env/lib/python3.10/site-packages/jax/_src/lax/control_flow/loops.py", line 1128, in while_loop outs = while_p.bind(*cond_consts, *body_consts, *init_vals, File "/Users/dlaw/dev/jax_bug/.env/lib/python3.10/site-packages/jax/core.py", line 2339, in bind axis_main = max((axis_frame(a).main_trace for a in used_axis_names(self, params)), File "/Users/dlaw/dev/jax_bug/.env/lib/python3.10/site-packages/jax/core.py", line 2250, in used_axis_names subst_axis_names(primitive, params, subst) File "/Users/dlaw/dev/jax_bug/.env/lib/python3.10/site-packages/jax/core.py", line 2269, in subst_axis_names new_params[name] = subst_axis_names_jaxpr(jaxpr, shadowed_subst) File "/Users/dlaw/dev/jax_bug/.env/lib/python3.10/site-packages/jax/core.py", line 2325, in subst_axis_names_jaxpr subst.axis_names |= used_axis_names_jaxpr(jaxpr) File "/Users/dlaw/dev/jax_bug/.env/lib/python3.10/site-packages/jax/_src/util.py", line 273, in wrapped result = call(weak_arg, *args, **kwargs) File "/Users/dlaw/dev/jax_bug/.env/lib/python3.10/site-packages/jax/core.py", line 2320, in used_axis_names_jaxpr do_subst_axis_names_jaxpr(jaxpr, subst) File "/Users/dlaw/dev/jax_bug/.env/lib/python3.10/site-packages/jax/core.py", line 2310, in do_subst_axis_names_jaxpr eqns = [subst_axis_names_eqn(eqn, subst, var_map) for eqn in jaxpr.eqns] File "/Users/dlaw/dev/jax_bug/.env/lib/python3.10/site-packages/jax/core.py", line 2310, in <listcomp> eqns = [subst_axis_names_eqn(eqn, subst, var_map) for eqn in jaxpr.eqns] File "/Users/dlaw/dev/jax_bug/.env/lib/python3.10/site-packages/jax/core.py", line 2295, in subst_axis_names_eqn outvars = 
[subst_axis_names_var(v, subst, var_map) for v in eqn.outvars] File "/Users/dlaw/dev/jax_bug/.env/lib/python3.10/site-packages/jax/core.py", line 2295, in <listcomp> outvars = [subst_axis_names_var(v, subst, var_map) for v in eqn.outvars] File "/Users/dlaw/dev/jax_bug/.env/lib/python3.10/site-packages/jax/core.py", line 2285, in subst_axis_names_var named_shape = {name: axis_frame(name).size for name in names} File "/Users/dlaw/dev/jax_bug/.env/lib/python3.10/site-packages/jax/core.py", line 2285, in <dictcomp> named_shape = {name: axis_frame(name).size for name in names} File "/Users/dlaw/dev/jax_bug/.env/lib/python3.10/site-packages/jax/core.py", line 2233, in axis_frame raise NameError( jax._src.traceback_util.UnfilteredStackTrace: NameError: unbound axis name: b. The following axis names (e.g. defined by pmap) are available to collective operations: ['x', 'x'] The stack trace below excludes JAX-internal frames. The preceding is the original exception that occurred, unmodified. -------------------- The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/Users/dlaw/dev/jax_bug/run.py", line 27, in <module> xm_step = xmap(step, in_axes=['b',...], out_axes=[...], axis_resources={'b':'x'}) File "/Users/dlaw/dev/jax_bug/run.py", line 24, in step x = loss(key) File "/Users/dlaw/dev/jax_bug/run.py", line 12, in loss def scan_fn(prev_z, t): jax._src.source_info_util.JaxStackTraceBeforeTransformation: NameError: unbound axis name: b. The following axis names (e.g. defined by pmap) are available to collective operations: ['x', 'x'] The preceding stack trace is the source of the JAX operation that, once transformed by JAX, triggered the following exception. -------------------- The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/Users/dlaw/dev/jax_bug/run.py", line 34, in <module> xm_step(keys) File "/Users/dlaw/dev/jax_bug/.env/lib/python3.10/site-packages/jax/experimental/maps.py", line 625, in fun_mapped out_flat = xmap_p.bind(fun_flat, *args_flat, **params) File "/Users/dlaw/dev/jax_bug/.env/lib/python3.10/site-packages/jax/experimental/maps.py", line 849, in bind return core.map_bind(self, fun, *args, in_axes=in_axes, **params) File "/Users/dlaw/dev/jax_bug/.env/lib/python3.10/site-packages/jax/core.py", line 2072, in map_bind outs = primitive.process(top_trace, fun, tracers, params) File "/Users/dlaw/dev/jax_bug/.env/lib/python3.10/site-packages/jax/experimental/maps.py", line 852, in process return trace.process_xmap(self, fun, tracers, params) File "/Users/dlaw/dev/jax_bug/.env/lib/python3.10/site-packages/jax/core.py", line 687, in process_call return primitive.impl(f, *tracers, **params) File "/Users/dlaw/dev/jax_bug/.env/lib/python3.10/site-packages/jax/experimental/maps.py", line 653, in xmap_impl xmap_callable = make_xmap_callable( File "/Users/dlaw/dev/jax_bug/.env/lib/python3.10/site-packages/jax/linear_util.py", line 295, in memoized_fun ans = call(fun, *args) File "/Users/dlaw/dev/jax_bug/.env/lib/python3.10/site-packages/jax/experimental/maps.py", line 723, in make_xmap_callable return pxla.lower_mesh_computation( File "/Users/dlaw/dev/jax_bug/.env/lib/python3.10/site-packages/jax/_src/profiler.py", line 294, in wrapper return func(*args, **kwargs) File "/Users/dlaw/dev/jax_bug/.env/lib/python3.10/site-packages/jax/interpreters/pxla.py", line 2559, in lower_mesh_computation lowering_result = mlir.lower_jaxpr_to_module( File 
"/Users/dlaw/dev/jax_bug/.env/lib/python3.10/site-packages/jax/interpreters/mlir.py", line 618, in lower_jaxpr_to_module lower_jaxpr_to_fun( File "/Users/dlaw/dev/jax_bug/.env/lib/python3.10/site-packages/jax/interpreters/mlir.py", line 881, in lower_jaxpr_to_fun out_vals, tokens_out = jaxpr_subcomp(ctx.replace(name_stack=callee_name_stack), File "/Users/dlaw/dev/jax_bug/.env/lib/python3.10/site-packages/jax/interpreters/mlir.py", line 1008, in jaxpr_subcomp ans = rule(rule_ctx, *map(_unwrap_singleton_ir_values, in_nodes), File "/Users/dlaw/dev/jax_bug/.env/lib/python3.10/site-packages/jax/interpreters/mlir.py", line 1071, in f_lowered jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(wrapped_fun, ctx.avals_in) File "/Users/dlaw/dev/jax_bug/.env/lib/python3.10/site-packages/jax/_src/profiler.py", line 294, in wrapper return func(*args, **kwargs) File "/Users/dlaw/dev/jax_bug/.env/lib/python3.10/site-packages/jax/interpreters/partial_eval.py", line 2092, in trace_to_jaxpr_dynamic jaxpr, out_avals, consts = trace_to_subjaxpr_dynamic( File "/Users/dlaw/dev/jax_bug/.env/lib/python3.10/site-packages/jax/interpreters/partial_eval.py", line 2109, in trace_to_subjaxpr_dynamic ans = fun.call_wrapped(*in_tracers_) File "/Users/dlaw/dev/jax_bug/.env/lib/python3.10/site-packages/jax/linear_util.py", line 168, in call_wrapped ans = self.f(*args, **dict(self.params, **kwargs)) File "/Users/dlaw/dev/jax_bug/.env/lib/python3.10/site-packages/jax/_src/lax/control_flow/loops.py", line 362, in _scan_impl return _scan_impl_loop( File "/Users/dlaw/dev/jax_bug/.env/lib/python3.10/site-packages/jax/_src/lax/control_flow/loops.py", line 323, in _scan_impl_loop _, *outs = while_loop(cond_fun, body_fun, init_val) NameError: unbound axis name: b. The following axis names (e.g. defined by pmap) are available to collective operations: ['x', 'x'] ``` ### What jax/jaxlib version are you using? jax head (0.3.17, 4cb31a6), jaxlib 0.3.15 ### Which accelerator(s) are you using? CPU ### Additional System Info Python 3.10, Mac OS Monterey, Intel processors
I shrank the repro a bit but haven't yet found the underlying issue: ```python import jax import jax.numpy as jnp import numpy as onp from jax.experimental.maps import xmap, Mesh from jax.config import config config.update('jax_traceback_filtering', 'off') def loss(init_z): def scan_fn(prev_z, t): new_z = jax.lax.cond(True, lambda _: prev_z, lambda _: prev_z, None) return new_z, None out, _ = jax.lax.scan(scan_fn, init_z, jnp.arange(10)) return out xm_step = xmap(loss, in_axes=['b',...], out_axes=['b', ...], axis_resources={'b':'x'}) devices = onp.array(jax.local_devices()) with Mesh(devices, ('x',)): xm_step(jnp.arange(jax.local_device_count(), dtype=jnp.float32)) ```
2022-08-18T13:34:39
google/jax
11996
google__jax-11996
[ "11965" ]
accd18974422fa9628c3c7532e1dda416d851da2
diff --git a/jax/_src/numpy/ufuncs.py b/jax/_src/numpy/ufuncs.py --- a/jax/_src/numpy/ufuncs.py +++ b/jax/_src/numpy/ufuncs.py @@ -529,6 +529,8 @@ def frexp(x): def remainder(x1, x2): x1, x2 = _promote_args_numeric("remainder", x1, x2) zero = _constant_like(x1, 0) + if dtypes.issubdtype(x2.dtype, np.integer): + x2 = _where(x2 == 0, lax_internal._ones(x2), x2) trunc_mod = lax.rem(x1, x2) trunc_mod_not_zero = lax.ne(trunc_mod, zero) do_plus = lax.bitwise_and(
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -286,9 +286,9 @@ def op_record(name, nargs, dtypes, shapes, rng_factory, diff_modes, op_record("rad2deg", 1, float_dtypes, all_shapes, jtu.rand_default, []), op_record("ravel", 1, all_dtypes, all_shapes, jtu.rand_default, ["rev"]), op_record("real", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []), - op_record("remainder", 2, default_dtypes, all_shapes, jtu.rand_nonzero, [], + op_record("remainder", 2, default_dtypes, all_shapes, jtu.rand_some_zero, [], tolerance={np.float16: 1e-2}), - op_record("mod", 2, default_dtypes, all_shapes, jtu.rand_nonzero, []), + op_record("mod", 2, default_dtypes, all_shapes, jtu.rand_some_zero, []), op_record("modf", 1, float_dtypes, all_shapes, jtu.rand_default, []), op_record("modf", 1, int_dtypes + unsigned_dtypes, all_shapes, jtu.rand_default, [], check_dtypes=False),
BUG: numpy and jax differ on modulus by zero ### Description Jax and numpy differ in their behavior with regards to computing a modulus by zero. Jax treats it as a no-op, and numpy treats it as the zero function. ## Numpy ```python Python 3.10.5 (main, Jul 15 2022, 03:56:49) [GCC 11.3.0] on linux >>> import numpy as np >>> a = np.array([1,2,3], dtype=np.uint32) >>> np.mod(a, 0) <stdin>:1: RuntimeWarning: divide by zero encountered in remainder array([0, 0, 0], dtype=uint32) ``` ## JAX ```python >>> import jax.numpy as jnp >>> a = jnp.array([1,2,3], dtype=jnp.uint32) >>> jnp.mod(a, 0) DeviceArray([1, 2, 3], dtype=uint32) ``` ### What jax/jaxlib version are you using? HEAD ### Which accelerator(s) are you using? CPU ### Additional System Info Python 3.9, 3.10.5
Thanks for filing - I noticed also that this leads to strange behaviors in `divmod`: ```python In [2]: print(*np.divmod(2, 0)) <ipython-input-28-e495b2cbe1c8>:1: RuntimeWarning: divide by zero encountered in divmod 0 0 In [3]: print(*jnp.divmod(2, 0)) -2 2 ```
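The patch above handles this for integer dtypes by masking out zero divisors before calling `lax.rem`; a rough standalone sketch of the same idea (not the exact implementation):
```python
import jax.numpy as jnp

def mod_with_zero(x1, x2):
  # Replace zero divisors with one so the remainder is defined, then force the
  # result to zero wherever the original divisor was zero (numpy's convention).
  safe_x2 = jnp.where(x2 == 0, jnp.ones_like(x2), x2)
  return jnp.where(x2 == 0, jnp.zeros_like(x1), x1 % safe_x2)

print(mod_with_zero(jnp.array([1, 2, 3], dtype=jnp.uint32), jnp.uint32(0)))
# [0 0 0], matching np.mod
```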
2022-08-18T20:20:44
google/jax
12041
google__jax-12041
[ "12033" ]
560cc366e17a2efeb2d8e11164c137d76e04bae7
diff --git a/jax/_src/lax/lax.py b/jax/_src/lax/lax.py --- a/jax/_src/lax/lax.py +++ b/jax/_src/lax/lax.py @@ -2004,8 +2004,7 @@ def _abs_jvp_rule(g, ans, x): pow_p = standard_naryop([_float | _complex, _float | _complex], 'pow') def _pow_jvp_lhs(g, ans, x, y): - jac = mul(y, pow(x, select(eq(y, _zeros(y)), _ones(y), sub(y, _ones(y))))) - return mul(g, jac) + return mul(g, mul(y, pow(x, sub(y, _ones(y))))) def _pow_jvp_rhs(g, ans, x, y): return mul(g, mul(log(_replace_zero(x)), ans))
diff --git a/tests/lax_autodiff_test.py b/tests/lax_autodiff_test.py --- a/tests/lax_autodiff_test.py +++ b/tests/lax_autodiff_test.py @@ -529,6 +529,26 @@ def testReverseGrad(self): check_grads(rev, (np.array([[6., 5., 4.], [3., 2., 1.]]),), 2, rtol={np.float32: 3e-3}) + def testPowSecondDerivative(self): + # https://github.com/google/jax/issues/12033 + x, y = 4.0, 0.0 + expected = ((0.0, 1/x), (1/x, np.log(x) ** 2)) + + with self.subTest("jacfwd"): + result_fwd = jax.jacfwd(jax.jacfwd(lax.pow, (0, 1)), (0, 1))(x, y) + self.assertAllClose(result_fwd, expected) + + with self.subTest("jacrev"): + result_rev = jax.jacrev(jax.jacrev(lax.pow, (0, 1)), (0, 1))(x, y) + self.assertAllClose(result_rev, expected) + + with self.subTest("zero to the zero"): + result = jax.grad(lax.pow)(0.0, 0.0) + # TODO(jakevdp) special-case zero in a way that doesn't break other cases + # See https://github.com/google/jax/pull/12041#issuecomment-1222766191 + # self.assertEqual(result, 0.0) + self.assertAllClose(result, np.nan) + @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_predshape={}_argshapes={}".format( jtu.format_shape_dtype_string(pred_shape, np.bool_),
The second order gradient of `lax.pow` is wrong The second order gradient of `lax.pow` is wrong. For instance, the `fn` computes `x^y`, so `grad_fn` returns `(y*x^(y-1), ln(x)* x^y)`. When `x, y = 23.0, 0.0`, for the second element `ln(x) * x^y`, the gradient w.r.t `x` should be `1/x` instead of `23.0` computed by `jacrev` or `jacfwd`. ```python import jax def grad_fn(x, y): def fn(x, y): return jax.numpy.sum(jax.lax.pow(x, y)) return jax.grad(fn, (0, 1))(x, y) x = jax.numpy.array([23.0]) y = jax.numpy.array([0.0]) print(jax.jacrev(grad_fn, (0, 1))(x, y)) print(jax.jacfwd(grad_fn, (0, 1))(x, y)) ``` ``` ((DeviceArray([[0.]], dtype=float32), DeviceArray([[23.]], dtype=float32)), (DeviceArray([[0.04347826]], dtype=float32), DeviceArray([[9.831324]], dtype=float32))) ```
Thanks for the report – I had a bit of trouble following your reproduction, so I rewrote it a bit more succinctly: ```python import jax f = jax.lax.pow x = 23.0 y = 0.0 print(f"d²f/dxdy = {jax.grad(jax.grad(f, 0), 1)(x, y)}") print(f"d²f/dydx = {jax.grad(jax.grad(f, 1), 0)(x, y)}") ``` ``` d²f/dxdy = 23.0 d²f/dydx = 0.043478261679410934 ``` As you mention, these two results should both be equal to the second output. Looking into the code, I think the issue comes from the `select` statement in line 2004 here: https://github.com/google/jax/blob/da4e79a6251ae71504061b496ea34f20dc06fa60/jax/_src/lax/lax.py#L2003-L2008 The gradient of `pow` with respect to `x` is special-cased when `y` is zero. I think this yields the correct result when `y` is a constant, but it removes the dependence of the result on `y` and therefore the mixed second derivative is incorrect.
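As a sanity check on what the corrected rule should return: for f(x, y) = x**y the analytic mixed partial is x**(y - 1) * (1 + y*log(x)), which at (23.0, 0.0) equals 1/23, i.e. the jacfwd value above:
```python
import numpy as np

x, y = 23.0, 0.0
d2f_dxdy = x**(y - 1) * (1 + y * np.log(x))
print(d2f_dxdy)  # 0.043478... == 1/x
```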
2022-08-22T17:11:20
google/jax
12069
google__jax-12069
[ "12063" ]
160a6c5229ba9a1b78431708cd52d39f08e8197b
diff --git a/jax/_src/ad_checkpoint.py b/jax/_src/ad_checkpoint.py --- a/jax/_src/ad_checkpoint.py +++ b/jax/_src/ad_checkpoint.py @@ -14,7 +14,8 @@ from functools import partial import operator as op -from typing import Callable, Optional, List, Tuple, Sequence, Set, Union, Any +from typing import (Callable, Optional, List, Tuple, Sequence, Set, Union, Any, + FrozenSet) import types from absl import logging @@ -302,7 +303,7 @@ def _remat_static_argnums(fun, static_argnums, args): class WrapHashably: val: Any - hash: Optional[int] = None + hash: int hashable: bool def __init__(self, val): @@ -317,8 +318,10 @@ def __hash__(self): return self.hash def __eq__(self, other): if isinstance(other, WrapHashably): - try: return self.val == other.val - except: return self.val is other.val + if self.hashable and other.hashable: + return self.val == other.val + else: + return self.val is other.val return False # This caching is useful to avoid retracing even when static_argnums is used. @@ -326,7 +329,7 @@ def __eq__(self, other): # On that benchmark, including this caching makes a ~10x difference (which can # be made arbitrary large by involving larger functions to be traced). @weakref_lru_cache -def _dyn_args_fun(fun: Callable, static_argnums: Tuple[int, ...], +def _dyn_args_fun(fun: Callable, static_argnums: FrozenSet[int], static_args: Tuple[WrapHashably, ...], nargs: int): def new_fun(*dyn_args, **kwargs): static_args_, dyn_args_ = iter(static_args), iter(dyn_args)
Flaky test failure of RematTest in api_test Run, with jaxlib built from head: ``` bazel test --//jax:build_jaxlib=false //tests:api_test_cpu --test_arg=RematTest.test_remat_grad_python_control_flow_static_argnums --test_sharding_strategy=disabled --notrim_test_configuration --runs_per_test=100 ``` ``` //tests:api_test_cpu FAILED in 2 out of 100 in 7.4s Stats over 100 runs: max = 7.4s, min = 2.0s, avg = 6.8s, dev = 1.0s /usr/local/google/home/phawkins/.cache/bazel/_bazel_phawkins/f5f850d43f4a41d5e22a7e7660064506/execroot/__main__/bazel-out/k8-opt/testlogs/tests/api_test_cpu/run_17_of_100/test.log /usr/local/google/home/phawkins/.cache/bazel/_bazel_phawkins/f5f850d43f4a41d5e22a7e7660064506/execroot/__main__/bazel-out/k8-opt/testlogs/tests/api_test_cpu/run_91_of_100/test.log ``` Example failure: ``` [ FAILED ] RematTest.test_remat_grad_python_control_flow_static_argnums ====================================================================== FAIL: test_remat_grad_python_control_flow_static_argnums (__main__.RematTest) RematTest.test_remat_grad_python_control_flow_static_argnums ---------------------------------------------------------------------- Traceback (most recent call last): File "/usr/local/google/home/phawkins/.cache/bazel/_bazel_phawkins/f5f850d43f4a41d5e22a7e7660064506/execroot/__main__/bazel-out/k8-opt/bin/tests/api_test_cpu.runfiles/__main__/tests/api_test.py", line 4037, in test_remat_grad_python_control_flow_static_argnums self.assertAllClose(ans, expected, check_dtypes=False) File "/usr/local/google/home/phawkins/.cache/bazel/_bazel_phawkins/f5f850d43f4a41d5e22a7e7660064506/execroot/__main__/bazel-out/k8-opt/bin/tests/api_test_cpu.runfiles/__main__/jax/_src/test_util.py", line 798, in assertAllClose self.assertArraysAllClose(x, y, check_dtypes=False, atol=atol, rtol=rtol, File "/usr/local/google/home/phawkins/.cache/bazel/_bazel_phawkins/f5f850d43f4a41d5e22a7e7660064506/execroot/__main__/bazel-out/k8-opt/bin/tests/api_test_cpu.runfiles/__main__/jax/_src/test_util.py", line 763, in assertArraysAllClose _assert_numpy_allclose(x, y, atol=atol, rtol=rtol, err_msg=err_msg) File "/usr/local/google/home/phawkins/.cache/bazel/_bazel_phawkins/f5f850d43f4a41d5e22a7e7660064506/execroot/__main__/bazel-out/k8-opt/bin/tests/api_test_cpu.runfiles/__main__/jax/_src/public_test_util.py", line 96, in _assert_numpy_allclose np.testing.assert_allclose(a, b, **kw, err_msg=err_msg) File "/usr/local/google/home/phawkins/.pyenv/versions/py3913/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 1527, in assert_allclose assert_array_compare(compare, actual, desired, err_msg=str(err_msg), File "/usr/local/google/home/phawkins/.pyenv/versions/py3913/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 844, in assert_array_compare raise AssertionError(msg) AssertionError: Not equal to tolerance rtol=1e-06, atol=1e-06 Mismatched elements: 1 / 1 (100%) Max absolute difference: 0.41614684 Max relative difference: 1. x: array(0., dtype=float32) y: array(-0.416147) ---------------------------------------------------------------------- Ran 1 test in 0.352s FAILED (failures=1) ``` It doesn't appear to repro with jaxlib from pypi. Now, I'm not convinced this is an XLA bug from the evidence. Interestingly we dump different XLA computations with `XLA_FLAGS=--xla_dump_to` in cases when we fail and cases when we succeed! So that suggests to me this is a JAX bug. I've seen this on CPU, GPU and, TPU.
2022-08-24T02:16:16
google/jax
12203
google__jax-12203
[ "12194" ]
0869183107f8bd4622451b1d04f255577e5b2588
diff --git a/jax/_src/callback.py b/jax/_src/callback.py --- a/jax/_src/callback.py +++ b/jax/_src/callback.py @@ -14,6 +14,8 @@ """Module for JAX callbacks.""" from __future__ import annotations +import functools + from typing import Any, Callable, Sequence from jax import core @@ -21,6 +23,7 @@ from jax._src import dtypes from jax._src import lib as jaxlib from jax._src import util +from jax._src import dispatch from jax.interpreters import ad from jax.interpreters import batching from jax.interpreters import mlir @@ -33,11 +36,12 @@ map, unsafe_map = util.safe_map, map -@pure_callback_p.def_impl def pure_callback_impl(*args, result_avals, callback: Callable[..., Any], vectorized: bool): del vectorized, result_avals return callback(*args) +pure_callback_p.def_impl(functools.partial(dispatch.apply_primitive, + pure_callback_p)) @pure_callback_p.def_abstract_eval @@ -102,7 +106,7 @@ def pure_callback_lowering(ctx, *args, callback, **params): "Please upgrade to a jaxlib >= 0.3.15.") def _callback(*flat_args): - return tuple(pure_callback_p.impl(*flat_args, callback=callback, **params)) + return tuple(pure_callback_impl(*flat_args, callback=callback, **params)) result, _, keepalive = mlir.emit_python_callback( ctx, _callback, None, list(args), ctx.avals_in, ctx.avals_out, False,
diff --git a/tests/python_callback_test.py b/tests/python_callback_test.py --- a/tests/python_callback_test.py +++ b/tests/python_callback_test.py @@ -469,6 +469,17 @@ def tearDown(self): super().tearDown() dispatch.runtime_tokens.clear() + @jtu.skip_on_devices(*disabled_backends) + def test_pure_callback_passes_ndarrays_without_jit(self): + + def cb(x): + self.assertIs(type(x), np.ndarray) + return x + + def f(x): + return jax.pure_callback(cb, x, x) + f(jnp.array(2.)) + @jtu.skip_on_devices(*disabled_backends) def test_simple_pure_callback(self):
pure_callback passes jax.DeviceArray to the callback on CPU when not jitted ### Description As title. This only happens when the callback is executed outside of jit boundaries. This is different from the documented behaviour. ```python ➜ python Python 3.10.6 (main, Aug 23 2022, 11:35:18) [Clang 13.1.6 (clang-1316.0.21.2.5)] on darwin Type "help", "copyright", "credits" or "license" for more information. >>> import jax >>> def test(x): ... print(type(x)) ... return x ... >>> def f(x): ... return jax.pure_callback(test, x, x) ... >>> x= jax.numpy.ones(3) >>> f(x) <class 'jaxlib.xla_extension.DeviceArray'> DeviceArray([1., 1., 1.], dtype=float32) >>> jax.jit(f)(x) <class 'numpy.ndarray'> DeviceArray([1., 1., 1.], dtype=float32) ``` ### What jax/jaxlib version are you using? jax 0.3.17 ### Which accelerator(s) are you using? CPU ### Additional System Info MacOs
This is a straightforward fix on our end, though I'm curious, does this cause problems for you given that `DeviceArray`s behave like `np.ndarray`s?
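In the meantime, a simple way to make the callback robust to this is to coerce inside the callback itself (a hedged workaround, not the eventual fix):
```python
import numpy as np
import jax

def test(x):
    x = np.asarray(x)  # DeviceArray or ndarray -> ndarray either way
    print(type(x))
    return x

def f(x):
    return jax.pure_callback(test, x, x)
```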
2022-09-01T22:27:50
google/jax
12219
google__jax-12219
[ "12198" ]
91d134d65b39c5570800fd788c3d41fac87ccebf
diff --git a/benchmarks/api_benchmark.py b/benchmarks/api_benchmark.py --- a/benchmarks/api_benchmark.py +++ b/benchmarks/api_benchmark.py @@ -663,6 +663,19 @@ def bench_slicing_compilation2(state): while state: jax.jit(lambda x: (x[:1], x[1:2], x[2:3])).lower(x).compile() +@google_benchmark.register +@google_benchmark.option.unit(google_benchmark.kMillisecond) +def bench_repeated_static_indexing(state): + x = jnp.arange(500) + while state: + jax.block_until_ready([x[i] for i in range(500)]) + +@google_benchmark.register +@google_benchmark.option.unit(google_benchmark.kMillisecond) +def bench_repeated_static_slicing(state): + x = jnp.arange(1000) + while state: + jax.block_until_ready([x[i:i + 2] for i in range(0, 1000, 2)]) def pjit_simple_benchmark(state, num_devices, num_args, cpp_jit, use_aot=False): spec = pjit_lib.PartitionSpec('x') diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py --- a/jax/_src/numpy/lax_numpy.py +++ b/jax/_src/numpy/lax_numpy.py @@ -3776,10 +3776,9 @@ def _rewriting_take(arr, idx, indices_are_sorted=False, unique_indices=False, if (arr.ndim > 0 and isinstance(idx, (int, np.integer)) and not isinstance(idx, (bool, np.bool_)) and isinstance(arr.shape[0], int)): if 0 <= idx < arr.shape[0]: - if _any(isinstance(d, core.Tracer) for d in arr.shape[1:]): - return lax.dynamic_index_in_dim(arr, idx, keepdims=False) - else: - return lax.index_in_dim(arr, idx, keepdims=False) + # Use dynamic rather than static index here to avoid slow repeated execution: + # See https://github.com/google/jax/issues/12198 + return lax.dynamic_index_in_dim(arr, idx, keepdims=False) if (arr.ndim > 0 and isinstance(arr.shape[0], int) and isinstance(idx, slice) and (type(idx.start) is int or idx.start is None) and @@ -3794,6 +3793,10 @@ def _rewriting_take(arr, idx, indices_are_sorted=False, unique_indices=False, if _any(isinstance(d, core.Tracer) for d in arr.shape[1:]): if step == 1: # TODO(mattjj, sharadmv): handle step != 1 return lax.dynamic_slice_in_dim(arr, start, _max(0, stop - start), 0) + elif step == 1: + # Use dynamic rather than static slice here to avoid slow repeated execution: + # See https://github.com/google/jax/issues/12198 + return lax.dynamic_slice_in_dim(arr, start, _max(0, stop - start), 0) else: return lax.slice_in_dim(arr, start, stop, step)
slice-based indexing is slow for repeated indexing This issue is the result of some pair-debugging with @sharadmv. In the course of trying to re-land #12091, I found that it was leading to some test timeouts due to replacing `lax.gather` with `lax.slice`. This issue actually can be reproduced in the current release of JAX; given the current implementation, we can define two functions, one of which takes the `gather` path and one of which takes the `slice` path: ```python import jax.numpy as jnp import jax def index_via_gather(x): return x[(0,)] def index_via_slice(x): return x[0] x = jnp.arange(100) print(jax.make_jaxpr(index_via_gather)(x)) # { lambda ; a:i32[100]. let # b:i32[1] = broadcast_in_dim[broadcast_dimensions=() shape=(1,)] 0 # c:i32[] = gather[ # dimension_numbers=GatherDimensionNumbers(offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)) # fill_value=None # indices_are_sorted=True # mode=GatherScatterMode.PROMISE_IN_BOUNDS # slice_sizes=(1,) # unique_indices=True # ] a b # in (c,) } print(jax.make_jaxpr(index_via_slice)(x)) # { lambda ; a:i32[100]. let # b:i32[1] = slice[limit_indices=(1,) start_indices=(0,) strides=(1,)] a # c:i32[] = squeeze[dimensions=(0,)] b # in (c,) } ``` The latter slice behavior was added in #11867 because it can be more efficient. Unfortunately, there is a catch: when indexing is done repeatedly on different static indices, the `slice` approach has a cache miss on each new index, making it much slower. The test that led to the rollback of #12091 exposed this issue, which we can reproduce for the current implementation like this: ```python def f_gather(x): return jnp.array([x[(i,)] for i in range(len(x))]) def f_slice(x): return jnp.array([x[i] for i in range(len(x))]) x = jnp.arange(500) %timeit -n 1 -r 1 f_gather(x).block_until_ready() # 611 ms ± 0 ns per loop (mean ± std. dev. of 1 run, 1 loop each) %timeit -n 1 -r 1 f_slice(x).block_until_ready() # 6.78 s ± 0 ns per loop (mean ± std. dev. of 1 run, 1 loop each) ``` Repeated indexing via static slice is *very* slow comparted to repeated indexing via gather. We could instead consider using `lax.dynamic_slice`, which has comparable performance to `gather` for this repeated operation: ```python def f_dynamic_slice(x): return jnp.array([jax.lax.dynamic_index_in_dim(x, i, 0) for i in range(len(x))]) %timeit -n 1 -r 1 f_dynamic_slice(x).block_until_ready() # 420 ms ± 0 ns per loop (mean ± std. dev. of 1 run, 1 loop each) ``` Any thoughts? cc/ @mattjj, who contributed #11867
Chatted offline with @mattjj; we agreed that switching from `slice` to `dynamic_slice` is probably the best option here.
2022-09-02T22:24:52
google/jax
12308
google__jax-12308
[ "12305" ]
40c80d7d0aa08f39b634e9fc54420892860db386
diff --git a/jax/_src/lax/lax.py b/jax/_src/lax/lax.py --- a/jax/_src/lax/lax.py +++ b/jax/_src/lax/lax.py @@ -149,8 +149,7 @@ def _broadcast_shapes_uncached(*shapes): shape_list = [(1,) * (ndim - len(shape)) + shape for shape in shapes] result_shape = _try_broadcast_shapes(shape_list) if result_shape is None: - raise ValueError("Incompatible shapes for broadcasting: {}" - .format(tuple(shape_list))) + raise ValueError(f"Incompatible shapes for broadcasting: shapes={list(shapes)}") return result_shape def _broadcast_ranks(s1, s2):
Misleading error message in broadcast ### Description ```python In [64]: x, y = jnp.ones((2, 4)), jnp.ones((2,)) In [65]: x + y .... /jax/_src/numpy/util.py in _promote_shapes(fun_name, *args) 246 if config.jax_numpy_rank_promotion != "allow": 247 _rank_promotion_warning_or_error(fun_name, shapes) --> 248 result_rank = len(lax.broadcast_shapes(*shapes)) 249 return [_broadcast_to(arg, (1,) * (result_rank - len(shp)) + shp) 250 for arg, shp in zip(args, shapes)] ValueError: Incompatible shapes for broadcasting: ((2, 4), (1, 2)) ``` ```python In [66]: xx, yy = np.ones((2, 4)), np.ones((2,)) In [67]: xx + yy --------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-67-9e546835689c> in <module>() ----> 1 xx + yy ValueError: operands could not be broadcast together with shapes (2,4) (2,) ``` The error message from `jnp` does not tell the shape given by user and that's misleading for debugging. `np` has a better behavior here. ### What jax/jaxlib version are you using? latest ### Which accelerator(s) are you using? _No response_ ### Additional System Info _No response_
Thanks for the report - we should be able to improve that
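With the change in the patch above, the same example should report the shapes as the user passed them, something like:
```
ValueError: Incompatible shapes for broadcasting: shapes=[(2, 4), (2,)]
```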
2022-09-09T15:37:46
google/jax
12332
google__jax-12332
[ "12321" ]
3243e23aa528db390fdece3fa32517c28a50318b
diff --git a/jax/_src/lax/slicing.py b/jax/_src/lax/slicing.py --- a/jax/_src/lax/slicing.py +++ b/jax/_src/lax/slicing.py @@ -14,7 +14,7 @@ import enum from functools import partial -from typing import Any, Callable, NamedTuple, Optional, Sequence, Union +from typing import Any, Callable, NamedTuple, Optional, Sequence, Tuple, Union import weakref import numpy as np @@ -173,9 +173,9 @@ class GatherDimensionNumbers(NamedTuple): implicit; there is always an index vector dimension and it must always be the last dimension. To gather scalar indices, add a trailing dimension of size 1. """ - offset_dims: Sequence[int] - collapsed_slice_dims: Sequence[int] - start_index_map: Sequence[int] + offset_dims: Tuple[int, ...] + collapsed_slice_dims: Tuple[int, ...] + start_index_map: Tuple[int, ...] class GatherScatterMode(enum.Enum): @@ -612,15 +612,17 @@ def scatter( def index_take(src: Array, idxs: Array, axes: Sequence[int]) -> Array: indices = lax.concatenate([lax.expand_dims(i, (1,)) for i in idxs], 1) - indices = indices % np.array([src.shape[ax] for ax in axes]) + max_idx = lax.expand_dims(np.array([src.shape[ax] for ax in axes]), + tuple(range(indices.ndim - 1))) + indices = indices % max_idx slice_sizes = list(src.shape) for ax in axes: slice_sizes[ax] = 1 offset_dims = tuple(range(1, src.ndim - indices.shape[1] + 1)) dnums = GatherDimensionNumbers( offset_dims=offset_dims, - collapsed_slice_dims=axes, - start_index_map=axes) + collapsed_slice_dims=tuple(axes), + start_index_map=tuple(axes)) return gather(src, indices, dimension_numbers=dnums, slice_sizes=tuple(slice_sizes))
diff --git a/tests/lax_test.py b/tests/lax_test.py --- a/tests/lax_test.py +++ b/tests/lax_test.py @@ -2287,9 +2287,8 @@ def collapse_first_two(x): [(3, 4, 5), (np.array([0, 2, 1]),), (0,)], [(3, 4, 5), (np.array([-1, -2]),), (0,)], [(3, 4, 5), (np.array([0, 2]), np.array([1, 3])), (0, 1)], - [(3, 4, 5), (np.array([0, 2]), np.array([1, 3])), (0, 2)], + [(3, 4, 5), (np.array([0, 2]), np.array([1, 3])), [0, 2]], ])) - @jax.numpy_rank_promotion('allow') # Test explicitly exercises implicit rank promotion. def testIndexTake(self, shape, dtype, idxs, axes): rng = jtu.rand_default(self.rng()) rand_idxs = lambda: tuple(rng(e.shape, e.dtype) for e in idxs)
Possible bug in `jax.lax.index_take` `jax.lax.index_take` will fail when `axes` is a list, for example, ```py import jax def fn(src): idxs = jax.numpy.array([[2, 2], [31, 0]], dtype=jax.numpy.int32) axes = [0, 1] return jax.lax.index_take(src, idxs, axes) genkey = jax.random.PRNGKey(90376501) src = jax.random.randint(genkey, [3, 4, 5], -32, 32, jax.numpy.int32) fn(src) ``` ``` File ~/torch-1.12.1/lib/python3.9/site-packages/jax/_src/lax/slicing.py:281, in gather(operand, start_indices, dimension_numbers, slice_sizes, unique_indices, indices_are_sorted, mode, fill_value) 279 else: 280 fill_value = None --> 281 return gather_p.bind( ... 220 return f(*args, **kwargs) 221 else: --> 222 return cached(config._trace_context(), *args, **kwargs) TypeError: unhashable type: 'list' ``` However, the documentation says `axes` is a sequence of integers, so I think it should succeed with an integer list. --- Plus, its `jit` version can succeed with the same input ```py import jax def fn(src): idxs = jax.numpy.array([[2, 2], [31, 0]], dtype=jax.numpy.int32) axes = [0, 1] return jax.lax.index_take(src, idxs, axes) genkey = jax.random.PRNGKey(90376501) src = jax.random.randint(genkey, [3, 4, 5], -32, 32, jax.numpy.int32) print(jax.jit(fn)(src)) ``` ``` [[-17 -28 3 16 -31] [ 14 -28 -21 -18 17]] ``` but I think the indices here are not valid: `31` is obviously out of range, and jax just uses `31 % 4` as the index to take the elements. Maybe it needs an index check. --- jax version: 0.3.17
Thanks - we should be able to fix this.
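Until the fix lands, passing the axes as a tuple should (untested across every version) avoid the unhashable-list failure, since the patch above simply converts `axes` to a tuple before building `GatherDimensionNumbers`. In-range indices are used below because, as the reporter notes, out-of-range indices are currently wrapped rather than checked:

```python
import jax
import jax.numpy as jnp

def fn(src):
    idxs = jnp.array([[2, 2], [1, 3]], dtype=jnp.int32)  # takes src[2, 1, :] and src[2, 3, :]
    axes = (0, 1)  # tuple instead of list, so it is hashable
    return jax.lax.index_take(src, idxs, axes)

key = jax.random.PRNGKey(90376501)
src = jax.random.randint(key, [3, 4, 5], -32, 32, jnp.int32)
print(fn(src))  # shape (2, 5)
```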
2022-09-12T19:10:47
google/jax
12,334
google__jax-12334
[ "12331" ]
3243e23aa528db390fdece3fa32517c28a50318b
diff --git a/jax/_src/api_util.py b/jax/_src/api_util.py --- a/jax/_src/api_util.py +++ b/jax/_src/api_util.py @@ -37,6 +37,7 @@ def _ensure_index(x: Any) -> Union[int, Tuple[int, ...]]: """Ensure x is either an index or a tuple of indices.""" + x = core.concrete_or_error(None, x, "expected a static index or sequence of indices.") try: return operator.index(x) except TypeError: @@ -44,6 +45,7 @@ def _ensure_index(x: Any) -> Union[int, Tuple[int, ...]]: def _ensure_index_tuple(x: Any) -> Tuple[int, ...]: """Convert x to a tuple of indices.""" + x = core.concrete_or_error(None, x, "expected a static index or sequence of indices.") try: return (operator.index(x),) except TypeError:
`jnp.squeeze` will fail unexpectedly with `jit` compilation `jnp.squeeze` will fail unexpectedly with `jit` compilation but direct invocation will succeed. For example, ```py import jax def fn(inp): axis = jax.numpy.array(1, dtype=jax.numpy.int32) return jax.numpy.squeeze(inp, axis) mykey = jax.random.PRNGKey(0) inp = jax.random.uniform(mykey, [3, 1], jax.numpy.float32, minval=0, maxval=1) print(fn(inp)) # [0.9653214 0.31468165 0.63302994] jax.jit(fn)(inp) # TypeError: iteration over a 0-d array ``` I think the input is valid since dimension `1` has size 1. ## Version jax 0.3.17
BTW, `jnp.expand_dims` has a similar issue ``` import jax def fn(inp): axis = jax.numpy.array(0, dtype=jax.numpy.int64) return jax.numpy.expand_dims(inp, axis) inp = jax.numpy.array(128, dtype=jax.numpy.float32) print(fn(inp)) # [128.] jax.jit(fn)(inp) # TypeError: iteration over a 0-d array ``` The error is misleading here, but I believe the issue is that in both cases the `axis` argument must be static, and when you pass a jax array to the function within `jit` it is no longer static. We should update the `_ensure_index_tuple` helper function to return a more suitable error in this case.
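To illustrate the point above, a small sketch using only public APIs: a static Python int (or an argument marked static) works under `jit`, while a traced array does not:

```python
import functools
import jax
import jax.numpy as jnp

x = jnp.ones((3, 1))

# Works: the axis is a static Python int, known at trace time.
print(jax.jit(lambda a: jnp.squeeze(a, axis=1))(x).shape)   # (3,)

# Also works: keep the axis out of the traced arguments.
@functools.partial(jax.jit, static_argnums=1)
def squeeze_static(a, axis):
    return jnp.squeeze(a, axis)

print(squeeze_static(x, 1).shape)                           # (3,)

# Fails: a traced jax array is not static, so it cannot be used as an axis
# (this is the case reported above).
# jax.jit(lambda a, ax: jnp.squeeze(a, ax))(x, jnp.int32(1))
```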
2022-09-12T19:32:46
google/jax
12,382
google__jax-12382
[ "10813" ]
69d1a2c0637369bb2303738b4d082b9c1bfd2680
diff --git a/jax/_src/numpy/reductions.py b/jax/_src/numpy/reductions.py --- a/jax/_src/numpy/reductions.py +++ b/jax/_src/numpy/reductions.py @@ -26,7 +26,7 @@ from jax._src import api from jax._src import dtypes from jax._src.numpy.ndarray import ndarray -from jax._src.numpy.util import _broadcast_to, _check_arraylike, _complex_elem_type, _promote_dtypes_inexact, _where, _wraps +from jax._src.numpy.util import _broadcast_to, _check_arraylike, _complex_elem_type, _promote_dtypes_inexact, _promote_dtypes_numeric, _where, _wraps from jax._src.lax import lax as lax_internal from jax._src.util import canonicalize_axis as _canonicalize_axis, maybe_named_axis @@ -62,7 +62,7 @@ def _upcast_f16(dtype): def _reduction(a, name, np_fun, op, init_val, has_identity=True, preproc=None, bool_op=None, upcast_f16_for_computation=False, axis=None, dtype=None, out=None, keepdims=False, initial=None, - where_=None, parallel_reduce=None): + where_=None, parallel_reduce=None, promote_integers=False): bool_op = bool_op or op # Note: we must accept out=None as an argument, because numpy reductions delegate to # object methods. For example `np.sum(x)` will call `x.sum()` if the `sum()` method @@ -86,7 +86,18 @@ def _reduction(a, name, np_fun, op, init_val, has_identity=True, if not _all(core.greater_equal_dim(shape[d], 1) for d in pos_dims): raise ValueError(f"zero-size array to reduction operation {name} which has no identity") - result_dtype = dtypes.canonicalize_dtype(dtype or dtypes.dtype(np_fun(np.ones((), dtype=dtypes.dtype(a))))) + result_dtype = dtypes.canonicalize_dtype(dtype or dtypes.dtype(a)) + + # promote_integers=True matches NumPy's behavior for sum() and prod(), which promotes + # all int-like inputs to the widest available dtype. + if dtype is None and promote_integers: + if dtypes.issubdtype(result_dtype, np.bool_): + result_dtype = dtypes.canonicalize_dtype(np.int64) + elif dtypes.issubdtype(result_dtype, np.unsignedinteger): + result_dtype = dtypes.canonicalize_dtype(np.uint64) + elif dtypes.issubdtype(result_dtype, np.integer): + result_dtype = dtypes.canonicalize_dtype(np.int64) + if upcast_f16_for_computation and dtypes.issubdtype(result_dtype, np.inexact): computation_dtype = _upcast_f16(result_dtype) else: @@ -146,6 +157,9 @@ def _cast_to_bool(operand): warnings.filterwarnings("ignore", category=np.ComplexWarning) return lax.convert_element_type(operand, np.bool_) +def _cast_to_numeric(operand): + return _promote_dtypes_numeric(operand)[0] + def _ensure_optional_axes(x): def force(x): @@ -159,34 +173,46 @@ def force(x): force, x, "The axis argument must be known statically.") -@partial(api.jit, static_argnames=('axis', 'dtype', 'keepdims'), inline=True) -def _reduce_sum(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, - dtype=None, out=None, keepdims=None, initial=None, where=None): - return _reduction(a, "sum", np.sum, lax.add, 0, +# TODO(jakevdp) change promote_integers default to False +_PROMOTE_INTEGERS_DOC = """ +promote_integers : bool, default=True + If True, then integer inputs will be promoted to the widest available integer + dtype, following numpy's behavior. If False, the result will have the same dtype + as the input. ``promote_integers`` is ignored if ``dtype`` is specified. 
+""" + + +@partial(api.jit, static_argnames=('axis', 'dtype', 'keepdims', 'promote_integers'), inline=True) +def _reduce_sum(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, dtype=None, + out=None, keepdims=None, initial=None, where=None, promote_integers=True): + return _reduction(a, "sum", np.sum, lax.add, 0, preproc=_cast_to_numeric, bool_op=lax.bitwise_or, upcast_f16_for_computation=True, axis=axis, dtype=dtype, out=out, keepdims=keepdims, - initial=initial, where_=where, parallel_reduce=lax.psum) + initial=initial, where_=where, parallel_reduce=lax.psum, + promote_integers=promote_integers) -@_wraps(np.sum, skip_params=['out']) +@_wraps(np.sum, skip_params=['out'], extra_params=_PROMOTE_INTEGERS_DOC) def sum(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, dtype=None, - out=None, keepdims=None, initial=None, where=None): + out=None, keepdims=None, initial=None, where=None, promote_integers=True): return _reduce_sum(a, axis=_ensure_optional_axes(axis), dtype=dtype, out=out, - keepdims=keepdims, initial=initial, where=where) + keepdims=keepdims, initial=initial, where=where, + promote_integers=promote_integers) -@partial(api.jit, static_argnames=('axis', 'dtype', 'keepdims'), inline=True) -def _reduce_prod(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, - dtype=None, out=None, keepdims=None, initial=None, where=None): - return _reduction(a, "prod", np.prod, lax.mul, 1, +@partial(api.jit, static_argnames=('axis', 'dtype', 'keepdims', 'promote_integers'), inline=True) +def _reduce_prod(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, dtype=None, + out=None, keepdims=None, initial=None, where=None, promote_integers=True): + return _reduction(a, "prod", np.prod, lax.mul, 1, preproc=_cast_to_numeric, bool_op=lax.bitwise_and, upcast_f16_for_computation=True, axis=axis, dtype=dtype, out=out, keepdims=keepdims, - initial=initial, where_=where) + initial=initial, where_=where, promote_integers=promote_integers) -@_wraps(np.prod, skip_params=['out']) +@_wraps(np.prod, skip_params=['out'], extra_params=_PROMOTE_INTEGERS_DOC) def prod(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, dtype=None, - out=None, keepdims=None, initial=None, where=None): + out=None, keepdims=None, initial=None, where=None, promote_integers=True): return _reduce_prod(a, axis=_ensure_optional_axes(axis), dtype=dtype, - out=out, keepdims=keepdims, initial=initial, where=where) + out=out, keepdims=keepdims, initial=initial, where=where, + promote_integers=promote_integers) @partial(api.jit, static_argnames=('axis', 'keepdims'), inline=True)
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -407,6 +407,11 @@ def op_record(name, nargs, dtypes, shapes, rng_factory, diff_modes, op_record("ptp", 1, number_dtypes, nonempty_shapes, jtu.rand_default, []), ] +JAX_REDUCER_PROMOTE_INT_RECORDS = [ + op_record("prod", 1, all_dtypes, all_shapes, jtu.rand_small_positive, []), + op_record("sum", 1, all_dtypes, all_shapes, jtu.rand_default, []), +] + JAX_ARGMINMAX_RECORDS = [ op_record("argmin", 1, default_dtypes, nonempty_shapes, jtu.rand_some_equal, []), op_record("argmax", 1, default_dtypes, nonempty_shapes, jtu.rand_some_equal, []), @@ -941,6 +946,49 @@ def np_fun(x): self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, rtol=tol) self._CompileAndCheck(jnp_fun, args_maker) + @parameterized.named_parameters(itertools.chain.from_iterable( + jtu.cases_from_list( + {"testcase_name": "{}_inshape={}_axis={}_keepdims={}_initial={}_promote_integers={}".format( + rec.test_name.capitalize(), + jtu.format_shape_dtype_string(shape, dtype), axis, keepdims, initial, promote_integers), + "rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype, + "np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name), + "initial": initial, "axis": axis, "keepdims": keepdims, "inexact": rec.inexact, + "promote_integers": promote_integers} + for shape in rec.shapes for dtype in rec.dtypes + for axis in list(range(-len(shape), len(shape))) + [None] + for initial in [0, 1] for keepdims in [False, True] + for promote_integers in [True, False] + if jtu.is_valid_shape(shape, dtype)) + for rec in JAX_REDUCER_PROMOTE_INT_RECORDS)) + def testReducerPromoteInt(self, np_op, jnp_op, rng_factory, shape, dtype, axis, + keepdims, initial, inexact, promote_integers): + rng = rng_factory(self.rng()) + is_bf16_nan_test = dtype == jnp.bfloat16 and rng_factory.__name__ == 'rand_some_nan' + @jtu.ignore_warning(category=RuntimeWarning, + message="Degrees of freedom <= 0 for slice.*") + @jtu.ignore_warning(category=np.ComplexWarning) + def np_fun(x): + x = np.asarray(x) + if inexact: + x = x.astype(dtypes.to_inexact_dtype(x.dtype)) + x_cast = x if not is_bf16_nan_test else x.astype(np.float32) + res = np_op(x_cast, axis, keepdims=keepdims, initial=initial) + res = res if not is_bf16_nan_test else res.astype(jnp.bfloat16) + print(f"res.dtype = {res.dtype}") + if not promote_integers and dtypes.issubdtype(res.dtype, np.integer): + res = res.astype(dtypes.to_numeric_dtype(x.dtype)) + return res + + jnp_fun = lambda x: jnp_op(x, axis, keepdims=keepdims, initial=initial, promote_integers=promote_integers) + jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun) + args_maker = lambda: [rng(shape, dtype)] + tol = {jnp.bfloat16: 3E-2} + print(jnp_fun(*args_maker())) + print(np_fun(*args_maker())) + self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, rtol=tol) + self._CompileAndCheck(jnp_fun, args_maker) + @parameterized.named_parameters(itertools.chain.from_iterable( jtu.cases_from_list( {"testcase_name": "{}_inshape={}_axis={}_keepdims={}".format(
[x64] Reductions are too eager to promote to 64-bit ```python In [1]: import jax.numpy as jnp In [2]: jnp.arange(4, dtype='int8').sum() Out[2]: DeviceArray(6, dtype=int64) ``` The root cause is this line: https://github.com/google/jax/blob/d849f495193c69e8c133d0c6a424ba80d72938cb/jax/_src/numpy/reductions.py#L81 It's not an issue for floating-point types, because numpy does not promote those in most cases
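For anyone hitting this before the default behavior changes: passing an explicit `dtype` keeps the narrow type today, and this PR adds a `promote_integers` keyword to `jnp.sum`/`jnp.prod` (a sketch; exact defaults depend on the installed version and the x64 flag):

```python
import jax.numpy as jnp

x = jnp.arange(4, dtype='int8')

print(x.sum().dtype)              # promoted: int32 by default, int64 with x64 enabled
print(x.sum(dtype='int8').dtype)  # int8 -- an explicit dtype sidesteps the promotion

# With the keyword this PR introduces (newer versions only):
# jnp.sum(x, promote_integers=False).dtype  ->  int8
```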
2022-09-15T22:06:36
google/jax
12,389
google__jax-12389
[ "11905" ]
60c5b324ac9e773687de13e779a10ce8a9e7aa34
diff --git a/jax/_src/checkify.py b/jax/_src/checkify.py --- a/jax/_src/checkify.py +++ b/jax/_src/checkify.py @@ -662,18 +662,54 @@ def ignore_error_output_jaxpr(jaxpr): new_jaxpr = jaxpr.replace(outvars=jaxpr.outvars[3:]) return core.ClosedJaxpr(new_jaxpr, consts) +def batch_error(err, code, payload, batch_shape): + err = jnp.broadcast_to(err, batch_shape) + code = jnp.broadcast_to(code, batch_shape) + payload = jnp.broadcast_to(payload, batch_shape+(3,)) + return err, code, payload + +def unbatch_error(err, code, payload): + err = err.ravel()[0] + code = code.ravel()[0] + payload = payload.reshape(-1, 3)[0] + return err, code, payload + +def trivial_batched_jaxpr(jaxpr, batch_shape, batched_err): + fun = core.jaxpr_as_fun(jaxpr) + + def g(err, code, payload, *a): + err_args = unbatch_error(err, code, payload) + err, code, payload, *out = fun(*err_args, *a) + err, code, payload = batch_error(err, code, payload, batch_shape) + return (err, code, payload, *out) + + error_avals = map(lambda x: core.raise_to_shaped(core.get_aval(x)), batched_err) + new_jaxpr, _, literals_out = pe.trace_to_jaxpr_dynamic( + lu.wrap_init(g), [*error_avals, *jaxpr.in_avals[3:]]) + return core.ClosedJaxpr(new_jaxpr, literals_out) + def while_loop_error_check(error, enabled_errors, *in_flat, cond_nconsts, cond_jaxpr, body_nconsts, body_jaxpr): + batch_shape = cond_jaxpr.out_avals[0].shape + if batch_shape: + err_args = batch_error(error.err, error.code, error.payload, batch_shape) + else: + err_args = [error.err, error.code, error.payload] + c_consts, b_consts, carry = split_list(in_flat, [cond_nconsts, body_nconsts]) # Check if the first cond application will error. checked_cond_jaxpr, msgs_cond = checkify_jaxpr(cond_jaxpr, error, enabled_errors) + if batch_shape: + checked_cond_jaxpr = trivial_batched_jaxpr(checked_cond_jaxpr, batch_shape, err_args) cond_err, cond_code, cond_payload, _ = core.jaxpr_as_fun(checked_cond_jaxpr)( - error.err, error.code, error.payload, *c_consts, *carry) + *err_args, *c_consts, *carry) checked_body_jaxpr_, msgs_body = checkify_while_body_jaxpr( cond_jaxpr, body_jaxpr, error, enabled_errors, c_consts) + if batch_shape: + checked_body_jaxpr_ = trivial_batched_jaxpr(checked_body_jaxpr_, batch_shape, err_args) to_move = [False] * 3 + [True] * body_nconsts + [False] * len(carry) checked_body_jaxpr = pe.move_binders_to_front(checked_body_jaxpr_, to_move) @@ -686,6 +722,9 @@ def while_loop_error_check(error, enabled_errors, *in_flat, cond_nconsts, *new_in_flat, cond_nconsts=cond_nconsts, cond_jaxpr=compat_cond_jaxpr, body_nconsts=body_nconsts, body_jaxpr=checked_body_jaxpr) new_msgs = {**error.msgs, **msgs_body, **msgs_cond} + if batch_shape: + err, code, payload = unbatch_error(err, code, payload) + return out, Error(err, code, new_msgs, payload) error_checks[lax.while_p] = while_loop_error_check
diff --git a/tests/checkify_test.py b/tests/checkify_test.py --- a/tests/checkify_test.py +++ b/tests/checkify_test.py @@ -875,5 +875,35 @@ def f(x): self.assertIsNotNone(err.get()) self.assertIn("should be positive", err.get()) + def test_checkify_of_vmap_of_while(self): + @jax.vmap + def fun(n, v): + def while_cond(s): + counter, value = s + checkify.check(value < 6, "value needs to be less than 6!") + return counter > 0 + + def while_body(s): + counter, value = s + checkify.check(value >= 0, "value needs to be positive!") + return counter/value, value - 1. + + _, result = jax.lax.while_loop(while_cond, while_body, (n, v)) + return result + + checked_f = checkify.checkify(fun, errors=checkify.all_checks) + + err, _ = checked_f(jnp.asarray([1., 2., 3.]), jnp.asarray([5., 2., 4.])) + self.assertIsNotNone(err.get()) + self.assertStartsWith(err.get(), "divided by zero") + + err, _ = checked_f(jnp.asarray([1., 2., 3.]), jnp.asarray([5., 2., -4.])) + self.assertIsNotNone(err.get()) + self.assertStartsWith(err.get(), "value needs to be positive") + + err, _ = checked_f(jnp.asarray([1., 2., 3.]), jnp.asarray([6., 2., -4.])) + self.assertIsNotNone(err.get()) + self.assertStartsWith(err.get(), "value needs to be less than 6") + if __name__ == "__main__": absltest.main(testLoader=jtu.JaxTestLoader())
BUG: checkify of a vmap-ed while loop fails with an obscure error ### Description Consider the following code: ```python def test_checkify_with_batched_while(): def func_to_check_with_loop(n, v): def while_condition(loop_state): counter, _ = loop_state return counter > 0 def loop_body(loop_state): counter, value = loop_state checkify.check(value > 0, 'value must be positive') return counter - 1, value - 1 _, result = jax.lax.while_loop(while_condition, loop_body, (n, v)) return result batched_func = jax.vmap(func_to_check_with_loop) checked_f = checkify.checkify(batched_func) err, val = checked_f(jnp.asarray([1, 2, 3]), jnp.asarray([5, 2, 4])) err.throw() assert jnp.all(val == jnp.asarray([4, 0, 1])) ``` The code above fails when calling `checked_f` with the following error (truncated): ``` def _pred_bcast_select_mhlo( pred_aval: core.ShapedArray, pred: ir.Value, xs: Sequence[ir.Value], ys: Sequence[ir.Value], x_y_aval: core.AbstractValue) -> Sequence[ir.Value]: if x_y_aval is core.abstract_token: x, = xs y, = ys return [mhlo.AfterAllOp(mlir.aval_to_ir_type(x_y_aval), [x, y]).result] else: assert isinstance(x_y_aval, core.ShapedArray), x_y_aval x, = xs y, = ys assert x.type == y.type, (x.type, y.type) > assert (pred_aval.shape == x_y_aval.shape[:len(pred_aval.shape)]), ( pred_aval.shape, x_y_aval) E jax._src.traceback_util.UnfilteredStackTrace: AssertionError: ((3,), ShapedArray(bool[])) ``` If one modifies the code by either removing `checkify`, or making a non-batched version by removing `vmap`, everything works as expected. Can you please confirm that this is indeed a bug in JAX and, if so, provide a workaround? ### What jax/jaxlib version are you using? 0.3.16 ### Which accelerator(s) are you using? CPU ### Additional System Info Mac
Given that many library users will likely use checkify on the top-most function (i.e. `train_step`), it would be nice to have some transformation that would allow the user to locally exclude functions from the checkify transform even if the calling code has been transformed. It should help to work around problems like the one in this issue until the new functionality is perfectly stable and will probably be useful in general. For instance, in my case I would have liked to just label the function with the vmap-ed while loop as non-checkifiable, while continuing to check the rest of the code. One other example where this transformation can be useful can be illustrated by the following code: ```python def entropy(logits): chex.assert_rank(logits, 1) log_probs = jax.nn.log_softmax(logits) probs = jnp.exp(log_probs) return -jnp.sum(jnp.where(probs > 0, probs * log_probs, jnp.zeros_like(probs))) ``` `NaN` can arise in this code for zero probabilities, but it is a valid behavior for this particular function as it relies on `jnp.where` to filter them out. Therefore, I don't want `NaN` checks enabled for this particular function, but these checks will be very useful for the rest of the code, so I would like to be able to enable them globally. Hello, can someone take a look at this please? Sorry for the delayed response! I've been out of office for the past 2 weeks. Thanks for the clear and minimal repro, I'll send a fix for this soon with your repro as a test-case. Also, good point on adding some ability to locally exclude functions from being checkified. We've been thinking of a way to support "nullifying" any enabled checks from an outer checkify. I'll take this as a +1 to get that feature added!
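On the "exclude NaN checks from specific code" point: there is no per-function opt-out yet, but the outermost `checkify.checkify` already takes an `errors=` set, so one option today is to enable only explicit user checks and leave the automatic NaN/division checks off entirely (a sketch using the public API; behavior may evolve):

```python
import jax.numpy as jnp
from jax.experimental import checkify

def f(x):
    checkify.check(jnp.all(x > 0), "x must be positive")
    return jnp.log(x)  # would trip an automatic NaN check for x <= 0

# Only explicit checkify.check calls are discharged; NaN/div/OOB checks are
# not inserted, so NaN-then-mask patterns like the entropy example above
# are left alone.
err, out = checkify.checkify(f, errors=checkify.user_checks)(jnp.array([1.0, 2.0]))
err.throw()  # no error
```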
2022-09-16T16:55:27
google/jax
12,444
google__jax-12444
[ "12385" ]
48b89560e58fcacb02e558e6a069c36bfb49ecd6
diff --git a/jax/_src/lax/control_flow/conditionals.py b/jax/_src/lax/control_flow/conditionals.py --- a/jax/_src/lax/control_flow/conditionals.py +++ b/jax/_src/lax/control_flow/conditionals.py @@ -139,6 +139,10 @@ def switch(index, branches, *operands): if disallowed_effects: raise NotImplementedError( f'Effects not supported in `switch`: {disallowed_effects}') + if joined_effects: + # Raise index in case of effects to allow data-dependence-based discharging + # of those effects (even if they don't have an explicit data dependence). + index = core.raise_as_much_as_possible(index) linear = (False,) * (len(consts) + len(ops)) out = cond_p.bind( @@ -235,6 +239,10 @@ def cond(pred, true_fun, false_fun, *operands): f'Effects not supported in `cond`: {disallowed_effects}') index = lax.convert_element_type(pred, np.int32) + if joined_effects: + # Raise index in case of effects to allow data-dependence-based discharging + # of those effects (even if they don't have an explicit data dependence). + index = core.raise_as_much_as_possible(index) linear = [False] * len(consts) + linear_ops out = cond_p.bind(
diff --git a/tests/checkify_test.py b/tests/checkify_test.py --- a/tests/checkify_test.py +++ b/tests/checkify_test.py @@ -766,6 +766,33 @@ def f(init_i, init_val): self.assertIsNotNone(err.get()) self.assertStartsWith(err.get(), "x must be negative") + def test_assert_discharging_cond(self): + def true_branch(x): + checkify.check(jnp.all(x != 0.), "x cannot be 0") + return 1/x + + def false_branch(x): + checkify.check(jnp.all(x >= 0), "x must be positive") + return x*2 + + @jax.jit + def f(pred, x): + return lax.cond(pred, true_branch, false_branch, x) + + checked_f = checkify.checkify(f) + + err, _ = checked_f(True, 0.) + self.assertIsNotNone(err.get()) + self.assertStartsWith(err.get(), "x cannot be 0") + err, _ = checked_f(False, 0.) + self.assertIsNone(err.get()) + + err, _ = checked_f(False, -1.) + self.assertIsNotNone(err.get()) + self.assertStartsWith(err.get(), "x must be positive") + err, _ = checked_f(True, -1.) + self.assertIsNone(err.get()) + def test_assert_batching_rule(self): @jax.vmap def f(x): @@ -863,12 +890,12 @@ def g(x): checkify.checkify(g)(0.) # does not crash def test_grad(self): - @checkify.checkify @jax.grad def f(x): checkify.check(jnp.all(x > 0), "should be positive!") return x + f = checkify.checkify(f) err, _ = f(1.) self.assertIsNone(err.get()) @@ -906,6 +933,30 @@ def while_body(s): self.assertIsNotNone(err.get()) self.assertStartsWith(err.get(), "value needs to be less than 6") + def test_assert_cond_no_data_dependence(self): + def f(): + return jax.lax.cond(True, + lambda: checkify.check(False, "hi!"), + lambda: checkify.check(False, "bye!")) + + f = checkify.checkify(f) + err, _ = f() + self.assertIsNotNone(err.get()) + self.assertStartsWith(err.get(), "hi!") + + def test_assert_switch_no_data_dependence(self): + def branch(): + checkify.check(False, "hi!") + + def f(): + return lax.switch(0, [branch]*3) + + checked_f = checkify.checkify(f) + + err, _ = checked_f() + self.assertIsNotNone(err.get()) + self.assertStartsWith(err.get(), "hi!") + class LowerableChecksTest(jtu.JaxTestCase): def setUp(self): super().setUp() @@ -926,6 +977,5 @@ def f(x): with self.assertRaisesRegex(xla_extension.XlaRuntimeError, "x needs to be positive"): f(-1.) - if __name__ == "__main__": absltest.main(testLoader=jtu.JaxTestLoader())
Problem mixing checkify.check and jax.lax.switch ### Description I'm trying to do a switch over functions that have a checkify check inside. It goes somewhat like this: ```python def make_branch(i): def branch(): result = jnp.full((1,), i) checkify.check(jnp.sum(result) > 0, 'Failed!') return result return branch index = jnp.full((), 0) result = jax.lax.switch(index, [make_branch(i) for i in range(3)]) ``` This, unfortunately, fails with `Cannot abstractly evaluate a checkify.check which was not functionalized`, presumably because `switch` compiles its branches. I've tried transforming branches with `checkify`: ```python def make_branch(i): @checkify.checkify def branch(): result = jnp.full((1,), i) checkify.check(jnp.sum(result) > 0, 'Failed!') return result return branch index = jnp.full((), 0) err, result = jax.lax.switch(index, [make_branch(i) for i in range(3)]) checkify.check_error(err) ``` This approach, unfortunately, fails with another error: ``` branch 0 and 1 outputs must have same type structure, got PyTreeDef((CustomNode(Error[((4, 'Failed! (check failed at <ipython-input-14-15495f6829f6>:6 (branch))'),)], [*, *, *]), *)) and PyTreeDef((CustomNode(Error[((5, 'Failed! (check failed at <ipython-input-14-15495f6829f6>:6 (branch))'),)], [*, *, *]), *)). ``` For some reason the type signatures of the branches differ when an error is involved. That seems like unexpected behavior that prevents using checks with conditional constructs. Is there a way around it? ### What jax/jaxlib version are you using? jax v0.3.17, jaxlib v0.3.15 ### Which accelerator(s) are you using? GPU ### Additional System Info Google Colab
hm, I think this means we need a `switch_error_check` which will do this functionalizing/merging for us. Thanks for reporting! (and for bearing with us while we make checkify feature complete!) aha, actually the issue is that the `lax.switch` doesn't have any inputs: if you add an operand, the check is successfully eliminated (so you don't get the first functionalization error). ``` def make_branch(i): def branch(x): checkify.check(jnp.sum(x) > 0, 'Failed!') return x return branch def f(x): return lax.switch(0, [make_branch(i) for i in range(3)], x) checked_f = checkify.checkify(f) err, _ = checked_f(-1.) ``` This still needs to be fixed, but might unblock you for the moment?
2022-09-21T11:59:38
google/jax
12,532
google__jax-12532
[ "10813" ]
ae49d2e033f5aa637d67c5679102cc1a21164e6e
diff --git a/jax/_src/numpy/reductions.py b/jax/_src/numpy/reductions.py --- a/jax/_src/numpy/reductions.py +++ b/jax/_src/numpy/reductions.py @@ -86,17 +86,21 @@ def _reduction(a, name, np_fun, op, init_val, has_identity=True, if not _all(core.greater_equal_dim(shape[d], 1) for d in pos_dims): raise ValueError(f"zero-size array to reduction operation {name} which has no identity") - result_dtype = dtypes.canonicalize_dtype(dtype or dtypes.dtype(a)) + result_dtype = dtype or dtypes.dtype(a) - # promote_integers=True matches NumPy's behavior for sum() and prod(), which promotes - # all int-like inputs to the widest available dtype. if dtype is None and promote_integers: + # Note: NumPy always promotes to 64-bit; jax instead promotes to the + # default dtype as defined by dtypes.int_ or dtypes.uint. if dtypes.issubdtype(result_dtype, np.bool_): - result_dtype = dtypes.canonicalize_dtype(np.int64) + result_dtype = dtypes.int_ elif dtypes.issubdtype(result_dtype, np.unsignedinteger): - result_dtype = dtypes.canonicalize_dtype(np.uint64) + if np.iinfo(result_dtype).bits < np.iinfo(dtypes.uint).bits: + result_dtype = dtypes.uint elif dtypes.issubdtype(result_dtype, np.integer): - result_dtype = dtypes.canonicalize_dtype(np.int64) + if np.iinfo(result_dtype).bits < np.iinfo(dtypes.int_).bits: + result_dtype = dtypes.int_ + + result_dtype = dtypes.canonicalize_dtype(result_dtype) if upcast_f16_for_computation and dtypes.issubdtype(result_dtype, np.inexact): computation_dtype = _upcast_f16(result_dtype)
[x64] Reductions are too eager to promote to 64-bit ```python In [1]: import jax.numpy as jnp In [2]: jnp.arange(4, dtype='int8').sum() Out[2]: DeviceArray(6, dtype=int64) ``` The root cause is this line: https://github.com/google/jax/blob/d849f495193c69e8c133d0c6a424ba80d72938cb/jax/_src/numpy/reductions.py#L81 It's not an issue for floating-point types, because numpy does not promote those in most cases
Still need to flip the default for this to be fixed
2022-09-27T17:32:19
google/jax
12,538
google__jax-12538
[ "12536" ]
9e4114f0f13fec49e054240b344dad9e56f988dc
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -61,7 +61,7 @@ def generate_proto(source): author='JAX team', author_email='[email protected]', packages=find_packages(exclude=["examples"]), - package_data={'jax': ['py.typed']}, + package_data={'jax': ['py.typed', "*.pyi", "**/*.pyi"]}, python_requires='>=3.7', install_requires=[ 'absl-py',
Array typing/IDE errors since version 0.3.18 ### Description JAX 0.3.18 seems to have included some updates to the way array types are handled (via the introduction of `jax.Array`). This seems to have broken some things for those of us running type checkers. Basic operations like `some_array.shape` now result in errors in both `pyright` and `mypy`: ![image](https://user-images.githubusercontent.com/6992947/192605080-591e182a-ed32-4ee0-90d8-91b8aae0d155.png) Tab completion for common array operations like `.shape`, `.reshape()`, `.astype()`, etc also seems broken. Is this a bug? If so is there a fix planned? Thanks so much!! ### What jax/jaxlib version are you using? jax v0.3.18, v0.3.19 ### Which accelerator(s) are you using? _No response_ ### Additional system info _No response_ ### NVIDIA GPU info _No response_
Thanks for the report, and sorry for this issue! In 0.3.18 we introduced more restrictive array types as part of the work in #12049. Those array attributes are defined in the stub file here: https://github.com/google/jax/blob/main/jax/_src/basearray.pyi This is enough to satisfy mypy in our github CI checks, as well as google's internal fork of pytype, which is used for Google's internal static type checking. I wonder why it's not working for you? Perhaps the `.pyi` file is somehow not being properly registered?
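The fix in the patch above is to ship the `.pyi` stubs via `package_data`; a quick way to check whether an installed wheel actually contains them (pure diagnostic code, not a JAX API):

```python
import pathlib
import jax

pkg_dir = pathlib.Path(jax.__file__).parent
print((pkg_dir / "py.typed").exists())
print([str(p.relative_to(pkg_dir)) for p in pkg_dir.rglob("*.pyi")])
# An empty list means the stub that declares jax.Array's attributes
# (jax/_src/basearray.pyi) never made it into the wheel, which matches the
# pyright/mypy errors above.
```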
2022-09-27T19:56:09
google/jax
12,546
google__jax-12546
[ "12542" ]
933b6a2fa413046f1cd1413fe144af71dd04b03c
diff --git a/jax/_src/api.py b/jax/_src/api.py --- a/jax/_src/api.py +++ b/jax/_src/api.py @@ -537,6 +537,8 @@ def _device_array_use_fast_path(execute, out_pytree_def, args_flat, out_flat): not execute.args[5] and not execute.args[6] and # Has no host callbacks not execute.args[8] and + # impl rule must have been called, i.e. top trace is an EvalTrace + isinstance(core.find_top_trace(args_flat), core.EvalTrace) and # Not supported: ShardedDeviceArray all(device_array.type_is_device_array(x) for x in out_flat) and # Not supported: dynamic shapes
diff --git a/tests/api_test.py b/tests/api_test.py --- a/tests/api_test.py +++ b/tests/api_test.py @@ -3872,6 +3872,53 @@ def test_jit_negative_static_argnums(self): g = jax.jit(lambda x, y: x * y, static_argnums=-1) g(1, 2) # doesn't crash + def test_fastpath_cache_confusion(self): + # https://github.com/google/jax/issues/12542 + @jax.jit + def a(x): + return () + + @jax.jit + def b(x): + return a(x) + + + @jax.jit + def g(x): + return x, x + + @jax.jit + def h(x): + return g(x) + + jaxpr = jax.make_jaxpr(h)(7) + jax.core.eval_jaxpr(jaxpr.jaxpr, jaxpr.consts, 7) + + b(8) # don't crash + + def test_fastpath_cache_confusion2(self): + @jax.jit + def a(): # note nullary function, still staged out though + return () + + @jax.jit + def b(x): + return a() + + + @jax.jit + def g(x): + return x, x + + @jax.jit + def h(x): + return g(x) + + jaxpr = jax.make_jaxpr(h)(7) + jax.core.eval_jaxpr(jaxpr.jaxpr, jaxpr.consts, 7) + + b(8) # don't crash + @jtu.with_config(jax_experimental_subjaxpr_lowering_cache=True) class SubcallTraceCacheTest(jtu.JaxTestCase):
C++ jit dispatch failure due to stale cache entries The following program ``` import jax @jax.jit def a(x): return () @jax.jit def b(x): return a(x) @jax.jit def g(x): return x, x @jax.jit def h(x): return g(x) jaxpr = jax.make_jaxpr(h)(7) jax.core.eval_jaxpr(jaxpr.jaxpr, jaxpr.consts, 7) b(8) ``` fails with: ``` Traceback (most recent call last): File "/Users/phawkins/p/jax/t.py", line 27, in <module> b(8) File "/Users/phawkins/p/jax/jax/_src/traceback_util.py", line 162, in reraise_with_filtered_traceback return fun(*args, **kwargs) File "/Users/phawkins/p/jax/jax/_src/api.py", line 603, in cache_miss out_flat = xla.xla_call( File "/Users/phawkins/p/jax/jax/core.py", line 1953, in bind return call_bind(self, fun, *args, **params) File "/Users/phawkins/p/jax/jax/core.py", line 1969, in call_bind outs = top_trace.process_call(primitive, fun_, tracers, params) File "/Users/phawkins/p/jax/jax/core.py", line 695, in process_call return primitive.impl(f, *tracers, **params) File "/Users/phawkins/p/jax/jax/_src/dispatch.py", line 233, in _xla_call_impl compiled_fun = xla_callable(fun, device, backend, name, donated_invars, File "/Users/phawkins/p/jax/jax/linear_util.py", line 295, in memoized_fun ans = call(fun, *args) File "/Users/phawkins/p/jax/jax/_src/dispatch.py", line 367, in _xla_callable_uncached return lower_xla_callable(fun, device, backend, name, donated_invars, False, File "/Users/phawkins/p/jax/jax/_src/profiler.py", line 313, in wrapper return func(*args, **kwargs) File "/Users/phawkins/p/jax/jax/_src/dispatch.py", line 453, in lower_xla_callable jaxpr, out_type, consts = pe.trace_to_jaxpr_final2( File "/Users/phawkins/p/jax/jax/_src/profiler.py", line 313, in wrapper return func(*args, **kwargs) File "/Users/phawkins/p/jax/jax/interpreters/partial_eval.py", line 2080, in trace_to_jaxpr_final2 jaxpr, out_type, consts = trace_to_subjaxpr_dynamic2(fun, main, debug_info) File "/Users/phawkins/p/jax/jax/interpreters/partial_eval.py", line 2030, in trace_to_subjaxpr_dynamic2 ans = fun.call_wrapped(*in_tracers_) File "/Users/phawkins/p/jax/jax/linear_util.py", line 168, in call_wrapped ans = self.f(*args, **dict(self.params, **kwargs)) File "/Users/phawkins/p/jax/t.py", line 12, in b return a(x) File "/Users/phawkins/p/jax/jax/_src/traceback_util.py", line 162, in reraise_with_filtered_traceback return fun(*args, **kwargs) File "/Users/phawkins/p/jax/jax/_src/api.py", line 624, in cache_miss fastpath_data = _device_array_use_fast_path(execute, out_pytree_def, args_flat, out_flat) File "/Users/phawkins/p/jax/jax/_src/api.py", line 557, in _device_array_use_fast_path assert len(avals) == len(out_flat) jax._src.traceback_util.UnfilteredStackTrace: AssertionError The stack trace below excludes JAX-internal frames. The preceding is the original exception that occurred, unmodified. -------------------- The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/Users/phawkins/p/jax/t.py", line 27, in <module> b(8) File "/Users/phawkins/p/jax/t.py", line 12, in b return a(x) AssertionError ``` `eval_jaxpr` leaves `most_recent_entry` populated on the `xla_callable` cache for `g`, since it uses `xla_call`'s `impl` rule but never steals the cache entry. Later, the `jit` cache miss logic gets confused because it finds the stale entry for `g` when it calls `a`. `a` doesn't get a `most_recent_entry` because it is being traced into a jaxpr and its `impl` rule never runs.
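The guard the patch adds only lets the C++ fast path record an entry when the impl rule actually ran, i.e. when the current top trace is an `EvalTrace`. A small, version-dependent illustration of that distinction (this pokes at `jax.core` internals, so treat it as a sketch):

```python
import jax
import jax.numpy as jnp
from jax import core

# Eager call: concrete arguments, the impl rule runs -> EvalTrace.
print(isinstance(core.find_top_trace([jnp.float32(1.)]), core.EvalTrace))   # True

# While tracing (e.g. via make_jaxpr or jit), arguments are tracers -> not an
# EvalTrace, so the fix skips the fast-path bookkeeping there.
def g(x):
    print(isinstance(core.find_top_trace([x]), core.EvalTrace))             # False
    return x

jax.make_jaxpr(g)(1.)
```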
2022-09-28T03:57:30
google/jax
12,594
google__jax-12594
[ "12582" ]
aafc77d3c098acfe8e1a654b50a05f69f577af92
diff --git a/jax/_src/config.py b/jax/_src/config.py --- a/jax/_src/config.py +++ b/jax/_src/config.py @@ -699,6 +699,16 @@ def _update_jax_array_thread_local(val): help=('Enables an internal upgrade that implements `jax.custom_vjp` by ' 'reduction to `jax.custom_jvp` and `jax.custom_transpose`.')) +raise_persistent_cache_errors = config.define_bool_state( + name='jax_raise_persistent_cache_errors', + default=False, + help=('If true, exceptions raised when reading or writing to the ' + 'persistent compilation cache will be allowed through, halting ' + 'program execution if not manually caught. If false, exceptions are ' + 'caught and raised as warnings, allowing program execution to ' + 'continue. Defaults to false so cache bugs or intermittent issues ' + 'are non-fatal.')) + hlo_source_file_canonicalization_regex = config.define_string_state( name='jax_hlo_source_file_canonicalization_regex', default=None, diff --git a/jax/_src/dispatch.py b/jax/_src/dispatch.py --- a/jax/_src/dispatch.py +++ b/jax/_src/dispatch.py @@ -78,6 +78,7 @@ Buffer = xe.Buffer XlaExecutable = xc.Executable +CompileOptions = xc.CompileOptions map, unsafe_map = util.safe_map, map zip, unsafe_zip = util.safe_zip, zip @@ -1016,6 +1017,10 @@ def compile_or_get_cached(backend, computation: ir.Module, compile_options, sym_name = computation.operation.attributes['sym_name'] module_name = ir.StringAttr(sym_name).value + + if FLAGS.jax_dump_ir_to: + _dump_ir_to_file(module_name, mlir.module_to_string(computation)) + # Convert ir.Module to a string representation, unless the # back-end expliclity flags the ability to handle a module directly # (avoiding the overhead of back and forth conversions) @@ -1036,23 +1041,57 @@ def compile_or_get_cached(backend, computation: ir.Module, compile_options, if "--xla_gpu_enable_xla_runtime_executable=true" in os.environ.get("XLA_FLAGS", ""): supported_platforms.append("gpu") if cc.is_initialized() and backend.platform in supported_platforms: - cached_executable = cc.get_executable(serialized_computation, - compile_options, backend) + cached_executable = _cache_read(serialized_computation, module_name, + compile_options, backend) if cached_executable is not None: - logging.info('Persistent compilation cache hit for %s.', module_name) + logging.info("Persistent compilation cache hit for '%s'", module_name) return cached_executable else: compiled = backend_compile(backend, serialized_computation, compile_options, host_callbacks) - cc.put_executable(module_name, serialized_computation, compile_options, - compiled, backend) + _cache_write(serialized_computation, module_name, compile_options, + backend, compiled) return compiled - if FLAGS.jax_dump_ir_to: - _dump_ir_to_file(module_name, mlir.module_to_string(computation)) return backend_compile(backend, serialized_computation, compile_options, host_callbacks) +def _cache_read(computation: Union[str, bytes, ir.Module], + module_name: str, + compile_options: CompileOptions, + backend: Backend) -> Optional[XlaExecutable]: + """Looks up `computation` in the persisent compilation cache.""" + # Avoid import cycle between jax and jax.experimental + from jax.experimental.compilation_cache import compilation_cache as cc + + try: + return cc.get_executable(computation, compile_options, backend) + except Exception as ex: + if config.jax_raise_persistent_cache_errors: + raise + warnings.warn( + f"Error reading persistent compilation cache entry for " + f"'{module_name}': {type(ex).__name__}: {ex}") + return None + +def _cache_write(computation: Union[str, 
bytes, ir.Module], + module_name: str, + compile_options: CompileOptions, + backend: Backend, + compiled: XlaExecutable): + """Writes `computation` to the persistent compilation cache.""" + # Avoid import cycle between jax and jax.experimental + from jax.experimental.compilation_cache import compilation_cache as cc + + try: + cc.put_executable(module_name, computation, compile_options, compiled, + backend) + except Exception as ex: + if config.jax_raise_persistent_cache_errors: + raise + warnings.warn( + f"Error writing persistent compilation cache entry for " + f"'{module_name}': {type(ex).__name__}: {ex}") def get_buffer_counts(out_avals, ordered_effects, has_unordered_effects): buffer_counts = [aval_to_num_buffers(aval) for aval in out_avals]
diff --git a/tests/compilation_cache_test.py b/tests/compilation_cache_test.py --- a/tests/compilation_cache_test.py +++ b/tests/compilation_cache_test.py @@ -19,7 +19,8 @@ import sys import tempfile import unittest -from unittest import SkipTest +from unittest import mock, SkipTest +import warnings from absl.testing import absltest from jax.experimental import PartitionSpec as P @@ -35,9 +36,12 @@ import numpy as np from jax.config import config +from jax._src.config import raise_persistent_cache_errors + config.parse_flags_with_absl() FLAGS = config.FLAGS [email protected]_config(jax_raise_persistent_cache_errors=True) class CompilationCacheTest(jtu.JaxTestCase): def setUp(self): @@ -295,6 +299,38 @@ def f(x): files_in_directory = len(os.listdir(tmpdir)) self.assertEqual(files_in_directory, 2) + def test_cache_write_warning(self): + with tempfile.TemporaryDirectory() as tmpdir: + cc.initialize_cache(tmpdir) + f = jit(lambda x: x*x) + + with raise_persistent_cache_errors(False), \ + mock.patch.object(cc._cache.__class__, 'put') as mock_put, \ + warnings.catch_warnings(record=True) as w: + mock_put.side_effect = RuntimeError("test error") + self.assertEqual(f(2), 4) + self.assertLen(w, 1) + self.assertIn( + "Error writing persistent compilation cache entry " + "for 'jit__lambda_': RuntimeError: test error", + str(w[0].message)) + + def test_cache_read_warning(self): + with tempfile.TemporaryDirectory() as tmpdir: + cc.initialize_cache(tmpdir) + f = jit(lambda x: x*x) + + with raise_persistent_cache_errors(False), \ + mock.patch.object(cc._cache.__class__, 'get') as mock_get, \ + warnings.catch_warnings(record=True) as w: + mock_get.side_effect = RuntimeError("test error") + self.assertEqual(f(2), 4) + self.assertLen(w, 1) + self.assertIn( + "Error reading persistent compilation cache entry " + "for 'jit__lambda_': RuntimeError: test error", + str(w[0].message)) + def create_new_debug_options(self, debug_options_obj): debug_options_obj.xla_cpu_enable_fast_math = False debug_options_obj.xla_cpu_fast_math_honor_infs = False
Make persistent compilation cache warn instead of raise an error on cache read/write failures This would allow programs that use caching to keep running when there's a problem with the cache, instead of crashing. This could be useful for long training jobs that use a GCS cache, which we've seen a lot of issues with lately.
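A minimal sketch of the catch-and-warn pattern the patch above implements (names here are illustrative; the real flag it adds is `jax_raise_persistent_cache_errors`):

```python
import warnings

RAISE_CACHE_ERRORS = False  # stand-in for the config flag

def cache_read(cache, key, module_name):
    """Return a cached executable, or None if the lookup fails non-fatally."""
    try:
        return cache.get(key)
    except Exception as ex:
        if RAISE_CACHE_ERRORS:
            raise
        warnings.warn(f"Error reading persistent compilation cache entry for "
                      f"'{module_name}': {type(ex).__name__}: {ex}")
        return None
```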
2022-09-30T01:27:00
google/jax
12,607
google__jax-12607
[ "12596" ]
fb8558cfdd10b40ceafa1cde7b29777e03b8b29e
diff --git a/jax/_src/callback.py b/jax/_src/callback.py --- a/jax/_src/callback.py +++ b/jax/_src/callback.py @@ -86,13 +86,13 @@ def pure_callback_batching_rule(args, dims, *, callback, vectorized: bool, else: is_batched = [d is not batching.not_mapped for d in dims] unbatched_args, batched_args = util.partition_list(is_batched, new_args) - def _batch_fun(*batched_args): + def _batch_fun(batched_args): merged_args = util.merge_lists(is_batched, unbatched_args, batched_args) return pure_callback_p.bind( *merged_args, callback=callback, result_avals=result_avals, vectorized=vectorized) from jax._src.lax.control_flow import map as lax_map - outvals = lax_map(_batch_fun, *batched_args) + outvals = lax_map(_batch_fun, batched_args) return tuple(outvals), (0,) * len(outvals)
diff --git a/tests/python_callback_test.py b/tests/python_callback_test.py --- a/tests/python_callback_test.py +++ b/tests/python_callback_test.py @@ -592,6 +592,13 @@ def h(x, y): out = h(jnp.arange(4.), 4.) np.testing.assert_allclose(out, np.sin(np.arange(4.)) + 4.) + @jax.jit + @functools.partial(jax.vmap) + def h(x, y): + return jax.pure_callback(lambda x, y: np.sin(x) + y, x, x, y) + out = h(jnp.arange(4.), jnp.arange(10., 14.)) + np.testing.assert_allclose(out, np.sin(np.arange(4.)) + jnp.arange(10., 14.)) + def test_vmap_vectorized_callback(self): def cb(x):
Vmap of pure_callback with multiple arguments error ### Description Here is a minimal example of the error: ``` import jax.numpy as jnp from jax import pure_callback, vmap dim = 5 num_samples = 3 test_a = jnp.ones(shape=(num_samples, dim)) def _simple_pass(a): return a def simple_pass(a): return pure_callback(_simple_pass, a, a, vectorized=False) out = vmap(simple_pass)(test_a) def _simple_pass_pair(a, b): return a, b def simple_pass_pair(a, b): return pure_callback(_simple_pass_pair, (a, b), a, b, vectorized=False) out_pair = vmap(simple_pass_pair)(test_a, test_a) ``` While vmapping works in the case of a single input argument, with **two or more** input arguments we get the following error: ``` Traceback (most recent call last): File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/code.py", line 90, in runcode exec(code, self.locals) File "<input>", line 1, in <module> File "/Applications/PyCharm.app/Contents/plugins/python/helpers/pydev/_pydev_bundle/pydev_umd.py", line 198, in runfile pydev_imports.execfile(filename, global_vars, local_vars) # execute the script File "/Applications/PyCharm.app/Contents/plugins/python/helpers/pydev/_pydev_imps/_pydev_execfile.py", line 18, in execfile exec(compile(contents+"\n", file, 'exec'), glob, loc) File "/Users/lenarttreven/PycharmProjects/learning_dynamical_systems/experiments/random/pure_callback.py", line 28, in <module> out_pair = vmap(simple_pass_pair)(test_a, test_a) File "/Users/lenarttreven/PycharmProjects/learning_dynamical_systems/experiments/random/pure_callback.py", line 25, in simple_pass_pair return pure_callback(_simple_pass, (a, b), a, b, vectorized=False) File "/Users/lenarttreven/python_venv/artemis/lib/python3.10/site-packages/jax/_src/callback.py", line 136, in pure_callback out_flat = pure_callback_p.bind( File "/Users/lenarttreven/python_venv/artemis/lib/python3.10/site-packages/jax/_src/callback.py", line 95, in pure_callback_batching_rule outvals = lax_map(_batch_fun, *batched_args) TypeError: map() takes 2 positional arguments but 3 were given ``` ### What jax/jaxlib version are you using? jax v0.3.20, jaxlib v0.3.20 ### Which accelerator(s) are you using? CPU ### Additional system info Mac M1 ### NVIDIA GPU info _No response_
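The `TypeError: map() takes 2 positional arguments` in the traceback comes from the batching rule splatting several batched arguments into `lax.map`, which accepts a single `xs` pytree; the fix in the patch above passes them as one pytree. A standalone illustration of the distinction:

```python
import jax
import jax.numpy as jnp

xs = jnp.arange(4.)
ys = jnp.arange(10., 14.)

# lax.map takes one `xs` argument, which may be a pytree of batched arrays:
out = jax.lax.map(lambda args: jnp.sin(args[0]) + args[1], (xs, ys))  # OK, shape (4,)

# Passing the batched arrays as separate positional arguments is the shape
# of the bug above:
# jax.lax.map(lambda x, y: jnp.sin(x) + y, xs, ys)
# TypeError: map() takes 2 positional arguments but 3 were given
```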
2022-09-30T22:36:11
google/jax
12,678
google__jax-12678
[ "12388" ]
d174b3dce3837873e8e4a410627c89eb53165ba5
diff --git a/jax/_src/numpy/reductions.py b/jax/_src/numpy/reductions.py --- a/jax/_src/numpy/reductions.py +++ b/jax/_src/numpy/reductions.py @@ -20,7 +20,6 @@ import numpy as np -import jax from jax import core from jax import lax from jax._src import api @@ -28,7 +27,7 @@ from jax._src.numpy.ndarray import ndarray from jax._src.numpy.util import _broadcast_to, _check_arraylike, _complex_elem_type, _promote_dtypes_inexact, _promote_dtypes_numeric, _where, _wraps from jax._src.lax import lax as lax_internal -from jax._src.util import canonicalize_axis as _canonicalize_axis, maybe_named_axis +from jax._src.util import canonicalize_axis as _canonicalize_axis, maybe_named_axis, prod as _prod _all = builtins.all @@ -328,8 +327,10 @@ def _average(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, weights=None avg = mean(a, axis=axis, keepdims=keepdims) if axis is None: weights_sum = lax.full((), core.dimension_as_value(a.size), dtype=avg.dtype) + elif isinstance(axis, tuple): + weights_sum = lax.full_like(avg, _prod(core.dimension_as_value(a.shape[d]) for d in axis)) else: - weights_sum = lax.full_like(avg, core.dimension_as_value(a.shape[axis]), dtype=avg.dtype) + weights_sum = lax.full_like(avg, core.dimension_as_value(a.shape[axis])) else: _check_arraylike("average", a, weights) a, weights = _promote_dtypes_inexact(a, weights) @@ -337,17 +338,25 @@ def _average(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, weights=None a_shape = np.shape(a) a_ndim = len(a_shape) weights_shape = np.shape(weights) - axis = None if axis is None else _canonicalize_axis(axis, a_ndim) + + if axis is None: + pass + elif isinstance(axis, tuple): + axis = tuple(_canonicalize_axis(d, a_ndim) for d in axis) + else: + axis = _canonicalize_axis(axis, a_ndim) if a_shape != weights_shape: # Make sure the dimensions work out - if axis is None: - raise ValueError("Axis must be specified when shapes of a and " - "weights differ.") if len(weights_shape) != 1: raise ValueError("1D weights expected when shapes of a and " "weights differ.") - if not core.symbolic_equal_dim(weights_shape[0], a_shape[axis]): + if axis is None: + raise ValueError("Axis must be specified when shapes of a and " + "weights differ.") + elif isinstance(axis, tuple): + raise ValueError("Single axis expected when shapes of a and weights differ") + elif not core.symbolic_equal_dim(weights_shape[0], a_shape[axis]): raise ValueError("Length of weights not " "compatible with specified axis.")
diff --git a/jax/_src/test_util.py b/jax/_src/test_util.py --- a/jax/_src/test_util.py +++ b/jax/_src/test_util.py @@ -37,8 +37,9 @@ from jax._src import dtypes as _dtypes from jax import lax from jax._src.config import flags, bool_env, config +from jax._src.numpy.lax_numpy import _promote_dtypes, _promote_dtypes_inexact from jax._src.util import prod, unzip2 -from jax.tree_util import tree_map, tree_all +from jax.tree_util import tree_map, tree_all, tree_flatten, tree_unflatten from jax._src.lib import xla_bridge from jax._src import dispatch from jax._src.public_test_util import ( # noqa: F401 @@ -758,6 +759,21 @@ def decorator(cls): return decorator +def promote_like_jnp(fun, inexact=False): + """Decorator that promotes the arguments of `fun` to `jnp.result_type(*args)`. + + jnp and np have different type promotion semantics; this decorator allows + tests make an np reference implementation act more like an jnp + implementation. + """ + _promote = _promote_dtypes_inexact if inexact else _promote_dtypes + def wrapper(*args, **kw): + flat_args, tree = tree_flatten(args) + args = tree_unflatten(tree, _promote(*flat_args)) + return fun(*args, **kw) + return wrapper + + class JaxTestCase(parameterized.TestCase): """Base class for JAX tests including numerical checks and boilerplate.""" _default_config = { diff --git a/tests/lax_numpy_operators_test.py b/tests/lax_numpy_operators_test.py --- a/tests/lax_numpy_operators_test.py +++ b/tests/lax_numpy_operators_test.py @@ -29,11 +29,9 @@ import jax.ops from jax import lax from jax import numpy as jnp -from jax import tree_util from jax._src import dtypes from jax._src import test_util as jtu -from jax._src.numpy.lax_numpy import _promote_dtypes, _promote_dtypes_inexact from jax.config import config config.parse_flags_with_absl() @@ -66,13 +64,6 @@ # uint64 is problematic because with any uint type it promotes to float: int_dtypes_no_uint64 = [d for d in int_dtypes + unsigned_dtypes if d != np.uint64] -def _indexer_with_default_outputs(indexer, use_defaults=True): - """Like jtu.with_jax_dtype_defaults, but for __getitem__ APIs""" - class Indexer: - @partial(jtu.with_jax_dtype_defaults, use_defaults=use_defaults) - def __getitem__(self, *args): - return indexer.__getitem__(*args) - return Indexer() def _valid_dtypes_for_shape(shape, dtypes): # Not all (shape, dtype) pairs are valid. In particular, Python scalars only @@ -400,21 +391,6 @@ def _shapes_are_equal_length(shapes): return all(len(shape) == len(shapes[0]) for shape in shapes[1:]) -def _promote_like_jnp(fun, inexact=False): - """Decorator that promotes the arguments of `fun` to `jnp.result_type(*args)`. - - jnp and np have different type promotion semantics; this decorator allows - tests make an np reference implementation act more like an jnp - implementation. 
- """ - _promote = _promote_dtypes_inexact if inexact else _promote_dtypes - def wrapper(*args, **kw): - flat_args, tree = tree_util.tree_flatten(args) - args = tree_util.tree_unflatten(tree, _promote(*flat_args)) - return fun(*args, **kw) - return wrapper - - class JaxNumpyOperatorTests(jtu.JaxTestCase): """Tests for LAX-backed Numpy operators.""" @@ -460,7 +436,7 @@ def testOp(self, np_op, jnp_op, rng_factory, shapes, dtypes, check_dtypes, [tolerance, tol, jtu.default_tolerance()]) with jtu.strict_promotion_if_dtypes_match(dtypes): - self._CheckAgainstNumpy(_promote_like_jnp(np_op, inexact), jnp_op, + self._CheckAgainstNumpy(jtu.promote_like_jnp(np_op, inexact), jnp_op, args_maker, check_dtypes=check_dtypes, tol=tol) self._CompileAndCheck(jnp_op, args_maker, check_dtypes=check_dtypes, atol=tol, rtol=tol) @@ -595,7 +571,7 @@ def testBitwiseOp(self, np_op, jnp_op, rng_factory, shapes, dtypes): rng = rng_factory(self.rng()) args_maker = self._GetArgsMaker(rng, shapes, dtypes) with jtu.strict_promotion_if_dtypes_match(dtypes): - self._CheckAgainstNumpy(_promote_like_jnp(np_op), jnp_op, args_maker) + self._CheckAgainstNumpy(jtu.promote_like_jnp(np_op), jnp_op, args_maker) self._CompileAndCheck(jnp_op, args_maker) @parameterized.named_parameters(jtu.cases_from_list( diff --git a/tests/lax_numpy_reducers_test.py b/tests/lax_numpy_reducers_test.py --- a/tests/lax_numpy_reducers_test.py +++ b/tests/lax_numpy_reducers_test.py @@ -57,6 +57,18 @@ python_scalar_dtypes = [jnp.bool_, jnp.int_, jnp.float_, jnp.complex_] +def _valid_dtypes_for_shape(shape, dtypes): + # Not all (shape, dtype) pairs are valid. In particular, Python scalars only + # have one type in each category (float, bool, etc.) + if shape is jtu.PYTHON_SCALAR_SHAPE: + return [t for t in dtypes if t in python_scalar_dtypes] + return dtypes + +def _shape_and_dtypes(shapes, dtypes): + for shape in shapes: + for dtype in _valid_dtypes_for_shape(shape, dtypes): + yield (shape, dtype) + def _compatible_shapes(shape): if np.ndim(shape) == 0 or shape in scalar_shapes: return [shape] @@ -457,6 +469,47 @@ def testReductionWithRepeatedAxisError(self): with self.assertRaisesRegex(ValueError, r"duplicate value in 'axis': \(0, 0\)"): jnp.sum(jnp.arange(3), (0, 0)) + @jtu.sample_product( + [dict(shape=shape, dtype=dtype, axis=axis, weights_shape=weights_shape) + for shape, dtype in _shape_and_dtypes(nonempty_shapes, number_dtypes) + for axis in list(range(-len(shape), len(shape))) + [None] + [tuple(range(len(shape)))] + # `weights_shape` is either `None`, same as the averaged axis, or same as + # that of the input + for weights_shape in ([None, shape] if axis is None or len(shape) == 1 or isinstance(axis, tuple) + else [None, (shape[axis],), shape]) + ], + keepdims=([False, True] if numpy_version >= (1, 23) else [None]), + returned=[False, True], + ) + def testAverage(self, shape, dtype, axis, weights_shape, returned, keepdims): + rng = jtu.rand_default(self.rng()) + kwds = dict(returned=returned) + if keepdims is not None: + kwds['keepdims'] = keepdims + if weights_shape is None: + np_fun = lambda x: np.average(x, axis, **kwds) + jnp_fun = lambda x: jnp.average(x, axis, **kwds) + args_maker = lambda: [rng(shape, dtype)] + else: + np_fun = lambda x, weights: np.average(x, axis, weights, **kwds) + jnp_fun = lambda x, weights: jnp.average(x, axis, weights, **kwds) + args_maker = lambda: [rng(shape, dtype), rng(weights_shape, dtype)] + np_fun = jtu.promote_like_jnp(np_fun, inexact=True) + tol = {dtypes.bfloat16: 2e-1, np.float16: 1e-2, np.float32: 1e-5, 
+ np.float64: 1e-12, np.complex64: 1e-5} + check_dtypes = shape is not jtu.PYTHON_SCALAR_SHAPE and numpy_version >= (1, 22) + if numpy_version == (1, 23, 0) and keepdims and weights_shape is not None and axis is not None: + # Known failure: https://github.com/numpy/numpy/issues/21850 + pass + else: + try: + self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, + check_dtypes=check_dtypes, tol=tol) + except ZeroDivisionError: + self.skipTest("don't support checking for ZeroDivisionError") + self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=check_dtypes, + rtol=tol, atol=tol) + @parameterized.named_parameters( jtu.cases_from_list( {"testcase_name": diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -44,7 +44,6 @@ from jax._src import dtypes from jax._src import test_util as jtu from jax._src.lax import lax as lax_internal -from jax._src.numpy.lax_numpy import _promote_dtypes, _promote_dtypes_inexact from jax._src.numpy.util import _parse_numpydoc, ParsedDoc, _wraps from jax._src.util import prod, safe_zip from jax._src import array @@ -139,21 +138,6 @@ def _shapes_are_equal_length(shapes): return all(len(shape) == len(shapes[0]) for shape in shapes[1:]) -def _promote_like_jnp(fun, inexact=False): - """Decorator that promotes the arguments of `fun` to `jnp.result_type(*args)`. - - jnp and np have different type promotion semantics; this decorator allows - tests make an np reference implementation act more like an jnp - implementation. - """ - _promote = _promote_dtypes_inexact if inexact else _promote_dtypes - def wrapper(*args, **kw): - flat_args, tree = tree_util.tree_flatten(args) - args = tree_util.tree_unflatten(tree, _promote(*flat_args)) - return fun(*args, **kw) - return wrapper - - class LaxBackedNumpyTests(jtu.JaxTestCase): """Tests for LAX-backed Numpy implementation.""" @@ -1447,7 +1431,7 @@ def testConcatenate(self, axis, dtype, base_shape, arg_dtypes): wrapped_axis = axis % len(base_shape) shapes = [base_shape[:wrapped_axis] + (size,) + base_shape[wrapped_axis+1:] for size, _ in zip(itertools.cycle([3, 1, 4]), arg_dtypes)] - @_promote_like_jnp + @jtu.promote_like_jnp def np_fun(*args, dtype=dtype): dtype = dtype or args[0].dtype args = [x if x.dtype != jnp.bfloat16 else x.astype(np.float32) @@ -1692,7 +1676,7 @@ def testApplyOverAxes(self, shape, dtype, func, keepdims, axes): def testRepeat(self, axis, shape, dtype, repeats, fixed_size): rng = jtu.rand_default(self.rng()) np_fun = lambda arg: np.repeat(arg, repeats=repeats, axis=axis) - np_fun = _promote_like_jnp(np_fun) + np_fun = jtu.promote_like_jnp(np_fun) if fixed_size: total_repeat_length = np.repeat(np.zeros(shape), repeats, axis).shape[axis or 0] jnp_fun = lambda arg, rep: jnp.repeat(arg, repeats=rep, axis=axis, @@ -2388,7 +2372,7 @@ def testColumnStack(self, shape, dtypes, array_input): args_maker = lambda: [np.array([rng(shape, dtype) for dtype in dtypes])] else: args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]] - np_fun = _promote_like_jnp(np.column_stack) + np_fun = jtu.promote_like_jnp(np.column_stack) jnp_fun = jnp.column_stack with jtu.strict_promotion_if_dtypes_match(dtypes): self._CheckAgainstNumpy(jnp_fun, np_fun, args_maker) @@ -2417,9 +2401,9 @@ def testStack(self, shape, axis, dtypes, array_input, out_dtype): args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]] if numpy_version < (1, 24): - np_fun = _promote_like_jnp(lambda *args: np.stack(*args, axis=axis).astype(out_dtype)) + np_fun = 
jtu.promote_like_jnp(lambda *args: np.stack(*args, axis=axis).astype(out_dtype)) else: - np_fun = _promote_like_jnp(partial(np.stack, axis=axis, dtype=out_dtype, casting='unsafe')) + np_fun = jtu.promote_like_jnp(partial(np.stack, axis=axis, dtype=out_dtype, casting='unsafe')) jnp_fun = partial(jnp.stack, axis=axis, dtype=out_dtype) with jtu.strict_promotion_if_dtypes_match(dtypes): @@ -2447,9 +2431,9 @@ def testHVDStack(self, shape, op, dtypes, array_input, out_dtype): args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]] if numpy_version < (1, 24) or op == "dstack": - np_fun = _promote_like_jnp(lambda *args: getattr(np, op)(*args).astype(out_dtype)) + np_fun = jtu.promote_like_jnp(lambda *args: getattr(np, op)(*args).astype(out_dtype)) else: - np_fun = partial(_promote_like_jnp(getattr(np, op)), dtype=out_dtype) + np_fun = partial(jtu.promote_like_jnp(getattr(np, op)), dtype=out_dtype) jnp_fun = partial(getattr(jnp, op), dtype=out_dtype) with jtu.strict_promotion_if_dtypes_match(dtypes): @@ -2904,47 +2888,6 @@ def testSqueeze(self, arg_shape, dtype, ax): self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker) self._CompileAndCheck(jnp_fun, args_maker) - @jtu.sample_product( - [dict(shape=shape, dtype=dtype, axis=axis, weights_shape=weights_shape) - for shape, dtype in _shape_and_dtypes(nonempty_shapes, number_dtypes) - for axis in list(range(-len(shape), len(shape))) + [None] - # `weights_shape` is either `None`, same as the averaged axis, or same as - # that of the input - for weights_shape in ([None, shape] if axis is None or len(shape) == 1 - else [None, (shape[axis],), shape]) - ], - keepdims=([False, True] if numpy_version >= (1, 23) else [None]), - returned=[False, True], - ) - def testAverage(self, shape, dtype, axis, weights_shape, returned, keepdims): - rng = jtu.rand_default(self.rng()) - kwds = dict(returned=returned) - if keepdims is not None: - kwds['keepdims'] = keepdims - if weights_shape is None: - np_fun = lambda x: np.average(x, axis, **kwds) - jnp_fun = lambda x: jnp.average(x, axis, **kwds) - args_maker = lambda: [rng(shape, dtype)] - else: - np_fun = lambda x, weights: np.average(x, axis, weights, **kwds) - jnp_fun = lambda x, weights: jnp.average(x, axis, weights, **kwds) - args_maker = lambda: [rng(shape, dtype), rng(weights_shape, dtype)] - np_fun = _promote_like_jnp(np_fun, inexact=True) - tol = {dtypes.bfloat16: 2e-1, np.float16: 1e-2, np.float32: 1e-5, - np.float64: 1e-12, np.complex64: 1e-5} - check_dtypes = shape is not jtu.PYTHON_SCALAR_SHAPE and numpy_version >= (1, 22) - if numpy_version == (1, 23, 0) and keepdims and weights_shape is not None and axis is not None: - # Known failure: https://github.com/numpy/numpy/issues/21850 - pass - else: - try: - self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, - check_dtypes=check_dtypes, tol=tol) - except ZeroDivisionError: - self.skipTest("don't support checking for ZeroDivisionError") - self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=check_dtypes, - rtol=tol, atol=tol) - @jtu.sample_product( [dict(arg=arg, dtype=dtype, ndmin=ndmin) for arg, dtypes in [ @@ -4014,7 +3957,7 @@ def testWhereThreeArgument(self, shapes, dtypes): rng = jtu.rand_default(self.rng()) args_maker = self._GetArgsMaker(rng, shapes, dtypes) def np_fun(cond, x, y): - return _promote_like_jnp(partial(np.where, cond))(x, y) + return jtu.promote_like_jnp(partial(np.where, cond))(x, y) with jtu.strict_promotion_if_dtypes_match(dtypes): self._CheckAgainstNumpy(np_fun, jnp.where, args_maker) self._CompileAndCheck(jnp.where, args_maker) 
@@ -4978,7 +4921,7 @@ def np_op(x1, x2): tol = {np.complex64: 1e-5, np.complex128: 1e-14} with jtu.strict_promotion_if_dtypes_match(dtypes): - self._CheckAgainstNumpy(_promote_like_jnp(np_op), jnp.logaddexp, args_maker, tol=tol) + self._CheckAgainstNumpy(jtu.promote_like_jnp(np_op), jnp.logaddexp, args_maker, tol=tol) self._CompileAndCheck(jnp.logaddexp, args_maker, rtol=tol, atol=tol) @jtu.sample_product( @@ -5004,7 +4947,7 @@ def np_op(x1, x2): tol = {np.complex64: 1e-5, np.complex128: 1e-14} with jtu.strict_promotion_if_dtypes_match(dtypes): - self._CheckAgainstNumpy(_promote_like_jnp(np_op), jnp.logaddexp2, args_maker, tol=tol) + self._CheckAgainstNumpy(jtu.promote_like_jnp(np_op), jnp.logaddexp2, args_maker, tol=tol) self._CompileAndCheck(jnp.logaddexp2, args_maker, rtol=tol, atol=tol) def testDefaultDtypes(self):
jax.numpy.average can't take tuple axis ### Description Tuple axis is documented: https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.average.html#jax.numpy.average And supported in numpy: ```python import numpy as np np.average(np.arange(0, 4), axis=(0,), weights=np.arange(1, 5)) # 2.0 ``` But it will result in an error in JAX: ```python from jax import numpy as jnp jnp.average(jnp.arange(0, 4), axis=(0,), weights=jnp.arange(1, 5)) ``` ``` TypeError: 'tuple' object cannot be interpreted as an integer ``` Possibly related: #6451 ### What jax/jaxlib version are you using? jax v0.3.17, jaxlib v0.3.15 ### Which accelerator(s) are you using? CPU ### Additional System Info Linux
@jakevdp are you up for taking this one?
2022-10-05T23:08:20
google/jax
12,691
google__jax-12691
[ "12688" ]
7a825362faebde6ca7ffbf3a50d59042d2a5ae0b
diff --git a/jax/interpreters/partial_eval.py b/jax/interpreters/partial_eval.py --- a/jax/interpreters/partial_eval.py +++ b/jax/interpreters/partial_eval.py @@ -898,7 +898,8 @@ def get_atom(t: JaxprTracer) -> Atom: def newvar(t: JaxprTracer) -> Var: var = gensym(type_substitute(t.aval)) - assert t_to_var.setdefault(id(t), var) is var + var_ = t_to_var.setdefault(id(t), var) + assert var is var_ return var def type_substitute(aval: AbstractValue) -> AbstractValue:
PYTHONOPTIMIZE=1 causes KeyErrors ### Description When I set the flag: `export PYTHONOPTIMIZE=1`, the code: ``` import jax.numpy as jnp m = jnp.eye(5) a = jnp.ones(5) jnp.linalg.solve(m,a) ``` returns the following error: ``` The above exception was the direct cause of the following exception: Traceback (most recent call last): File "error.py", line 6, in <module> jnp.linalg.solve(m,a) File "/home/ben/.../python3.8/site-packages/jax/_src/numpy/linalg.py", line 553, in solve return lax_linalg._solve(a, b) KeyError: 139645559661888 ``` ### What jax/jaxlib version are you using? jax v0.3.21, jaxlib v0.3.20 ### Which accelerator(s) are you using? CPU ### Additional system info Ubuntu, jax is installed into a site_packages repo for ROS. ### NVIDIA GPU info _No response_
Thanks for the report! I see the issue; there's an assert statement we're counting on running.
2022-10-07T06:16:17
google/jax
12,738
google__jax-12738
[ "12643" ]
41417eed6f3770e05aeb1d2500967c5314d5de08
diff --git a/jax/_src/tree_util.py b/jax/_src/tree_util.py --- a/jax/_src/tree_util.py +++ b/jax/_src/tree_util.py @@ -475,15 +475,27 @@ def _prefix_error(key_path: KeyPath, prefix_tree: Any, full_tree: Any, # point, and because prefix_tree is not a leaf, each can be flattened once): prefix_tree_children, prefix_tree_meta = flatten_one_level(prefix_tree) full_tree_children, full_tree_meta = flatten_one_level(full_tree) + prefix_tree_keys = _child_keys(prefix_tree) + full_tree_keys = _child_keys(full_tree) + try: + diff = set(prefix_tree_keys).symmetric_difference(set(full_tree_keys)) + except: + diff = None if len(prefix_tree_children) != len(full_tree_children): yield lambda name: ValueError( "pytree structure error: different numbers of pytree children at key path\n" f" {{name}}{key_path.pprint()}\n" f"At that key path, the prefix pytree {{name}} has a subtree of type\n" f" {type(prefix_tree)}\n" - f"with {len(prefix_tree_children)} children, " + f"with {len(prefix_tree_children)} child keys\n" + f" {' '.join(str(k.key) for k in prefix_tree_keys)}\n" f"but at the same key path the full pytree has a subtree of the same " - f"type but with {len(full_tree_children)} children.".format(name=name)) + f"type but with {len(full_tree_children)} child keys\n" + f" {' '.join(str(k.key) for k in full_tree_keys)}\n" + .format(name=name) + + ("" if diff is None else + f"so the symmetric difference on key sets is\n" + f" {' '.join(str(k.key) for k in diff)}")) return # don't look for more errors in this subtree # Or they may disagree if their roots have different pytree metadata: @@ -510,11 +522,10 @@ def _prefix_error(key_path: KeyPath, prefix_tree: Any, full_tree: Any, # If the root types and numbers of children agree, there must be an error # in a subtree, so recurse: - keys = _child_keys(prefix_tree) - keys_ = _child_keys(full_tree) - assert keys == keys_, \ - f"equal pytree nodes gave differing keys: {keys} and {keys_}" - for k, t1, t2 in zip(keys, prefix_tree_children, full_tree_children): + assert prefix_tree_keys == full_tree_keys, \ + ("equal pytree nodes gave differing prefix_tree_keys: " + f"{prefix_tree_keys} and {full_tree_keys}") + for k, t1, t2 in zip(prefix_tree_keys, prefix_tree_children, full_tree_children): yield from _prefix_error(key_path + k, t1, t2)
diff --git a/tests/pjit_test.py b/tests/pjit_test.py --- a/tests/pjit_test.py +++ b/tests/pjit_test.py @@ -2603,9 +2603,7 @@ def testAxisResourcesMismatch(self): " pjit out_axis_resources tree root\n" "At that key path, the prefix pytree pjit out_axis_resources has a " "subtree of type\n" - " <class 'list'>\n" - "with 2 children, but at the same key path the full pytree has a " - "subtree of the same type but with 3 children.") + " <class 'list'>\n") with self.assertRaisesRegex(ValueError, error): pjit(lambda x: x, (p,), [p, None])([x, x, x]) # Error, we raise a generic tree mismatch message diff --git a/tests/tree_util_test.py b/tests/tree_util_test.py --- a/tests/tree_util_test.py +++ b/tests/tree_util_test.py @@ -505,6 +505,13 @@ def test_different_num_children_multiple(self): with self.assertRaisesRegex(ValueError, expected): raise e2('in_axes') + def test_different_num_children_print_key_diff(self): + e, = prefix_errors({'a': 1}, {'a': 2, 'b': 3}) + expected = ("so the symmetric difference on key sets is\n" + " b") + with self.assertRaisesRegex(ValueError, expected): + raise e('in_axes') + def test_different_metadata(self): e, = prefix_errors({1: 2}, {3: 4}) expected = ("pytree structure error: different pytree metadata "
better error messages for bad tree structures on `pjit` `in_axis_resources` and `out_axis_resources` Constructing manual partition specs for models can be an involved and error-prone process right now. If you're missing keys (e.g. due to having used `param` instead of `param_with_axes` in `flax`), you'll get an error like this: ``` ValueError: pytree structure error: different numbers of pytree children at key path pjit out_axis_resources[<flat index 0>][<flat index 0>]['params'] At that key path, the prefix pytree pjit out_axis_resources has a subtree of type <class 'dict'> with 11 children, but at the same key path the full pytree has a subtree of the same type but with 17 children. ``` It would be very helpful if the error message told me _which keys_ were missing.
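The patch above addresses this by reporting the symmetric difference of the two key sets in the error message. A tiny illustration of that idea with made-up keys:

```python
prefix_keys = {'a'}
full_keys = {'a', 'b'}
# The symmetric difference names exactly the keys present in one tree but not
# the other, which is what the improved error message prints.
print(prefix_keys.symmetric_difference(full_keys))  # {'b'}
```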
2022-10-11T00:49:08
google/jax
12,757
google__jax-12757
[ "12756" ]
58d516c49ee75bbf5f5ba4731ea2a1261329e4ce
diff --git a/jax/_src/scipy/special.py b/jax/_src/scipy/special.py --- a/jax/_src/scipy/special.py +++ b/jax/_src/scipy/special.py @@ -69,7 +69,8 @@ def gammaincc(a, x): return lax.igammac(a, x) -@_wraps(osp_special.erf, module='scipy.special') +@_wraps(osp_special.erf, module='scipy.special', skip_params=["out"], + lax_description="Note that the JAX version does not support complex inputs.") def erf(x): x, = _promote_args_inexact("erf", x) return lax.erf(x)
jax.scipy.special.erf: Doesn't support complex arguments despite documentation promising that ### Description From the documentation ( https://jax.readthedocs.io/en/latest/_autosummary/jax.scipy.special.erf.html ): ``` Returns the error function of complex argument. ``` But: ``` from jax.scipy.special import erf erf(1j) ``` gives ``` TypeError: erf does not accept dtype complex64. Accepted dtypes are subtypes of floating. ``` So either the implementation or the documentation seems broken here. Needless to say, it would be very useful if `erf` did indeed support complex arguments. A related feature request: #9098 ### What jax/jaxlib version are you using? jax v0.3.21, jaxlib v0.3.20 ### Which accelerator(s) are you using? CPU ### Additional system info Python 3.10.6, linux ### NVIDIA GPU info _No response_
Thanks for the report! It's a bit confusing, but the documentation comes from scipy rather than JAX (notice the "*Original docstring below*" on the page you linked to). We should add a note to the docs mentioning the restriction on input types.
2022-10-11T21:28:26
google/jax
12,798
google__jax-12798
[ "12583" ]
8536faec6768d89e54481e145ebf0d675758716c
diff --git a/jax/_src/config.py b/jax/_src/config.py --- a/jax/_src/config.py +++ b/jax/_src/config.py @@ -285,6 +285,47 @@ def validate(new_val): return _StateContextManager(name, help, update_thread_local_hook, validate) + def define_int_state( + self, name: str, default: Optional[int], + help: str, update_global_hook: Optional[Callable[[str], None]] = None, + update_thread_local_hook: Optional[Callable[[Optional[str]], None]] \ + = None): + """Set up thread-local state and return a contextmanager for managing it. + Args: + name: string, converted to lowercase to define the name of the config + option (and absl flag). It is converted to uppercase to define the + corresponding shell environment variable. + enum_values: list of strings representing the possible values for the + option. + default: optional int, default value. + help: string, used to populate the flag help information as well as the + docstring of the returned context manager. + Returns: + A contextmanager to control the thread-local state value. + See docstring for ``define_bool_state``. + """ + name = name.lower() + default_env = os.getenv(name.upper(), default) + if default_env is not None: + try: + default = int(default_env) + except ValueError: + raise ValueError(f"Invalid value \"{default_env}\" for JAX flag {name}") + self.DEFINE_integer(name, default, help=help, update_hook=update_global_hook) + self._contextmanager_flags.add(name) + + def get_state(self): + val = getattr(_thread_local_state, name, unset) + return val if val is not unset else self._read(name) + setattr(Config, name, property(get_state)) + + def validate(new_val): + if new_val is not None and not isinstance(new_val, int): + raise ValueError(f'new int config value must be None or of type int, ' + f'got {new_val} of type {type(new_val)}') + + return _StateContextManager(name, help, update_thread_local_hook, validate) + def define_string_state( self, name: str, default: Optional[str], help: str, update_global_hook: Optional[Callable[[str], None]] = None, @@ -709,6 +750,16 @@ def _update_jax_array_thread_local(val): 'continue. Defaults to false so cache bugs or intermittent issues ' 'are non-fatal.')) +persistent_cache_min_instruction_count = config.define_int_state( + name='jax_persistent_cache_min_instruction_count', + default=6, + help=('The minimum number of instructions a computation needs to have to ' + 'be written to the persistent compilation cache. This threshold can ' + 'be raised to decrease the number of entries written to the cache. 
' + 'The (unoptimized) instruction count is meant to be a proxy for ' + 'compile time, so programs with longer compile times are still ' + 'cached.')) + hlo_source_file_canonicalization_regex = config.define_string_state( name='jax_hlo_source_file_canonicalization_regex', default=None, diff --git a/jax/_src/dispatch.py b/jax/_src/dispatch.py --- a/jax/_src/dispatch.py +++ b/jax/_src/dispatch.py @@ -1046,8 +1046,8 @@ def compile_or_get_cached(backend, computation: ir.Module, compile_options, else: compiled = backend_compile(backend, serialized_computation, compile_options, host_callbacks) - _cache_write(serialized_computation, module_name, compile_options, - backend, compiled) + _cache_write(computation, serialized_computation, module_name, + compile_options, backend, compiled) return compiled return backend_compile(backend, serialized_computation, compile_options, @@ -1072,16 +1072,31 @@ def _cache_read(computation: Union[str, bytes, ir.Module], module_name: str, return None -def _cache_write(computation: Union[str, bytes, ir.Module], module_name: str, - compile_options: CompileOptions, backend: Backend, - compiled: XlaLoadedExecutable): - """Writes `computation` to the persistent compilation cache.""" +def _cache_write(computation: ir.Module, + serialized_computation: Union[str, bytes, ir.Module], + module_name: str, compile_options: CompileOptions, + backend: Backend, compiled: XlaLoadedExecutable): + """Writes `serialized_computation` to the persistent compilation cache.""" # Avoid import cycle between jax and jax.experimental from jax.experimental.compilation_cache import compilation_cache as cc + min_instr_count = config.jax_persistent_cache_min_instruction_count + if min_instr_count: + count = _instruction_count(computation, max_count=min_instr_count) + if count < min_instr_count: + logging.info( + "Not writing persistent cache entry for '%s' because it has " + "fewer than %i instructions", module_name, min_instr_count) + return + else: + # Don't log `count` because it won't be more than max_count + logging.info( + "'%s' has at least %i instructions, writing persistent cache entry", + module_name, min_instr_count) + try: - cc.put_executable(module_name, computation, compile_options, compiled, - backend) + cc.put_executable(module_name, serialized_computation, compile_options, + compiled, backend) except Exception as ex: if config.jax_raise_persistent_cache_errors: raise @@ -1089,6 +1104,26 @@ def _cache_write(computation: Union[str, bytes, ir.Module], module_name: str, f"Error writing persistent compilation cache entry for " f"'{module_name}': {type(ex).__name__}: {ex}") + +def _instruction_count(module: ir.Module, max_count: Optional[int] = None): + + def _blocks_count(blocks, count): + for block in blocks: + for op in block.operations: + count += 1 + # Untested premature performance optimization + if max_count is not None and count >= max_count: + return max_count + for region in op.regions: + count = _blocks_count(region.blocks, count) + return count + + count = 0 + for func in module.body.operations: + count = _blocks_count(func.body.blocks, count) + return count + + def get_buffer_counts(out_avals, ordered_effects, has_unordered_effects): buffer_counts = [aval_to_num_buffers(aval) for aval in out_avals] if ordered_effects or has_unordered_effects:
diff --git a/tests/compilation_cache_test.py b/tests/compilation_cache_test.py --- a/tests/compilation_cache_test.py +++ b/tests/compilation_cache_test.py @@ -36,12 +36,14 @@ import numpy as np from jax.config import config -from jax._src.config import raise_persistent_cache_errors +from jax._src.config import (persistent_cache_min_instruction_count, + raise_persistent_cache_errors) config.parse_flags_with_absl() FLAGS = config.FLAGS [email protected]_config(jax_raise_persistent_cache_errors=True) [email protected]_config(jax_raise_persistent_cache_errors=True, + jax_persistent_cache_min_instruction_count=0) class CompilationCacheTest(jtu.JaxTestCase): def setUp(self): @@ -331,6 +333,24 @@ def test_cache_read_warning(self): "for 'jit__lambda_': RuntimeError: test error", str(w[0].message)) + def test_min_instruction_count(self): + with tempfile.TemporaryDirectory() as tmpdir: + cc.initialize_cache(tmpdir) + + with persistent_cache_min_instruction_count(20): + # 2 instructions at time of writing + jit(lambda x: x * x)(2) + files_in_cache = len(os.listdir(tmpdir)) + self.assertEqual(files_in_cache, 0) + + def f(xs): + c, b = jax.lax.scan(lambda c, x: (c + x, c + x), 0, xs) + return c + 1, b + # 32 instructions at time of writing + jit(f)(jax.numpy.ones(8)) + files_in_cache = len(os.listdir(tmpdir)) + self.assertEqual(files_in_cache, 1) + def create_new_debug_options(self, debug_options_obj): debug_options_obj.xla_cpu_enable_fast_math = False debug_options_obj.xla_cpu_fast_math_honor_infs = False
Persistent compilation cache can exceed GCS retry rate ### Description A Cloud TPU user is reporting the following error when using the persistent compilation cache: ``` tensorflow.python.framework.errors_impl.AbortedError: All 10 retry attempts failed. The last failure: Error executing an HTTP request: HTTP response code 429 with body '{ "error": { "code": 429, "message": "The rate of change requests to the object [...] exceeds the rate limit. Please reduce the rate of create, update, and delete requests.", "errors": [ { "message": "The rate of change requests to the object [...] exceeds the rate limit. Please reduce the rate of create, updat' when resuming upload gs://[...] ``` Ideas for fixing or mitigating: - Somehow batch requests? Is this possible? - Add an adjustable threshold to only cache larger programs. Small programs benefit less from caching, and maybe this would provide a knob for lowering the GCS request rate. - https://github.com/google/jax/issues/12582 - Other ideas? ### What jax/jaxlib version are you using? jax 0.3.17, jaxlib 0.3.15 ### Which accelerator(s) are you using? TPU ### Additional system info _No response_ ### NVIDIA GPU info _No response_
The adjustable threshold seems like a very good idea: from the absl logging output, it looks like it's dumping a huge number of very tiny programs that are probably not worth reading from or writing to cloud storage.
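A stripped-down sketch of the thresholding idea discussed here, which the patch above implements as `_instruction_count` plus the `jax_persistent_cache_min_instruction_count` flag. The names below are illustrative rather than JAX internals; the point is the early exit, so large programs never have to be fully counted:

```python
def worth_caching(ops, min_count=6):
    # Count instructions only until the threshold is reached; programs below
    # the threshold are cheap to recompile and skip the cache write entirely.
    count = 0
    for _ in ops:
        count += 1
        if count >= min_count:
            return True
    return False
```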
2022-10-14T01:11:43
google/jax
12,904
google__jax-12904
[ "11895" ]
280153334bca32a4fae1f75bd6f1abe1a8f9eacd
diff --git a/jax/_src/dlpack.py b/jax/_src/dlpack.py --- a/jax/_src/dlpack.py +++ b/jax/_src/dlpack.py @@ -70,4 +70,5 @@ def from_dlpack(dlpack): xla_shape = buf.xla_shape() assert not xla_shape.is_tuple() aval = core.ShapedArray(xla_shape.dimensions(), xla_shape.numpy_dtype()) - return dispatch.maybe_create_array_from_da(buf, aval, buf.device()) + return jnp.asarray( # asarray ensures dtype canonicalization + dispatch.maybe_create_array_from_da(buf, aval, buf.device()))
diff --git a/tests/array_interoperability_test.py b/tests/array_interoperability_test.py --- a/tests/array_interoperability_test.py +++ b/tests/array_interoperability_test.py @@ -137,6 +137,14 @@ def testJaxToTensorFlow(self, shape, dtype): y = tf.experimental.dlpack.from_dlpack(dlpack) self.assertAllClose(np, y.numpy()) + @unittest.skipIf(not tf, "Test requires TensorFlow") + def testTensorFlowToJaxInt64(self): + # See https://github.com/google/jax/issues/11895 + x = jax.dlpack.from_dlpack( + tf.experimental.dlpack.to_dlpack(tf.ones((2, 3), tf.int64))) + dtype_expected = jnp.int64 if config.x64_enabled else jnp.int32 + self.assertEqual(x.dtype, dtype_expected) + @jtu.sample_product( shape=all_shapes, dtype=torch_dtypes, @@ -182,6 +190,14 @@ def testJaxToTorch(self, shape, dtype): y = torch.utils.dlpack.from_dlpack(dlpack) self.assertAllClose(np, y.cpu().numpy()) + @unittest.skipIf(not torch, "Test requires PyTorch") + def testTorchToJaxInt64(self): + # See https://github.com/google/jax/issues/11895 + x = jax.dlpack.from_dlpack( + torch.utils.dlpack.to_dlpack(torch.ones((2, 3), dtype=torch.int64))) + dtype_expected = jnp.int64 if config.x64_enabled else jnp.int32 + self.assertEqual(x.dtype, dtype_expected) + @jtu.sample_product( shape=all_shapes, dtype=torch_dtypes,
BUG: can create 64-bit arrays using dlpack when x64 is disabled ### Description Repro: ```python import tensorflow as tf import jax import jax.dlpack assert not jax.config.jax_enable_x64 x = jax.dlpack.from_dlpack(tf.experimental.dlpack.to_dlpack(tf.ones(5, tf.int64))) assert x.dtype == jax.numpy.int64 ``` This can cause loud and confusing errors down the line when our MHLO expects 32-bit numbers but gets 64-bit. For example, with our illegal 64-bit `x` array, try doing `x[0]`. ### What jax/jaxlib version are you using? HEAD/0.3.15 ### Which accelerator(s) are you using? CPU ### Additional System Info _No response_
> This can cause loud and confusing errors down the line Arguably a separate issue to file regarding this: if we do somehow end up with unexpected 64-bit device (or abstract) arrays, we ideally catch that and err early on, ahead of lowering in particular. Reviving this issue. Hi, I experienced the exact same problem and the error I got was: ```python XlaRuntimeError: UNKNOWN: <unknown>:0: error: type of return operand 0 ('tensor<1xi64>') doesn't match function result type ('tensor<1xi32>') in function @main <unknown>:0: note: see current operation: "func.return"(%0) : (tensor<1xi64>) -> () <unknown>:0: note: in bytecode version 0 produced by: MLIR16.0.0git ``` That wasn't very intuitive to figure out. Furthermore, it seems that some tensorflow_datasets may use int64 to describe labels (like in tfds.load('mnist')) and if used together with dlpack it will result in this issue. Isn't there a way to protect users from this, like a more intuitive RuntimeError or Warning? For context, I will leave here [the colab](https://colab.research.google.com/drive/1dXdlgdduOQ9SY_MIahMtpOZk1E5WcmLb?usp=sharing) that I used to replicate this issue. @hawkinsp @jakevdp – So long as the x64 flag exists and means what it means, how should we behave on `jax.dlpack.from_dlpack` given a 64-bit-dtyped array, when x64 mode is off? Some options: 1. Error 2. Convert to 32-bit silently 3. Convert to 32-bit and `warnings.warn` I'll send a PR for what we think is best. Converting silently is consistent with how the x64 flag works in similar situations, e.g. `jnp.asarray(numpy_arr)`
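For reference, the `jnp.asarray` behavior cited at the end of the thread (and reused by the fix above, which wraps the dlpack result in `jnp.asarray`) looks like this; this assumes the default mode with `jax_enable_x64` disabled:

```python
import numpy as np
import jax.numpy as jnp

x = jnp.asarray(np.ones(3, dtype=np.int64))
print(x.dtype)  # int32: the 64-bit input is silently canonicalized when x64 is off
```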
2022-10-20T22:22:02
google/jax
12,957
google__jax-12957
[ "12944" ]
964988c968ac66814f131bd899bfb0ece7ee79c0
diff --git a/jax/_src/numpy/linalg.py b/jax/_src/numpy/linalg.py --- a/jax/_src/numpy/linalg.py +++ b/jax/_src/numpy/linalg.py @@ -601,17 +601,22 @@ def _lstsq(a: ArrayLike, b: ArrayLike, rcond: Optional[float], *, f"{b.ndim}-dimensional array given. Array must be one or two-dimensional") m, n = a.shape dtype = a.dtype - if rcond is None: - rcond = jnp.finfo(dtype).eps * max(n, m) + if a.size == 0: + s = jnp.empty(0, dtype=a.dtype) + rank = jnp.array(0, dtype=int) + x = jnp.empty((n, *b.shape[1:]), dtype=a.dtype) else: - rcond = jnp.where(rcond < 0, jnp.finfo(dtype).eps, rcond) - u, s, vt = svd(a, full_matrices=False) - mask = s >= jnp.array(rcond, dtype=s.dtype) * s[0] - rank = mask.sum() - safe_s = jnp.where(mask, s, 1).astype(a.dtype) - s_inv = jnp.where(mask, 1 / safe_s, 0)[:, jnp.newaxis] - uTb = jnp.matmul(u.conj().T, b, precision=lax.Precision.HIGHEST) - x = jnp.matmul(vt.conj().T, s_inv * uTb, precision=lax.Precision.HIGHEST) + if rcond is None: + rcond = jnp.finfo(dtype).eps * max(n, m) + else: + rcond = jnp.where(rcond < 0, jnp.finfo(dtype).eps, rcond) + u, s, vt = svd(a, full_matrices=False) + mask = s >= jnp.array(rcond, dtype=s.dtype) * s[0] + rank = mask.sum() + safe_s = jnp.where(mask, s, 1).astype(a.dtype) + s_inv = jnp.where(mask, 1 / safe_s, 0)[:, jnp.newaxis] + uTb = jnp.matmul(u.conj().T, b, precision=lax.Precision.HIGHEST) + x = jnp.matmul(vt.conj().T, s_inv * uTb, precision=lax.Precision.HIGHEST) # Numpy returns empty residuals in some cases. To allow compilation, we # default to returning full residuals in all cases. if numpy_resid and (rank < n or m <= n):
diff --git a/tests/linalg_test.py b/tests/linalg_test.py --- a/tests/linalg_test.py +++ b/tests/linalg_test.py @@ -890,6 +890,9 @@ def testMultiDot(self, shapes, dtype): ((4, 6), (4,)), ((6, 6), (6, 1)), ((8, 6), (8, 4)), + ((0, 3), (0,)), + ((3, 0), (3,)), + ((3, 1), (3, 0)), ] ], rcond=[-1, None, 0.5],
jax.numpy.linalg.lstsq throws IndexError with zero rows ### Description [jax.numpy.linalg.lstsq](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.linalg.lstsq.html) throws an IndexError when the number of rows is zero: ```python3 import numpy as np import jax.numpy as jnp A = np.zeros([0, 3]) b = np.zeros([0]) x, residuals, rank, s = np.linalg.lstsq(A, b, None) print(x) # [0. 0. 0.] A = jnp.zeros([0, 3]) b = jnp.zeros([0]) x, residuals, rank, s = jnp.linalg.lstsq(A, b, None) # IndexError: index is out of bounds for axis 0 with size 0 ``` ### What jax/jaxlib version are you using? jax 0.3.21, jaxlib 0.3.20 ### Which accelerator(s) are you using? CPU ### Additional system info Python 3.10.7, macOS 11.7 ### NVIDIA GPU info _No response_
Thanks for the report!
2022-10-24T21:06:30
google/jax
13,023
google__jax-13023
[ "13014" ]
66e75edd0b3318d11c4106a9c25eefbdf63ddef9
diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py --- a/jax/_src/numpy/lax_numpy.py +++ b/jax/_src/numpy/lax_numpy.py @@ -2259,20 +2259,6 @@ def arange(start: core.DimSize, stop: Optional[core.DimSize] = None, return array(np.arange(start, stop=stop, step=step, dtype=dtype)) -def _wrap_numpy_nullary_function(f): - """Adapts `f` to return a DeviceArray instead of an np.ndarray. - - `f` cannot have any non-static array arguments. - """ - @_wraps(f, update_doc=False) - def wrapper(*args, **kwargs): - args = [core.concrete_or_error(None, arg, f"the error occurred in argument {i} jnp.{f.__name__}()") - for i, arg in enumerate(args)] - kwargs = {key: core.concrete_or_error(None, val, f"the error occurred in argument '{key}' jnp.{f.__name__}()") - for key, val in kwargs.items()} - return asarray(f(*args, **kwargs)) - return wrapper - @overload def linspace(start: ArrayLike, stop: ArrayLike, num: int = 50, endpoint: bool = True, retstep: Literal[False] = False, @@ -4281,12 +4267,56 @@ def _static_idx(idx: slice, size: core.DimSize): return stop + k + 1, start + 1, -step, True -blackman = _wrap_numpy_nullary_function(np.blackman) -bartlett = _wrap_numpy_nullary_function(np.bartlett) -hamming = _wrap_numpy_nullary_function(np.hamming) -hanning = _wrap_numpy_nullary_function(np.hanning) -# TODO: lower `kaiser` via lax to allow non-constant beta values. -kaiser = _wrap_numpy_nullary_function(np.kaiser) +@_wraps(np.blackman) +def blackman(M: int) -> Array: + M = core.concrete_or_error(int, M, "M argument of jnp.blackman") + dtype = dtypes.canonicalize_dtype(float_) + if M <= 1: + return ones(M, dtype) + n = lax.iota(dtype, M) + return 0.42 - 0.5 * cos(2 * pi * n / (M - 1)) + 0.08 * cos(4 * pi * n / (M - 1)) + + +@_wraps(np.bartlett) +def bartlett(M: int) -> Array: + M = core.concrete_or_error(int, M, "M argument of jnp.bartlett") + dtype = dtypes.canonicalize_dtype(float_) + if M <= 1: + return ones(M, dtype) + n = lax.iota(dtype, M) + return 1 - abs(2 * n + 1 - M) / (M - 1) + + +@_wraps(np.hamming) +def hamming(M: int) -> Array: + M = core.concrete_or_error(int, M, "M argument of jnp.hamming") + dtype = dtypes.canonicalize_dtype(float_) + if M <= 1: + return ones(M, dtype) + n = lax.iota(dtype, M) + return 0.54 - 0.46 * cos(2 * pi * n / (M - 1)) + + +@_wraps(np.hanning) +def hanning(M: int) -> Array: + M = core.concrete_or_error(int, M, "M argument of jnp.hanning") + dtype = dtypes.canonicalize_dtype(float_) + if M <= 1: + return ones(M, dtype) + n = lax.iota(dtype, M) + return 0.5 * (1 - cos(2 * pi * n / (M - 1))) + + +@_wraps(np.kaiser) +def kaiser(M: int, beta: ArrayLike) -> Array: + M = core.concrete_or_error(int, M, "M argument of jnp.kaiser") + dtype = dtypes.canonicalize_dtype(float_) + if M <= 1: + return ones(M, dtype) + n = lax.iota(dtype, M) + alpha = 0.5 * (M - 1) + return i0(beta * sqrt(1 - ((n - alpha) / alpha) ** 2)) / i0(beta) + def _gcd_cond_fn(xs): x1, x2 = xs
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -2440,6 +2440,20 @@ def testHVDStack(self, shape, op, dtypes, array_input, out_dtype): self._CheckAgainstNumpy(jnp_fun, np_fun, args_maker) self._CompileAndCheck(jnp_fun, args_maker) + @jtu.sample_product( + [dict(name=name, **kwds) + for name in ['blackman', 'bartlett', 'hamming', 'hanning', 'kaiser'] + for kwds in ([dict(beta=1), dict(beta=0.5)] if name == 'kaiser' else [{}]) + ], + size = [0, 1, 5, 10], + ) + def testWindowFunction(self, name, size, **kwds): + jnp_fun = partial(getattr(jnp, name), size, **kwds) + np_fun = partial(getattr(np, name), size, **kwds) + args_maker = lambda: [] + self._CheckAgainstNumpy(jnp_fun, np_fun, args_maker) + self._CompileAndCheck(jnp_fun, args_maker) + @jtu.sample_product( [dict(shape=shape, fill_value_shape=fill_value_shape) for shape in array_shapes + [3, np.array(7, dtype=np.int32)]
Numpy window functions produce large constants Affected functions are: - `jnp.blackman` - `jnp.bartlett` - `jnp.hamming` - `jnp.hanning` - `jnp.kaiser` For example: ```python import jax import jax.numpy as jnp from functools import partial print(jax.jit(partial(jnp.hamming, 100)).lower().as_text()) ``` ``` module @jit__unnamed_wrapped_function_ { func.func public @main() -> tensor<100xf32> { %0 = mhlo.constant dense<[8.000000e-02, 0.0809261277, 0.0837007835, 0.0883127972, 0.0947435945, 0.102967285, 0.11295075, 0.124653794, 0.138029292, 0.153023377, 0.169575676, 0.187619552, 0.207082346, 0.227885664, 0.249945775, 0.273173809, 0.297476292, 0.322755307, 0.34890908, 0.375832349, 0.403416634, 0.43155089, 0.46012184, 0.489014417, 0.518112302, 0.547298372, 5.764550e-01, 0.605464816, 0.634211063, 0.662577927, 0.690451264, 0.71771878, 0.744270622, 0.76999998, 0.794803202, 0.818580448, 0.841235935, 0.862678468, 0.882821619, 0.901584446, 0.918891251, 0.934672355, 0.948864281, 0.961409866, 0.972258627, 0.981366753, 0.988697707, 0.994221866, 0.997917056, 0.999768435, 0.999768435, 0.997917056, 0.994221866, 0.988697707, 0.981366753, 0.972258627, 0.961409866, 0.948864281, 0.934672355, 0.918891251, 0.901584446, 0.882821619, 0.862678468, 0.841235935, 0.818580448, 0.794803202, 0.76999998, 0.744270622, 0.71771878, 0.690451264, 0.662577927, 0.634211063, 0.605464816, 5.764550e-01, 0.547298372, 0.518112302, 0.489014417, 0.46012184, 0.43155089, 0.403416634, 0.375832349, 0.34890908, 0.322755307, 0.297476292, 0.273173809, 0.249945775, 0.227885664, 0.207082346, 0.187619552, 0.169575676, 0.153023377, 0.138029292, 0.124653794, 0.11295075, 0.102967285, 0.0947435945, 0.0883127972, 0.0837007835, 0.0809261277, 8.000000e-02]> : tensor<100xf32> return %0 : tensor<100xf32> } } ```
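The fix in the patch above computes the window from an iota rather than embedding a precomputed constant. A sketch of that approach for `hamming` (the formula is taken from the patch; `hamming_sketch` is an illustrative name, and the `M <= 1` edge case handled by the real implementation is omitted):

```python
from functools import partial
import jax
import jax.numpy as jnp
from jax import lax

def hamming_sketch(M):
    n = lax.iota(jnp.float32, M)
    return 0.54 - 0.46 * jnp.cos(2 * jnp.pi * n / (M - 1))

# Lowering this no longer bakes a 100-element dense constant into the module.
print(jax.jit(partial(hamming_sketch, 100)).lower().as_text())
```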
2022-10-27T22:00:57
google/jax
13,080
google__jax-13080
[ "12263" ]
eb9ac0d64f136ef9ab8e46ea5f60218978715aa7
diff --git a/jax/experimental/jet.py b/jax/experimental/jet.py --- a/jax/experimental/jet.py +++ b/jax/experimental/jet.py @@ -329,12 +329,20 @@ def linear_prop(prim, primals_in, series_in, **params): deflinear(lax.rev_p) deflinear(lax.transpose_p) deflinear(lax.slice_p) -deflinear(lax.dynamic_slice_p) deflinear(lax.reduce_sum_p) deflinear(lax.reduce_window_sum_p) deflinear(lax.fft_p) deflinear(dispatch.device_put_p) +def _dynamic_slice_jet_rule(primals_in, series_in, **params): + operand, *start_indices = primals_in + primal_out = lax.dynamic_slice_p.bind(operand, *start_indices, **params) + series_out = [lax.dynamic_slice_p.bind(terms_in[0], *start_indices, **params) + for terms_in in zip(*series_in)] + return primal_out, series_out + +jet_rules[lax.dynamic_slice_p] = _dynamic_slice_jet_rule + def _dynamic_update_slice_jet_rule(primals_in, series_in, **params): operand, update, *start_indices = primals_in primal_out = lax.dynamic_update_slice_p.bind(operand, update, *start_indices)
diff --git a/tests/jet_test.py b/tests/jet_test.py --- a/tests/jet_test.py +++ b/tests/jet_test.py @@ -311,7 +311,7 @@ def test_cummax(self): self.unary_check(partial(lax.cummax, axis=0)) @jtu.skip_on_devices("tpu") def test_cummin(self): self.unary_check(partial(lax.cummin, axis=0)) @jtu.skip_on_devices("tpu") - def test_dynamic_slice(self): self.unary_check(partial(lax.dynamic_slice, start_indices=(0,0), slice_sizes=(1,1))) + def test_dynamic_slice(self): self.unary_check(partial(lax.dynamic_slice, start_indices=(1,2), slice_sizes=(1,1))) @jtu.skip_on_devices("tpu") def test_dynamic_update_slice(self): self.unary_check(partial(lax.dynamic_update_slice, start_indices=(1,2), update=jnp.arange(6.0).reshape(2, 3)))
jet of dynamic slice does not match jet of gather This is blocking #12219 For example: ```python import jax.numpy as jnp from jax import lax from jax.experimental.jet import jet f_gather = lambda x: x[1:] f_dynamic_slice = lambda x: lax.dynamic_slice(x, (1,), (len(x) - 1,)) # These two functions by design have identical behavior for 1D inputs: x = jnp.arange(4.0) print(f_gather(x)) # [1. 2. 3.] print(f_dynamic_slice(x)) # [1. 2. 3.] # However, their jet transforms do not match h = 0.1 * x print(jet(f_gather, (x,), ((h,),))) # (DeviceArray([1., 2., 3.], dtype=float32), [DeviceArray([0.1, 0.2, 0.3], dtype=float32)]) print(jet(f_dynamic_slice, (x,), ((h,),))) # (DeviceArray([1., 2., 3.], dtype=float32), [DeviceArray([0. , 0.1, 0.2], dtype=float32)]) ``` It looks like the jet rule for dynamic_slice is not behaving correctly.
I think it just needs a (fairly) trivial `jet` rule of its own. `dynamic_slice` doesn't like having its `index` zeroed out by the `deflinear` logic.
2022-11-02T19:34:07
google/jax
13,103
google__jax-13103
[ "13099" ]
2e384ce58f9842cbcb3c8b2426e09888d10f5dce
diff --git a/jax/_src/api.py b/jax/_src/api.py --- a/jax/_src/api.py +++ b/jax/_src/api.py @@ -2531,8 +2531,30 @@ def fun(*tangents): return apply_flat_fun(fun, io_tree, *py_args) -def _vjp_pullback_wrapper(cotangent_dtypes, cotangent_shapes, - io_tree, fun, py_args): +def _vjp_pullback_wrapper(name, cotangent_dtypes, cotangent_shapes, io_tree, + fun, *py_args_): + if len(py_args_) != 1: + msg = (f"The function returned by `jax.vjp` applied to {name} was called " + f"with {len(py_args_)} arguments, but functions returned by " + "`jax.vjp` must be called with a single argument corresponding to " + f"the single value returned by {name} (even if that returned " + "value is a tuple or other container).\n" + "\n" + "For example, if we have:\n" + "\n" + " def f(x):\n" + " return (x, x)\n" + " _, f_vjp = jax.vjp(f, 1.0)\n" + "\n" + "the function `f` returns a single tuple as output, and so we call " + "`f_vjp` with a single tuple as its argument:\n" + "\n" + " x_bar, = f_vjp((2.0, 2.0))\n" + "\n" + "If we instead call `f_vjp(2.0, 2.0)`, with the values 'splatted " + "out' as arguments rather than in a tuple, this error can arise.") + raise TypeError(msg) + py_args, = py_args_ in_tree_expected, out_tree = io_tree args, in_tree = tree_flatten(py_args) if in_tree != in_tree_expected: @@ -2637,9 +2659,8 @@ def _vjp(fun: lu.WrappedFun, *primals, has_aux=False, reduce_axes=()): ct_shapes = [np.shape(x) for x in out_primal] # Ensure that vjp_py is a PyTree so that we can pass it from the forward to the # backward pass in a custom VJP. - vjp_py = Partial(partial(_vjp_pullback_wrapper, - ct_dtypes, ct_shapes, - (out_tree, in_tree)), + vjp_py = Partial(partial(_vjp_pullback_wrapper, fun.__name__, + ct_dtypes, ct_shapes, (out_tree, in_tree)), out_vjp) if not has_aux: return out_primal_py, vjp_py
diff --git a/tests/api_test.py b/tests/api_test.py --- a/tests/api_test.py +++ b/tests/api_test.py @@ -4095,6 +4095,14 @@ def h(x): b(8) # don't crash + def test_vjp_multiple_arguments_error_message(self): + # https://github.com/google/jax/issues/13099 + def foo(x): + return (x, x) + _, f_vjp = jax.vjp(foo, 1.0) + with self.assertRaisesRegex(TypeError, "applied to foo"): + f_vjp(1.0, 1.0) + @jtu.with_config(jax_experimental_subjaxpr_lowering_cache=True) class SubcallTraceCacheTest(jtu.JaxTestCase):
Inscrutable custom VJP error ### Description Here's my MWE. I left in the type annotations and the docstring in case they help with making sense of the code. ```python from functools import partial from typing import Any, Callable, Tuple, TypeVar, cast import jax.numpy as jnp from jax import custom_vjp, vjp from jax.tree_util import tree_map XT = TypeVar('XT', bound=Tuple[Any, ...]) Y = TypeVar('Y') @partial(custom_vjp, nondiff_argnums=(0,)) def cotangent_combinator(f: Callable[..., Tuple[XT, Y]], args_tuples: Tuple[Tuple[Any, ...], ...]) -> Tuple[XT, Y]: """ Args: f: A function that accepts positional arguments and returns xs, y where xs is a tuple of length n. args_tuples: n copies of the same tuple of positional arguments accepted by f. Returns: The pair (xs, y). The purpose of the cotangent combinator is to take cotangents of each of the elements of x, and send them back through to each of the corresponding argument tuples. """ return f(*args_tuples[0]) def _cotangent_combinator_fwd(f: Callable[..., Tuple[XT, Y]], args_tuples: Tuple[Tuple[Any, ...], ...] ) -> Tuple[Tuple[XT, Y], Callable[[XT, Y], Tuple[Any, ...]]]: return vjp(f, *args_tuples[0]) def _cotangent_combinator_bwd(f: Callable[..., Tuple[XT, Y]], f_vjp: Callable[[XT, Y], Tuple[Any, ...]], xy_bar: Tuple[XT, Y] ) -> Tuple[Any, ...]: xs_bar, y_bar = xy_bar n = len(xs_bar) xs_zero = tuple(tree_map(jnp.zeros_like, x_bar) for x_bar in xs_bar) all_args_bar = [] for i, x_bar in enumerate(xs_bar): this_xs_bar = cast(XT, (xs_zero[:i] + (x_bar,) + xs_zero[i + 1:])) print_generic(this_xs_bar, y_bar) this_args_bar = f_vjp(this_xs_bar, y_bar) all_args_bar.append(this_args_bar) return tuple(all_args_bar) cotangent_combinator.defvjp(_cotangent_combinator_fwd, _cotangent_combinator_bwd) def f(x: Any) -> Tuple[Any, None]: return (x ** 2, x ** 2), None o = jnp.ones(()) result, f_vjp = vjp(partial(cotangent_combinator, f), ((-1.0,), (-1.0,))) result_bar = (2 * o, 3 * o), None from tjax import print_generic print_generic(result=result) print_generic(result_bar=result_bar) f_vjp(result_bar) ``` gives ``` TypeError: _vjp_pullback_wrapper() takes 5 positional arguments but 6 were given ``` I have no idea where this is coming from since I don't have anything like that in my code. What am I doing wrong? ### What jax/jaxlib version are you using? jax 0.3.23; jaxlib 0.3.22 ### Which accelerator(s) are you using? CPU ### Additional system info Python 3.10
A terrible error message indeed! In the original repro (before edit, sorry that's just when I copied it) the issue was that we were calling ```python f_vjp((2.0, 0.0), None) ``` when we actually needed ```python f_vjp(((2.0, 0.0), None)) ``` Notice the extra parentheses. I think `f_vjp` always takes a single argument (corresponding to how Python functions always return a single value, though that value might be a tuple or pytree or whatever). Let me try the new repro... but if the issue is the same, then at least we know how to fix the code, though it's still a TODO to improve the error message here. The callable returned by `jax.vjp` is a `Partial` over a function which takes 5 arguments (named `_vjp_pullback_wrapper`) where all but the last argument is supplied by the `Partial`. Because of the `Partial`, we don't error quite as early as you might expect (and apparently tracebacks are messed up, because we subclass `functools.partial` perhaps?). I think in the new repro we have a repeat of the same issue on this line: ```python this_args_bar = f_vjp(this_xs_bar, y_bar) ``` where we instead want ```python this_args_bar = f_vjp((this_xs_bar, y_bar)) ``` Yeah, you got it! Thank you. Please let me know if/when I should close this. I'll send a PR with an improved error message momentarily. Let's let the PR close the issue. (Please review the PR if you feel like it!)
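Putting the correction together as a self-contained snippet (this mirrors the example used in the improved error message added by the patch above):

```python
import jax

def f(x):
    return (x, x)

_, f_vjp = jax.vjp(f, 1.0)
x_bar, = f_vjp((2.0, 2.0))   # one argument: the cotangent for f's single (tuple) output
# f_vjp(2.0, 2.0)            # splatting the cotangents out as two arguments triggers the error
```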
2022-11-03T22:15:58
google/jax
13,127
google__jax-13127
[ "13124" ]
2932c1ef06f96ba5c73a82135b76dcb0ce9ecb1f
diff --git a/jax/_src/random.py b/jax/_src/random.py --- a/jax/_src/random.py +++ b/jax/_src/random.py @@ -1327,10 +1327,12 @@ def categorical(key: KeyArray, shape = tuple(shape) _check_shape("categorical", shape, batch_shape) - sample_shape = shape[:len(shape)-len(batch_shape)] + shape_prefix = shape[:len(shape)-len(batch_shape)] + logits_shape = list(shape[len(shape) - len(batch_shape):]) + logits_shape.insert(axis % len(logits_arr.shape), logits_arr.shape[axis]) return jnp.argmax( - gumbel(key, sample_shape + logits_arr.shape, logits_arr.dtype) + - lax.expand_dims(logits_arr, tuple(range(len(sample_shape)))), + gumbel(key, (*shape_prefix, *logits_shape), logits_arr.dtype) + + lax.expand_dims(logits_arr, tuple(range(len(shape_prefix)))), axis=axis)
diff --git a/tests/random_test.py b/tests/random_test.py --- a/tests/random_test.py +++ b/tests/random_test.py @@ -1429,6 +1429,19 @@ def f(): # just lower, don't run, takes too long jax.jit(f).lower() + @jtu.sample_product(shape=[(3, 4)], + logits_shape_base=[(3, 4), (3, 1), (1, 4)], + axis=[-3, -2, -1, 0, 1, 2]) + def test_categorical_shape_argument(self, shape, logits_shape_base, axis): + # https://github.com/google/jax/issues/13124 + logits_shape = list(logits_shape_base) + logits_shape.insert(axis % (len(logits_shape_base) + 1), 10) + assert logits_shape[axis] == 10 + logits = jnp.ones(logits_shape) + samples = jax.random.categorical(jax.random.PRNGKey(0), logits=logits, + axis=axis, shape=shape) + self.assertEqual(samples.shape, shape) + class KeyArrayTest(jtu.JaxTestCase): # Key arrays involve:
jax.random.categorical doesn't respect shape parameter with broadcasted logits ### Description The docs for `jax.random.categorical` say that the output of `jax.random.categorical` is `shape` when `shape` is provided. However, when provided `logits` which have to be broadcasted with `shape`, it just ignores the shape parameter after checking that it's broadcast compatible. I believe this can be fixed by broadcasting logits appropriately. ``` import jax out = jax.random.categorical(jax.random.PRNGKey(0), logits=jax.numpy.ones( (4, 1, 10)), axis=-1, shape=(4, 8)) # out.shape is (4, 1), should be (4, 8) assert out.shape == (4, 8), f"out.shape is {out.shape}, should be (4, 8)" ``` ### What jax/jaxlib version are you using? 0.3.23 ### Which accelerator(s) are you using? CPU ### Additional system info _No response_ ### NVIDIA GPU info _No response_
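Until the fix lands, a possible user-level workaround along the lines the issue suggests is to broadcast the logits to the target batch shape yourself before sampling. The snippet below is a sketch of that idea, not the change made in the patch above:

```python
import jax
import jax.numpy as jnp

logits = jnp.ones((4, 1, 10))
logits_full = jnp.broadcast_to(logits, (4, 8, 10))
out = jax.random.categorical(jax.random.PRNGKey(0), logits_full, axis=-1)
assert out.shape == (4, 8)
```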
2022-11-05T02:49:43
google/jax
13,144
google__jax-13144
[ "12514" ]
1e7e8e8d5cc33027dcab75b040acc04c0eca132e
diff --git a/jax/_src/api.py b/jax/_src/api.py --- a/jax/_src/api.py +++ b/jax/_src/api.py @@ -413,7 +413,9 @@ def _prepare_jit(fun, static_argnums, static_argnames, donate_argnums, f, args = argnums_partial_except(f, static_argnums, args, allow_invalid=True) f, kwargs = argnames_partial_except(f, static_argnames, kwargs) args_flat, in_tree = tree_flatten((args, kwargs)) - if donate_argnums: + # Argument donation is incompatible with jax_debug_nans because it re-uses + # donated buffers when rerunning the user's function. + if donate_argnums and not config.jax_debug_nans: donated_invars = donation_vector(donate_argnums, args, kwargs) else: donated_invars = (False,) * len(args_flat) @@ -2042,7 +2044,7 @@ def _prepare_pmap(fun, in_axes, out_axes, static_broadcasted_tuple, dyn_global_arg_shapes = global_arg_shapes args, in_tree = tree_flatten((dyn_args, kwargs)) - if donate_tuple: + if donate_tuple and not config.jax_debug_nans: donated_invars = donation_vector(donate_tuple, dyn_args, kwargs) else: donated_invars = (False,) * len(args) diff --git a/jax/experimental/pjit.py b/jax/experimental/pjit.py --- a/jax/experimental/pjit.py +++ b/jax/experimental/pjit.py @@ -385,7 +385,7 @@ def infer_params(*args, _global_avals=False, **kwargs): args_flat, in_tree = tree_flatten(dyn_args) flat_fun, out_tree = flatten_fun_nokwargs(f, in_tree) - if donate_argnums: + if donate_argnums and not config.jax_debug_nans: donated_invars = donation_vector(donate_argnums, dyn_args, ()) else: donated_invars = (False,) * len(args_flat)
diff --git a/tests/debug_nans_test.py b/tests/debug_nans_test.py --- a/tests/debug_nans_test.py +++ b/tests/debug_nans_test.py @@ -24,11 +24,12 @@ from jax._src import test_util as jtu from jax import numpy as jnp from jax.experimental import pjit -import jax._src.lib +from jax._src.lib import xla_client from jax.config import config config.parse_flags_with_absl() +xla_extension_version = getattr(xla_client, "_version", 0) class DebugNaNsTest(jtu.JaxTestCase): @@ -158,6 +159,44 @@ def testPjit(self): ans = f(jnp.array([0., 1.])) ans.block_until_ready() + def testDebugNansJitWithDonation(self): + # https://github.com/google/jax/issues/12514 + if jtu.device_under_test() == "cpu" and xla_extension_version < 102: + raise SkipTest("CPU buffer donation requires jaxlib > 0.3.22") + + a = jnp.array(0.) + with self.assertRaises(FloatingPointError): + ans = jax.jit(lambda x: 0. / x, donate_argnums=(0,))(a) + ans.block_until_ready() + + def testDebugNansPmapWithDonation(self): + if jtu.device_under_test() == "cpu" and xla_extension_version < 102: + raise SkipTest("CPU buffer donation requires jaxlib > 0.3.22") + + a = jnp.zeros((1,)) + with self.assertRaises(FloatingPointError): + ans = jax.pmap(lambda x: 0. / x, donate_argnums=(0,))(a) + ans.block_until_ready() + + @jtu.ignore_warning(message=".*is an experimental.*") + def testDebugNansPjitWithDonation(self): + if jtu.device_under_test() == "cpu" and xla_extension_version < 102: + raise SkipTest("CPU buffer donation requires jaxlib > 0.3.22") + + if jax.device_count() < 2: + raise SkipTest("test requires >=2 devices") + + p = pjit.PartitionSpec('x') + f = pjit.pjit(lambda x: 0. / x, + in_axis_resources=p, + out_axis_resources=p, + donate_argnums=(0,)) + + with jax.experimental.maps.Mesh(np.array(jax.local_devices()[:2]), ('x',)): + with self.assertRaises(FloatingPointError): + ans = f(jnp.array([0., 1.])) + ans.block_until_ready() + # TODO(skye): add parallel inf tests, ideally by factoring out test logic class DebugInfsTest(jtu.JaxTestCase):
jax_debug_nans interaction with buffer donation When we set `jax_debug_nans`, we might try to re-run jitted computations. But if those jitted computations involve buffer donation, then we'll get a low-level runtime error about repeatedly donating the same input: ``` Invalid nan value encountered in the output of a C++-jit/pmap function. Calling the de-optimized version. [10:13:36 pjrt_stream_executor_client.cc:2130] Execution of replica 0 failed: INVALID_ARGUMENT: Invalid buffer passed to Execute() as argument 0 to replica 0: INVALID_ARGUMENT: Donation requested for invalid buffer ``` TODO: make a repro
As a workaround: anyone encountering this issue should just disable argument donation! Thanks for the information @mattjj! I encountered the same error, but don't know how to fix it in code. Can you give more detail on how to disable argument donation? Perhaps when debug_nans is set, we should ignore donate_argnums... @Abandonist is there any use of the `donate_argnums` parameter to `jit` in your code? If so, just delete it. Or is it in a library you're calling? > Perhaps when debug_nans is set, we should ignore donate_argnums... > > @Abandonist is there any use of the `donate_argnums` parameter to `jit` in your code? If so, just delete it. Or is it in a library you're calling? It's fixed, thank you very much! @mattjj This parameter was found in the invoked library. Interestingly, the same code runs well in Colab but doesn't work in Kaggle. Maybe it is caused by different CUDA versions?
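A minimal sketch of the workaround discussed above, assuming the NaN-producing computation is under the user's control: drop `donate_argnums` (or turn off `jax_debug_nans`) so the de-optimized re-run doesn't try to reuse a donated buffer:

```python
import jax

# Re-running this under jax_debug_nans fails, because the donated input buffer
# is already consumed by the first execution.
f_donating = jax.jit(lambda x: 0.0 / x, donate_argnums=(0,))

# Without donation the function can safely be re-run to locate the NaN.
f_debuggable = jax.jit(lambda x: 0.0 / x)
```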
2022-11-07T16:40:58
google/jax
13,153
google__jax-13153
[ "13150" ]
96f6c1c9d414e6ebc54ff7f08115a9a9a6d6a8f8
diff --git a/jax/experimental/sparse/bcoo.py b/jax/experimental/sparse/bcoo.py --- a/jax/experimental/sparse/bcoo.py +++ b/jax/experimental/sparse/bcoo.py @@ -1831,7 +1831,7 @@ def bcoo_reshape(mat, *, new_sizes, dimensions): if mat.n_batch: batch_size = np.prod(mat.shape[:mat.n_batch]) cuml_shape = np.cumprod(new_sizes) - if batch_size not in cuml_shape: + if batch_size != 1 and batch_size not in cuml_shape: raise ValueError("bcoo_reshape: new shape cannot mix batch and sparse dimensions; " f"got shape={mat.shape} new_shape={new_sizes} with n_batch={mat.n_batch}") ind = cuml_shape.searchsorted(batch_size, side='right') @@ -1853,16 +1853,19 @@ def bcoo_reshape(mat, *, new_sizes, dimensions): dimensions=(*batch_perm, *range(mat.n_batch, mat.indices.ndim))) # Reshape the sparse dimensions: this is accomplished by re-indexing. - index_cols = tuple(indices[..., i] for i in sparse_perm) - sparse_shape = tuple(mat.shape[mat.n_batch + i] for i in sparse_perm) - flat_indices = jnp.ravel_multi_index(index_cols, dims=sparse_shape, mode='clip') - new_index_cols = jnp.unravel_index(flat_indices, sparse_sizes) - new_indices = jnp.concatenate([col[..., None] for col in new_index_cols], axis=-1) - with jax.numpy_rank_promotion('allow'): - oob_indices = (indices >= jnp.array(mat.shape[mat.n_batch:], dtype=indices.dtype)).any(-1) - new_indices = new_indices.at[oob_indices].set(jnp.array(sparse_sizes, dtype=new_indices.dtype)) - - return BCOO((data, new_indices), shape=new_sizes) + if not sparse_sizes: + indices = jnp.empty_like(indices, shape=(*indices.shape[:-1], 0)) + elif sparse_perm: + index_cols = tuple(indices[..., i] for i in sparse_perm) + sparse_shape = tuple(mat.shape[mat.n_batch + i] for i in sparse_perm) + flat_indices = jnp.ravel_multi_index(index_cols, dims=sparse_shape, mode='clip') + with jax.numpy_rank_promotion('allow'): + oob_indices = (indices >= jnp.array(mat.shape[mat.n_batch:], dtype=indices.dtype)).any(-1) + new_index_cols = jnp.unravel_index(flat_indices, sparse_sizes) + indices = jnp.concatenate([col[..., None] for col in new_index_cols], axis=-1) + indices = indices.at[oob_indices].set(jnp.array(sparse_sizes, dtype=indices.dtype)) + + return BCOO((data, indices), shape=new_sizes) def bcoo_slice(mat, *, start_indices: Sequence[int], limit_indices: Sequence[int],
diff --git a/tests/sparsify_test.py b/tests/sparsify_test.py --- a/tests/sparsify_test.py +++ b/tests/sparsify_test.py @@ -338,7 +338,10 @@ def testSparseConcatenate(self, shapes, func, n_batch): [dict(shape=shape, new_shape=new_shape, n_batch=n_batch, n_dense=n_dense) for shape, new_shape, n_batch, n_dense in [ [(6,), (2, 3), 0, 0], + [(6,), (2, 3), 1, 0], [(1, 4), (2, 2), 0, 0], + [(4, 1), (4,), 1, 0], + [(1, 4), (4,), 1, 0], [(12, 2), (2, 3, 4), 0, 0], [(1, 3, 2), (2, 3), 0, 0], [(1, 6), (2, 3, 1), 0, 0],
[sparse] bcoo_reshape breaks for n_sparse=0 ```python import jax.numpy as jnp from jax.experimental import sparse x = sparse.BCOO.fromdense(jnp.ones((4, 1)), n_batch=1) print(x.reshape(4,)) # ValueError: Need at least one array to concatenate. x = sparse.BCOO.fromdense(jnp.arange(4), n_batch=1) print(x.reshape(2, 2)) # ValueError: Need at least one array to concatenate. ```
2022-11-08T17:46:44
google/jax
13,189
google__jax-13189
[ "12728" ]
19d76a781848bb5be3bc9223c438ad9a3e5b27d8
diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py --- a/jax/_src/numpy/lax_numpy.py +++ b/jax/_src/numpy/lax_numpy.py @@ -4367,11 +4367,12 @@ def gcd(x1: ArrayLike, x2: ArrayLike) -> Array: def lcm(x1: ArrayLike, x2: ArrayLike) -> Array: _check_arraylike("lcm", x1, x2) x1, x2 = _promote_dtypes(x1, x2) + x1, x2 = abs(x1), abs(x2) if not issubdtype(_dtype(x1), integer): raise ValueError("Arguments to jax.numpy.lcm must be integers.") d = gcd(x1, x2) return where(d == 0, _lax_const(d, 0), - abs(multiply(x1, floor_divide(x2, d)))) + multiply(x1, floor_divide(x2, d))) @_wraps(np.extract)
diff --git a/tests/lax_numpy_operators_test.py b/tests/lax_numpy_operators_test.py --- a/tests/lax_numpy_operators_test.py +++ b/tests/lax_numpy_operators_test.py @@ -282,6 +282,7 @@ def op_record(name, nargs, dtypes, shapes, rng_factory, diff_modes, all_shapes, jtu.rand_small_positive, []), op_record("gcd", 2, int_dtypes_no_uint64, all_shapes, jtu.rand_default, []), op_record("lcm", 2, int_dtypes_no_uint64, all_shapes, jtu.rand_default, []), + op_record("lcm", 2, [np.int8], all_shapes, jtu.rand_not_small, []) ] JAX_BITWISE_OP_RECORDS = [
jax.numpy.lcm int8 overflow difference with np ### Description The following code in jax ```python import jax.numpy as jnp a = jnp.array([5], dtype=jnp.int8) b = jnp.array([26], dtype=jnp.int8) print(jnp.lcm(a, b)) ``` produces `[126]`, while numpy (checked in 1.23.0 and 1.21.6) (and pytorch 1.11 and 1.12) gives `[-126]`, keeping true to the overflow for an `int8` dtype. Is this difference in results between **`np`** and **`jnp`** desired here? Surely, `jax.numpy` cannot follow the NumPy API in every single output, but this is something achievable in this case. The cause is perhaps that jax uses `abs` at the end of the lcm operation. I suppose it should suffice to apply `abs` to `x1` and `x2` as `gcd` does. Note: mathematically, lcm(5, 26) = 130, so getting a negative value (as with numpy) can at least signal a warning, but getting a positive (yet incorrect) value close to the correct value can easily mislead. The int8 dtype was just picked for the sake of a simpler example, but I am guessing this won't be restricted to int8. If you want, I can create a PR for this, and in case I'm totally mistaken, please do enlighten me. Thank you very much. ### What jax/jaxlib version are you using? jax 0.3.21, jaxlib v0.3.20. Also checked with the jax master repo on GitHub. ### Which accelerator(s) are you using? CPU ### Additional system info Python 3.8.10, Ubuntu 20, Google Colab ### NVIDIA GPU info _No response_
Thanks for the report. The reason for the difference is the `abs` call here: https://github.com/google/jax/blob/ec5b1c93d73da2fefb99940f629b41a5f3540de2/jax/_src/numpy/lax_numpy.py#L4192 I'm not sure why the `abs` is there, but I think in principle we could remove it in order to match numpy's behavior in this. > Thanks for the report. The reason for the difference is the `abs` call here: > > https://github.com/google/jax/blob/ec5b1c93d73da2fefb99940f629b41a5f3540de2/jax/_src/numpy/lax_numpy.py#L4192 > > I'm not sure why the `abs` is there, but I think in principle we could remove it in order to match numpy's behavior in this. Exactly. I checked and removed this abs call, and the results do match np. Let me know if this is approved for a PR, alongside any other changes if required. So long as it doesn't break any existing tests, that sounds good.
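For readers of this thread, here is a minimal sketch (not the merged implementation) of the suggested change: take `abs` of the inputs, as `gcd` does, and drop the trailing `abs`, so the int8 product wraps the same way NumPy's does.

```python
import numpy as np
import jax.numpy as jnp

def lcm_sketch(x1, x2):
    x1, x2 = jnp.abs(x1), jnp.abs(x2)             # abs on the inputs, as gcd does
    d = jnp.gcd(x1, x2)
    return jnp.where(d == 0, 0, x1 * (x2 // d))   # no abs on the product

a = jnp.array([5], dtype=jnp.int8)
b = jnp.array([26], dtype=jnp.int8)
print(lcm_sketch(a, b))   # expected to wrap like int8, matching numpy's [-126]
print(np.lcm(np.array([5], dtype=np.int8), np.array([26], dtype=np.int8)))  # [-126] per the report
```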
2022-11-10T19:07:45
google/jax
13,288
google__jax-13288
[ "13267" ]
7495a9e3705d67c498723240cfed3f14c5916567
diff --git a/jax/_src/scipy/special.py b/jax/_src/scipy/special.py --- a/jax/_src/scipy/special.py +++ b/jax/_src/scipy/special.py @@ -29,6 +29,7 @@ from jax._src.numpy.lax_numpy import moveaxis, _promote_args_inexact, _promote_dtypes_inexact from jax._src.numpy.util import _wraps from jax._src.ops import special as ops_special +from jax._src.third_party.scipy.betaln import betaln as _betaln_impl from jax._src.typing import Array, ArrayLike @@ -38,10 +39,11 @@ def gammaln(x: ArrayLike) -> Array: return lax.lgamma(x) -@_wraps(osp_special.betaln, module='scipy.special') -def betaln(x: ArrayLike, y: ArrayLike) -> Array: - x, y = _promote_args_inexact("betaln", x, y) - return lax.lgamma(x) + lax.lgamma(y) - lax.lgamma(x + y) +betaln = _wraps( + osp_special.betaln, + module='scipy.special', + update_doc=False +)(_betaln_impl) @_wraps(osp_special.betainc, module='scipy.special') diff --git a/jax/_src/third_party/scipy/betaln.py b/jax/_src/third_party/scipy/betaln.py new file mode 100644 --- /dev/null +++ b/jax/_src/third_party/scipy/betaln.py @@ -0,0 +1,62 @@ +from jax import lax +import jax.numpy as jnp +from jax._src.typing import Array, ArrayLike +from jax._src.numpy.lax_numpy import _promote_args_inexact + +def algdiv(a: ArrayLike, b: ArrayLike) -> Array: + """ + Compute ``log(gamma(a))/log(gamma(a + b))`` when ``b >= 8``. + + Derived from scipy's implmentation of `algdiv`_. + + This differs from the scipy implementation in that it assumes a <= b + because recomputing ``a, b = jnp.minimum(a, b), jnp.maximum(a, b)`` might + be expensive and this is only called by ``betaln``. + + .. _algdiv: + https://github.com/scipy/scipy/blob/c89dfc2b90d993f2a8174e57e0cbc8fbe6f3ee19/scipy/special/cdflib/algdiv.f + """ + c0 = 0.833333333333333e-01 + c1 = -0.277777777760991e-02 + c2 = 0.793650666825390e-03 + c3 = -0.595202931351870e-03 + c4 = 0.837308034031215e-03 + c5 = -0.165322962780713e-02 + h = a / b + c = h / (1 + h) + x = h / (1 + h) + d = b + (a - 0.5) + # Set sN = (1 - x**n)/(1 - x) + x2 = x * x + s3 = 1.0 + (x + x2) + s5 = 1.0 + (x + x2 * s3) + s7 = 1.0 + (x + x2 * s5) + s9 = 1.0 + (x + x2 * s7) + s11 = 1.0 + (x + x2 * s9) + # Set w = del(b) - del(a + b) + # where del(x) is defined by ln(gamma(x)) = (x - 0.5)*ln(x) - x + 0.5*ln(2*pi) + del(x) + t = (1.0 / b) ** 2 + w = ((((c5 * s11 * t + c4 * s9) * t + c3 * s7) * t + c2 * s5) * t + c1 * s3) * t + c0 + w = w * (c / b) + # Combine the results + u = d * lax.log1p(a / b) + v = a * (lax.log(b) - 1.0) + return jnp.where(u <= v, (w - v) - u, (w - u) - v) + + +def betaln(a: ArrayLike, b: ArrayLike) -> Array: + """Compute the log of the beta function. + + Derived from scipy's implementation of `betaln`_. + + This implementation does not handle all branches of the scipy implementation, but is still much more accurate + than just doing lgamma(a) + lgamma(b) - lgamma(a + b) when inputs are large (> 1M or so). + + .. _betaln: + https://github.com/scipy/scipy/blob/ef2dee592ba8fb900ff2308b9d1c79e4d6a0ad8b/scipy/special/cdflib/betaln.f + """ + a, b = _promote_args_inexact("betaln", a, b) + a, b = jnp.minimum(a, b), jnp.maximum(a, b) + small_b = lax.lgamma(a) + (lax.lgamma(b) - lax.lgamma(a + b)) + large_b = lax.lgamma(a) + algdiv(a, b) + return jnp.where(b < 8, small_b, large_b)
diff --git a/tests/lax_scipy_test.py b/tests/lax_scipy_test.py --- a/tests/lax_scipy_test.py +++ b/tests/lax_scipy_test.py @@ -303,6 +303,18 @@ def testIssue3758(self): q = np.array([1., 40., 30.], dtype=np.float32) self.assertAllClose(np.array([1., 0., 0.], dtype=np.float32), lsp_special.zeta(x, q)) + def testIssue13267(self): + """Tests betaln(x, 1) across wide range of x.""" + xs = jnp.geomspace(1, 1e30, 1000) + primals_out, tangents_out = jax.jvp(lsp_special.betaln, primals=[xs, 1.0], tangents=[jnp.ones_like(xs), 0.0]) + # Check that betaln(x, 1) = -log(x). + # Betaln is still not perfect for small values, hence the atol (but it's close) + atol = jtu.if_device_under_test("tpu", 1e-3, 1e-5) + self.assertAllClose(primals_out, -jnp.log(xs), atol=atol) + # Check that d/dx betaln(x, 1) = d/dx -log(x) = -1/x. + self.assertAllClose(tangents_out, -1 / xs, atol=atol) + + def testXlogyShouldReturnZero(self): self.assertAllClose(lsp_special.xlogy(0., 0.), 0., check_dtypes=False)
`betaln` is wildly inaccurate for large values ### Description `jax.scipy.special.betaln` becomes very inaccurate for large inputs. <img width="608" alt="Screen Shot 2022-11-15 at 4 54 10 PM" src="https://user-images.githubusercontent.com/391217/202057219-814b0ca5-ea8c-41c0-8f5c-748e19fa8fe0.png"> The image compares jax, scipy, and a jax version I made that parallels scipy's version (at the expense of duplicated work, due to the evaluation of both branches). The more accurate jax version can be found [here](https://gist.github.com/imh/b6e3e7e2ea3224e365360cea85578240). I would be happy to open up a PR for it, if someone could answer the following: - If this went in `jax._src.scipy.special`, where should I import `jnp.where` from to avoid circular dependencies? - Since I just ported (two branches of) scipy's version of `betaln`, I expect there may be license issues including it in jax. Is that the case? Scipy's license is linked in the ported version. ### What jax/jaxlib version are you using? 0.3.23 ### Which accelerator(s) are you using? CPU ### Additional system info MacOS 12.6 ### NVIDIA GPU info _No response_
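To see the scale of the problem without the plot, here is a small check (a sketch added for illustration, not part of the original report) using the identity `betaln(x, 1) == -log(x)`; the naive `lgamma` formulation loses its significant digits to cancellation as `x` grows.

```python
import numpy as np
from scipy.special import betaln, gammaln

def naive_betaln(a, b):
    return gammaln(a) + gammaln(b) - gammaln(a + b)

x = 1e20
print(-np.log(x))            # exact value of betaln(x, 1): about -46.05
print(betaln(x, 1.0))        # scipy's dedicated routine stays close to that
print(naive_betaln(x, 1.0))  # expected to be far off: two huge, nearly equal terms cancel
```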
We welcome PRs! * The circularity with `jnp.where` usually isn't a problem because `jax.scipy` ends up imported after `jax.numpy`. Try it? * In general if the code is copied or derived from code elsewhere, it must go in the `third_party` subdirectory (https://github.com/google/jax/tree/main/jax/_src/third_party) of `jax._src` since it is under a different license and we need to comply with that license. That's probably fine for SciPy code, and indeed there's already a little scipy-derived code in there you can follow as an example.
2022-11-17T04:36:53
google/jax
13,300
google__jax-13300
[ "13296" ]
0324cac8882e3ea1b2148818ee2322e2b96696da
diff --git a/jax/experimental/sparse/transform.py b/jax/experimental/sparse/transform.py --- a/jax/experimental/sparse/transform.py +++ b/jax/experimental/sparse/transform.py @@ -70,6 +70,13 @@ sparse_rules : Dict[core.Primitive, Callable] = {} +_zero_preserving_linear_unary_primitives = [ + lax.copy_p, + lax.imag_p, + lax.neg_p, + lax.real_p, +] + _zero_preserving_unary_primitives = [ lax.abs_p, lax.asin_p, @@ -77,19 +84,15 @@ lax.atan_p, lax.atanh_p, lax.bessel_i1e_p, - lax.copy_p, lax.expm1_p, lax.log1p_p, - lax.neg_p, - lax.real_p, - lax.imag_p, lax.sign_p, lax.sin_p, lax.sinh_p, lax.sqrt_p, lax.tan_p, lax.tanh_p, - lax.convert_element_type_p + lax.convert_element_type_p, ] _densifying_primitives : List[core.Primitive] = [ @@ -462,15 +465,17 @@ def _ensure_unique_indices(spenv, spvalue): if spvalue.is_dense() or spvalue.unique_indices: return spvalue arr = spvalues_to_arrays(spenv, spvalue) - arr = arr.sum_duplicates(nse=arr.nse) + arr = arr.sum_duplicates(nse=arr.nse, remove_zeros=False) return arrays_to_spvalues(spenv, arr) -def _zero_preserving_unary_op(prim): +def _zero_preserving_unary_op(prim, linear): def func(spenv, *spvalues, **kwargs): assert len(spvalues) == 1 - # Since unary operations don't commute with addition, we need to ensure - # that indices are unique before applying the operator elementwise. - spvalue = _ensure_unique_indices(spenv, spvalues[0]) + spvalue = spvalues[0] + if not linear: + # For non-linear unary operations, we need to ensure that + # indices are unique before applying the operator elementwise. + spvalue = _ensure_unique_indices(spenv, spvalue) buf = spenv.data(spvalue) buf_out = prim.bind(buf, **kwargs) if spvalues[0].is_sparse(): @@ -484,7 +489,9 @@ def func(spenv, *spvalues, **kwargs): return func for _prim in _zero_preserving_unary_primitives: - sparse_rules[_prim] = _zero_preserving_unary_op(_prim) + sparse_rules[_prim] = _zero_preserving_unary_op(_prim, linear=False) +for _prim in _zero_preserving_linear_unary_primitives: + sparse_rules[_prim] = _zero_preserving_unary_op(_prim, linear=True) def _standard_sparse_rule(prim, sparse_op): def _sparse_rule(spenv, *spvalues, **kwds):
diff --git a/tests/sparsify_test.py b/tests/sparsify_test.py --- a/tests/sparsify_test.py +++ b/tests/sparsify_test.py @@ -500,6 +500,7 @@ def testWeakTypes(self): @parameterized.named_parameters( {"testcase_name": f"_{op.__name__}", "op": op, "dtype": dtype, "kwds": kwds} for op, dtype, kwds in [ + (jnp.copy, jnp.float32, {}), (lax.abs, jnp.float32, {}), (lax.asin, jnp.float32, {}), (lax.asinh, jnp.float32, {}), @@ -530,10 +531,14 @@ def testUnaryOperationsNonUniqueIndices(self, op, dtype, kwds): indices = rng_idx((nse, len(shape)), jnp.int32) mat = BCOO((data, indices), shape=shape) - sparse_result = self.sparsify(partial(op, **kwds))(mat).todense() + sparse_result = self.sparsify(partial(op, **kwds))(mat) dense_result = op(mat.todense(), **kwds) - self.assertArraysAllClose(sparse_result, dense_result) + self.assertArraysAllClose(sparse_result.todense(), dense_result) + + # Ops that commute with addition should not deduplicate indices. + if op in [jnp.copy, lax.neg, lax.real, lax.imag]: + self.assertArraysAllClose(sparse_result.indices, indices) class SparsifyTracerTest(SparsifyTest):
[sparse] Difference between BCOO matrices removes zeros ### Description The difference operation between two `BCOO` sparse matrices implicitly removes zeros. The behaviour is different from the one observed for the addition. Consider the example below. ```python import jax.experimental.sparse as jsp import jax.numpy as jnp a = jsp.BCOO((jnp.array([1.0]), jnp.array([[0, 0]])), shape=(3, 3)) b_values = jnp.array([0.0, 1.0, 0.0]) b_indices = jnp.array([[0, 0], [1, 1], [2, 2]]) b = jsp.BCOO((b_values, b_indices), shape=(3, 3)) print("Difference\n", (a - b).indices) print("Addition\n", (a + b).indices) ``` The output is ``` Difference [[0 0] [1 1] [3 3] [3 3]] Addition [[0 0] [0 0] [1 1] [2 2]] ``` As you can see, in the difference operation the indices associated to null values in `a` or `b` have disappeared and they have been replaced with the out-of-bounds index `[3,3]`. The expected behaviour can be obtained using `a + (-1)*b`. ### What jax/jaxlib version are you using? jax v0.3.25, jaxlib v.0.3.25 ### Which accelerator(s) are you using? CPU/GPU ### Additional system info Python 3.10.8, ArchLinux ### NVIDIA GPU info NVIDIA-SMI 520.56.06 Driver Version: 520.56.06 CUDA Version: 11.8
Hi - thanks for the report. The issue isn't in subtraction, but rather in the sparse rule for `neg_p`. Currently it's treated as any other unary operation, and in general unary operations do not commute with addition (i.e. `op(x + y) != op(x) + op(y)` in general for operations like `sin` and `cos`). For this reason, the unary operation sparse rule will deduplicate indices, which has the side effect of (1) sorting the indices, and (2) padding-out explicit zeros. This is fine in general if you consider the sparse data structure only as an abstract representation of a dense matrix. Is it important in your application for explicit zeros to be maintained during operations? The sparse design currently does not include that in its design considerations, and so I expect you'll run into this kind of thing frequently if you depend on details of the sparse representation. So, that said, I think this is "working as intended" for now, but we may be able to think about changing the design goals of the sparse functionality if it's required for important use-cases. For the case of `neg_p` in particular, I think it would make sense to avoid re-indexing because it does commute with addition. What do you think? Thank you for the clarification on the reasons leading to this behaviour! In my application, I use BCOO matrices as "dictionaries" where the listed indices can be associated even with a null value, while non-listed indices are considered as undefined. I believe that this use case can be quite common: for example, if we want to represent scalar values associated with points scattered on an N-dimensional grid, we can use a sparse matrix to represent points and values, and we want to allow the null value. Surely, I think that the `neg_p` behaviour should be consistent with the sum as you propose! For the other operations, I agree that the duplicates should be removed, but I'm not sure that zeros should be removed too. In particular, for functions like `cos` (`cos(0) != 0`) that transform null values into non-null values, if the code does not explicitly prescribe to remove zeros, maybe the author expects that zero values will become non-null values after the transformation. Makes sense - so I think we could call `sum_duplicates` with `remove_zeros=False` for unary ops, and also specialize `lax.neg_p` to avoid index modification. Also, regarding cosine, I shouldn't have brought that up because unary ops that map zeros to non-zero values are not implemented in sparse (because the result is generally a dense matrix). #13300 should address these issues - thanks for raising them!
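To make the commutation argument concrete, here is a toy check (illustrative only, not library code): an elementwise op can be applied directly to duplicated COO entries only if applying it before or after summing the duplicates gives the same value.

```python
import jax.numpy as jnp

x, y = 1.5, 2.0  # two stored values sharing the same sparse index
# Negation is linear, so it commutes with summing duplicates:
print(-(x + y), (-x) + (-y))                     # equal
# sin is not, so duplicates must be summed (deduplicated) first:
print(jnp.sin(x + y), jnp.sin(x) + jnp.sin(y))   # not equal
```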
2022-11-17T18:14:45
google/jax
13,341
google__jax-13341
[ "7708" ]
42e367af9c58cb67496e235eddf325aa285600e4
diff --git a/jax/_src/prng.py b/jax/_src/prng.py --- a/jax/_src/prng.py +++ b/jax/_src/prng.py @@ -1062,13 +1062,23 @@ def threefry_2x32(keypair, count): def threefry_split(key: jnp.ndarray, num: int) -> jnp.ndarray: - return _threefry_split(key, int(num)) # type: ignore + if config.jax_threefry_partitionable: + return _threefry_split_foldlike(key, int(num)) # type: ignore + else: + return _threefry_split_original(key, int(num)) # type: ignore @partial(jit, static_argnums=(1,), inline=True) -def _threefry_split(key, num) -> jnp.ndarray: +def _threefry_split_original(key, num) -> jnp.ndarray: counts = lax.iota(np.uint32, num * 2) return lax.reshape(threefry_2x32(key, counts), (num, 2)) +@partial(jit, static_argnums=(1,), inline=True) +def _threefry_split_foldlike(key, num) -> jnp.ndarray: + k1, k2 = key + counts1, counts2 = iota_32x2_shape((num,)) + bits1, bits2 = threefry2x32_p.bind(k1, k2, counts1, counts2) + return jnp.stack([bits1, bits2], axis=1) + def threefry_fold_in(key: jnp.ndarray, data: jnp.ndarray) -> jnp.ndarray: assert not data.shape @@ -1177,7 +1187,12 @@ def _rbg_seed(seed: jnp.ndarray) -> jnp.ndarray: return jnp.concatenate([halfkey, halfkey]) def _rbg_split(key: jnp.ndarray, num: int) -> jnp.ndarray: - return vmap(_threefry_split, (0, None), 1)(key.reshape(2, 2), num).reshape(num, 4) + if config.jax_threefry_partitionable: + _threefry_split = _threefry_split_foldlike + else: + _threefry_split = _threefry_split_original + return vmap( + _threefry_split, (0, None), 1)(key.reshape(2, 2), num).reshape(num, 4) def _rbg_fold_in(key: jnp.ndarray, data: jnp.ndarray) -> jnp.ndarray: assert not data.shape
diff --git a/tests/random_test.py b/tests/random_test.py --- a/tests/random_test.py +++ b/tests/random_test.py @@ -367,6 +367,24 @@ def testPRNGValues(self): _prng_key_as_array(random.fold_in(k, 4)), np.array([2285895361, 433833334], dtype='uint32')) + @skipIf(not config.jax_threefry_partitionable, 'enable after upgrade') + def test_threefry_split_fold_in_symmetry(self): + with jax.default_prng_impl('threefry2x32'): + key = random.PRNGKey(72) + folds = jnp.array([random.fold_in(key, i) for i in range(8)]) + splits = random.split(key, 8) + self.assertArraysEqual(folds, splits) + + @skipIf(not config.jax_threefry_partitionable, 'enable after upgrade') + def test_threefry_split_vmapped_fold_in_symmetry(self): + # See https://github.com/google/jax/issues/7708 + with jax.default_prng_impl('threefry2x32'): + key = random.PRNGKey(72) + folds = vmap(lambda k, _: random.fold_in(k, lax.axis_index('batch')), + in_axes=(None, 0), axis_name='batch')(key, jnp.ones(8)) + splits = random.split(key, 8) + self.assertArraysEqual(folds, splits) + @jtu.sample_product([ {"seed": 0, "type": int, "jit": True, "key": [0, 0]}, {"seed": 0, "type": int, "jit": False, "key": [0, 0]},
Split keys over named batch axis A pattern that I find myself using often in jax is defining a `loss`, and then defining an outer `average_loss` that `vmap`s the inner `loss` function and then averages, i.e. ``` def loss(key, params, x): return ... def average_loss(key, params, xs): keys = jax.random.split(key, num=xs.shape[0]) losses = vmap(loss, in_axes=(0, None, 0))(keys, params, xs) return jnp.mean(losses) ``` I can use `fold_in` and a named axis to make this cleaner: ``` def loss(key, params, x): key = random.fold_in(key, lax.axis_index('batch')) ... return jnp.mean(loss, 'batch') average_loss = vmap(loss, in_axes=(None, None, 0), axis_name='batch') ``` But these two approaches don't give the exact same results. It would be nice to have an 'official' way to split keys over batch axes.
Even if we don't dictate an official recipe, we should see if we can make these behave equivalently. It seems like this is because `split` folds twice as many indices as `fold_in`, but it's not explained anywhere. It might have to do with the design of threefry. Any clues @mattjj @hawkinsp? The "2x32" in threefry-2x32 refers to processing two 32-bit inputs (call them "words" for lack of a better word). Our `split` hashes `2 * n` words in order to produce `2 * n` words as `n` new keys. Meanwhile `fold_in` is our own addition to the RNG menu. It currently seems to directly use the threefry-2x32 function as a keyed 32-bit hash. I don't know that this is any more or less valid than the alternative of, say, `fold_in(key, n) == split(key, n + 1)[n]` (ignoring the overflow case where `n` is the u32 max), but it is simple and fast. If we want the latter meaning instead, a question then is how to efficiently compute the n'th split alone. Here's one way: ```python from jax import numpy as jnp from jax._src.prng import threefry_2x32, threefry_split, threefry_seed def ith_fry(key, i, n): return threefry_2x32(key, jnp.array([i, n + i], dtype=jnp.uint32)) def ith_split(key, i, n): j1, j2 = 2 * i, 2 * i + 1 return jnp.array([ith_fry(key, j1 % n, n)[j1 // n], ith_fry(key, j2 % n, n)[j2 // n]]) def nth_split(key, n): return ith_split(key, n, n + 1) ``` Quick check on these: ```python >>> key = threefry_seed(32) >>> split = lambda key, n: jnp.array([ith_split(key, i, n) for i in range(n)]) >>> (threefry_split(key, 10) == split(key, 10)).all() DeviceArray(True, dtype=bool) >>> nth_split_ref = lambda key, n: threefry_split(key, n + 1)[n] >>> all((nth_split(key, n) == nth_split_ref(key, n)).all() for n in range(50)) True ``` We likely have more room to change `split` so that the `nth_split` logic is simpler and faster. Today's `split` feeds threefry pairs of input words whose values are spaced apart by `n`, but it might be fine to give it consecutive counts instead (see `ith_fry` above). We could also rework how we reshape the threefry output in `split` as well. Today we effectively transpose the random words on the way out. That may not be necessary, and changing it would prevent the need for divmod indexing in `ith_split`. Both modifications would remove dependencies on `n`. Anyway, we would need to do something along these lines in order to address the initial request here from @dieterichlawson. We don't only want `fold_in(key, n)` to behave as `split(key, n + 1)[n]` there. What we want is that, for all `n` and `i in range(n)`, `fold_in(key, i)` equals `split(key, n)[i]`. Because `fold_in` is independent of `n`, we'd want an `ith_split` that is independent of it as well.
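The requested property can be written as a concrete check; this is a sketch that assumes the `jax_threefry_partitionable` flag introduced alongside this change (under the pre-existing scheme the two arrays are expected to differ).

```python
import jax
import jax.numpy as jnp
from jax import random

jax.config.update('jax_threefry_partitionable', True)  # opt in to the new derivation scheme

key = random.PRNGKey(72)
folds = jnp.array([random.fold_in(key, i) for i in range(8)])
splits = random.split(key, 8)
print(jnp.array_equal(folds, splits))  # True iff fold_in(key, i) == split(key, n)[i] for all i
```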
2022-11-21T22:40:53
google/jax
13,366
google__jax-13366
[ "13283" ]
f341b273feeca1ce6ca168888f8b1e761000b664
diff --git a/jax/_src/custom_batching.py b/jax/_src/custom_batching.py --- a/jax/_src/custom_batching.py +++ b/jax/_src/custom_batching.py @@ -19,6 +19,7 @@ import jax from jax import core from jax import linear_util as lu +from jax import tree_util from jax.interpreters import ad from jax.interpreters import batching from jax.interpreters.batching import not_mapped @@ -66,11 +67,11 @@ def __call__(self, *args, **kwargs): in_avals = [core.raise_to_shaped(core.get_aval(x)) for x in args_flat] debug = pe.debug_info(self.fun, in_tree, False, "custom_vmap") jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(flat_fun, in_avals, debug) - assert not len(consts) closed_call = core.ClosedJaxpr(pe.convert_constvars_jaxpr(jaxpr), ()) + in_tree = treedef_tuple((tree_structure(consts), in_tree)) out_flat = custom_vmap_p.bind(*consts, *args_flat, call=closed_call, - rule=self.vmap_rule, + rule=ClosedRule(self.vmap_rule), in_tree=in_tree, out_tree=out_tree()) return tree_unflatten(out_tree(), out_flat) @@ -78,6 +79,21 @@ def __call__(self, *args, **kwargs): ### utils +# Define a class, instead of making a function closing over `rule`, so +# that we can override __str__ +class ClosedRule: + def __init__(self, rule): + functools.update_wrapper(self, rule) + self.rule = rule + + def __call__(self, axis_size, all_in_batched, *all_args): + _, args = all_args + consts_batched, in_batched = all_in_batched + assert not any(tree_util.tree_leaves(consts_batched)), consts_batched + return call_rule(self.rule, axis_size, in_batched, args) + + def __str__(self): + return str(self.rule) def ensure_list(xs): return xs if type(xs) is list else list(xs)
diff --git a/tests/api_test.py b/tests/api_test.py --- a/tests/api_test.py +++ b/tests/api_test.py @@ -8805,6 +8805,28 @@ def rule(axis_size, in_batched, xs): ys = api.vmap(f)(xs) self.assertAllClose(ys, jnp.cos(xs)) + def test_closure(self): + z = jnp.array([2., 1., 3.]) + + @api.custom_vmap + def f(x): return z + jnp.sin(x) + + @f.def_vmap + def rule(axis_size, in_batched, *args): + self.assertEqual(len(in_batched), 1) + self.assertEqual(len(args), 1) + xs, = args + xs_batched, = in_batched + self.assertEqual(xs_batched, True) + self.assertEqual(axis_size, xs.shape[0]) + return z + jnp.cos(xs), xs_batched + + x, xs = jnp.array(1.), jnp.arange(3) + y = f(x) + self.assertAllClose(y, z + jnp.sin(x)) + ys = api.vmap(f)(xs) + self.assertAllClose(ys, z + jnp.cos(xs)) + def test_rule_multi_output(self): @api.custom_vmap def f(x): return jnp.sin(x), jnp.cos(x) @@ -8962,6 +8984,36 @@ def rule(axis_size, in_batched, xs): self.assertAllClose(ys, jnp.cos(xs)) self.assertAllClose(tys, -jnp.sin(xs) * txs) + def test_jvp_closure(self): + z = jnp.array([2., 1., 3.]) + def bcast(x): return z + x - z + + @api.custom_vmap + def f(x): return z + jnp.sin(x) + + @f.def_vmap + def rule(axis_size, in_batched, xs): + self.assertEqual(axis_size, 3) + self.assertEqual(in_batched, [True]) + return z + jnp.cos(xs), in_batched[0] + + f_jvp = lambda x, tx: api.jvp(f, [x], [tx]) + + x, tx = jnp.array(1.), jnp.array(2.) + xs, txs = jnp.arange(3.), jnp.arange(3.) * 2. + + y, ty = f_jvp(x, tx) + self.assertAllClose(y, z + jnp.sin(x)) + self.assertAllClose(ty, bcast(jnp.cos(x)) * tx) + + ys, tys = api.vmap(f_jvp)(xs, txs) + self.assertAllClose(ys, z + jnp.cos(xs)) + self.assertAllClose(tys, bcast(-jnp.sin(xs)) * txs) + + ys, tys = api.jvp(api.vmap(f), [xs], [txs]) + self.assertAllClose(ys, z + jnp.cos(xs)) + self.assertAllClose(tys, bcast(-jnp.sin(xs)) * txs) + def test_jvp_nary(self): @api.custom_vmap def f(x, y): return jnp.sin(x) + y
support consts in custom batching rules To save memory in a setup where `jax.checkpoint` was insufficient, I implemented a custom transposition via `jax.custom_derivatives.linear_call`. This worked just fine and yielded the desired savings. However, when `vmap`ing, I hit a road blocker. Unfortunately, `jax.custom_derivative.linear_call` does not implement a batching rule and thus naively applying `vmap` does not work even if the arguments to `linear_call` do support batching. Furthermore, in my case it is not possible to implement the batching manually via `jax.custom_batching.custom_vmap` because `custom_vmap` [does not work with constants in jaxpr](https://github.com/google/jax/blob/3a837c8/jax/_src/custom_batching.py#L69). I think the best approach would be to allow both `linear_call` to be batched and `custom_vmap` to work with constants. Either of the two would unblock me :) Below is a minimal reproducible example: ```python from functools import partial import jax from jax.custom_batching import sequential_vmap from jax.custom_derivatives import linear_call import numpy as np def _mul(residual_args, a, *, c): b, = residual_args c = np.array(c) # needs to be known at trace-time return a * b * c def _mul_T(residual_args, out, *, c): b, = residual_args c = np.array(c) # needs to be known at trace-time return out * b * c def mul(a, b, c): print(a.shape) return linear_call(partial(_mul, c=c), partial(_mul_T, c=c), (b, ), a) a, b, c = np.arange(12, dtype=float), 10, np.array([2., 4.]).reshape(2, 1) m = partial(mul, b=b, c=c) jax.vmap(sequential_vmap(m), in_axes=(0, ))(a) ```
Currently, this can not be circumvented by using `jax.custom_transpose` since it is missing a batching rule too. The current plan is to push ahead with `custom_vmap` and perhaps do away with `linear_call`. Tracking at #9073. Ok, so in this case it would be great if `custom_vmap` would work with constants. Should I file a separate issue for that and close this one? Furthermore, to me #13298 becomes relevant then because there are some things that `linear_call` can do which `custom_transpose` can't (yet). I'll repurpose this issue. We'll certainly need to make sure consts are supported before we consider custom batching complete. It doesn't hurt to have an open issue reminding us to do this, with your particular example registered. Thanks for filing!
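With consts supported, a `custom_vmap` rule should be able to close over array constants; here is a minimal sketch of the intended usage, modeled on the test added in this change (values chosen for illustration).

```python
import jax
import jax.numpy as jnp
from jax.custom_batching import custom_vmap

z = jnp.array([2., 1., 3.])  # closed-over constant, previously rejected by custom_vmap

@custom_vmap
def f(x):
    return z + jnp.sin(x)

@f.def_vmap
def rule(axis_size, in_batched, xs):
    return z + jnp.cos(xs), in_batched[0]

print(f(jnp.array(1.)))             # unbatched call: z + sin(x)
print(jax.vmap(f)(jnp.arange(3.)))  # batched call uses the rule: z + cos(xs)
```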
2022-11-23T01:20:42
google/jax
13,420
google__jax-13420
[ "13340" ]
cc1d2aaaeda6c0e10ffc66b9fc7f6f446e1bc6db
diff --git a/jax/experimental/sparse/bcoo.py b/jax/experimental/sparse/bcoo.py --- a/jax/experimental/sparse/bcoo.py +++ b/jax/experimental/sparse/bcoo.py @@ -2468,7 +2468,7 @@ def _eye(cls, N, M, k, *, dtype=None, index_dtype='int32', n_batch=0, n_dense=0) return cls.fromdense(jnp.eye(N, M, k, dtype=dtype), n_batch=n_batch, n_dense=n_dense, index_dtype=index_dtype) - k = jnp.asarray(k) + if n_batch == 0: data = jnp.ones(diag_size, dtype=dtype) idx = jnp.arange(diag_size, dtype=index_dtype) diff --git a/jax/experimental/sparse/coo.py b/jax/experimental/sparse/coo.py --- a/jax/experimental/sparse/coo.py +++ b/jax/experimental/sparse/coo.py @@ -110,7 +110,6 @@ def _eye(cls, N, M, k, *, dtype=None, index_dtype='int32'): # if k is out of range, return an empty matrix. return cls._empty((N, M), dtype=dtype, index_dtype=index_dtype) - k = jnp.asarray(k) data = jnp.ones(diag_size, dtype=dtype) idx = jnp.arange(diag_size, dtype=index_dtype) zero = _const(idx, 0) diff --git a/jax/experimental/sparse/csr.py b/jax/experimental/sparse/csr.py --- a/jax/experimental/sparse/csr.py +++ b/jax/experimental/sparse/csr.py @@ -82,7 +82,6 @@ def _eye(cls, N, M, k, *, dtype=None, index_dtype='int32'): # if k is out of range, return an empty matrix. return cls._empty((N, M), dtype=dtype, index_dtype=index_dtype) - k = jnp.asarray(k) data = jnp.ones(diag_size, dtype=dtype) idx = jnp.arange(diag_size, dtype=index_dtype) zero = _const(idx, 0)
diff --git a/tests/sparse_test.py b/tests/sparse_test.py --- a/tests/sparse_test.py +++ b/tests/sparse_test.py @@ -2493,14 +2493,20 @@ def test_empty(self, cls, shape): for k in [-2, 0, 1]) def test_eye(self, cls, N, M, k): sparse_format = cls.__name__.lower() - mat = sparse.eye(N, M, k, sparse_format=sparse_format) + func = partial(sparse.eye, N, M, k, sparse_format=sparse_format) expected = jnp.eye(N, M, k) expected_nse = jnp.count_nonzero(expected) + mat = func() self.assertIsInstance(mat, cls) self.assertArraysEqual(mat.todense(), expected) self.assertEqual(mat.nse, expected_nse) + mat_jit = jit(func)() + self.assertIsInstance(mat_jit, cls) + self.assertArraysEqual(mat_jit.todense(), expected) + self.assertEqual(mat_jit.nse, expected_nse) + @parameterized.named_parameters( {"testcase_name": f"{nse}_BCOO{shape}", "shape": shape, "nse": nse} for shape in ([2, 5], [5, 3])
sparse.eye calls Numpy preventing JIT ### Description ``` from jax import jit, numpy as jnp from jax.experimental.sparse import eye def f(A): n = A.shape[0] return eye(n) A = jnp.ones([3, 3]) f(A) # BCOO(float32[3, 3], nse=3) jit(f)(A) # TracerArrayConversionError ... ``` The problem function is: https://github.com/google/jax/blob/42e367af9c58cb67496e235eddf325aa285600e4/jax/_src/lax/lax.py#L4708-L4713 I think the return could just be changed to `jnp.array`. ### What jax/jaxlib version are you using? v0.3.23 ### Which accelerator(s) are you using? _No response_ ### Additional system info _No response_ ### NVIDIA GPU info _No response_
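With the fix, the repro above is expected to work under `jit`, since the shape argument is static at trace time; this is a sketch mirroring the added test, not a guaranteed transcript of its output.

```python
import jax
import jax.numpy as jnp
from jax.experimental import sparse

@jax.jit
def f(A):
    return sparse.eye(A.shape[0])  # A.shape[0] is a static Python int under jit

print(f(jnp.ones((3, 3))))  # expected: BCOO(float32[3, 3], nse=3)
```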
Thanks for the report - looks like the implementation is a bit sloppy with respect to static and dynamic values. Who wrote this code anyway? [oh no...](https://github.com/google/jax/pull/10782) 😀
2022-11-28T18:54:45
google/jax
13,536
google__jax-13536
[ "12824" ]
1132098c90805f3eb546809fd146991b37f89d36
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -197,6 +197,8 @@ # List of patterns, relative to source directory, that match notebook # files that will not be executed. nb_execution_excludepatterns = [ + # Includes GPU timings that shouldn't be executed by doc build + 'notebooks/quickstart.*', # Slow notebook: long time to load tf.ds 'notebooks/neural_network_with_tfds_data.*', # Slow notebook
Wrong benchmarks in the documentation ### Description In the QuickStart documentation page: https://jax.readthedocs.io/en/latest/notebooks/quickstart.html The indicated benchmarks are as follows (544 ms, 478 ms, 469 ms) as the image below, but the second one should be slower than the two others as it is also indicated in the same image. ![image](https://user-images.githubusercontent.com/66799406/195980887-db5c5e66-9b16-4aa2-81a6-22ab92606daf.png) I have tested this in a Colab free tier with T4 GPU and got the following: ![image](https://user-images.githubusercontent.com/66799406/195980890-c9c4c9d3-98a3-4fca-a769-5cc284054abe.png) I think there is a problem in the reported benchmarks of the documentation. Thank you ### What jax/jaxlib version are you using? 0.3.21 ### Which accelerator(s) are you using? GPU T4 ### Additional system info Colab free tier ### NVIDIA GPU info ``` +-----------------------------------------------------------------------------+ | NVIDIA-SMI 460.32.03 Driver Version: 460.32.03 CUDA Version: 11.2 | |-------------------------------+----------------------+----------------------+ | GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | | | | MIG M. | |===============================+======================+======================| | 0 Tesla T4 Off | 00000000:00:04.0 Off | 0 | | N/A 53C P0 28W / 70W | 14204MiB / 15109MiB | 0% Default | | | | N/A | +-------------------------------+----------------------+----------------------+ +-----------------------------------------------------------------------------+ | Processes: | | GPU GI CI PID Type Process name GPU Memory | | ID ID Usage | |=============================================================================| | 0 N/A N/A 2250 C /usr/bin/python3 14201MiB | +-----------------------------------------------------------------------------+ ```
I think the issue is that we're regenerating the documents on a CI node that is probably on CPU and quite slow... What do you think the best fix is: maybe add a disclaimer that timings may vary depending on hardware? Can we fix these and not regenerate them?
2022-12-06T19:38:38
google/jax
13,654
google__jax-13654
[ "13653" ]
a66b3dcdd378b723e275a19258de826260b4c83e
diff --git a/jax/experimental/sparse/bcoo.py b/jax/experimental/sparse/bcoo.py --- a/jax/experimental/sparse/bcoo.py +++ b/jax/experimental/sparse/bcoo.py @@ -400,7 +400,7 @@ def bcoo_extract(indices: Array, mat: Array) -> Array: @bcoo_extract_p.def_impl def _bcoo_extract_impl(indices, mat): mat = jnp.asarray(mat) - n_batch, n_sparse, _, _ = _validate_bcoo_indices(indices, mat.shape) + n_batch, n_sparse, _, nse = _validate_bcoo_indices(indices, mat.shape) ind_slices = tuple(np.zeros(s, int) if i_s == 1 else np.arange(s) for s, i_s in zip(mat.shape[:n_batch], indices.shape[:n_batch])) @@ -412,8 +412,13 @@ def _bcoo_extract_impl(indices, mat): batch_ind = tuple(grid)[:-1] if not sparse_ind + batch_ind: - return mat[None] - return mat.at[batch_ind + sparse_ind].get(mode='fill', fill_value=0) + result = mat[None] + else: + result = mat.at[batch_ind + sparse_ind].get(mode='fill', fill_value=0) + if n_sparse == 0 and nse != 1: + result = lax.broadcast_in_dim( + result, _tuple_replace(result.shape, n_batch, nse), range(result.ndim)) + return result @bcoo_extract_p.def_abstract_eval def _bcoo_extract_abstract_eval(indices, mat):
diff --git a/tests/sparse_test.py b/tests/sparse_test.py --- a/tests/sparse_test.py +++ b/tests/sparse_test.py @@ -904,6 +904,17 @@ def test_bcoo_extract_ad(self, shape, dtype, n_batch, n_dense): self.assertEqual(j1.shape, data.shape + M.shape) self.assertEqual(hess.shape, data.shape + 2 * M.shape) + def test_bcoo_extract_zero_nse(self): + # Regression test for https://github.com/google/jax/issues/13653 + + # (n_batch, n_sparse, n_dense) = (1, 0, 0), nse = 2 + args_maker = lambda: (jnp.zeros((3, 2, 0), dtype='int32'), jnp.arange(3)) + self._CompileAndCheck(sparse.bcoo_extract, args_maker) + + # (n_batch, n_sparse, n_dense) = (0, 0, 1), nse = 2 + args_maker = lambda: (jnp.zeros((2, 0), dtype='int32'), jnp.arange(3)) + self._CompileAndCheck(sparse.bcoo_extract, args_maker) + @jtu.sample_product( [dict(shape=shape, n_batch=n_batch, n_dense=n_dense) for shape in [(), (5,), (5, 8), (3, 4, 5), (3, 4, 3, 2)]
[sparse] bcoo_extract incorrect for zero sparse dimensions Found in the course of #13624 Minimal repro: ```python import jax import jax.numpy as jnp from jax.experimental import sparse mat = jnp.zeros(3, dtype='float32') indices = jnp.zeros((3, 2, 0), dtype='int32') jax.jit(sparse.bcoo_extract)(indices, mat) # Error ```
2022-12-14T20:19:31
google/jax
13,884
google__jax-13884
[ "13875" ]
982a25703e8c480d87b382a7e044d97e4abb7125
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -29,12 +29,12 @@ _libtpu_version = '0.1.dev20221212' _dct = {} -with open('jax/version.py') as f: +with open('jax/version.py', encoding='utf-8') as f: exec(f.read(), _dct) __version__ = _dct['__version__'] _minimum_jaxlib_version = _dct['_minimum_jaxlib_version'] -with open('README.md') as f: +with open('README.md', encoding='utf-8') as f: _long_description = f.read() if 'PROTOC' in os.environ and os.path.exists(os.environ['PROTOC']):
pip install failed and got UnicodeDecodeError for jax>=0.3.15 on Windows, Python 3.9.13 ### Description I got `UnicodeDecodeError` when running pip install for jax `0.3.15` on Windows 10: ```cmd PS C:\tensorflow> pip install --no-cache-dir jax==0.3.15 Collecting jax==0.3.15 Downloading jax-0.3.15.tar.gz (1.0 MB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 1.0/1.0 MB 3.5 MB/s eta 0:00:00 Preparing metadata (setup.py) ... error error: subprocess-exited-with-error × python setup.py egg_info did not run successfully. │ exit code: 1 ╰─> [6 lines of output] Traceback (most recent call last): File "<string>", line 2, in <module> File "<pip-setuptools-caller>", line 34, in <module> File "C:\Users\Jason Wu\AppData\Local\Temp\pip-install-5rpko2f6\jax_4375f8ec13e046228768476f1ee3a194\setup.py", line 33, in <module> _long_description = f.read() UnicodeDecodeError: 'cp950' codec can't decode byte 0xe2 in position 1304: illegal multibyte sequence [end of output] note: This error originates from a subprocess, and is likely not a problem with pip. error: metadata-generation-failed × Encountered error while generating package metadata. ╰─> See above for output. note: This is an issue with the package mentioned above, not pip. hint: See above for details. ``` Also I tried `0.4.1` and `0.3.25`, both got the same error: ```cmd PS C:\tensorflow> pip install --no-cache-dir jax==0.4.1 Collecting jax==0.4.1 Downloading jax-0.4.1.tar.gz (1.2 MB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 1.2/1.2 MB 3.6 MB/s eta 0:00:00 Preparing metadata (setup.py) ... error error: subprocess-exited-with-error × python setup.py egg_info did not run successfully. │ exit code: 1 ╰─> [6 lines of output] Traceback (most recent call last): File "<string>", line 2, in <module> File "<pip-setuptools-caller>", line 34, in <module> File "C:\Users\Jason Wu\AppData\Local\Temp\pip-install-trctdimm\jax_3d064f6acc4d4a0b91bcebef44bb0eb7\setup.py", line 38, in <module> _long_description = f.read() UnicodeDecodeError: 'cp950' codec can't decode byte 0xe2 in position 1301: illegal multibyte sequence [end of output] note: This error originates from a subprocess, and is likely not a problem with pip. error: metadata-generation-failed × Encountered error while generating package metadata. ╰─> See above for output. note: This is an issue with the package mentioned above, not pip. hint: See above for details. ``` ```cmd PS C:\tensorflow> pip install --no-cache-dir jax==0.3.25 Collecting jax==0.3.25 Downloading jax-0.3.25.tar.gz (1.1 MB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 1.1/1.1 MB 4.2 MB/s eta 0:00:00 Preparing metadata (setup.py) ... error error: subprocess-exited-with-error × python setup.py egg_info did not run successfully. │ exit code: 1 ╰─> [6 lines of output] Traceback (most recent call last): File "<string>", line 2, in <module> File "<pip-setuptools-caller>", line 34, in <module> File "C:\Users\Jason Wu\AppData\Local\Temp\pip-install-h_ymbbfo\jax_9f7e4365d3a24fb4bb4e74d1b86678e6\setup.py", line 38, in <module> _long_description = f.read() UnicodeDecodeError: 'cp950' codec can't decode byte 0xe2 in position 1304: illegal multibyte sequence [end of output] note: This error originates from a subprocess, and is likely not a problem with pip. error: metadata-generation-failed × Encountered error while generating package metadata. ╰─> See above for output. note: This is an issue with the package mentioned above, not pip. hint: See above for details. 
``` It's worth noting that `0.3.14` installed without issue: ```cmd PS C:\tensorflow> pip install --no-cache-dir jax==0.3.14 Collecting jax==0.3.14 Downloading jax-0.3.14.tar.gz (990 kB) ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 990.1/990.1 kB 2.6 MB/s eta 0:00:00 Preparing metadata (setup.py) ... done Requirement already satisfied: absl-py ... ... Building wheels for collected packages: jax Building wheel for jax (setup.py) ... done Created wheel for jax: filename=jax-0.3.14-py3-none-any.whl size=1147576 sha256=ae0aadd2688c543930de9ba7e31e58b3363a8752ae25e4770941b53e1c3476d6 Stored in directory: C:\Users\Jason Wu\AppData\Local\Temp\pip-ephem-wheel-cache-q92x7zxq\wheels\3d\22\cf\75c40ec058e8d0f9b8427cd35366e1fd618475451fc8fc6fd7 Successfully built jax Installing collected packages: jax Successfully installed jax-0.3.14 ``` Since I'm building Tensorflow from source, it requires Jax > `0.3.15`. Please let me know if there is any other information that needs to be provided. thanks! ### What jax/jaxlib version are you using? jax v0.3.15 ### Which accelerator(s) are you using? CPU ### Additional system info Python 3.9.13, pip 22.3.1, Windows 10, ### NVIDIA GPU info _No response_
The issue is that Windows default string encoding varies by platform. We should be able to fix this by specifying `utf8` explicitly in the `setup.py` file, but until then you should be able to run pip install if you first set your local string encoding via an environment variable: ``` export LC_ALL="en_US.UTF-8" ``` (see https://stackoverflow.com/questions/25036897/pip-install-unicodedecodeerror) Note, however, that the JAX team does not distribute jaxlib wheels for Windows, so even if you `pip install jax` you'll need to either compile jaxlib or find another source for them if you want to use JAX on Windows. See https://github.com/google/jax#installation for more information & resources.
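The longer-term fix suggested here is simply to pin the codec whenever `setup.py` reads text files, which is what the patch above does; a minimal sketch of the pattern:

```python
# Read packaging metadata with an explicit encoding so the platform/locale
# codec (cp950 in the report above) is never consulted.
with open('README.md', encoding='utf-8') as f:
    long_description = f.read()
```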
2023-01-05T17:34:59
google/jax
13,982
google__jax-13982
[ "13981" ]
f7c915e6a239b87b1e4f35710adb35014cdc940f
diff --git a/jax/_src/numpy/index_tricks.py b/jax/_src/numpy/index_tricks.py --- a/jax/_src/numpy/index_tricks.py +++ b/jax/_src/numpy/index_tricks.py @@ -54,7 +54,11 @@ def __getitem__(self, key: Union[slice, Tuple[slice, ...]]) -> Union[Array, List with jax.numpy_dtype_promotion('standard'): output = _promote_dtypes(*output) output_arr = jnp.meshgrid(*output, indexing='ij', sparse=self.sparse) - return output_arr if self.sparse else jnp.stack(output_arr, 0) + if self.sparse: + return output_arr + if len(output_arr) == 0: + return jnp.arange(0) + return jnp.stack(output_arr, 0) class _Mgrid(_IndexGrid):
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -4353,6 +4353,7 @@ def testMgrid(self): # wrap indexer for appropriate dtype defaults. np_mgrid = _indexer_with_default_outputs(np.mgrid) assertAllEqual = partial(self.assertAllClose, atol=0, rtol=0) + assertAllEqual(np_mgrid[()], jnp.mgrid[()]) assertAllEqual(np_mgrid[:4], jnp.mgrid[:4]) assertAllEqual(np_mgrid[:4,], jnp.mgrid[:4,]) assertAllEqual(np_mgrid[:4], jax.jit(lambda: jnp.mgrid[:4])())
Zero-length indices in `jax.numpy.mgrid` ### Description ```python np.mgrid[()] # yields an integer type array of zero length jnp.mgrid[()] # raises a ValueError because it tries to stack a tuple of zero length ``` ### What jax/jaxlib version are you using? jax commit f729da4a368e9, jaxlib 0.4.1 ### Which accelerator(s) are you using? CPU/GPU ### Additional system info python 3.10.9, Linux (Arch Linux) ### NVIDIA GPU info ``` | NVIDIA-SMI 525.60.13 Driver Version: 525.60.13 CUDA Version: 12.0 | |-------------------------------+----------------------+----------------------+ | GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | | | | MIG M. | |===============================+======================+======================| | 0 NVIDIA GeForce ... On | 00000000:01:00.0 Off | N/A | | 0% 40C P8 25W / 350W | 17MiB / 24576MiB | 0% Default | | | | N/A | ```
2023-01-12T12:05:13
google/jax
14,020
google__jax-14020
[ "13983" ]
8f538f95dcb56a68dbeb9fe9a44d90e82b3f0757
diff --git a/jax/experimental/ode.py b/jax/experimental/ode.py --- a/jax/experimental/ode.py +++ b/jax/experimental/ode.py @@ -90,7 +90,7 @@ def initial_step_size(fun, t0, y0, order, rtol, atol, f0): h1 = jnp.where((d1 <= 1e-15) & (d2 <= 1e-15), jnp.maximum(1e-6, h0 * 1e-3), - (0.01 / jnp.max(d1 + d2)) ** (1. / (order + 1.))) + (0.01 / jnp.maximum(d1, d2)) ** (1. / (order + 1.))) return jnp.minimum(100. * h0, h1)
Typo in function `initial_step_size` in `jax/experimental/ode.py` ### Description The function `initial_step_size` is based on an algorithm in [this book](http://mezbanhabibi.ir/wp-content/uploads/2020/01/ordinary-differential-equations-vol.1.-Nonstiff-problems.pdf) (page 169). This is the algorithm: <img width="486" alt="image" src="https://user-images.githubusercontent.com/49232747/212095806-ce29dcd6-40f1-4bc1-af54-f567330c0acb.png"> In step `e`, we need to compute `max(d1, d2)`, but in the `initial_step_size` function you compute `max(d1 + d2)`. ### What jax/jaxlib version are you using? jax v0.4.1, jaxlib v0.4.1 ### Which accelerator(s) are you using? CPU ### Additional system info _No response_ ### NVIDIA GPU info _No response_
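Spelled out in code, step (e) with the correction applied looks like the following sketch (variable names follow `initial_step_size` in `jax/experimental/ode.py`):

```python
import jax.numpy as jnp

def step_e(d1, d2, h0, order):
    # h1 = (0.01 / max(d1, d2)) ** (1 / (order + 1)), with the small-derivative fallback
    return jnp.where((d1 <= 1e-15) & (d2 <= 1e-15),
                     jnp.maximum(1e-6, h0 * 1e-3),
                     (0.01 / jnp.maximum(d1, d2)) ** (1. / (order + 1.)))
```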
Thanks for noticing this and bringing it to our attention! Amazing!
2023-01-15T04:56:39
google/jax
14,027
google__jax-14027
[ "14026" ]
c4d21f97ea0db8da4eceab31bf1b3756b1d04626
diff --git a/jax/_src/lax/control_flow/conditionals.py b/jax/_src/lax/control_flow/conditionals.py --- a/jax/_src/lax/control_flow/conditionals.py +++ b/jax/_src/lax/control_flow/conditionals.py @@ -687,7 +687,9 @@ def transposed(*args): return _make_closed_jaxpr(transposed, res_avals + jaxpr.out_avals) def _cond_transpose(reduce_axes, cts, *args, branches, linear): + del linear # could use for error checking, but see #14026 index, *ops = args + linear = [type(x) is ad.UndefinedPrimal for x in ops] in_avals = map(raise_to_shaped, branches[0].in_avals) num_res = len(ops) - sum(linear) if any(isinstance(eff, state.RefEffect) for branch in branches for eff in
diff --git a/tests/lax_control_flow_test.py b/tests/lax_control_flow_test.py --- a/tests/lax_control_flow_test.py +++ b/tests/lax_control_flow_test.py @@ -2564,5 +2564,45 @@ def body(c, _): jax.grad(f)(1.) # doesn't crash + def test_custom_jvp_tangent_cond_transpose(self): + # https://github.com/google/jax/issues/14026 + def mask_fun(arr, choice): + out = (1 - choice) * arr.sum() + choice * (1 - arr.sum()) + return out + + def switch_fun(arr, choice): + choice = jnp.floor(choice).astype(jnp.int32) + out = jax.lax.switch(choice, [lambda x: x.sum(), lambda x: 1 - x.sum()], arr) + return out + + test_arr = jnp.arange(3.) + test_val = 0. + + expected1 = jax.grad(mask_fun)(test_arr, test_val) + expected2 = jax.grad(switch_fun)(test_arr, test_val) + + def good_switchfun_jvp(primals, tangents): + arr, choice = primals + arr_dot, choice_dot = tangents + return switch_fun(arr, choice), mask_fun(arr_dot, choice) + + def bad_switchfun_jvp(primals, tangents): + arr, choice = primals + arr_dot, choice_dot = tangents + return switch_fun(arr, choice), switch_fun(arr_dot, choice) + + good_custom_switchfun = jax.custom_jvp(switch_fun) + good_custom_switchfun.defjvp(good_switchfun_jvp) + expected3 = jax.grad(good_custom_switchfun)(test_arr, test_val) + + bad_custom_switchfun = jax.custom_jvp(switch_fun) + bad_custom_switchfun.defjvp(bad_switchfun_jvp) + actual = jax.grad(bad_custom_switchfun)(test_arr, test_val) + + self.assertAllClose(expected1, expected2) + self.assertAllClose(expected2, expected3) + self.assertAllClose(expected3, actual) + + if __name__ == '__main__': absltest.main(testLoader=jtu.JaxTestLoader())
custom_jvp + cond on tangent side = assertion failure ### Description from @bunelr ```python import jax import jax.numpy as jnp def mask_fun(arr, choice): out = (1 - choice) * arr.sum() + choice * (1 - arr.sum()) return out def switch_fun(arr, choice): choice = jnp.floor(choice).astype(jnp.int32) out = jax.lax.switch(choice, [lambda x: x.sum(), lambda x: 1 - x.sum()], arr) return out test_arr = jnp.arange(3.) test_val = 0. print(mask_fun(test_arr, test_val)) # 3.0, all good print(switch_fun(test_arr, test_val)) # 3.0, all good print(jax.grad(mask_fun)(test_arr, test_val)) # -> 1. everywhere -> good print(jax.grad(switch_fun)(test_arr, test_val)) # -> 1. everywhere -> good def good_switchfun_jvp(primals, tangents): arr, choice = primals arr_dot, choice_dot = tangents return switch_fun(arr, choice), mask_fun(arr_dot, choice) def bad_switchfun_jvp(primals, tangents): arr, choice = primals arr_dot, choice_dot = tangents # tangent_out is different, goes through the switch implementation. return switch_fun(arr, choice), switch_fun(arr_dot, choice) good_custom_switchfun = jax.custom_jvp(switch_fun) good_custom_switchfun.defjvp(good_switchfun_jvp) print(jax.grad(good_custom_switchfun)(test_arr, test_val)) # 1. everywhere, good bad_custom_switchfun = jax.custom_jvp(switch_fun) bad_custom_switchfun.defjvp(bad_switchfun_jvp) print(jax.grad(bad_custom_switchfun)(test_arr, test_val)) # ASSERTION ERROR ```
This variant of the repro works: ```python import jax import jax.numpy as jnp jax.config.update('jax_enable_checks', True) def mask_fun(arr, choice):   out = (1 - choice) * arr.sum() +  choice * (1 - arr.sum())   return out def switch_fun(arr, choice):   choice = jnp.floor(choice).astype(jnp.int32)   out = jax.lax.switch(choice, [lambda x: x.sum(), lambda x: 1 - x.sum()], arr)   return out def switch_fun_linear(arr, choice):   choice = jnp.floor(choice).astype(jnp.int32)   from jax._src.lax.control_flow.conditionals import _cond   out = _cond(choice, lambda x: 1 - x.sum(), lambda x: x.sum(), arr,               linear=(True,))   return out test_arr = jnp.arange(3.) test_val = 0. print(mask_fun(test_arr, test_val))     # 3.0, all good print(switch_fun(test_arr, test_val))   # 3.0, all good print(jax.grad(mask_fun)(test_arr, test_val))   # -> 1. everywhere -> good print(jax.grad(switch_fun)(test_arr, test_val)) # -> 1. everywhere -> good def good_switchfun_jvp(primals, tangents):   arr, choice = primals   arr_dot, choice_dot = tangents   return switch_fun(arr, choice), mask_fun(arr_dot, choice) def bad_switchfun_jvp(primals, tangents):   arr, choice = primals   arr_dot, choice_dot = tangents   # tangent_out is different, goes through the switch implementation.   return switch_fun(arr, choice), switch_fun_linear(arr_dot, choice) good_custom_switchfun = jax.custom_jvp(switch_fun) good_custom_switchfun.defjvp(good_switchfun_jvp) print(jax.grad(good_custom_switchfun)(test_arr, test_val)) # 1. everywhere, good bad_custom_switchfun = jax.custom_jvp(switch_fun) bad_custom_switchfun.defjvp(bad_switchfun_jvp) print(jax.grad(bad_custom_switchfun)(test_arr, test_val)) # works now! ``` for control flow primitives like switch or cond, jax internally tracks in which inputs functions are guaranteed to be linear when the function is produced from jax's jvp autodiff pass. that linearity information is then useful when transposing. originally (with scan, the first differentiable control flow operation) i think we only meant for that linearity information to be used in error-checking. but i think for cond we may have accidentally made it more load-bearing. when using custom_jvp, rather than jax's internal jvp of switch, this linearity information isn't added by the jvp pass, and so downstream transposition fails the above version of the repro works because i've made a switch_fun_linear variant of the switch_fun function to apply to the arr_dot values, which are linear inputs, and in switch_fun_linear i'm setting the linearity info explicitly. (i don't think the option is plumbed through to switch, so i used an internal cond.) this isn't the fix! manually annotating linearity information shouldn't be necessary. but i just wanted to test the hypothesis that somehow switch not getting linearity information was the problem
2023-01-16T18:30:48
google/jax
14,059
google__jax-14059
[ "14058" ]
a37121e19512ea5ee0ad523eda39fa5bbd8c5442
diff --git a/jax/_src/core.py b/jax/_src/core.py --- a/jax/_src/core.py +++ b/jax/_src/core.py @@ -26,7 +26,7 @@ from operator import attrgetter import threading import types -from typing import (Any, Callable, ClassVar, DefaultDict, Dict, +from typing import (Any, Callable, ClassVar, DefaultDict, Dict, FrozenSet, Generator, Hashable, Iterable, Iterator, List, NamedTuple, Optional, Sequence, Set, Tuple, Type, Union, cast) @@ -2444,6 +2444,13 @@ def subst_axis_names_jaxpr(jaxpr: Union[Jaxpr, ClosedJaxpr], subst: AxisSubst): return jaxpr return do_subst_axis_names_jaxpr(jaxpr, subst) +def replace_jaxpr_effects(jaxpr: ClosedJaxpr, effects: Effects): + return _replace_jaxpr_effects(jaxpr, frozenset(effects)) + +@weakref_lru_cache +def _replace_jaxpr_effects(jaxpr: ClosedJaxpr, effects: FrozenSet[Effect]): + return jaxpr.replace(jaxpr=jaxpr.jaxpr.replace(effects=set(effects))) + axis_substitution_rules: Dict[Primitive, Callable[[ParamDict, AxisSubst, bool], ParamDict]] = {} diff --git a/jax/_src/lax/control_flow/conditionals.py b/jax/_src/lax/control_flow/conditionals.py --- a/jax/_src/lax/control_flow/conditionals.py +++ b/jax/_src/lax/control_flow/conditionals.py @@ -32,6 +32,7 @@ from jax.interpreters import xla from jax.tree_util import tree_flatten, tree_unflatten from jax._src import ad_util +from jax._src.core import replace_jaxpr_effects from jax._src import dtypes from jax._src import source_info_util from jax._src import util @@ -255,10 +256,8 @@ def cond(pred, true_fun, false_fun, *operands): # Raise index in case of effects to allow data-dependence-based discharging # of those effects (even if they don't have an explicit data dependence). index = core.raise_as_much_as_possible(index) - false_jaxpr = false_jaxpr.replace( - jaxpr=false_jaxpr.jaxpr.replace(effects=joined_effects)) - true_jaxpr = true_jaxpr.replace( - jaxpr=true_jaxpr.jaxpr.replace(effects=joined_effects)) + false_jaxpr = replace_jaxpr_effects(false_jaxpr, joined_effects) + true_jaxpr = replace_jaxpr_effects(true_jaxpr, joined_effects) linear = [False] * len(consts) + linear_ops out = cond_p.bind(
diff --git a/tests/lax_control_flow_test.py b/tests/lax_control_flow_test.py --- a/tests/lax_control_flow_test.py +++ b/tests/lax_control_flow_test.py @@ -2400,6 +2400,21 @@ def f_jvp(primals, tangents): x = np.arange(3, dtype='float32') jax.jvp(g, (x,), (x,)) # doesn't crash + def test_cond_excessive_compilation(self): + # Regression test for https://github.com/google/jax/issues/14058 + def f(x): + return x + 1 + + def g(x): + return x + 2 + + with jtu.count_jit_and_pmap_compiles() as count: + for x in range(10): + lax.cond(x, f, g, x) + # Should observe a maximum of 4 compiles: convert_element_type, f, g, cond + # In #14058, this was observed to be 31 compiles. + self.assertLess(count[0], 5) + @parameterized.named_parameters( {"testcase_name": f"_dtype={dtype.__name__}", "dtype": dtype} for dtype in jtu.dtypes.all_integer)
lax.cond leads to excessive compilation in JAX 0.4.1 Reported in https://github.com/google/jax/discussions/14032#discussioncomment-4718316. Minimal reproduction: ```python import jax import jaxlib from jax import lax print(jaxlib.__version__) print(jax.__version__) jax.config.update('jax_log_compiles', True) def f(x): return x + 1 def g(x): return x + 2 for x in range(10): lax.cond(x, f, g, x) ``` Output in JAX v0.3.25: ``` 0.3.25 0.3.25 WARNING:jax._src.dispatch:Finished tracing + transforming prim_fun for jit in 0.0018656253814697266 sec WARNING:jax._src.dispatch:Compiling prim_fun (139732125277056 for args (ShapedArray(int32[]), ShapedArray(int32[], weak_type=True)). WARNING:jax._src.dispatch:Finished XLA compilation of cond in 0.013795614242553711 sec ``` Output in JAX v0.4.1: ``` 0.4.1 0.4.1 WARNING:jax._src.dispatch:Finished tracing + transforming jit(cond) in 0.0008955001831054688 sec WARNING:jax.interpreters.pxla:Compiling prim_fun (139864230252032) for with global shapes and types (ShapedArray(int32[]), ShapedArray(int32[], weak_type=True)). Argument mapping: (OpShardingSharding({replicated}), OpShardingSharding({replicated})). WARNING:jax._src.dispatch:Finished XLA compilation of jit(cond) in 0.02367258071899414 sec WARNING:jax._src.dispatch:Finished tracing + transforming jit(cond) in 0.0006630420684814453 sec WARNING:jax.interpreters.pxla:Compiling prim_fun (139864230252112) for with global shapes and types (ShapedArray(int32[]), ShapedArray(int32[], weak_type=True)). Argument mapping: (OpShardingSharding({replicated}), OpShardingSharding({replicated})). WARNING:jax._src.dispatch:Finished XLA compilation of jit(cond) in 0.023768901824951172 sec WARNING:jax._src.dispatch:Finished tracing + transforming jit(cond) in 0.0007023811340332031 sec WARNING:jax.interpreters.pxla:Compiling prim_fun (139864230322960) for with global shapes and types (ShapedArray(int32[]), ShapedArray(int32[], weak_type=True)). Argument mapping: (OpShardingSharding({replicated}), OpShardingSharding({replicated})). WARNING:jax._src.dispatch:Finished XLA compilation of jit(cond) in 0.018657445907592773 sec WARNING:jax._src.dispatch:Finished tracing + transforming jit(cond) in 0.000583648681640625 sec WARNING:jax.interpreters.pxla:Compiling prim_fun (139864230251872) for with global shapes and types (ShapedArray(int32[]), ShapedArray(int32[], weak_type=True)). Argument mapping: (OpShardingSharding({replicated}), OpShardingSharding({replicated})). WARNING:jax._src.dispatch:Finished XLA compilation of jit(cond) in 0.0319209098815918 sec WARNING:jax._src.dispatch:Finished tracing + transforming jit(cond) in 0.01588129997253418 sec WARNING:jax.interpreters.pxla:Compiling prim_fun (139864230325360) for with global shapes and types (ShapedArray(int32[]), ShapedArray(int32[], weak_type=True)). Argument mapping: (OpShardingSharding({replicated}), OpShardingSharding({replicated})). WARNING:jax._src.dispatch:Finished XLA compilation of jit(cond) in 0.020395517349243164 sec WARNING:jax._src.dispatch:Finished tracing + transforming jit(cond) in 0.0006628036499023438 sec WARNING:jax.interpreters.pxla:Compiling prim_fun (139864230323680) for with global shapes and types (ShapedArray(int32[]), ShapedArray(int32[], weak_type=True)). Argument mapping: (OpShardingSharding({replicated}), OpShardingSharding({replicated})). 
WARNING:jax._src.dispatch:Finished XLA compilation of jit(cond) in 0.018640756607055664 sec WARNING:jax._src.dispatch:Finished tracing + transforming jit(cond) in 0.0006759166717529297 sec WARNING:jax.interpreters.pxla:Compiling prim_fun (139864230322400) for with global shapes and types (ShapedArray(int32[]), ShapedArray(int32[], weak_type=True)). Argument mapping: (OpShardingSharding({replicated}), OpShardingSharding({replicated})). WARNING:jax._src.dispatch:Finished XLA compilation of jit(cond) in 0.017685890197753906 sec WARNING:jax._src.dispatch:Finished tracing + transforming jit(cond) in 0.0006914138793945312 sec WARNING:jax.interpreters.pxla:Compiling prim_fun (139864230369536) for with global shapes and types (ShapedArray(int32[]), ShapedArray(int32[], weak_type=True)). Argument mapping: (OpShardingSharding({replicated}), OpShardingSharding({replicated})). WARNING:jax._src.dispatch:Finished XLA compilation of jit(cond) in 0.018553495407104492 sec WARNING:jax._src.dispatch:Finished tracing + transforming jit(cond) in 0.0007042884826660156 sec WARNING:jax.interpreters.pxla:Compiling prim_fun (139864230370576) for with global shapes and types (ShapedArray(int32[]), ShapedArray(int32[], weak_type=True)). Argument mapping: (OpShardingSharding({replicated}), OpShardingSharding({replicated})). WARNING:jax._src.dispatch:Finished XLA compilation of jit(cond) in 0.03286623954772949 sec WARNING:jax._src.dispatch:Finished tracing + transforming jit(cond) in 0.0027599334716796875 sec WARNING:jax.interpreters.pxla:Compiling prim_fun (139864230367856) for with global shapes and types (ShapedArray(int32[]), ShapedArray(int32[], weak_type=True)). Argument mapping: (OpShardingSharding({replicated}), OpShardingSharding({replicated})). WARNING:jax._src.dispatch:Finished XLA compilation of jit(cond) in 0.02003955841064453 sec ``` Assigning to @yashk2810 because it's possible this is somehow related to the jax.Array change
Actually, I just checked and `config.jax_array` has no effect on this. Maybe try setting `jax_experimental_subjaxpr_lowering_cache` config to True and see what happens? `jax_experimental_subjaxpr_lowering_cache` doesn't seem to affect it either way. The root cause seems to be #12877, which added these lines https://github.com/google/jax/blob/a37121e19512ea5ee0ad523eda39fa5bbd8c5442/jax/_src/lax/control_flow/conditionals.py#L258-L261 Commenting them out fixes the issue.
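A minimal workaround sketch, not taken from the thread above: it relies only on JAX's standard compilation cache, by hoisting the `lax.cond` call into a single jitted function created once, so the branches are traced during one compilation and the loop no longer re-lowers `cond` on every call.

```python
import jax
from jax import lax

def f(x):
  return x + 1

def g(x):
  return x + 2

@jax.jit
def pick(x):
  # f and g are traced once, when `pick` is first compiled for this input type.
  return lax.cond(x, f, g, x)

for x in range(10):
  pick(x)  # compiles on the first call only; later calls reuse the cached executable
```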
2023-01-18T17:51:01
google/jax
14,175
google__jax-14175
[ "14171" ]
d2845f264f283897ad9cb8cf742fcd41bd303f5b
diff --git a/docs/autodidax.py b/docs/autodidax.py --- a/docs/autodidax.py +++ b/docs/autodidax.py @@ -1567,7 +1567,8 @@ def xla_callable(hashable_jaxpr: IDHashable, xla_params = _xla_params(c, in_avals) outs = jaxpr_subcomp(c, jaxpr, xla_consts + xla_params) out = xops.Tuple(c, outs) - compiled = xb.get_backend(None).compile(c.build(out)) + compiled = xb.get_backend(None).compile( + xc._xla.mlir.xla_computation_to_mlir_module(c.build(out))) return partial(execute_compiled, compiled, [v.aval for v in jaxpr.outs]) def _xla_consts(c: xe.XlaBuilder, consts: List[Any]) -> List[xe.XlaOp]:
diff --git a/tests/api_test.py b/tests/api_test.py --- a/tests/api_test.py +++ b/tests/api_test.py @@ -20,7 +20,9 @@ import enum from functools import partial import inspect +import importlib import operator +import os import platform import re import subprocess @@ -9509,5 +9511,17 @@ def test_print_environment_info(self, return_string): assert f"jaxlib: {lib.version_str}" in result assert f"numpy: {np.__version__}" in result +class AutodidaxTest(jtu.JaxTestCase): + def test_autodidax_smoketest(self): + autodidax_file = os.path.join( + os.path.dirname(os.path.dirname(__file__)), + 'docs', + 'autodidax.py') + if not os.path.exists(autodidax_file): + self.skipTest("Cannot locate autodidax.py") + spec = importlib.util.spec_from_file_location('autodidax', 'docs/autodidax.py') + autodidax_module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(autodidax_module) + if __name__ == '__main__': absltest.main(testLoader=jtu.JaxTestLoader())
autodidax fails with jaxlib 0.4.2 After updating to jaxlib 0.4.2, `python docs/autodidax.py` fails on this line: https://github.com/google/jax/blob/d2845f264f283897ad9cb8cf742fcd41bd303f5b/docs/autodidax.py#L1570 with the following traceback: ```pytb Traceback (most recent call last): File "docs/autodidax.py", line 1675, in <module> z = f(3., 4.) # 'tracing!' prints the first time File "docs/autodidax.py", line 1510, in f_jitted outs = bind(xla_call_p, *consts, *args, jaxpr=jaxpr, num_consts=len(consts)) File "docs/autodidax.py", line 327, in bind outs = top_trace.process_primitive(prim, tracers, params) File "docs/autodidax.py", line 412, in process_primitive return impl_rules[primitive](*tracers, **params) File "docs/autodidax.py", line 1554, in xla_call_impl execute = xla_callable(IDHashable(jaxpr), hashable_consts) File "docs/autodidax.py", line 1570, in xla_callable compiled = xb.get_backend(None).compile(c.build(out)) TypeError: compile(): incompatible function arguments. The following argument types are supported: 1. (self: jaxlib.xla_extension.Client, computation: str, compile_options: jaxlib.xla_extension.CompileOptions = <jaxlib.xla_extension.CompileOptions object at 0x7f84b1946330>, host_callbacks: List[capsule] = []) -> StatusOr[xla::PyLoadedExecutable] Invoked with: <jaxlib.xla_extension.Client object at 0x7f84b3a6cbf0>, <jaxlib.xla_extension.XlaComputation object at 0x7f84b3a727f0> ```
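For context, a minimal sketch of the same fix outside autodidax, assuming the import paths autodidax itself uses (`jax._src.xla_bridge`, `jax._src.lib.xla_client`); the conversion helper is the one introduced in the patch above. In jaxlib 0.4.2, `Client.compile` expects an MLIR module rather than an `XlaComputation`, hence the extra conversion step.

```python
from jax._src import xla_bridge as xb       # assumed import path, as in autodidax
from jax._src.lib import xla_client as xc   # assumed import path, as in autodidax

def compile_built(built_computation):
  # `built_computation` is the XlaComputation returned by XlaBuilder.build(...).
  # Convert it to an MLIR module, the form Client.compile accepts in jaxlib 0.4.2.
  mlir_module = xc._xla.mlir.xla_computation_to_mlir_module(built_computation)
  return xb.get_backend(None).compile(mlir_module)
```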
2023-01-27T01:24:03
google/jax
14,235
google__jax-14235
[ "14193" ]
957adbd5ead4bd57c981cdaf1241066cd5e3136c
diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py --- a/jax/_src/numpy/lax_numpy.py +++ b/jax/_src/numpy/lax_numpy.py @@ -3632,23 +3632,24 @@ def unpackbits(a, axis: Optional[int] = None, count=None, bitorder='big'): @_wraps(np.take, skip_params=['out'], lax_description=""" -The JAX version adds several extra parameters, described below, which are forwarded -to :func:`jax.lax.gather` for finer control over indexing.""", +By default, JAX assumes that all indices are in-bounds. Alternative out-of-bound +index semantics can be specified via the ``mode`` parameter (see below). +""", extra_params=""" mode : string, default="fill" Out-of-bounds indexing mode. The default mode="fill" returns invalid values - (e.g. NaN) for out-of bounds indices. See :attr:`jax.numpy.ndarray.at` for - more discussion of out-of-bounds indexing in JAX. + (e.g. NaN) for out-of bounds indices (see also ``fill_value`` below). + For more discussion of mode options, see :attr:`jax.numpy.ndarray.at`. +fill_value : optional + The fill value to return for out-of-bounds slices when mode is 'fill'. Ignored + otherwise. Defaults to NaN for inexact types, the largest negative value for + signed types, the largest positive value for unsigned types, and True for booleans. unique_indices : bool, default=False If True, the implementation will assume that the indices are unique, which can result in more efficient execution on some backends. indices_are_sorted : bool, default=False If True, the implementation will assume that the indices are sorted in ascending order, which can lead to more efficient execution on some backends. -fill_value : optional - The fill value to return for out-of-bounds slices when mode is 'fill'. Ignored - otherwise. Defaults to NaN for inexact types, the largest negative value for - signed types, the largest positive value for unsigned types, and True for booleans. """) def take(a, indices, axis: Optional[int] = None, out=None, mode=None, unique_indices=False, indices_are_sorted=False, fill_value=None): @@ -5168,9 +5169,8 @@ class _IndexUpdateHelper: in which conflicting updates are applied is implementation-defined and may be nondeterministic (e.g., due to concurrency on some hardware platforms). - By default, JAX assumes that all indices are in-bounds. There is experimental - support for giving more precise semantics to out-of-bounds indexed accesses, - via the ``mode`` parameter (see below). + By default, JAX assumes that all indices are in-bounds. Alternative out-of-bound + index semantics can be specified via the ``mode`` parameter (see below). Arguments ---------
jax.numpy.take returns large negative number when given out of range index ### Description If I use `jax.numpy.take` to try to retrieve an index out of bounds, I get a large negative number: ```python import jax.numpy as jnp arr = jnp.arange(5) print(jnp.take(arr, 10)) # -2147483648 ``` I know that out-of-bounds indexing doesn't error for performance/implementation reasons, but is the value `-2147483648` really a great choice? I think it might be a bit unexpected, e.g. the [documentation](https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#out-of-bounds-indexing) says: > the index is clamped to the bounds of the array since something must be returned But it seems that the index here isn't being clamped. ### What jax/jaxlib version are you using? jax v0.4.2, jaxlib v0.4.2 ### Which accelerator(s) are you using? TPU ### Additional system info Python 3.8.10, TPU VM v3-8 ### NVIDIA GPU info _No response_
Thanks for the report – this is expected behavior, though I understand it's somewhat unintuitive. There's some discussion of why JAX doesn't raise errors on out-of-bound indices here: http://go/jax-sharp-bits#out-of-bounds-indexing So in this case, JAX must return some value of the specified dtype to indicate that the index was out-of-bounds. For float arrays, JAX will return `NaN`. But for int arrays, there is no equivalent of a `NaN` value, so for lack of a better choice JAX returns the minimum representable value. You can control this behavior using the `fill_value` and `mode` arguments, as discussed in the [`jnp.take` docstring](http://go/jax-docs/_autosummary/jax.numpy.take.html): > - mode (string, default="fill") – Out-of-bounds indexing mode. The default mode=”fill” returns invalid values (e.g. NaN) for out-of bounds indices. See [jax.numpy.ndarray.at](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.ndarray.at.html#jax.numpy.ndarray.at) for more discussion of out-of-bounds indexing in JAX. > ... > - fill_value (optional) – The fill value to return for out-of-bounds slices when mode is 'fill'. Ignored otherwise. Defaults to NaN for inexact types, the largest negative value for signed types, the largest positive value for unsigned types, and True for booleans. I see, thanks for the explanation. I missed those docs, as I just read the main docstring anticipating it would discuss the out-of-bounds behaviour. Maybe we could add something commenting on this behaviour in the main docstring? Something like the note in the docstring for [`jax.numpy.ndarray.at`](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.ndarray.at.html#jax.numpy.ndarray.at): > By default, JAX assumes that all indices are in-bounds. There is experimental support for giving more precise semantics to out-of-bounds indexed accesses, via the mode parameter (see below).
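A short sketch of the options described above, using only the standard `jnp.take` keywords; the expected results follow the defaults quoted from the docstring.

```python
import jax.numpy as jnp

arr = jnp.arange(5)

jnp.take(arr, 10)                              # -2147483648: default fill for int32
jnp.take(arr, 10, mode="fill", fill_value=-1)  # -1: caller-chosen out-of-bounds marker
jnp.take(arr, 10, mode="clip")                 # 4: index clamped to the last valid position
jnp.take(arr.astype(float), 10)                # nan: default fill for inexact dtypes
```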
2023-01-31T23:54:48
google/jax
14,355
google__jax-14355
[ "14339" ]
55c2b6dad692381083fd03da41718c9fd0d36668
diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py --- a/jax/_src/numpy/lax_numpy.py +++ b/jax/_src/numpy/lax_numpy.py @@ -2673,11 +2673,35 @@ def wrapper(*args, **kwargs): return tuple(asarray(x) for x in f(*args, **kwargs)) return wrapper -tril_indices = _wrap_indices_function(np.tril_indices) -triu_indices = _wrap_indices_function(np.triu_indices) mask_indices = _wrap_indices_function(np.mask_indices) +def _triu_size(n, m, k): + if k < 0: + return n * m - _triu_size(m, n, (1 - k)) + elif k >= m: + return 0 + else: + mk = _min(n, m - k) + return mk * (mk + 1) // 2 + mk * (m - k - mk) + + +@_wraps(np.triu_indices) +def triu_indices(n, k, m=None): + n = core.concrete_or_error(operator.index, n, "n argument of jnp.triu_indices") + k = core.concrete_or_error(operator.index, k, "k argument of jnp.triu_indices") + m = n if m is None else core.concrete_or_error(operator.index, m, "m argument of jnp.triu_indices") + return nonzero(triu(ones((n, m)), k=k), size=_triu_size(n, m, k)) + + +@_wraps(np.tril_indices) +def tril_indices(n, k, m=None): + n = core.concrete_or_error(operator.index, n, "n argument of jnp.triu_indices") + k = core.concrete_or_error(operator.index, k, "k argument of jnp.triu_indices") + m = n if m is None else core.concrete_or_error(operator.index, m, "m argument of jnp.triu_indices") + return nonzero(tril(ones((n, m)), k=k), size=_triu_size(m, n, -k)) + + @_wraps(np.triu_indices_from) def triu_indices_from(arr: ArrayLike, k: int = 0) -> Tuple[Array]: arr_shape = shape(arr)
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -1992,9 +1992,9 @@ def testTriLU(self, dtype, shape, op, k): self._CompileAndCheck(jnp_fun, args_maker) @jtu.sample_product( - n=range(1, 5), - k=[-1, 0, 1], - m=range(1, 5), + n=range(5), + k=range(-3, 3), + m=[None, *range(5)], ) def testTrilIndices(self, n, k, m): np_fun = lambda n, k, m: np.tril_indices(n, k=k, m=m) @@ -2003,9 +2003,9 @@ def testTrilIndices(self, n, k, m): self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker) @jtu.sample_product( - n=range(1, 5), - k=[-1, 0, 1], - m=range(1, 5), + n=range(5), + k=range(-3, 3), + m=[None, *range(5)], ) def testTriuIndices(self, n, k, m): np_fun = lambda n, k, m: np.triu_indices(n, k=k, m=m)
Implement `jnp.tril_indices`, `jnp.triu_indices` etc. in JAX. Implement `jnp.tril_indices` etc. using XLA primitives instead of wrapping `numpy` implementations, so we don't materialise their output while staging.
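A small parity-check sketch, assuming the `jnp.triu_indices` from the patch above (built on `triu(ones(...))` and `nonzero` with a statically computed `size`): the values should match NumPy, and because `n`, `k`, and `m` are concrete Python ints the call also works under `jit` without materializing a NumPy result at trace time.

```python
import numpy as np
import jax
import jax.numpy as jnp

n, m, k = 4, 5, -1
np_rows, np_cols = np.triu_indices(n, k=k, m=m)
rows, cols = jnp.triu_indices(n, k=k, m=m)
assert np.array_equal(rows, np_rows) and np.array_equal(cols, np_cols)

# The output size is known statically, so the indices can also be produced inside jit.
rows_j, cols_j = jax.jit(lambda: jnp.triu_indices(n, k=k, m=m))()
```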
2023-02-08T17:54:24
google/jax
14,365
google__jax-14365
[ "14258" ]
2d47921a3485fe5752342c7d126fe898f20f7ef3
diff --git a/jax/_src/numpy/reductions.py b/jax/_src/numpy/reductions.py --- a/jax/_src/numpy/reductions.py +++ b/jax/_src/numpy/reductions.py @@ -72,7 +72,7 @@ def _reduction(a: ArrayLike, name: str, np_fun: Any, op: ReductionOp, init_val: axis: Axis = None, dtype: DTypeLike = None, out: None = None, keepdims: bool = False, initial: Optional[ArrayLike] = None, where_: Optional[ArrayLike] = None, - parallel_reduce: Optional[Callable[..., ArrayLike]] = None, + parallel_reduce: Optional[Callable[..., Array]] = None, promote_integers: bool = False) -> Array: bool_op = bool_op or op # Note: we must accept out=None as an argument, because numpy reductions delegate to @@ -131,7 +131,12 @@ def _reduction(a: ArrayLike, name: str, np_fun: Any, op: ReductionOp, init_val: else: result = lax.reduce(a, init_val, op, dims) if initial is not None: - result = op(lax.convert_element_type(initial, _asarray(a).dtype), result) + # TODO(jakevdp) require initial to be a scalar in order to match the numpy API. + initial_arr = lax.convert_element_type(initial, _asarray(a).dtype) + if lax.broadcast_shapes(initial_arr.shape, result.shape) != result.shape: + raise ValueError(f"initial value has invalid shape {initial_arr.shape} " + f"for reduction with output shape {result.shape}") + result = op(initial_arr, result) if keepdims: result = lax.expand_dims(result, pos_dims) return lax.convert_element_type(result, dtype or result_dtype)
diff --git a/tests/lax_numpy_reducers_test.py b/tests/lax_numpy_reducers_test.py --- a/tests/lax_numpy_reducers_test.py +++ b/tests/lax_numpy_reducers_test.py @@ -282,6 +282,16 @@ def np_fun(x): self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, tol=tol) self._CompileAndCheck(jnp_fun, args_maker, rtol=tol, atol=tol) + @jtu.sample_product(rec = JAX_REDUCER_INITIAL_RECORDS) + def testReducerBadInitial(self, rec): + jnp_op = getattr(jnp, rec.name) + arr = jnp.ones((2, 3, 4)) + initial = jnp.zeros((1, 2, 3)) + msg = (r"initial value has invalid shape \(1, 2, 3\) " + r"for reduction with output shape \(2, 3\)") + with self.assertRaisesRegex(ValueError, msg): + jnp_op(arr, axis=-1, initial=initial) + @parameterized.parameters(itertools.chain.from_iterable( jtu.sample_product_testcases( [dict(name=rec.name, rng_factory=rec.rng_factory, inexact=rec.inexact)],
jnp.max/jnp.min allow non-scalar value for `initial`. ### Description Hello! I would like to report a sort of inverse feature request. Our code currently relies on the following logic working: ```python def f(a, b): return jnp.max(a, axis=-1, initial=b, where=jnp.isfinite(a)) >> a = jnp.full((5,), fill_value=2) >> b = jnp.array([1, 2, 3, 4, 5]) >> f(a, b) Array([2, 2, 3, 4, 5], dtype=int32) ``` _Technically_ this shouldn't work since the `initial` argument of `jnp.max` is supposed to be a scalar. Numpy throws an exception `ValueError: Input object to FillWithScalar is not a scalar`, but it appears that JAX's implementation is more general and works as intended. Can we continue to rely on this behavior? ### What jax/jaxlib version are you using? 0.4.2 ### Which accelerator(s) are you using? GPU ### Additional system info _No response_ ### NVIDIA GPU info _No response_
This behavior breaks with jit pjit merge! But magically works with the old jit. Actually this works with both jit and pjit. The pjit merge breaks the case when `initial` has a trailing dimension of 1: ```python def f(a, b): return jnp.max(a, axis=-1, initial=b, where=jnp.isfinite(a)) a = jnp.full((4, 5, 3), fill_value=2) b = jnp.zeros((4, 5, 1)) jax.vmap(f)(a, b) # Works with old jit, but fails with pjit. ``` The exception is `TypeError: max: arrays must have same number of dimensions, got (5, 1), (5,).`. I think this is an unintended implementation detail, and the behavior is untested so I would probably not rely on it. In fact, your original example looks like a bug to me, because the reduced array is the wrong size, so I'd lean toward checking the shape on `initial` and raising an error if it's not a scalar.
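A sketch of the stricter behavior the comment above leans toward, matching the check and error message added in this PR's patch and test_patch; the shapes are the same ones used in the new test.

```python
import jax.numpy as jnp

arr = jnp.ones((2, 3, 4))
initial = jnp.zeros((1, 2, 3))
jnp.max(arr, axis=-1, initial=initial)
# ValueError: initial value has invalid shape (1, 2, 3) for reduction with output shape (2, 3)
```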
2023-02-08T19:57:16
google/jax
14,446
google__jax-14446
[ "14258" ]
c2b7c5f132fc77ce90889f8d7bb459f9231e4b37
diff --git a/jax/_src/numpy/reductions.py b/jax/_src/numpy/reductions.py --- a/jax/_src/numpy/reductions.py +++ b/jax/_src/numpy/reductions.py @@ -131,11 +131,10 @@ def _reduction(a: ArrayLike, name: str, np_fun: Any, op: ReductionOp, init_val: else: result = lax.reduce(a, init_val, op, dims) if initial is not None: - # TODO(jakevdp) require initial to be a scalar in order to match the numpy API. initial_arr = lax.convert_element_type(initial, _asarray(a).dtype) - if lax.broadcast_shapes(initial_arr.shape, result.shape) != result.shape: - raise ValueError(f"initial value has invalid shape {initial_arr.shape} " - f"for reduction with output shape {result.shape}") + if initial_arr.shape != (): + raise ValueError("initial value must be a scalar. " + f"Got array of shape {initial_arr.shape}") result = op(initial_arr, result) if keepdims: result = lax.expand_dims(result, pos_dims)
diff --git a/tests/lax_numpy_reducers_test.py b/tests/lax_numpy_reducers_test.py --- a/tests/lax_numpy_reducers_test.py +++ b/tests/lax_numpy_reducers_test.py @@ -287,8 +287,7 @@ def testReducerBadInitial(self, rec): jnp_op = getattr(jnp, rec.name) arr = jnp.ones((2, 3, 4)) initial = jnp.zeros((1, 2, 3)) - msg = (r"initial value has invalid shape \(1, 2, 3\) " - r"for reduction with output shape \(2, 3\)") + msg = r"initial value must be a scalar. Got array of shape \(1, 2, 3\)" with self.assertRaisesRegex(ValueError, msg): jnp_op(arr, axis=-1, initial=initial)
jnp.max/jnp.min allow non-scalar value for `initial`. ### Description Hello! I would like to report a sort of inverse feature request. Our code currently relies on the following logic working: ```python def f(a, b): return jnp.max(a, axis=-1, initial=b, where=jnp.isfinite(a)) >> a = jnp.full((5,), fill_value=2) >> b = jnp.array([1, 2, 3, 4, 5]) >> f(a, b) Array([2, 2, 3, 4, 5], dtype=int32) ``` _Technically_ this shouldn't work since the `initial` argument of `jnp.max` is supposed to be a scalar. Numpy throws an exception `ValueError: Input object to FillWithScalar is not a scalar`, but it appears that JAX's implementation is more general and works as intended. Can we continue to rely on this behavior? ### What jax/jaxlib version are you using? 0.4.2 ### Which accelerator(s) are you using? GPU ### Additional system info _No response_ ### NVIDIA GPU info _No response_
This behavior breaks with jit pjit merge! But magically works with the old jit. Actually this works with both jit and pjit. The pjit merge breaks the case when `initial` has a trailing dimension of 1: ```python def f(a, b): return jnp.max(a, axis=-1, initial=b, where=jnp.isfinite(a)) a = jnp.full((4, 5, 3), fill_value=2) b = jnp.zeros((4, 5, 1)) jax.vmap(f)(a, b) # Works with old jit, but fails with pjit. ``` The exception is `TypeError: max: arrays must have same number of dimensions, got (5, 1), (5,).`. I think this is an unintended implementation detail, and the behavior is untested so I would probably not rely on it. In fact, your original example looks like a bug to me, because the reduced array is the wrong size, so I'd lean toward checking the shape on `initial` and raising an error if it's not a scalar. So I think we're going to move toward explicitly requiring `initial` to be a scalar (see https://github.com/google/jax/pull/14365#issuecomment-1426538150) You'd still be able to effectively use broadcasted initial values by wrapping your call in `vmap`, and passing a mapped value to the `initial` argument. Leaving this issue open to track that TODO.
2023-02-13T18:23:03