repo | pull_number | instance_id | issue_numbers | base_commit | patch | test_patch | problem_statement | hints_text | created_at
---|---|---|---|---|---|---|---|---|---
google/jax | 14,501 | google__jax-14501 | [
"14499"
]
| c6a99b699e2935c51060a2b3437ca3d694496e12 | diff --git a/jax/_src/lax/lax.py b/jax/_src/lax/lax.py
--- a/jax/_src/lax/lax.py
+++ b/jax/_src/lax/lax.py
@@ -595,16 +595,26 @@ def bitcast_convert_type(operand: ArrayLike, new_dtype: DTypeLike) -> Array:
Wraps XLA's `BitcastConvertType
<https://www.tensorflow.org/xla/operation_semantics#bitcastconverttype>`_
- operator, which performs a bit cast from one type to another. The bitwidth
- of the source and destination types must match.
+ operator, which performs a bit cast from one type to another.
+
+ The output shape depends on the size of the input and output dtypes with
+ the following logic::
+
+ if new_dtype.itemsize == operand.dtype.itemsize:
+ output_shape = operand.shape
+ if new_dtype.itemsize < operand.dtype.itemsize:
+ output_shape = (*operand.shape, operand.dtype.itemsize // new_dtype.itemsize)
+ if new_dtype.itemsize > operand.dtype.itemsize:
+ assert operand.shape[-1] * operand.dtype.itemsize == new_dtype.itemsize
+ output_shape = operand.shape[:-1]
Args:
operand: an array or scalar value to be cast
new_dtype: the new type. Should be a NumPy type.
Returns:
- An array with the same shape as `operand`, bitcast elementwise to
- `new_dtype`.
+ An array of shape `output_shape` (see above) and type `new_dtype`,
+ constructed from the same bits as operand.
"""
new_dtype = dtypes.canonicalize_dtype(new_dtype)
return bitcast_convert_type_p.bind(operand, new_dtype=new_dtype)
@@ -2418,15 +2428,26 @@ def _convert_element_type_lower(ctx, operand, *, new_dtype, weak_type):
def _bitcast_convert_type_shape_rule(operand, *, new_dtype):
- return operand.shape
+ old_dtype = dtypes.canonicalize_dtype(operand.dtype)
+ new_dtype = dtypes.canonicalize_dtype(new_dtype)
+
+ if old_dtype.itemsize == new_dtype.itemsize:
+ return operand.shape
+ elif old_dtype.itemsize > new_dtype.itemsize:
+ return (*operand.shape, old_dtype.itemsize // new_dtype.itemsize)
+ elif operand.shape[-1] * old_dtype.itemsize != new_dtype.itemsize:
+ raise ValueError(
+ f"Attempting to convert array of shape {operand.shape} "
+ f"from {str(old_dtype)} of size {old_dtype.itemsize} "
+ f"to {str(new_dtype)} of size {new_dtype.itemsize}, "
+ f"but {operand.shape[-1]} * {old_dtype.itemsize} != {new_dtype.itemsize}")
+ return operand.shape[:-1]
def _bitcast_convert_type_dtype_rule(operand, *, new_dtype):
old_dtype = dtypes.canonicalize_dtype(operand.dtype)
if dtypes.issubdtype(old_dtype, np.bool_) or dtypes.issubdtype(old_dtype, np.complexfloating):
if old_dtype != new_dtype:
raise TypeError(f"`bitcast_convert_type` for operand type ({old_dtype}) cannot have different destination type ({new_dtype})")
- if np.dtype(old_dtype).itemsize != np.dtype(new_dtype).itemsize:
- raise TypeError(f"`bitcast_convert_type` for operand type ({old_dtype}) must have destination type ({new_dtype}) of same size.")
return new_dtype
bitcast_convert_type_p = standard_primitive(
| diff --git a/tests/lax_test.py b/tests/lax_test.py
--- a/tests/lax_test.py
+++ b/tests/lax_test.py
@@ -281,16 +281,32 @@ def testConvertElementTypeAgainstNumpy(self, from_dtype, to_dtype):
self._CheckAgainstNumpy(numpy_op, op, args_maker)
@jtu.sample_product(
- [dict(from_dtype=from_dtype, to_dtype=to_dtype)
- for from_dtype, to_dtype in itertools.product(
- [np.float32, np.int32, "float32", "int32"], repeat=2)],
+ from_dtype=jtu.dtypes.all_floating + jtu.dtypes.all_integer + jtu.dtypes.all_unsigned,
+ to_dtype=jtu.dtypes.all_floating + jtu.dtypes.all_integer + jtu.dtypes.all_unsigned,
+ shape = [(), (2,), (2, 3)]
)
- def testBitcastConvertType(self, from_dtype, to_dtype):
+ def testBitcastConvertType(self, from_dtype, to_dtype, shape):
rng = jtu.rand_default(self.rng())
- args_maker = lambda: [rng((2, 3), from_dtype)]
+ itemsize_in = np.dtype(from_dtype).itemsize
+ itemsize_out = np.dtype(to_dtype).itemsize
+ if itemsize_in < itemsize_out:
+ shape = (*shape, itemsize_out // itemsize_in)
+ args_maker = lambda: [rng(shape, from_dtype)]
op = lambda x: lax.bitcast_convert_type(x, to_dtype)
self._CompileAndCheck(op, args_maker)
+ # Test the shape and dtype of the output. We avoid testing the values here
+ # because the bitwise representation may vary from platform to platform.
+ out = op(*args_maker())
+ if itemsize_in == itemsize_out:
+ expected_shape = shape
+ elif itemsize_in < itemsize_out:
+ expected_shape = shape[:-1]
+ else:
+ expected_shape = (*shape, itemsize_in // itemsize_out)
+ self.assertEqual(out.dtype, to_dtype)
+ self.assertEqual(out.shape, expected_shape)
+
@jtu.sample_product(
[dict(from_dtype=from_dtype, to_dtype=to_dtype)
for from_dtype, to_dtype in itertools.product(
| bitcast_convert_type does not support cross-size casts
### Description
The [current definition](https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.bitcast_convert_type.html) of `jax.lax.bitcast_convert_type` requires that the source and destination types have the same size. The [underlying XLA op](https://www.tensorflow.org/xla/operation_semantics#bitcastconverttype) does not have this requirement and has well-defined semantics for differently-sized types.
### What jax/jaxlib version are you using?
_No response_
### Which accelerator(s) are you using?
TPU
### Additional system info
_No response_
### NVIDIA GPU info
_No response_
| 2023-02-15T22:56:42 |
|
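A small sketch of the shape rule described in the patch above; it assumes a jax version that already includes the cross-size `bitcast_convert_type` support:
```python
import jax.numpy as jnp
from jax import lax

x = jnp.zeros((2, 3), jnp.float32)
lax.bitcast_convert_type(x, jnp.int32).shape    # (2, 3): same itemsize, shape unchanged
lax.bitcast_convert_type(x, jnp.uint8).shape    # (2, 3, 4): smaller dtype adds a trailing axis of size 4 // 1

y = jnp.zeros((2, 2), jnp.uint16)
lax.bitcast_convert_type(y, jnp.float32).shape  # (2,): the last axis (2 * 2 bytes) is absorbed into one float32
```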
google/jax | 14,630 | google__jax-14630 | [
"14623"
]
| 661c9e14c01551403e006574169671922bca8ee7 | diff --git a/jax/experimental/sparse/bcoo.py b/jax/experimental/sparse/bcoo.py
--- a/jax/experimental/sparse/bcoo.py
+++ b/jax/experimental/sparse/bcoo.py
@@ -1086,11 +1086,43 @@ def bcoo_dot_general_sampled(A: Array, B: Array, indices: Array, *, dimension_nu
return bcoo_dot_general_sampled_p.bind(A, B, indices,
dimension_numbers=(cdims, bdims))
+def _bcoo_dot_general_sampled_slow(A, B, indices, *, dimension_numbers):
+ return _bcoo_extract(indices, lax.dot_general(A, B, dimension_numbers=dimension_numbers))
+
+def _bcoo_dot_general_sampled_simple(A, B, indices, *, dimension_numbers):
+ (lhs_contract, rhs_contract), (lhs_batch, rhs_batch) = dimension_numbers
+ assert not (lhs_contract or rhs_contract or lhs_batch or rhs_batch)
+ assert A.ndim == B.ndim == 1
+ n_batch = indices.ndim - 2
+ n_sparse = indices.shape[-1]
+ nse = indices.shape[-2]
+ assert n_batch + n_sparse == 2
+ if n_batch == 0:
+ return A[indices[:, 0]] * B[indices[:, 1]]
+ elif n_batch == 1:
+ return A[:, None] * B[indices[..., 0]]
+ elif n_batch == 2:
+ out = A[:, None, None] * B[None, :, None]
+ return lax.broadcast_in_dim(out, (len(A), len(B), nse), (0, 1, 2))
+ else:
+ raise ValueError("too many batch dimensions.")
+
@bcoo_dot_general_sampled_p.def_impl
def _bcoo_dot_general_sampled_impl(A, B, indices, *, dimension_numbers):
- # TODO(jakevdp): use a more efficient implementation that avoids the full dot product.
- dense_result = lax.dot_general(A, B, dimension_numbers=dimension_numbers)
- return _bcoo_extract(indices, dense_result)
+ A = jnp.asarray(A)
+ B = jnp.asarray(B)
+ indices = jnp.asarray(indices)
+ (lhs_contract, rhs_contract), (lhs_batch, rhs_batch) = dimension_numbers
+ n_batch = indices.ndim - 2
+ n_sparse = indices.shape[-1]
+
+ # TODO(jakevdp): add fast approach for more general cases.
+ if (not (lhs_contract or rhs_contract or lhs_batch or rhs_batch)
+ and A.ndim == B.ndim == 1 and n_sparse + n_batch == 2):
+ return _bcoo_dot_general_sampled_simple(A, B, indices, dimension_numbers=dimension_numbers)
+
+ return _bcoo_dot_general_sampled_slow(A, B, indices, dimension_numbers=dimension_numbers)
+
@bcoo_dot_general_sampled_p.def_abstract_eval
def _bcoo_dot_general_sampled_abstract_eval(A, B, indices, *, dimension_numbers):
| diff --git a/tests/sparse_test.py b/tests/sparse_test.py
--- a/tests/sparse_test.py
+++ b/tests/sparse_test.py
@@ -1294,6 +1294,30 @@ def sparse_fun(lhs, rhs, indices):
# TODO(jakevdp) fix forward-mode autodiff & enable tests here.
self._CheckGradsSparse(dense_fun, sparse_fun, args_maker, modes=['rev'], argnums=[0, 1])
+ @jtu.sample_product(
+ xshape=[(3,), (5,)],
+ yshape=[(3,), (5,)],
+ dtype=jtu.dtypes.floating + jtu.dtypes.complex,
+ n_batch=[0, 1, 2],
+ )
+ def test_bcoo_dot_general_sampled_fast(self, xshape, yshape, n_batch, dtype):
+ rng = jtu.rand_default(self.rng())
+ sprng = sptu.rand_bcoo(self.rng(), n_batch=n_batch)
+
+ dimension_numbers = (([], []), ([], []))
+ args_maker = lambda: [rng(xshape, dtype), rng(yshape, dtype),
+ sprng(xshape + yshape, dtype).indices]
+
+ def f1(x, y, indices):
+ mat_full = lax.dot_general(x, y, dimension_numbers=dimension_numbers)
+ return sparse_bcoo._bcoo_extract(indices, mat_full)
+
+ def f2(x, y, indices):
+ return sparse.bcoo_dot_general_sampled(x, y, indices, dimension_numbers=dimension_numbers)
+
+ self._CheckAgainstNumpy(f1, f2, args_maker)
+ self._CompileAndCheck(f2, args_maker)
+
@jtu.sample_product(
[dict(n_batch=n_batch, n_dense=n_dense, lhs_shape=lhs_shape,
rhs_shape=rhs_shape, dimension_numbers=dimension_numbers)
| Differentiating BCOO sparse matrix constructor produces a dense matrix
### Description
Unless I am mistaken, differentiating the BCOO constructor appears to produce dense matrices. Here's an MWE:
```python
import jax
import jax.numpy as jnp
from jax.experimental.sparse import BCOO
n = 1000
m = 777
p = 11
nonzeroes = jnp.stack((jnp.arange(0,m), jnp.arange(p,m+p)), axis=-1)
def quadratic(p):
x1 = nonzeroes[...,0]
x2 = nonzeroes[...,1]
values = (x1 - x2)**2 / p
matrix = BCOO((values, nonzeroes), shape=(n,n), unique_indices=True)
return jnp.ones(n).T @ matrix @ jnp.ones(n)
graph = jax.xla_computation(jax.grad(quadratic))(1.0)
with open("t.dot", "w") as f:
f.write(graph.as_hlo_dot_graph())
```
I've attached a computational graph: [t.pdf](https://github.com/google/jax/files/10804965/t.pdf). The problem is `dot.28` (dark blue) followed by `gather.57`, which appears to assemble a 1000x1000 matrix, then grab the parts of it that are zero.
### What jax/jaxlib version are you using?
0.4.4
### Which accelerator(s) are you using?
N/A
### Additional system info
N/A
### NVIDIA GPU info
N/A
| I don't think any dense matrices are produced on the JAX side of things. For example, here is the jaxpr of your differentiated function, which does not show any `1000,1000` variable being produced:
```python
print(jax.make_jaxpr(jax.grad(quadratic))(1.0))
```
```
{ lambda a:i32[777,2]; b:f32[]. let
c:i32[1] = broadcast_in_dim[broadcast_dimensions=() shape=(1,)] 0
d:i32[777] = gather[
dimension_numbers=GatherDimensionNumbers(offset_dims=(0,), collapsed_slice_dims=(1,), start_index_map=(1,))
fill_value=None
indices_are_sorted=True
mode=GatherScatterMode.PROMISE_IN_BOUNDS
slice_sizes=(777, 1)
unique_indices=True
] a c
e:i32[1] = broadcast_in_dim[broadcast_dimensions=() shape=(1,)] 1
f:i32[777] = gather[
dimension_numbers=GatherDimensionNumbers(offset_dims=(0,), collapsed_slice_dims=(1,), start_index_map=(1,))
fill_value=None
indices_are_sorted=True
mode=GatherScatterMode.PROMISE_IN_BOUNDS
slice_sizes=(777, 1)
unique_indices=True
] a e
g:i32[777] = sub d f
h:i32[777] = integer_pow[y=2] g
i:f32[777] = convert_element_type[new_dtype=float32 weak_type=True] h
j:f32[777] = div i b
k:f32[] = integer_pow[y=-2] b
l:f32[1000] = broadcast_in_dim[broadcast_dimensions=() shape=(1000,)] 1.0
m:f32[777] = convert_element_type[new_dtype=float32 weak_type=False] j
n:f32[1000] = bcoo_dot_general[
dimension_numbers=(((0,), (0,)), ((), ()))
lhs_spinfo=BCOOInfo(shape=(1000, 1000), indices_sorted=False, unique_indices=True)
] m a l
o:f32[1000] = broadcast_in_dim[broadcast_dimensions=() shape=(1000,)] 1.0
_:f32[] = dot_general[
dimension_numbers=(((0,), (0,)), ((), ()))
precision=None
preferred_element_type=None
] n o
p:f32[1000] = dot_general[
dimension_numbers=(((), ()), ((), ()))
precision=None
preferred_element_type=None
] 1.0 o
q:f32[777] = broadcast_in_dim[broadcast_dimensions=() shape=(777,)] 1.0
_:f32[777] r:i32[777,2] = bcoo_transpose[
permutation=(1, 0)
spinfo=BCOOInfo(shape=(1, 1), indices_sorted=False, unique_indices=False)
] q a
s:f32[777] = bcoo_dot_general_sampled[
dimension_numbers=(((), ()), ((), ()))
] p l r
t:f32[777] _:i32[777,2] = bcoo_transpose[
permutation=(1, 0)
spinfo=BCOOInfo(shape=(1, 1), indices_sorted=False, unique_indices=False)
] s r
u:f32[777] = convert_element_type[new_dtype=float32 weak_type=True] t
v:f32[777] = mul u k
w:f32[777] = mul v i
x:f32[] = reduce_sum[axes=(0,)] w
y:f32[] = neg x
in (y,) }
```
That said, what might be happening is that you're hitting this TODO: https://github.com/google/jax/blob/7e001d842e1cbfdbf991a4bd9b236012cc40fba4/jax/experimental/sparse/bcoo.py#L1091-L1092
`dot_general_sampled` is used in the transpose rule of `dot_general`, which requires extracting sparse indices from a dense dot product. There are probably more efficient ways to implement this, thus the TODO
One way around this would be to use forward-mode autodiff, which does not use the transpose rule.
Thanks for your quick response! I'm not familiar enough with the internals to know whether this is happening in JAX or somewhere lower-level. However, here's an attempted workaround, via a custom reverse rule.
```python
from functools import partial
from jax import custom_vjp
@partial(custom_vjp, nondiff_argnums=(2,3))
def sparse_matrix_product(target, values, nonzeroes, shape):
return BCOO((values, nonzeroes), shape = shape, unique_indices = True) @ target
def sparse_matrix_product_fwd(target, values, nonzeroes, shape):
out = sparse_matrix_product(target, values, nonzeroes, shape)
carry = (target, values)
return out, carry
def sparse_matrix_product_rev(nonzeroes, shape, carry, cotangents):
(target, values) = carry
target_cotangents = BCOO((values, nonzeroes), shape = shape, unique_indices = True).T @ cotangents
x1 = nonzeroes[...,0]
x2 = nonzeroes[...,1]
sparse_cotangents = cotangents[...,x1,:]
sparse_targets = target[...,x2,:]
values_cotangents = jnp.sum(sparse_cotangents * sparse_targets, axis=-1)
return (target_cotangents, values_cotangents)
sparse_matrix_product.defvjp(sparse_matrix_product_fwd, sparse_matrix_product_rev)
import jax
import jax.numpy as jnp
from jax.experimental.sparse import BCOO
n = 1000
m = 777
p = 11
nonzeroes = jnp.stack((jnp.arange(0,m), jnp.arange(p,m+p)), axis=-1)
def quadratic(p):
x1 = nonzeroes[...,0]
x2 = nonzeroes[...,1]
values = (x1 - x2)**2 / p
matrix = BCOO((values, nonzeroes), shape=(n,n), unique_indices=True)
return jnp.ones(n).T @ matrix @ jnp.ones(n)
def fixed_quadratic(p):
x1 = nonzeroes[...,0]
x2 = nonzeroes[...,1]
values = (x1 - x2)**2 / p
return (jnp.ones(n).T @ sparse_matrix_product(jnp.expand_dims(jnp.ones(n),-1), values, nonzeroes, (n,n))).squeeze()
(jax.grad(quadratic)(1.0)), jax.grad(fixed_quadratic)(1.0)
graph = jax.xla_computation(jax.grad(fixed_quadratic))(1.0)
with open("t.dot", "w") as f:
f.write(graph.as_hlo_dot_graph())
```
Here's the corresponding computational graph: [t.pdf](https://github.com/google/jax/files/10806692/t.pdf). This appears so far to avoid dense matrix assembly, though I have not done any testing yet beyond an interactive notebook and thus there may be mistakes.
Very nice! Unfortunately it's not that easy if you have to handle all the possibilities offered by `dot_general`, which the BCOO code has to account for. We could probably special-case some situations and do something like this in simpler cases. | 2023-02-22T21:18:10 |
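An untested sketch of the forward-mode workaround suggested in the hints above, reusing `quadratic` from the MWE; forward-mode differentiation should avoid the transpose rule and therefore `bcoo_dot_general_sampled`:
```python
import jax

# jacfwd uses JVPs only, so the dense dot produced by the transpose rule is never built
jax.jacfwd(quadratic)(1.0)
```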
google/jax | 14,674 | google__jax-14674 | [
"14670"
]
| 0cdb7f999746b879bcadf921b09b33f8b1c2ecae | diff --git a/jax/_src/lax/lax.py b/jax/_src/lax/lax.py
--- a/jax/_src/lax/lax.py
+++ b/jax/_src/lax/lax.py
@@ -747,18 +747,25 @@ def dot(lhs: Array, rhs: Array, precision: PrecisionLike = None,
def dot_general(lhs: ArrayLike, rhs: ArrayLike, dimension_numbers: DotDimensionNumbers,
precision: PrecisionLike = None,
preferred_element_type: Optional[DTypeLike] = None) -> Array:
- """More general contraction operator.
+ """General dot product/contraction operator.
Wraps XLA's `DotGeneral
<https://www.tensorflow.org/xla/operation_semantics#dotgeneral>`_
operator.
+ The semantics of ``dot_general`` are complicated, but most users should not have to
+ use it directly. Instead, you can use higher-level functions like :func:`jax.numpy.dot`,
+ :func:`jax.numpy.matmul`, :func:`jax.numpy.tensordot`, :func:`jax.numpy.einsum`,
+ and others which will construct appropriate calls to ``dot_general`` under the hood.
+ If you really want to understand ``dot_general`` itself, we recommend reading XLA's
+ `DotGeneral <https://www.tensorflow.org/xla/operation_semantics#dotgeneral>`_
+ operator documentation.
+
Args:
lhs: an array
rhs: an array
- dimension_numbers: a tuple of tuples of the form
- `((lhs_contracting_dims, rhs_contracting_dims),
- (lhs_batch_dims, rhs_batch_dims))`
+ dimension_numbers: a tuple of tuples of sequences of ints of the form
+ ``((lhs_contracting_dims, rhs_contracting_dims), (lhs_batch_dims, rhs_batch_dims))``
precision: Optional. Either ``None``, which means the default precision for
the backend, a :class:`~jax.lax.Precision` enum value (``Precision.DEFAULT``,
``Precision.HIGH`` or ``Precision.HIGHEST``) or a tuple of two
@@ -768,7 +775,9 @@ def dot_general(lhs: ArrayLike, rhs: ArrayLike, dimension_numbers: DotDimensionN
accumulate results to and return a result with that datatype.
Returns:
- An array containing the result.
+ An array whose first dimensions are the (shared) batch dimensions, followed by
+ the ``lhs`` non-contracting/non-batch dimensions, and finally the ``rhs``
+ non-contracting/non-batch dimensions.
"""
(lhs_contract, rhs_contract), (lhs_batch, rhs_batch) = dimension_numbers
cdims = (api_util._ensure_index_tuple(lhs_contract),
| jax.lax.dot_general has minimal documentation
Please:
- [x] Check for duplicate requests.
- [x] Describe your goal, and if possible provide a code snippet with a motivating example.
----------
The docs for [`jax.lax.dot_general`](https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.dot_general.html) are unclear. In particular, the `dimension_numbers` argument isn't clear to me.
| Thanks for the request!
Just brainstorming ways to improve: do the [XLA HLO docs](https://www.tensorflow.org/xla/operation_semantics#dotgeneral) clarify the `dimension_numbers` argument? Or are those unclear too?
There is a [reference implementation in terms of standard `numpy`](https://github.com/google/jax/blob/d27735820049bdb9b0828ee95abf86d1ec50f1f9/jax/_src/lax_reference.py#L208) which we could point to, but I don't think it's concise/clear enough to be helpful.
(By the way, I went to elementary school with someone with your name... any chance you're from The Peninsula originally?)
I wonder if it might make sense to add a note similar to what we have in [`lax.gather`](https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.gather.html), something along the these lines:
> The semantics of `dot_general` are complicated, but most users should not have to use it directly. Instead, you can use more user-friendly functions like `jax.numpy.dot`, `jax.numpy.tensordot`, `jax.numpy.einsum`, and others which will construct appropriate calls to `dot_general` under the hood. If you really want to understand `dot_general` itself, we recommend reading XLA's [DotGeneral](https://www.tensorflow.org/xla/operation_semantics#dotgeneral) documentation.
@jakevdp A note like that would be helpful.
@mattjj The XLA docs are more helpful, but still not totally clear. This line is the most helpful
> It follows that the resulting dimension number starts with the batch dimension, then the 'lhs' non-contracting/non-batch dimension, and finally the 'rhs' non-contracting/non-batch dimension.
However, "It follows that" makes it unclear whether it's implied by something about the example, or if it's general to the function. In the jax docs, it might be sufficient to simply specify output dimensions in terms of the inputs. If it's a general property of the return, a change like this could improve things greatly:
> Returns: An array whose first dimensions are the batch dimensions, followed by the 'lhs' non-contracting/non-batch dimensions, and finally the 'rhs' non-contracting/non-batch dimensions.
Proposed doc update in https://github.com/google/jax/pull/14674; let me know what you think! | 2023-02-24T23:46:35 |
|
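A minimal example of the `dimension_numbers` convention and the output layout described above (batch dimensions first, then the lhs remainder, then the rhs remainder):
```python
import jax.numpy as jnp
from jax import lax

lhs = jnp.ones((2, 3, 4))   # (batch, m, k)
rhs = jnp.ones((2, 4, 5))   # (batch, k, n)
# contract lhs axis 2 with rhs axis 1; treat axis 0 of both as a shared batch axis
out = lax.dot_general(lhs, rhs, dimension_numbers=(((2,), (1,)), ((0,), (0,))))
out.shape                   # (2, 3, 5)
```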
google/jax | 14,722 | google__jax-14722 | [
"14691"
]
| 06441883b9c2d7398d159ce9d385fc300a414857 | diff --git a/jax/_src/random.py b/jax/_src/random.py
--- a/jax/_src/random.py
+++ b/jax/_src/random.py
@@ -1068,6 +1068,11 @@ def gamma(key: KeyArray,
dtype: DTypeLikeFloat = dtypes.float_) -> Array:
"""Sample Gamma random values with given shape and float dtype.
+ This implements the standard gamma density, with a unit scale/rate parameter.
+ Dividing the sample output by the rate is equivalent to sampling from
+ *gamma(a, rate)*, and multiplying the sample output by the scale is equivalent
+ to sampling from *gamma(a, scale)*.
+
Args:
key: a PRNG key used as the random key.
a: a float or array of floats broadcast-compatible with ``shape``
| Improved documentation on `jax.random.gamma`
Please:
- [x] Check for duplicate requests.
- [x] Describe your goal, and if possible provide a code snippet with a motivating example.
I think the documentation for the `jax.random.gamma` random variable (sampler) could possibly be clearer. In particular, letting the user know that the standard form is implemented and how to incorporate the rate (or alternatively scale) parameter in their samples. Maybe an addendum along the lines of
*"we implement the standard gamma density. Dividing the sample by the rate is equivalent to sampling from* $\text{Gamma}(\alpha, \text{rate})$*. And multiplying the sample by the scale is equivalent to sampling from* $\text{Gamma}(\alpha, \text{scale})$."
The discussion for this started [here](https://github.com/google/jax/pull/551) and I'm mentioning @mattjj for visibility. Thanks!
| 2023-02-28T21:34:19 |
||
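A short sketch of the scaling relationship described in the request (and in the docstring added by the patch):
```python
import jax

key = jax.random.PRNGKey(0)
a, rate, scale = 2.0, 3.0, 0.5
x_rate = jax.random.gamma(key, a) / rate    # distributed as Gamma(a, rate)
x_scale = jax.random.gamma(key, a) * scale  # distributed as Gamma(a, scale)
```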
google/jax | 14,798 | google__jax-14798 | [
"14249"
]
| da3b75aacc4040441b9077cf34dfb39fb9ab5c8e | diff --git a/jax/_src/lax/control_flow/solves.py b/jax/_src/lax/control_flow/solves.py
--- a/jax/_src/lax/control_flow/solves.py
+++ b/jax/_src/lax/control_flow/solves.py
@@ -387,7 +387,9 @@ def _linear_solve_batching_rule(spmd_axis_name, axis_size, axis_name, main_type,
(matvec, vecmat, solve, solve_t) = jaxprs
(matvec_bat, vecmat_bat, solve_bat, solve_t_bat) = params_bat
- num_aux = len(solve.out_avals) - len(matvec.out_avals)
+ # number of operator out avals is assumed to be the same for matvec/vecmat
+ num_operator_out_avals = len(matvec.out_avals)
+ num_aux = len(solve.out_avals) - num_operator_out_avals
# Fixpoint computation of which parts of x and b are batched; we need to
# ensure this is consistent between all four jaxprs
b_bat = orig_b_bat
@@ -402,21 +404,23 @@ def _linear_solve_batching_rule(spmd_axis_name, axis_size, axis_name, main_type,
x_bat_out = solve_x_bat
else:
vecmat_jaxpr_batched, vecmat_x_bat = batching.batch_jaxpr(
- vecmat, axis_size, vecmat_bat + b_bat, instantiate=x_bat,
+ vecmat, axis_size, vecmat_bat + b_bat, instantiate=b_bat,
axis_name=axis_name, spmd_axis_name=spmd_axis_name, main_type=main_type)
# batch all aux data by default
x_bat_out = _map(operator.or_, vecmat_x_bat + [True] * num_aux, solve_x_bat)
+ # keep a slice of only the linear operator part of solve's avals
+ x_bat_noaux = x_bat_out[:num_operator_out_avals]
# Apply matvec and solve_t -> new batched parts of b
matvec_jaxpr_batched, matvec_b_bat = batching.batch_jaxpr(
- matvec, axis_size, matvec_bat + x_bat_out, instantiate=b_bat,
+ matvec, axis_size, matvec_bat + x_bat_noaux, instantiate=b_bat,
axis_name=axis_name, spmd_axis_name=spmd_axis_name, main_type=main_type)
if solve_t is None:
solve_t_jaxpr_batched = None
b_bat_out = _map(operator.or_, matvec_b_bat, orig_b_bat)
else:
solve_t_jaxpr_batched, solve_t_b_aux_bat = batching.batch_jaxpr(
- solve_t, axis_size, solve_t_bat + x_bat_out, instantiate=b_bat,
+ solve_t, axis_size, solve_t_bat + x_bat_noaux, instantiate=x_bat_out,
axis_name=axis_name, spmd_axis_name=spmd_axis_name, main_type=main_type)
assert len(solve_t_b_aux_bat) == len(orig_b_bat) + num_aux
solve_t_b_bat, _ = split_list(solve_t_b_aux_bat, [len(orig_b_bat)])
| diff --git a/tests/custom_linear_solve_test.py b/tests/custom_linear_solve_test.py
--- a/tests/custom_linear_solve_test.py
+++ b/tests/custom_linear_solve_test.py
@@ -467,6 +467,23 @@ def linear_solve(a, b):
return matrix_free_solve(partial(high_precision_dot, a), b)
jtu.check_grads(linear_solve, (a, b), order=1, rtol=3e-3, modes=['rev'])
+ def test_custom_linear_solve_batching_with_aux(self):
+ def solve(mv, b):
+ aux = (np.array(1.), True, 0)
+ return mv(b), aux
+
+ def solve_aux(x):
+ matvec = lambda y: tree_util.tree_map(partial(jnp.dot, A), y)
+ return lax.custom_linear_solve(matvec, (x, x), solve, solve, symmetric=True, has_aux=True)
+
+ rng = self.rng()
+ A = rng.randn(3, 3)
+ A = A + A.T
+ b = rng.randn(3, 3)
+
+ # doesn't crash
+ jax.vmap(solve_aux)(b)
+
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
| Incorrect batching rule with custom_linear_solve when has_aux=True?
### Description
Since [this commit](https://github.com/google/jax/commit/73ed511d3945246467b0e3e12da326d40a3fe153), a bunch of tests that used to pass in JAXopt are now failing with `ValueError: too many values to unpack`. The issue seems to come from the fact that `custom_linear_solve` has an incorrect batching rule when `has_aux=True`. Here's a minimal repro:
```python
import numpy as np
import jax.numpy as jnp
import jax
from jax.scipy.sparse.linalg import cg
rng = np.random.RandomState(0)
M = rng.randn(5, 5)
A = np.dot(M, M.T)
matvec = lambda x: (jnp.dot(A, x[0]), jnp.dot(A, x[1]))
def f(b):
return cg(matvec, (b, b))[0]
b = rng.randn(5)
jax.jacrev(f)(b)
```
Error obtained:
```
Traceback (most recent call last):
File "/Users/mblondel/Desktop/playground/cg_bug.py", line 16, in <module>
jax.jacrev(f)(b)
File "/Users/mblondel/Desktop/playground/cg_bug.py", line 13, in f
return cg(matvec, (b, b))[0]
File "/Users/mblondel/Desktop/projects/jax/jax/_src/scipy/sparse/linalg.py", line 290, in cg
return _isolve(_cg_solve,
File "/Users/mblondel/Desktop/projects/jax/jax/_src/scipy/sparse/linalg.py", line 229, in _isolve
x_maybe_info = lax.custom_linear_solve(
jax._src.source_info_util.JaxStackTraceBeforeTransformation: ValueError: too many values to unpack (expected 1)
[...]
File "/Users/mblondel/Desktop/projects/jax/jax/interpreters/batching.py", line 710, in _match_axes_jaxpr
out_axis_dest, = out_axes_dest
jax._src.traceback_util.UnfilteredStackTrace: ValueError: too many values to unpack (expected 1)
The stack trace below excludes JAX-internal frames.
The preceding is the original exception that occurred, unmodified.
--------------------
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/Users/mblondel/Desktop/playground/cg_bug.py", line 16, in <module>
jax.jacrev(f)(b)
File "/Users/mblondel/Desktop/projects/jax/jax/_src/api.py", line 1324, in jacfun
jac = vmap(pullback)(_std_basis(y))
ValueError: too many values to unpack (expected 1)
```
If I revert to before the aforementioned commit, this script works, since `cg` didn't use `custom_linear_solve(..., has_aux=True)`.
Also note that I use `(b, b)` to define a pytree. When the input is just an array, there is no error.
### What jax/jaxlib version are you using?
main branch
### Which accelerator(s) are you using?
CPU
### Additional system info
3.8, Mac
### NVIDIA GPU info
_No response_
| 2023-03-06T16:16:13 |
|
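A sketch of the single-array case mentioned in the report above (reusing `A`, `b`, and `cg` from the repro); only the pytree right-hand side triggered the batching bug:
```python
import jax
import jax.numpy as jnp

def g(b):
    return cg(lambda x: jnp.dot(A, x), b)[0]

jax.jacrev(g)(b)  # single-array right-hand side: no error
```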
google/jax | 14,887 | google__jax-14887 | [
"14882"
]
| 04def0b6ab945a150fce6c714c95e49db3a71693 | diff --git a/jax/_src/lib/__init__.py b/jax/_src/lib/__init__.py
--- a/jax/_src/lib/__init__.py
+++ b/jax/_src/lib/__init__.py
@@ -15,10 +15,9 @@
# This module is largely a wrapper around `jaxlib` that performs version
# checking on import.
-import platform
+import gc
import re
import os
-import warnings
from typing import Optional, Tuple
try:
@@ -94,6 +93,11 @@ def _parse_version(v: str) -> Tuple[int, ...]:
jax_jit = xla_client._xla.jax_jit
pmap_lib = xla_client._xla.pmap_lib
+# XLA garbage collection: see https://github.com/google/jax/issues/14882
+def _xla_gc_callback(*args):
+ xla_client._xla.collect_garbage()
+gc.callbacks.append(_xla_gc_callback)
+
import jaxlib.gpu_solver as gpu_solver # pytype: disable=import-error
import jaxlib.gpu_sparse as gpu_sparse # pytype: disable=import-error
import jaxlib.gpu_prng as gpu_prng # pytype: disable=import-error
| diff --git a/tests/api_test.py b/tests/api_test.py
--- a/tests/api_test.py
+++ b/tests/api_test.py
@@ -9629,5 +9629,18 @@ def test_autodidax_smoketest(self):
autodidax_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(autodidax_module)
+class GarbageCollectionTest(jtu.JaxTestCase):
+ def test_xla_gc_callback(self):
+ # https://github.com/google/jax/issues/14882
+ x_np = np.arange(10, dtype='int32')
+ x_jax = jax.device_put(x_np)
+ x_np_weakref = weakref.ref(x_np)
+
+ del x_np
+ del x_jax
+ gc.collect()
+
+ assert x_np_weakref() is None
+
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
| device_put leaks references on numpy inputs
First reported in #14876; here's a minimal repro that reproduces on CPU and GPU:
```python
import sys
import jax
import numpy as np
x = np.arange(1000, dtype='float32')
print(sys.getrefcount(x))
# 2
y = jax.device_put(x)
print(sys.getrefcount(x))
# 3
del y
print(sys.getrefcount(x))
# 3
```
| From @hawkinsp, it looks like triggering XLA's internal garbage cleanup is the solution here:
```python
import sys
import jax
import numpy as np
from jax._src.lib import xla_client
x = np.arange(1000, dtype='float32')
print(sys.getrefcount(x))
# 2
y = jax.device_put(x)
print(sys.getrefcount(x))
# 3
del y
print(sys.getrefcount(x))
# 3
xla_client._xla.collect_garbage()
print(sys.getrefcount(x))
# 2
```
This internal garbage collection is called frequently within XLA when dispatching JAX operations (for example, replacing `_xla.collect_garbage()` with a JAX call like `jax.jit(lambda a: a)(1.0)` shows the same result), but @mattjj suggested we might also add it to [`gc.callbacks`](https://docs.python.org/3/library/gc.html#gc.callbacks) so that when the user forces Python garbage collection, it triggers XLA garbage collection as well.
With this addition, `gc.collect()` causes the reference counts to be as expected.
```python
import gc
from jax._src.lib import xla_client
gc.callbacks.append(lambda *args: xla_client._xla.collect_garbage())
``` | 2023-03-09T21:49:16 |
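A sketch mirroring the regression test added in the patch; on a jax version that registers the XLA GC callback, forcing Python garbage collection also releases the reference XLA holds on the numpy buffer:
```python
import gc
import weakref
import numpy as np
import jax

x = np.arange(10, dtype='int32')
ref = weakref.ref(x)
y = jax.device_put(x)

del x, y
gc.collect()           # triggers the XLA garbage-collection callback registered by jax
assert ref() is None   # the numpy buffer is no longer referenced
```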
google/jax | 14,908 | google__jax-14908 | [
"14906"
]
| 623282715de895cd813a0dc44cf7da2d5630b310 | diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -3057,7 +3057,7 @@ def vdot(a, b, *, precision=None):
util._check_arraylike("vdot", a, b)
if issubdtype(_dtype(a), complexfloating):
a = ufuncs.conj(a)
- return dot(a.ravel(), b.ravel(), precision=precision)
+ return dot(ravel(a), ravel(b), precision=precision)
@util._wraps(np.tensordot, lax_description=_PRECISION_DOC)
@@ -3487,9 +3487,9 @@ def sort(a, axis: Optional[int] = -1, kind='quicksort', order=None):
raise ValueError("'order' argument to sort is not supported.")
if axis is None:
- return lax.sort(a.ravel(), dimension=0)
+ return lax.sort(ravel(a), dimension=0)
else:
- return lax.sort(a, dimension=_canonicalize_axis(axis, ndim(a)))
+ return lax.sort(asarray(a), dimension=_canonicalize_axis(axis, ndim(a)))
@util._wraps(np.sort_complex)
@jit
| `jnp.sort` and `jnp.vdot` raise error under `jax.disable_jit()`
### Description
`jnp.sort`, `jnp.vdot` and a few others in `jax.numpy` will raise under `jax.disable_jit()`. IMO, the fact that exactly *these* functions go wrong is not important, nor is the fact that `jax.disable_jit()` is involved.
Taking a look at their implementation, on can spot a pattern:
```python
@partial(jit, static_argnums=(...))
def f(a: ArrayLike):
util._check_arraylike('f', a)
bar = foo(a.ravel()) # << not good
...
```
Without jit, the scalar case would get caught by the tests, leading us to the fix naturally. The implicit array conversion these functions perform at the jit boundary hides the fact that array methods might not be available.
Fixing them is easy enough, but I want to ask/discuss something more general here:
1. Maybe we should insert these explicit conversions after `_check_arraylike` everywhere, in the form of `asarray` or explicit `reshape` or `ravel`.
2. If the pattern in 1. is desirable, is it always safe to use the simpler and faster variant `util._asarray`? I think if we can, we should.
3. I see similar problems (that JAX maintainers have rather canonical solutions in JAX itself) popping up almost every time someone tries to contribute to `jax.scipy` etc. Can we have a contributor guide documenting these issues? (`array` conversion, `dtype` handling, type hinting, `jnp` vs `lax`, standard ways to deal with static arguments: `concrete_or_error`, etc.)
4. What is the best place to ask these broader questions that are also half bug reports? Should the Github discussions be preferred?
@jakevdp I bore you the most in many related issues before, so I'm gonna tag you here.
| Agreed we should always call `_check_arraylike` followed by casting the value using `asarray` in order to address this problem, though it's a common-enough pattern we might want to replace it with a more streamlined utility.
> we might want to replace it with a more streamlined utility
Maybe something like
```python
a, b = _ensure_arraylike('f', a, b)
```
which uses something a bit lighter than `jnp.array` internally. This might also help with typing. Not sure if MyPy supports this kind of type refinement though, as I see many `xxx_arr` in `lax_numpy.py`.
That said, I also see instances like
```python
util._stackable(a) or util._check_arraylike("transpose", a)
```
and
```python
core.is_special_dim_size(repeats) or util._check_arraylike("repeat", repeats)
```
So whatever the new thing is, it had better work well with these as well.
In any case, I'm all for a more complete `numpy_util` which I could use in my own work. Also looking forward to less _internal_ tools for `dtype` checking, type promotion, static argument, etc.
Yeah, that's roughly what I've been thinking. Though for typing, if you want to use array attributes you have to rename the variables, e.g.
```python
a_arr, b_arr = util._arraylike(a, b)
```
It's one of my biggest annoyances with the current generation of static type checkers, actually
> It's one of my biggest annoyances with the current generation of static type checkers, actually
Yeah. Pyright seems to work without renaming though, so there is hope.
Fix for this particular issue here: #14908
I'll probably tackle the simplification of `check_arraylike` in a later PR – it's a bigger job.
@jakevdp Thanks! | 2023-03-10T17:45:49 |
|
google/jax | 14,912 | google__jax-14912 | [
"14904"
]
| 04def0b6ab945a150fce6c714c95e49db3a71693 | diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -3399,14 +3399,15 @@ def argwhere(a, *, size=None, fill_value=None):
@util._wraps(np.argmax, skip_params=['out'])
-def argmax(a, axis: Optional[int] = None, out=None, keepdims=None):
- return _argmax(a, None if axis is None else operator.index(axis), keepdims=bool(keepdims))
-
-@partial(jit, static_argnames=('axis', 'keepdims'), inline=True)
-def _argmax(a, axis: Optional[int] = None, out=None, keepdims=False):
+def argmax(a: ArrayLike, axis: Optional[int] = None, out=None, keepdims=None) -> Array:
util._check_arraylike("argmax", a)
if out is not None:
raise NotImplementedError("The 'out' argument to jnp.argmax is not supported.")
+ return _argmax(asarray(a), None if axis is None else operator.index(axis),
+ keepdims=bool(keepdims))
+
+@partial(jit, static_argnames=('axis', 'keepdims'), inline=True)
+def _argmax(a: Array, axis: Optional[int] = None, keepdims: bool = False) -> Array:
if axis is None:
dims = list(range(ndim(a)))
a = ravel(a)
@@ -3419,14 +3420,15 @@ def _argmax(a, axis: Optional[int] = None, out=None, keepdims=False):
return expand_dims(result, dims) if keepdims else result
@util._wraps(np.argmin, skip_params=['out'])
-def argmin(a, axis: Optional[int] = None, out=None, keepdims=None):
- return _argmin(a, None if axis is None else operator.index(axis), keepdims=bool(keepdims))
-
-@partial(jit, static_argnames=('axis', 'keepdims'), inline=True)
-def _argmin(a, axis: Optional[int] = None, out=None, keepdims=False):
+def argmin(a: ArrayLike, axis: Optional[int] = None, out=None, keepdims=None) -> Array:
util._check_arraylike("argmin", a)
if out is not None:
raise NotImplementedError("The 'out' argument to jnp.argmin is not supported.")
+ return _argmin(asarray(a), None if axis is None else operator.index(axis),
+ keepdims=bool(keepdims))
+
+@partial(jit, static_argnames=('axis', 'keepdims'), inline=True)
+def _argmin(a: Array, axis: Optional[int] = None, keepdims: bool = False) -> Array:
if axis is None:
dims = list(range(ndim(a)))
a = ravel(a)
| `argmax` and `argmin` don't raise when the argument `out` is used.
### Description
https://github.com/google/jax/blob/3fbe4e8230c60e40a1c54732d879438b9d39bd16/jax/_src/numpy/lax_numpy.py#L3401-L3410
`out` is never passed to `_argmax` where the error handling is actually implemented. I guess wrapped functions like `_argmax` shouldn't have default arguments.
Also, `keepdims` should be keyword-only.
| Thanks for the report! | 2023-03-10T18:35:54 |
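A small sketch of the behavior the fix targets; the `out=` line is the case that previously went unchecked:
```python
import numpy as np
import jax.numpy as jnp

jnp.argmax(jnp.arange(4), keepdims=True)      # a 1-element array containing 3
# jnp.argmax(jnp.arange(4), out=np.zeros(1))  # with the fix, raises NotImplementedError instead of ignoring `out`
```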
|
google/jax | 14,928 | google__jax-14928 | [
"14920"
]
| f96b59f03d890fab1e6928abbf251429484b2d72 | diff --git a/jax/_src/dtypes.py b/jax/_src/dtypes.py
--- a/jax/_src/dtypes.py
+++ b/jax/_src/dtypes.py
@@ -631,7 +631,10 @@ def dtype(x: Any, *, canonicalize: bool = False) -> DType:
elif core.is_opaque_dtype(getattr(x, 'dtype', None)):
dt = x.dtype
else:
- dt = np.result_type(x)
+ try:
+ dt = np.result_type(x)
+ except TypeError as err:
+ raise TypeError(f"Cannot determine dtype of {x}") from err
if dt not in _jax_dtype_set:
raise TypeError(f"Value '{x}' with dtype {dt} is not a valid JAX array "
"type. Only arrays of numeric types are supported by JAX.")
diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -2273,8 +2273,19 @@ def identity(n: DimSize, dtype: Optional[DTypeLike] = None) -> Array:
def arange(start: DimSize, stop: Optional[DimSize] = None,
step: Optional[DimSize] = None, dtype: Optional[DTypeLike] = None) -> Array:
dtypes.check_user_dtype_supported(dtype, "arange")
- require = partial(core.concrete_or_error, None)
- msg = "It arose in jax.numpy.arange argument `{}`.".format
+ if not jax.config.jax_dynamic_shapes:
+ util.check_arraylike("arange", start)
+ if stop is None and step is None:
+ start = core.concrete_or_error(None, start, "It arose in the jnp.arange argument 'stop'")
+ else:
+ start = core.concrete_or_error(None, start, "It arose in the jnp.arange argument 'start'")
+ util.check_arraylike_or_none("arange", None, stop, step)
+ stop = core.concrete_or_error(None, stop, "It arose in the jnp.arange argument 'stop'")
+ step = core.concrete_or_error(None, step, "It arose in the jnp.arange argument 'step'")
+ start_name = "stop" if stop is None and step is None else "start"
+ for name, val in [(start_name, start), ("stop", stop), ("step", step)]:
+ if val is not None and np.ndim(val) != 0:
+ raise ValueError(f"jax.numpy.arange: arguments must be scalars; got {name}={val}")
if _any(core.is_special_dim_size(d) for d in (start, stop, step)):
if stop is not None or step is not None:
raise ValueError(
@@ -2287,17 +2298,12 @@ def arange(start: DimSize, stop: Optional[DimSize] = None,
dtype = _jnp_dtype(dtype)
if stop is None and step is None:
start_dtype = _dtype(start)
- if not jax.config.jax_dynamic_shapes:
- start = require(start, msg("stop"))
if (not dtypes.issubdtype(start_dtype, np.integer) and
not core.is_opaque_dtype(start_dtype)):
ceil_ = ufuncs.ceil if isinstance(start, core.Tracer) else np.ceil
start = ceil_(start).astype(int) # type: ignore
return lax.iota(dtype, start)
else:
- start = require(start, msg("start"))
- stop = None if stop is None else require(stop, msg("stop"))
- step = None if step is None else require(step, msg("step"))
if step is None and start == 0 and stop is not None:
stop = np.ceil(stop).astype(int)
return lax.iota(dtype, stop)
diff --git a/jax/_src/numpy/util.py b/jax/_src/numpy/util.py
--- a/jax/_src/numpy/util.py
+++ b/jax/_src/numpy/util.py
@@ -343,6 +343,15 @@ def check_arraylike(fun_name: str, *args: Any):
raise TypeError(msg.format(fun_name, type(arg), pos))
+def check_arraylike_or_none(fun_name: str, *args: Any):
+ assert isinstance(fun_name, str), f"fun_name must be a string. Got {fun_name}"
+ if any(not (_arraylike(arg) or arg is None) for arg in args):
+ pos, arg = next((i, arg) for i, arg in enumerate(args)
+ if not (_arraylike(arg) or arg is None))
+ msg = "{} requires ndarray, scalar, or None arguments, got {} at position {}."
+ raise TypeError(msg.format(fun_name, type(arg), pos))
+
+
def _check_no_float0s(fun_name: str, *args: Any):
"""Check if none of the args have dtype float0."""
if any(dtypes.dtype(arg) == dtypes.float0 for arg in args):
| diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py
--- a/tests/lax_numpy_test.py
+++ b/tests/lax_numpy_test.py
@@ -4953,7 +4953,7 @@ def testToList(self):
self.assertEqual(jnp.asarray(v).tolist(), v.tolist())
def testArangeConcretizationError(self):
- msg = r"It arose in jax.numpy.arange argument `{}`".format
+ msg = r"It arose in the jnp.arange argument '{}'".format
with self.assertRaisesRegex(core.ConcretizationTypeError, msg('stop')):
jax.jit(jnp.arange)(3)
| `jnp.arange` produces obscure error for wrong argument type
This example is admittedly superficial. One could still argue that the error message is cryptic.
```python
>>> jnp.arange([1, 3])
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
Cell In[5], line 1
----> 1 jnp.arange([1, 3])
...
some super long trace
...
File <__array_function__ internals>:200, in result_type(*args, **kwargs)
TypeError: Field elements must be 2- or 3-tuples, got '1'
```
| Thanks - this comes from numpy actually:
```python
>>> np.result_type([1, 2])
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
Cell In[8], line 1
----> 1 np.result_type([1, 2])
File <__array_function__ internals>:180, in result_type(*args, **kwargs)
TypeError: Field elements must be 2- or 3-tuples, got '1'
```
We could probably catch this before hitting the problematic numpy path, which I believe happens here: https://github.com/google/jax/blob/a32a7ff903754cec47c936b762fc80ebd547cfb8/jax/_src/dtypes.py#L634 | 2023-03-11T00:12:19 |
google/jax | 14,952 | google__jax-14952 | [
"14913"
]
| 1925aa110972f68ea4c510c17f63b297f8f312f2 | diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -26,7 +26,8 @@
import builtins
import collections
-from functools import partial
+from functools import partial, wraps
+import inspect
import math
import operator
import types
@@ -5326,6 +5327,27 @@ def __repr__(self):
Array.at.__doc__ = _IndexUpdateHelper.__doc__
+# TODO(jakevdp): remove these deprecation warnings after June 2023
+def allow_pass_by_position_with_warning(f):
+ @wraps(f)
+ def wrapped(*args, **kwargs):
+ sig = inspect.signature(f)
+ try:
+ sig.bind(*args, **kwargs)
+ except TypeError:
+ argspec = inspect.getfullargspec(f)
+ n_positional = len(argspec.args)
+ keywords = argspec.kwonlyargs[:len(args) - n_positional]
+ warnings.warn(
+ f"jnp.ndarray.at[...].{f.__name__}: Passing '{keywords[0]}' by position is deprecated. "
+ f"Pass by keyword instead", category=FutureWarning, stacklevel=2)
+ converted_kwargs = dict(zip(keywords, args[n_positional:]))
+ return f(*args[:n_positional], **converted_kwargs, **kwargs)
+ else:
+ return f(*args, **kwargs)
+ return wrapped
+
+
class _IndexUpdateRef:
"""Helper object to call indexed update functions for an (advanced) index.
@@ -5342,7 +5364,8 @@ def __init__(self, array, index):
def __repr__(self):
return f"_IndexUpdateRef({repr(self.array)}, {repr(self.index)})"
- def get(self, indices_are_sorted=False, unique_indices=False,
+ @allow_pass_by_position_with_warning
+ def get(self, *, indices_are_sorted=False, unique_indices=False,
mode=None, fill_value=None):
"""Equivalent to ``x[idx]``.
@@ -5358,7 +5381,8 @@ def get(self, indices_are_sorted=False, unique_indices=False,
unique_indices=unique_indices, mode=mode,
fill_value=fill_value)
- def set(self, values, indices_are_sorted=False, unique_indices=False,
+ @allow_pass_by_position_with_warning
+ def set(self, values, *, indices_are_sorted=False, unique_indices=False,
mode=None):
"""Pure equivalent of ``x[idx] = y``.
@@ -5371,7 +5395,8 @@ def set(self, values, indices_are_sorted=False, unique_indices=False,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices, mode=mode)
- def apply(self, func, indices_are_sorted=False, unique_indices=False,
+ @allow_pass_by_position_with_warning
+ def apply(self, func, *, indices_are_sorted=False, unique_indices=False,
mode=None):
"""Pure equivalent of ``func.at(x, idx)`` for a unary ufunc ``func``.
@@ -5394,7 +5419,8 @@ def _scatter_apply(x, indices, _, dims, **kwargs):
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices, mode=mode)
- def add(self, values, indices_are_sorted=False, unique_indices=False,
+ @allow_pass_by_position_with_warning
+ def add(self, values, *, indices_are_sorted=False, unique_indices=False,
mode=None):
"""Pure equivalent of ``x[idx] += y``.
@@ -5408,7 +5434,8 @@ def add(self, values, indices_are_sorted=False, unique_indices=False,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices, mode=mode)
- def multiply(self, values, indices_are_sorted=False, unique_indices=False,
+ @allow_pass_by_position_with_warning
+ def multiply(self, values, *, indices_are_sorted=False, unique_indices=False,
mode=None):
"""Pure equivalent of ``x[idx] *= y``.
@@ -5424,7 +5451,8 @@ def multiply(self, values, indices_are_sorted=False, unique_indices=False,
mode=mode)
mul = multiply
- def divide(self, values, indices_are_sorted=False, unique_indices=False,
+ @allow_pass_by_position_with_warning
+ def divide(self, values, *, indices_are_sorted=False, unique_indices=False,
mode=None):
"""Pure equivalent of ``x[idx] /= y``.
@@ -5440,7 +5468,8 @@ def divide(self, values, indices_are_sorted=False, unique_indices=False,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices, mode=mode))
- def power(self, values, indices_are_sorted=False, unique_indices=False,
+ @allow_pass_by_position_with_warning
+ def power(self, values, *, indices_are_sorted=False, unique_indices=False,
mode=None):
"""Pure equivalent of ``x[idx] **= y``.
@@ -5456,7 +5485,8 @@ def power(self, values, indices_are_sorted=False, unique_indices=False,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices, mode=mode))
- def min(self, values, indices_are_sorted=False, unique_indices=False, # noqa: F811
+ @allow_pass_by_position_with_warning
+ def min(self, values, *, indices_are_sorted=False, unique_indices=False,
mode=None):
"""Pure equivalent of ``x[idx] = minimum(x[idx], y)``.
@@ -5471,7 +5501,8 @@ def min(self, values, indices_are_sorted=False, unique_indices=False, # noqa: F
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices, mode=mode)
- def max(self, values, indices_are_sorted=False, unique_indices=False, # noqa: F811
+ @allow_pass_by_position_with_warning
+ def max(self, values, *, indices_are_sorted=False, unique_indices=False,
mode=None):
"""Pure equivalent of ``x[idx] = maximum(x[idx], y)``.
| diff --git a/tests/lax_numpy_indexing_test.py b/tests/lax_numpy_indexing_test.py
--- a/tests/lax_numpy_indexing_test.py
+++ b/tests/lax_numpy_indexing_test.py
@@ -929,6 +929,13 @@ def testFloatIndexingError(self):
with self.assertRaisesRegex(TypeError, BAD_INDEX_TYPE_ERROR):
jnp.zeros(2).at[0.].set(1.)
+ def testIndexingPositionalArgumentWarning(self):
+ x = jnp.arange(4)
+ with self.assertWarnsRegex(
+ FutureWarning, "Passing 'indices_are_sorted' by position is deprecated"):
+ out = x.at[5].set(1, True, mode='drop')
+ self.assertArraysEqual(out, x)
+
def testIndexOutOfBounds(self): # https://github.com/google/jax/issues/2245
x = jnp.arange(5, dtype=jnp.int32) + 1
self.assertAllClose(x, x[:10])
| Make arguments of index update helper functions keyword only
For instance, the following code will fail silently, especially given that `at` is not type hinted. One could argue it's just the user (me) being silly, but it's still surprising and difficult to debug. Making arguments like `mode` keyword-only will eliminate this kind of problem.
```python
I = jnp.array([0, 10])
a = jnp.arange(5).at[I].get('fill', fill_value=-1)
# this is actually: get(indices_are_sorted='fill', fill_value=-1)
```
| This is a great idea – let me explore a bit to see how many downstream projects it would break. | 2023-03-13T17:01:50 |
google/jax | 14,987 | google__jax-14987 | [
"14983"
]
| ed8ddfb3f7811bfaa325c47551add9f5d61199ed | diff --git a/jax/experimental/sparse/transform.py b/jax/experimental/sparse/transform.py
--- a/jax/experimental/sparse/transform.py
+++ b/jax/experimental/sparse/transform.py
@@ -671,6 +671,22 @@ def _mul_sparse(spenv, *spvalues):
sparse_rules_bcoo[lax.mul_p] = _mul_sparse
+def _div_sparse(spenv, *spvalues):
+ X, Y = spvalues
+ if Y.is_sparse():
+ raise NotImplementedError(
+ "Division by a sparse array is not implemented because it "
+ "would result in dense output. If this is your intent, use "
+ "sparse.todense() to convert your arguments to a dense array.")
+ X_promoted = spvalues_to_arrays(spenv, X)
+ out_data = bcoo_multiply_dense(X_promoted, 1. / spenv.data(Y))
+ out_spvalue = spenv.sparse(X.shape, out_data, indices_ref=X.indices_ref,
+ indices_sorted=X.indices_sorted,
+ unique_indices=X.unique_indices)
+ return (out_spvalue,)
+
+sparse_rules_bcoo[lax.div_p] = _div_sparse
+
def _reduce_sum_sparse(spenv, *spvalues, axes):
X, = spvalues
X_promoted = spvalues_to_arrays(spenv, X)
@@ -894,6 +910,8 @@ def _sparse_iter(arr):
"__rmatmul__": sparsify(_swap_args(jnp.matmul)),
"__mul__": sparsify(jnp.multiply),
"__rmul__": sparsify(_swap_args(jnp.multiply)),
+ "__truediv__": sparsify(jnp.divide),
+ "__rtruediv__": sparsify(_swap_args(jnp.divide)),
"__add__": sparsify(jnp.add),
"__radd__": sparsify(_swap_args(jnp.add)),
"__sub__": sparsify(jnp.subtract),
| diff --git a/tests/sparsify_test.py b/tests/sparsify_test.py
--- a/tests/sparsify_test.py
+++ b/tests/sparsify_test.py
@@ -240,6 +240,33 @@ def testSparseMul(self, shape, dtype, n_batch, n_dense, unique_indices):
self.assertAllClose(out.todense(), x.todense() * y.todense())
+ @jtu.sample_product(
+ [dict(shape=shape, n_batch=n_batch, n_dense=n_dense)
+ for shape in [(5,), (5, 8), (8, 5), (3, 4, 5), (3, 4, 3, 2)]
+ for n_batch in range(len(shape) + 1)
+ for n_dense in range(len(shape) + 1 - n_batch)
+ ],
+ dtype=jtu.dtypes.integer + jtu.dtypes.floating + jtu.dtypes.complex,
+ )
+ def testSparseDiv(self, shape, dtype, n_batch, n_dense):
+ rng_dense = jtu.rand_nonzero(self.rng())
+ rng_sparse = rand_sparse(self.rng(), rand_method=jtu.rand_some_zero)
+ x = BCOO.fromdense(rng_sparse(shape, dtype), n_batch=n_batch,
+ n_dense=n_dense)
+ spdiv = self.sparsify(operator.truediv)
+
+ # Scalar division
+ divisor = 2
+ expected = x.todense() / divisor
+ self.assertAllClose(expected, spdiv(x, divisor).todense())
+ self.assertAllClose(expected, (x / divisor).todense())
+
+ # Array division
+ divisor = rng_dense(shape, dtype)
+ expected = x.todense() / divisor
+ self.assertAllClose(expected, spdiv(x, divisor).todense())
+ self.assertAllClose(expected, (x / divisor).todense())
+
def testSparseSubtract(self):
x = BCOO.fromdense(3 * jnp.arange(5))
y = BCOO.fromdense(jnp.arange(5))
| Sparse rule for div is not implemented
### Description
Hi all, I've been prototyping some functionality using the experimental `BCOO` interface and quickly ran into `NotImplementedError` for basic functionality. For example, computing the mean across a given axis.
```python
import jax.experimental.sparse as xsp
import jax.numpy as jnp
X = jnp.hstack([jnp.ones(5)[:,jnp.newaxis], jnp.zeros(5)[:,jnp.newaxis]])
sX = xsp.BCOO.fromdense(X)
smean = xsp.sparsify(jnp.mean)
smean(sX)
```
Will output `Array(0.5, dtype=float32)`, however, `smean(sX, axis=0)` (or `axis=1`) throws
```python
NotImplementedError: sparse rule for div is not implemented.
```
Is this expected at this time, and are there plans to extend the functionality for `div`?
### What jax/jaxlib version are you using?
jax v0.4.5
### Which accelerator(s) are you using?
CPU
### Additional system info
Python 3.9.12
### NVIDIA GPU info
_No response_
| Thanks for the question - we haven't implemented sparse div because in general it's unsafe: if the divisor is zero, then it would be a densifying operation (all unspecified entries would become `NaN`).
Perhaps you could call `sum()` and then densify the result and divide by `N` to compute the mean? | 2023-03-14T18:59:23 |
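An untested sketch of the workaround suggested above (sum sparsely, densify the reduced result, then divide by the axis length), reusing `sX` from the example:
```python
import jax.numpy as jnp
import jax.experimental.sparse as xsp

ssum = xsp.sparsify(jnp.sum)
col_means = ssum(sX, axis=0).todense() / sX.shape[0]  # mean over axis 0 without a sparse divide
```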
google/jax | 15,008 | google__jax-15008 | [
"14625"
]
| da19bf18a2fde79224e0ad1a1c331e232d105202 | diff --git a/jax/_src/numpy/reductions.py b/jax/_src/numpy/reductions.py
--- a/jax/_src/numpy/reductions.py
+++ b/jax/_src/numpy/reductions.py
@@ -657,6 +657,7 @@ def _cumulative_reduction(a: ArrayLike, axis: Axis = None,
if axis is None or _isscalar(a):
a = lax.reshape(a, (np.size(a),))
+ if axis is None:
axis = 0
a_shape = list(np.shape(a))
| Numpy and JAX behaviours differ for sums over scalars.
### Description
When passed a scalar, `jax.numpy` cannot sum over the first nor last axis.
```python
import jax.numpy as jnp
import numpy as np
arr = 0. # An array that is scalar for some reason
np.sum(arr, 0) # no pb
np.sum(arr, -1) # no pb
jnp.sum(arr, 0) # raises
jnp.sum(arr, -1) # raises
```
This is also valid for the `jnp.prod` function.
I believe this should likely be a valid input to `jnp.sum` WDYT?
### What jax/jaxlib version are you using?
jax==0.4.3 jaxlib==0.4.3+cuda11.cudnn86
### Which accelerator(s) are you using?
CPU/GPU
### Additional system info
_No response_
### NVIDIA GPU info
_No response_
| Thanks for the report – I don't see this behavior documented anywhere in numpy, so I suspect it's an implementation detail rather than intended design. I think JAX's behavior makes more sense: in general if you have an array `a` and you sum over `axis=a.ndim`, you will get an error. I think this also makes sense when `ndim = 0`. What do you think?
@jakevdp Should we also raise for `cumsum` and friends
```python
jnp.cumsum(0, axis=2)
# Array([0], dtype=int32, weak_type=True)
```
The NumPy version behaves slightly differently, and raises for `axis not in {0, -1}`. Both versions violate the documentation
> The result has the same size as `a`, and the same shape as `a` if `axis` is not None or `a` is a 1-d array.
Sometimes NumPy's semantics is just 🤦 .
That looks like a bug in the implementation of `jnp.cumsum`. Are you interested in putting together a fix?
> Are you interested in putting together a fix?
@jakevdp Sure.
I've checked other stats function. For instance `np.mean` raises on scalars when an axis is passed. It seems to confirm that this is more of an implementation side effect than a feature of NumPy. | 2023-03-15T16:22:03 |
|
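A sketch of the behavior after the one-line fix above: a scalar is still promoted to 1-d, but an explicit out-of-range axis is no longer silently remapped to 0:
```python
import jax.numpy as jnp

jnp.cumsum(0, axis=0)    # -> array of shape (1,): the scalar is treated as 1-d
# jnp.cumsum(0, axis=2)  # now raises: axis 2 is out of bounds for the 1-d input
```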
google/jax | 15,031 | google__jax-15031 | [
"15017"
]
| da19bf18a2fde79224e0ad1a1c331e232d105202 | diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -3112,6 +3112,7 @@ def tensordot(a, b, axes=2, *, precision=None):
@util._wraps(np.einsum, lax_description=_EINSUM_DOC, skip_params=['out'])
def einsum(
+ subscripts,
*operands,
out=None,
optimize="optimal",
@@ -3119,6 +3120,7 @@ def einsum(
_use_xeinsum=False,
_dot_general=lax.dot_general,
):
+ operands = (subscripts, *operands)
if out is not None:
raise NotImplementedError("The 'out' argument to jnp.einsum is not supported.")
| diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py
--- a/tests/lax_numpy_test.py
+++ b/tests/lax_numpy_test.py
@@ -5251,7 +5251,7 @@ def testWrappedSignaturesMatch(self):
}
extra_params = {
- 'einsum': ['precision'],
+ 'einsum': ['subscripts', 'precision'],
'einsum_path': ['subscripts'],
'take_along_axis': ['mode'],
}
| jnp.einsum docstring missing equation arg
### Description
The documented arg list for einsum is missing the first argument.
https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.einsum.html
`jax.numpy.einsum(*operands, out=None, optimize='optimal', precision=None, _use_xeinsum=False)`
compare to
https://numpy.org/doc/stable/reference/generated/numpy.einsum.html
`numpy.einsum(subscripts, *operands, out=None, dtype=None, order='K', casting='safe', optimize=False)`
(noting the 'subscripts' for the first arg)
### What jax/jaxlib version are you using?
_No response_
### Which accelerator(s) are you using?
_No response_
### Additional system info
_No response_
### NVIDIA GPU info
_No response_
| Hi - thanks for the report! It looks like the issue is that the actual signature of `np.einsum` doesn't match its documented signature. For example:
```python
In [1]: import numpy as np
In [2]: np.einsum??
Signature: np.einsum(*operands, out=None, optimize=False, **kwargs)
Source:
@array_function_dispatch(_einsum_dispatcher, module='numpy')
def einsum(*operands, out=None, optimize=False, **kwargs):
"""
einsum(subscripts, *operands, out=None, dtype=None, order='K',
casting='safe', optimize=False)
Evaluates the Einstein summation convention on the operands.
...
```
Note that the coded signature matches the JAX implementation, but the signature written at the top of the docstring is different. Given that, I think JAX's implementation is correct, because it matches numpy's implementation. What do you think?
I suspect the reason for the mismatch on numpy's side is that the signature pre-dates python 3.0, so keyword-only arguments weren't originally possible to use here.
The numpy documentation is more helpful if you haven't memorized that
the equation comes first. An example usage would be even better.
Thanks - for `jax.numpy` functions, the JAX documentation is just a direct copy of the numpy documentation in most cases. | 2023-03-16T16:14:14 |
google/jax | 15,069 | google__jax-15069 | [
"15068"
]
| a7b8129ffbcd73af26cd075bb8f248be5f64b906 | diff --git a/jax/_src/numpy/reductions.py b/jax/_src/numpy/reductions.py
--- a/jax/_src/numpy/reductions.py
+++ b/jax/_src/numpy/reductions.py
@@ -433,8 +433,9 @@ def _var(a: ArrayLike, axis: Axis = None, dtype: DTypeLike = None,
a = lax_internal.asarray(a).astype(computation_dtype)
a_mean = mean(a, axis, dtype=computation_dtype, keepdims=True, where=where)
centered = lax.sub(a, a_mean)
- if dtypes.issubdtype(centered.dtype, np.complexfloating):
+ if dtypes.issubdtype(computation_dtype, np.complexfloating):
centered = lax.real(lax.mul(centered, lax.conj(centered)))
+ computation_dtype = centered.dtype # avoid casting to complex below.
else:
centered = lax.square(centered)
@@ -443,13 +444,13 @@ def _var(a: ArrayLike, axis: Axis = None, dtype: DTypeLike = None,
normalizer = core.dimension_as_value(np.size(a))
else:
normalizer = core.dimension_as_value(_axis_size(a, axis))
+ normalizer = lax.convert_element_type(normalizer, computation_dtype)
else:
- normalizer = sum(_broadcast_to(where, np.shape(a)), axis, dtype=dtype, keepdims=keepdims)
- normalizer = normalizer - ddof
-
- result = sum(centered, axis, keepdims=keepdims, where=where)
- result = lax.div(result, lax.convert_element_type(normalizer, result.dtype))
- return lax.convert_element_type(result, dtype)
+ normalizer = sum(_broadcast_to(where, np.shape(a)), axis,
+ dtype=computation_dtype, keepdims=keepdims)
+ normalizer = lax.sub(normalizer, lax.convert_element_type(ddof, computation_dtype))
+ result = sum(centered, axis, dtype=computation_dtype, keepdims=keepdims, where=where)
+ return lax.div(result, normalizer).astype(dtype)
def _var_promote_types(a_dtype: DTypeLike, dtype: DTypeLike) -> Tuple[DType, DType]:
@@ -486,6 +487,8 @@ def _std(a: ArrayLike, axis: Axis = None, dtype: DTypeLike = None,
where: Optional[ArrayLike] = None) -> Array:
check_arraylike("std", a)
dtypes.check_user_dtype_supported(dtype, "std")
+ if dtype is not None and not dtypes.issubdtype(dtype, np.inexact):
+ raise ValueError(f"dtype argument to jnp.std must be inexact; got {dtype}")
if out is not None:
raise NotImplementedError("The 'out' argument to jnp.std is not supported.")
return lax.sqrt(var(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, where=where))
| diff --git a/tests/lax_numpy_reducers_test.py b/tests/lax_numpy_reducers_test.py
--- a/tests/lax_numpy_reducers_test.py
+++ b/tests/lax_numpy_reducers_test.py
@@ -753,6 +753,20 @@ def np_fun(*args):
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, rtol=tol)
+ def testMeanLargeArray(self):
+ # https://github.com/google/jax/issues/15068
+ raise unittest.SkipTest("test is slow, but it passes!")
+ x = jnp.ones((16, 32, 1280, 4096), dtype='int8')
+ self.assertEqual(1.0, jnp.mean(x))
+ self.assertEqual(1.0, jnp.mean(x, where=True))
+
+ def testStdLargeArray(self):
+ # https://github.com/google/jax/issues/15068
+ raise unittest.SkipTest("test is slow, but it passes!")
+ x = jnp.ones((16, 32, 1280, 4096), dtype='int8')
+ self.assertEqual(0.0, jnp.std(x))
+ self.assertEqual(0.0, jnp.std(x, where=True))
+
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py
--- a/tests/lax_numpy_test.py
+++ b/tests/lax_numpy_test.py
@@ -5027,13 +5027,6 @@ def testFromString(self):
actual = jnp.fromstring(s, sep=',', dtype=int)
self.assertArraysEqual(expected, actual)
- def testMeanLargeArray(self):
- # https://github.com/google/jax/issues/15068
- raise unittest.SkipTest("test is slow, but it passes!")
- x = jnp.ones((16, 32, 1280, 4096), dtype='int8')
- self.assertEqual(1.0, jnp.mean(x))
- self.assertEqual(1.0, jnp.mean(x, where=True))
-
# Most grad tests are at the lax level (see lax_test.py), but we add some here
# as needed for e.g. particular compound ops of interest.
| jnp.mean and jnp.std crash on large arrays.
### Description
I have a large array of shape float32[16,32,1280,4096]. When I call jnp.std on this array, jit crashes and fails with the following error: "Python int 2684354560 too large to convert to int32".
Clearly the implementation of jnp.std is dividing by the number of elements in the array, which in this case does not fit into a 32-bit integer.
Target platform: pjit on TPU.
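As a rough illustration of why that count overflows (plain arithmetic, not taken from the report):
```python
import numpy as np

n = 16 * 32 * 1280 * 4096
print(n)                       # 2684354560, the integer quoted in the error message
print(np.iinfo(np.int32).max)  # 2147483647, so n does not fit in an int32
```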
### What jax/jaxlib version are you using?
v0.4.2
### Which accelerator(s) are you using?
TPU
### Additional system info
_No response_
### NVIDIA GPU info
_No response_
| Thanks, Delesley! I presume we should convert to float earlier, which the division will do anyway. | 2023-03-17T23:06:26 |
google/jax | 15,126 | google__jax-15126 | [
"15023"
]
| 9a0de29114e02657a552210c5a080d49ec16a88b | diff --git a/jax/_src/api.py b/jax/_src/api.py
--- a/jax/_src/api.py
+++ b/jax/_src/api.py
@@ -2704,6 +2704,16 @@ def device_get(x: Any):
class ShapeDtypeStruct:
+ """A container for the shape, dtype, and other static attributes of an array.
+
+ ``ShapeDtypeStruct`` is often used in conjunction with :func:`jax.eval_shape`.
+
+ Args:
+ shape: a sequence of integers representing an array shape
+ dtype: a dtype-like object
+ named_shape: (optional) a dictionary representing a named shape
+ sharding: (optional) a :class:`jax.Sharding` object
+ """
__slots__ = ["shape", "dtype", "named_shape", "sharding"]
def __init__(self, shape, dtype, named_shape=None, sharding=None):
self.shape = tuple(shape)
@@ -2764,20 +2774,9 @@ def eval_shape(fun: Callable, *args, **kwargs):
def eval_shape(fun, *args, **kwargs):
out = fun(*args, **kwargs)
+ shape_dtype_struct = lambda x: jax.ShapeDtypeStruct(x.shape, x.dtype)
return jax.tree_util.tree_map(shape_dtype_struct, out)
- def shape_dtype_struct(x):
- return ShapeDtypeStruct(x.shape, x.dtype)
-
- class ShapeDtypeStruct:
- __slots__ = ["shape", "dtype"]
- def __init__(self, shape, dtype):
- self.shape = shape
- self.dtype = dtype
-
- In particular, the output is a pytree of objects that have ``shape`` and
- ``dtype`` attributes, but nothing else about them is guaranteed by the API.
-
But instead of applying ``fun`` directly, which might be expensive, it uses
JAX's abstract interpretation machinery to evaluate the shapes without doing
any FLOPs.
@@ -2790,26 +2789,24 @@ def __init__(self, shape, dtype):
*args: a positional argument tuple of arrays, scalars, or (nested) standard
Python containers (tuples, lists, dicts, namedtuples, i.e. pytrees) of
those types. Since only the ``shape`` and ``dtype`` attributes are
- accessed, only values that duck-type arrays are required, rather than real
- ndarrays. The duck-typed objects cannot be namedtuples because those are
- treated as standard Python containers. See the example below.
+ accessed, one can use :class:`jax.ShapeDtypeStruct` or another container
+ that duck-types as ndarrays (note however that duck-typed objects cannot
+ be namedtuples because those are treated as standard Python containers).
**kwargs: a keyword argument dict of arrays, scalars, or (nested) standard
Python containers (pytrees) of those types. As in ``args``, array values
need only be duck-typed to have ``shape`` and ``dtype`` attributes.
+ Returns:
+ out: a nested PyTree containing :class:`jax.ShapeDtypeStruct` objects as leaves.
+
For example:
>>> import jax
>>> import jax.numpy as jnp
>>>
>>> f = lambda A, x: jnp.tanh(jnp.dot(A, x))
- >>> class MyArgArray(object):
- ... def __init__(self, shape, dtype):
- ... self.shape = shape
- ... self.dtype = jnp.dtype(dtype)
- ...
- >>> A = MyArgArray((2000, 3000), jnp.float32)
- >>> x = MyArgArray((3000, 1000), jnp.float32)
+ >>> A = jax.ShapeDtypeStruct((2000, 3000), jnp.float32)
+ >>> x = jax.ShapeDtypeStruct((3000, 1000), jnp.float32)
>>> out = jax.eval_shape(f, A, x) # no FLOPs performed
>>> print(out.shape)
(2000, 1000)
| jax.ShapeDtypeStruct isn't documented
It is helpful for AOT compilation!
| We should also probably update the documentation of [`jax.eval_shape`](https://jax.readthedocs.io/en/latest/_autosummary/jax.eval_shape.html) to make use of `jax.ShapeDtypeStruct` rather than suggesting that users define a similar structure themselves.
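Roughly what the updated docstring example could look like, adapted from the patch above (the shapes are purely illustrative):
```python
import jax
import jax.numpy as jnp

f = lambda A, x: jnp.tanh(jnp.dot(A, x))
A = jax.ShapeDtypeStruct((2000, 3000), jnp.float32)
x = jax.ShapeDtypeStruct((3000, 1000), jnp.float32)
out = jax.eval_shape(f, A, x)  # no FLOPs performed
print(out.shape)               # (2000, 1000)
```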
We use `types.SimpleNamespace` to a similar end as an example throughout our reference documentation, e.g. [here](https://github.com/google/jax/blob/d5f8fd3d851acca570cc588ee4a8ef833a9fe7c1/jax/_src/api.py#L2338).
On the one hand, this isn't `ShapeDtypeStruct`. On the other hand, it helps emphasize that anything that duck-types is meant to work. Should we keep those examples as is, or give up on emphasizing the duck-typing point as part of the docstring example, and switch it over to `ShapeDtypeStruct` as well?
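For reference, a small sketch of the duck-typing point using `types.SimpleNamespace` as mentioned above; any object exposing `shape` and `dtype` attributes should behave the same way (this particular example is hypothetical):
```python
import types
import numpy as np
import jax

# Only .shape and .dtype are inspected, so a plain namespace duck-types as an array:
arg = types.SimpleNamespace(shape=(3, 4), dtype=np.dtype('float32'))
out = jax.eval_shape(lambda x: x.T, arg)
print(out.shape, out.dtype)   # (4, 3) float32
```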
We should either document `ShapeDtypeStruct` or remove it from the API.
If we expect users to use duck types, we should describe clearly what attributes the duck types are expected to have.
Do you mean to state those as mutually exclusive? It seems at least possible to have SDS, document it, allow for anything that duck types like SDS on the input side, and always output SDS. With that, it might make sense to have the SDS symbol in the API for type annotations. (?)
We could entertain a protocol for annotations, but by the time we've written a class to define the protocol, it seems easy to fill it out into a concrete class for optional use too?
I didn't mean they were exclusive. I just mean without documentation, it's hard to work out what attributes matter, and maybe the best form of that documentation is a class like SDS.
I agree with @hawkinsp – using `ShapeDtypeStruct` in the docs is helpful in that it's self-documenting. We could also mention that any duck-typed object will work, but defining a struct in the example makes it look more complicated than it needs to be.
Yeah, great, I agree! | 2023-03-21T19:43:14 |
|
google/jax | 15,140 | google__jax-15140 | [
"15083"
]
| dd2ecf4bb5a2264959833b65a3f1ae7cb81a486d | diff --git a/jax/_src/random.py b/jax/_src/random.py
--- a/jax/_src/random.py
+++ b/jax/_src/random.py
@@ -552,7 +552,14 @@ def choice(key: KeyArray,
def normal(key: KeyArray,
shape: Union[Shape, NamedShape] = (),
dtype: DTypeLikeFloat = dtypes.float_) -> Array:
- """Sample standard normal random values with given shape and float dtype.
+ r"""Sample standard normal random values with given shape and float dtype.
+
+ The values are returned according to the probability density function:
+
+ .. math::
+ f(x) = \frac{1}{\sqrt{2\pi}}e^{-x^2/2}
+
+ on the domain :math:`-\infty < x < \infty`
Args:
key: a PRNG key used as the random key.
@@ -600,7 +607,15 @@ def multivariate_normal(key: KeyArray,
shape: Optional[Shape] = None,
dtype: DTypeLikeFloat = None,
method: str = 'cholesky') -> Array:
- """Sample multivariate normal random values with given mean and covariance.
+ r"""Sample multivariate normal random values with given mean and covariance.
+
+ The values are returned according to the probability density function:
+
+ .. math::
+ f(x;\mu, \Sigma) = (2\pi)^{-k/2} \det(\Sigma)^{-1}e^{-\frac{1}{2}(x - \mu)^T \Sigma^{-1} (x - \mu)}
+
+ where :math:`k` is the dimension, :math:`\mu` is the mean (given by ``mean``) and
+ :math:`\Sigma` is the covariance matrix (given by ``cov``).
Args:
key: a PRNG key used as the random key.
@@ -673,7 +688,14 @@ def truncated_normal(key: KeyArray,
upper: RealArray,
shape: Optional[Union[Shape, NamedShape]] = None,
dtype: DTypeLikeFloat = dtypes.float_) -> Array:
- """Sample truncated standard normal random values with given shape and dtype.
+ r"""Sample truncated standard normal random values with given shape and dtype.
+
+ The values are returned according to the probability density function:
+
+ .. math::
+ f(x) \propto e^{-x^2/2}
+
+ on the domain :math:`\rm{lower} < x < \rm{upper}`.
Args:
key: a PRNG key used as the random key.
@@ -729,7 +751,14 @@ def _truncated_normal(key, lower, upper, shape, dtype) -> Array:
def bernoulli(key: KeyArray,
p: RealArray = np.float32(0.5),
shape: Optional[Union[Shape, NamedShape]] = None) -> Array:
- """Sample Bernoulli random values with given shape and mean.
+ r"""Sample Bernoulli random values with given shape and mean.
+
+ The values are distributed according to the probability mass function:
+
+ .. math::
+ f(k; p) = p^k(1 - p)^{1 - k}
+
+ where :math:`k \in \{0, 1\}` and :math:`0 \le p \le 1`.
Args:
key: a PRNG key used as the random key.
@@ -769,7 +798,14 @@ def beta(key: KeyArray,
b: RealArray,
shape: Optional[Shape] = None,
dtype: DTypeLikeFloat = dtypes.float_) -> Array:
- """Sample Beta random values with given shape and float dtype.
+ r"""Sample Beta random values with given shape and float dtype.
+
+ The values are distributed according to the probability density function:
+
+ .. math::
+ f(x;a,b) \propto x^{a - 1}(1 - x)^{b - 1}
+
+ on the domain :math:`0 \le x \le 1`.
Args:
key: a PRNG key used as the random key.
@@ -820,7 +856,14 @@ def _beta(key, a, b, shape, dtype) -> Array:
def cauchy(key: KeyArray,
shape: Shape = (),
dtype: DTypeLikeFloat = dtypes.float_) -> Array:
- """Sample Cauchy random values with given shape and float dtype.
+ r"""Sample Cauchy random values with given shape and float dtype.
+
+ The values are distributed according to the probability density function:
+
+ .. math::
+ f(x) \propto \frac{1}{x^2 + 1}
+
+ on the domain :math:`-\infty < x < \infty`
Args:
key: a PRNG key used as the random key.
@@ -852,7 +895,19 @@ def dirichlet(key: KeyArray,
alpha: RealArray,
shape: Optional[Shape] = None,
dtype: DTypeLikeFloat = dtypes.float_) -> Array:
- """Sample Dirichlet random values with given shape and float dtype.
+ r"""Sample Dirichlet random values with given shape and float dtype.
+
+ The values are distributed according the the probability density function:
+
+ .. math::
+ f(\{x_i\}; \{\alpha_i\}) = \propto \prod_{i=1}^k x_i^{\alpha_i}
+
+ Where :math:`k` is the dimension, and :math:`\{x_i\}` satisfies
+
+ .. math::
+ \sum_{i=1}^k x_i = 1
+
+ and :math:`0 \le x_i \le 1` for all :math:`x_i`.
Args:
key: a PRNG key used as the random key.
@@ -910,7 +965,14 @@ def _softmax(x, axis) -> Array:
def exponential(key: KeyArray,
shape: Shape = (),
dtype: DTypeLikeFloat = dtypes.float_) -> Array:
- """Sample Exponential random values with given shape and float dtype.
+ r"""Sample Exponential random values with given shape and float dtype.
+
+ The values are distributed according the the probability density function:
+
+ .. math::
+ f(x) = e^{-x}
+
+ on the domain :math:`0 \le x < \infty`.
Args:
key: a PRNG key used as the random key.
@@ -1074,9 +1136,16 @@ def gamma(key: KeyArray,
a: RealArray,
shape: Optional[Shape] = None,
dtype: DTypeLikeFloat = dtypes.float_) -> Array:
- """Sample Gamma random values with given shape and float dtype.
+ r"""Sample Gamma random values with given shape and float dtype.
+
+ The values are distributed according the the probability density function:
- This implements the standard gamma density, with a unit scale/rate parameter.
+ .. math::
+ f(x;a) \propto x^{a - 1} e^{-x}
+
+ on the domain :math:`0 \le x < \infty`, with :math:`a > 0`.
+
+ This is the standard gamma density, with a unit scale/rate parameter.
Dividing the sample output by the rate is equivalent to sampling from
*gamma(a, rate)*, and multiplying the sample output by the scale is equivalent
to sampling from *gamma(a, scale)*.
@@ -1254,7 +1323,14 @@ def poisson(key: KeyArray,
lam: RealArray,
shape: Optional[Shape] = None,
dtype: DTypeLikeInt = dtypes.int_) -> Array:
- """Sample Poisson random values with given shape and integer dtype.
+ r"""Sample Poisson random values with given shape and integer dtype.
+
+ The values are distributed according to the probability mass function:
+
+ .. math::
+ f(k; \lambda) = \frac{\lambda^k e^{-\lambda}}{k!}
+
+ Where `k` is a non-negative integer and :math:`\lambda > 0`.
Args:
key: a PRNG key used as the random key.
@@ -1291,6 +1367,11 @@ def gumbel(key: KeyArray,
dtype: DTypeLikeFloat = dtypes.float_) -> Array:
"""Sample Gumbel random values with given shape and float dtype.
+ The values are distributed according to the probability density function:
+
+ .. math::
+ f(x) = e^{-(x + e^{-x})}
+
Args:
key: a PRNG key used as the random key.
shape: optional, a tuple of nonnegative integers representing the result
@@ -1361,7 +1442,12 @@ def categorical(key: KeyArray,
def laplace(key: KeyArray,
shape: Shape = (),
dtype: DTypeLikeFloat = dtypes.float_) -> Array:
- """Sample Laplace random values with given shape and float dtype.
+ r"""Sample Laplace random values with given shape and float dtype.
+
+ The values are distributed according to the probability density function:
+
+ .. math::
+ f(x) = \frac{1}{2}e^{-|x|}
Args:
key: a PRNG key used as the random key.
@@ -1392,7 +1478,12 @@ def _laplace(key, shape, dtype) -> Array:
def logistic(key: KeyArray,
shape: Shape = (),
dtype: DTypeLikeFloat = dtypes.float_) -> Array:
- """Sample logistic random values with given shape and float dtype.
+ r"""Sample logistic random values with given shape and float dtype.
+
+ The values are distributed according to the probability density function:
+
+ .. math::
+ f(x) = \frac{e^{-x}}{(1 + e^{-x})^2}
Args:
key: a PRNG key used as the random key.
@@ -1423,7 +1514,14 @@ def pareto(key: KeyArray,
b: RealArray,
shape: Optional[Shape] = None,
dtype: DTypeLikeFloat = dtypes.float_) -> Array:
- """Sample Pareto random values with given shape and float dtype.
+ r"""Sample Pareto random values with given shape and float dtype.
+
+ The values are distributed according to the probability density function:
+
+ .. math::
+ f(x; b) = b / x^{b + 1}
+
+ on the domain :math:`0 \le x < \infty` with :math:`b > 0`
Args:
key: a PRNG key used as the random key.
@@ -1464,12 +1562,19 @@ def t(key: KeyArray,
df: RealArray,
shape: Shape = (),
dtype: DTypeLikeFloat = dtypes.float_) -> Array:
- """Sample Student's t random values with given shape and float dtype.
+ r"""Sample Student's t random values with given shape and float dtype.
+
+ The values are distributed according to the probability density function:
+
+ .. math::
+ f(t; \nu) \propto \left(1 + \frac{t^2}{\nu}\right)^{-(\nu + 1)/2}
+
+ Where :math:`\nu > 0` is the degrees of freedom, given by the parameter ``df``.
Args:
key: a PRNG key used as the random key.
df: a float or array of floats broadcast-compatible with ``shape``
- representing the parameter of the distribution.
+ representing the degrees of freedom parameter of the distribution.
shape: optional, a tuple of nonnegative integers specifying the result
shape. Must be broadcast-compatible with ``df``. The default (None)
produces a result shape equal to ``df.shape``.
@@ -1508,7 +1613,15 @@ def chisquare(key: KeyArray,
df: RealArray,
shape: Optional[Shape] = None,
dtype: DTypeLikeFloat = dtypes.float_) -> Array:
- """Sample Chisquare random values with given shape and float dtype.
+ r"""Sample Chisquare random values with given shape and float dtype.
+
+ The values are distributed according to the probability density function:
+
+ .. math::
+ f(x; \nu) \propto x^{k/2 - 1}e^{-x/2}
+
+ on the domain :math:`0 < x < \infty`, where :math:`\nu > 0` represents the
+ degrees of freedom, given by the parameter ``df``.
Args:
key: a PRNG key used as the random key.
@@ -1552,7 +1665,17 @@ def f(key: KeyArray,
dfden: RealArray,
shape: Optional[Shape] = None,
dtype: DTypeLikeFloat = dtypes.float_) -> Array:
- """Sample F-distribution random values with given shape and float dtype.
+ r"""Sample F-distribution random values with given shape and float dtype.
+
+ The values are distributed according to the probability density function:
+
+ .. math::
+ f(x; \nu) \propto x^{\nu_1/2 - 1}\left(1 + \frac{\nu_1}{\nu_2}x\right)^{
+ -(\nu_1 + \nu_2) / 2}
+
+ on the domain :math:`0 < x < \infty`. Here :math:`\nu_1` is the degrees of
+ freedom of the numerator (``dfnum``), and :math:`\nu_2` is the degrees of
+ freedom of the denominator (``dfden``).
Args:
key: a PRNG key used as the random key.
@@ -1603,7 +1726,14 @@ def _f(key, dfnum, dfden, shape, dtype) -> Array:
def rademacher(key: KeyArray,
shape: Shape,
dtype: DTypeLikeInt = dtypes.int_) -> Array:
- """Sample from a Rademacher distribution.
+ r"""Sample from a Rademacher distribution.
+
+ The values are distributed according to the probability mass function:
+
+ .. math::
+ f(k) = \frac{1}{2}(\delta(k - 1) + \delta(k + 1))
+
+ on the domain :math:`k \in \{-1, 1}`, where `\delta(x)` is the dirac delta function.
Args:
key: a PRNG key.
@@ -1630,9 +1760,14 @@ def _rademacher(key, shape, dtype) -> Array:
def maxwell(key: KeyArray,
shape: Shape = (),
dtype: DTypeLikeFloat = dtypes.float_) -> Array:
- """Sample from a one sided Maxwell distribution.
+ r"""Sample from a one sided Maxwell distribution.
+
+ The values are distributed according to the probability density function:
+
+ .. math::
+ f(x) \propto x^2 e^{-x^2 / 2}
- The scipy counterpart is `scipy.stats.maxwell`.
+ on the domain :math:`0 \le x < \infty`.
Args:
key: a PRNG key.
@@ -1666,10 +1801,15 @@ def double_sided_maxwell(key: KeyArray,
scale: RealArray,
shape: Shape = (),
dtype: DTypeLikeFloat = dtypes.float_) -> Array:
- """Sample from a double sided Maxwell distribution.
+ r"""Sample from a double sided Maxwell distribution.
+
+ The values are distributed according to the probability density function:
+
+ .. math::
+ f(x;\mu,\sigma) \propto z^2 e^{-z^2 / 2}
- Samples using:
- loc + scale* sgn(U-0.5)* one_sided_maxwell U~Unif;
+ where :math:`z = (x - \mu) / \sigma`, with the center :math:`\mu` specified by
+ ``loc`` and the scale :math:`\sigma` specified by ``scale``.
Args:
key: a PRNG key.
@@ -1712,9 +1852,15 @@ def weibull_min(key: KeyArray,
concentration: RealArray,
shape: Shape = (),
dtype: DTypeLikeFloat = dtypes.float_) -> Array:
- """Sample from a Weibull distribution.
+ r"""Sample from a Weibull distribution.
- The scipy counterpart is `scipy.stats.weibull_min`.
+ The values are distributed according to the probability density function:
+
+ .. math::
+ f(x;\sigma,c) \propto x^{c - 1} \exp(-(x / \sigma)^c)
+
+ on the domain :math:`0 < x < \infty`, where :math:`c > 0` is the concentration
+ parameter, and :math:`\sigma > 0` is the scale parameter.
Args:
key: a PRNG key.
@@ -1788,7 +1934,15 @@ def generalized_normal(
shape: Shape = (),
dtype: DTypeLikeFloat = dtypes.float_
) -> Array:
- """Sample from the generalized normal distribution.
+ r"""Sample from the generalized normal distribution.
+
+ The values are returned according to the probability density function:
+
+ .. math::
+ f(x;p) \propto e^{-|x|^p}
+
+ on the domain :math:`-\infty < x < \infty`, where :math:`p > 0` is the
+ shape parameter.
Args:
key: a PRNG key used as the random key.
@@ -1842,7 +1996,15 @@ def rayleigh(key: KeyArray,
scale: RealArray,
shape: Optional[Shape] = None,
dtype: DTypeLikeFloat = dtypes.float_) -> Array:
- """Sample Rayleigh random values with given shape and float dtype.
+ r"""Sample Rayleigh random values with given shape and float dtype.
+
+ The values are returned according to the probability density function:
+
+ .. math::
+ f(x;\sigma) \propto xe^{-x^2/(2\sigma^2)}
+
+ on the domain :math:`-\infty < x < \infty`, and where `\sigma > 0` is the scale
+ parameter of the distribution.
Args:
key: a PRNG key used as the random key.
@@ -1885,7 +2047,16 @@ def wald(key: KeyArray,
mean: RealArray,
shape: Optional[Shape] = None,
dtype: DTypeLikeFloat = dtypes.float_) -> Array:
- """Sample Wald random values with given shape and float dtype.
+ r"""Sample Wald random values with given shape and float dtype.
+
+ The values are returned according to the probability density function:
+
+ .. math::
+ f(x;\mu) = \frac{1}{\sqrt{2\pi x^3}} \exp\left(-\frac{(x - \mu)^2}{2\mu^2 x}\right)
+
+ on the domain :math:`-\infty < x < \infty`, and where :math:`\mu > 0` is the location
+ parameter of the distribution.
+
Args:
key: a PRNG key used as the random key.
| random.* docs are not specific enough about probability density function
### Description
jax.random.normal and other jax.random.* should specify the actual probability density function (see numpy.random.normal for an example). Otherwise there is the potential for confusion (see e.g. https://math.stackexchange.com/questions/1013575/continuous-probability-average-power-of-a-gaussian-random-variable-mathcaln)
For the doc fix, one possibility would be to put the PDF equation inline in the docs, as numpy.random.normal does. Another possibility would be to state that the PDF is the same as some numpy routine.
For jax.random.normal, that would be numpy.random.normal(loc=0.0, scale=1.0).
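A small sanity check along those lines (illustrative only; the exact sample statistics will vary with the key):
```python
import jax

key = jax.random.PRNGKey(0)
x = jax.random.normal(key, (100_000,))
print(x.mean(), x.std())   # both close to 0 and 1, i.e. loc=0.0, scale=1.0
```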
### What jax/jaxlib version are you using?
_No response_
### Which accelerator(s) are you using?
_No response_
### Additional system info
_No response_
### NVIDIA GPU info
_No response_
| Thanks, this is a great suggestion. | 2023-03-22T17:57:56 |
|
google/jax | 15,192 | google__jax-15192 | [
"15190"
]
| 4cb3b011a0a7ba9d47049544d51da09dec48db72 | diff --git a/jax/_src/numpy/array_methods.py b/jax/_src/numpy/array_methods.py
--- a/jax/_src/numpy/array_methods.py
+++ b/jax/_src/numpy/array_methods.py
@@ -725,7 +725,7 @@ def max(self, values, *, indices_are_sorted=False, unique_indices=False,
"ravel": lax_numpy.ravel,
"repeat": lax_numpy.repeat,
"reshape": _reshape,
- "round": round,
+ "round": lax_numpy.round,
"searchsorted": lax_numpy.searchsorted,
"sort": lax_numpy.sort,
"squeeze": lax_numpy.squeeze,
| diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py
--- a/tests/lax_numpy_test.py
+++ b/tests/lax_numpy_test.py
@@ -858,6 +858,10 @@ def testOperatorRound(self, jit):
jround(jnp.array(1.234, jnp.float32)),
check_dtypes=False)
+ def testRoundMethod(self):
+ # https://github.com/google/jax/issues/15190
+ (jnp.arange(3.) / 5.).round() # doesn't crash
+
@jtu.sample_product(shape=[(5,), (5, 2)])
def testOperatorReversed(self, shape):
rng = jtu.rand_default(self.rng())
| ndarray.round() stopped working without an argument.
### Description
`round()` as an ndarray member function (as in NumPy) used to work without an argument, but it is no longer working.
```python
_ = jnp.zeros(1).round()
```
Expected to execute without an error, but fails with error message
```
TypeError: round() missing required argument 'number' (pos 1)
```
According to Numpy documentation, this should work without an argument.
https://numpy.org/doc/stable/reference/generated/numpy.ndarray.round.html
### What jax/jaxlib version are you using?
head
### Which accelerator(s) are you using?
cpu
### Additional system info
python 3.10, linux
### NVIDIA GPU info
_No response_
| Probably related to https://github.com/google/jax/pull/15172 | 2023-03-24T03:16:55 |
google/jax | 15,206 | google__jax-15206 | [
"15195"
]
| 670fba3a9191f4311ba550e3c2e27c5fca2831e6 | diff --git a/jax/_src/scipy/linalg.py b/jax/_src/scipy/linalg.py
--- a/jax/_src/scipy/linalg.py
+++ b/jax/_src/scipy/linalg.py
@@ -433,6 +433,16 @@ def triu(m: ArrayLike, k: int = 0) -> Array:
@_wraps(scipy.linalg.expm, lax_description=_expm_description)
@partial(jit, static_argnames=('upper_triangular', 'max_squarings'))
def expm(A: ArrayLike, *, upper_triangular: bool = False, max_squarings: int = 16) -> Array:
+ A, = promote_dtypes_inexact(A)
+
+ if A.ndim < 2 or A.shape[-1] != A.shape[-2]:
+ raise ValueError(f"Expected A to be a (batched) square matrix, got {A.shape=}.")
+
+ if A.ndim > 2:
+ return jnp.vectorize(
+ partial(expm, upper_triangular=upper_triangular, max_squarings=max_squarings),
+ signature="(n,n)->(n,n)")(A)
+
P, Q, n_squarings = _calc_P_Q(A)
def _nan(args):
| diff --git a/tests/linalg_test.py b/tests/linalg_test.py
--- a/tests/linalg_test.py
+++ b/tests/linalg_test.py
@@ -29,12 +29,15 @@
from jax import lax
from jax import numpy as jnp
from jax import scipy as jsp
+from jax._src.numpy.util import promote_dtypes_inexact
from jax._src import test_util as jtu
from jax.config import config
config.parse_flags_with_absl()
FLAGS = config.FLAGS
+scipy_version = tuple(map(int, scipy.version.version.split('.')[:3]))
+
T = lambda x: np.swapaxes(x, -1, -2)
@@ -1240,18 +1243,21 @@ def testTriangularSolveGradPrecision(self):
@jtu.sample_product(
n=[1, 4, 5, 20, 50, 100],
- dtype=float_types + complex_types,
+ batch_size=[(), (2,), (3, 4)] if scipy_version >= (1, 9, 0) else [()],
+ dtype=int_types + float_types + complex_types
)
- def testExpm(self, n, dtype):
+ def testExpm(self, n, batch_size, dtype):
rng = jtu.rand_small(self.rng())
- args_maker = lambda: [rng((n, n), dtype)]
+ args_maker = lambda: [rng((*batch_size, n, n), dtype)]
- osp_fun = lambda a: osp.linalg.expm(a)
- jsp_fun = lambda a: jsp.linalg.expm(a)
+ # Compare to numpy with JAX type promotion semantics.
+ def osp_fun(A):
+ return osp.linalg.expm(np.array(*promote_dtypes_inexact(A)))
+ jsp_fun = jsp.linalg.expm
self._CheckAgainstNumpy(osp_fun, jsp_fun, args_maker)
self._CompileAndCheck(jsp_fun, args_maker)
- args_maker_triu = lambda: [np.triu(rng((n, n), dtype))]
+ args_maker_triu = lambda: [np.triu(rng((*batch_size, n, n), dtype))]
jsp_fun_triu = lambda a: jsp.linalg.expm(a, upper_triangular=True)
self._CheckAgainstNumpy(osp_fun, jsp_fun_triu, args_maker_triu)
self._CompileAndCheck(jsp_fun_triu, args_maker_triu)
| expm does not support ndarray
### Description
Hi, I tried to use jax.scipy.linalg.expm with a 3-dimensional array but got an error.
The docs say it should work when the last 2 dimensions have the same length, but it failed. Can you please check?
https://jax.readthedocs.io/en/latest/_autosummary/jax.scipy.linalg.expm.html
```python
import jax
import jax.numpy as jnp
A = jnp.arange(2*11*11).reshape((2,11,11))
jax.scipy.linalg.expm(A)
```
> Traceback (most recent call last):
> File "/Users/yongha/miniforge3_m1/envs/rcwa/lib/python3.9/site-packages/IPython/core/interactiveshell.py", line 3378, in run_code
> exec(code_obj, self.user_global_ns, self.user_ns)
> File "<ipython-input-5-d2ac0f4d7ead>", line 1, in <module>
> jax.scipy.linalg.expm(A)
> File "/Users/yongha/miniforge3_m1/envs/rcwa/lib/python3.9/site-packages/jax/_src/traceback_util.py", line 162, in reraise_with_filtered_traceback
> return fun(*args, **kwargs)
> File "/Users/yongha/miniforge3_m1/envs/rcwa/lib/python3.9/site-packages/jax/_src/api.py", line 622, in cache_miss
> execute = dispatch._xla_call_impl_lazy(fun_, *tracers, **params)
> File "/Users/yongha/miniforge3_m1/envs/rcwa/lib/python3.9/site-packages/jax/_src/dispatch.py", line 241, in _xla_call_impl_lazy
> return xla_callable(fun, device, backend, name, donated_invars, keep_unused,
> File "/Users/yongha/miniforge3_m1/envs/rcwa/lib/python3.9/site-packages/jax/linear_util.py", line 303, in memoized_fun
> ans = call(fun, *args)
> File "/Users/yongha/miniforge3_m1/envs/rcwa/lib/python3.9/site-packages/jax/_src/dispatch.py", line 357, in _xla_callable_uncached
> computation = sharded_lowering(fun, device, backend, name, donated_invars,
> File "/Users/yongha/miniforge3_m1/envs/rcwa/lib/python3.9/site-packages/jax/_src/dispatch.py", line 348, in sharded_lowering
> return pxla.lower_sharding_computation(
> File "/Users/yongha/miniforge3_m1/envs/rcwa/lib/python3.9/site-packages/jax/_src/profiler.py", line 314, in wrapper
> return func(*args, **kwargs)
> File "/Users/yongha/miniforge3_m1/envs/rcwa/lib/python3.9/site-packages/jax/interpreters/pxla.py", line 2792, in lower_sharding_computation
> jaxpr, global_out_avals, consts = pe.trace_to_jaxpr_final(
> File "/Users/yongha/miniforge3_m1/envs/rcwa/lib/python3.9/site-packages/jax/_src/profiler.py", line 314, in wrapper
> return func(*args, **kwargs)
> File "/Users/yongha/miniforge3_m1/envs/rcwa/lib/python3.9/site-packages/jax/interpreters/partial_eval.py", line 2065, in trace_to_jaxpr_final
> jaxpr, out_avals, consts = trace_to_subjaxpr_dynamic(
> File "/Users/yongha/miniforge3_m1/envs/rcwa/lib/python3.9/site-packages/jax/interpreters/partial_eval.py", line 1998, in trace_to_subjaxpr_dynamic
> ans = fun.call_wrapped(*in_tracers_)
> File "/Users/yongha/miniforge3_m1/envs/rcwa/lib/python3.9/site-packages/jax/linear_util.py", line 167, in call_wrapped
> ans = self.f(*args, **dict(self.params, **kwargs))
> File "/Users/yongha/miniforge3_m1/envs/rcwa/lib/python3.9/site-packages/jax/_src/scipy/linalg.py", line 436, in expm
> P, Q, n_squarings = _calc_P_Q(A)
> File "/Users/yongha/miniforge3_m1/envs/rcwa/lib/python3.9/site-packages/jax/_src/traceback_util.py", line 162, in reraise_with_filtered_traceback
> return fun(*args, **kwargs)
> File "/Users/yongha/miniforge3_m1/envs/rcwa/lib/python3.9/site-packages/jax/_src/api.py", line 626, in cache_miss
> top_trace.process_call(primitive, fun_, tracers, params))
> File "/Users/yongha/miniforge3_m1/envs/rcwa/lib/python3.9/site-packages/jax/interpreters/partial_eval.py", line 1739, in process_call
> jaxpr, out_type, consts = trace_to_subjaxpr_dynamic2(f, self.main, debug_info=dbg)
> File "/Users/yongha/miniforge3_m1/envs/rcwa/lib/python3.9/site-packages/jax/interpreters/partial_eval.py", line 2027, in trace_to_subjaxpr_dynamic2
> ans = fun.call_wrapped(*in_tracers_)
> File "/Users/yongha/miniforge3_m1/envs/rcwa/lib/python3.9/site-packages/jax/linear_util.py", line 167, in call_wrapped
> ans = self.f(*args, **dict(self.params, **kwargs))
> File "/Users/yongha/miniforge3_m1/envs/rcwa/lib/python3.9/site-packages/jax/_src/scipy/linalg.py", line 455, in _calc_P_Q
> raise ValueError('expected A to be a square matrix')
> ValueError: expected A to be a square matrix
### What jax/jaxlib version are you using?
jax 0.4.1, jaxlib 0.4.1
### Which accelerator(s) are you using?
CPU
### Additional system info
M1
### NVIDIA GPU info
_No response_
| Thanks for the report, that's definitely a bug!
Until we get it fixed, you can just batch it with `jax.vmap`, as in `jax.vmap(jax.scipy.linalg.expm)(A)` (though also it doesn't accept int32, which seems to be another bug, so you'll need to explicitly cast like `jax.vmap(jax.scipy.linalg.expm)(A.astype('float32'))`).
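Putting that suggested workaround together for the array from the report (assuming the float cast mentioned above):
```python
import jax
import jax.numpy as jnp

A = jnp.arange(2 * 11 * 11).reshape((2, 11, 11))
# Batch the 2-D expm over the leading axis and cast away from int32:
out = jax.vmap(jax.scipy.linalg.expm)(A.astype('float32'))
print(out.shape)   # (2, 11, 11)
```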
I found via test failures that batched expm was only added in recent scipy versions, which explains why it wasn't part of the original JAX implementation 😁 (https://github.com/scipy/scipy/pull/15079)
I ran some tests with `expm` and the result seems slower than I expected.
For a 2D array `x`,
```python
def expm(x):
# option 1
res = jnp.diag(jnp.exp(jnp.diag(x)))
# option 2
# TODO: vmap is very slow
res = jax.vmap(jax.scipy.linalg.expm)(x[None, :, :])
res = res[0]
# option 3
res = jax.scipy.linalg.expm(x)
return res
```
This code was used in my project (electromagnetic simulation) with the jit option:
option 1:
time at 1st run: 7.054819107055664
time at 2nd run: 0.008198022842407227
option 2:
time at 1st run: 106.56755089759827
time at 2nd run: 0.9272429943084717
option 3:
time at 1st run: 109.10750484466553
time at 2nd run: 0.758314847946167
I think it's slow even though I used `vmap` for a single operation.
For 3D array case,
```python
def expm_new(x):
# option 1
res = jnp.zeros(x.shape, dtype=jnp.complex128)
ix = jnp.diag_indices_from(x[0])
res = res.at[:, ix[0], ix[1]].set(jnp.exp(x[:, ix[0], ix[1]]))
# option 2
# res0 = jax.vmap(jax.scipy.linalg.expm)(x) # TODO: remove vmap when the bug is fixed
return res
```
option 1
time at 1st run: 0.32639575004577637
time at 2nd run: 0.00822305679321289
option 2
time at 1st run: 4.233128070831299
time at 2nd run: 0.92429518699646
Will this be improved, or is it better to keep my version?
Thanks for the help!
If you're running microbenchmarks of JAX code, be sure to follow the advice at https://jax.readthedocs.io/en/latest/faq.html#benchmarking-jax-code (thinking about JIT compilation time, device transfer cost, asynchronous dispatch, etc.).
For example, running on a Colab CPU, I get these results (on GPU JAX is significantly faster, but it's hardly a fair comparison):
```python
import numpy as np
import scipy.linalg
import jax
import jax.numpy as jnp
from jax.scipy import linalg as jsp_linalg
# 2D Case
x_np = np.random.randn(200,200).astype('float32')
x_jnp = jnp.asarray(x_np)
jit_expm = jax.jit(jsp_linalg.expm)
_ = jit_expm(x_jnp) # trigger compilation
%timeit scipy.linalg.expm(x_np)
# 15.3 ms ± 4.93 ms per loop (mean ± std. dev. of 7 runs, 100 loops each)
%timeit jit_expm(x_jnp).block_until_ready()
# 6.74 ms ± 1.05 ms per loop (mean ± std. dev. of 7 runs, 100 loops each)
# 3D Case
y_np = np.random.randn(3, 200,200).astype('float32')
y_jnp = jnp.asarray(y_np)
jit_vmap_expm = jax.jit(jax.vmap(jsp_linalg.expm))
_ = jit_vmap_expm(y_jnp) # trigger compilation
%timeit scipy.linalg.expm(y_np)
# 56 ms ± 24.9 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
%timeit jit_vmap_expm(y_jnp).block_until_ready()
# 53.8 ms ± 6.11 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
```
Note that this is a situation (what amounts to basically a single LAPACK operation run on CPU) where I wouldn't necessarily expect JAX to be faster than SciPy; see [FAQ: is JAX faster than NumPy?](https://jax.readthedocs.io/en/latest/faq.html#is-jax-faster-than-numpy)
Thanks for the kind reminder! I checked again but got the same result. I was able to see the final calculation result in that time.
The process I used is a bit more complex.
```python
@partial(jax.jit, static_argnums=(5, 6, 10, 11))
def field_dist_2d_vectorized_kji(wavelength, kx_vector, n_I, theta, phi, fourier_order_x, fourier_order_y, T1, layer_info_list, period,
resolution=(10, 10, 10), type_complex=jnp.complex128):
k0 = 2 * jnp.pi / wavelength
fourier_indices_y = jnp.arange(-fourier_order_y, fourier_order_y + 1)
ff_x = fourier_order_x * 2 + 1
ff_y = fourier_order_y * 2 + 1
ky_vector = k0 * (n_I * jnp.sin(theta) * jnp.sin(phi) + fourier_indices_y * (
wavelength / period[1])).astype(type_complex)
Kx = jnp.diag(jnp.tile(kx_vector, ff_y).flatten()) / k0
Ky = jnp.diag(jnp.tile(ky_vector.reshape((-1, 1)), ff_x).flatten()) / k0
resolution_z, resolution_y, resolution_x = resolution
field_cell = jnp.zeros((resolution_z * len(layer_info_list), resolution_y, resolution_x, 6), dtype=type_complex)
T_layer = T1
big_I = jnp.eye((len(T1))).astype(type_complex)
# From the first layer
for idx_layer, (E_conv_i, q, W_11, W_12, W_21, W_22, V_11, V_12, V_21, V_22, big_X, big_A_i, big_B, d)\
in enumerate(layer_info_list[::-1]):
c = jnp.block([[big_I], [big_B @ big_A_i @ big_X]]) @ T_layer
z_1d = jnp.arange(resolution_z).reshape((-1, 1, 1)) / resolution_z * d
ff = len(c) // 4
c1_plus = c[0 * ff:1 * ff]
c2_plus = c[1 * ff:2 * ff]
c1_minus = c[2 * ff:3 * ff]
c2_minus = c[3 * ff:4 * ff]
q1 = q[:len(q) // 2]
q2 = q[len(q) // 2:]
big_Q1 = jnp.diag(q1)
big_Q2 = jnp.diag(q2)
Sx = W_11 @ (expm_new(-k0 * big_Q1 * z_1d) @ c1_plus + expm_new(k0 * big_Q1 * (z_1d - d)) @ c1_minus) \
+ W_12 @ (expm_new(-k0 * big_Q2 * z_1d) @ c2_plus + expm_new(k0 * big_Q2 * (z_1d - d)) @ c2_minus)
Sy = W_21 @ (expm_new(-k0 * big_Q1 * z_1d) @ c1_plus + expm_new(k0 * big_Q1 * (z_1d - d)) @ c1_minus) \
+ W_22 @ (expm_new(-k0 * big_Q2 * z_1d) @ c2_plus + expm_new(k0 * big_Q2 * (z_1d - d)) @ c2_minus)
Ux = V_11 @ (-expm_new(-k0 * big_Q1 * z_1d) @ c1_plus + expm_new(k0 * big_Q1 * (z_1d - d)) @ c1_minus) \
+ V_12 @ (-expm_new(-k0 * big_Q2 * z_1d) @ c2_plus + expm_new(k0 * big_Q2 * (z_1d - d)) @ c2_minus)
Uy = V_21 @ (-expm_new(-k0 * big_Q1 * z_1d) @ c1_plus + expm_new(k0 * big_Q1 * (z_1d - d)) @ c1_minus) \
+ V_22 @ (-expm_new(-k0 * big_Q2 * z_1d) @ c2_plus + expm_new(k0 * big_Q2 * (z_1d - d)) @ c2_minus)
Sz = -1j * E_conv_i @ (Kx @ Uy - Ky @ Ux)
Uz = -1j * (Kx @ Sy - Ky @ Sx)
x_1d = jnp.arange(resolution_x).reshape((1, -1, 1))
y_1d = jnp.arange(resolution_y).reshape((-1, 1, 1))
x_1d = -1j * x_1d * period[0] / resolution_x
y_1d = -1j * y_1d * period[1] / resolution_y
x_2d = jnp.tile(x_1d, (resolution_y, 1, 1))
y_2d = jnp.tile(y_1d, (1, resolution_x, 1))
x_2d = x_2d * kx_vector
y_2d = y_2d * ky_vector
x_2d = x_2d.reshape((resolution_y, resolution_x, 1, len(kx_vector)))
y_2d = y_2d.reshape((resolution_y, resolution_x, len(ky_vector), 1))
exp_K = jnp.exp(x_2d) * jnp.exp(y_2d)
exp_K = exp_K.reshape((resolution_y, resolution_x, -1))
Ex = exp_K[:, :, None, :] @ Sx[:, None, None, :, :]
Ey = exp_K[:, :, None, :] @ Sy[:, None, None, :, :]
Ez = exp_K[:, :, None, :] @ Sz[:, None, None, :, :]
Hx = -1j * exp_K[:, :, None, :] @ Ux[:, None, None, :, :]
Hy = -1j * exp_K[:, :, None, :] @ Uy[:, None, None, :, :]
Hz = -1j * exp_K[:, :, None, :] @ Uz[:, None, None, :, :]
val = jnp.concatenate(
(Ex.squeeze(-1), Ey.squeeze(-1), Ez.squeeze(-1), Hx.squeeze(-1), Hy.squeeze(-1), Hz.squeeze(-1)),
axis=-1)
field_cell = field_cell.at[resolution_z * idx_layer:resolution_z * (idx_layer + 1)].set(val)
T_layer = big_A_i @ big_X @ T_layer
return field_cell
```
There are extensive matrix operations, so I think that could be a reason my result differs from your test. Could it be a compilation optimization thing?
Full code is uploaded here
https://github.com/kc-ml2/meent/blob/DEV_JAX_expm/benchmarks/jax_expm.py
and expm option is here
https://github.com/kc-ml2/meent/blob/DEV_JAX_expm/meent/on_jax/emsolver/field_distribution.py
```python
def expm(x):
# option 1
res = jnp.diag(jnp.exp(jnp.diag(x)))
# option 2
# res = jax.vmap(jax.scipy.linalg.expm)(x[None, :, :]) # vmap is very slow
# res = res[0]
# option 3
# res = jax.scipy.linalg.expm(x)
return res
def expm_new(x):
# option 1
res = jnp.zeros(x.shape, dtype=jnp.complex128)
ix = jnp.diag_indices_from(x[0])
res = res.at[:, ix[0], ix[1]].set(jnp.exp(x[:, ix[0], ix[1]]))
# option 2
# res = jax.vmap(jax.scipy.linalg.expm)(x) # remove vmap when the bug is fixed. But vmap is very slow.
return res
```
Thanks for the full example, but I'm unsure what your question is.
> Note that this is a situation (what amounts to basically a single LAPACK operation run on CPU) where I wouldn't necessarily expect JAX to be faster than SciPy; see [FAQ: is JAX faster than NumPy?](https://jax.readthedocs.io/en/latest/faq.html#is-jax-faster-than-numpy)
I just want to stress that option 1 (manual JAX expm) is much faster than option 2 (SciPy expm).
```python
def expm_new(x):
# option 1
res = jnp.zeros(x.shape, dtype=jnp.complex128)
ix = jnp.diag_indices_from(x[0])
res = res.at[:, ix[0], ix[1]].set(jnp.exp(x[:, ix[0], ix[1]]))
# option 2
# res0 = jax.vmap(jax.scipy.linalg.expm)(x) # TODO: remove vmap when the bug is fixed
return res
```
Thanks - it makes sense that a jit-compiled sequence of operations in JAX would be faster than a similar sequence of operations in numpy/scipy.
Also, if you're directly comparing jax performance to scipy performance, differing dtypes might play a role. numpy/scipy default to float64 operations, while JAX will do everything in float32 unless you explicitly enable float64 (see [Sharp Bits: 64-bit Precision](https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision)).
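For completeness, enabling 64-bit mode looks roughly like this (it must happen before any other JAX work in the process):
```python
import jax
jax.config.update("jax_enable_x64", True)   # must run before other JAX operations

import jax.numpy as jnp
print(jnp.zeros(3).dtype)   # float64 once x64 mode is enabled
```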
yep. That was the same condition - 64bit.
Thanks!
Please be careful of the fact that the `jax_enable_x64` config should be set at the start of the script, before running any other JAX code. This is discussed at the link I shared above.
OK. Thanks for the kind reminder :)
But now I'm confused.
>Thanks - it makes sense that in a jit-compiled sequence of operations in JAX would be faster than a similar sequence of operations in numpy/scipy.
Then is it recommended to write a manual JAX function rather than using the existing jax.numpy or jax.scipy one in the case of a jit-compiled sequence?
Sorry, I think I'm confused about what you're asking. Which two operations are you comparing? I thought you were comparing `scipy.linalg.expm` to `jax.scipy.linalg.expm`. Is that not the case?
Yes, I thought there was a miscommunication :)
I compared a manual JAX expm function with `jax.scipy.linalg.expm`.
```python
# option 1
res = jnp.zeros(x.shape, dtype=jnp.complex128)
ix = jnp.diag_indices_from(x[0])
res = res.at[:, ix[0], ix[1]].set(jnp.exp(x[:, ix[0], ix[1]]))
# option 2
res0 = jax.vmap(jax.scipy.linalg.expm)(x) # TODO: remove vmap when the bug is fixed
```
Options 1 and 2 are both `expm` functions, but option 1 is what I wrote and option 2 is from jax.scipy.
> option 1
> time at 1st run: 0.32639575004577637
> time at 2nd run: 0.00822305679321289
>
> option 2
> time at 1st run: 4.233128070831299
> time at 2nd run: 0.92429518699646
and option 1 is much faster.
I see, thanks for the clarification. Those are entirely different operations. The first is extracting the diagonal component of the matrix and computing its elementwise exponential. This is a relatively fast computation. The second computes the full expm of the matrix. This is a relatively costly computation.
These are entirely different operations, but they will agree if `x` happens to be a diagonal matrix. If you know *a priori* that the input is a diagonal matrix, you should use the faster approach. If you do not know this *a priori* and your intent is to compute the matrix exponent, then the faster approach will in general return the incorrect result.
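A tiny check of that statement, using hypothetical matrices: exponentiating the diagonal matches `expm` only when the input is diagonal.
```python
import jax.numpy as jnp
from jax.scipy.linalg import expm

diag_only = lambda m: jnp.diag(jnp.exp(jnp.diag(m)))

D = jnp.diag(jnp.array([1.0, 2.0]))
print(jnp.allclose(expm(D), diag_only(D)))   # True: the two agree for diagonal input

A = jnp.array([[0.0, 1.0], [1.0, 0.0]])
print(jnp.allclose(expm(A), diag_only(A)))   # False: off-diagonal terms matter
```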
Oh my... You are right. I misunderstood `expm`.
I really appreciate your kindness. | 2023-03-24T18:31:34 |
google/jax | 15,323 | google__jax-15323 | [
"15320"
]
| c978df5dbbe5ac3d0c2cfc3a7388eca7710d1bc9 | diff --git a/jax/_src/random.py b/jax/_src/random.py
--- a/jax/_src/random.py
+++ b/jax/_src/random.py
@@ -2036,6 +2036,7 @@ def _rayleigh(key, scale, shape, dtype) -> Array:
_check_shape("rayleigh", shape, np.shape(scale))
u = uniform(key, shape, dtype)
scale = scale.astype(dtype)
+ scale = jnp.broadcast_to(scale, shape)
log_u = lax.log(u)
n_two = _lax_const(scale, -2)
sqrt_u = lax.sqrt(lax.mul(log_u, n_two))
| Rayleigh sampling doesn't support broadcasting
### Description
```python
import jax.numpy as jnp
from jax.random import rayleigh, PRNGKey
key = PRNGKey(123)
sigma = jnp.ones(10)
rayleigh(key, sigma, (3, 10))
```
I'm happy to submit a PR for this.
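Until the fix lands, pre-broadcasting the scale by hand is a possible workaround; this is a sketch mirroring the one-line patch above, not verified here:
```python
import jax.numpy as jnp
from jax.random import rayleigh, PRNGKey

key = PRNGKey(123)
sigma = jnp.ones(10)
# Broadcast the scale to the requested shape manually, as the patch does internally:
samples = rayleigh(key, jnp.broadcast_to(sigma, (3, 10)), (3, 10))
```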
| Thanks - the entire random module treats shape and dtype arguments inconsistently, and needs to be overhauled. I have some work in progress to do this more comprehensively (first PR is #15177 and addresses dtype handling). | 2023-03-30T20:39:12 |
|
google/jax | 15,397 | google__jax-15397 | [
"13769"
]
| ffa9d018d6ffc1318fc696cb56775ecdca91c147 | diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -1154,7 +1154,8 @@ def broadcast_to(array: ArrayLike, shape: Shape) -> Array:
return util._broadcast_to(array, shape)
-def _split(op: str, ary: ArrayLike, indices_or_sections: Union[int, ArrayLike],
+def _split(op: str, ary: ArrayLike,
+ indices_or_sections: Union[int, Sequence[int], ArrayLike],
axis: int = 0) -> List[Array]:
util.check_arraylike(op, ary)
ary = asarray(ary)
@@ -1193,12 +1194,13 @@ def _split(op: str, ary: ArrayLike, indices_or_sections: Union[int, ArrayLike],
for start, end in zip(split_indices[:-1], split_indices[1:])]
@util._wraps(np.split, lax_description=_ARRAY_VIEW_DOC)
-def split(ary: ArrayLike, indices_or_sections: Union[int, ArrayLike], axis: int = 0) -> List[Array]:
+def split(ary: ArrayLike, indices_or_sections: Union[int, Sequence[int], ArrayLike],
+ axis: int = 0) -> List[Array]:
return _split("split", ary, indices_or_sections, axis=axis)
def _split_on_axis(op: str, axis: int) -> Callable[[ArrayLike, Union[int, ArrayLike]], List[Array]]:
@util._wraps(getattr(np, op), update_doc=False)
- def f(ary: ArrayLike, indices_or_sections: Union[int, ArrayLike]) -> List[Array]:
+ def f(ary: ArrayLike, indices_or_sections: Union[int, Sequence[int], ArrayLike]) -> List[Array]:
# for 1-D array, hsplit becomes vsplit
nonlocal axis
util.check_arraylike(op, ary)
@@ -1213,7 +1215,8 @@ def f(ary: ArrayLike, indices_or_sections: Union[int, ArrayLike]) -> List[Array]
dsplit = _split_on_axis("dsplit", axis=2)
@util._wraps(np.array_split)
-def array_split(ary: ArrayLike, indices_or_sections: Union[int, ArrayLike], axis: int = 0) -> List[Array]:
+def array_split(ary: ArrayLike, indices_or_sections: Union[int, Sequence[int], ArrayLike],
+ axis: int = 0) -> List[Array]:
return _split("array_split", ary, indices_or_sections, axis=axis)
@util._wraps(np.clip, skip_params=['out'])
| Improve jnp.split API
Sorry for the very uninformative title for this issue, couldn't think of a better one.
While reading the code for `split` in `jax/_src/numpy/lax_numpy.py`, I noticed the following block of code for the function `lax_numpy._split`:
https://github.com/google/jax/blob/2b716f292d814fdfadca43e108b5fedc122c0d2c/jax/_src/numpy/lax_numpy.py#L1178-L1193
According to the type signature of `indices_or_sections`
https://github.com/google/jax/blob/2b716f292d814fdfadca43e108b5fedc122c0d2c/jax/_src/numpy/lax_numpy.py#L1172-L1173
We should be able to remove the first branch (i.e. `(tuple, list)`), in the spirit of #7737. I was about to send a pull request, but I also noticed that `indices_or_sections` is always concrete, and `Array`s are not hashable. Keeping the current type also prevents us from marking `indices_or_sections` as static in `jit`. One might argue that removing the second branch (i.e. `(np.ndarray, ndarray)`) and making `indices_or_sections: int | Sequence[int]` would make more sense, but that's a deviation from the original numpy api.
Not sure what's the best action to take here.
| I suspect it's the type annotations that are incorrect, not the implementation. The latter has much more test coverage than the former.
I would hesitate to apply #7737 here, as the argument must be static and so there's no performance footgun in passing a list instead of an array, and erroring on list/tuple input would break a lot of downstream code.
> I suspect it's the type annotations that are incorrect, not the implementation.
Totally agree. Can we error on array input and mark `split` as `static`?
Erroring on array input would be an API change that would require a deprecation cycle and also potentially a large number of fixes to downstream packages, and I can't think of any advantage that such a depracation would bring about (aside from one less conditional block in the impelmentation), so I lean toward no.
Should we at least relax the type annotation then? Feel free to close this issue.
> I can't think of any advantage that such a deprecation would bring about.
Even with the relaxed typing, use of `split` under `jit` still requires special treatment (other `jnp` functions will at least work under `jit` with the correct `static_argnums`). Just pointing out the awkwardness; I won't argue too strongly about it.
```python
a: Array
i: Array
def f(a, i):
...
r = split(a, i)
...
f(a, i) # type checks, works, let me try to jit it
jit(f)(a, i) # doesn't work, one could argue `i` should be marked as static
jit(f, static_argnums=(1,))(a, i) # still doesn't work
jit(f, static_argnums=(1,))(a, tuple(i)) # finally
```
Yeah, understood that this does not work with JIT, and I think that's handled about as well as it can be in the code (explicit concreteness checks with associated error messages). We could probably adjust the type annotation to make clear that sequences are supported - what do you think?
> I think that's handled about as well as it can be in the code
That's fair, and agreed. Let's just change the annotation.
As a general question, should one expect `jit` (with correct `static_argnums` marking, for naked `jit` are known to change strictness of arguments) to change the apparent ~strictness~ staticness of some arguments of `f` for `f` in `jax.numpy`?
Can you elaborate on what you mean by "apparent strictness"?
Ah, there was a typo, I meant staticness. Or, maybe "appeared un-staticness".
<details>
Staticness is enforced at runtime in two ways:
- E1: Trying to concretise, example: in native control-flow
- E2: Disallow certain types, example: `jit(jnp.sum, static_argnums=(1,))(a, axis=jnp.int_(0))` errors out
Inside `jit`, (E1) catches errors related to another aspect of staticness, like the one in `jit(jnp.nonzero)(a)`, i.e. some extra information is needed, but the argument itself remains 'dynamic' (no type conversion).
Outside of `jit`, one usually only has (E1); inside of `jit` (with correct staticness marking), both. Knowing (E2), I could supposedly deduce staticness (or rather un-staticness) from the type annotation. But it's only 'apparent', since sometimes it is really only enforced by (E1) at runtime:
```python
jnp.sum(a, axis=jnp.int_(0)) # runs, but type error
```
We could debug a problem regarding the staticness of a function `f` by traversing the following graph breadth-first, and would ideally back out before reaching state `d`:
```mermaid
flowchart TB
A["a. Use knowledge about it's functionality"]
B["b. Plug in some arrays and check for static type error (E2)"]
C["c. Wrap in jit and check for runtime error (E1)"]
D["d. Wrap the *outer* function with jit(static_argnums) and brute force"]
A-->B
A-->C
B-->D
subgraph jitted
C-->D
end
B-->C
```
For most functions in `jax.numpy`, following `a → b` is enough, and only occasionally `a → c`. Notably, even if `b` checks out and `c` doesn't (`a → b → c`, think `jnp.nonzero`), the fix in the end doesn't involve a type conversion, which contradicts the knowledge we acquired at state `b`.
The relaxed annotation for `jnp.split` forces us to reach `d`, and the staticness one *can* infer from the type is different from the real one acquired at the end. One might as well conclude it's `jit` that changes the staticness of the argument `indices_or_sections`.
Of course, trade-offs, ease-of-use, python-typing-weak, etc. So I don't stand my ground 100%. Just really want to try out the flowchart functionality. :)
</details>
> As a general question, should one expect `jit` (with correct `static_argnums` marking, for naked `jit` are known to change strictness of arguments) to change the apparent ~strictness~ staticness of some arguments of `f` for `f` in `jax.numpy`?
I believe `jit` will always cause a function's arguments to be non-static unless they're explicitly marked static using `static_argnames` or `static_argnums`. We might imagine a world where JIT tries to infer whether arguments should be static or traced, but JAX's current JIT implementation does not work that way.
I ran into this issue. I want to pass a `list` of plain Python `int`s to a `jnp.split` operation inside a `jit`. (The list is constructed by a complex series of function calls.) If I pass the list directly, I get a mypy error:
```
error: Argument 2 to "split" has incompatible type "List[int]"; expected "Union[int, Union[Array, ndarray[Any, Any], bool_, number[Any], bool, int, float, complex]]" [arg-type]
```
If I apply `jnp.array` to the list, I get a runtime error:
```
jax._src.errors.ConcretizationTypeError: Abstract tracer value encountered where concrete value is expected: Traced<ShapedArray(int64[])>with<DynamicJaxprTrace(level=6/0)>
in jax.numpy.split argument 1
```
What's the recommended solution to this? Is there any plan to relax the type annotation of `jnp.split` to allow `Sequence`s? For now, I'm using `typing.cast(jax.Array, indices)` as a workaround to satisfy mypy.
Try passing `np.array(indices)` instead: this will type-check as `ArrayLike` but remain static.
But I think the type annotation is wrong on `jnp.split`, it should be `indices_or_sections: Union[int, ArrayLike, Sequence[int]]` I think. The implementation explicitly handles `tuple` or `list` of ints.
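For reference, a small sketch of the `np.array` workaround mentioned above (array values chosen arbitrarily):

```python
import numpy as np
import jax
import jax.numpy as jnp

@jax.jit
def f(x):
    # A NumPy array of split points is concrete at trace time, so the output
    # shapes are known; a traced jnp.array of indices here would raise the
    # ConcretizationTypeError quoted above.
    return jnp.split(x, np.array([2, 5]))

print([p.shape for p in f(jnp.arange(8))])  # [(2,), (3,), (3,)]
```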
@jakevdp I think so, too. I'm looking at https://github.com/google/jax/blob/3c1f3abba2c2e9b2506dc6bd19a132f60e2c320b/jax/_src/numpy/lax_numpy.py#L1163 | 2023-04-04T23:17:49 |
|
google/jax | 15,401 | google__jax-15401 | [
"15400"
]
| bf50551e0fed037c359f1feee863864d3ec7ef50 | diff --git a/jax/_src/lax/lax.py b/jax/_src/lax/lax.py
--- a/jax/_src/lax/lax.py
+++ b/jax/_src/lax/lax.py
@@ -355,6 +355,7 @@ def complex(x: ArrayLike, y: ArrayLike) -> Array:
def conj(x: ArrayLike) -> Array:
r"""Elementwise complex conjugate function: :math:`\overline{x}`."""
+ # TODO(mattjj): remove input_dtype, not needed anymore
return conj_p.bind(x, input_dtype=_dtype(x))
def abs(x: ArrayLike) -> Array:
@@ -1896,7 +1897,9 @@ def _conj_impl(x, **kw):
def _conj_transpose_rule(t, x, *, input_dtype):
assert ad.is_undefined_primal(x)
- if dtypes.issubdtype(input_dtype, np.complexfloating):
+ if type(t) is ad_util.Zero:
+ return [ad_util.Zero(x.aval)]
+ elif dtypes.issubdtype(input_dtype, np.complexfloating):
return [conj(t)]
else:
return [real(t)]
| diff --git a/tests/api_test.py b/tests/api_test.py
--- a/tests/api_test.py
+++ b/tests/api_test.py
@@ -4214,6 +4214,12 @@ def outer_fn(x):
self.assertEqual(inner_count, 1)
self.assertEqual(outer_count, 1)
+ def test_grad_conj_symbolic_zeros(self):
+ # https://github.com/google/jax/issues/15400
+ f = lambda x: jax.jit(lambda x, y: (x, y))(x, jax.lax.conj(x))[0]
+ out = jax.grad(f)(3.0) # doesn't crash
+ self.assertAllClose(out, 1., check_dtypes=False)
+
class RematTest(jtu.JaxTestCase):
| TypeError: Cannot determine dtype of Zero(ShapedArray(complex128[]))
### Description
Hi,
My code has complex numbers and backprop.
I used the `jax.value_and_grad()` function to get the gradient but got a TypeError:
`TypeError: Cannot determine dtype of Zero(ShapedArray(complex128[121]))`
It came from the `.conj()` function, so I replaced it with manual conjugation and it worked.
So in summary, with `jax.value_and_grad`:
1. jit, with `arr.conj()`: TypeError
2. jit, with `arr.real + arr.imag * -1j`: OK
3. no jit, with `arr.conj()`: OK
4. no jit, with `arr.real + arr.imag * -1j`: OK
Can you please take a look?
> /Users/yongha/miniconda3/envs/meent/bin/python /Users/yongha/project/rcwa/meent/QA/grad_complex.py
> Traceback (most recent call last):
> File "/Users/yongha/miniconda3/envs/meent/lib/python3.10/site-packages/jax/_src/dtypes.py", line 611, in dtype
> dt = np.result_type(x)
> File "<__array_function__ internals>", line 200, in result_type
> TypeError: Cannot interpret 'Zero(ShapedArray(complex128[121]))' as a data type
>
> The above exception was the direct cause of the following exception:
>
> Traceback (most recent call last):
> File "/Users/yongha/project/rcwa/meent/QA/grad_complex.py", line 66, in <module>
> grad = jmee.grad(pois, forward, loss_fn)
> File "/Users/yongha/project/rcwa/meent/meent/on_jax/optimizer/optimizer.py", line 53, in grad
> _, grads = self._grad(params, forward, loss_fn)
> File "/Users/yongha/project/rcwa/meent/meent/on_jax/optimizer/optimizer.py", line 48, in _grad
> loss_value, grads = jax.value_and_grad(forward_pass)(params, forward, loss_fn)
> File "/Users/yongha/project/rcwa/meent/meent/on_jax/optimizer/optimizer.py", line 44, in forward_pass
> result = forward(**params)
> File "/Users/yongha/project/rcwa/meent/meent/on_jax/emsolver/_base.py", line 217, in wrap
> res = func(*args, **kwargs)
> File "/Users/yongha/project/rcwa/meent/meent/on_jax/emsolver/rcwa.py", line 196, in conv_solve
> de_ri, de_ti, layer_info_list, T1, kx_vector = self._conv_solve_jit()
> File "/Users/yongha/project/rcwa/meent/meent/on_jax/emsolver/rcwa.py", line 150, in _conv_solve_jit
> return self._conv_solve()
> File "/Users/yongha/project/rcwa/meent/meent/on_jax/emsolver/rcwa.py", line 145, in _conv_solve
> de_ri, de_ti, layer_info_list, T1, kx_vector = self._solve(self.wavelength, E_conv_all, o_E_conv_all)
> File "/Users/yongha/project/rcwa/meent/meent/on_jax/emsolver/rcwa.py", line 114, in _solve
> de_ri, de_ti, layer_info_list, T1 = self.solve_2d(wavelength, e_conv_all, o_e_conv_all)
> File "/Users/yongha/project/rcwa/meent/meent/on_jax/emsolver/_base.py", line 217, in wrap
> res = func(*args, **kwargs)
> File "/Users/yongha/project/rcwa/meent/meent/on_jax/emsolver/_base.py", line 389, in solve_2d
> = transfer_2d_1(ff_x, ff_y, ff_xy, k0, self.n_I, self.n_II, self.kx_vector, self.period, fourier_indices_y,
> File "/Users/yongha/project/rcwa/meent/meent/on_jax/emsolver/transfer_method.py", line 272, in transfer_2d_1
> k_I_z = conj(k_I_z) # manual conjugate
> File "/Users/yongha/project/rcwa/meent/meent/on_jax/emsolver/primitives.py", line 9, in conj
> return arr.conj()
> File "/Users/yongha/miniconda3/envs/meent/lib/python3.10/site-packages/jax/_src/numpy/ufuncs.py", line 599, in conjugate
> return lax.conj(x) if np.iscomplexobj(x) else _asarray(x)
> jax._src.source_info_util.JaxStackTraceBeforeTransformation: TypeError: Cannot determine dtype of Zero(ShapedArray(complex128[121]))
>
> The preceding stack trace is the source of the JAX operation that, once transformed by JAX, triggered the following exception.
>
> --------------------
>
> The above exception was the direct cause of the following exception:
>
> Traceback (most recent call last):
> File "/Users/yongha/project/rcwa/meent/QA/grad_complex.py", line 66, in <module>
> grad = jmee.grad(pois, forward, loss_fn)
> File "/Users/yongha/project/rcwa/meent/meent/on_jax/optimizer/optimizer.py", line 53, in grad
> _, grads = self._grad(params, forward, loss_fn)
> File "/Users/yongha/project/rcwa/meent/meent/on_jax/optimizer/optimizer.py", line 48, in _grad
> loss_value, grads = jax.value_and_grad(forward_pass)(params, forward, loss_fn)
> File "/Users/yongha/miniconda3/envs/meent/lib/python3.10/site-packages/jax/_src/traceback_util.py", line 166, in reraise_with_filtered_traceback
> return fun(*args, **kwargs)
> File "/Users/yongha/miniconda3/envs/meent/lib/python3.10/site-packages/jax/_src/api.py", line 741, in value_and_grad_f
> g = vjp_py(lax_internal._one(ans))
> File "/Users/yongha/miniconda3/envs/meent/lib/python3.10/site-packages/jax/_src/tree_util.py", line 303, in __call__
> return self.fun(*args, **kw)
> File "/Users/yongha/miniconda3/envs/meent/lib/python3.10/site-packages/jax/_src/api.py", line 2183, in _vjp_pullback_wrapper
> ans = fun(*args)
> File "/Users/yongha/miniconda3/envs/meent/lib/python3.10/site-packages/jax/_src/tree_util.py", line 303, in __call__
> return self.fun(*args, **kw)
> File "/Users/yongha/miniconda3/envs/meent/lib/python3.10/site-packages/jax/_src/interpreters/ad.py", line 146, in unbound_vjp
> arg_cts = backward_pass(jaxpr, reduce_axes, True, consts, dummy_args, cts)
> File "/Users/yongha/miniconda3/envs/meent/lib/python3.10/site-packages/jax/_src/interpreters/ad.py", line 250, in backward_pass
> cts_out = reducing_transposes[eqn.primitive](
> File "/Users/yongha/miniconda3/envs/meent/lib/python3.10/site-packages/jax/_src/pjit.py", line 1740, in _pjit_transpose
> transpose_jaxpr = _pjit_transpose_trace(body, global_cts_in_avals)
> File "/Users/yongha/miniconda3/envs/meent/lib/python3.10/site-packages/jax/_src/linear_util.py", line 322, in memoized_fun
> ans = call(fun, *args)
> File "/Users/yongha/miniconda3/envs/meent/lib/python3.10/site-packages/jax/_src/pjit.py", line 1717, in _pjit_transpose_trace
> transpose_jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(fun, in_avals)
> File "/Users/yongha/miniconda3/envs/meent/lib/python3.10/site-packages/jax/_src/profiler.py", line 314, in wrapper
> return func(*args, **kwargs)
> File "/Users/yongha/miniconda3/envs/meent/lib/python3.10/site-packages/jax/_src/interpreters/partial_eval.py", line 2049, in trace_to_jaxpr_dynamic
> jaxpr, out_avals, consts = trace_to_subjaxpr_dynamic(
> File "/Users/yongha/miniconda3/envs/meent/lib/python3.10/site-packages/jax/_src/interpreters/partial_eval.py", line 2066, in trace_to_subjaxpr_dynamic
> ans = fun.call_wrapped(*in_tracers_)
> File "/Users/yongha/miniconda3/envs/meent/lib/python3.10/site-packages/jax/_src/linear_util.py", line 166, in call_wrapped
> ans = self.f(*args, **dict(self.params, **kwargs))
> File "/Users/yongha/miniconda3/envs/meent/lib/python3.10/site-packages/jax/_src/interpreters/ad.py", line 264, in closed_backward_pass
> return backward_pass(jaxpr.jaxpr, reduce_axes, transform_stack, jaxpr.consts,
> File "/Users/yongha/miniconda3/envs/meent/lib/python3.10/site-packages/jax/_src/interpreters/ad.py", line 253, in backward_pass
> cts_out = get_primitive_transpose(eqn.primitive)(
> File "/Users/yongha/miniconda3/envs/meent/lib/python3.10/site-packages/jax/_src/lax/lax.py", line 1895, in _conj_transpose_rule
> return [conj(t)]
> File "/Users/yongha/miniconda3/envs/meent/lib/python3.10/site-packages/jax/_src/lax/lax.py", line 350, in conj
> return conj_p.bind(x, input_dtype=_dtype(x))
> File "/Users/yongha/miniconda3/envs/meent/lib/python3.10/site-packages/jax/_src/dtypes.py", line 613, in dtype
> raise TypeError(f"Cannot determine dtype of {x}") from err
> jax._src.traceback_util.UnfilteredStackTrace: TypeError: Cannot determine dtype of Zero(ShapedArray(complex128[121]))
>
> The stack trace below excludes JAX-internal frames.
> The preceding is the original exception that occurred, unmodified.
>
> --------------------
>
> The above exception was the direct cause of the following exception:
>
> Traceback (most recent call last):
> File "/Users/yongha/project/rcwa/meent/QA/grad_complex.py", line 66, in <module>
> grad = jmee.grad(pois, forward, loss_fn)
> File "/Users/yongha/project/rcwa/meent/meent/on_jax/optimizer/optimizer.py", line 53, in grad
> _, grads = self._grad(params, forward, loss_fn)
> File "/Users/yongha/project/rcwa/meent/meent/on_jax/optimizer/optimizer.py", line 48, in _grad
> loss_value, grads = jax.value_and_grad(forward_pass)(params, forward, loss_fn)
> TypeError: Cannot determine dtype of Zero(ShapedArray(complex128[121]))
>
> Process finished with exit code 1
>
### What jax/jaxlib version are you using?
jax 0.4.1, jaxlib 0.4.1
### Which accelerator(s) are you using?
CPU
### Additional system info
Python 3.10, M1 Mac
### NVIDIA GPU info
_No response_
| Thanks for the report! Could you provide a runnable repro?
Actually, I think I see the issue.
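(For reference, the regression test added with the fix above distills the failure to roughly this pattern: a `conj` output flows into a jitted function but does not affect the differentiated output, so its cotangent is a symbolic zero.)

```python
import jax

def f(x):
    x_out, _ = jax.jit(lambda a, b: (a, b))(x, jax.lax.conj(x))
    return x_out

jax.grad(f)(3.0)  # raised "Cannot determine dtype of Zero(...)" before the fix
```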
That's great. I tried to make a runnable example, but it wasn't that easy.
So if you can clone my repo and run one file, it will be faster. If that's not an option, I'll try to make sample code.
Please let me know. | 2023-04-05T03:45:49 |
google/jax | 15,458 | google__jax-15458 | [
"15453"
]
| b15ebb1bc59caa22b1f9c483093dd045fa7797b7 | diff --git a/jax/debug.py b/jax/debug.py
--- a/jax/debug.py
+++ b/jax/debug.py
@@ -11,6 +11,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+__all__ = ["callback", "print", "DebugEffect", "visualize_array_sharding",
+ "inspect_array_sharding", "visualize_sharding", "breakpoint"]
from jax._src.debugging import debug_callback as callback
from jax._src.debugging import debug_print as print
from jax._src.debugging import DebugEffect
| Add missing exports to jax.config and jax.debug
### Description
Add missing exports to [jax.config](https://github.com/google/jax/blob/main/jax/config.py) and [jax.debug](https://github.com/google/jax/blob/main/jax/debug.py) (and any other such files). See #7570 for context.
Example:
```python3
from jax.config import config
from jax.debug import print, callback, breakpoint
```
mypy yields
```
example.py:1: error: Module "jax.config" does not explicitly export attribute "config" [attr-defined]
example.py:2: error: Module "jax.debug" does not explicitly export attribute "print" [attr-defined]
example.py:2: error: Module "jax.debug" does not explicitly export attribute "callback" [attr-defined]
example.py:2: error: Module "jax.debug" does not explicitly export attribute "breakpoint" [attr-defined]
```
pyright yields
```
/Users/carlos/Desktop/example.py:1:24 - error: "config" is not exported from module "jax.config"
Import from "jax._src.config" instead (reportPrivateImportUsage)
/Users/carlos/Desktop/example.py:2:23 - error: "print" is not exported from module "jax.debug"
Import from "jax._src.debugging" instead (reportPrivateImportUsage)
/Users/carlos/Desktop/example.py:2:30 - error: "callback" is not exported from module "jax.debug"
Import from "jax._src.debugging" instead (reportPrivateImportUsage)
/Users/carlos/Desktop/example.py:2:40 - error: "breakpoint" is not exported from module "jax.debug"
Import from "jax._src.debugger.core" instead (reportPrivateImportUsage)
```
### What jax/jaxlib version are you using?
jax 0.4.8, jaxlib 0.4.7
### Which accelerator(s) are you using?
CPU
### Additional system info
Python 3.11.2, macOS 11.7.4
### NVIDIA GPU info
_No response_
| > ```python
> from jax.config import config
> ```
I think this is not meant to be a supported import. This is the supported way to get the same thing:
```python
from jax import config
```
(yes, it's strange that we rewrite the `config` module with the `config` object: we need to clean that up at some point)
> ```python
> from jax.debug import print, callback, breakpoint
> ```
We should fix these. `breakpoint` is easy (we just need to add `as breakpoint`), but I'm not sure how to tell `mypy` that `callback` and `print` are meant to be exported, because they are renamed on import: https://github.com/google/jax/blob/87a1fea1c71b9b059b286c86bda4942cea766e3a/jax/debug.py#L14-L20
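(For reference, the fix in the diff above adds an explicit `__all__`, which is one of the conventions mypy and pyright accept as marking re-exports. A rough sketch:)

```python
# Sketch of jax/debug.py; the merged diff above lists the full set of names.
__all__ = ["callback", "print"]  # explicit __all__ marks these as public exports

from jax._src.debugging import debug_callback as callback
from jax._src.debugging import debug_print as print

# Note: a redundant alias (`from m import x as x`) is the other convention type
# checkers accept, but it only applies when the name is *not* changed on import,
# which is why __all__ is the natural fix here.
```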
Do you know how to tell mypy that the renamed imports are meant as exported names? | 2023-04-07T14:26:28 |
|
google/jax | 15,479 | google__jax-15479 | [
"15471"
]
| c625a3e0cc0dafca65701f3dedee5f3d21a764fc | diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -3144,6 +3144,7 @@ def einsum(
out=None,
optimize="optimal",
precision=None,
+ preferred_element_type=None,
_use_xeinsum=False,
_dot_general=lax.dot_general,
):
@@ -3176,7 +3177,8 @@ def einsum(
_einsum_computation = jax.named_call(
_einsum, name=spec) if spec is not None else _einsum
- return _einsum_computation(operands, contractions, precision, _dot_general)
+ return _einsum_computation(operands, contractions, precision,
+ preferred_element_type, _dot_general)
# Enable other modules to override einsum_contact_path.
@@ -3201,11 +3203,12 @@ def _removechars(s, chars):
return s.translate(str.maketrans(dict.fromkeys(chars)))
-@partial(jit, static_argnums=(1, 2, 3))
+@partial(jit, static_argnums=(1, 2, 3, 4), inline=True)
def _einsum(
operands: Sequence,
contractions: Sequence[Tuple[Tuple[int, ...], FrozenSet[str], str]],
precision,
+ preferred_element_type,
_dot_general=lax.dot_general,
):
operands = list(util.promote_dtypes(*operands))
@@ -3320,11 +3323,13 @@ def filter_singleton_dims(operand, names, other_shape, other_names):
names = batch_names_str + remaining_rhs_names + remaining_lhs_names
if names == result_names:
dimension_numbers = ((rhs_cont, lhs_cont), (rhs_batch, lhs_batch))
- operand = _dot_general(rhs, lhs, dimension_numbers, precision)
+ operand = _dot_general(rhs, lhs, dimension_numbers, precision,
+ preferred_element_type=preferred_element_type)
else:
names = batch_names_str + remaining_lhs_names + remaining_rhs_names
dimension_numbers = ((lhs_cont, rhs_cont), (lhs_batch, rhs_batch))
- operand = _dot_general(lhs, rhs, dimension_numbers, precision)
+ operand = _dot_general(lhs, rhs, dimension_numbers, precision,
+ preferred_element_type=preferred_element_type)
else:
raise NotImplementedError # if this is actually reachable, open an issue!
| diff --git a/tests/lax_numpy_einsum_test.py b/tests/lax_numpy_einsum_test.py
--- a/tests/lax_numpy_einsum_test.py
+++ b/tests/lax_numpy_einsum_test.py
@@ -348,6 +348,21 @@ def test_no_unnecessary_transpose(self):
jaxpr = jax.make_jaxpr(partial(jnp.einsum, "ijk,kl->ijl"))(x, y)
self.assertNotIn('transpose', str(jaxpr))
+ def test_preferred_element_type(self):
+ r = self.rng()
+ x = r.randn(2, 2).astype('bfloat16')
+ y = r.randn(2).astype('bfloat16')
+ pattern = "ij,j->i"
+ f1 = partial(jnp.einsum, pattern)
+ jaxpr = jax.make_jaxpr(f1)(x, y)
+ self.assertLen(jaxpr.eqns, 1)
+ self.assertIsNone(jaxpr.eqns[0].params['preferred_element_type'])
+
+ f2 = partial(jnp.einsum, pattern, preferred_element_type='float32')
+ jaxpr = jax.make_jaxpr(f2)(x, y)
+ self.assertLen(jaxpr.eqns, 1)
+ self.assertEqual(jaxpr.eqns[0].params['preferred_element_type'], 'float32')
+
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
| Add preferred_element_type to jax.numpy.einsum
The `jax.lax.dot_general`, `jax.lax.conv_general_dilated`, etc. functions have a `preferred_element_type` parameter to set their output types. Since `jax.numpy.einsum` is a user-friendly way of accessing these, it would be nice to have the `preferred_element_type` parameter there as well.
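A sketch of the requested usage, matching what the patch above adds (array shapes and dtypes here are arbitrary examples):

```python
import jax.numpy as jnp

x = jnp.ones((4, 4), dtype=jnp.bfloat16)
y = jnp.ones((4,), dtype=jnp.bfloat16)

# Ask for the contraction to be accumulated/emitted in float32, mirroring
# lax.dot_general's preferred_element_type.
out = jnp.einsum("ij,j->i", x, y, preferred_element_type=jnp.float32)
print(out.dtype)  # float32
```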
| Thanks! Seems like a reasonable addition. | 2023-04-08T00:32:16 |
google/jax | 15,485 | google__jax-15485 | [
"15484"
]
| 053affd173711b64f19ede27a53f17bf528d8374 | diff --git a/jax/random.py b/jax/random.py
--- a/jax/random.py
+++ b/jax/random.py
@@ -106,17 +106,18 @@
.. table::
:widths: auto
- ================================= ======== === ========== ======= ==============
- Property Threefry rbg unsafe_rbg rbg (*) unsafe_rbg (*)
- ================================= ======== === ========== ======= ==============
- Fastest on TPU ✅ ✅ ✅ ✅
- efficiently shardable (w/ pjit) ✅ ✅
- identical across shardings ✅ ✅ ✅
- identical across CPU/GPU/TPU ✅
- identical across JAX/XLA versions ✅
- ================================= ======== === ========== ======= ==============
-
-(*): with XLA_FLAGS=xla_tpu_spmd_rng_bit_generator_unsafe=1 set
+ ================================= ======== ========= === ========== ===== ============
+ Property Threefry Threefry* rbg unsafe_rbg rbg** unsafe_rbg**
+ ================================= ======== ========= === ========== ===== ============
+ Fastest on TPU ✅ ✅ ✅ ✅
+ efficiently shardable (w/ pjit) ✅ ✅ ✅
+ identical across shardings ✅ ✅ ✅ ✅
+ identical across CPU/GPU/TPU ✅ ✅
+ identical across JAX/XLA versions ✅ ✅
+ ================================= ======== ========= === ========== ===== ============
+
+(*): with jax_threefry_partitionable=1 set
+(**): with XLA_FLAGS=--xla_tpu_spmd_rng_bit_generator_unsafe=1 set
The difference between "rbg" and "unsafe_rbg" is that while "rbg" uses a less
robust/studied hash function for random value generation (but not for
@@ -124,6 +125,9 @@
robust hash functions for `jax.random.split` and `jax.random.fold_in`. Therefore
less safe in the sense that the quality of random streams it generates from
different keys is less well understood.
+
+For more about jax_threefry_partitionable, see
+https://jax.readthedocs.io/en/latest/notebooks/Distributed_arrays_and_automatic_parallelization.html#generating-random-numbers
"""
from jax._src.prng import PRNGKeyArray as _PRNGKeyArray
| Efficiently shardable PRNG algorithm for GPU
Please:
- [x] Check for duplicate requests.
- [x] Describe your goal, and if possible provide a code snippet with a motivating example.
https://jax.readthedocs.io/en/latest/jax.random.html#advanced-rng-configuration
In the table there, efficient sharding through jit appears to be supported only on TPUs. Could you make it work for GPU as well?
| Thanks for raising this!
Actually, those docs do _not_ mean to say that the efficiently shardable options, namely `rbg` and `unsafe_rbg`, only work on TPU; they work on CPU and GPU also!
Instead, those docs mean to say that:
* `rbg` and `unsafe_rbg` are the absolute fastest way to generate random values on TPUs, compared to other options on TPUs, and
* the values produced by `rbg` and `unsafe_rbg` on TPU are different than what they are on CPU, even given the same key value, and those both are also different from the values that are produced on GPU. (Perhaps the remarkable thing is that the default RNG, `Threefry`, produces the same raw random bits given the same key on CPU, GPU, and TPU, though numerical floating point differences could lead to downstream differences in values coming out of float-producing random samplers.)
There's another efficiently shardable option which I just noticed isn't listed there, but it's described [here](https://jax.readthedocs.io/en/latest/notebooks/Distributed_arrays_and_automatic_parallelization.html#generating-random-numbers). Basically, if you set the option `jax_threefry_partitionable` then the default Threefry PRNG becomes efficiently partitionable. It checks all the boxes except "Fastest on TPU", since nothing is as fast as `rbg` and `unsafe_rbg` on TPU.
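For concreteness, a minimal sketch of turning that option on (the sampling code itself is unchanged):

```python
import jax

# Make the default threefry PRNG efficiently partitionable under jit/pjit.
jax.config.update("jax_threefry_partitionable", True)

key = jax.random.PRNGKey(0)
x = jax.random.uniform(key, (8, 1024))
```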
Does that make sense? | 2023-04-08T05:54:23 |
|
google/jax | 15,497 | google__jax-15497 | [
"15385"
]
| 90d58f45729fa6a7755ea99349997c2070d6e9c3 | diff --git a/jax/_src/dispatch.py b/jax/_src/dispatch.py
--- a/jax/_src/dispatch.py
+++ b/jax/_src/dispatch.py
@@ -320,7 +320,7 @@ def _names_to_pspec(names):
ndmin = max(names) + 1 if names else 0
return PartitionSpec(*(names.get(i) for i in range(ndmin)))
yield from ((NamedSharding(eqn.params['mesh'], _names_to_pspec(names)), source_info)
- for names in eqn.params['in_names'])
+ for names in [*eqn.params['in_names'], *eqn.params['out_names']])
for subjaxpr in core.subjaxprs(jaxpr):
yield from jaxpr_shardings(subjaxpr)
| diff --git a/tests/shard_map_test.py b/tests/shard_map_test.py
--- a/tests/shard_map_test.py
+++ b/tests/shard_map_test.py
@@ -684,6 +684,23 @@ def foo(x):
with self.assertRaisesRegex(NotImplementedError, 'axis_index'):
g(x)
+ def test_jaxpr_shardings_with_no_outputs(self):
+ # https://github.com/google/jax/issues/15385
+ mesh = jtu.create_global_mesh((4,), ('i',))
+
+ @jax.jit
+ @partial(shard_map, mesh=mesh, in_specs=(), out_specs=P('i'))
+ def f():
+ return jax.lax.iota(jnp.dtype('int32'), 4)
+ f() # don't crash
+
+ @partial(shard_map, mesh=mesh, in_specs=(P('i'),), out_specs=P('i'))
+ def g(a_block):
+ i = jnp.arange(a_block.shape[0])
+ return i + a_block
+
+ g(np.arange(32)) # don't crash
+
class FunSpec(NamedTuple):
name: str
| jnp.arange is broken inside shmap unless using jit
### Description
This may be expected, but I found the error message quite confusing:
```python
from functools import partial
import numpy as np
import chex
import jax
import jax.numpy as jnp
from jax.sharding import Mesh, PartitionSpec as P
from jax.experimental import mesh_utils
from jax.experimental.shard_map import shard_map
chex.set_n_cpu_devices(8)
devices = mesh_utils.create_device_mesh((8,))
mesh = Mesh(devices, axis_names=('i',))
# uncomment this to make it work!
# @jax.jit
@partial(shard_map, mesh=mesh, in_specs=(P('i'),), out_specs=P('i'))
def f(a_block):
i = jnp.arange(a_block.shape[0])
return i + a_block
f(np.zeros(32))
```
Raises:
```
XlaRuntimeError Traceback (most recent call last)
[<ipython-input-2-9c0c4e291861>](https://localhost:8080/#) in <cell line: 20>()
18 return i + a_block
19
---> 20 f(np.zeros(32))
[... skipping hidden 5 frame]
2 frames
[<ipython-input-2-9c0c4e291861>](https://localhost:8080/#) in f(a_block)
15 @partial(shard_map, mesh=mesh, in_specs=(P('i'),), out_specs=P('i'))
16 def f(a_block):
---> 17 i = jnp.arange(a_block.shape[0])
18 return i + a_block
19
[/usr/local/lib/python3.9/dist-packages/jax/_src/numpy/lax_numpy.py](https://localhost:8080/#) in arange(start, stop, step, dtype)
2243 ceil_ = ufuncs.ceil if isinstance(start, core.Tracer) else np.ceil
2244 start = ceil_(start).astype(int) # type: ignore
-> 2245 return lax.iota(dtype, start)
2246 else:
2247 if step is None and start == 0 and stop is not None:
[... skipping hidden 16 frame]
[/usr/local/lib/python3.9/dist-packages/jax/_src/dispatch.py](https://localhost:8080/#) in backend_compile(backend, built_c, options, host_callbacks)
469 # TODO(sharadmv): remove this fallback when all backends allow `compile`
470 # to take in `host_callbacks`
--> 471 return backend.compile(built_c, compile_options=options)
472
473 _ir_dump_counter = itertools.count()
XlaRuntimeError: INTERNAL: RET_CHECK failure (external/xla/xla/hlo/ir/hlo_instruction.cc:2828) ShapeUtil::CompatibleIgnoringFpPrecision(shape(), new_producer->shape()) s32[8,4]{1,0} is not compatible with s32[1,4]{1,0}
```
### What jax/jaxlib version are you using?
_No response_
### Which accelerator(s) are you using?
_No response_
### Additional system info
_No response_
### NVIDIA GPU info
_No response_
| Also fails if the body
```python
def f(a_block):
i = jnp.arange(a_block.shape[0])
return i + a_block
```
is replaced with
```python
def f(a_block):
jnp.arange(4) # or even jax.lax.iota(jnp.dtype('int32'), 4)
```
Yet replacing `jnp.arange(4)` with `jnp.zeros(4)` or similar works fine... I wonder if this is an XLA bug.
Does it look like there's anything wrong with this HLO?
```
HloModule jit__unnamed_wrapped_function_, entry_computation_layout={()->f32[8,4]{1,0}}
ENTRY main.5 {
iota.1 = f32[4]{0} iota(), iota_dimension=0
reshape.2 = f32[1,4]{1,0} reshape(iota.1)
custom-call.3 = f32[1,4]{1,0} custom-call(reshape.2), custom_call_target="Sharding", sharding={manual}
ROOT custom-call.4 = f32[8,4]{1,0} custom-call(custom-call.3), custom_call_target="SPMDShardToFullShape", sharding={
devices=[8,1]0,1,2,3,4,5,6,7}
}
```
Interestingly, `jax.jit` doesn't always fix it: it's only a `jax.jit` _and at least one argument passed into the shmap_ which fixes it, e.g. this fails:
```python
@jax.jit
@partial(shard_map, mesh=mesh, in_specs=(), out_specs=P('i'))
def f():
return jax.lax.iota(jnp.dtype('int32'), 4)
f()
```
I think there's a bug in XLA "manual spmd mode" with zero-input functions. (The code in the OP hits this case because eager shmap basically means op-by-op shmap, so we hit the iota/arange by itself and hence build a computation with no inputs, whereas adding `jax.jit` fixes the code in the OP because it happens to have an input argument.)
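(For anyone hitting this before a fix lands: as the commented-out decorator in the original snippet suggests, wrapping the shard-mapped function in `jax.jit` avoids the op-by-op path:)

```python
# Continuing the original repro above: `f` is the shard_map-decorated function.
f_jit = jax.jit(f)
f_jit(np.zeros(32))  # works: the whole body is staged out as one computation,
                     # so the iota isn't compiled as its own zero-input program
```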
I filed internal bug b/277140784
Oops, this is a JAX bug after all; somehow in the problematic code we're telling XLA to compile with `num_partitions=1`. (Thanks @tongfei-guo for patiently correcting me!) | 2023-04-09T06:05:05 |
google/jax | 15,520 | google__jax-15520 | [
"15505"
]
| 8abe03daed429d2c75b5f776ad66dbf3c53f273d | diff --git a/jax/_src/ops/scatter.py b/jax/_src/ops/scatter.py
--- a/jax/_src/ops/scatter.py
+++ b/jax/_src/ops/scatter.py
@@ -64,9 +64,13 @@ def _scatter_update(x, idx, y, scatter_op, indices_are_sorted,
Returns:
An ndarray representing an updated `x` after performing the scatter-update.
"""
-
x = jnp.asarray(x)
- y = jnp.asarray(y)
+ if (isinstance(y, int) and np.issubdtype(x.dtype, np.integer) and
+ np.iinfo(x.dtype).min <= y <= np.iinfo(x.dtype).max):
+ y = jnp.asarray(y, dtype=x.dtype)
+ else:
+ y = jnp.asarray(y)
+
# XLA gathers and scatters are very similar in structure; the scatter logic
# is more or less a transpose of the gather equivalent.
treedef, static_idx, dynamic_idx = jnp._split_index_for_jit(idx, x.shape)
| diff --git a/tests/lax_numpy_indexing_test.py b/tests/lax_numpy_indexing_test.py
--- a/tests/lax_numpy_indexing_test.py
+++ b/tests/lax_numpy_indexing_test.py
@@ -1467,5 +1467,16 @@ def testIndexedUpdateAliasingBug(self):
y = jnp.zeros(8)
self.assertArraysEqual(fn(y), jax.jit(fn)(y))
+ def testScatterValuesCastToTargetDType(self):
+ # https://github.com/google/jax/issues/15505
+ a = jnp.zeros(1, dtype=jnp.uint32)
+ val = 2**32 - 1 # too large for int32
+
+ b = a.at[0].set(jnp.uint32(val))
+ self.assertEqual(int(b[0]), val)
+
+ c = a.at[0].set(val)
+ self.assertEqual(int(c[0]), val)
+
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| OverflowError on assignment of valid value to uint32 array
### Description
```python
import jax.numpy as jnp
a = jnp.zeros((1), dtype=jnp.uint32)
val = 2**32 - 1
# val and its uint32 representation are identical
print(val, jnp.uint32(val))
# assignment works with an explicit cast to uint32
b = a.at[1].set(jnp.uint32(val))
# assignment with an implicit cast throws an exception
b = a.at[1].set(val) # returns OverflowError
```
### What jax/jaxlib version are you using?
_No response_
### Which accelerator(s) are you using?
_No response_
### Additional system info
_No response_
### NVIDIA GPU info
_No response_
| Thanks for reporting this, and the clear repro!
#15506 might fix it, but it currently causes a "deprecation warning" test to fail (i.e. we no longer raise a warning), and so I need advice from @jakevdp on how to proceed.
(Unlikely to be relevant, but I just noticed that I wrote `a.at[1]` while `a.at[0]` would have made more sense, since `a` is length 1.)
@mattjj - is this something we can fix?
At the JIT boundary, Python scalars like `1` have to be converted to objects of specific dtypes, and we made the decision long ago to not use value-dependent semantics when choosing that dtype. So, unless `jax_enable_x64` is set to True, every Python integer will be represented as an `int32`, regardless of whether it eventually is intended to be converted to `uint32`.
With that in mind, isn't this working as expected?
I should spell out the subtext of my comment. We want
```python
x.at[0].set(1)
```
to act the same inside and outside JIT, so it follows that this should produce the same result:
```python
jax.jit(x.at[0].set)(1)
```
But when `1` hits a `jit` boundary, it must be converted to a JAX array with a particular `dtype`. We always choose `int32`, and so passing a python scalar that does not fit into `int32` should result in an error.
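A tiny sketch of that boundary rule with the default (x64 disabled) config; the second call is expected to fail for exactly this reason:

```python
import jax

f = jax.jit(lambda v: v)
print(f(1).dtype)  # int32: the Python scalar is committed to int32 at the boundary
f(2**32 - 1)       # expected to error: this value does not fit in an int32
```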
Thanks for spelling it out clearly! If I understand correctly, I think consistency across the jit boundary is a separate issue from whether assignment works without a jit boundary. Especially, I think assignment of uint32 is already handled differently when it crosses a jit boundary. See this snippet:
```Python
def gen_a(val):
a = jnp.zeros((1), dtype=jnp.uint32)
return a.at[0].set(jnp.uint32(val))
val = 2**32 - 1
gen_a(val) # executes
jit(gen_a)(val) # throws OverflowError
```
Yeah, that's a good point. We should be able to support this kind of assignment in the case that the scalar does not cross a tracing boundary. | 2023-04-10T21:25:06 |
google/jax | 15,569 | google__jax-15569 | [
"15504"
]
| 777480c257ee1d2d4a859e1c45259b2c25620c39 | diff --git a/jax/_src/checkify.py b/jax/_src/checkify.py
--- a/jax/_src/checkify.py
+++ b/jax/_src/checkify.py
@@ -382,20 +382,6 @@ def out_axes_thunk():
def get_shaped_aval(val):
return core.raise_to_shaped(core.get_aval(val))
-def initial_style_jaxpr(
- fun: Callable, in_tree: PyTreeDef, in_avals: Sequence[core.AbstractValue]
- ) -> Tuple[core.Jaxpr, List[Any], PyTreeDef]:
- return _initial_style_jaxpr(fun, in_tree, tuple(in_avals))
-
-@weakref_lru_cache
-def _initial_style_jaxpr(fun, in_tree, in_avals):
- # like control_flow._initial_style_jaxpr, but use flatten_fun not _nokwargs
- fun_, out_tree = flatten_fun(lu.wrap_init(fun), in_tree)
- debug = pe.debug_info(fun, in_tree, out_tree, False, 'checkify')
- jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(fun_, in_avals, debug)
- return jaxpr, consts, out_tree()
-
-
def checkify_jaxpr(jaxpr: core.ClosedJaxpr, enabled_errors,
error: Error, *args) -> Tuple[Error, List[core.Value]]:
err_vals, err_tree = jtu.tree_flatten(error)
@@ -1065,16 +1051,17 @@ def checkify(f: Callable[..., Out],
"""
@traceback_util.api_boundary
def checked_fun(*args, **kwargs):
+ # close over all arguments so they're not turned into abstract values.
+ in_tree = jtu.tree_structure(((), {}))
+ closed_f = lambda: f(*args, **kwargs)
# stage:
- flat_args, in_tree = tree_flatten((args, kwargs))
- in_avals = map(get_shaped_aval, flat_args)
- jaxpr_, consts, out_tree = initial_style_jaxpr(f, in_tree, in_avals)
+ fun_, out_tree = flatten_fun(lu.wrap_init(closed_f), in_tree)
+ debug = pe.debug_info(closed_f, in_tree, out_tree, False, 'checkify')
+ jaxpr_, _, consts = pe.trace_to_jaxpr_dynamic(fun_, (), debug)
jaxpr = pe.close_jaxpr(pe.convert_constvars_jaxpr(jaxpr_))
# checkify:
- flat_args = jtu.tree_leaves((args, kwargs))
- error, out_flat = checkify_jaxpr(jaxpr, errors, init_error,
- *consts, *flat_args)
- return error, jtu.tree_unflatten(out_tree, out_flat)
+ error, out_flat = checkify_jaxpr(jaxpr, errors, init_error, *consts)
+ return error, jtu.tree_unflatten(out_tree(), out_flat)
return checked_fun
def check(pred: Bool, msg: str, *fmt_args, **fmt_kwargs) -> None:
| diff --git a/tests/checkify_test.py b/tests/checkify_test.py
--- a/tests/checkify_test.py
+++ b/tests/checkify_test.py
@@ -753,7 +753,7 @@ def f(x, y):
cf = checkify.checkify(f, errors=checkify.nan_checks)
err, _ = cf(jnp.array([-jnp.inf, 0, jnp.inf]), jnp.ones((3, 2)))
self.assertIsNotNone(err.get())
- self.assertStartsWith(err.get(), "nan generated by primitive: psum")
+ self.assertStartsWith(err.get(), "nan generated by primitive")
def test_different_payload_effects(self):
def f(x, y):
@@ -1214,6 +1214,18 @@ def test_fmt_args_array_type_error(self):
trees_ok = lambda: checkify.check(False, "{}", {"hello": jnp.array(1.)})
checkify.checkify(trees_ok)()
+ def test_checkify_non_jax_type_input(self):
+ _ = checkify.checkify(lambda x: 1.)("hi") # does not crash
+
+ def test_checkify_static_args(self):
+ @checkify.checkify
+ def f(x):
+ if x:
+ return
+
+ _ = jax.jit(f, static_argnums=(0,))(True)
+
+
class LowerableChecksTest(jtu.JaxTestCase):
def setUp(self):
super().setUp()
| Support static argnames/argnums to checkify
I tried using checkify today but it's not compatible with my code base because I'm relying on static argnames/argnums for some of the jitted functions. After wrapping the jitted functions with checkify, those inputs are misinterpreted as dynamic arguments and JAX raises an error because they aren't JAX types.
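A rough sketch of the failure mode being described (all names below are made up, not taken from the reporter's code base):

```python
from functools import partial
import jax
import jax.numpy as jnp
from jax.experimental import checkify

@partial(jax.jit, static_argnums=1)
def f(x, mode):
    if mode == "double":
        x = x * 2
    checkify.check(jnp.all(x > 0), "x must be positive")
    return x

checked_f = checkify.checkify(f)
# At the time of this report, this fails: checkify treats the string `mode` as
# a traceable array argument, even though jit keeps it static.
err, out = checked_f(jnp.ones(3), "double")
```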
| Thanks for raising this!
> those inputs are misinterpreted as dynamic arguments
Well, `static_argnums`/`static_argnames` only affects the particular `jit` call to which those arguments are passed. In particular, they don't change the meaning of the body. So if I understand correctly, this issue isn't checkify-specific: for any `f` where `jit(checkify(f), static_argnums=(0,))` would fail with the error you describe, I believe `jit(jit(f), static_argnums=(0,))` would fail in the same way. In both cases, the function `g` to which the `jit(g, static_argnums=(0,))` is applied only accepts pytrees of jax types (ie arrays) as arguments.
So, there are (at least) two possible problem statements here, and I just want to figure out which one you have in mind:
1. make both `jit(jit(f), static_argnums=(0,))` and `jit(checkify(f), static_argnums=(0,))` (and `scan` etc) Just Work, by changing the meaning of the outer `jit`'s `static_argnums` to affect the body as well; or
2. change `checkify` only so that this issue doesn't arise (e.g. maybe `checkify` should try to avoid assuming _any_ inputs are jaxtypes, independent of any caller's `static_argnums`).
Did you have one of these in mind?
cc @LenaMartens
I was thinking of your option 2, changing `checkify` to avoid assuming that all its inputs must be jaxtypes.
More generally, there could be a solution along the lines of this?
```python3
def with_static(transform, argname):
specialized = {}
def wrapped(fn, *args, **kwargs):
value = kwargs.pop(argname, '_default')
if value not in specialized:
specialized[value] = transform(functools.partial(fn, **{argname: value}))
return specialized[value](*args, **kwargs)
return wrapped
fn = with_static(jit, 'string_arg')(fn) # Same as jit(fn, static_argnames='string_arg')
fn = with_static(checkify, 'string_arg')(fn)
fn(array_arg=jnp.zeros(), string_arg='bar')
```
Actually, this line isn't quite right:
```python
fn = with_static(jit, 'string_arg')(fn) # Same as jit(fn, static_argnames='string_arg')
```
`jit`'s `static_argnums`/`static_argnames` can't be implemented in user code (i.e. on top of a `jax.jit` API which doesn't itself have those options). If you try using the solution quoted here, you'll always get retraces/recompiles between two applications of `fn` even when passing the same value for `string_arg`, because you're creating a fresh callable object (the `partial` instance) every time. That's why `jit` has `static_argnums`/`static_argnames` built in: not as a convenience but because it's the only way to get the caching behavior we want.
This kind of caching approach can work better with `checkify`, though I'm a bit uncertain because it now operates differently from e.g. `jax.grad` (basically it has some internal caching which makes it a bit different). I'd have to think about it.
I guess a third option is:
3. add `static_argnums` / `static_argnames` to `checkify`, perhaps just as a convenience (though it's not as convenient as it Just Working).
I'd rather make it Just Work if we can!
I don't quite follow. The `functools.partial()` in my call only happens when the specialization value isn't already in the cache dictionary. So it would only retrace once per unique value for `string_arg`.
Sorry, I misread your code. (Actually I think it's a bit buggy in that you want to curry the `wrapped` fun one extra level and not get both `fn` and `args`/`kwargs` at the same time.)
You're right that you can get cache hits for equal values with this approach, so that part of my comment was mistaken. But now you have separate jit caches (so e.g. we can't get the same eviction logic we would get from having a single cache).
In general my only point was just that `with_static(jit, 'string_arg')(fn)` isn't _exactly_ the same as `jit(fn, static_argnames='string_arg')`.
Luckily, #15569 will solve the issue by making things Just Work! With that `checkify` will no longer place any constraints on the arguments passed to the `checkify`-decorated function. (Internally all the arguments are just closed over; we checked that the caching issues I was concerned about, basically the things #14250 and #14291 were fixing, no longer apply thanks to 6ec9082.) | 2023-04-12T17:31:23 |
google/jax | 15,619 | google__jax-15619 | [
"15613"
]
| 849e47f79ac64ccba1a762804217c00a9905025b | diff --git a/jax/_src/prng.py b/jax/_src/prng.py
--- a/jax/_src/prng.py
+++ b/jax/_src/prng.py
@@ -875,6 +875,10 @@ def threefry_seed(seed: typing.Array) -> typing.Array:
bit-casting to a pair of uint32 values (or from a 32-bit seed by
first padding out with zeros).
"""
+ return _threefry_seed(seed)
+
+@partial(jit, inline=True)
+def _threefry_seed(seed: typing.Array) -> typing.Array:
if seed.shape:
raise TypeError(f"PRNG key seed must be a scalar; got {seed!r}.")
if not np.issubdtype(seed.dtype, np.integer):
| diff --git a/tests/random_test.py b/tests/random_test.py
--- a/tests/random_test.py
+++ b/tests/random_test.py
@@ -527,6 +527,13 @@ def test_isinstance(self):
self.assertIsInstance(key, jax.Array)
+class ThreefryPrngTest(jtu.JaxTestCase):
+ def test_seed_no_implicit_transfers(self):
+ # See https://github.com/google/jax/issues/15613
+ with jax.transfer_guard('disallow'):
+ random.threefry2x32_key(jax.device_put(42)) # doesn't crash
+
+
class LaxRandomTest(jtu.JaxTestCase):
def _CheckCollisions(self, samples, nbits):
| Can jax.random.PRNGKey work with jax_transfer_guard?
It seems that PRNGKey isn't compatible with jax_transfer_guard? I'm getting a `Disallowed host-to-device transfer` error regardless of whether I'm passing in a np or jnp seed.
- Is there a way to use PRNGKey together with jax_transfer_guard? Otherwise, would this be feasible to add?
- As a workaround, can I just replace PRNGKey by creating an array of shape (2,) that contains two uniform random int32 values myself on the outside, or are there more restrictions on what PRNGKey returns?
Example 1:
```python3
import jax
import jax.numpy as jnp
jax.config.update('jax_transfer_guard', 'disallow')
seed = jax.device_put(42)
key = jax.random.PRNGKey(seed)
```
<details>
```
Traceback (most recent call last):
File "/Users/danijar/temp/example.py", line 7, in <module>
key = jax.random.PRNGKey(seed)
File "/Users/danijar/homebrew/lib/python3.9/site-packages/jax/_src/random.py", line 128, in PRNGKey
key = prng.seed_with_impl(impl, seed)
File "/Users/danijar/homebrew/lib/python3.9/site-packages/jax/_src/prng.py", line 262, in seed_with_impl
return random_seed(seed, impl=impl)
File "/Users/danijar/homebrew/lib/python3.9/site-packages/jax/_src/prng.py", line 555, in random_seed
return random_seed_p.bind(seeds_arr, impl=impl)
File "/Users/danijar/homebrew/lib/python3.9/site-packages/jax/core.py", line 325, in bind
return self.bind_with_trace(find_top_trace(args), args, params)
File "/Users/danijar/homebrew/lib/python3.9/site-packages/jax/core.py", line 328, in bind_with_trace
out = trace.process_primitive(self, map(trace.full_raise, args), params)
File "/Users/danijar/homebrew/lib/python3.9/site-packages/jax/core.py", line 686, in process_primitive
return primitive.impl(*tracers, **params)
File "/Users/danijar/homebrew/lib/python3.9/site-packages/jax/_src/prng.py", line 567, in random_seed_impl
base_arr = random_seed_impl_base(seeds, impl=impl)
File "/Users/danijar/homebrew/lib/python3.9/site-packages/jax/_src/prng.py", line 572, in random_seed_impl_base
return seed(seeds)
File "/Users/danijar/homebrew/lib/python3.9/site-packages/jax/_src/prng.py", line 807, in threefry_seed
lax.shift_right_logical(seed, lax_internal._const(seed, 32)))
File "/Users/danijar/homebrew/lib/python3.9/site-packages/jax/_src/lax/lax.py", line 500, in shift_right_logical
return shift_right_logical_p.bind(x, y)
File "/Users/danijar/homebrew/lib/python3.9/site-packages/jax/core.py", line 325, in bind
return self.bind_with_trace(find_top_trace(args), args, params)
File "/Users/danijar/homebrew/lib/python3.9/site-packages/jax/core.py", line 328, in bind_with_trace
out = trace.process_primitive(self, map(trace.full_raise, args), params)
File "/Users/danijar/homebrew/lib/python3.9/site-packages/jax/core.py", line 686, in process_primitive
return primitive.impl(*tracers, **params)
File "/Users/danijar/homebrew/lib/python3.9/site-packages/jax/_src/dispatch.py", line 113, in apply_primitive
return compiled_fun(*args)
File "/Users/danijar/homebrew/lib/python3.9/site-packages/jax/_src/dispatch.py", line 198, in <lambda>
return lambda *args, **kw: compiled(*args, **kw)[0]
File "/Users/danijar/homebrew/lib/python3.9/site-packages/jax/_src/dispatch.py", line 825, in _execute_compiled
in_flat = flatten(device_put(x, device) for i, x in enumerate(args)
File "/Users/danijar/homebrew/lib/python3.9/site-packages/jax/_src/util.py", line 113, in concatenate
return list(it.chain.from_iterable(xs))
File "/Users/danijar/homebrew/lib/python3.9/site-packages/jax/_src/dispatch.py", line 825, in <genexpr>
in_flat = flatten(device_put(x, device) for i, x in enumerate(args)
File "/Users/danijar/homebrew/lib/python3.9/site-packages/jax/_src/dispatch.py", line 1115, in device_put
return device_put_handlers[type(x)](x, device)
File "/Users/danijar/homebrew/lib/python3.9/site-packages/jax/_src/dispatch.py", line 1126, in _device_put_array
return (backend.buffer_from_pyval(x, device),)
jaxlib.xla_extension.XlaRuntimeError: INVALID_ARGUMENT: Disallowed host-to-device transfer: type=<class 'numpy.ndarray'>, shape=(), dtype=int32, dst_device=TFRT_CPU_0
```
</details>
Example 2:
```python3
import jax
import jax.numpy as jnp
jax.config.update('jax_transfer_guard', 'disallow')
seed = 42
key = jax.random.PRNGKey(seed)
```
<details>
```
Traceback (most recent call last):
File "/Users/danijar/temp/example.py", line 7, in <module>
key = jax.random.PRNGKey(seed)
File "/Users/danijar/homebrew/lib/python3.9/site-packages/jax/_src/random.py", line 128, in PRNGKey
key = prng.seed_with_impl(impl, seed)
File "/Users/danijar/homebrew/lib/python3.9/site-packages/jax/_src/prng.py", line 262, in seed_with_impl
return random_seed(seed, impl=impl)
File "/Users/danijar/homebrew/lib/python3.9/site-packages/jax/_src/prng.py", line 552, in random_seed
seeds_arr = jnp.asarray(np.int64(seeds))
File "/Users/danijar/homebrew/lib/python3.9/site-packages/jax/_src/numpy/lax_numpy.py", line 1921, in asarray
return array(a, dtype=dtype, copy=False, order=order)
File "/Users/danijar/homebrew/lib/python3.9/site-packages/jax/_src/numpy/lax_numpy.py", line 1902, in array
out = lax_internal._convert_element_type(out, dtype, weak_type=weak_type)
File "/Users/danijar/homebrew/lib/python3.9/site-packages/jax/_src/lax/lax.py", line 579, in _convert_element_type
return convert_element_type_p.bind(operand, new_dtype=new_dtype,
File "/Users/danijar/homebrew/lib/python3.9/site-packages/jax/core.py", line 325, in bind
return self.bind_with_trace(find_top_trace(args), args, params)
File "/Users/danijar/homebrew/lib/python3.9/site-packages/jax/core.py", line 328, in bind_with_trace
out = trace.process_primitive(self, map(trace.full_raise, args), params)
File "/Users/danijar/homebrew/lib/python3.9/site-packages/jax/core.py", line 686, in process_primitive
return primitive.impl(*tracers, **params)
File "/Users/danijar/homebrew/lib/python3.9/site-packages/jax/_src/dispatch.py", line 113, in apply_primitive
return compiled_fun(*args)
File "/Users/danijar/homebrew/lib/python3.9/site-packages/jax/_src/dispatch.py", line 198, in <lambda>
return lambda *args, **kw: compiled(*args, **kw)[0]
File "/Users/danijar/homebrew/lib/python3.9/site-packages/jax/_src/dispatch.py", line 881, in _execute_trivial
return [_copy_device_array_to_device(x, device) if device_array.type_is_device_array(x)
File "/Users/danijar/homebrew/lib/python3.9/site-packages/jax/_src/dispatch.py", line 882, in <listcomp>
else h(None, *device_put(x, device)) for h, x in zip(handlers, outs)]
File "/Users/danijar/homebrew/lib/python3.9/site-packages/jax/_src/dispatch.py", line 1115, in device_put
return device_put_handlers[type(x)](x, device)
File "/Users/danijar/homebrew/lib/python3.9/site-packages/jax/_src/dispatch.py", line 1126, in _device_put_array
return (backend.buffer_from_pyval(x, device),)
jaxlib.xla_extension.XlaRuntimeError: INVALID_ARGUMENT: Disallowed host-to-device transfer: type=<class 'numpy.ndarray'>, shape=(), dtype=int32, dst_device=TFRT_CPU_0
```
</details>
| 2023-04-15T17:16:07 |
|
google/jax | 15,621 | google__jax-15621 | [
"14856"
]
| 20896c1b2d156f94e091e442c8c00cb15ec271d4 | diff --git a/jax/_src/dtypes.py b/jax/_src/dtypes.py
--- a/jax/_src/dtypes.py
+++ b/jax/_src/dtypes.py
@@ -617,10 +617,10 @@ def dtype(x: Any, *, canonicalize: bool = False) -> DType:
dt = np.result_type(x)
except TypeError as err:
raise TypeError(f"Cannot determine dtype of {x}") from err
- if dt not in _jax_dtype_set:
+ if dt not in _jax_dtype_set and not core.is_opaque_dtype(dt):
raise TypeError(f"Value '{x}' with dtype {dt} is not a valid JAX array "
"type. Only arrays of numeric types are supported by JAX.")
- return canonicalize_dtype(dt) if canonicalize else dt
+ return canonicalize_dtype(dt, allow_opaque_dtype=True) if canonicalize else dt
def _lattice_result_type(*args: Any) -> Tuple[DType, bool]:
dtypes, weak_types = zip(*(_dtype_and_weaktype(arg) for arg in args))
| diff --git a/tests/random_test.py b/tests/random_test.py
--- a/tests/random_test.py
+++ b/tests/random_test.py
@@ -526,6 +526,11 @@ def test_isinstance(self):
key = random.PRNGKey(0)
self.assertIsInstance(key, jax.Array)
+ def test_key_output_vjp(self):
+ # See https://github.com/google/jax/issues/14856
+ def f(seed): return random.PRNGKey(seed)
+ jax.vjp(f, 1) # doesn't crash
+
class ThreefryPrngTest(jtu.JaxTestCase):
def test_seed_no_implicit_transfers(self):
| bad error on VJP of functions returning typed key arrays
Found by @NeilGirdhar in #14046!
```python
from jax import enable_custom_prng, vjp
from jax.random import PRNGKey
with enable_custom_prng():
def f(i):
return PRNGKey(i)
out, f_vjp = vjp(f, 1) # Fails!
```
Error is:
```
TypeError: Value 'PRNGKeyArray[fry] { [0 1] }' with dtype key<fry> is not a valid JAX array type. Only arrays of numeric types are supported by JAX.
```
cc #9263
| 2023-04-16T03:08:13 |
|
google/jax | 15,643 | google__jax-15643 | [
"14923"
]
| 1277f284ceb8f20c2e66ad34de37abd696b7d298 | diff --git a/jax/_src/lax/slicing.py b/jax/_src/lax/slicing.py
--- a/jax/_src/lax/slicing.py
+++ b/jax/_src/lax/slicing.py
@@ -514,6 +514,7 @@ def scatter_apply(
operand: Array, scatter_indices: Array,
func: Callable[[Array], Array],
dimension_numbers: ScatterDimensionNumbers, *,
+ update_shape: Shape = (),
indices_are_sorted: bool = False, unique_indices: bool = False,
mode: Optional[Union[str, GatherScatterMode]] = None) -> Array:
"""Scatter-apply operator.
@@ -539,6 +540,7 @@ def scatter_apply(
dimension_numbers: a `lax.ScatterDimensionNumbers` object that describes
how dimensions of `operand`, `start_indices`, `updates` and the output
relate.
+ update_shape: the shape of the updates at the given indices.
indices_are_sorted: whether `scatter_indices` is known to be sorted. If
true, may improve performance on some backends.
unique_indices: whether the elements to be updated in ``operand`` are
@@ -555,7 +557,7 @@ def scatter_apply(
An array containing the result of applying `func` to `operand` at the given indices.
"""
# TODO: can we implement this without a placeholder?
- unused = lax.full(scatter_indices.shape[:1], 0, operand.dtype)
+ unused = lax.full(update_shape, 0, operand.dtype)
_apply = lambda x, _: func(x)
try:
_apply = _scatter_apply_cache.setdefault(func, _apply)
diff --git a/jax/_src/numpy/array_methods.py b/jax/_src/numpy/array_methods.py
--- a/jax/_src/numpy/array_methods.py
+++ b/jax/_src/numpy/array_methods.py
@@ -541,8 +541,8 @@ def apply(self, func, *, indices_are_sorted=False, unique_indices=False,
See :mod:`jax.ops` for details.
"""
- def _scatter_apply(x, indices, _, dims, **kwargs):
- return lax.scatter_apply(x, indices, func, dims, **kwargs)
+ def _scatter_apply(x, indices, y, dims, **kwargs):
+ return lax.scatter_apply(x, indices, func, dims, update_shape=y.shape, **kwargs)
return scatter._scatter_update(self.array, self.index,
lax_internal._zero(self.array.dtype),
_scatter_apply,
| diff --git a/tests/lax_numpy_indexing_test.py b/tests/lax_numpy_indexing_test.py
--- a/tests/lax_numpy_indexing_test.py
+++ b/tests/lax_numpy_indexing_test.py
@@ -452,10 +452,25 @@ def np_op(x, idx):
return y
def jnp_op(x, idx):
return jnp.asarray(x).at[idx].apply(jnp_func)
+
+ # Test with traced integer index
args_maker = lambda: [rng(size, dtype), idx_rng(size, int)]
self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
self._CompileAndCheck(jnp_op, args_maker)
+ # Test with slice index
+ idx = slice(1, 5)
+ np_op_idx = partial(np_op, idx=idx)
+ jnp_op_idx = partial(jnp_op, idx=idx)
+ args_maker = lambda: [rng(size, dtype)]
+ self._CheckAgainstNumpy(np_op_idx, jnp_op_idx, args_maker)
+ self._CompileAndCheck(jnp_op_idx, args_maker)
+
+ def testIndexUpdateScalarBug(self):
+ # https://github.com/google/jax/issues/14923
+ a = jnp.arange(10.)
+ out = a.at[0].apply(jnp.cos)
+ self.assertArraysEqual(out, a.at[0].set(1))
@jtu.sample_product(
[dict(name=name, shape=shape, indexer=indexer, mode=mode)
| `a.at[i].apply(...)` fails when `i: int`
### Description
```python
a = jnp.arange(10.)
a = a.at[0].apply(jnp.sin)
```
fails with
```python
TypeError: Updates tensor must be of rank 0; got 1.
```
I think it should work, as `at[0:1]` works. The error message is also not that helpful. It can produce a more cryptic error (`MLIRError`) when `a.dtype == np.int_`. NumPy seems to cast the result back to int:
```python
a = np.arange(10)
np.cos.at(a, np.arange(3))
# a == array([1, 0, 0, 3, 4, 5, 6, 7, 8, 9])
```
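(Aside: for comparison, the slice form mentioned above does work today:)

```python
import jax.numpy as jnp

a = jnp.arange(10.)
b = a.at[0:1].apply(jnp.sin)  # works: slice index
# a.at[0].apply(jnp.sin)      # scalar index fails as described above
```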
Not sure we want NumPy's cast-back-to-int behaviour, but I think we could at least raise a better error than `MLIRError`.
| 2023-04-17T22:29:10 |
|
google/jax | 15,658 | google__jax-15658 | [
"15598"
]
| 933d695170246efaba2f949b774235e407f1ea08 | diff --git a/jax/_src/scipy/special.py b/jax/_src/scipy/special.py
--- a/jax/_src/scipy/special.py
+++ b/jax/_src/scipy/special.py
@@ -114,23 +114,40 @@ def expit(x: ArrayLike) -> Array:
logsumexp = _wraps(osp_special.logsumexp, module='scipy.special')(ops_special.logsumexp)
+@custom_derivatives.custom_jvp
@_wraps(osp_special.xlogy, module='scipy.special')
def xlogy(x: ArrayLike, y: ArrayLike) -> Array:
+ # Note: xlogy(0, 0) should return 0 according to the function documentation.
x, y = promote_args_inexact("xlogy", x, y)
x_ok = x != 0.
safe_x = jnp.where(x_ok, x, 1.)
safe_y = jnp.where(x_ok, y, 1.)
return jnp.where(x_ok, lax.mul(safe_x, lax.log(safe_y)), jnp.zeros_like(x))
+def _xlogy_jvp(primals, tangents):
+ (x, y) = primals
+ (x_dot, y_dot) = tangents
+ result = xlogy(x, y)
+ return result, (x_dot * lax.log(y) + y_dot * x / y).astype(result.dtype)
+xlogy.defjvp(_xlogy_jvp)
+
+@custom_derivatives.custom_jvp
@_wraps(osp_special.xlog1py, module='scipy.special', update_doc=False)
def xlog1py(x: ArrayLike, y: ArrayLike) -> Array:
+ # Note: xlog1py(0, -1) should return 0 according to the function documentation.
x, y = promote_args_inexact("xlog1py", x, y)
x_ok = x != 0.
safe_x = jnp.where(x_ok, x, 1.)
safe_y = jnp.where(x_ok, y, 1.)
return jnp.where(x_ok, lax.mul(safe_x, lax.log1p(safe_y)), jnp.zeros_like(x))
+def _xlog1py_jvp(primals, tangents):
+ (x, y) = primals
+ (x_dot, y_dot) = tangents
+ result = xlog1py(x, y)
+ return result, (x_dot * lax.log1p(y) + y_dot * x / (1 + y)).astype(result.dtype)
+xlog1py.defjvp(_xlog1py_jvp)
@_wraps(osp_special.entr, module='scipy.special')
def entr(x: ArrayLike) -> Array:
| diff --git a/tests/lax_scipy_test.py b/tests/lax_scipy_test.py
--- a/tests/lax_scipy_test.py
+++ b/tests/lax_scipy_test.py
@@ -13,7 +13,6 @@
# limitations under the License.
-import functools
from functools import partial
import itertools
import unittest
@@ -220,15 +219,29 @@ def testXlogyShouldReturnZero(self):
self.assertAllClose(lsp_special.xlogy(0., 0.), 0., check_dtypes=False)
def testGradOfXlogyAtZero(self):
- partial_xlogy = functools.partial(lsp_special.xlogy, 0.)
- self.assertAllClose(jax.grad(partial_xlogy)(0.), 0., check_dtypes=False)
+ # https://github.com/google/jax/issues/15598
+ x0, y0 = 0.0, 3.0
+ d_xlog1py_dx = jax.grad(lsp_special.xlogy, argnums=0)(x0, y0)
+ self.assertAllClose(d_xlog1py_dx, lax.log(y0))
+
+ d_xlog1py_dy = jax.grad(lsp_special.xlogy, argnums=1)(x0, y0)
+ self.assertAllClose(d_xlog1py_dy, 0.0)
+
+ jtu.check_grads(lsp_special.xlogy, (x0, y0), order=2)
def testXlog1pyShouldReturnZero(self):
self.assertAllClose(lsp_special.xlog1py(0., -1.), 0., check_dtypes=False)
def testGradOfXlog1pyAtZero(self):
- partial_xlog1py = functools.partial(lsp_special.xlog1py, 0.)
- self.assertAllClose(jax.grad(partial_xlog1py)(-1.), 0., check_dtypes=False)
+ # https://github.com/google/jax/issues/15598
+ x0, y0 = 0.0, 3.0
+ d_xlog1py_dx = jax.grad(lsp_special.xlog1py, argnums=0)(x0, y0)
+ self.assertAllClose(d_xlog1py_dx, lax.log1p(y0))
+
+ d_xlog1py_dy = jax.grad(lsp_special.xlog1py, argnums=1)(x0, y0)
+ self.assertAllClose(d_xlog1py_dy, 0.0)
+
+ jtu.check_grads(lsp_special.xlog1py, (x0, y0), order=2)
@jtu.sample_product(
[dict(order=order, z=z, n_iter=n_iter)
| Incorrect gradient for `jax.scipy.stats.beta.logpdf`
### Description
Hi all. During some testing for an inference problem, I noticed that the gradients computed for `jax.scipy.stats.beta.logpdf` were incorrect when using alpha/beta parameters at 1. Please find a MWE below.
```python
import jax
import jax.numpy as jnp
import jax.scipy.special as jaxspec
import jax.scipy.stats as jaxstats
from jax import Array
from jax.config import config
from jax.scipy.special import gammaln
from jax.typing import ArrayLike
config.update("jax_enable_x64", True)
# custom loglikelihood
def jax_loglik(params: ArrayLike, p: ArrayLike) -> Array:
k, n = params
return (
(k - 1) * jnp.sum(jnp.log(p))
+ (n - 1) * jnp.sum(jnp.log1p(-p))
- len(p) * (gammaln(k) + gammaln(n) - gammaln(k + n))
)
# log likelihood using direct logpdf sum
def jax_logpdf(params: ArrayLike, p: ArrayLike) -> Array:
k, n = params
return jnp.sum(jaxstats.beta.logpdf(p, k, n))
# fake data
x = 0.5 * jnp.ones(2)
# problematic parameters
bad_params = jnp.ones(2)
# non-problematic parameters
fine_params = 2 * jnp.ones(2)
for name, param in [("bad param", bad_params), ("fine param", fine_params)]:
print(f"Evaluating functions using {name}")
# eval each log likelihood definition using alpha = 1., beta = 1., x1 = 0.5, x2 = 0.5
print(f"Bespoke ll = {jax_loglik(param, x)}")
print(f"JAX beta.logpdf = {jax_logpdf(param, x)}")
# eval gradient of log likelihood wrt alpha, beta, at alpha = beta = 1.
print(f"Grad bespoke ll = {jax.grad(jax_loglik)(param, x)}")
print(f"Grad JAX beta.logpdf {jax.grad(jax_logpdf)(param, x)}")
print("")
```
The above code generates the following output,
```
Evaluating functions using bad param
Bespoke ll = -0.0
JAX beta.logpdf = 0.0
Grad bespoke ll = [0.61370564 0.61370564]
Grad JAX beta.logpdf [2. 2.]
Evaluating functions using fine param
Bespoke ll = 0.8109302162163301
JAX beta.logpdf = 0.8109302162163301
Grad bespoke ll = [0.28037231 0.28037231]
Grad JAX beta.logpdf [0.28037231 0.28037231]
```
### What jax/jaxlib version are you using?
jax v0.4.8
### Which accelerator(s) are you using?
CPU
### Additional system info
Mac M1
### NVIDIA GPU info
_No response_
| I've edited the above example to be a bit more minimal and include a positive example where gradients agree for non-problematic parameters.
After a bit of digging, it looks like this is related to the use of `xlogy` and `xlog1py` in the definition of `logpdf`. Here is a reduced MWE that pertains to those parts:
```python
import jax
import jax.numpy as jnp
import jax.scipy.stats as jaxstats
from jax import Array
from jax.config import config
from jax.scipy.special import betaln, gammaln, polygamma, xlogy, xlog1py
from jax.typing import ArrayLike
config.update("jax_enable_x64", True)
def jax_norm(params: ArrayLike, p: ArrayLike) -> Array:
k, n = params
return -len(p) * betaln(k, n)
def custom_norm_grad(params: ArrayLike, p: ArrayLike) -> Array:
k, n = params
return jnp.array(
[
-len(p) * polygamma(0, k) + len(p) * polygamma(0, k + n),
-len(p) * polygamma(0, n) + len(p) * polygamma(0, k + n),
]
)
def custom_unscaled_loglike_grad(params: ArrayLike, p: ArrayLike) -> Array:
k, n = params
return jnp.array([jnp.sum(jnp.log(p)), jnp.sum(jnp.log1p(-p))])
def jax_unscaled_loglike(params: ArrayLike, p: ArrayLike) -> Array:
k, n = params
return jnp.sum(xlogy((k - 1), jnp.log(p)) + xlog1py((n - 1), -p))
# fake data
x = 0.5 * jnp.ones(2)
# problematic parameters
param = jnp.ones(2)
print(
f"Bespoke gradient of unscaled log-likelihood {custom_unscaled_loglike_grad(param, x)}"
)
print(
f"JAX Gradient of unscaled log-likelihood {jax.grad(jax_unscaled_loglike)(param, x)}"
)
print(f"Bespoke gradient of log-likelihood normalizing term {custom_norm_grad(param, x)}")
print(f"JAX Gradient of log-likelihood normalizing term {jax.grad(jax_norm)(param, x)}")
```
which outputs,
```
Bespoke gradient of unscaled log-likelihood [-1.38629436 -1.38629436]
JAX Gradient of unscaled log-likelihood [0. 0.]
Bespoke gradient of log-likelihood normalizing term [2. 2.]
JAX Gradient of log-likelihood normalizing term [2. 2.]
```
So something is happening (maybe on the forward pass?) that first evaluates the `xlogy` (or `xlog1py`) terms to 0, since `k - 1` and `n - 1` are 0 when `k = n = 1`, but the gradients should still evaluate to non-zero using `xlogy` (or `xlog1py`) with `x=1`.
Thanks - I think you're right in your assessment that `xlogy` and `xlog1py` have incorrect gradients at x=0. An easier way to demonstrate this:
```python
print(jax.grad(lambda x: xlogy(x, 2.0))(0.0))
# 0.0
print(jax.grad(lambda x: x * jnp.log(2.0))(0.0))
# 0.6931472
```
These probably need a custom JVP given their current implementation. | 2023-04-18T21:00:07 |
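A minimal sketch of the custom-JVP approach that comment describes, applied to `xlogy` (it mirrors the `xlog1py` rule in the patch above; treat it as an illustration rather than the exact committed code, and note the derivative is still undefined at `x == y == 0`):
```python
import jax.numpy as jnp
from jax import custom_jvp

@custom_jvp
def xlogy(x, y):
  # xlogy(0, y) is defined to be 0, so mask out log(y) there.
  x_ok = x != 0.
  safe_x = jnp.where(x_ok, x, 1.)
  safe_y = jnp.where(x_ok, y, 1.)
  return jnp.where(x_ok, safe_x * jnp.log(safe_y), jnp.zeros_like(x))

@xlogy.defjvp
def _xlogy_jvp(primals, tangents):
  (x, y), (x_dot, y_dot) = primals, tangents
  # d/dx [x log y] = log y,  d/dy [x log y] = x / y
  return xlogy(x, y), x_dot * jnp.log(y) + y_dot * x / y
```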
google/jax | 15,671 | google__jax-15671 | [
"12592"
]
| a2fbd59e63d30603e611ebec829882faf6fdc3f6 | diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -2883,8 +2883,18 @@ def append(arr, values, axis: Optional[int] = None):
return concatenate([arr, values], axis=axis)
-@util._wraps(np.delete)
-def delete(arr, obj, axis=None):
+@util._wraps(np.delete,
+ lax_description=_dedent("""
+ delete() usually requires the index specification to be static. If the index
+ is an integer array that is guaranteed to contain unique entries, you may
+ specify ``assume_unique_indices=True`` to perform the operation in a
+ manner that does not require static indices."""),
+ extra_params=_dedent("""
+ assume_unique_indices : int, optional (default=False)
+ In case of array-like integer (not boolean) indices, assume the indices are unique,
+ and perform the deletion in a way that is compatible with JIT and other JAX
+ transformations."""))
+def delete(arr, obj, axis=None, *, assume_unique_indices=False):
util.check_arraylike("delete", arr)
if axis is None:
arr = ravel(arr)
@@ -2910,6 +2920,18 @@ def delete(arr, obj, axis=None):
# Case 3: obj is an array
# NB: pass both arrays to check for appropriate error message.
util.check_arraylike("delete", arr, obj)
+
+ # Case 3a: unique integer indices; delete in a JIT-compatible way
+ if issubdtype(_dtype(obj), integer) and assume_unique_indices:
+ obj = asarray(obj).ravel()
+ obj = clip(where(obj < 0, obj + arr.shape[axis], obj), 0, arr.shape[axis])
+ obj = sort(obj)
+ obj -= arange(len(obj))
+ i = arange(arr.shape[axis] - obj.size)
+ i += (i[None, :] >= obj[:, None]).sum(0)
+ return arr[(slice(None),) * axis + (i,)]
+
+ # Case 3b: non-unique indices: must be static.
obj = core.concrete_or_error(np.asarray, obj, "'obj' array argument of jnp.delete()")
if issubdtype(obj.dtype, integer):
| diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py
--- a/tests/lax_numpy_test.py
+++ b/tests/lax_numpy_test.py
@@ -1526,6 +1526,29 @@ def testDeleteIndexArray(self, shape, dtype, axis, idx_shape):
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
+ @jtu.sample_product(
+ [dict(shape=shape, axis=axis)
+ for shape in nonempty_nonscalar_array_shapes
+ for axis in [None] + list(range(-len(shape), len(shape)))
+ ],
+ dtype=all_dtypes,
+ idx_shape=all_shapes,
+ )
+ def testDeleteUniqueIndices(self, shape, dtype, axis, idx_shape):
+ rng = jtu.rand_default(self.rng())
+ max_idx = np.zeros(shape).size if axis is None else np.zeros(shape).shape[axis]
+ idx_size = np.zeros(idx_shape).size
+ if idx_size > max_idx:
+ self.skipTest("Too many indices to be unique")
+ def args_maker():
+ x = rng(shape, dtype)
+ idx = self.rng().choice(max_idx, idx_shape, replace=False)
+ return x, idx
+ np_fun = partial(np.delete, axis=axis)
+ jnp_fun = partial(jnp.delete, axis=axis, assume_unique_indices=True)
+ self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
+ self._CompileAndCheck(jnp_fun, args_maker)
+
@jtu.sample_product(
[dict(shape=shape, axis=axis)
for shape in nonempty_nonscalar_array_shapes
| Please make jnp.delete jittable.
Current workaround:
```
@jax.jit
def apply_except_one(x, i):
idx = jnp.arange(x.shape[0])
idx = jnp.where(idx >= i, idx + 1, idx)[:-1]
return _complex_jittable_function(x[idx])
```
Desired state:
```
@jax.jit
def apply_except_one(x, i):
return _complex_jittable_function(jnp.delete(x, i))
```
| ```
>>> jax.jit(jnp.delete, static_argnums=1)(jnp.arange(5), 2)
DeviceArray([0, 1, 3, 4], dtype=int32)
```
See https://jax.readthedocs.io/en/latest/errors.html#jax.errors.ConcretizationTypeError and https://jax.readthedocs.io/en/latest/_autosummary/jax.jit.html#jax.jit
Sorry, there's chat context here! We don't want to use `static_argnums`, and indeed we don't really have to. (See the JAXers thread with Blake.)
Just saw that thread :)
Assigning to you instead :)
Assign Peter to everything, like we talked about! ;)
> Sorry, there's chat context here! We don't want to use `static_argnums`, and indeed we don't really have to. (See the JAXers thread with Blake.)
@mattjj Hi Matthew, I think I missed the chat or thread? So how exactly do we make jnp.delete jittable without static_argnums? :) | 2023-04-19T19:34:44 |
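For completeness, a short usage sketch of the escape hatch this patch adds (the flag name is taken from the diff above; the data and shapes are made up):
```python
import jax
import jax.numpy as jnp

@jax.jit
def drop_rows(x, idx):
  # idx is a traced array of unique integer row indices; no static_argnums needed.
  return jnp.delete(x, idx, axis=0, assume_unique_indices=True)

x = jnp.arange(12.0).reshape(4, 3)
print(drop_rows(x, jnp.array([1, 3])))  # rows 0 and 2 remain
```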
google/jax | 15,679 | google__jax-15679 | [
"15676"
]
| fa5915b34df5de87ed6e388d0f52796226adced1 | diff --git a/jax/_src/lax/lax.py b/jax/_src/lax/lax.py
--- a/jax/_src/lax/lax.py
+++ b/jax/_src/lax/lax.py
@@ -542,7 +542,10 @@ def _convert_element_type(operand: ArrayLike, new_dtype: Optional[DTypeLike] = N
operand = np.asarray(operand).astype(new_dtype)
old_weak_type = False
- if (old_dtype, old_weak_type) == (new_dtype, weak_type) and isinstance(operand, Array):
+ if ((old_dtype, old_weak_type) == (new_dtype, weak_type) and
+ isinstance(operand, Array) and
+ not (isinstance(operand, core.Tracer) and
+ isinstance(core.get_aval(operand), core.ConcreteArray))):
return type_cast(Array, operand)
else:
return convert_element_type_p.bind(operand, new_dtype=new_dtype,
| diff --git a/tests/api_test.py b/tests/api_test.py
--- a/tests/api_test.py
+++ b/tests/api_test.py
@@ -4358,6 +4358,12 @@ def test_jit_custom_floats(self, dtype):
args_maker = lambda: [jnp.ones((), dtype=dtype)]
self._CompileAndCheck(f, args_maker)
+ def test_jvp_asarray_returns_array(self):
+ # https://github.com/google/jax/issues/15676
+ p, t = jax.jvp(jax.numpy.asarray, (1.,), (2.,))
+ _check_instance(self, p)
+ _check_instance(self, t)
+
class RematTest(jtu.JaxTestCase):
| `jax.numpy.asarray` does not function under autodiff
### Description
```python
import jax
p, t = jax.jvp(jax.numpy.asarray, (1.,), (2.,))
print(p, type(p))
print(t, type(t))
# 1.0 <class 'float'>
# 2.0 <class 'float'>
```
In particular the primal computation produces a different type than is produced without autodiff, via just `jax.numpy.asarray(1.)`.
### What jax/jaxlib version are you using?
JAX 0.4.8
| Thanks for noticing this and reporting it so clearly!
For anyone else still getting bit by this, my workaround is pretty simple:
```python
@jax.custom_jvp
def fixed_asarray(x):
return jnp.asarray(x)
@fixed_asarray.defjvp
def _fixed_asarray_jvp(x, tx):
(x,) = x
(tx,) = tx
return fixed_asarray(x), fixed_asarray(tx)
```
I just hit this in refactoring `jnp.dot` in #16721. @mattjj - any plans on this? | 2023-04-20T03:51:04 |
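A quick check of the workaround above, as the commenter intends it (a sketch that relies on the `fixed_asarray` defined in the previous comment and an existing `import jax`):
```python
p, t = jax.jvp(fixed_asarray, (1.,), (2.,))
print(type(p), type(t))  # both should now be jax.Array instances
```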
google/jax | 15,698 | google__jax-15698 | [
"15691"
]
| 5647d5db9865298bc6b1df5c4f02aadbbbe2e06b | diff --git a/jax/_src/errors.py b/jax/_src/errors.py
--- a/jax/_src/errors.py
+++ b/jax/_src/errors.py
@@ -14,6 +14,8 @@
from __future__ import annotations
from jax._src import core
+from jax._src.util import set_module
+
class _JAXErrorMixin:
"""Mixin for JAX-specific errors"""
@@ -29,14 +31,17 @@ def __init__(self, message: str):
super().__init__(error_msg) # type: ignore
+@set_module('jax.errors')
class JAXTypeError(_JAXErrorMixin, TypeError):
pass
+@set_module('jax.errors')
class JAXIndexError(_JAXErrorMixin, IndexError):
pass
+@set_module('jax.errors')
class ConcretizationTypeError(JAXTypeError):
"""
This error occurs when a JAX Tracer object is used in a context where a
@@ -180,6 +185,7 @@ def __init__(self, tracer: core.Tracer, context: str = ""):
f"{tracer}\n{context}{tracer._origin_msg()}\n")
+@set_module('jax.errors')
class NonConcreteBooleanIndexError(JAXIndexError):
"""
This error occurs when a program attempts to use non-concrete boolean indices
@@ -271,6 +277,7 @@ def __init__(self, tracer: core.Tracer):
f"Array boolean indices must be concrete; got {tracer}\n")
+@set_module('jax.errors')
class TracerArrayConversionError(JAXTypeError):
"""
This error occurs when a program attempts to convert a JAX Tracer object into
@@ -351,6 +358,7 @@ def __init__(self, tracer: core.Tracer):
f"the JAX Tracer object {tracer}{tracer._origin_msg()}")
+@set_module('jax.errors')
class TracerIntegerConversionError(JAXTypeError):
"""
This error can occur when a JAX Tracer object is used in a context where a
@@ -443,6 +451,7 @@ def __init__(self, tracer: core.Tracer):
f"The __index__() method was called on the JAX Tracer object {tracer}")
+@set_module('jax.errors')
class UnexpectedTracerError(JAXTypeError):
"""
This error occurs when you use a JAX value that has leaked out of a function.
diff --git a/jax/_src/util.py b/jax/_src/util.py
--- a/jax/_src/util.py
+++ b/jax/_src/util.py
@@ -547,6 +547,14 @@ def _original_func(f):
return f
+def set_module(module):
+ def wrapper(func):
+ if module is not None:
+ func.__module__ = module
+ return func
+ return wrapper
+
+
if TYPE_CHECKING:
def use_cpp_class(cpp_cls: Any) -> Callable[[T], T]:
def wrapper(cls: T) -> T:
| exceptions that reach the user should have a public type (e.g. ConcretizationTypeError)
Consider `except jax._src.errors.ConcretizationTypeError: # pylint: disable=protected-access`. Lame!
| Actually, these already have a public name, and are [documented](https://jax.readthedocs.io/en/latest/errors.html), e.g. `jax.errors.ConcretizationTypeError` after we `import jax`.
We can improve this situation by making the repr of the errors print their public path; #15698 should take care of that | 2023-04-21T15:14:48 |
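For reference, a small sketch of catching the error by its public name; the jitted function below is just one arbitrary way to trigger a concretization error:
```python
import jax

@jax.jit
def f(x):
  if x > 0:  # bool() on a traced value triggers a concretization error
    return x
  return -x

try:
  f(1.0)
except jax.errors.ConcretizationTypeError:
  print("caught via jax.errors, no private import needed")
```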
|
google/jax | 15,737 | google__jax-15737 | [
"15709"
]
| e3a9de7472af6db720726178c647c3027519b0d5 | diff --git a/jax/_src/scipy/special.py b/jax/_src/scipy/special.py
--- a/jax/_src/scipy/special.py
+++ b/jax/_src/scipy/special.py
@@ -155,12 +155,24 @@ def _xlog1py_jvp(primals, tangents):
return result, (x_dot * lax.log1p(y) + y_dot * x / (1 + y)).astype(result.dtype)
xlog1py.defjvp(_xlog1py_jvp)
+@custom_derivatives.custom_jvp
+def _xlogx(x):
+ """Compute x log(x) with well-defined derivatives."""
+ return xlogy(x, x)
+
+def _xlogx_jvp(primals, tangents):
+ x, = primals
+ x_dot, = tangents
+ return _xlogx(x), x_dot * (lax.log(x) + 1)
+_xlogx.defjvp(_xlogx_jvp)
+
+
@_wraps(osp_special.entr, module='scipy.special')
def entr(x: ArrayLike) -> Array:
x, = promote_args_inexact("entr", x)
return lax.select(lax.lt(x, _lax_const(x, 0)),
lax.full_like(x, -np.inf),
- lax.neg(xlogy(x, x)))
+ lax.neg(_xlogx(x)))
@_wraps(osp_special.multigammaln, update_doc=False)
| diff --git a/tests/lax_scipy_test.py b/tests/lax_scipy_test.py
--- a/tests/lax_scipy_test.py
+++ b/tests/lax_scipy_test.py
@@ -243,6 +243,11 @@ def testGradOfXlog1pyAtZero(self):
jtu.check_grads(lsp_special.xlog1py, (x0, y0), order=2)
+ def testGradOfEntrAtZero(self):
+ # https://github.com/google/jax/issues/15709
+ self.assertEqual(jax.jacfwd(lsp_special.entr)(0.0), jnp.inf)
+ self.assertEqual(jax.jacrev(lsp_special.entr)(0.0), jnp.inf)
+
@jtu.sample_product(
[dict(order=order, z=z, n_iter=n_iter)
for order, z, n_iter in zip(
| forward mode differentiation issue with scipy.special.xlogy
### Description
A few days ago, a [commit](https://github.com/google/jax/commit/dd023e266e6616494cdd590959103ddc646109c4) to jax.scipy.special seems to be causing my previously-working code to bomb out with NaN errors. I believe the root problem is that you've added a custom jvp to xlogy and xlog1py which need the same safety features as the original functions.
```@custom_derivatives.custom_jvp
@_wraps(osp_special.xlogy, module='scipy.special')
def xlogy(x: ArrayLike, y: ArrayLike) -> Array:
# Note: xlogy(0, 0) should return 0 according to the function documentation.
x, y = promote_args_inexact("xlogy", x, y)
x_ok = x != 0.
safe_x = jnp.where(x_ok, x, 1.)
safe_y = jnp.where(x_ok, y, 1.)
return jnp.where(x_ok, lax.mul(safe_x, lax.log(safe_y)), jnp.zeros_like(x))
def _xlogy_jvp(primals, tangents):
(x, y) = primals
(x_dot, y_dot) = tangents
result = xlogy(x, y)
return result, (x_dot * lax.log(y) + y_dot * x / y).astype(result.dtype)
xlogy.defjvp(_xlogy_jvp)
```
My code produces NaNs in _xlogy_jvp when I call scipy.special.entr(p) (which then calls xlogy(p, p)) and one of the elements of p is zero.
### What jax/jaxlib version are you using?
jax v0.4.9
### Which accelerator(s) are you using?
TPU
### Additional system info
Colab
### NVIDIA GPU info
_No response_
| Thanks for raising this, and the diagnosis!
@jakevdp is it okay to assign to you since you have some context on that commit?
@mcmozer-google IIUC $(x, y) \mapsto x \log y$ is not differentiable with respect to its second argument at $(0,0)$; indeed I don't think $0$ is in the domain of $\log$. (We may be able to say $x \mapsto x \log x$ has domain $[0, \infty)$ and further that it is differentiable on its domain (not all of $\mathbb{R}$) at $0$ with derivative $-\infty$...)
What answer were you expecting?
I think perhaps before it was incorrectly returning `0`, but isn't `nan` a better value at a point of non-differentiability (over the reals, not just nonnegative reals)?
You have a completely valid point. I am looking at it from the following perspective, which also seems valid: `jax.scipy.special.xlogy` defines `xlogy(0,y)=0` for all `y`. Also, `jax.scipy.special.entr`, which calls `xlogy`, defines `entr(0)=0`. Don't these definitions imply that the derivative of `xlogy(0,y)` is 0 for all `y` and that the derivative of `entr(0)` is 0?
From a practical perspective, the previous version of jax did treat these derivatives as 0 (thanks to `safe_x` and `safe_y`), and existing code that uses `entr` and `xlogy` may break (as happened to me). If it broke just for weird outlier cases, I wouldn't sweat it, but it seems common to need to compute entropy over a distribution where one probability is 0 (i.e., `xlogy(0,0)`).
Yes, I think directional derivatives exist (like along $(0, y)$ for $y > 0$, as you say), but there's not one linear map which works for all directions (e.g. the slope is different in the direction $(y, y)$ for $y > 0$), which is what we'd need for computing Jacobians or for reverse mode, or for forward mode if we're not willing to conditionally switch on the direction. (Since we use the same underlying `jvp` for Jacobians as well as reverse mode, we generally don't write JVP rules which depend on the tangent vector direction, though `custom_jvp` rules could.)
I think for the `xlogy` function we shouldn't define the derivative at $(0, 0)$ in general (i.e. we should keep it as a `nan`), though users can always apply their own `custom_jvp` to a wrapper to adopt any convention makes sense for their own applications.
I think the issue here may actually be with `jax.scipy.special.entr`, in that while it calls the more general function `xlogy` it actually only needs the function $x \mapsto x \log x$, which _is_ differentiable at $x=0$, though with derivative $-\infty$. Thinking about the plot of the binary entropy function, that seems to give the right answer of $\infty$ for `grad(entr)(0.)`. I can't think of a good reason to make it 0 though...
So, what if we make `grad(entr)(0.) == jnp.inf`? Would that have worked in your application, or would it still have blown something up?
`grad(entr)(0)==jnp.inf` is the right thing to do. (I convinced myself with a finite difference approximation, but I appreciate your insight in just visualizing the entropy function.) I don't believe this solution would help for my application, where I am computing `entr(softmax(logits))`, and the argument to `entr` is exactly 0 only for the unusual case where floating point precision isn't adequate. I don't understand the magic of forward differentiation, but won't reverse mode end up with `0 * jnp.inf` and result in nans anyhow? (In my application, I am happy to just zero out and lose the gradients in this unusual case. I just made a custom `entr` function that does this.) Thanks much for your patience, mattjj. | 2023-04-25T15:34:32 |
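A hypothetical sketch of the kind of custom `entr` the last comment describes: keep the primal value but zero out the derivative wherever `p` is not strictly positive. This is an application-specific choice rather than library behavior; elsewhere the derivative of `entr(p) = -p*log(p)` is `-(log(p) + 1)`:
```python
import jax
import jax.numpy as jnp
from jax.scipy.special import entr

@jax.custom_jvp
def safe_entr(p):
  return entr(p)

@safe_entr.defjvp
def _safe_entr_jvp(primals, tangents):
  (p,), (p_dot,) = primals, tangents
  safe_p = jnp.where(p > 0, p, 1.0)  # avoid log(0) in the derivative
  dentr = jnp.where(p > 0, -(jnp.log(safe_p) + 1.0), 0.0)
  return safe_entr(p), dentr * p_dot
```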
google/jax | 15,755 | google__jax-15755 | [
"15729"
]
| a51c8cc9523ea545fe91a663b14699732c6f0ba5 | diff --git a/jax/_src/pjit.py b/jax/_src/pjit.py
--- a/jax/_src/pjit.py
+++ b/jax/_src/pjit.py
@@ -1757,6 +1757,21 @@ def _resolve_wsc_args(axis_resources, shardings):
# period is finished. The deprecation period expires 3 months from Feb 13, 2023.
def with_sharding_constraint(x, shardings=UNSPECIFIED,
axis_resources=UNSPECIFIED):
+ """Mechanism to constrain the sharding of an Array inside a jitted computation
+
+ This is a strict constraint for the GSPMD partitioner and not a hint. For examples
+ of how to use this function, see `Distributed arrays and automatic parallelization`_.
+
+ Args:
+ x: PyTree of jax.Arrays which will have their shardings constrained
+ shardings: PyTree of sharding specifications. Valid values are the same as for
+ the ``in_shardings`` argument of :func:`jax.experimental.pjit`.
+ axis_resources: (deprecated) use shardings instead.
+ Returns:
+ x_with_shardings: PyTree of jax.Arrays with specified sharding constraints.
+
+ .. _Distributed arrays and automatic parallelization: https://jax.readthedocs.io/en/latest/notebooks/Distributed_arrays_and_automatic_parallelization.html
+ """
final_shardings = _resolve_wsc_args(axis_resources, shardings)
x_flat, tree = tree_flatten(x)
user_shardings, _, _ = prepare_axis_resources(
diff --git a/jax/lax/__init__.py b/jax/lax/__init__.py
--- a/jax/lax/__init__.py
+++ b/jax/lax/__init__.py
@@ -368,7 +368,7 @@
from jax._src.ad_util import stop_gradient_p as stop_gradient_p
from jax.lax import linalg as linalg
-from jax._src.pjit import with_sharding_constraint
-from jax._src.dispatch import device_put_p
+from jax._src.pjit import with_sharding_constraint as with_sharding_constraint
+from jax._src.dispatch import device_put_p as device_put_p
from math import prod # TODO(phawkins): remove this accidental export
| Missing in reference docs: `jax.lax.with_sharding_constraint`
This page should have everything in `jax.lax`: https://jax.readthedocs.io/en/latest/jax.lax.html#
but it's missing `jax.lax.with_sharding_constraint`, which is part of the public API, because it's mentioned [here](https://jax.readthedocs.io/en/latest/notebooks/Distributed_arrays_and_automatic_parallelization.html#constraining-shardings-of-intermediates-in-jitted-code)
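For the record, a minimal usage sketch of the function in question (the mesh axis name, shapes, and divisibility assumptions below are made up for illustration):
```python
import jax
import jax.numpy as jnp
import numpy as np
from jax.sharding import Mesh, NamedSharding, PartitionSpec as P

mesh = Mesh(np.array(jax.devices()), axis_names=("x",))
sharding = NamedSharding(mesh, P("x"))

@jax.jit
def f(a):
  b = a * 2
  # A strict constraint for the GSPMD partitioner, not a hint.
  b = jax.lax.with_sharding_constraint(b, sharding)
  return b + 1

# Assumes the array's leading dimension is divisible by the device count.
f(jnp.arange(8.0))
```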
| 2023-04-26T16:29:19 |
||
google/jax | 15,804 | google__jax-15804 | [
"13949"
]
| b981b4f68fd361fba6185a6abadb2d2e71104959 | diff --git a/jax/_src/interpreters/mlir.py b/jax/_src/interpreters/mlir.py
--- a/jax/_src/interpreters/mlir.py
+++ b/jax/_src/interpreters/mlir.py
@@ -1671,9 +1671,13 @@ def _layout_to_mlir_layout(minor_to_major: Optional[Sequence[int]]):
layout = np.array(minor_to_major, dtype="int64")
return ir.DenseIntElementsAttr.get(layout, type=ir.IndexType.get())
-def _aval_to_default_layout(aval):
+def _aval_to_default_layouts(aval):
+ if core.is_opaque_dtype(aval.dtype):
+ avals = aval.dtype._rules.physical_avals(aval)
+ else:
+ avals = [aval]
# Row major order is default for `NumPy`.
- return list(range(aval.ndim - 1, -1, -1))
+ return [list(range(aval.ndim - 1, -1, -1)) for aval in avals]
def emit_python_callback(
ctx: LoweringRuleContext, callback, token: Optional[Any],
@@ -1695,17 +1699,12 @@ def emit_python_callback(
[xla.aval_to_xla_shapes(op_aval) for op_aval in operand_avals])
# Handling layouts
if operand_layouts is None:
- operand_layouts = map(_aval_to_default_layout, operand_avals)
- operand_mlir_layouts = [
- _layout_to_mlir_layout(_aval_to_default_layout(layout)) if layout is None
- else _layout_to_mlir_layout(layout) for layout, aval
- in zip(operand_layouts, operand_avals)]
+ operand_layouts = util.concatenate(
+ map(_aval_to_default_layouts, operand_avals))
+ operand_mlir_layouts = map(_layout_to_mlir_layout, operand_layouts)
if result_layouts is None:
- result_layouts = map(_aval_to_default_layout, result_avals)
- result_mlir_layouts = [
- _layout_to_mlir_layout(_aval_to_default_layout(aval)) if layout is None
- else _layout_to_mlir_layout(layout) for layout, aval
- in zip(result_layouts, result_avals)]
+ result_layouts = util.concatenate(map(_aval_to_default_layouts, result_avals))
+ result_mlir_layouts = map(_layout_to_mlir_layout, result_layouts)
# First we apply checks to ensure output shapes and dtypes match the expected
# ones.
diff --git a/jax/_src/interpreters/xla.py b/jax/_src/interpreters/xla.py
--- a/jax/_src/interpreters/xla.py
+++ b/jax/_src/interpreters/xla.py
@@ -54,11 +54,15 @@ def identity(x): return x
_scalar_types = dtypes.python_scalar_dtypes.keys()
-def _make_array_shape(a: ShapedArray) -> Sequence[xc.Shape]:
- if a.dtype == dtypes.float0:
- return (xc.Shape.array_shape(np.dtype('bool'), a.shape),)
+def _make_array_shape(aval: ShapedArray) -> Sequence[xc.Shape]:
+ def dt(aval):
+ return np.dtype('bool') if aval.dtype == dtypes.float0 else aval.dtype
+
+ if core.is_opaque_dtype(aval.dtype):
+ avals = aval.dtype._rules.physical_avals(aval)
else:
- return (xc.Shape.array_shape(a.dtype, a.shape),)
+ avals = [aval]
+ return tuple(xc.Shape.array_shape(dt(a), a.shape) for a in avals)
def get_canonical_source_file(frame: source_info_util.Frame):
source_file = frame.file_name
| diff --git a/tests/host_callback_test.py b/tests/host_callback_test.py
--- a/tests/host_callback_test.py
+++ b/tests/host_callback_test.py
@@ -1908,6 +1908,15 @@ def func(x, transforms, y):
with self.assertRaisesRegex(TypeError, r"Support for \*\*kwargs in ``id_tap``"):
hcb.id_tap(func, 1, y=2)
+ def test_tap_id_tap_random_key(self):
+ # See https://github.com/google/jax/issues/13949
+ with jax.enable_custom_prng():
+ @jax.jit
+ def f(x):
+ def tap(tap_x, _): pass
+ return hcb.id_tap(tap, x, result=x)
+ f(jax.random.PRNGKey(123))
+
def test_tap_odeint(self):
# TODO: find a smaller repro for bug #4015
# Seems to be xla_call(scan(xla_call)), all under grad.
| id_tap crashes with new custom PRNG key
### Description
```python
from jax import enable_custom_prng, jit
from jax.experimental.host_callback import id_tap
from jax.random import KeyArray, PRNGKey
with enable_custom_prng():
key = PRNGKey(123)
@jit
def f(x: KeyArray) -> KeyArray:
def tap(tap_x, transforms):
print(tap_x)
return id_tap(tap, x, result=x)
f(key)
```
produces
```shell
Traceback (most recent call last):
File "/home/neil/src/tjax/a.py", line -1, in <module>
File "/home/neil/src/tjax/a.py", line -1, in f
File "/home/neil/.cache/pypoetry/virtualenvs/tjax-DRMj3ydM-py3.11/lib/python3.11/site-packages/jax/experimental/host_callback.py", line -1, in id_tap
File "/home/neil/.cache/pypoetry/virtualenvs/tjax-DRMj3ydM-py3.11/lib/python3.11/site-packages/jax/experimental/host_callback.py", line -1, in _call
jax._src.source_info_util.JaxStackTraceBeforeTransformation: TypeError: array_shape(): incompatible function arguments. The following argument types are supported:
1. (type: jaxlib.xla_extension.PrimitiveType, dims: object, layout: Optional[object] = None, dynamic_dimensions: Optional[List[bool]] = None) -> StatusOr[jaxlib.xla_extension.Shape]
2. (type: dtype, dims: object, layout: Optional[object] = None, dynamic_dimensions: Optional[List[bool]] = None) -> StatusOr[jaxlib.xla_extension.Shape]
Invoked with: key<fry>, ()
The preceding stack trace is the source of the JAX operation that, once transformed by JAX, triggered the following exception.
--------------------
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/neil/src/tjax/a.py", line 13, in <module>
f(key)
File "/home/neil/.cache/pypoetry/virtualenvs/tjax-DRMj3ydM-py3.11/lib/python3.11/site-packages/jax/_src/traceback_util.py", line 162, in reraise_with_filtered_traceback
return fun(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^
File "/home/neil/.cache/pypoetry/virtualenvs/tjax-DRMj3ydM-py3.11/lib/python3.11/site-packages/jax/_src/api.py", line 622, in cache_miss
execute = dispatch._xla_call_impl_lazy(fun_, *tracers, **params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/neil/.cache/pypoetry/virtualenvs/tjax-DRMj3ydM-py3.11/lib/python3.11/site-packages/jax/_src/dispatch.py", line 241, in _xla_call_impl_lazy
return xla_callable(fun, device, backend, name, donated_invars, keep_unused,
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/neil/.cache/pypoetry/virtualenvs/tjax-DRMj3ydM-py3.11/lib/python3.11/site-packages/jax/linear_util.py", line 303, in memoized_fun
ans = call(fun, *args)
^^^^^^^^^^^^^^^^
File "/home/neil/.cache/pypoetry/virtualenvs/tjax-DRMj3ydM-py3.11/lib/python3.11/site-packages/jax/_src/dispatch.py", line 357, in _xla_callable_uncached
computation = sharded_lowering(fun, device, backend, name, donated_invars,
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/neil/.cache/pypoetry/virtualenvs/tjax-DRMj3ydM-py3.11/lib/python3.11/site-packages/jax/_src/dispatch.py", line 348, in sharded_lowering
return pxla.lower_sharding_computation(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/neil/.cache/pypoetry/virtualenvs/tjax-DRMj3ydM-py3.11/lib/python3.11/site-packages/jax/_src/profiler.py", line 314, in wrapper
return func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
File "/home/neil/.cache/pypoetry/virtualenvs/tjax-DRMj3ydM-py3.11/lib/python3.11/site-packages/jax/interpreters/pxla.py", line 2936, in lower_sharding_computation
lowering_result = mlir.lower_jaxpr_to_module(
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/neil/.cache/pypoetry/virtualenvs/tjax-DRMj3ydM-py3.11/lib/python3.11/site-packages/jax/interpreters/mlir.py", line 707, in lower_jaxpr_to_module
lower_jaxpr_to_fun(
File "/home/neil/.cache/pypoetry/virtualenvs/tjax-DRMj3ydM-py3.11/lib/python3.11/site-packages/jax/interpreters/mlir.py", line 988, in lower_jaxpr_to_fun
out_vals, tokens_out = jaxpr_subcomp(ctx.replace(name_stack=callee_name_stack),
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/neil/.cache/pypoetry/virtualenvs/tjax-DRMj3ydM-py3.11/lib/python3.11/site-packages/jax/interpreters/mlir.py", line 1122, in jaxpr_subcomp
ans = rule(rule_ctx, *map(_unwrap_singleton_ir_values, in_nodes),
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/neil/.cache/pypoetry/virtualenvs/tjax-DRMj3ydM-py3.11/lib/python3.11/site-packages/jax/experimental/host_callback.py", line 1186, in _outside_call_lowering
results, next_token, keep_alive = mlir.emit_python_callback(ctx,
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/neil/.cache/pypoetry/virtualenvs/tjax-DRMj3ydM-py3.11/lib/python3.11/site-packages/jax/interpreters/mlir.py", line 1747, in emit_python_callback
[xla.aval_to_xla_shapes(op_aval) for op_aval in operand_avals])
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/neil/.cache/pypoetry/virtualenvs/tjax-DRMj3ydM-py3.11/lib/python3.11/site-packages/jax/interpreters/mlir.py", line 1747, in <listcomp>
[xla.aval_to_xla_shapes(op_aval) for op_aval in operand_avals])
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/neil/.cache/pypoetry/virtualenvs/tjax-DRMj3ydM-py3.11/lib/python3.11/site-packages/jax/interpreters/xla.py", line 222, in aval_to_xla_shapes
return xla_shape_handlers[type(aval)](aval)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/neil/.cache/pypoetry/virtualenvs/tjax-DRMj3ydM-py3.11/lib/python3.11/site-packages/jax/interpreters/xla.py", line 91, in _make_array_shape
return (xc.Shape.array_shape(a.dtype, a.shape),)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
jax._src.traceback_util.UnfilteredStackTrace: TypeError: array_shape(): incompatible function arguments. The following argument types are supported:
1. (type: jaxlib.xla_extension.PrimitiveType, dims: object, layout: Optional[object] = None, dynamic_dimensions: Optional[List[bool]] = None) -> StatusOr[jaxlib.xla_extension.Shape]
2. (type: dtype, dims: object, layout: Optional[object] = None, dynamic_dimensions: Optional[List[bool]] = None) -> StatusOr[jaxlib.xla_extension.Shape]
Invoked with: key<fry>, ()
The stack trace below excludes JAX-internal frames.
The preceding is the original exception that occurred, unmodified.
--------------------
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/neil/src/tjax/a.py", line 13, in <module>
f(key)
File "/home/neil/.cache/pypoetry/virtualenvs/tjax-DRMj3ydM-py3.11/lib/python3.11/site-packages/jax/experimental/host_callback.py", line 1186, in _outside_call_lowering
results, next_token, keep_alive = mlir.emit_python_callback(ctx,
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
TypeError: array_shape(): incompatible function arguments. The following argument types are supported:
1. (type: jaxlib.xla_extension.PrimitiveType, dims: object, layout: Optional[object] = None, dynamic_dimensions: Optional[List[bool]] = None) -> StatusOr[jaxlib.xla_extension.Shape]
2. (type: dtype, dims: object, layout: Optional[object] = None, dynamic_dimensions: Optional[List[bool]] = None) -> StatusOr[jaxlib.xla_extension.Shape]
Invoked with: key<fry>, ()
```
### What jax/jaxlib version are you using?
0.4.1
| Incidentally, there are a couple of stack frames that should probably be marked as belonging to JAX.
Assigning @sharadmv as the callback expert, and @froystig as the custom PRNG expert | 2023-05-01T15:19:12 |
google/jax | 15,818 | google__jax-15818 | [
"13685"
]
| a2ac510dc3ae90702f6cf8d8ed697625d9983f82 | diff --git a/jax/_src/random.py b/jax/_src/random.py
--- a/jax/_src/random.py
+++ b/jax/_src/random.py
@@ -16,7 +16,7 @@
from functools import partial
import math
from operator import index
-from typing import Optional, Sequence, Union
+from typing import Optional, Sequence, Tuple, Union
import warnings
import numpy as np
@@ -48,6 +48,7 @@
# TODO: Import or define these to match
# https://github.com/numpy/numpy/blob/main/numpy/typing/_dtype_like.py.
DTypeLikeInt = DTypeLike
+DTypeLikeUInt = DTypeLike
DTypeLikeFloat = DTypeLike
Shape = Sequence[int]
@@ -65,7 +66,7 @@ def _isnan(x: ArrayLike) -> Array:
return lax.ne(x, x)
-def _check_prng_key(key):
+def _check_prng_key(key) -> Tuple[prng.PRNGKeyArray, bool]:
# TODO(frostig): remove once we always enable_custom_prng
if isinstance(key, prng.PRNGKeyArray):
return key, False
@@ -240,6 +241,35 @@ def _check_shape(name: str, shape: Union[Shape, NamedShape], *param_shapes) -> N
raise ValueError(msg.format(name, shape_, shape))
+def bits(key: KeyArray,
+ shape: Shape = (),
+ dtype: Optional[DTypeLikeUInt] = None) -> Array:
+ """Sample uniform bits in the form of unsigned integers.
+
+ Args:
+ key: a PRNG key used as the random key.
+ shape: optional, a tuple of nonnegative integers representing the result
+ shape. Default ``()``.
+ dtype: optional, an unsigned integer dtype for the returned values (default
+ ``uint64`` if ``jax_enable_x64`` is true, otherwise ``uint32``).
+
+ Returns:
+ A random array with the specified shape and dtype.
+ """
+ key, _ = _check_prng_key(key)
+ if dtype is None:
+ dtype = dtypes.canonicalize_dtype(jnp.uint)
+ else:
+ dtypes.check_user_dtype_supported(dtype)
+ if not dtypes.issubdtype(dtype, np.unsignedinteger):
+ raise ValueError("dtype argument to `bits` must be an unsigned int dtype, "
+ f"got {dtype}")
+ dtype = dtypes.canonicalize_dtype(dtype)
+ shape = core.canonicalize_shape(shape)
+ bit_width = dtype.itemsize * 8
+ return _random_bits(key, bit_width, shape)
+
+
def uniform(key: KeyArray,
shape: Union[Shape, NamedShape] = (),
dtype: DTypeLikeFloat = dtypes.float_,
diff --git a/jax/random.py b/jax/random.py
--- a/jax/random.py
+++ b/jax/random.py
@@ -155,6 +155,7 @@
ball as ball,
bernoulli as bernoulli,
beta as beta,
+ bits as bits,
categorical as categorical,
cauchy as cauchy,
chisquare as chisquare,
| diff --git a/tests/random_test.py b/tests/random_test.py
--- a/tests/random_test.py
+++ b/tests/random_test.py
@@ -13,6 +13,7 @@
# limitations under the License.
+import enum
from functools import partial
import math
from unittest import SkipTest, skipIf
@@ -67,6 +68,11 @@ def _maybe_unwrap(key):
('unsafe_rbg', prng.unsafe_rbg_prng_impl)]
+class OnX64(enum.Enum):
+ ALSO = enum.auto()
+ SKIP = enum.auto()
+ ONLY = enum.auto()
+
class RandomValuesCase(NamedTuple):
name: str
prng_impl: str
@@ -74,7 +80,7 @@ class RandomValuesCase(NamedTuple):
dtype: Any
params: dict
expected: np.ndarray
- skip_on_x64: bool = False
+ on_x64: OnX64 = OnX64.ALSO
atol: Optional[float] = None
rtol: Optional[float] = None
@@ -97,13 +103,24 @@ def _seed(self):
_RANDOM_VALUES_CASES = [
# TODO(jakevdp) add coverage for other distributions.
RandomValuesCase("bernoulli", "threefry2x32", (5,), None, {'p': 0.5},
- np.array([False, True, True, True, False]), skip_on_x64=True),
+ np.array([False, True, True, True, False]), on_x64=OnX64.SKIP),
RandomValuesCase("bernoulli", "rbg", (5,), None, {'p': 0.5},
- np.array([True, True, True, True, True]), skip_on_x64=True),
+ np.array([True, True, True, True, True]), on_x64=OnX64.SKIP),
RandomValuesCase("beta", "threefry2x32", (5,), np.float32, {'a': 0.8, 'b': 0.9},
np.array([0.533685, 0.843179, 0.063495, 0.573444, 0.459514], dtype='float32')),
RandomValuesCase("beta", "rbg", (5,), np.float32, {'a': 0.8, 'b': 0.9},
np.array([0.841308, 0.669989, 0.731763, 0.985127, 0.022745], dtype='float32')),
+ # TODO(frostig,jakevdp) add coverage for non-threefry bits
+ RandomValuesCase("bits", "threefry2x32", (5,), np.uint8, {},
+ np.array([10, 158, 82, 54, 158], dtype='uint8')),
+ RandomValuesCase("bits", "threefry2x32", (5,), np.uint16, {},
+ np.array([6738, 38161, 50695, 57337, 61600], dtype='uint16')),
+ RandomValuesCase("bits", "threefry2x32", (5,), np.uint32, {},
+ np.array([1978747883, 4134381225, 3628107870, 689687174, 2788938207], dtype='uint32')),
+ RandomValuesCase("bits", "threefry2x32", (5,), np.uint64, {},
+ np.array([17649965731882839947, 1415307058040849897, 8282622628079774249,
+ 14024425113645909402, 2012979996110532418], dtype='uint64'),
+ on_x64=OnX64.ONLY),
RandomValuesCase("cauchy", "threefry2x32", (5,), np.float32, {},
np.array([ -0.088416, -10.169713, 3.49677, -1.18056, 0.34556], dtype='float32'), rtol=1E-5),
RandomValuesCase("cauchy", "rbg", (5,), np.float32, {},
@@ -113,9 +130,9 @@ def _seed(self):
RandomValuesCase("dirichlet", "rbg", (2,), np.float32, {'alpha': np.array([0.5, 0.6, 0.7], dtype='float32')},
np.array([[0.024769, 0.002189, 0.973041], [0.326, 0.00244, 0.67156]], dtype='float32')),
RandomValuesCase("double_sided_maxwell", "threefry2x32", (5,), np.float32, {"loc": 1, "scale": 2},
- np.array([-2.408914, -3.370437, 3.235352, -0.907734, -1.708732], dtype='float32'), skip_on_x64=True),
+ np.array([-2.408914, -3.370437, 3.235352, -0.907734, -1.708732], dtype='float32'), on_x64=OnX64.SKIP),
RandomValuesCase("double_sided_maxwell", "rbg", (5,), np.float32, {"loc": 1, "scale": 2},
- np.array([4.957495, 3.003086, 5.33935, 2.942878, -1.203524], dtype='float32'), skip_on_x64=True),
+ np.array([4.957495, 3.003086, 5.33935, 2.942878, -1.203524], dtype='float32'), on_x64=OnX64.SKIP),
RandomValuesCase("exponential", "threefry2x32", (5,), np.float32, {},
np.array([0.526067, 0.043046, 0.039932, 0.46427 , 0.123886], dtype='float32')),
RandomValuesCase("exponential", "rbg", (5,), np.float32, {},
@@ -145,9 +162,9 @@ def _seed(self):
RandomValuesCase("maxwell", "rbg", (5,), np.float32, {},
np.array([2.048746, 0.470027, 1.053105, 1.01969, 2.710645], dtype='float32')),
RandomValuesCase("multivariate_normal", "threefry2x32", (2,), np.float32, {"mean": np.ones((1, 3)), "cov": np.eye(3)},
- np.array([[ 1.067826, 1.215599, 0.234166], [-0.237534, 1.32591, 1.413987]], dtype='float32'), skip_on_x64=True),
+ np.array([[ 1.067826, 1.215599, 0.234166], [-0.237534, 1.32591, 1.413987]], dtype='float32'), on_x64=OnX64.SKIP),
RandomValuesCase("multivariate_normal", "rbg", (2,), np.float32, {"mean": np.ones((1, 3)), "cov": np.eye(3)},
- np.array([[-0.036897, 0.770969, 0.756959], [1.755091, 2.350553, 0.627142]], dtype='float32'), skip_on_x64=True),
+ np.array([[-0.036897, 0.770969, 0.756959], [1.755091, 2.350553, 0.627142]], dtype='float32'), on_x64=OnX64.SKIP),
RandomValuesCase("normal", "threefry2x32", (5,), np.float32, {},
np.array([-1.173234, -1.511662, 0.070593, -0.099764, 1.052845], dtype='float32')),
RandomValuesCase("normal", "rbg", (5,), np.float32, {},
@@ -160,9 +177,9 @@ def _seed(self):
np.array([7, 3, 6, 11, 6], dtype='int32')),
# Note: poisson not implemented for rbg sampler.
RandomValuesCase("rademacher", "threefry2x32", (5,), np.int32, {},
- np.array([-1, -1, -1, -1, 1], dtype='int32'), skip_on_x64=True),
+ np.array([-1, -1, -1, -1, 1], dtype='int32'), on_x64=OnX64.SKIP),
RandomValuesCase("rademacher", "rbg", (5,), np.int32, {},
- np.array([1, 1, 1, -1, -1], dtype='int32'), skip_on_x64=True),
+ np.array([1, 1, 1, -1, -1], dtype='int32'), on_x64=OnX64.SKIP),
RandomValuesCase("randint", "threefry2x32", (5,), np.int32, {"minval": 0, "maxval": 10},
np.array([0, 5, 7, 7, 5], dtype='int32')),
RandomValuesCase("randint", "rbg", (5,), np.int32, {"minval": 0, "maxval": 10},
@@ -328,8 +345,10 @@ def testRandomDistributionValues(self, case):
this test should involve a deprecation cycle following the procedures outlined at
https://jax.readthedocs.io/en/latest/api_compatibility.html
"""
- if config.x64_enabled and case.skip_on_x64:
+ if config.x64_enabled and case.on_x64 == OnX64.SKIP:
self.skipTest("test produces different values when jax_enable_x64=True")
+ if not config.x64_enabled and case.on_x64 == OnX64.ONLY:
+ self.skipTest("test only valid when jax_enable_x64=True")
with jax.default_prng_impl(case.prng_impl):
func = getattr(random, case.name)
key = random.PRNGKey(case._seed())
@@ -369,6 +388,13 @@ def testPRNGValues(self):
_prng_key_as_array(random.fold_in(k, 4)),
np.array([2285895361, 433833334], dtype='uint32'))
+ def test_random_bits_error(self):
+ msg = 'dtype argument .* must be an unsigned int dtype'
+ with self.assertRaisesRegex(ValueError, msg):
+ random.bits(random.PRNGKey(0), (3, 4), np.dtype('int8'))
+ with self.assertRaisesRegex(ValueError, msg):
+ random.bits(random.PRNGKey(0), (3, 4), np.dtype('float16'))
+
@skipIf(not config.jax_threefry_partitionable, 'enable after upgrade')
def test_threefry_split_fold_in_symmetry(self):
with jax.default_prng_impl('threefry2x32'):
| Add public `jax.random.random_bits` API
It's currently underscored, and is useful for implementing things like stochastic rounding.
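A short usage sketch of the public entry point the patch adds:
```python
import jax
import jax.numpy as jnp

key = jax.random.PRNGKey(0)
u32 = jax.random.bits(key, shape=(4,))                  # uint32 by default (uint64 under x64)
u8 = jax.random.bits(key, shape=(4,), dtype=jnp.uint8)  # any unsigned integer dtype works
```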
| 2023-05-02T02:18:48 |
|
google/jax | 15,920 | google__jax-15920 | [
"15869"
]
| 236c74cad77614b4bfa00d30efeeb0b733ed6a44 | diff --git a/jax/_src/lax/lax.py b/jax/_src/lax/lax.py
--- a/jax/_src/lax/lax.py
+++ b/jax/_src/lax/lax.py
@@ -3372,6 +3372,12 @@ def _select_jvp(primals, tangents):
def _select_hlo_lowering(ctx, which, *cases):
which_aval = ctx.avals_in[0]
+ aval_out, = ctx.avals_out
+
+ if core.is_opaque_dtype(aval_out.dtype):
+ return [aval_out.dtype._rules.select_mlir(
+ ctx, ctx.avals_in, aval_out, which, *cases)]
+
if which_aval.dtype == np.dtype(np.bool_):
assert len(cases) <= 2
if len(cases) == 1: return cases
diff --git a/jax/_src/prng.py b/jax/_src/prng.py
--- a/jax/_src/prng.py
+++ b/jax/_src/prng.py
@@ -578,6 +578,28 @@ def _comparison_mlir(direction, reduction_op, identity,
ne_mlir = staticmethod(partial(_comparison_mlir, 'NE', hlo.OrOp,
lax_internal._get_bitwise_or_identity))
+ @staticmethod
+ def select_mlir(ctx, avals_in, aval_out, which, *cases):
+ assert all(aval_case == aval_out for aval_case in avals_in[1:])
+ assert avals_in[0].ndim == aval_out.ndim
+ select_lower = lax_internal._select_hlo_lowering
+ key_shape = aval_out.dtype.impl.key_shape
+
+ aval_which = avals_in[0]
+ aval_which_bcast = core.ShapedArray(
+ (*aval_which.shape, *key_shape), aval_which.dtype)
+ aval_out_raw = core.ShapedArray(
+ (*aval_out.shape, *key_shape), np.dtype('uint32'))
+ aval_cases_raw = [aval_out_raw] * (len(avals_in) - 1)
+
+ bcast_dims = list(range(aval_which.ndim))
+ which_bcast = mlir.broadcast_in_dim(
+ ctx, which, aval_which_bcast, broadcast_dimensions=bcast_dims)
+
+ return mlir.delegate_lowering(ctx, select_lower, which_bcast, *cases,
+ avals_in=[aval_which_bcast, *aval_cases_raw],
+ avals_out=[aval_out_raw])[0]
+
@staticmethod
def device_put_sharded(vals, aval, sharding, devices):
physical_aval = keys_aval_to_base_arr_aval(aval)
| diff --git a/tests/lax_test.py b/tests/lax_test.py
--- a/tests/lax_test.py
+++ b/tests/lax_test.py
@@ -2925,6 +2925,24 @@ def gather_mlir(ctx, avals_in, aval_out, x, indices, *,
avals_in=[aval_x_raw, aval_indices],
avals_out=[aval_y_raw])[0]
+ @staticmethod
+ def select_mlir(ctx, avals_in, aval_out, which, *cases):
+ assert all(aval_case == aval_out for aval_case in avals_in[1:])
+ assert avals_in[0].ndim == aval_out.ndim
+ select_lower = lax_internal._select_hlo_lowering
+ aval_which = avals_in[0]
+ aval_which_bcast = core.ShapedArray(
+ (*aval_which.shape, 2), aval_which.dtype)
+ aval_out_raw = core.ShapedArray(
+ (*aval_out.shape, 2), np.dtype('uint32'))
+ aval_cases_raw = [aval_out_raw] * (len(avals_in) - 1)
+ bcast_dims = list(range(aval_which.ndim))
+ which_bcast = mlir.broadcast_in_dim(
+ ctx, which, aval_which_bcast, broadcast_dimensions=bcast_dims)
+ return mlir.delegate_lowering(ctx, select_lower, which_bcast, *cases,
+ avals_in=[aval_which_bcast, *aval_cases_raw],
+ avals_out=[aval_out_raw])[0]
+
class FooTy:
name = 'foo'
@@ -3197,6 +3215,13 @@ def test_gather(self):
self.assertIsInstance(ys, FooArray)
self.assertEqual(ys.shape, (3, 2, 1))
+ def test_select(self):
+ ks = jax.jit(lambda: make((3,)))()
+ cs = jnp.array([True, False, False])
+ ys = jax.jit(lax.select)(cs, ks, ks)
+ self.assertIsInstance(ys, FooArray)
+ self.assertEqual(ys.shape, (3,))
+
def test_xla_reverse_bug(self):
# Regression test for b/248295786
# This was an XLA bug related to an incorrect optimization of reverse
diff --git a/tests/random_test.py b/tests/random_test.py
--- a/tests/random_test.py
+++ b/tests/random_test.py
@@ -1835,6 +1835,25 @@ def test_gather(self):
self.assertIsInstance(ys, random.KeyArray)
self.assertEqual(ys.shape, (3, 2, 1))
+ @skipIf(not config.jax_enable_custom_prng,
+ 'requires config.jax_enable_custom_prng')
+ def test_select(self):
+ ks = self.make_keys(3, 2)
+ cs = jnp.array([True, False, False, True, False, True]).reshape(3, 2)
+ ys = jax.jit(lax.select)(cs, ks, ks)
+ self.assertIsInstance(ys, random.KeyArray)
+ self.assertEqual(ys.shape, (3, 2))
+
+ @skipIf(not config.jax_enable_custom_prng,
+ 'requires config.jax_enable_custom_prng')
+ def test_select2(self):
+ # See https://github.com/google/jax/issues/15869
+ def f(x):
+ keys = lax.broadcast(jax.random.PRNGKey(0), x.shape)
+ return lax.select(x, keys, keys)
+ x = jnp.array([True, False, False])
+ f(x) # doesn't crash
+
def test_device_put(self):
device = jax.devices()[0]
keys = self.make_keys(4)
| stablehlo failed to infer result types when using vmapped cond of KeyArray
Minimal repro:
```python
import jax
jax.config.update('jax_enable_custom_prng', True)
def func(x):
return jax.lax.cond(x, jax.random.PRNGKey, jax.random.PRNGKey, 0)
x = jax.numpy.array([True, False])
print(jax.vmap(func)(x))
```
Result:
```pytb
Traceback (most recent call last):
File "/Users/vanderplas/github/google/jax/jax2tf_failure.py", line 8, in <module>
print(jax.vmap(func)(x))
^^^^^^^^^^^^^^^^^
File "/Users/vanderplas/github/google/jax/jax2tf_failure.py", line 5, in func
return jax.lax.cond(x, jax.random.PRNGKey, jax.random.PRNGKey, 0)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/vanderplas/.local/share/virtualenvs/jax-LBbfM5ix/lib/python3.11/site-packages/jaxlib/mlir/dialects/_stablehlo_ops_gen.py", line 4402, in __init__
results = _ods_ir.InferTypeOpInterface(SelectOp).inferReturnTypes(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
ValueError: Failed to infer result types
```
| 2023-05-09T01:48:08 |
|
google/jax | 15,998 | google__jax-15998 | [
"15660"
]
| 7aefc9a5b46695e78cdaed39d0cb6f7494698b84 | diff --git a/jax/_src/api.py b/jax/_src/api.py
--- a/jax/_src/api.py
+++ b/jax/_src/api.py
@@ -2250,6 +2250,7 @@ def linear_transpose(fun: Callable, *primals, reduce_axes=()) -> Callable:
in_pvals = map(pe.PartialVal.unknown, in_avals)
jaxpr, out_pvals, const = pe.trace_to_jaxpr_nounits(flat_fun, in_pvals,
instantiate=True)
+ jaxpr, _ = pe.dce_jaxpr(jaxpr, [True] * len(jaxpr.outvars), True)
out_avals, _ = unzip2(out_pvals)
out_dtypes = map(dtypes.dtype, out_avals)
if not (all(dtypes.issubdtype(d, np.inexact) for d in in_dtypes + out_dtypes)
| diff --git a/tests/api_test.py b/tests/api_test.py
--- a/tests/api_test.py
+++ b/tests/api_test.py
@@ -2076,6 +2076,12 @@ def test_linear_transpose_integer(self):
expected = 6
self.assertEqual(actual, expected)
+ def test_linear_transpose_dce(self):
+ # https://github.com/google/jax/issues/15660
+ f = jit(lambda x: (2 * x, x > 0))
+ g = lambda x: f(x)[0]
+ api.linear_transpose(g, 1.)(1.)
+
def test_linear_transpose_error(self):
with self.assertRaisesRegex(
TypeError, "linear_transpose only supports"):
| `jit` + `linear_transpose` raises error due to missing DCE call
### Description
```python
import jax
def f1(x):
return 2 * x, x > 0
def g1(x):
return f1(x)[0]
jax.linear_transpose(g1, 1.)(1.) # works
@jax.jit
def f2(x):
return 2 * x, x > 0
def g2(x):
return f2(x)[0]
jax.linear_transpose(g2, 1.)(1.) # error!
# jax._src.traceback_util.UnfilteredStackTrace: NotImplementedError: Transpose rule (for reverse-mode differentiation) for 'gt' not implemented
```
It looks like `jax.linear_transpose` is missing a call to `pe.dce_jaxpr` prior to attempting the transpose.
I checked that `jax.grad` doesn't have the same issue. I assume it has a DCE call in there already somewhere.
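A quick check of that claim (a sketch using the definitions above):
```python
print(jax.grad(g2)(1.0))  # 2.0, so reverse mode evidently prunes the unused boolean output
```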
### What jax/jaxlib version are you using?
JAX 0.4.8
| 2023-05-14T01:27:05 |
|
google/jax | 16,022 | google__jax-16022 | [
"16019"
]
| ec7a939d18254d299e84032589bfc4db8858649d | diff --git a/jax/_src/random.py b/jax/_src/random.py
--- a/jax/_src/random.py
+++ b/jax/_src/random.py
@@ -567,7 +567,7 @@ def choice(key: KeyArray,
if replace:
p_cuml = jnp.cumsum(p_arr)
r = p_cuml[-1] * (1 - uniform(key, shape, dtype=p_cuml.dtype))
- ind = jnp.searchsorted(p_cuml, r)
+ ind = jnp.searchsorted(p_cuml, r).astype(int)
else:
# Gumbel top-k trick: https://timvieira.github.io/blog/post/2019/09/16/algorithms-for-sampling-without-replacement/
g = -gumbel(key, (n_inputs,), dtype=p_arr.dtype) - jnp.log(p_arr)
| diff --git a/tests/random_test.py b/tests/random_test.py
--- a/tests/random_test.py
+++ b/tests/random_test.py
@@ -754,8 +754,10 @@ def testChoice(self, dtype, input_range_or_shape, shape, replace, weighted, axis
sample = rand(key, x)
if not is_range:
self.assertEqual(dtype, sample.dtype)
- np_shape = np.shape(np_choice(x, shape or None, replace, p, axis))
- self.assertEqual(np_shape, sample.shape)
+ expected_shape = np.shape(np_choice(x, shape or None, replace, p, axis))
+ self.assertEqual(expected_shape, sample.shape)
+ expected_dtype = dtypes.result_type(int if is_range else x)
+ self.assertEqual(expected_dtype, sample.dtype)
if not replace and shape:
def lsort(x):
if not math.prod(x.shape): return x
| Inconsistent dtype output in jax.random.choice when double precision is enabled
### Description
When using double precision, `jax.random.choice` returns inconsistent dtypes. When a weight vector is used, the output dtype is set using `searchsorted` rules, i.e., based on the size of the array of weights at hand, while when none is used (uniform weights), the dtype is set using `jax.random.randint` rules (defaulting to `jnp.int_`). I believe the behaviour should be homogeneous, no matter what the choice ends up being.
```python
import jax
import jax.numpy as jnp
jax.config.update("jax_enable_x64", True)
key = jax.random.PRNGKey(42)
w = jnp.array([1/3, 1/3, 1/3])
print(jax.random.choice(key, 3).dtype)
print(jax.random.choice(key, 3, p=w).dtype)
```
In practice, I believe that the implementation choice made in https://github.com/google/jax/blob/c22302ae2e0913efd5be23e136f2ff14b520d9cf/jax/_src/numpy/lax_numpy.py#L4824 is perhaps a bit redundant. Indeed, if the config has not enabled double precision, `int64` will default to `int32` anyway, while, if it has, you were happy to pay the price in the first place.
WDYT?
### What jax/jaxlib version are you using?
jax v0.4.10
### Which accelerator(s) are you using?
CPU
### Additional system info
_No response_
### NVIDIA GPU info
_No response_
| 2023-05-16T15:52:41 |
|
google/jax | 16,046 | google__jax-16046 | [
"10818"
]
| ee14ca262808b963f4314248dd6df155160478e3 | diff --git a/jax/_src/lax/convolution.py b/jax/_src/lax/convolution.py
--- a/jax/_src/lax/convolution.py
+++ b/jax/_src/lax/convolution.py
@@ -417,11 +417,11 @@ def _conv_general_dilated_shape_rule(
def _conv_general_dilated_dtype_rule(
lhs, rhs, *, window_strides, padding, lhs_dilation, rhs_dilation,
dimension_numbers, preferred_element_type, **unused_kwargs):
- input_dtype = lax.naryop_dtype_rule(lax._input_dtype, [lax._any, lax._any],
- 'conv_general_dilated', lhs, rhs)
+ result_dtype = lax.naryop_dtype_rule(lax._input_dtype, [lax._any, lax._any],
+ 'conv_general_dilated', lhs, rhs)
if preferred_element_type is None:
- return input_dtype
- lax._validate_preferred_element_type(input_dtype, preferred_element_type)
+ return result_dtype
+ lax._validate_preferred_element_type(result_dtype, preferred_element_type)
return preferred_element_type
_conv_spec_transpose = lambda spec: (spec[1], spec[0]) + spec[2:]
diff --git a/jax/_src/lax/lax.py b/jax/_src/lax/lax.py
--- a/jax/_src/lax/lax.py
+++ b/jax/_src/lax/lax.py
@@ -2399,23 +2399,30 @@ def _bitcast_convert_type_lower(ctx, operand, *, new_dtype):
def _validate_preferred_element_type(input_dtype, preferred_element_type):
-
- if dtypes.issubdtype(input_dtype, np.integer) and dtypes.issubdtype(preferred_element_type, np.floating):
+ if (dtypes.issubdtype(input_dtype, np.integer) and
+ dtypes.issubdtype(preferred_element_type, np.floating)):
# Special-case integer->float multiply. This is allowed, and also allows
# different signedness between input and output.
pass
else:
allowed_types = (np.integer, np.floating, np.complexfloating)
- if any(dtypes.issubdtype(input_dtype, t) and not dtypes.issubdtype(preferred_element_type, t) for t in allowed_types):
- raise TypeError("Input type is incompatible with `preferred_element_type`. The compatible combinations of "
- "(input_type, preferred_element_type) are (integral, integral), (integral, floating), "
+ if any(dtypes.issubdtype(input_dtype, t) and not
+ dtypes.issubdtype(preferred_element_type, t) for t in allowed_types):
+ raise TypeError("Input type is incompatible with "
+ "`preferred_element_type`. The compatible combinations "
+ "of (input_type, preferred_element_type) are "
+ "(integral, integral), (integral, floating), "
"(floating, floating), (complex, complex.")
- if dtypes.issubdtype(input_dtype, np.signedinteger) and not dtypes.issubdtype(preferred_element_type, np.signedinteger):
- raise TypeError("`preferred_element_type` must have the same signedness as the original type.")
+ if (dtypes.issubdtype(input_dtype, np.signedinteger) and
+ not dtypes.issubdtype(preferred_element_type, np.signedinteger)):
+ raise TypeError("`preferred_element_type` must have the same signedness "
+ "as the original type.")
input_bitwidth = np.dtype(input_dtype).itemsize
preferred_bitwidth = np.dtype(preferred_element_type).itemsize
if preferred_bitwidth < input_bitwidth:
- raise TypeError("`preferred_element_type` must not be narrower than the original type.")
+ raise TypeError("`preferred_element_type` must not be narrower than the "
+ "original type.")
+
def _precision_config(precision):
if precision is not None:
@@ -2506,10 +2513,46 @@ def tuple_delete(tup, idx):
def _dot_general_dtype_rule(lhs, rhs, *, dimension_numbers, precision,
preferred_element_type: Optional[DTypeLike]):
- input_dtype = naryop_dtype_rule(_input_dtype, [_any, _any], 'dot_general', lhs, rhs)
- if preferred_element_type is None:
- return input_dtype
- _validate_preferred_element_type(input_dtype, preferred_element_type)
+ # We're mostly matching XLA's logic here, namely in shape_inference.cc and
+ # primitive_util.h's HigherPrecisionType, e.g.
+ # https://github.com/openxla/xla/blob/ea3a841768d0dcf192e5820c9b25c34c73f2226a/xla/primitive_util.h#L329
+ def type_properties(dt):
+ c = _real_dtype(dt) if dtypes.issubdtype(dt, np.complexfloating) else dt
+ return (dtypes.issubdtype(dt, np.complexfloating),
+ dtypes.finfo(c).maxexp if dtypes.issubdtype(c, np.floating) else -1,
+ dtypes.finfo(c).nmant if dtypes.issubdtype(c, np.floating) else -1,
+ _bit_width(c),
+ not dtypes.issubdtype(c, np.unsignedinteger))
+ lhs_prop, rhs_prop = type_properties(lhs.dtype), type_properties(rhs.dtype)
+ if lhs_prop > rhs_prop:
+ result_dtype = lhs.dtype
+ elif rhs_prop > lhs_prop:
+ result_dtype = rhs.dtype
+ else:
+ if lhs.dtype != rhs.dtype:
+ raise TypeError(
+ f"lax.dot_general argument type error: {lhs.dtype}, {rhs.dtype}")
+ result_dtype = lhs.dtype
+
+ return _maybe_upcast(result_dtype, preferred_element_type)
+
+def _bit_width(d):
+ if dtypes.issubdtype(d, np.inexact): return dtypes.finfo(d).bits
+ elif dtypes.issubdtype(d, np.integer): return dtypes.iinfo(d).bits
+ elif d == np.dtype('bool'): return 1
+ else: assert False, d # should be unreachable, open an issue!
+
+def _maybe_upcast(result_dtype, preferred_element_type):
+ # replicates the logic in shape_inference.cc's MaybeUpcast
+ if (preferred_element_type is None or
+ result_dtype == preferred_element_type):
+ return result_dtype
+ if (not dtypes.issubdtype(result_dtype, np.floating) and
+ _bit_width(preferred_element_type) < _bit_width(result_dtype)):
+ raise TypeError("`preferred_element_type` must not be narrower than the "
+ "original type, got preferred_element_type of "
+ f"{preferred_element_type} for result type of "
+ f"{result_dtype}.")
return preferred_element_type
def _dot_general_transpose_lhs(g, x, y, *, dimension_numbers, precision,
@@ -2526,19 +2569,24 @@ def _dot_general_transpose_lhs(g, x, y, *, dimension_numbers, precision,
dims = ((ans_y, y_kept), (ans_batch, y_batch))
x_contract_sorted_by_y = list(np.take(x_contract, np.argsort(y_contract))) # type: ignore[arg-type]
out_axes = np.argsort(list(x_batch) + x_kept + x_contract_sorted_by_y)
- return transpose(dot_general(g, y, dims, precision=precision,
- preferred_element_type=preferred_element_type),
- tuple(out_axes))
+ x_bar = transpose(dot_general(g, y, dims, precision=precision,
+ preferred_element_type=preferred_element_type),
+ tuple(out_axes))
+ if x_bar.dtype != x.aval.dtype:
+ x_bar = _convert_element_type(x_bar, x.aval.dtype, x.aval.weak_type)
+ return x_bar
def _dot_general_transpose_rhs(g, x, y, *, dimension_numbers, precision,
preferred_element_type: Optional[DTypeLike]):
(x_contract, y_contract), (x_batch, y_batch) = dimension_numbers
swapped_dimension_numbers = ((y_contract, x_contract), (y_batch, x_batch))
- return _dot_general_transpose_lhs(
+ y_bar = _dot_general_transpose_lhs(
g, y, x, dimension_numbers=swapped_dimension_numbers, precision=precision,
preferred_element_type=preferred_element_type,
swap_ans=True)
-
+ if y_bar.dtype != y.aval.dtype:
+ y_bar = _convert_element_type(y_bar, y.aval.dtype, y.aval.weak_type)
+ return y_bar
def _dot_general_batch_rule(batched_args, batch_dims, *, dimension_numbers,
precision,
@@ -2667,19 +2715,47 @@ def _dot_general_lower(ctx, lhs, rhs, *, dimension_numbers,
precision, preferred_element_type: Optional[np.dtype]):
del preferred_element_type # Implied by the output aval
lhs_aval, rhs_aval = ctx.avals_in
+ lhs_dtype, rhs_dtype = lhs_aval.dtype, rhs_aval.dtype
aval_out, = ctx.avals_out
(lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch) = dimension_numbers
+ # TODO(b/...): JAX's dot_general primitive accepts the same input dtype
+ # combinations that are accepted in XLA's shape_inference.cc (the canonical
+ # reference for the HLO type system), but actually different XLA platforms
+ # fail on codegen for different accepted cases. To handle those cases, we
+ # insert ConvertOps on the input, in a platform-dependent way.
+ if lhs_dtype != rhs_dtype:
+ if ctx.module_context.platform == "tpu":
+ handled = lambda dt: (dtypes.issubdtype(dt, np.floating) or
+ dtypes.issubdtype(dt, np.integer))
+ if not (handled(lhs_dtype) and handled(rhs_dtype)):
+ dt = mlir.dtype_to_ir_type(aval_out.dtype)
+ lhs = hlo.ConvertOp(ir.RankedTensorType.get(lhs_aval.shape, dt), lhs
+ ).result
+ rhs = hlo.ConvertOp(ir.RankedTensorType.get(rhs_aval.shape, dt), rhs
+ ).result
+ lhs_dtype = rhs_dtype = aval_out.dtype
+ else: # cpu and gpu
+ dt = mlir.dtype_to_ir_type(aval_out.dtype)
+ lhs = hlo.ConvertOp(ir.RankedTensorType.get(lhs_aval.shape, dt), lhs
+ ).result
+ rhs = hlo.ConvertOp(ir.RankedTensorType.get(rhs_aval.shape, dt), rhs
+ ).result
+ lhs_dtype = rhs_dtype = aval_out.dtype
+
# TODO(b/195364460): Work around slow XLA/CPU implementation of float16 matmul
if ctx.module_context.platform == "cpu":
- if lhs_aval.dtype == np.float16:
+ if lhs_dtype == np.float16:
f32 = mlir.dtype_to_ir_type(np.dtype(np.float32))
lhs = hlo.ConvertOp(ir.RankedTensorType.get(lhs_aval.shape, f32),
lhs).result
- if rhs_aval.dtype == np.float16:
+ lhs_dtype = np.dtype('float32')
+ if rhs_dtype == np.float16:
f32 = mlir.dtype_to_ir_type(np.dtype(np.float32))
rhs = hlo.ConvertOp(ir.RankedTensorType.get(rhs_aval.shape, f32),
rhs).result
+ rhs_dtype = np.dtype('float32')
+
dot_dnums = hlo.DotDimensionNumbers.get(
lhs_batching_dimensions=list(lhs_batch),
rhs_batching_dimensions=list(rhs_batch),
| diff --git a/tests/lax_autodiff_test.py b/tests/lax_autodiff_test.py
--- a/tests/lax_autodiff_test.py
+++ b/tests/lax_autodiff_test.py
@@ -424,6 +424,14 @@ def testDotGeneralContractAndBatchGrads(self, lhs_shape, rhs_shape, dtype,
s = str(jax.make_jaxpr(pullback)(gresult))
assert "Precision.HIGHEST" in s
+ def testDotPreferredElementType(self):
+ # https://github.com/google/jax/issues/10818
+ x = jax.numpy.ones((), jax.numpy.float16)
+ def f(x):
+ return jax.lax.dot_general(x, x, (((), ()), ((), ())),
+ preferred_element_type=jax.numpy.float32)
+ jax.jacrev(f)(x) # don't crash!
+
@jtu.sample_product(
shape=[(), (2, 3)],
dtype=float_dtypes,
diff --git a/tests/lax_test.py b/tests/lax_test.py
--- a/tests/lax_test.py
+++ b/tests/lax_test.py
@@ -976,12 +976,17 @@ def testConvInvalidPadding(self):
padding=(3, 3))
@jtu.sample_product(
- [
- dict(lhs_shape=lhs_shape, rhs_shape=rhs_shape)
- for lhs_shape in [(3,), (4, 3)]
- for rhs_shape in [(3,), (3, 6)]
- ],
- dtype=lax_test_util.all_dtypes,
+ [dict(lhs_shape=lhs_shape, rhs_shape=rhs_shape)
+ for lhs_shape in [(3,), (4, 3)] for rhs_shape in [(3,), (3, 6)]],
+ [dict(lhs_dtype=lhs_dtype, rhs_dtype=rhs_dtype)
+ for lhs_dtype, rhs_dtype in
+ itertools.chain(
+ itertools.product(lax_test_util.int_dtypes +
+ lax_test_util.float_dtypes +
+ lax_test_util.complex_dtypes +
+ lax_test_util.uint_dtypes,
+ repeat=2),
+ zip(lax_test_util.bool_dtypes, lax_test_util.bool_dtypes))],
precision=[
None,
lax.Precision.DEFAULT,
@@ -990,9 +995,9 @@ def testConvInvalidPadding(self):
(lax.Precision.DEFAULT, lax.Precision.HIGHEST),
],
)
- def testDot(self, lhs_shape, rhs_shape, dtype, precision):
+ def testDot(self, lhs_shape, rhs_shape, lhs_dtype, rhs_dtype, precision):
rng = jtu.rand_default(self.rng())
- args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
+ args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
self._CompileAndCheck(partial(lax.dot, precision=precision), args_maker)
@jtu.sample_product(
@@ -1001,7 +1006,8 @@ def testDot(self, lhs_shape, rhs_shape, dtype, precision):
[dict(dtype=d, preferred_element_type=p)
for d, p in preferred_type_combinations],
)
- def testDotPreferredElement(self, lhs_shape, rhs_shape, dtype, preferred_element_type):
+ def testDotPreferredElement(self, lhs_shape, rhs_shape, dtype,
+ preferred_element_type):
if (not config.x64_enabled and
(dtype == np.float64 or preferred_element_type == np.float64
or dtype == np.int64 or preferred_element_type == np.int64)):
@@ -1011,7 +1017,8 @@ def testDotPreferredElement(self, lhs_shape, rhs_shape, dtype, preferred_element
raise SkipTest("np.complex128 is not yet supported on TPU")
if jtu.device_under_test() == "gpu":
# TODO(b/189287598)
- raise SkipTest("dot_general with preferred_element_type returns NaN non-deterministically on GPU")
+ raise SkipTest("dot_general with preferred_element_type returns NaN "
+ "non-deterministically on GPU")
rng = jtu.rand_default(self.rng())
x = rng(lhs_shape, dtype)
y = rng(rhs_shape, dtype)
| Type error in reverse-mode AD of `lax.dot_general` with `preferred_element_type`
Colab:
https://colab.research.google.com/gist/romanngg/64bae6efa7d4d364c68cd341f1be8c85/reverse_mode_dot_general_fail.ipynb
```python
import jax, jax.numpy as np
x = np.ones((), np.float16)
def f(x):
return jax.lax.dot_general(x, x, (((), ()), ((), ())), preferred_element_type=np.float32)
jax.jacrev(f)(x)
```
causes
```
TypeError: lax.dot_general requires arguments to have the same dtypes, got float32, float16.
```
whereas the forward pass and forward-mode AD work.
| +1, any updates?
```
x = jnp.einsum(
"blhd,bmd->bhlm",
q,
k,
preferred_element_type=jnp.float32
)
```
where `q` and `k` are bf16, crashes on the backward pass with
```TypeError: lax.dot_general requires arguments to have the same dtypes, got float32, bfloat16.```
You can work around this by calling `q.astype(jnp.float32)` and `k.astype(jnp.float32)` before the einsum.
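For concreteness, here is a minimal sketch of that workaround (the shapes and the helper name are invented for the example; the einsum string is taken from the snippet above):
```python
import jax.numpy as jnp

def attention_scores(q, k):
    # Hypothetical shapes: q is (b, l, h, d) bf16, k is (b, m, d) bf16.
    # Casting up front sidesteps the mixed-dtype error in the backward pass.
    q32 = q.astype(jnp.float32)
    k32 = k.astype(jnp.float32)
    return jnp.einsum("blhd,bmd->bhlm", q32, k32)
```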
Thanks for the ping!
This should be easy to fix once we figure out the logic to implement. But I'm unsure of the logic we want. Maybe you can tell me!
Say we transpose (essentially, reverse-mode differentiate) a dot of the form
```
z:f32[3,5] = dot_general [preferred_element_type=f32] x:bf16[3,4] y:bf16[4,5]
```
with respect to its second argument, where the contraction is happening over the axes of the same size. That means we need to take a `z_bar:f32[3,5]` and an `x:bf16[3,4]` and ultimately produce a `y_bar:bf16[4,5]`.
At one point, probably in the ancient past, I think HLO didn't accept `dot_general` applications with different element types. Let's say that constraint is not present. Here are two different things we could transpose the above to:
```
# Option 1: change the preferred_element_type, so accumulation (and output) is now in bf16
y_bar:bf16[4,5] = dot_general [preferred_element_type=bf16] x:bf16[3,4] z_bar:f32[3,5]
# Option 2: keep the preferred_element_type (so accum is in f32), do a cast afterward
y_bar_temp:f32[4,5] = dot_general [preferred_element_type=f32] x:bf16[3,4] z_bar:f32[3,5]
y_bar:bf16[4,5] = convert_element_type[new_dtype=bf16] y_bar_temp:f32[4,5]
```
Is it clear to you which one of those is better? Is it ambiguous?
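For concreteness, a rough sketch of what Option 2 would look like for the `y_bar` computation above; this is only an illustration, and it assumes `dot_general` accepts mixed input dtypes (which is part of what the eventual fix allows):
```python
import jax.numpy as jnp
from jax import lax

def y_bar_option2(x_bf16, z_bar_f32):  # x: bf16[3,4], z_bar: f32[3,5]
    # Contract over the shared axis of size 3, accumulating in f32 ...
    y_bar_f32 = lax.dot_general(x_bf16, z_bar_f32, (((0,), (0,)), ((), ())),
                                preferred_element_type=jnp.float32)
    # ... then cast back to the primal dtype of y.
    return y_bar_f32.astype(jnp.bfloat16)  # bf16[4,5]
```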
I think `preferred_element_type` doesn't always mean that accumulation happens in that precision (e.g. accumulation of bf16 matmuls on TPU is always in f32)
But I asked an LLM and that's what it told me!
Just kidding, I was going off of our docs [here](https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.conv_with_general_padding.html):
> preferred_element_type (Optional[Any]) – Optional. Either None, which means the default accumulation type for the input types, or a datatype, indicating to accumulate results to and return a result with that datatype.
Maybe our docs are wrong (EDIT: or I'm misunderstanding them).
It should be just as fast to cast the inputs to f32, right? Wouldn't XLA do some magic optimization to avoid materializing the f32 input in HBM?
That seems likely. But I'm not familiar enough (with what XLA does, on every backend) to know the best JAX-level policy (or if it even matters given XLA magic).
Let me ask the wizard Blake...
Blake and his fellow wizard @hawkinsp advised! TL;DR we will try Option 2 above.
I'll make a fix for this today, unless it's more urgent and you need it ASAP.
it is not super urgent. thank you for your help! | 2023-05-17T21:27:04 |
google/jax | 16,068 | google__jax-16068 | [
"16066"
]
| bb775c7ce1cce36fd3858406a5614cad8c8ab2c3 | diff --git a/jax/_src/config.py b/jax/_src/config.py
--- a/jax/_src/config.py
+++ b/jax/_src/config.py
@@ -742,7 +742,7 @@ def update_thread_local_jit_state(**kw):
'option is set, the log level is WARNING; otherwise the level is '
'DEBUG.'))
-log_compiles = config.define_bool_state(
+log_checkpoint_residuals = config.define_bool_state(
name='jax_log_checkpoint_residuals',
default=False,
help=('Log a message every time jax.checkpoint (aka jax.remat) is '
| Potential typo for `jax.log_compiles`
### Description
`jax.log_compiles()` currently sets the flag for `jax_log_checkpoint_residuals` instead of `jax_log_compiles`, which looks like a typo.
This looks like a copy-paste error introduced by 6b4262d9f6ee36aea16ba7cfac639904b499661c.
https://github.com/google/jax/blob/9da52e890537c507b4d559ee6e3d8e8358e77f3f/jax/_src/config.py#L737-L750
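A small sketch of how the mix-up shows up at runtime on the affected version (the flag attributes used here are an assumption based on the config snippet above):
```python
import jax

with jax.log_compiles():
    # Because of the copy-paste error, the wrong boolean state is toggled.
    print(jax.config.jax_log_compiles)              # expected True, stays False
    print(jax.config.jax_log_checkpoint_residuals)  # unexpectedly True
```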
### What jax/jaxlib version are you using?
jax v0.4.10, jaxlib v0.4.10
### Which accelerator(s) are you using?
_No response_
### Additional system info
_No response_
### NVIDIA GPU info
_No response_
| 2023-05-19T14:07:41 |
||
google/jax | 16,209 | google__jax-16209 | [
"16208"
]
| ae9d1498e5f31e2506f53c6f22233c7d856f2c63 | diff --git a/jax/_src/nn/functions.py b/jax/_src/nn/functions.py
--- a/jax/_src/nn/functions.py
+++ b/jax/_src/nn/functions.py
@@ -317,7 +317,10 @@ def log_softmax(x: Array,
shifted = x - lax.stop_gradient(x_max)
shifted_logsumexp = jnp.log(
jnp.sum(jnp.exp(shifted), axis, where=where, keepdims=True))
- return shifted - shifted_logsumexp
+ result = shifted - shifted_logsumexp
+ if where is not None:
+ return jnp.where(where, result, -jnp.inf)
+ return result
# TODO(phawkins): this jit was found to change numerics in a test. Debug this.
@@ -357,7 +360,10 @@ def _softmax(
initial: Optional[Array] = None) -> Array:
x_max = jnp.max(x, axis, where=where, initial=initial, keepdims=True)
unnormalized = jnp.exp(x - x_max)
- return unnormalized / jnp.sum(unnormalized, axis, where=where, keepdims=True)
+ result = unnormalized / jnp.sum(unnormalized, axis, where=where, keepdims=True)
+ if where is not None:
+ result = jnp.where(where, result, 0)
+ return result
@_softmax.defjvp
def _softmax_jvp(axis, primals, tangents):
@@ -368,7 +374,10 @@ def _softmax_jvp(axis, primals, tangents):
def _softmax_deprecated(x, axis, where, initial):
x_max = jnp.max(x, axis, where=where, initial=initial, keepdims=True)
unnormalized = jnp.exp(x - lax.stop_gradient(x_max))
- return unnormalized / jnp.sum(unnormalized, axis, where=where, keepdims=True)
+ result = unnormalized / jnp.sum(unnormalized, axis, where=where, keepdims=True)
+ if where is not None:
+ result = jnp.where(where, result, 0)
+ return result
@partial(jax.jit, static_argnames=("axis",))
| diff --git a/tests/nn_test.py b/tests/nn_test.py
--- a/tests/nn_test.py
+++ b/tests/nn_test.py
@@ -133,13 +133,12 @@ def testHardTanhMemory(self):
def testSoftmaxWhereMask(self, fn):
x = jnp.array([5.5, 1.3, -4.2, 0.9])
m = jnp.array([True, False, True, True])
- x_filtered = jnp.take(x, jnp.array([0, 2, 3]))
- out_masked = jnp.take(
- fn(x, where=m, initial=-jnp.inf), jnp.array([0, 2, 3]))
- out_filtered = fn(x_filtered)
+ out = fn(x, where=m, initial=-jnp.inf)
+ self.assertAllClose(out[m], fn(x[m]))
- self.assertAllClose(out_masked, out_filtered)
+ probs = out if fn is nn.softmax else jnp.exp(out)
+ self.assertAllClose(probs.sum(), 1.0)
# TODO(mattjj): include log_softmax in these extra tests if/when we add a
# custom_jvp rule for it (since otherwise it doesn't pass the numerical
| Bug with masking in jax.nn.log_softmax
### Description
I recently wanted to use jax.nn.log_softmax together with a mask (where = mask).
https://jax.readthedocs.io/en/latest/_autosummary/jax.nn.log_softmax.html
When I masked out some output probabilities from the log_softmax, I found that the masked-out entries did not have zero probability.
The problem can be reproduced with the following example, which also includes the code I used as a workaround.
```
import jax
import jax.numpy as jnp
import numpy as np

def log_softmax(logits, mask, initial=jnp.finfo(jnp.float32).min):
masked_logits = jnp.where(mask, logits, initial * jnp.ones_like(logits))
max_logits = jax.lax.stop_gradient(jnp.max(masked_logits, axis=-1))
shifted = masked_logits - max_logits[:, jnp.newaxis]
shifted_logsumexp = jnp.log( jnp.sum(jnp.exp(shifted), axis = -1, keepdims=True))
res = shifted - shifted_logsumexp
return res
def log_softmax_bug():
initial = jnp.finfo(jnp.float32).min
key = jax.random.PRNGKey(0)
key, subkey = jax.random.split(key)
shape = (3,10)
logits = jax.random.normal(subkey, shape= shape)
key, subkey = jax.random.split(key)
mask = jax.random.randint(key, (3,10), 0, 2)
masked_log_probs = jax.nn.log_softmax(logits, where = mask, initial = initial)
my_masked_log_probs = log_softmax(logits, mask, initial = initial)
print("jax nn probs", np.exp(masked_log_probs))
print("sum probs",np.sum(np.exp(masked_log_probs), axis=-1))
print("my probs", np.exp(my_masked_log_probs))
print("sum probs",np.sum(np.exp(my_masked_log_probs), axis = -1))
```
The output of the code is shown below; it is clear that the probabilities produced via jax.nn.log_softmax do not sum to one.
```
jax nn probs [[0.65082145 0.12884209 0.09259639 0.0914019 0.01950807 0.11102043
0.03726786 0.07620961 0.06663671 0.10840429]
[0.21390831 0.28618205 0.34040952 0.12929024 0.2339981 0.6408521
0.18717511 0.0577253 0.15870889 0.2091582 ]
[0.22656752 0.26329792 0.39771214 0.15017666 0.04520763 0.02898957
0.09993661 0.41019788 0.11462383 0.11412007]]
sum probs [1.3827088 2.4574077 1.85083 ]
my probs [[0.65082145 0. 0.09259639 0.0914019 0.01950807 0.
0.03726786 0. 0. 0.10840429]
[0. 0. 0.34040952 0. 0.2339981 0.
0. 0.0577253 0.15870889 0.2091582 ]
[0.22656752 0.26329792 0. 0. 0. 0.
0.09993661 0.41019788 0. 0. ]
sum probs [0.99999994 0.99999994 0.99999994]
```
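For reference, the intended behavior (which the eventual fix implements) is that masked-out entries get probability 0, i.e. log-probability -inf, while the remaining entries renormalize. A small check along those lines, reusing made-up values of the same form as above:
```python
import jax.numpy as jnp
from jax import nn

x = jnp.array([5.5, 1.3, -4.2, 0.9])
m = jnp.array([True, False, True, True])

probs = jnp.exp(nn.log_softmax(x, where=m, initial=-jnp.inf))
# After the fix: probs[1] is ~0 and probs.sum() is ~1.0
```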
### What jax/jaxlib version are you using?
jax v0.3.13; jaxlib v0.3.10+cuda11.cudnn82
### Which accelerator(s) are you using?
GPU
### Additional system info
Linux
### NVIDIA GPU info
_No response_
| Thanks for the report! It looks like `softmax` has the same issue. | 2023-06-01T08:14:46 |
google/jax | 16,217 | google__jax-16217 | [
"15773"
]
| e99045381de5e159f52594a49ba95c5944d341af | diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -3712,35 +3712,50 @@ def argpartition(a: ArrayLike, kth: int, axis: int = -1) -> Array:
@partial(jit, static_argnums=(2,))
-def _roll(a, shift, axis):
- a_shape = shape(a)
- if axis is None:
- return lax.reshape(_roll(ravel(a), shift, axis=0), a_shape)
- shift = asarray(shift)
- a_ndim = len(a_shape)
- axis = np.asarray(axis)
- b_shape = lax.broadcast_shapes(shift.shape, axis.shape, (1,))
+def _roll_dynamic(a: Array, shift: Array, axis: Sequence[int]) -> Array:
+ b_shape = lax.broadcast_shapes(shift.shape, np.shape(axis))
if len(b_shape) != 1:
msg = "'shift' and 'axis' arguments to roll must be scalars or 1D arrays"
raise ValueError(msg)
for x, i in zip(broadcast_to(shift, b_shape),
np.broadcast_to(axis, b_shape)):
- i = _canonicalize_axis(i, a_ndim)
- a_shape_i = array(a_shape[i], dtype=np.int32)
+ a_shape_i = array(a.shape[i], dtype=np.int32)
x = ufuncs.remainder(lax.convert_element_type(x, np.int32),
- lax.max(a_shape_i, np.int32(1)))
- a = lax.concatenate((a, a), i)
- a = lax.dynamic_slice_in_dim(a, a_shape_i - x, a_shape[i], axis=i)
+ lax.max(a_shape_i, np.int32(1)))
+ a_concat = lax.concatenate((a, a), i)
+ a = lax.dynamic_slice_in_dim(a_concat, a_shape_i - x, a.shape[i], axis=i)
return a
+@partial(jit, static_argnums=(1, 2))
+def _roll_static(a: Array, shift: Sequence[int], axis: Sequence[int]) -> Array:
+ for ax, s in zip(*np.broadcast_arrays(axis, shift)):
+ if a.shape[ax] == 0:
+ continue
+ i = (-s) % a.shape[ax]
+ a = lax.concatenate([lax.slice_in_dim(a, i, a.shape[ax], axis=ax),
+ lax.slice_in_dim(a, 0, i, axis=ax)],
+ dimension=ax)
+ return a
@util._wraps(np.roll)
-def roll(a, shift, axis: Optional[Union[int, Sequence[int]]] = None):
- util.check_arraylike("roll", a,)
- if isinstance(axis, list):
- axis = tuple(axis)
- return _roll(a, shift, axis)
+def roll(a: ArrayLike, shift: Union[ArrayLike, Sequence[int]],
+ axis: Optional[Union[int, Sequence[int]]] = None) -> Array:
+ util.check_arraylike("roll", a)
+ arr = asarray(a)
+ if axis is None:
+ return roll(arr.ravel(), shift, 0).reshape(arr.shape)
+ axis = _ensure_index_tuple(axis)
+ axis = tuple(_canonicalize_axis(ax, arr.ndim) for ax in axis)
+ if not core.is_constant_shape(arr.shape):
+ # TODO(necula): support static roll for polymorphic shapes.
+ return _roll_dynamic(arr, asarray(shift), axis)
+ try:
+ shift = _ensure_index_tuple(shift)
+ except TypeError:
+ return _roll_dynamic(arr, asarray(shift), axis)
+ else:
+ return _roll_static(arr, shift, axis)
@util._wraps(np.rollaxis, lax_description=_ARRAY_VIEW_DOC)
| jnp.roll should use a simpler approach for static shifts
Current implementation:
```python
>>> jax.make_jaxpr(lambda x: jnp.roll(x, 2))(x)
{ lambda ; a:i32[10]. let
b:i32[10] = pjit[
jaxpr={ lambda ; c:i32[10] d:i32[]. let
e:i32[10] = pjit[
jaxpr={ lambda ; f:i32[10] g:i32[]. let
h:i32[1] = broadcast_in_dim[broadcast_dimensions=() shape=(1,)] g
i:i32[1] = slice[
limit_indices=(1,)
start_indices=(0,)
strides=(1,)
] h
j:i32[] = squeeze[dimensions=(0,)] i
k:i32[] = convert_element_type[new_dtype=int32 weak_type=False] j
l:i32[] = max 10 1
m:i32[] = pjit[
jaxpr={ lambda ; n:i32[] o:i32[]. let
p:bool[] = eq o 0
q:i32[] = pjit[
jaxpr={ lambda ; r:bool[] s:i32[] t:i32[]. let
u:i32[] = select_n r t s
in (u,) }
name=_where
] p 1 o
v:i32[] = rem n q
w:bool[] = ne v 0
x:bool[] = lt v 0
y:bool[] = lt q 0
z:bool[] = ne x y
ba:bool[] = and z w
bb:i32[] = add v q
bc:i32[] = select_n ba v bb
in (bc,) }
name=remainder
] k l
bd:i32[20] = concatenate[dimension=0] f f
be:i32[] = sub 10 m
bf:bool[] = lt be 0
bg:i32[] = add be 20
bh:i32[] = select_n bf be bg
bi:i32[10] = dynamic_slice[slice_sizes=(10,)] bd bh
in (bi,) }
name=_roll
] c d
in (e,) }
name=_roll
] a 2
in (b,) }
```
A more streamlined implementation might look something like this:
```python
>>> jax.make_jaxpr(lambda x: jnp.concatenate([x[-2:], x[:-2]]))(x)
{ lambda ; a:i32[10]. let
b:i32[2] = dynamic_slice[slice_sizes=(2,)] a 8
c:i32[8] = dynamic_slice[slice_sizes=(8,)] a 0
d:i32[10] = concatenate[dimension=0] b c
in (d,) }
```
| Thinking about it, maybe we should just compute the `roll` via a single `gather` over appropriate indices. I think that would be a better implementation in all cases
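For concreteness, a one-gather version of that idea in 1-D (a sketch only; axis handling and multi-dimensional shifts are omitted):
```python
import jax.numpy as jnp

def roll_gather_1d(x, shift):
    # out[i] = x[(i - shift) % n], i.e. the whole roll is a single gather.
    idx = (jnp.arange(x.shape[0]) - shift) % x.shape[0]
    return x[idx]

roll_gather_1d(jnp.arange(4), 1)  # [3, 0, 1, 2], matching jnp.roll
```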
An important consideration here is what can be lowered into efficient distributed computation by XLA SPMD.
If you look at the SPMD code generated for the existing (dynamic slice) or gather implementations of roll, they are implemented using all-to-all communication (all reduce) rather than nearest neighbor communication (collective permute):
```python
import jax
import jax.numpy as jnp
import chex
import numpy as np
chex.set_n_cpu_devices(4)
def roll_existing(x):
return jnp.roll(x, +1)
def roll_concat(x):
return jnp.concatenate([x[-1:], x[:-1]])
def roll_gather(x):
indices = np.concatenate([[x.size - 1], np.arange(x.size - 1)])
return x.at[indices].get(unique_indices=True)
np.testing.assert_array_equal(roll_concat(jnp.arange(1, 5)), roll_existing(jnp.arange(1, 5)))
np.testing.assert_array_equal(roll_gather(jnp.arange(1, 5)), roll_existing(jnp.arange(1, 5)))
mesh = jax.sharding.Mesh(jax.devices(), 'i')
spec = jax.sharding.PartitionSpec('i')
sharding = jax.sharding.NamedSharding(mesh, spec)
x = jax.device_put(jnp.arange(16), sharding)
print(jax.jit(roll_existing).lower(x).compile().as_text())
print("-" * 80 + "\n")
print(jax.jit(roll_concat).lower(x).compile().as_text())
print("-" * 80 + "\n")
print(jax.jit(roll_gather).lower(x).compile().as_text())
```
<details>
```
HloModule jit_roll_existing, entry_computation_layout={(s32[4]{0})->s32[16]{0}}, allow_spmd_sharding_propagation_to_output={true}
%add (x: s32[], y: s32[]) -> s32[] {
%x = s32[] parameter(0)
%y = s32[] parameter(1)
ROOT %add.5 = s32[] add(s32[] %x, s32[] %y)
}
%add.1 (x.1: s32[], y.1: s32[]) -> s32[] {
%x.1 = s32[] parameter(0)
%y.1 = s32[] parameter(1)
ROOT %add.10 = s32[] add(s32[] %x.1, s32[] %y.1)
}
%fused_computation (param_0.1: u32[], param_1.2: u32[], param_2.5: s32[32]) -> s32[32] {
%constant.62 = s32[] constant(0), metadata={op_name="jit(roll_existing)/jit(main)/jit(_roll)/jit(_roll)/concatenate[dimension=0]" source_file="<ipython-input-26-e19688a73297>" source_line=9}
%broadcast.2 = s32[32]{0} broadcast(s32[] %constant.62), dimensions={}
%param_2.5 = s32[32]{0} parameter(2)
%param_0.1 = u32[] parameter(0)
%convert.2 = s32[] convert(u32[] %param_0.1), metadata={op_name="jit(roll_existing)/jit(main)/jit(_roll)/jit(_roll)/concatenate[dimension=0]" source_file="<ipython-input-26-e19688a73297>" source_line=9}
%constant.61 = s32[] constant(8), metadata={op_name="jit(roll_existing)/jit(main)/jit(_roll)/jit(_roll)/concatenate[dimension=0]" source_file="<ipython-input-26-e19688a73297>" source_line=9}
%multiply.7 = s32[] multiply(s32[] %convert.2, s32[] %constant.61), metadata={op_name="jit(roll_existing)/jit(main)/jit(_roll)/jit(_roll)/concatenate[dimension=0]" source_file="<ipython-input-26-e19688a73297>" source_line=9}
%dynamic-slice.8 = s32[8]{0} dynamic-slice(s32[32]{0} %param_2.5, s32[] %multiply.7), dynamic_slice_sizes={8}, metadata={op_name="jit(roll_existing)/jit(main)/jit(_roll)/jit(_roll)/concatenate[dimension=0]" source_file="<ipython-input-26-e19688a73297>" source_line=9}
%param_1.2 = u32[] parameter(1)
%constant.60 = u32[] constant(4)
%multiply.6 = u32[] multiply(u32[] %param_1.2, u32[] %constant.60)
%add.11 = u32[] add(u32[] %multiply.6, u32[] %param_0.1)
%constant.59 = u32[] constant(8)
%multiply.5 = u32[] multiply(u32[] %add.11, u32[] %constant.59)
ROOT %dynamic-update-slice.3 = s32[32]{0} dynamic-update-slice(s32[32]{0} %broadcast.2, s32[8]{0} %dynamic-slice.8, u32[] %multiply.5)
}
%fused_computation.1 (param_0.3: s32[4], param_1.6: u32[]) -> s32[32] {
%constant.65 = s32[] constant(0), metadata={op_name="jit(roll_existing)/jit(main)/jit(_roll)/jit(_roll)/concatenate[dimension=0]" source_file="<ipython-input-26-e19688a73297>" source_line=9}
%broadcast.3 = s32[32]{0} broadcast(s32[] %constant.65), dimensions={}
%param_0.3 = s32[4]{0} parameter(0)
%param_1.6 = u32[] parameter(1)
%convert.3 = s32[] convert(u32[] %param_1.6), metadata={op_name="jit(roll_existing)/jit(main)/jit(_roll)/jit(_roll)/concatenate[dimension=0]" source_file="<ipython-input-26-e19688a73297>" source_line=9}
%constant.64 = s32[] constant(4), metadata={op_name="jit(roll_existing)/jit(main)/jit(_roll)/jit(_roll)/concatenate[dimension=0]" source_file="<ipython-input-26-e19688a73297>" source_line=9}
%multiply.8 = s32[] multiply(s32[] %convert.3, s32[] %constant.64), metadata={op_name="jit(roll_existing)/jit(main)/jit(_roll)/jit(_roll)/concatenate[dimension=0]" source_file="<ipython-input-26-e19688a73297>" source_line=9}
%dynamic-update-slice.5 = s32[32]{0} dynamic-update-slice(s32[32]{0} %broadcast.3, s32[4]{0} %param_0.3, s32[] %multiply.8), metadata={op_name="jit(roll_existing)/jit(main)/jit(_roll)/jit(_roll)/concatenate[dimension=0]" source_file="<ipython-input-26-e19688a73297>" source_line=9}
%constant.63 = s32[] constant(16), metadata={op_name="jit(roll_existing)/jit(main)/jit(_roll)/jit(_roll)/concatenate[dimension=0]" source_file="<ipython-input-26-e19688a73297>" source_line=9}
%add.12 = s32[] add(s32[] %multiply.8, s32[] %constant.63), metadata={op_name="jit(roll_existing)/jit(main)/jit(_roll)/jit(_roll)/concatenate[dimension=0]" source_file="<ipython-input-26-e19688a73297>" source_line=9}
ROOT %dynamic-update-slice.4 = s32[32]{0} dynamic-update-slice(s32[32]{0} %dynamic-update-slice.5, s32[4]{0} %param_0.3, s32[] %add.12), metadata={op_name="jit(roll_existing)/jit(main)/jit(_roll)/jit(_roll)/concatenate[dimension=0]" source_file="<ipython-input-26-e19688a73297>" source_line=9}
}
ENTRY %main.41_spmd (param: s32[4]) -> s32[16] {
%partition-id = u32[] partition-id(), metadata={op_name="jit(roll_existing)/jit(main)/jit(_roll)/jit(_roll)/concatenate[dimension=0]" source_file="<ipython-input-26-e19688a73297>" source_line=9}
%replica-id = u32[] replica-id()
%param = s32[4]{0} parameter(0), sharding={devices=[4]0,1,2,3}
%fusion.1 = s32[32]{0} fusion(s32[4]{0} %param, u32[] %partition-id), kind=kLoop, calls=%fused_computation.1, metadata={op_name="jit(roll_existing)/jit(main)/jit(_roll)/jit(_roll)/concatenate[dimension=0]" source_file="<ipython-input-26-e19688a73297>" source_line=9}
%all-reduce = s32[32]{0} all-reduce(s32[32]{0} %fusion.1), channel_id=1, replica_groups={{0}}, to_apply=%add, metadata={op_name="jit(roll_existing)/jit(main)/jit(_roll)/jit(_roll)/concatenate[dimension=0]" source_file="<ipython-input-26-e19688a73297>" source_line=9}
%fusion = s32[32]{0} fusion(u32[] %partition-id, u32[] %replica-id, s32[32]{0} %all-reduce), kind=kLoop, calls=%fused_computation
%all-reduce.1 = s32[32]{0} all-reduce(s32[32]{0} %fusion), channel_id=2, replica_groups={{0,1,2,3}}, use_global_device_ids=true, to_apply=%add.1
ROOT %slice = s32[16]{0} slice(s32[32]{0} %all-reduce.1), slice={[15:31]}, metadata={op_name="jit(roll_existing)/jit(main)/jit(_roll)/jit(_roll)/dynamic_slice[slice_sizes=(16,)]" source_file="<ipython-input-26-e19688a73297>" source_line=9}
}
--------------------------------------------------------------------------------
HloModule jit_roll_concat, entry_computation_layout={(s32[4]{0})->s32[4]{0}}, allow_spmd_sharding_propagation_to_output={true}
%fused_computation (param_0: s32[1], param_1.1: s32[4]) -> s32[4] {
%param_0 = s32[1]{0} parameter(0)
%param_1.1 = s32[4]{0} parameter(1)
%slice.4 = s32[3]{0} slice(s32[4]{0} %param_1.1), slice={[0:3]}, metadata={op_name="jit(roll_concat)/jit(main)/concatenate[dimension=0]" source_file="<ipython-input-26-e19688a73297>" source_line=12}
ROOT %concatenate.1 = s32[4]{0} concatenate(s32[1]{0} %param_0, s32[3]{0} %slice.4), dimensions={0}, metadata={op_name="jit(roll_concat)/jit(main)/concatenate[dimension=0]" source_file="<ipython-input-26-e19688a73297>" source_line=12}
}
ENTRY %main.5_spmd (param: s32[4]) -> s32[4] {
%param = s32[4]{0} parameter(0), sharding={devices=[4]0,1,2,3}
%slice = s32[1]{0} slice(s32[4]{0} %param), slice={[3:4]}, metadata={op_name="jit(roll_concat)/jit(main)/concatenate[dimension=0]" source_file="<ipython-input-26-e19688a73297>" source_line=12}
%collective-permute = s32[1]{0} collective-permute(s32[1]{0} %slice), channel_id=1, source_target_pairs={{0,1},{1,2},{2,3},{3,0}}, metadata={op_name="jit(roll_concat)/jit(main)/concatenate[dimension=0]" source_file="<ipython-input-26-e19688a73297>" source_line=12}
ROOT %fusion = s32[4]{0} fusion(s32[1]{0} %collective-permute, s32[4]{0} %param), kind=kLoop, calls=%fused_computation, metadata={op_name="jit(roll_concat)/jit(main)/concatenate[dimension=0]" source_file="<ipython-input-26-e19688a73297>" source_line=12}
}
--------------------------------------------------------------------------------
HloModule jit_roll_gather, entry_computation_layout={(s32[4]{0})->s32[16]{0}}, allow_spmd_sharding_propagation_to_output={true}
%add.1 (x.1: s32[], y.1: s32[]) -> s32[] {
%x.1 = s32[] parameter(0)
%y.1 = s32[] parameter(1)
ROOT %add = s32[] add(s32[] %x.1, s32[] %y.1)
}
%fused_computation (param_0.1: s32[4], param_1.4: u32[]) -> s32[16] {
%constant.14 = s32[16,1]{1,0} constant({...})
%param_1.4 = u32[] parameter(1)
%convert.1 = s32[] convert(u32[] %param_1.4), metadata={op_name="jit(roll_gather)/jit(main)/gather[dimension_numbers=GatherDimensionNumbers(offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)) slice_sizes=(1,) unique_indices=True indices_are_sorted=False mode=GatherScatterMode.PROMISE_IN_BOUNDS fill_value=None]" source_file="<ipython-input-26-e19688a73297>" source_line=16}
%constant.13 = s32[] constant(4), metadata={op_name="jit(roll_gather)/jit(main)/gather[dimension_numbers=GatherDimensionNumbers(offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)) slice_sizes=(1,) unique_indices=True indices_are_sorted=False mode=GatherScatterMode.PROMISE_IN_BOUNDS fill_value=None]" source_file="<ipython-input-26-e19688a73297>" source_line=16}
%multiply.2 = s32[] multiply(s32[] %convert.1, s32[] %constant.13), metadata={op_name="jit(roll_gather)/jit(main)/gather[dimension_numbers=GatherDimensionNumbers(offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)) slice_sizes=(1,) unique_indices=True indices_are_sorted=False mode=GatherScatterMode.PROMISE_IN_BOUNDS fill_value=None]" source_file="<ipython-input-26-e19688a73297>" source_line=16}
%broadcast.9 = s32[16,1]{1,0} broadcast(s32[] %multiply.2), dimensions={}, metadata={op_name="jit(roll_gather)/jit(main)/gather[dimension_numbers=GatherDimensionNumbers(offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)) slice_sizes=(1,) unique_indices=True indices_are_sorted=False mode=GatherScatterMode.PROMISE_IN_BOUNDS fill_value=None]" source_file="<ipython-input-26-e19688a73297>" source_line=16}
%compare.3 = pred[16,1]{1,0} compare(s32[16,1]{1,0} %constant.14, s32[16,1]{1,0} %broadcast.9), direction=LT, metadata={op_name="jit(roll_gather)/jit(main)/gather[dimension_numbers=GatherDimensionNumbers(offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)) slice_sizes=(1,) unique_indices=True indices_are_sorted=False mode=GatherScatterMode.PROMISE_IN_BOUNDS fill_value=None]" source_file="<ipython-input-26-e19688a73297>" source_line=16}
%constant.12 = s32[] constant(3), metadata={op_name="jit(roll_gather)/jit(main)/gather[dimension_numbers=GatherDimensionNumbers(offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)) slice_sizes=(1,) unique_indices=True indices_are_sorted=False mode=GatherScatterMode.PROMISE_IN_BOUNDS fill_value=None]" source_file="<ipython-input-26-e19688a73297>" source_line=16}
%add.2 = s32[] add(s32[] %multiply.2, s32[] %constant.12), metadata={op_name="jit(roll_gather)/jit(main)/gather[dimension_numbers=GatherDimensionNumbers(offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)) slice_sizes=(1,) unique_indices=True indices_are_sorted=False mode=GatherScatterMode.PROMISE_IN_BOUNDS fill_value=None]" source_file="<ipython-input-26-e19688a73297>" source_line=16}
%broadcast.8 = s32[16,1]{1,0} broadcast(s32[] %add.2), dimensions={}, metadata={op_name="jit(roll_gather)/jit(main)/gather[dimension_numbers=GatherDimensionNumbers(offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)) slice_sizes=(1,) unique_indices=True indices_are_sorted=False mode=GatherScatterMode.PROMISE_IN_BOUNDS fill_value=None]" source_file="<ipython-input-26-e19688a73297>" source_line=16}
%compare.2 = pred[16,1]{1,0} compare(s32[16,1]{1,0} %constant.14, s32[16,1]{1,0} %broadcast.8), direction=GT, metadata={op_name="jit(roll_gather)/jit(main)/gather[dimension_numbers=GatherDimensionNumbers(offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)) slice_sizes=(1,) unique_indices=True indices_are_sorted=False mode=GatherScatterMode.PROMISE_IN_BOUNDS fill_value=None]" source_file="<ipython-input-26-e19688a73297>" source_line=16}
%or.2 = pred[16,1]{1,0} or(pred[16,1]{1,0} %compare.3, pred[16,1]{1,0} %compare.2), metadata={op_name="jit(roll_gather)/jit(main)/gather[dimension_numbers=GatherDimensionNumbers(offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)) slice_sizes=(1,) unique_indices=True indices_are_sorted=False mode=GatherScatterMode.PROMISE_IN_BOUNDS fill_value=None]" source_file="<ipython-input-26-e19688a73297>" source_line=16}
%reshape.9 = pred[16]{0} reshape(pred[16,1]{1,0} %or.2), metadata={op_name="jit(roll_gather)/jit(main)/gather[dimension_numbers=GatherDimensionNumbers(offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)) slice_sizes=(1,) unique_indices=True indices_are_sorted=False mode=GatherScatterMode.PROMISE_IN_BOUNDS fill_value=None]" source_file="<ipython-input-26-e19688a73297>" source_line=16}
%constant.11 = s32[] constant(0), metadata={op_name="jit(roll_gather)/jit(main)/gather[dimension_numbers=GatherDimensionNumbers(offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)) slice_sizes=(1,) unique_indices=True indices_are_sorted=False mode=GatherScatterMode.PROMISE_IN_BOUNDS fill_value=None]" source_file="<ipython-input-26-e19688a73297>" source_line=16}
%broadcast.7 = s32[16]{0} broadcast(s32[] %constant.11), dimensions={}, metadata={op_name="jit(roll_gather)/jit(main)/gather[dimension_numbers=GatherDimensionNumbers(offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)) slice_sizes=(1,) unique_indices=True indices_are_sorted=False mode=GatherScatterMode.PROMISE_IN_BOUNDS fill_value=None]" source_file="<ipython-input-26-e19688a73297>" source_line=16}
%param_0.1 = s32[4]{0} parameter(0)
%clamp.2 = s32[16,1]{1,0} clamp(s32[16,1]{1,0} %broadcast.9, s32[16,1]{1,0} %constant.14, s32[16,1]{1,0} %broadcast.8), metadata={op_name="jit(roll_gather)/jit(main)/gather[dimension_numbers=GatherDimensionNumbers(offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)) slice_sizes=(1,) unique_indices=True indices_are_sorted=False mode=GatherScatterMode.PROMISE_IN_BOUNDS fill_value=None]" source_file="<ipython-input-26-e19688a73297>" source_line=16}
%subtract.1 = s32[16,1]{1,0} subtract(s32[16,1]{1,0} %clamp.2, s32[16,1]{1,0} %broadcast.9), metadata={op_name="jit(roll_gather)/jit(main)/gather[dimension_numbers=GatherDimensionNumbers(offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)) slice_sizes=(1,) unique_indices=True indices_are_sorted=False mode=GatherScatterMode.PROMISE_IN_BOUNDS fill_value=None]" source_file="<ipython-input-26-e19688a73297>" source_line=16}
%gather.1 = s32[16]{0} gather(s32[4]{0} %param_0.1, s32[16,1]{1,0} %subtract.1), offset_dims={}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1, slice_sizes={1}, metadata={op_name="jit(roll_gather)/jit(main)/gather[dimension_numbers=GatherDimensionNumbers(offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)) slice_sizes=(1,) unique_indices=True indices_are_sorted=False mode=GatherScatterMode.PROMISE_IN_BOUNDS fill_value=None]" source_file="<ipython-input-26-e19688a73297>" source_line=16}
ROOT %select.1 = s32[16]{0} select(pred[16]{0} %reshape.9, s32[16]{0} %broadcast.7, s32[16]{0} %gather.1), metadata={op_name="jit(roll_gather)/jit(main)/gather[dimension_numbers=GatherDimensionNumbers(offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)) slice_sizes=(1,) unique_indices=True indices_are_sorted=False mode=GatherScatterMode.PROMISE_IN_BOUNDS fill_value=None]" source_file="<ipython-input-26-e19688a73297>" source_line=16}
}
ENTRY %main.4_spmd (param: s32[4]) -> s32[16] {
%param = s32[4]{0} parameter(0), sharding={devices=[4]0,1,2,3}
%partition-id = u32[] partition-id(), metadata={op_name="jit(roll_gather)/jit(main)/gather[dimension_numbers=GatherDimensionNumbers(offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)) slice_sizes=(1,) unique_indices=True indices_are_sorted=False mode=GatherScatterMode.PROMISE_IN_BOUNDS fill_value=None]" source_file="<ipython-input-26-e19688a73297>" source_line=16}
%fusion = s32[16]{0} fusion(s32[4]{0} %param, u32[] %partition-id), kind=kLoop, calls=%fused_computation, metadata={op_name="jit(roll_gather)/jit(main)/gather[dimension_numbers=GatherDimensionNumbers(offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)) slice_sizes=(1,) unique_indices=True indices_are_sorted=False mode=GatherScatterMode.PROMISE_IN_BOUNDS fill_value=None]" source_file="<ipython-input-26-e19688a73297>" source_line=16}
ROOT %all-reduce = s32[16]{0} all-reduce(s32[16]{0} %fusion), channel_id=1, replica_groups={{0}}, to_apply=%add.1, metadata={op_name="jit(roll_gather)/jit(main)/gather[dimension_numbers=GatherDimensionNumbers(offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)) slice_sizes=(1,) unique_indices=True indices_are_sorted=False mode=GatherScatterMode.PROMISE_IN_BOUNDS fill_value=None]" source_file="<ipython-input-26-e19688a73297>" source_line=16}
}
```
</details>
Do you have thoughts on how to implement this in an SPMD-friendly way?
The `roll_concat` implementation in my example snippet (basically the same as your initial suggestion) does generate SPMD-friendly code -- it uses collective permute under the hood.
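A tiny 1-D sketch of that static-shift, concat-of-slices form (shift assumed to be a Python int):
```python
import jax.numpy as jnp

def roll_static_1d(x, shift):
    # roll by s == concatenate([x[-s:], x[:-s]]), written via one split point
    i = (-shift) % x.shape[0]
    return jnp.concatenate([x[i:], x[:i]])

roll_static_1d(jnp.arange(4), 1)  # [3, 0, 1, 2], same result as jnp.roll
```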
#16217 should address this. | 2023-06-01T13:23:04 |
|
google/jax | 16,265 | google__jax-16265 | [
"16122"
]
| 2502e2a7be118cf8545ab5fa29b5a2526d68d201 | diff --git a/jax/_src/lax/lax.py b/jax/_src/lax/lax.py
--- a/jax/_src/lax/lax.py
+++ b/jax/_src/lax/lax.py
@@ -68,7 +68,7 @@
from jax._src.lib.mlir.dialects import chlo
from jax._src.lib.mlir.dialects import hlo
from jax._src.sharding_impls import PmapSharding
-from jax._src.typing import Array, ArrayLike, DTypeLike, Shape
+from jax._src.typing import Array, ArrayLike, DuckTypedArray, DTypeLike, Shape
from jax._src.util import (cache, safe_zip, safe_map, canonicalize_axis,
split_list)
@@ -1330,7 +1330,8 @@ def expand_dims(array: ArrayLike, dimensions: Sequence[int]) -> Array:
### convenience wrappers around traceables
-def full_like(x: ArrayLike, fill_value: ArrayLike, dtype: Optional[DTypeLike] = None,
+def full_like(x: Union[ArrayLike, DuckTypedArray],
+ fill_value: ArrayLike, dtype: Optional[DTypeLike] = None,
shape: Optional[Shape] = None) -> Array:
"""Create a full array like np.full based on the example array `x`.
@@ -1344,7 +1345,7 @@ def full_like(x: ArrayLike, fill_value: ArrayLike, dtype: Optional[DTypeLike] =
An ndarray with the same shape as `x` with its entries set equal to
`fill_value`, similar to the output of np.full.
"""
- fill_shape = np.shape(x) if shape is None else canonicalize_shape(shape)
+ fill_shape = np.shape(x) if shape is None else canonicalize_shape(shape) # type: ignore[arg-type]
weak_type = dtype is None and dtypes.is_weakly_typed(x)
dtype = dtype or _dtype(x)
if dtypes.is_opaque_dtype(dtype):
diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -60,7 +60,7 @@
from jax._src.numpy import ufuncs
from jax._src.numpy import util
from jax._src.numpy.vectorize import vectorize
-from jax._src.typing import Array, ArrayLike, DimSize, DType, DTypeLike, Shape
+from jax._src.typing import Array, ArrayLike, DimSize, DuckTypedArray, DType, DTypeLike, Shape
from jax._src.util import (unzip2, subvals, safe_zip,
ceil_of_ratio, partition_list,
canonicalize_axis as _canonicalize_axis)
@@ -320,7 +320,7 @@ def isscalar(element: Any) -> bool:
iterable = np.iterable
@util._wraps(np.result_type)
-def result_type(*args: ArrayLike) -> DType:
+def result_type(*args: Any) -> DType:
return dtypes.result_type(*args)
@@ -2077,9 +2077,11 @@ def copy(a: ArrayLike, order: Optional[str] = None) -> Array:
@util._wraps(np.zeros_like)
-def zeros_like(a: ArrayLike, dtype: Optional[DTypeLike] = None,
+def zeros_like(a: Union[ArrayLike, DuckTypedArray],
+ dtype: Optional[DTypeLike] = None,
shape: Any = None) -> Array:
- util.check_arraylike("zeros_like", a)
+ if not (hasattr(a, 'dtype') and hasattr(a, 'shape')): # support duck typing
+ util.check_arraylike("ones_like", a)
dtypes.check_user_dtype_supported(dtype, "zeros_like")
if shape is not None:
shape = canonicalize_shape(shape)
@@ -2087,9 +2089,11 @@ def zeros_like(a: ArrayLike, dtype: Optional[DTypeLike] = None,
@util._wraps(np.ones_like)
-def ones_like(a: ArrayLike, dtype: Optional[DTypeLike] = None,
+def ones_like(a: Union[ArrayLike, DuckTypedArray],
+ dtype: Optional[DTypeLike] = None,
shape: Any = None) -> Array:
- util.check_arraylike("ones_like", a)
+ if not (hasattr(a, 'dtype') and hasattr(a, 'shape')): # support duck typing
+ util.check_arraylike("ones_like", a)
dtypes.check_user_dtype_supported(dtype, "ones_like")
if shape is not None:
shape = canonicalize_shape(shape)
@@ -2099,9 +2103,11 @@ def ones_like(a: ArrayLike, dtype: Optional[DTypeLike] = None,
@util._wraps(np.empty_like, lax_description="""\
Because XLA cannot create uninitialized arrays, the JAX version will
return an array initialized with zeros.""")
-def empty_like(prototype: ArrayLike, dtype: Optional[DTypeLike] = None,
+def empty_like(prototype: Union[ArrayLike, DuckTypedArray],
+ dtype: Optional[DTypeLike] = None,
shape: Any = None) -> Array:
- util.check_arraylike("empty_like", prototype)
+ if not (hasattr(prototype, 'dtype') and hasattr(prototype, 'shape')): # support duck typing
+ util.check_arraylike("ones_like", prototype)
dtypes.check_user_dtype_supported(dtype, "empty_like")
return zeros_like(prototype, dtype=dtype, shape=shape)
@@ -2119,17 +2125,21 @@ def full(shape: Any, fill_value: ArrayLike,
@util._wraps(np.full_like)
-def full_like(a: ArrayLike, fill_value: ArrayLike, dtype: Optional[DTypeLike] = None,
+def full_like(a: Union[ArrayLike, DuckTypedArray],
+ fill_value: ArrayLike, dtype: Optional[DTypeLike] = None,
shape: Any = None) -> Array:
+ if hasattr(a, 'dtype') and hasattr(a, 'shape'): # support duck typing
+ util.check_arraylike("full_like", 0, fill_value)
+ else:
+ util.check_arraylike("full_like", a, fill_value)
dtypes.check_user_dtype_supported(dtype, "full_like")
- util.check_arraylike("full_like", a, fill_value)
if shape is not None:
shape = canonicalize_shape(shape)
if ndim(fill_value) == 0:
return lax.full_like(a, fill_value, dtype, shape)
else:
- shape = np.shape(a) if shape is None else shape
- dtype = result_type(a) if dtype is None else dtype
+ shape = np.shape(a) if shape is None else shape # type: ignore[arg-type]
+ dtype = result_type(a) if dtype is None else dtype # type: ignore[arg-type]
return broadcast_to(asarray(fill_value, dtype=dtype), shape)
diff --git a/jax/_src/typing.py b/jax/_src/typing.py
--- a/jax/_src/typing.py
+++ b/jax/_src/typing.py
@@ -57,6 +57,12 @@ def dtype(self) -> DType: ...
DimSize = Union[int, Any] # extensible
Shape = Sequence[DimSize]
+class DuckTypedArray(Protocol):
+ @property
+ def dtype(self) -> DType: ...
+ @property
+ def shape(self) -> Shape: ...
+
# Array is a type annotation for standard JAX arrays and tracers produced by
# core functions in jax.lax and jax.numpy; it is not meant to include
# future non-standard array types like KeyArray and BInt. It is imported above.
| diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py
--- a/tests/lax_numpy_test.py
+++ b/tests/lax_numpy_test.py
@@ -2621,6 +2621,13 @@ def testZerosOnesLike(self, func, shape, in_dtype, out_shape, out_dtype):
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
+ def testDuckTypedLike(self):
+ x = jax.ShapeDtypeStruct((1, 2, 3), np.dtype("int32"))
+ self.assertArraysEqual(jnp.zeros_like(x), jnp.zeros(x.shape, x.dtype))
+ self.assertArraysEqual(jnp.ones_like(x), jnp.ones(x.shape, x.dtype))
+ self.assertArraysEqual(jnp.empty_like(x), jnp.empty(x.shape, x.dtype))
+ self.assertArraysEqual(jnp.full_like(x, 2), jnp.full(x.shape, 2, x.dtype))
+
@jtu.sample_product(
[dict(func=func, args=args)
for func, args in [("full_like", (-100,)), ("ones_like", ()), ("zeros_like", ())]
| consider duck typing for jnp.ones_like, other similar functions
Right now, `jnp.ones_like(jax.ShapeDtypeStruct(shape=(1, 2, 3), dtype=jnp.float32))` throws a type error.
It probably shouldn't; in fact, `jnp.ones_like` should probably work on anything that has `shape`, `dtype`, and similar attributes.
| `jnp.ones_like` et al. would ideally get the shape, dtype, etc. from `x`, whatever `x` is, and only check that those attributes are themselves well typed
Seems reasonable! (In fact I vaguely recall this coming up before...)
What do you think, @jakevdp ?
Sure, sounds reasonable to me. We have to be somewhat careful because some valid inputs won't have a `dtype` or `shape` defined (e.g. Python scalars) and some non-array inputs we want to be errors (e.g. lists and tuples).
Maybe we can try to look for `shape` and `dtype` attributes first, and if that fails fall back to the current path.
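A self-contained sketch of that attribute-first approach (the function name is hypothetical, not JAX API):
```python
import jax
import jax.numpy as jnp

def zeros_like_ducked(a, dtype=None, shape=None):
    # Accept anything exposing .shape and .dtype (e.g. jax.ShapeDtypeStruct);
    # otherwise fall back to the normal jnp.zeros_like path.
    if hasattr(a, "shape") and hasattr(a, "dtype"):
        return jnp.zeros(a.shape if shape is None else shape, dtype or a.dtype)
    return jnp.zeros_like(a, dtype=dtype, shape=shape)

zeros_like_ducked(jax.ShapeDtypeStruct((1, 2, 3), jnp.float32))  # no TypeError
```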
I just ran into a case where this would be nice as well. Would it be possible for me to make a PR for this or are there non-obvious things that could get broken? | 2023-06-06T08:00:19 |
google/jax | 16,283 | google__jax-16283 | [
"16137"
]
| e4fb4c65ed13c5ffd8109bf32c4509450bcf81a5 | diff --git a/jax/_src/interpreters/mlir.py b/jax/_src/interpreters/mlir.py
--- a/jax/_src/interpreters/mlir.py
+++ b/jax/_src/interpreters/mlir.py
@@ -1500,7 +1500,7 @@ def _wrap_with_spmd_op(name: str,
else:
backend_config = ""
result_type = aval_to_ir_type(aval_out)
- out_shape = aval_out.shape # type: ignore
+ out_shape = core.physical_aval(aval_out).shape # type: ignore
if core.is_constant_shape(out_shape):
result_shapes = None
else:
diff --git a/jax/_src/prng.py b/jax/_src/prng.py
--- a/jax/_src/prng.py
+++ b/jax/_src/prng.py
@@ -470,7 +470,8 @@ def physical_hlo_sharding(aval, op_sharding_proto):
op_sharding_proto = op_sharding_proto.to_proto()
new_op_sharding = op_sharding_proto.clone()
tad = list(new_op_sharding.tile_assignment_dimensions)
- tad.extend([1] * len(key_shape))
+ suffix = [tad.pop()] if op_sharding_proto.replicate_on_last_tile_dim else []
+ tad.extend([1] * len(key_shape) + suffix)
new_op_sharding.tile_assignment_dimensions = tad
return xc.HloSharding.from_proto(new_op_sharding)
| diff --git a/tests/shard_map_test.py b/tests/shard_map_test.py
--- a/tests/shard_map_test.py
+++ b/tests/shard_map_test.py
@@ -740,6 +740,20 @@ def g(x):
# error!
jax.jit(g)(x) # doesn't crash
+ def test_key_array_with_replicated_last_tile_dim(self):
+ # See https://github.com/google/jax/issues/16137
+
+ mesh = jtu.create_global_mesh((2, 4), ('i', 'j'))
+
+ def f(rng):
+ @partial(shard_map, mesh=mesh, in_specs=P('i'), out_specs=P('i'),
+ check_rep=False)
+ def g(rng):
+ return jnp.array([jax.random.normal(rng[0])])
+ return g(jax.random.split(rng, 4))
+
+ jax.jit(f)(jax.random.key(0)) # doesn't crash
+
# same method appears in api_test.py:DCETest
# TODO(mattjj): consider moving this method to be a helper in jtu
def assert_dce_result(self, jaxpr: core.Jaxpr, used_outputs: List[bool],
| [shard-map] Check failed using unsafe_rbg_key
Based on code from @jheek :
```python
import os
os.environ['XLA_FLAGS'] = '--xla_force_host_platform_device_count=8'
from functools import partial
import numpy as np
import jax
import jax.numpy as jnp
from jax import random
from jax.experimental.shard_map import shard_map, PartitionSpec as P, Mesh
from flax import linen
jax.config.update('jax_enable_custom_prng', True) # doesn't crash if we exclude this line!
dense = linen.Dense(128)
mesh = Mesh(np.array(jax.devices()).reshape(2, 4), ('model', 'data'))
@jax.jit
def f(rng):
@partial(shard_map, mesh=mesh, in_specs=P("model"), out_specs=P("model"), check_rep=False)
def g(rng):
rng = rng[0]
return dense.init(rng, jnp.zeros((8, 128)))
return g(random.split(rng, 4))
f(random.PRNGKey(0)) # ==> crashes VM on ASSERT(IsManual(sharding))
```
| 2023-06-06T22:53:30 |
|
google/jax | 16,284 | google__jax-16284 | [
"16272"
]
| eea03ced0a4ce6066a61c22d9bd03524d7b71aab | diff --git a/jax/_src/interpreters/mlir.py b/jax/_src/interpreters/mlir.py
--- a/jax/_src/interpreters/mlir.py
+++ b/jax/_src/interpreters/mlir.py
@@ -360,6 +360,14 @@ def _source_info_to_location(
def make_ir_context() -> ir.Context:
"""Creates an MLIR context suitable for JAX IR."""
context = ir.Context()
+
+ # If threading is enabled, each MLIR context will keep alive a thread pool.
+ # Since we cache MLIR modules (and hence contexts), this means we might keep
+ # several threads alive for each cache entry. This is a terrible idea. However
+ # we don't do any heavy computation on MLIR modules from Python anyway, so we
+ # just disable threading.
+ context.enable_multithreading(False)
+
dialects.mhlo.register_mhlo_dialect(context)
dialects.chlo.register_dialect(context)
dialects.stablehlo.register_dialect(context)
| LLVM worker thread limit
### Description
Recently we have been seeing a lot of crashed jobs on our cluster, with the following error message:
```
LLVM ERROR: pthread_create failed: Resource temporarily unavailable
```
caused by the jobs hitting the thread limit per user per node (set to 4096).
Further investigation revealed that this is caused by jax spawning hundreds/thousands of llvm-worker threads, and never exiting them.
This happens whenever we run a script which contains many compilations, and a certain subset of jax primitives, e.g. qr decomposition.
Here is a minimal working example to reproduce the aforementioned behaviour:
```python
import jax
import jax.numpy as jnp
from functools import partial
@partial(jax.jit, static_argnums=0)
def expensive_computation(f, x):
x = f(x)
q, r = jnp.linalg.qr(x)
return q.sum()+r.sum()
x = jnp.ones((16,16))
i =1
while True:
i = i+1
if i%1000 ==0: print(i)
f = lambda x: x #trigger recompilation
u = jax.block_until_ready(expensive_computation(f, x))
```
After a few thousand iterations, `top -Hp $(pgrep python3)` shows ~6100 llvm-worker threads that have been spawned:
```
top - 12:04:00 up 3:05, 3 users, load average: 1.04, 0.52, 0.26
Threads: 6178 total, 1 running, 6177 sleeping, 0 stopped, 0 zombie
%Cpu(s): 33.7 us, 1.0 sy, 0.0 ni, 65.3 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
MiB Mem : 60000.0 total, 56564.2 free, 3173.1 used, 262.8 buff/cache
MiB Swap: 0.0 total, 0.0 free, 0.0 used. 56826.9 avail Mem
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
507363 clemens 20 0 52.8g 2.9g 103472 R 99.0 5.0 3:05.13 python3
507364 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.06 python3
507365 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.07 python3
507367 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 python3
507368 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 python3
507369 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 python3
507370 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 python3
507371 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 python3
507372 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 python3
507373 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 python3
507374 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 python3
507375 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 python3
507376 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 python3
507377 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 python3
507378 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 python3
507379 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 python3
507380 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 python3
507381 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 python3
507382 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 python3
507383 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 python3
507384 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 python3
507385 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 python3
507386 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 python3
507387 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 python3
507388 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 python3
507390 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.02 python3
507391 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.06 python3
507392 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.07 python3
521501 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-0
521502 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-1
521503 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-2
521507 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-0
521508 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-1
521509 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-2
521513 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-0
521514 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-1
521515 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-2
521519 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-0
521520 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-1
521521 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-2
521525 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-0
521526 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-1
521527 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-2
521531 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-0
521532 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-1
521533 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-2
521537 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-0
521538 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-1
521539 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-2
521543 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-0
521544 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-1
521545 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-2
521549 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-0
521550 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-1
521551 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-2
521555 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-0
521556 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-1
521557 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-2
521561 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-0
521562 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-1
521563 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-2
521567 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-0
521568 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-1
521569 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-2
521573 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-0
521574 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-1
521575 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-2
521579 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-0
521580 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-1
521581 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-2
521585 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-0
521586 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-1
521587 clemens 20 0 52.8g 2.9g 103472 S 0.0 5.0 0:00.00 llvm-worker-2
...
```
(this was run locally, in an lxc container limited to 3 cpu cores)
A few more remarks:
- if the `jnp.linalg.qr` decomposition is replaced with some normal jax function, e.g. `jax.lax.sin` the issue does not happen
- as can be seen from the thread names (repeated indices), they are not all spawned by the same `llvm::ThreadPool`, but somewhere else (I am not familiar enough with XLA to find where)
Context:
- possibly related to https://github.com/google/jax/issues/15819 and https://github.com/google/jax/issues/16215
- this issue is different from https://github.com/google/jax/issues/1539 and https://github.com/google/jax/issues/15866 , as we are not worried about the number of threads jax is using for doing the acutal computations (which can be limited via cpu pinning e.g. taskset) but rather about the compilation threads
Let me know if this is the right place or if I should open an issue on https://github.com/openxla/xla instead.
### What jax/jaxlib version are you using?
jax 0.4.11, jaxlib 0.4.11
### Which accelerator(s) are you using?
CPU
| 2023-06-06T23:48:46 |
||
google/jax | 16,364 | google__jax-16364 | [
"16362"
]
| 4d698c30b97b6c23cf34083965f05db5c6bbda79 | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -22,9 +22,9 @@
_current_jaxlib_version = '0.4.12'
# The following should be updated with each new jaxlib release.
_latest_jaxlib_version_on_pypi = '0.4.12'
-_available_cuda11_cudnn_versions = ['82', '86']
+_available_cuda11_cudnn_versions = ['86']
_default_cuda11_cudnn_version = '86'
-_default_cuda12_cudnn_version = '88'
+_default_cuda12_cudnn_version = '89'
_libtpu_version = '0.1.dev20230608'
_dct = {}
| Installation Bug: No Matching distribution found for jaxlib==0.4.12+cuda12.cudnn88; extra == "cuda12_local" (from jax[cuda12_local])
### Description
For CUDA 12.1 and cuDNN 8.8, no wheel is available. The suggested wheel targets CUDA 12 and cuDNN 8.9, but that does not work for me since I only have cuDNN 8.8.
Thanks,
### What jax/jaxlib version are you using?
jaxlib==0.4.12
### Which accelerator(s) are you using?
GPU
### Additional system info
Python 3.8.10, Ubuntu 20.04.6 LTS
### NVIDIA GPU info
Mon Jun 12 14:37:26 2023
+---------------------------------------------------------------------------------------+
| NVIDIA-SMI 530.41.03 Driver Version: 530.41.03 CUDA Version: 12.1 |
|-----------------------------------------+----------------------+----------------------+
| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|=========================================+======================+======================|
| 0 NVIDIA RTX A6000 Off| 00000000:65:00.0 On | Off |
| 32% 61C P2 79W / 300W| 48625MiB / 49140MiB | 0% Default |
| | | N/A |
+-----------------------------------------+----------------------+----------------------+
+---------------------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=======================================================================================|
| 0 N/A N/A 1189 G /usr/lib/xorg/Xorg 77MiB |
| 0 N/A N/A 1939 G /usr/lib/xorg/Xorg 161MiB |
| 0 N/A N/A 2074 G /usr/bin/gnome-shell 75MiB |
| 0 N/A N/A 6749 C python3 37240MiB |
| 0 N/A N/A 7046 C python3 10934MiB |
| 0 N/A N/A 11541 G /usr/lib/firefox/firefox 121MiB |
+---------------------------------------------------------------------------------------+
| 2023-06-12T19:32:43 |
||
google/jax | 16,427 | google__jax-16427 | [
"16422"
]
| ef91a8d55ece83d7752523fbcddfb36ddca494b0 | diff --git a/jax/_src/lax/lax.py b/jax/_src/lax/lax.py
--- a/jax/_src/lax/lax.py
+++ b/jax/_src/lax/lax.py
@@ -3488,7 +3488,6 @@ def _select_hlo_lowering_opaque(ctx, which, *cases):
avals_in = ctx.avals_in
aval_out, = ctx.avals_out
assert all(aval_case == aval_out for aval_case in avals_in[1:])
- assert avals_in[0].ndim == aval_out.ndim
select_lower = _select_hlo_lowering
physical_aval_out = core.physical_aval(aval_out)
| diff --git a/tests/random_test.py b/tests/random_test.py
--- a/tests/random_test.py
+++ b/tests/random_test.py
@@ -1860,6 +1860,15 @@ def test_select(self):
self.assertIsInstance(ys, random.KeyArray)
self.assertEqual(ys.shape, (3, 2))
+ @skipIf(not config.jax_enable_custom_prng,
+ 'requires config.jax_enable_custom_prng')
+ def test_select_scalar_cond(self):
+ # regression test for https://github.com/google/jax/issues/16422
+ ks = self.make_keys(3)
+ ys = lax.select(True, ks, ks)
+ self.assertIsInstance(ys, random.KeyArray)
+ self.assertEqual(ys.shape, (3,))
+
@skipIf(not config.jax_enable_custom_prng,
'requires config.jax_enable_custom_prng')
def test_select2(self):
| Interaction between `vmap` and new PRNG
### Description
Hi, I'm having trouble understanding why the following MWE using [Diffrax](https://github.com/patrick-kidger/diffrax) errors:
```py
import jax
from jax import numpy as jnp
from diffrax import diffeqsolve, ControlTerm, Euler, MultiTerm, ODETerm, SaveAt, VirtualBrownianTree
B = 20
D = 5
tol = 1e-3
dt0 = 0.05
T0 = 0.0
Tf = 1.0
def f(x0, key):
drift = lambda t, x, args: jnp.zeros((D, ))
diffusion = lambda t, x, args: jnp.zeros((D, D))
brownian_motion = VirtualBrownianTree(T0, Tf, tol=tol, shape=x0.shape, key=key)
terms = MultiTerm(ODETerm(drift), ControlTerm(diffusion, brownian_motion))
solver = Euler()
saveat = SaveAt(dense=True)
sol = diffeqsolve(terms, solver, T0, Tf, dt0=dt0, y0=x0, saveat=saveat)
return sol.evaluate(Tf)
x0 = jnp.zeros((D, ))
key = jax.random.key(1)
print(f(x0, key))
x0 = jnp.zeros((B, D))
key = jax.random.split(key, B)
print(jax.vmap(f)(x0, key))
```
In particular, the last three lines (with `vmap`) cause an error, while the previous three lines seem to work fine.
I raised [this issue in Diffrax](https://github.com/patrick-kidger/diffrax/issues/267) and was told that it was an issue only with the new PRNG, so the bug is in Jax rather than Diffrax. Hence I'm raising it here.
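A distilled sketch of what appears to be the underlying failure (an educated guess based on the regression test above, not part of my original code):
```python
import jax
from jax import lax

jax.config.update("jax_enable_custom_prng", True)  # typed PRNG key arrays
ks = jax.random.split(jax.random.PRNGKey(1), 3)
ys = lax.select(True, ks, ks)  # scalar predicate over key arrays; failed before the fix above
```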
Here are links for:
- [The stack trace.](https://pastebin.com/46hbkCKb)
- [The output of my `conda list`.](https://pastebin.com/3xjrfYYz)
### What jax/jaxlib version are you using?
0.4.12/0.4.12
### Which accelerator(s) are you using?
CPU
### Additional system info
Python 3.10.11, WSL 2 -> Ubuntu 22.04.2 LTS
### NVIDIA GPU info
_No response_
| 2023-06-15T09:11:15 |
|
google/jax | 16,487 | google__jax-16487 | [
"16461"
]
| 9fe63ad053a072e770f0be6840ab0a2e28250a94 | diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -349,11 +349,24 @@ def trunc(x: ArrayLike) -> Array:
return where(lax.lt(x, _lax_const(x, 0)), ufuncs.ceil(x), ufuncs.floor(x))
-@partial(jit, static_argnums=(2, 3, 4))
-def _conv(x: Array, y: Array, mode: str, op: str, precision: PrecisionLike) -> Array:
+_PREFERRED_ELEMENT_TYPE_DESCRIPTION = """
+preferred_element_type : dtype, optional
+ If specified, accumulate results and return a result of the given data type.
+ If not specified, the function instead follows the numpy convention of always
+ accumulating results and returning an inexact dtype.
+"""
+
+@partial(jit, static_argnames=['mode', 'op', 'precision', 'preferred_element_type'])
+def _conv(x: Array, y: Array, mode: str, op: str, precision: PrecisionLike,
+ preferred_element_type: Optional[DTypeLike] = None) -> Array:
if ndim(x) != 1 or ndim(y) != 1:
raise ValueError(f"{op}() only support 1-dimensional inputs.")
- x, y = util.promote_dtypes_inexact(x, y)
+ if preferred_element_type is None:
+ # if unspecified, promote to inexact following NumPy's default for convolutions.
+ x, y = util.promote_dtypes_inexact(x, y)
+ else:
+ # otherwise cast to same type but otherwise preserve input dtypes
+ x, y = util.promote_dtypes(x, y)
if len(x) == 0 or len(y) == 0:
raise ValueError(f"{op}: inputs cannot be empty, got shapes {x.shape} and {y.shape}.")
@@ -378,24 +391,31 @@ def _conv(x: Array, y: Array, mode: str, op: str, precision: PrecisionLike) -> A
raise ValueError("mode must be one of ['full', 'same', 'valid']")
result = lax.conv_general_dilated(x[None, None, :], y[None, None, :], (1,),
- padding, precision=precision)
+ padding, precision=precision,
+ preferred_element_type=preferred_element_type)
return result[0, 0, out_order]
-@util._wraps(np.convolve, lax_description=_PRECISION_DOC)
-@partial(jit, static_argnames=('mode', 'precision'))
+@util._wraps(np.convolve, lax_description=_PRECISION_DOC,
+ extra_params=_PREFERRED_ELEMENT_TYPE_DESCRIPTION)
+@partial(jit, static_argnames=('mode', 'precision', 'preferred_element_type'))
def convolve(a: ArrayLike, v: ArrayLike, mode: str = 'full', *,
- precision: PrecisionLike = None) -> Array:
+ precision: PrecisionLike = None,
+ preferred_element_type: Optional[dtype] = None) -> Array:
util.check_arraylike("convolve", a, v)
- return _conv(asarray(a), asarray(v), mode, 'convolve', precision)
+ return _conv(asarray(a), asarray(v), mode=mode, op='convolve',
+ precision=precision, preferred_element_type=preferred_element_type)
-@util._wraps(np.correlate, lax_description=_PRECISION_DOC)
-@partial(jit, static_argnames=('mode', 'precision'))
+@util._wraps(np.correlate, lax_description=_PRECISION_DOC,
+ extra_params=_PREFERRED_ELEMENT_TYPE_DESCRIPTION)
+@partial(jit, static_argnames=('mode', 'precision', 'preferred_element_type'))
def correlate(a: ArrayLike, v: ArrayLike, mode: str = 'valid', *,
- precision: PrecisionLike = None) -> Array:
+ precision: PrecisionLike = None,
+ preferred_element_type: Optional[dtype] = None) -> Array:
util.check_arraylike("correlate", a, v)
- return _conv(asarray(a), asarray(v), mode, 'correlate', precision)
+ return _conv(asarray(a), asarray(v), mode=mode, op='correlate',
+ precision=precision, preferred_element_type=preferred_element_type)
@util._wraps(np.histogram_bin_edges)
| diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py
--- a/tests/lax_numpy_test.py
+++ b/tests/lax_numpy_test.py
@@ -1900,12 +1900,35 @@ def testConvolutions(self, xshape, yshape, dtype, mode, op):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(xshape, dtype), rng(yshape, dtype)]
precision = lax.Precision.HIGHEST if jtu.device_under_test() == "tpu" else None
- np_fun = partial(np_op, mode=mode)
jnp_fun = partial(jnp_op, mode=mode, precision=precision)
+ def np_fun(x, y):
+ return np_op(x, y, mode=mode).astype(dtypes.to_inexact_dtype(dtype))
tol = {np.float16: 2e-1, np.float32: 1e-2, np.float64: 1e-14,
np.complex128: 1e-14}
- self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,
- tol=tol)
+ self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=True, tol=tol)
+ self._CompileAndCheck(jnp_fun, args_maker)
+
+ @jtu.sample_product(
+ mode=['full', 'same', 'valid'],
+ op=['convolve', 'correlate'],
+ dtype=number_dtypes,
+ xshape=one_dim_array_shapes,
+ yshape=one_dim_array_shapes,
+ )
+ @jtu.skip_on_devices("gpu", "tpu", "rocm") # backends don't support all dtypes.
+ def testConvolutionsPreferredElementType(self, xshape, yshape, dtype, mode, op):
+ jnp_op = getattr(jnp, op)
+ np_op = getattr(np, op)
+ rng = jtu.rand_default(self.rng())
+ args_maker = lambda: [rng(xshape, dtype), rng(yshape, dtype)]
+ precision = lax.Precision.HIGHEST if jtu.device_under_test() == "tpu" else None
+ jnp_fun = partial(jnp_op, mode=mode, precision=precision,
+ preferred_element_type=dtype)
+ def np_fun(x, y):
+ return np_op(x, y, mode=mode).astype(dtype)
+ tol = {np.float16: 2e-1, np.float32: 1e-2, np.float64: 1e-14,
+ np.complex128: 1e-14}
+ self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=True, tol=tol)
self._CompileAndCheck(jnp_fun, args_maker)
@jtu.sample_product(
| Support preferred_element_type in jax.numpy.convolve
I would like to compute a jax.numpy.convolve on `int8` input arrays, accumulating to an `int32` output array. With einsum I can use `preferred_element_type` to accomplish this, but the option is not present in convolve.
```
A = jnp.array([127,127,127,127], dtype=jnp.int8)
B = jnp.array([2,2,2,2], dtype=jnp.int8)
jnp.convolve(A, B, mode="valid", preferred_element_type=jnp.int32)
# Array([1016], dtype=int32)
```
`conv_general_dilated` has the option, so I was able to get a workaround as the following function, which copies `lax_numpy._conv`, omits the branches I don't care about, and notably omits the line
`x, y = util.promote_dtypes_inexact(x, y)`
which promotes the integral types to floats.
```
from functools import partial

import jax
import jax.numpy as jnp
from jax._src.numpy.lax_numpy import asarray, ndim, flip

@partial(jax.jit, static_argnums=(2,))
def conv_helper(x, y, preferred_element_type=jnp.int32):
    # Specialized to op="convolve", mode="valid"; notably does NOT call
    # util.promote_dtypes_inexact, so integer inputs stay integral.
    x = asarray(x)
    y = asarray(y)
    if ndim(x) != 1 or ndim(y) != 1:
        raise ValueError("convolve() only supports 1-dimensional inputs.")
    if len(x) == 0 or len(y) == 0:
        raise ValueError(f"convolve: inputs cannot be empty, got shapes {x.shape} and {y.shape}.")
    out_order = slice(None)
    if len(x) < len(y):
        x, y = y, x
    y = flip(y)
    padding = [(0, 0)]  # mode: valid
    result = jax.lax.conv_general_dilated(
        x[None, None, :], y[None, None, :], (1,),
        padding, precision=None,
        preferred_element_type=preferred_element_type,
    )
    return result[0, 0, out_order]
```
| Thanks for the request! With #16487 this is now possible:
```python
In [1]: import jax.numpy as jnp
In [2]: x = jnp.arange(10, dtype='int8')
In [3]: y = jnp.ones(3, dtype='int8')
In [4]: jnp.convolve(x, y, preferred_element_type='int32')
Out[4]: Array([ 0, 1, 3, 6, 9, 12, 15, 18, 21, 24, 17, 9], dtype=int32)
``` | 2023-06-20T12:20:18 |
google/jax | 16,566 | google__jax-16566 | [
"16550"
]
| 6bc74d2a9874e1fe93a45191bb829c07dfee04fa | diff --git a/jax/_src/scipy/stats/binom.py b/jax/_src/scipy/stats/binom.py
new file mode 100644
--- /dev/null
+++ b/jax/_src/scipy/stats/binom.py
@@ -0,0 +1,42 @@
+# Copyright 2023 The JAX Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+
+import scipy.stats as osp_stats
+
+from jax import lax
+import jax.numpy as jnp
+from jax._src.numpy.util import _wraps, promote_args_inexact
+from jax._src.scipy.special import gammaln, xlogy, xlog1py
+from jax._src.typing import Array, ArrayLike
+
+
+@_wraps(osp_stats.nbinom.logpmf, update_doc=False)
+def logpmf(k: ArrayLike, n: ArrayLike, p: ArrayLike, loc: ArrayLike = 0) -> Array:
+ """JAX implementation of scipy.stats.binom.logpmf."""
+ k, n, p, loc = promote_args_inexact("binom.logpmf", k, n, p, loc)
+ y = lax.sub(k, loc)
+ comb_term = lax.sub(
+ gammaln(n + 1),
+ lax.add(gammaln(y + 1), gammaln(n - y + 1))
+ )
+ log_linear_term = lax.add(xlogy(y, p), xlog1py(lax.sub(n, y), lax.neg(p)))
+ log_probs = lax.add(comb_term, log_linear_term)
+ return jnp.where(lax.lt(k, loc), -jnp.inf, log_probs)
+
+
+@_wraps(osp_stats.nbinom.pmf, update_doc=False)
+def pmf(k: ArrayLike, n: ArrayLike, p: ArrayLike, loc: ArrayLike = 0) -> Array:
+ """JAX implementation of scipy.stats.binom.pmf."""
+ return lax.exp(logpmf(k, n, p, loc))
diff --git a/jax/scipy/stats/__init__.py b/jax/scipy/stats/__init__.py
--- a/jax/scipy/stats/__init__.py
+++ b/jax/scipy/stats/__init__.py
@@ -17,6 +17,7 @@
from jax.scipy.stats import bernoulli as bernoulli
from jax.scipy.stats import beta as beta
+from jax.scipy.stats import binom as binom
from jax.scipy.stats import cauchy as cauchy
from jax.scipy.stats import dirichlet as dirichlet
from jax.scipy.stats import expon as expon
diff --git a/jax/scipy/stats/binom.py b/jax/scipy/stats/binom.py
new file mode 100644
--- /dev/null
+++ b/jax/scipy/stats/binom.py
@@ -0,0 +1,18 @@
+# Copyright 2023 The JAX Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from jax._src.scipy.stats.binom import (
+ logpmf as logpmf,
+ pmf as pmf,
+)
| diff --git a/tests/scipy_stats_test.py b/tests/scipy_stats_test.py
--- a/tests/scipy_stats_test.py
+++ b/tests/scipy_stats_test.py
@@ -971,6 +971,27 @@ def args_maker():
tol=5e-4)
self._CompileAndCheck(lax_fun, args_maker, rtol=1e-5, atol=1e-5)
+ @genNamedParametersNArgs(4)
+ def testBinomLogPmf(self, shapes, dtypes):
+ rng = jtu.rand_positive(self.rng())
+ scipy_fun = osp_stats.binom.logpmf
+ lax_fun = lsp_stats.binom.logpmf
+
+ def args_maker():
+ k, n, logit, loc = map(rng, shapes, dtypes)
+ k = np.floor(np.abs(k))
+ n = np.ceil(np.abs(n))
+ p = expit(logit)
+ loc = np.floor(loc)
+ return [k, n, p, loc]
+
+ tol = {np.float32: 1e-6, np.float64: 1e-8}
+
+ with jtu.strict_promotion_if_dtypes_match(dtypes):
+ self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
+ tol=5e-4)
+ self._CompileAndCheck(lax_fun, args_maker, rtol=tol, atol=tol)
+
def testIssue972(self):
self.assertAllClose(
np.ones((4,), np.float32),
| jax.scipy.stats.binom missing
I can't find an implementation of the jax equivalent of the binomial from scipy.stats. Specifically in my case I am looking for a jittable way to calculate the binomial cdf i.e. `jax.scipy.stats.binom.cdf(k, n, p)`
| Based on reading the boost code [here](https://www.boost.org/doc/libs/1_67_0/boost/math/distributions/binomial.hpp), it looks like scipy computes this using the incomplete beta function.
You could try using `jax.scipy.special.betainc(n - k, k + 1, 1 - p)` as follows:
```python
>>> scipy.stats.binom.cdf(7, 10, 0.6)
0.8327102464
>>> jax.scipy.special.betainc(3, 8, 1 - 0.6)
Array(0.8327104, dtype=float32, weak_type=True)
```
The boost version is using `ibetac(k + 1, n - k, p)` which is mathematically identical to `ibeta(n - k, k + 1, 1 - p)`, but there's presumably some numerical reason why it's better to do the former.
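For completeness, a minimal jittable sketch building on that identity (the helper name is mine, not an official API, and it assumes integer `k` with `0 <= k < n`):
```python
import jax
from jax.scipy.special import betainc

@jax.jit
def binom_cdf(k, n, p):
    # binom.cdf(k, n, p) == betainc(n - k, k + 1, 1 - p)
    return betainc(n - k, k + 1, 1.0 - p)
```
`binom_cdf(7, 10, 0.6)` should reproduce the value shown above.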
Thank you that looks perfect
Thanks for the report - I'm going to re-open this to track adding support for `jax.scipy.stats.binom`. | 2023-06-27T06:18:58 |
google/jax | 16,567 | google__jax-16567 | [
"16557"
]
| 14f32653a12867d2f3382eb40d35f0c71e83e5c8 | diff --git a/jax/_src/lax/slicing.py b/jax/_src/lax/slicing.py
--- a/jax/_src/lax/slicing.py
+++ b/jax/_src/lax/slicing.py
@@ -1329,10 +1329,12 @@ def _gather_batching_rule(batched_args, batch_dims, *, dimension_numbers,
# indices shape (7, 3, 4, 5). We transform that to indices of shape
# (7, 3, 4, 6) where we concatenated an iota that counts along our batch
# dimension to the front of the ndindex.
+ index_dtype = _promote_dtype_for_size(indices.dtype, indices.shape[0])
count_shape = list(indices.shape)
count_shape[-1] = 1
- counts = lax.broadcasted_iota(indices.dtype, tuple(count_shape), 0)
- indices = lax.concatenate([counts, indices], len(count_shape) - 1)
+ counts = lax.broadcasted_iota(index_dtype, tuple(count_shape), 0)
+ indices = lax.concatenate([counts, indices.astype(index_dtype)],
+ len(count_shape) - 1)
slice_sizes = (1,) + slice_sizes
collapsed_slice_dims = (0,) + tuple(np.add(1, dimension_numbers.collapsed_slice_dims))
@@ -1348,6 +1350,21 @@ def _gather_batching_rule(batched_args, batch_dims, *, dimension_numbers,
indices_are_sorted=indices_are_sorted, mode=mode,
fill_value=fill_value), 0
+def _promote_dtype_for_size(dtype, size):
+ if not dtypes.issubdtype(dtype, np.integer):
+ return dtype
+ # size may be a dynamic shape, in which case we return at least int32
+ try:
+ size = int(size)
+ except:
+ return dtype if np.iinfo(dtype).bits >= 32 else np.dtype('int32')
+ if size <= np.iinfo(dtype).max:
+ return dtype
+ elif size <= np.iinfo(np.int32).max:
+ return np.dtype('int32')
+ else:
+ return dtypes.canonicalize_dtype(np.int64)
+
def _gather_pad_rule(in_avals, out_avals, operand, indices, *,
dimension_numbers, slice_sizes, unique_indices,
indices_are_sorted, mode, fill_value):
| diff --git a/tests/lax_test.py b/tests/lax_test.py
--- a/tests/lax_test.py
+++ b/tests/lax_test.py
@@ -3170,6 +3170,15 @@ def test_gather(self):
self.assertIsInstance(ys, FooArray)
self.assertEqual(ys.shape, (3, 2, 1))
+ def test_gather_batched_index_dtype(self):
+ # Regression test for https://github.com/google/jax/issues/16557
+ dtype = jnp.int8
+ size = jnp.iinfo(dtype).max + 10
+ indices = jnp.zeros(size, dtype=dtype)
+ values = jnp.zeros((size, 1))
+ results = jax.vmap(lambda x, i: jnp.take(x, i, axis=0))(values, indices)
+ self.assertArraysEqual(results, jnp.zeros(size))
+
@parameterized.parameters([
(0,),
(slice(1),),
| Mysterious integer overflow in jax.vmap(jax.take)
Consider the following:
```python
>>> import jax.numpy as jnp
>>> indices = jnp.zeros(256, dtype=jnp.int8)
>>> choices = jnp.zeros(1, dtype=jnp.float32)
>>> choices[indices]
Array([0, 0, 0, 0, ...], dtype=float32)
>>> jnp.take(choices, indices, axis=0)
Array([0, 0, 0, 0, ...], dtype=float32)
```
Working as intended. But if we instead vmap the call to `jnp.take`:
```python
>>> jax.vmap(lambda x, i: jnp.take(x, i, axis=0))(jnp.stack([choices] * 256), indices)
Array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., nan, nan,
nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan,
nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan,
nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan,
nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan,
nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan,
nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan,
nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan,
nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan,
nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan,
nan, nan, nan, nan, nan, nan, nan, nan, nan], dtype=float32)
```
Not working as intended! The `nan`s are being filled in as if there are out-of-bounds indices, even though all the values in the `int8` array are zero. Perhaps this code is causing some internal indexing calculations to be done in `int8`?
If you make the arrays longer, you can see the values oscillate between `0` and `nan`, which makes it clear it's some sort of overflow. See [this reproduction in Colab](https://colab.research.google.com/drive/1dRDM8z15x-Cu26i6TcLLFRPOu-8KT6H0?usp=sharing).
For what it's worth, the same issue seems to occur in `int16`, but only when you reach `2**31` in length. Overall strongly suggesting to me that something is being keyed off the index dtype, when it really shouldn't.
Also, it only seems to occur with `take(..., mode=None)` (the default) or `take(..., mode='fill')`, which both internally use `lax.GatherScatterMode.FILL_OR_DROP`. So perhaps the error can be found in the logic for filling. I don't know how to inspect the code for that, since it's in the `'gather'` primitive.
A workaround is to always upcast indices to (u)int32/int64, which is perhaps what is already happening with the regular indexing operation `x[i]`. I would also separately lobby for supporting mode 'promise_in_bounds' in `jnp.take`, and making that the default.
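For concreteness, a minimal sketch of that upcast workaround applied to the repro above (the `int32` cast is the only change):
```python
import jax
import jax.numpy as jnp

indices = jnp.zeros(256, dtype=jnp.int8)
choices = jnp.zeros(1, dtype=jnp.float32)
# Cast the indices to a wide dtype before the vmapped take.
out = jax.vmap(lambda x, i: jnp.take(x, i.astype(jnp.int32), axis=0))(
    jnp.stack([choices] * 256), indices)
# out is all zeros, with no spurious nan fill values.
```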
### What jax/jaxlib version are you using?
jax v0.4.11, jaxlib v0.4.11
### Which accelerator(s)?
Replicated on CPU, Nvidia GPU
| Thanks for the clear report!
I think the issue comes from this line: https://github.com/google/jax/blob/6bc74d2a9874e1fe93a45191bb829c07dfee04fa/jax/_src/lax/slicing.py#L1332-L1334
If the shape of the index array is not representable in the dtype of the index array, it will overflow. We'll need to cast this to a wider type for the sake of the computation. | 2023-06-27T08:47:02 |
google/jax | 16,600 | google__jax-16600 | [
"16579"
]
| 16f72cf903bd3646e371ada62a4c155c5442b2fc | diff --git a/jax/_src/lax/linalg.py b/jax/_src/lax/linalg.py
--- a/jax/_src/lax/linalg.py
+++ b/jax/_src/lax/linalg.py
@@ -2110,7 +2110,7 @@ def _schur_batching_rule(batched_args, batch_dims, *, compute_schur_vectors,
select_callable=select_callable), (0,) * (1 + compute_schur_vectors)
-def _schur_jvp_rule(primals, tangents, *, compute_schur_vectors, sort_eig_vals):
+def _schur_jvp_rule(primals, tangents, **kwds):
raise NotImplementedError(
'The differentiation rules for the Schur factorization have not been implemented.'
)
| Gradient of `sqrtm`
### Description
I require taking gradients with respect to the square root of a matrix. So, I have turned to `jax.scipy.linalg.sqrtm`. Based on previous discussions and issues on this repo, I understand that it is only implemented on CPU. I can accept this for now -- I can always do a callback when necessary and just eat the computational overhead.
But, I am getting an error when trying to calculate gradients w.r.t. this operation. For example, see the following minimal example:
```python
import jax.numpy as jnp
from jax.scipy.linalg import sqrtm
from jax import grad
arr = jnp.ones((2, 2))
sqrt_arr = sqrtm(arr) # This works
grad_sqrt_arr = grad(sqrtm)(arr) # This does not work
```
This yields the following error:
```
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/scipy/linalg.py", line 935, in sqrtm
return _sqrtm(A)
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/scipy/linalg.py", line 915, in _sqrtm
T, Z = schur(A, output='complex')
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/scipy/linalg.py", line 206, in schur
return _schur(a, output)
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/scipy/linalg.py", line 199, in _schur
return lax_linalg.schur(a)
jax._src.source_info_util.JaxStackTraceBeforeTransformation: TypeError: _schur_jvp_rule() got an unexpected keyword argument 'select_callable'
The preceding stack trace is the source of the JAX operation that, once transformed by JAX, triggered the following exception.
--------------------
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/traceback_util.py", line 166, in reraise_with_filtered_traceback
return fun(*args, **kwargs)
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/api.py", line 647, in grad_f
_, g = value_and_grad_f(*args, **kwargs)
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/traceback_util.py", line 166, in reraise_with_filtered_traceback
return fun(*args, **kwargs)
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/api.py", line 723, in value_and_grad_f
ans, vjp_py = _vjp(f_partial, *dyn_args, reduce_axes=reduce_axes)
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/api.py", line 2208, in _vjp
out_primal, out_vjp = ad.vjp(
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/interpreters/ad.py", line 139, in vjp
out_primals, pvals, jaxpr, consts = linearize(traceable, *primals)
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/interpreters/ad.py", line 128, in linearize
jaxpr, out_pvals, consts = pe.trace_to_jaxpr_nounits(jvpfun_flat, in_pvals)
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/profiler.py", line 314, in wrapper
return func(*args, **kwargs)
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/interpreters/partial_eval.py", line 777, in trace_to_jaxpr_nounits
jaxpr, (out_pvals, consts, env) = fun.call_wrapped(pvals)
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/linear_util.py", line 188, in call_wrapped
ans = self.f(*args, **dict(self.params, **kwargs))
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/scipy/linalg.py", line 935, in sqrtm
return _sqrtm(A)
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/traceback_util.py", line 166, in reraise_with_filtered_traceback
return fun(*args, **kwargs)
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/pjit.py", line 250, in cache_miss
outs, out_flat, out_tree, args_flat, jaxpr = _python_pjit_helper(
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/pjit.py", line 163, in _python_pjit_helper
out_flat = pjit_p.bind(*args_flat, **params)
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/core.py", line 2677, in bind
return self.bind_with_trace(top_trace, args, params)
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/core.py", line 383, in bind_with_trace
out = trace.process_primitive(self, map(trace.full_raise, args), params)
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/interpreters/ad.py", line 315, in process_primitive
primal_out, tangent_out = jvp(primals_in, tangents_in, **params)
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/pjit.py", line 1465, in _pjit_jvp
jaxpr_jvp, is_nz_tangents_out = ad.jvp_jaxpr(
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/interpreters/ad.py", line 699, in jvp_jaxpr
return _jvp_jaxpr(jaxpr, tuple(nonzeros), tuple(instantiate))
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/interpreters/ad.py", line 709, in _jvp_jaxpr
jaxpr_out, avals_out, literals_out = pe.trace_to_jaxpr_dynamic(f_jvp, avals_in)
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/profiler.py", line 314, in wrapper
return func(*args, **kwargs)
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/interpreters/partial_eval.py", line 2155, in trace_to_jaxpr_dynamic
jaxpr, out_avals, consts = trace_to_subjaxpr_dynamic(
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/interpreters/partial_eval.py", line 2177, in trace_to_subjaxpr_dynamic
ans = fun.call_wrapped(*in_tracers_)
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/linear_util.py", line 188, in call_wrapped
ans = self.f(*args, **dict(self.params, **kwargs))
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/core.py", line 229, in jaxpr_as_fun
return eval_jaxpr(closed_jaxpr.jaxpr, closed_jaxpr.consts, *args)
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/core.py", line 448, in eval_jaxpr
ans = eqn.primitive.bind(*subfuns, *map(read, eqn.invars), **bind_params)
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/core.py", line 2677, in bind
return self.bind_with_trace(top_trace, args, params)
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/core.py", line 383, in bind_with_trace
out = trace.process_primitive(self, map(trace.full_raise, args), params)
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/interpreters/ad.py", line 315, in process_primitive
primal_out, tangent_out = jvp(primals_in, tangents_in, **params)
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/pjit.py", line 1465, in _pjit_jvp
jaxpr_jvp, is_nz_tangents_out = ad.jvp_jaxpr(
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/interpreters/ad.py", line 699, in jvp_jaxpr
return _jvp_jaxpr(jaxpr, tuple(nonzeros), tuple(instantiate))
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/interpreters/ad.py", line 709, in _jvp_jaxpr
jaxpr_out, avals_out, literals_out = pe.trace_to_jaxpr_dynamic(f_jvp, avals_in)
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/profiler.py", line 314, in wrapper
return func(*args, **kwargs)
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/interpreters/partial_eval.py", line 2155, in trace_to_jaxpr_dynamic
jaxpr, out_avals, consts = trace_to_subjaxpr_dynamic(
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/interpreters/partial_eval.py", line 2177, in trace_to_subjaxpr_dynamic
ans = fun.call_wrapped(*in_tracers_)
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/linear_util.py", line 188, in call_wrapped
ans = self.f(*args, **dict(self.params, **kwargs))
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/core.py", line 229, in jaxpr_as_fun
return eval_jaxpr(closed_jaxpr.jaxpr, closed_jaxpr.consts, *args)
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/core.py", line 448, in eval_jaxpr
ans = eqn.primitive.bind(*subfuns, *map(read, eqn.invars), **bind_params)
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/core.py", line 380, in bind
return self.bind_with_trace(find_top_trace(args), args, params)
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/core.py", line 383, in bind_with_trace
out = trace.process_primitive(self, map(trace.full_raise, args), params)
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/interpreters/ad.py", line 315, in process_primitive
primal_out, tangent_out = jvp(primals_in, tangents_in, **params)
jax._src.traceback_util.UnfilteredStackTrace: TypeError: _schur_jvp_rule() got an unexpected keyword argument 'select_callable'
The stack trace below excludes JAX-internal frames.
The preceding is the original exception that occurred, unmodified.
--------------------
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/ryan/.local/lib/python3.9/site-packages/jax/_src/scipy/linalg.py", line 935, in sqrtm
return _sqrtm(A)
TypeError: _schur_jvp_rule() got an unexpected keyword argument 'select_callable'
```
### What jax/jaxlib version are you using?
jax v0.4.13, jaxlib v0.4.13
### Which accelerator(s) are you using?
CPU
### Additional system info
Python 3.9.7, Ubuntu 18.04.6
### NVIDIA GPU info
_No response_
| Thanks for the report. There is indeed a missing argument in the schur JVP implementation, but unfortunately all it's preventing is a `NotImplementedError`: https://github.com/google/jax/blob/f463437c7ee81f915d2404d302b6bc5b32ecffbe/jax/_src/lax/linalg.py#L2113-L2116
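As a stopgap, here is a rough sketch of a workaround for the special case of symmetric positive-definite inputs with distinct eigenvalues (an assumption this sketch adds; the general case still needs the Schur-based rules):
```python
import jax
import jax.numpy as jnp

def psd_sqrtm(a):
    # Differentiable because eigh has JVP rules; valid for symmetric PSD `a`.
    w, v = jnp.linalg.eigh(a)
    return (v * jnp.sqrt(w)) @ v.T

a = jnp.array([[2.0, 1.0], [1.0, 3.0]])  # example input, not the one in the report
print(jax.grad(lambda m: psd_sqrtm(m).sum())(a))
```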
I don't think this is trivial to implement unfortunately; see some related discussion at https://github.com/google/jax/issues/669. | 2023-06-30T09:31:51 |
|
google/jax | 16,642 | google__jax-16642 | [
"16592"
]
| e2478685b98ddc21bfd1dd3f3159dce9a0389e6a | diff --git a/jax/_src/lax/slicing.py b/jax/_src/lax/slicing.py
--- a/jax/_src/lax/slicing.py
+++ b/jax/_src/lax/slicing.py
@@ -54,6 +54,50 @@ def slice(operand: ArrayLike, start_indices: Sequence[int],
"""Wraps XLA's `Slice
<https://www.tensorflow.org/xla/operation_semantics#slice>`_
operator.
+
+ Args:
+ operand: an array to slice
+ start_indices: a sequence of ``operand.ndim`` start indices.
+ limit_indices: a sequence of ``operand.ndim`` limit indices.
+ strides: an optional sequence of ``operand.ndim`` strides.
+
+ Returns:
+ The sliced array
+
+ Examples:
+ Here are some examples of simple two-dimensional slices:
+
+ >>> x = jnp.arange(12).reshape(3, 4)
+ >>> x
+ Array([[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11]], dtype=int32)
+
+ >>> lax.slice(x, (1, 0), (3, 2))
+ Array([[4, 5],
+ [8, 9]], dtype=int32)
+
+ >>> lax.slice(x, (0, 0), (3, 4), (1, 2))
+ Array([[ 0, 2],
+ [ 4, 6],
+ [ 8, 10]], dtype=int32)
+
+ These two examples are equivalent to the following Python slicing syntax:
+
+ >>> x[1:3, 0:2]
+ Array([[4, 5],
+ [8, 9]], dtype=int32)
+
+ >>> x[0:3, 0:4:2]
+ Array([[ 0, 2],
+ [ 4, 6],
+ [ 8, 10]], dtype=int32)
+
+ See Also:
+ - :attr:`jax.numpy.ndarray.at`
+ - :func:`jax.lax.slice_in_dim`
+ - :func:`jax.lax.index_in_dim`
+ - :func:`jax.lax.dynamic_slice`
"""
return slice_p.bind(operand, start_indices=tuple(start_indices),
limit_indices=tuple(limit_indices),
@@ -101,6 +145,12 @@ def dynamic_slice(
>>> dynamic_slice(x, (1, 1), (2, 4))
Array([[ 4, 5, 6, 7],
[ 8, 9, 10, 11]], dtype=int32)
+
+ See Also:
+ - :attr:`jax.numpy.ndarray.at`
+ - :func:`jax.lax.slice`
+ - :func:`jax.lax.dynamic_slice_in_dim`
+ - :func:`jax.lax.dynamic_index_in_dim`
"""
start_indices = _dynamic_slice_indices(operand, start_indices)
if jax.config.jax_dynamic_shapes:
@@ -151,6 +201,11 @@ def dynamic_update_slice(operand: Union[Array, np.ndarray], update: ArrayLike,
[0., 0., 1., 1.],
[0., 0., 1., 1.],
[0., 0., 0., 0.]], dtype=float32)
+
+ See Also:
+ - :attr:`jax.numpy.ndarray.at`
+ - :attr:`lax.dynamic_update_index_in_dim`
+ - :attr:`lax.dynamic_update_slice_in_dim`
"""
start_indices = _dynamic_slice_indices(operand, start_indices)
return dynamic_update_slice_p.bind(operand, update, *start_indices)
@@ -646,7 +701,53 @@ def index_take(src: Array, idxs: Array, axes: Sequence[int]) -> Array:
def slice_in_dim(operand: Union[Array, np.ndarray], start_index: Optional[int],
limit_index: Optional[int],
stride: int = 1, axis: int = 0) -> Array:
- """Convenience wrapper around slice applying to only one dimension."""
+ """Convenience wrapper around :func:`lax.slice` applying to only one dimension.
+
+ This is effectively equivalent to ``operand[..., start_index:limit_index:stride]``
+ with the indexing applied on the specified axis.
+
+ Args:
+ operand: an array to slice.
+ start_index: an optional start index (defaults to zero)
+ limit_index: an optional end index (defaults to operand.shape[axis])
+ stride: an optional stride (defaults to 1)
+ axis: the axis along which to apply the slice (defaults to 0)
+
+ Returns:
+ An array containing the slice.
+
+ Examples:
+ Here is a one-dimensional example:
+
+ >>> x = jnp.arange(4)
+ >>> lax.slice_in_dim(x, 1, 3)
+ Array([1, 2], dtype=int32)
+
+ Here are some two-dimensional examples:
+
+ >>> x = jnp.arange(12).reshape(4, 3)
+ >>> x
+ Array([[ 0, 1, 2],
+ [ 3, 4, 5],
+ [ 6, 7, 8],
+ [ 9, 10, 11]], dtype=int32)
+
+ >>> lax.slice_in_dim(x, 1, 3)
+ Array([[3, 4, 5],
+ [6, 7, 8]], dtype=int32)
+
+ >>> lax.slice_in_dim(x, 1, 3, axis=1)
+ Array([[ 1, 2],
+ [ 4, 5],
+ [ 7, 8],
+ [10, 11]], dtype=int32)
+
+ See Also:
+ - :attr:`jax.numpy.ndarray.at`
+ - :func:`jax.lax.slice`
+ - :func:`jax.lax.index_in_dim`
+ - :func:`jax.lax.dynamic_slice_in_dim`
+ """
start_indices = [0] * operand.ndim
limit_indices = list(operand.shape)
strides = [1] * operand.ndim
@@ -674,7 +775,51 @@ def slice_in_dim(operand: Union[Array, np.ndarray], start_index: Optional[int],
def index_in_dim(operand: Union[Array, np.ndarray], index: int, axis: int = 0,
keepdims: bool = True) -> Array:
- """Convenience wrapper around slice to perform int indexing."""
+ """Convenience wrapper around :func:`lax.slice` to perform int indexing.
+
+ This is effectively equivalent to ``operand[..., start_index:limit_index:stride]``
+ with the indexing applied on the specified axis.
+
+ Args:
+ operand: an array to index.
+ index: integer index
+ axis: the axis along which to apply the index (defaults to 0)
+ keepdims: boolean specifying whether the output array should preserve the
+ rank of the input (default=True)
+
+ Returns:
+ The subarray at the specified index.
+
+ Examples:
+ Here is a one-dimensional example:
+
+ >>> x = jnp.arange(4)
+ >>> lax.index_in_dim(x, 2)
+ Array([2], dtype=int32)
+
+ >>> lax.index_in_dim(x, 2, keepdims=False)
+ Array(2, dtype=int32)
+
+ Here are some two-dimensional examples:
+
+ >>> x = jnp.arange(12).reshape(3, 4)
+ >>> x
+ Array([[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11]], dtype=int32)
+
+ >>> lax.index_in_dim(x, 1)
+ Array([[4, 5, 6, 7]], dtype=int32)
+
+ >>> lax.index_in_dim(x, 1, axis=1, keepdims=False)
+ Array([1, 5, 9], dtype=int32)
+
+ See Also:
+ - :attr:`jax.numpy.ndarray.at`
+ - :func:`jax.lax.slice`
+ - :func:`jax.lax.slice_in_dim`
+ - :func:`jax.lax.dynamic_index_in_dim`
+ """
index, axis = core._canonicalize_dimension(index), int(axis)
axis_size = operand.shape[axis]
wrapped_index = index + axis_size if index < 0 else index
@@ -691,7 +836,52 @@ def index_in_dim(operand: Union[Array, np.ndarray], index: int, axis: int = 0,
def dynamic_slice_in_dim(operand: Union[Array, np.ndarray],
start_index: ArrayLike,
slice_size: int, axis: int = 0) -> Array:
- """Convenience wrapper around dynamic_slice applying to one dimension."""
+ """Convenience wrapper around :func:`lax.dynamic_slice` applied to one dimension.
+
+ This is roughly equivalent to the following Python indexing syntax applied
+ along the specified axis: ``operand[..., start_index:start_index + slice_size]``.
+
+ Args:
+ operand: an array to slice.
+ start_index: the (possibly dynamic) start index
+ slice_size: the static slice size
+ axis: the axis along which to apply the slice (defaults to 0)
+
+ Returns:
+ An array containing the slice.
+
+ Examples:
+ Here is a one-dimensional example:
+
+ >>> x = jnp.arange(5)
+ >>> dynamic_slice_in_dim(x, 1, 3)
+ Array([1, 2, 3], dtype=int32)
+
+ Like `jax.lax.dynamic_slice`, out-of-bound slices will be clipped to the
+ valid range:
+
+ >>> dynamic_slice_in_dim(x, 4, 3)
+ Array([2, 3, 4], dtype=int32)
+
+ Here is a two-dimensional example:
+
+ >>> x = jnp.arange(12).reshape(3, 4)
+ >>> x
+ Array([[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11]], dtype=int32)
+
+ >>> dynamic_slice_in_dim(x, 1, 2, axis=1)
+ Array([[ 1, 2],
+ [ 5, 6],
+ [ 9, 10]], dtype=int32)
+
+ See Also:
+ - :attr:`jax.numpy.ndarray.at`
+ - :func:`jax.lax.slice_in_dim`
+ - :func:`jax.lax.dynamic_slice`
+ - :func:`jax.lax.dynamic_index_in_dim`
+ """
start_indices: list[ArrayLike] = [lax._const(start_index, 0)] * operand.ndim
slice_sizes = list(operand.shape)
@@ -704,7 +894,48 @@ def dynamic_slice_in_dim(operand: Union[Array, np.ndarray],
def dynamic_index_in_dim(operand: Union[Array, np.ndarray],
index: Union[int, Array],
axis: int = 0, keepdims: bool = True) -> Array:
- """Convenience wrapper around dynamic_slice to perform int indexing."""
+ """Convenience wrapper around dynamic_slice to perform int indexing.
+
+ This is roughly equivalent to the following Python indexing syntax applied
+ along the specified axis: ``operand[..., index]``.
+
+ Args:
+ operand: an array to slice.
+ index: the (possibly dynamic) start index
+ axis: the axis along which to apply the slice (defaults to 0)
+ keepdims: boolean specifying whether the output should have the same rank as
+ the input (default = True)
+
+ Returns:
+ An array containing the slice.
+
+ Examples:
+ Here is a one-dimensional example:
+
+ >>> x = jnp.arange(5)
+ >>> dynamic_index_in_dim(x, 1)
+ Array([1], dtype=int32)
+
+ >>> dynamic_index_in_dim(x, 1, keepdims=False)
+ Array(1, dtype=int32)
+
+ Here is a two-dimensional example:
+
+ >>> x = jnp.arange(12).reshape(3, 4)
+ >>> x
+ Array([[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11]], dtype=int32)
+
+ >>> dynamic_index_in_dim(x, 1, axis=1, keepdims=False)
+ Array([1, 5, 9], dtype=int32)
+
+ See Also:
+ - :attr:`jax.numpy.ndarray.at`
+ - :func:`jax.lax.index_in_dim`
+ - :func:`jax.lax.dynamic_slice`
+ - :func:`jax.lax.dynamic_slice_in_dim`
+ """
result = dynamic_slice_in_dim(operand, index, 1, axis)
if keepdims:
return result
@@ -715,8 +946,58 @@ def dynamic_index_in_dim(operand: Union[Array, np.ndarray],
def dynamic_update_slice_in_dim(operand: Union[Array, np.ndarray],
update: ArrayLike,
start_index: ArrayLike, axis: int) -> Array:
- """Convenience wrapper around :func:`dynamic_update_slice` to update a slice
- in a single ``axis``.
+ """Convenience wrapper around :func:`dynamic_update_slice` to update
+ a slice in a single ``axis``.
+
+ Args:
+ operand: an array to slice.
+ update: an array containing the new values to write onto `operand`.
+ start_index: a single scalar index
+ axis: the axis of the update.
+
+ Returns:
+ The updated array
+
+ Examples:
+
+ >>> x = jnp.zeros(6)
+ >>> y = jnp.ones(3)
+ >>> dynamic_update_slice_in_dim(x, y, 2, axis=0)
+ Array([0., 0., 1., 1., 1., 0.], dtype=float32)
+
+ If the update slice is too large to fit in the array, the start
+ index will be adjusted to make it fit:
+
+ >>> dynamic_update_slice_in_dim(x, y, 3, axis=0)
+ Array([0., 0., 0., 1., 1., 1.], dtype=float32)
+ >>> dynamic_update_slice_in_dim(x, y, 5, axis=0)
+ Array([0., 0., 0., 1., 1., 1.], dtype=float32)
+
+ Here is an example of a two-dimensional slice update:
+
+ >>> x = jnp.zeros((4, 4))
+ >>> y = jnp.ones((2, 4))
+ >>> dynamic_update_slice_in_dim(x, y, 1, axis=0)
+ Array([[0., 0., 0., 0.],
+ [1., 1., 1., 1.],
+ [1., 1., 1., 1.],
+ [0., 0., 0., 0.]], dtype=float32)
+
+ Note that the shape of the additional axes in ``update`` need not
+ match the associated dimensions of the ``operand``:
+
+ >>> y = jnp.ones((2, 3))
+ >>> dynamic_update_slice_in_dim(x, y, 1, axis=0)
+ Array([[0., 0., 0., 0.],
+ [1., 1., 1., 0.],
+ [1., 1., 1., 0.],
+ [0., 0., 0., 0.]], dtype=float32)
+
+ See Also:
+ - :attr:`jax.numpy.ndarray.at`
+ - :func:`jax.lax.dynamic_update_slice`
+ - :func:`jax.lax.dynamic_update_index_in_dim`
+ - :func:`jax.lax.dynamic_slice_in_dim`
"""
axis = int(axis)
start_indices: list[ArrayLike] = [lax._const(start_index, 0)] * lax._ndim(operand)
@@ -728,7 +1009,59 @@ def dynamic_update_index_in_dim(operand: Union[Array, np.ndarray],
update: ArrayLike, index: ArrayLike,
axis: int) -> Array:
"""Convenience wrapper around :func:`dynamic_update_slice` to update a slice
- of size 1 in a single ``axis``.
+ of size 1 in a single ``axis``.
+
+ Args:
+ operand: an array to slice.
+ update: an array containing the new values to write onto `operand`.
+ index: a single scalar index
+ axis: the axis of the update.
+
+ Returns:
+ The updated array
+
+ Examples:
+
+ >>> x = jnp.zeros(6)
+ >>> y = 1.0
+ >>> dynamic_update_index_in_dim(x, y, 2, axis=0)
+ Array([0., 0., 1., 0., 0., 0.], dtype=float32)
+
+ >>> y = jnp.array([1.0])
+ >>> dynamic_update_index_in_dim(x, y, 2, axis=0)
+ Array([0., 0., 1., 0., 0., 0.], dtype=float32)
+
+ If the specified index is out of bounds, the index will be clipped to the
+ valid range:
+
+ >>> dynamic_update_index_in_dim(x, y, 10, axis=0)
+ Array([0., 0., 0., 0., 0., 1.], dtype=float32)
+
+ Here is an example of a two-dimensional dynamic index update:
+
+ >>> x = jnp.zeros((4, 4))
+ >>> y = jnp.ones(4)
+ >>> dynamic_update_index_in_dim(x, y, 1, axis=0)
+ Array([[0., 0., 0., 0.],
+ [1., 1., 1., 1.],
+ [0., 0., 0., 0.],
+ [0., 0., 0., 0.]], dtype=float32)
+
+ Note that the shape of the additional axes in ``update`` need not
+ match the associated dimensions of the ``operand``:
+
+ >>> y = jnp.ones((1, 3))
+ >>> dynamic_update_index_in_dim(x, y, 1, 0)
+ Array([[0., 0., 0., 0.],
+ [1., 1., 1., 0.],
+ [0., 0., 0., 0.],
+ [0., 0., 0., 0.]], dtype=float32)
+
+ See Also:
+ - :attr:`jax.numpy.ndarray.at`
+ - :func:`jax.lax.dynamic_update_slice`
+ - :func:`jax.lax.dynamic_update_index_in_dim`
+ - :func:`jax.lax.dynamic_index_in_dim`
"""
axis = int(axis)
if lax._ndim(update) != lax._ndim(operand):
| Stricter validation of inputs to `dynamic_update_slice_in_dim`
I was extremely surprised to find that `start_indices` is implicitly set to 0 for dimensions other than `axis`, rather than requiring that all other dimensions be equal or broadcastable to each other:
```python
>>> z = jnp.zeros((3, 5))
>>> u = jnp.ones((2, 2))
>>> jax.lax.dynamic_update_slice_in_dim(z, u, 1, axis=0)
Array([[0., 0., 0., 0., 0.],
[1., 1., 0., 0., 0.],
[1., 1., 0., 0., 0.]], dtype=float32)
```
I think it's non-obvious that this is how the `dynamic_update_*_in_dim` methods would work, so noting it in the documentation would be nice.
This led to me really struggling to track down a bug yesterday, which turned out to be in the following code:
```python
updated = jax.lax.dynamic_update_slice_in_dim(operand, update, start_index, axis=2)
```
Unfortunately, I called this code both with arrays of shape `(batch, head, time, hidden)` _and_ `(batch2, batch, head, time, hidden)`. When I was looking for the bug, I thought this couldn't be the problem since I was assuming it would error if it received "incompatible" shapes. Later when I did find it, I assumed it was just broadcasting since the `time` dimension was size 1. (Thanks for the tip on Twitter @mattjj!)
If this _is_ an unintended use of `dynamic_update_slice_in_dim`, it might be nice to do a dimension check, but maybe people are relying on this behavior.
| Hi - thanks for the report, and sorry for the unclear semantics here. Can you say more about what your expected result would be? Were you expecting an error because `u.shape[1] != z.shape[1]`?
Yeah I think I was expecting the semantics to be something like numpy slice updates, so for axis=2:
```python
operand[:, :, start_index:start_index + update.shape[2]] = update
```
I think another way this might trip someone up is if they were expecting broadcasting of size-1 dimensions, but instead got the 0-th index updated in those dimensions.
Makes sense, thanks. I believe this was the intended use-case for this function, I might add some shape validation and see if it breaks any code. | 2023-07-06T17:36:20 |
|
google/jax | 16,644 | google__jax-16644 | [
"4013"
]
| a1a01dd86ecfa801127c8adf7cbce70dc9ff625d | diff --git a/jax/_src/prng.py b/jax/_src/prng.py
--- a/jax/_src/prng.py
+++ b/jax/_src/prng.py
@@ -64,6 +64,7 @@
Device = xc.Device
Shard = Any # TODO(jakevdp): fix circular imports and import Shard
+Shape = tuple[int, ...]
UINT_DTYPES = {
8: jnp.uint8, 16: jnp.uint16, 32: jnp.uint32, 64: jnp.uint64} # type: ignore[has-type]
@@ -80,14 +81,14 @@ class PRNGImpl(NamedTuple):
seed :: int[] -> K
fold_in :: K -> int[] -> K
- split[n] :: K -> K[n]
- random_bits[shape, bit_width] :: K -> uint<bit_width>[shape]
+ split[shape] :: K -> K[*shape]
+ random_bits[shape, bit_width] :: K -> uint<bit_width>[*shape]
A PRNG implementation is adapted to an array-like object of keys
``K`` by the ``PRNGKeyArray`` class, which should be created via the
``seed_with_impl`` function.
"""
- key_shape: core.Shape
+ key_shape: Shape
seed: Callable
split: Callable
random_bits: Callable
@@ -717,31 +718,31 @@ def random_seed_lowering(ctx, seeds, *, impl):
mlir.register_lowering(random_seed_p, random_seed_lowering)
-def random_split(keys, count):
- return random_split_p.bind(keys, count=count)
+def random_split(keys, shape: Shape):
+ return random_split_p.bind(keys, shape=shape)
random_split_p = core.Primitive('random_split')
ad.defjvp_zero(random_split_p)
batching.defvectorized(random_split_p)
@random_split_p.def_abstract_eval
-def random_split_abstract_eval(keys_aval, *, count):
- return keys_shaped_array(keys_aval.dtype.impl, (*keys_aval.shape, count))
+def random_split_abstract_eval(keys_aval, *, shape):
+ return keys_shaped_array(keys_aval.dtype.impl, (*keys_aval.shape, *shape))
@random_split_p.def_impl
-def random_split_impl(keys, *, count):
+def random_split_impl(keys, *, shape):
base_arr = random_split_impl_base(
- keys.impl, keys.unsafe_raw_array(), keys.ndim, count=count)
+ keys.impl, keys.unsafe_raw_array(), keys.ndim, shape=shape)
return PRNGKeyArrayImpl(keys.impl, base_arr)
-def random_split_impl_base(impl, base_arr, keys_ndim, *, count):
- split = iterated_vmap_unary(keys_ndim, lambda k: impl.split(k, count))
+def random_split_impl_base(impl, base_arr, keys_ndim, *, shape):
+ split = iterated_vmap_unary(keys_ndim, lambda k: impl.split(k, shape))
return split(base_arr)
-def random_split_lowering(ctx, keys, *, count):
+def random_split_lowering(ctx, keys, *, shape):
aval, = ctx.avals_in
impl = aval.dtype.impl
- split = iterated_vmap_unary(aval.ndim, lambda k: impl.split(k, count))
+ split = iterated_vmap_unary(aval.ndim, lambda k: impl.split(k, shape))
split_lowering = mlir.lower_fun(split, multiple_results=False)
return mlir.delegate_lowering(
ctx, split_lowering, keys,
@@ -1249,28 +1250,29 @@ def threefry_2x32(keypair, count):
return lax.reshape(out[:-1] if odd_size else out, count.shape)
-def threefry_split(key: typing.Array, num: core.DimSize) -> typing.Array:
- num = core.concrete_dim_or_error(num)
- return _threefry_split(key, num)
+def threefry_split(key: typing.Array, shape: Shape) -> typing.Array:
+ shape = tuple(unsafe_map(core.concrete_dim_or_error, shape))
+ return _threefry_split(key, shape)
@partial(jit, static_argnums=(1,))
-def _threefry_split(key, num) -> typing.Array:
+def _threefry_split(key, shape) -> typing.Array:
if config.jax_threefry_partitionable:
- return _threefry_split_foldlike(key, num) # type: ignore
+ return _threefry_split_foldlike(key, shape) # type: ignore
else:
- return _threefry_split_original(key, num) # type: ignore
+ return _threefry_split_original(key, shape) # type: ignore
@partial(jit, static_argnums=(1,), inline=True)
-def _threefry_split_original(key, num) -> typing.Array:
+def _threefry_split_original(key, shape) -> typing.Array:
+ num = math.prod(shape)
counts = lax.iota(np.uint32, num * 2)
- return lax.reshape(threefry_2x32(key, counts), (num, 2))
+ return lax.reshape(threefry_2x32(key, counts), (*shape, 2))
@partial(jit, static_argnums=(1,), inline=True)
-def _threefry_split_foldlike(key, num) -> typing.Array:
+def _threefry_split_foldlike(key, shape) -> typing.Array:
k1, k2 = key
- counts1, counts2 = iota_2x32_shape((num,))
+ counts1, counts2 = iota_2x32_shape(shape)
bits1, bits2 = threefry2x32_p.bind(k1, k2, counts1, counts2)
- return jnp.stack([bits1, bits2], axis=1)
+ return jnp.stack([bits1, bits2], axis=bits1.ndim)
def threefry_fold_in(key: typing.Array, data: typing.Array) -> typing.Array:
@@ -1329,7 +1331,7 @@ def _threefry_random_bits_original(key: typing.Array, bit_width, shape):
if not nblocks:
bits = threefry_2x32(key, lax.iota(np.uint32, rem))
else:
- keys = threefry_split(key, nblocks + 1)
+ keys = threefry_split(key, (nblocks + 1,))
subkeys, last_key = keys[:-1], keys[-1]
blocks = vmap(threefry_2x32, in_axes=(0, None))(subkeys, lax.iota(np.uint32, jnp.iinfo(np.uint32).max))
last = threefry_2x32(last_key, lax.iota(np.uint32, rem))
@@ -1378,13 +1380,15 @@ def _rbg_seed(seed: typing.Array) -> typing.Array:
halfkey = threefry_seed(seed)
return jnp.concatenate([halfkey, halfkey])
-def _rbg_split(key: typing.Array, num: int) -> typing.Array:
+def _rbg_split(key: typing.Array, shape: Shape) -> typing.Array:
if config.jax_threefry_partitionable:
_threefry_split = _threefry_split_foldlike
else:
_threefry_split = _threefry_split_original
+ halfkeys = key.reshape(2, 2)
return vmap(
- _threefry_split, (0, None), 1)(key.reshape(2, 2), num).reshape(num, 4)
+ _threefry_split, (0, None), len(shape))(halfkeys, shape).reshape(
+ *shape, 4)
def _rbg_fold_in(key: typing.Array, data: typing.Array) -> typing.Array:
assert not data.shape
@@ -1407,10 +1411,12 @@ def _rbg_random_bits(key: typing.Array, bit_width: int, shape: Sequence[int]
fold_in=_rbg_fold_in,
tag='rbg')
-def _unsafe_rbg_split(key: typing.Array, num: int) -> typing.Array:
+def _unsafe_rbg_split(key: typing.Array, shape: Shape) -> typing.Array:
# treat 10 iterations of random bits as a 'hash function'
+ num = math.prod(shape)
_, keys = lax.rng_bit_generator(key, (10 * num, 4), dtype='uint32')
- return lax.slice_in_dim(keys, start_index=None, limit_index=None, stride=10)
+ return lax.slice_in_dim(
+ keys, start_index=None, limit_index=None, stride=10).reshape(*shape, 4)
def _unsafe_rbg_fold_in(key: typing.Array, data: typing.Array) -> typing.Array:
assert not data.shape
diff --git a/jax/_src/random.py b/jax/_src/random.py
--- a/jax/_src/random.py
+++ b/jax/_src/random.py
@@ -232,7 +232,7 @@ def fold_in(key: KeyArray, data: IntegerArray) -> KeyArray:
key, wrapped = _check_prng_key(key)
return _return_prng_keys(wrapped, _fold_in(key, data))
-def _split(key: KeyArray, num: int = 2) -> KeyArray:
+def _split(key: KeyArray, num: Union[int, tuple[int, ...]] = 2) -> KeyArray:
# Alternative to split() to use within random samplers.
# TODO(frostig): remove and use split(); we no longer need to wait
# to always enable_custom_prng
@@ -240,15 +240,16 @@ def _split(key: KeyArray, num: int = 2) -> KeyArray:
if key.ndim:
raise TypeError("split accepts a single key, but was given a key array of"
f"shape {key.shape} != (). Use jax.vmap for batching.")
- return prng.random_split(key, count=num)
+ shape = tuple(num) if isinstance(num, Sequence) else (num,)
+ return prng.random_split(key, shape=shape)
-def split(key: KeyArray, num: int = 2) -> KeyArray:
+def split(key: KeyArray, num: Union[int, tuple[int, ...]] = 2) -> KeyArray:
"""Splits a PRNG key into `num` new keys by adding a leading axis.
Args:
key: a PRNG key (from ``PRNGKey``, ``split``, ``fold_in``).
- num: optional, a positive integer indicating the number of keys to produce
- (default 2).
+ num: optional, a positive integer (or tuple of integers) indicating
+ the number (or shape) of keys to produce. Defaults to 2.
Returns:
An array-like object of `num` new PRNG keys.
diff --git a/jax/experimental/jax2tf/jax2tf.py b/jax/experimental/jax2tf/jax2tf.py
--- a/jax/experimental/jax2tf/jax2tf.py
+++ b/jax/experimental/jax2tf/jax2tf.py
@@ -2575,18 +2575,18 @@ def impl_wrapper(seeds: TfVal, *, impl):
tf_impl_with_avals[prng.random_seed_p] = _random_seed_impl
-def _random_split_impl(keys: TfVal, *, count, _in_avals, _out_aval):
+def _random_split_impl(keys: TfVal, *, shape, _in_avals, _out_aval):
keys_aval, = _in_avals
- def impl_wrapper(keys: TfVal, *, count):
+ def impl_wrapper(keys: TfVal, *, shape):
return prng.random_split_impl_base(
- keys_aval.dtype.impl, keys, keys_aval.ndim, count=count)
+ keys_aval.dtype.impl, keys, keys_aval.ndim, shape=shape)
converted_impl = _convert_jax_impl(
impl_wrapper, multiple_results=False, with_physical_avals=True,
extra_name_stack="random_split")
return converted_impl(
- keys, count=count, _in_avals=_in_avals, _out_aval=_out_aval)
+ keys, shape=shape, _in_avals=_in_avals, _out_aval=_out_aval)
tf_impl_with_avals[prng.random_split_p] = _random_split_impl
| diff --git a/tests/random_test.py b/tests/random_test.py
--- a/tests/random_test.py
+++ b/tests/random_test.py
@@ -612,6 +612,23 @@ def _CheckChiSquared(self, samples, pmf):
def make_key(self, seed):
return random.threefry2x32_key(seed)
+ @jtu.sample_product(
+ num=(None, 6, (6,), (2, 3), (2, 3, 4)),
+ )
+ def test_split_size_shape(self, num):
+ key = self.make_key(0)
+ if num is None:
+ key_split = jax.random.split(key)
+ else:
+ key_split = jax.random.split(key, num)
+
+ if num is None:
+ self.assertEqual(key_split.shape, (2, *key.shape))
+ elif type(num) is tuple:
+ self.assertEqual(key_split.shape, (*num, *key.shape))
+ else:
+ self.assertEqual(key_split.shape, (num, *key.shape))
+
@jtu.sample_product(dtype=jtu.dtypes.floating)
def testNumpyAndXLAAgreeOnFloatEndianness(self, dtype):
bits_dtype = np.uint32 if jnp.finfo(dtype).bits == 32 else np.uint64
@@ -2039,12 +2056,9 @@ def _double_threefry_seed(seed):
return jnp.vstack([threefry_seed(s1),
threefry_seed(s2)])
-def _double_threefry_split(key, num):
- split0 = threefry_split(key[0], num)
- split1 = threefry_split(key[1], num)
- merge = jnp.vstack([jnp.expand_dims(split0, axis=0),
- jnp.expand_dims(split1, axis=0)])
- return merge.transpose((1, 0, 2))
+def _double_threefry_split(key, shape):
+ return vmap(
+ threefry_split, (0, None), len(shape))(key, shape)
def _double_threefry_random_bits(key, bit_width, shape):
bits0 = threefry_random_bits(key[0], bit_width, shape)
| random.split() should allow for passing in a desired shape
The current API is `random.split(key, num)`, which returns an array of shape `(num, 2)`.
I'd like to have an API of the form `random.split(key, shape)`, which returns an array of shape `shape + (2,)`, without requiring an additional `reshape`. This would make `random.split` easier to use for preparing multi-dimensional keys, e.g., for use with repeated `vmap`.
Some options:
1. Write a function with a new name for this API, e.g., `random.split_with_shape` (this is a bad name)
2. Overload the `num` argument to allow for _either_ an integer size or a tuple of integers `shape`. This is a little tricky to get right, but in principle could be done (NumPy has a lot of APIs like this).
3. Make `num` optional and add a new optional `shape` parameter that could be used instead.
4. Deprecate `num` and replace it with `shape`. This would involve (2) and an extra temporary keyword-only argument. Eventually, we could require either (a) the new argument name `shape` or (b) the new argument name `shape` _and_ passing a tuple of integers instead of an integer.
I would lean towards option (3). It's a little messy to add an extra argument, but it is cleaner than overloading `random.split` to support either sizes or shapes. The latter would be consistent with how `numpy.random` works with the `size` argument, but not with how the rest of `jax.random` works.
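For concreteness, a sketch of the usage this would enable (the shape-accepting argument is hypothetical at this point, written as if the option above were implemented):
```python
import jax

key = jax.random.PRNGKey(0)
keys = jax.random.split(key, (2, 3))  # hypothetical: result shape (2, 3, 2)

# The key grid feeds a doubly-vmapped sampler with no manual reshape.
samples = jax.vmap(jax.vmap(lambda k: jax.random.normal(k, (4,))))(keys)
print(samples.shape)  # (2, 3, 4)
```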
| I like option 3 as well, though 2 also appeals to me.
Offhand, do you know why JAX tends not to allow integers in places where shapes are expected? That's something that I've found pretty jarring moving to jax from numpy. Was this a deliberate decision?
> Offhand, do you know why JAX tends not to allow integers in places where shapes are expected? That's something that I've found pretty jarring moving to jax from numpy. Was this a deliberate decision?
I don't know who wrote the first version of `random.py`. I think we'd have to ask @mattjj on this one.
Assigning to @mattjj for design opinions about polymorphic `num`/`shape` vs. adding a new shape parameter
I wrote the first version! I'll take a look at this when I can.
Hello, would it be possible to add something like this:
```py
def split_shape(key, shape):
if isinstance(shape, (tuple, list)):
return tuple(split_shape(k, s) for (k, s) in zip(jax.random.split(key, len(shape)), shape))
elif shape == 0:
return key
else:
return jax.random.split(key, shape)
```
For example:
```py
split_shape(k, 4) = split(k, 4)
split_shape(k, [4,4]) = (split(k1, 4), split(k2, 4))
```
I would implement it this way:
```python
def split_shape(key, shape):
size = math.prod(shape) if isinstance(shape, (tuple, list)) else shape
return jax.random.split(key, size).reshape(*shape, *key.shape)
```
Whether this logic should be contained in `random.split()` itself, I'm not sure.
The thing I was looking for, maybe it's not common, was to have recursively defined shape:
```py
a, bs = split_shape(k, (0, [2]*5))
model += linear(a)
for b0, b1 in bs:
model += linear(b0)
model += linear(b1)
```
I opened a PR with this proposed change in #16398 | 2023-07-06T21:10:01 |
google/jax | 16,645 | google__jax-16645 | [
"16594"
]
| f08e52faef3bb8a1f4ab733e3cf1efb2a003c4f1 | diff --git a/jax/_src/api.py b/jax/_src/api.py
--- a/jax/_src/api.py
+++ b/jax/_src/api.py
@@ -1275,6 +1275,11 @@ def _get_axis_size(name: str, shape: tuple[core.AxisSize, ...], axis: int
msg = f"{name} must have at least one non-None value in in_axes"
raise ValueError(msg)
+ def _get_argument_type(x):
+ try:
+ return shaped_abstractify(x).str_short()
+ except TypeError: #Catch all for user specified objects that can't be interpreted as a data type
+ return "unknown"
msg = [f"{name} got inconsistent sizes for array axes to be mapped:\n"]
args, kwargs = tree_unflatten(tree, vals)
try:
@@ -1283,15 +1288,15 @@ def _get_axis_size(name: str, shape: tuple[core.AxisSize, ...], axis: int
ba = None
if ba is None:
args_paths = [f'args{keystr(p)} '
- f'of type {shaped_abstractify(x).str_short()}'
+ f'of type {_get_argument_type(x)}'
for p, x in generate_key_paths(args)]
kwargs_paths = [f'kwargs{keystr(p)} '
- f'of type {shaped_abstractify(x).str_short()}'
+ f'of type {_get_argument_type(x)}'
for p, x in generate_key_paths(kwargs)]
key_paths = [*args_paths, *kwargs_paths]
else:
key_paths = [f'argument {name}{keystr(p)} '
- f'of type {shaped_abstractify(x).str_short()}'
+ f'of type {_get_argument_type(x)}'
for name, arg in ba.arguments.items()
for p, x in generate_key_paths(arg)]
all_sizes = [_get_axis_size(name, np.shape(x), d) if d is not None else None
| diff --git a/tests/api_test.py b/tests/api_test.py
--- a/tests/api_test.py
+++ b/tests/api_test.py
@@ -1699,6 +1699,15 @@ def test_device_put_sharding_mismatched_tree_different_leaf_count(self):
):
jax.device_put((x, y, z), device=(s1, s2))
+ def test_vmap_inconsistent_sizes_constructs_proper_error_message(self):
+ def f(x1, x2, g):
+ return g(x1, x2)
+
+ with self.assertRaisesRegex(
+ ValueError,
+ "vmap got inconsistent sizes for array axes to be mapped:"
+ ):
+ jax.vmap(f, (0, 0, None))(jnp.ones(2), jnp.ones(3), jnp.add)
def test_device_get_scalar(self):
x = np.arange(12.).reshape((3, 4)).astype("float32")
| vmap raises ambiguous exception when inconsistent sizes for array axes to be mapped occurs with user defined objects as static parameters
### Description
When trying to use vmap with functions that accept other functions, an ambiguous error is raised if there are inconsistent sizes for array axes to be mapped. A minimal example is shown below:
```
import jax.numpy as jnp
import jax
def functionThatTakesAFunction(array1,array2,function):
"""
Takes in two arrays, applies function 1 to one of them and returns
"""
array3 = function(array1)
return jnp.vstack((array3,array2)),jnp.hstack((array3,array2))
def multiplyBy2(array):
return 2 * array
batchMultiplyAndStack = jax.vmap(functionThatTakesAFunction,in_axes=[0,0,None])
test = jnp.array([[1,2],
[1,2]])
test2 = jnp.array([[1,2],
[1,2],
[1,2]])
testStacked = jnp.array((test,test,test))
test2Stacked = jnp.array((test2,test2,test2))
print(batchMultiplyAndStack(test,test2,multiplyBy2))
```
Running the above code produces the following stack trace:
```
Traceback (most recent call last):
File "/Users/user/Desktop/jax/jax/_src/api_util.py", line 562, in shaped_abstractify
return _shaped_abstractify_handlers[type(x)](x)
KeyError: <class 'function'>
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/user/Desktop/test.py", line 41, in <module>
print(batchMultiplyAndStack(test,test2,multiplyBy2))
File "/Users/user/Desktop/jax/jax/_src/traceback_util.py", line 166, in reraise_with_filtered_traceback
return fun(*args, **kwargs)
File "/Users/user/Desktop/jax/jax/_src/api.py", line 1239, in vmap_f
_mapped_axis_size(fun, in_tree, args_flat, in_axes_flat, "vmap"))
File "/Users/user/Desktop/jax/jax/_src/api.py", line 1298, in _mapped_axis_size
key_paths = [f'argument {name}{keystr(p)} '
File "/Users/user/Desktop/jax/jax/_src/api.py", line 1299, in <listcomp>
f'of type {_get_argument_type(x)}'
File "/Users/user/Desktop/jax/jax/_src/api.py", line 1280, in _get_argument_type
return shaped_abstractify(x).str_short()
File "/Users/user/Desktop/jax/jax/_src/api_util.py", line 564, in shaped_abstractify
return _shaped_abstractify_slow(x)
File "/Users/user/Desktop/jax/jax/_src/api_util.py", line 553, in _shaped_abstractify_slow
raise TypeError(
jax._src.traceback_util.UnfilteredStackTrace: TypeError: Cannot interpret value of type <class 'function'> as an abstract array; it does not have a dtype attribute
The stack trace below excludes JAX-internal frames.
The preceding is the original exception that occurred, unmodified.
--------------------
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/Users/user/Desktop/test.py", line 41, in <module>
print(batchMultiplyAndStack(test,test2,multiplyBy2))
TypeError: Cannot interpret value of type <class 'function'> as an abstract array; it does not have a dtype attribute
```
I've tracked down the issue to how `jax._src.api._mapped_axis_size` creates the exception to inform users about inconsistent sizes for array axes to be mapped. When trying to get the data type of a function object, a TypeError is raised and not caught, resulting in an ambiguous exception as it is unclear why the exception is raised (since vmap normally works just fine with functions passed as static arguments).
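A minimal sketch of the kind of guard that avoids this (names here are illustrative rather than the exact patch):
```python
from jax._src.api_util import shaped_abstractify

def _describe_arg(x):
    # Fall back to a generic label for leaves (e.g. functions) that cannot
    # be interpreted as abstract arrays, instead of letting TypeError escape.
    try:
        return shaped_abstractify(x).str_short()
    except TypeError:
        return "unknown"
```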
I've made a pull request that fixes the issue (#16593), but I'm not sure if my fix (making a wrapper around the problematic call to `shaped_abstractify(x).str_short()`) is compliant with jax's coding standards. With this fix we get the expected stack trace for this error:
```
Traceback (most recent call last):
File "/Users/user/Desktop/test.py", line 41, in <module>
print(batchMultiplyAndStack(test,test2,multiplyBy2))
File "/Users/user/Desktop/jax/jax/_src/traceback_util.py", line 166, in reraise_with_filtered_traceback
return fun(*args, **kwargs)
File "/Users/user/Desktop/jax/jax/_src/api.py", line 1239, in vmap_f
_mapped_axis_size(fun, in_tree, args_flat, in_axes_flat, "vmap"))
File "/Users/user/Desktop/jax/jax/_src/api.py", line 1322, in _mapped_axis_size
raise ValueError(''.join(msg)[:-2]) # remove last semicolon and newline
jax._src.traceback_util.UnfilteredStackTrace: ValueError: vmap got inconsistent sizes for array axes to be mapped:
* one axis had size 2: axis 0 of argument array1 of type int32[2,2];
* one axis had size 3: axis 0 of argument array2 of type int32[3,2]
The stack trace below excludes JAX-internal frames.
The preceding is the original exception that occurred, unmodified.
--------------------
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/Users/user/Desktop/test.py", line 41, in <module>
print(batchMultiplyAndStack(test,test2,multiplyBy2))
ValueError: vmap got inconsistent sizes for array axes to be mapped:
* one axis had size 2: axis 0 of argument array1 of type int32[2,2];
* one axis had size 3: axis 0 of argument array2 of type int32[3,2]
```
### What jax/jaxlib version are you using?
jax v0.4.14 jaxlib v0.4.13
### Which accelerator(s) are you using?
CPU
### Additional system info
Python 3.10.10, MacOS
### NVIDIA GPU info
_No response_
| Thanks for the report! Here's a shorter repro:
```python
import jax
import jax.numpy as jnp
def f(x1, x2, g):
return g(x1, x2)
jax.vmap(f, (0, 0, None))(jnp.ones(2), jnp.ones(3), jnp.add)
```
It seems the main issue is that when you pass invalid arguments to `vmap`, it can fail in constructing the error message if there are non-array inputs.
That's a much more succinct way to put it! That's exactly the issue.
The root cause seems to be an uncaught exception when trying to determine the data type of the non-array inputs. The pull request I made tries to fix it. I just cleaned it up, but in squashing commits to one commit, there seems to be a [email protected] author (<no****ly@google.com>) who's being blocked by the CLA check. Let me know if I need to do anything to fix that or if you'd rather fix the issue some other way. | 2023-07-07T01:54:15 |
google/jax | 16,656 | google__jax-16656 | [
"16630"
]
| 88a60b808c1f91260cc9e75b9aa2508aae5bc9f9 | diff --git a/jax/_src/scipy/special.py b/jax/_src/scipy/special.py
--- a/jax/_src/scipy/special.py
+++ b/jax/_src/scipy/special.py
@@ -175,7 +175,6 @@ def entr(x: ArrayLike) -> Array:
lax.full_like(x, -np.inf),
lax.neg(_xlogx(x)))
-
@_wraps(osp_special.multigammaln, update_doc=False)
def multigammaln(a: ArrayLike, d: ArrayLike) -> Array:
d = core.concrete_or_error(int, d, "d argument of multigammaln")
@@ -191,6 +190,50 @@ def multigammaln(a: ArrayLike, d: ArrayLike) -> Array:
return res + constant
+@_wraps(osp_special.kl_div, module="scipy.special")
+def kl_div(
+ p: ArrayLike,
+ q: ArrayLike,
+) -> Array:
+ p, q = promote_args_inexact("kl_div", p, q)
+ zero = _lax_const(p, 0.0)
+ both_gt_zero_mask = lax.bitwise_and(lax.gt(p, zero), lax.gt(q, zero))
+ one_zero_mask = lax.bitwise_and(lax.eq(p, zero), lax.ge(q, zero))
+
+ safe_p = jnp.where(both_gt_zero_mask, p, 1)
+ safe_q = jnp.where(both_gt_zero_mask, q, 1)
+
+ log_val = lax.sub(
+ lax.add(
+ lax.sub(_xlogx(safe_p), xlogy(safe_p, safe_q)),
+ safe_q,
+ ),
+ safe_p,
+ )
+ result = jnp.where(
+ both_gt_zero_mask, log_val, jnp.where(one_zero_mask, q, np.inf)
+ )
+ return result
+
+
+@_wraps(osp_special.rel_entr, module="scipy.special")
+def rel_entr(
+ p: ArrayLike,
+ q: ArrayLike,
+) -> Array:
+ p, q = promote_args_inexact("rel_entr", p, q)
+ zero = _lax_const(p, 0.0)
+ both_gt_zero_mask = lax.bitwise_and(lax.gt(p, zero), lax.gt(q, zero))
+ one_zero_mask = lax.bitwise_and(lax.eq(p, zero), lax.ge(q, zero))
+
+ safe_p = jnp.where(both_gt_zero_mask, p, 1)
+ safe_q = jnp.where(both_gt_zero_mask, q, 1)
+ log_val = lax.sub(_xlogx(safe_p), xlogy(safe_p, safe_q))
+ result = jnp.where(
+ both_gt_zero_mask, log_val, jnp.where(one_zero_mask, q, jnp.inf)
+ )
+ return result
+
# coefs of (2k)! / B_{2k} where B are bernoulli numbers
# those numbers are obtained using https://www.wolframalpha.com
_BERNOULLI_COEFS = [
diff --git a/jax/scipy/special.py b/jax/scipy/special.py
--- a/jax/scipy/special.py
+++ b/jax/scipy/special.py
@@ -50,4 +50,6 @@
xlogy as xlogy,
xlog1py as xlog1py,
zeta as zeta,
+ kl_div as kl_div,
+ rel_entr as rel_entr,
)
| diff --git a/tests/lax_scipy_special_functions_test.py b/tests/lax_scipy_special_functions_test.py
--- a/tests/lax_scipy_special_functions_test.py
+++ b/tests/lax_scipy_special_functions_test.py
@@ -137,6 +137,11 @@ def op_record(name, nargs, dtypes, rng_factory, test_grad, nondiff_argnums=(), t
op_record("exp1", 1, [np.float32], jtu.rand_positive, True),
op_record(
"expn", 2, (int_dtypes, [np.float32]), jtu.rand_positive, True, (0,)),
+ op_record("kl_div", 2, float_dtypes, jtu.rand_positive, True),
+ op_record(
+ "rel_entr", 2, float_dtypes, jtu.rand_positive, True,
+ ),
+
]
| Adding `kl_div` and `rel_entropy` to `jax.scipy.special`
Hi, right now `jax.scipy.special` does not have a `kl_div` function, and computing it naively from its closed form is not as numerically stable as scipy's implementation.
I checked the [implementation](https://github.com/scipy/scipy/blob/899f4efbcd8020b538bd97eb54353b3249af6e43/scipy/special/_convex_analysis.pxd#L13) from `scipy.special.kl_div`, equivalent to:
```math
\text{kldiv}(p,q)= \begin{cases}
p\textrm{log}(p/q) -p +q , & \text{if } p \gt 0, q \gt 0\\
q, & p = 0, q \ge 0 \\
\infty, & \text{otherwise}
\end{cases}
```
A possible LAX implementation:
```python
def kl_div(
p: ArrayLike,
q: ArrayLike,
) -> Array:
both_gt_zero_mask = lax.bitwise_and(lax.gt(p, 0.0), lax.gt(q, 0.0))
one_zero_mask = lax.bitwise_and(lax.eq(p, 0.0), lax.ge(q, 0.0))
one_filler = lax.full_like(p, 1.0)
inf_filler = lax.full_like(p, np.inf)
safe_p = lax.select(both_gt_zero_mask, p, one_filler)
safe_q = lax.select(both_gt_zero_mask, q, one_filler)
log_val = lax.add(
lax.sub(lax.mul(safe_p, lax.log(lax.div(safe_p, safe_q))), safe_p), safe_q
)
result = lax.select(
both_gt_zero_mask, log_val, lax.select(one_zero_mask, q, inf_filler)
)
return result
```
Notice that scipy upgrades floats to double precision, so this JAX implementation only gives a result that matches scipy's up to an absolute tolerance of `1e-6`.
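A quick sanity check of the sketch above against scipy (illustrative only; the inputs are arbitrary):
```python
import numpy as np
import scipy.special

p = np.float32([0.0, 0.1, 0.5, 2.0])
q = np.float32([0.3, 0.0, 0.5, 1.0])

print(scipy.special.kl_div(p, q))  # reference, computed in float64
print(kl_div(p, q))                # implementation sketched above
# The two should agree to within roughly 1e-6 absolute tolerance.
```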
I was thinking of making a pull request adding this function, and perhaps a `rel_entropy` function (similarly implemented). What do you think?
@j-towns
| Looks useful, thanks! If you make the PR, feel free to tag me as a reviewer. | 2023-07-08T11:44:18 |
google/jax | 16,672 | google__jax-16672 | [
"16655"
]
| f4eed78e9079aece4feffe6ab31e7c76b6826e7d | diff --git a/jax/_src/lax/slicing.py b/jax/_src/lax/slicing.py
--- a/jax/_src/lax/slicing.py
+++ b/jax/_src/lax/slicing.py
@@ -1383,11 +1383,12 @@ def _dynamic_update_slice_batching_rule(batched_args, batch_dims):
inserted_window_dims=(),
scatter_dims_to_operand_dims=dims)
index, index_bdim = _batch_dynamic_slice_indices(start_idx, start_idx_bd)
- return _scatter_batching_rule(
- scatter, (operand, index, update), (operand_bd, index_bdim, update_bd),
- update_jaxpr=None, update_consts=None, dimension_numbers=dnums,
- indices_are_sorted=True, unique_indices=True,
- mode=GatherScatterMode.CLIP)
+ return jax.vmap(
+ partial(scatter, dimension_numbers=dnums,
+ indices_are_sorted=True, unique_indices=True,
+ mode=GatherScatterMode.CLIP),
+ in_axes=(operand_bd, index_bdim, update_bd),
+ out_axes=0)(operand, index, update), 0
dynamic_update_slice_p = standard_primitive(
@@ -2067,7 +2068,6 @@ def _scatter_batching_rule(scatter_op, batched_args, batch_dims, *,
indices_are_sorted, unique_indices, mode):
operand, indices, updates = batched_args
operand_bdim, indices_bdim, updates_bdim = batch_dims
- del update_jaxpr, update_consts # Unused.
# move the operand batch dim to the front if it is not None, otherwise create
# it at the front (so that we can scatter into it)
@@ -2086,10 +2086,10 @@ def _scatter_batching_rule(scatter_op, batched_args, batch_dims, *,
update_window_dims=update_window_dims,
inserted_window_dims=inserted_window_dims,
scatter_dims_to_operand_dims=scatter_dims_to_operand_dims)
- return scatter_op(
- operand, indices, updates, dnums,
+ return scatter_op.bind(
+ operand, indices, updates, dimension_numbers=dnums,
indices_are_sorted=indices_are_sorted, unique_indices=unique_indices,
- mode=mode), 0
+ mode=mode, update_jaxpr=update_jaxpr, update_consts=update_consts), 0
# see the third case in _gather_batching_rule for comparison and comments
@@ -2108,10 +2108,10 @@ def _scatter_batching_rule(scatter_op, batched_args, batch_dims, *,
update_window_dims=update_window_dims,
inserted_window_dims=inserted_window_dims,
scatter_dims_to_operand_dims=scatter_dims_to_operand_dims)
- return scatter_op(
- operand, indices, updates, dnums,
+ return scatter_op.bind(
+ operand, indices, updates, dimension_numbers=dnums,
indices_are_sorted=indices_are_sorted, unique_indices=unique_indices,
- mode=mode), 0
+ mode=mode, update_jaxpr=update_jaxpr, update_consts=update_consts), 0
scatter_add_p = standard_primitive(
_scatter_shape_rule, _scatter_dtype_rule, 'scatter-add',
@@ -2119,7 +2119,7 @@ def _scatter_batching_rule(scatter_op, batched_args, batch_dims, *,
ad.primitive_jvps[scatter_add_p] = _scatter_add_jvp
ad.primitive_transposes[scatter_add_p] = _scatter_add_transpose_rule
batching.primitive_batchers[scatter_add_p] = (
- partial(_scatter_batching_rule, scatter_add))
+ partial(_scatter_batching_rule, scatter_add_p))
scatter_mul_p = standard_primitive(
_scatter_shape_rule, _scatter_dtype_rule, 'scatter-mul',
@@ -2141,7 +2141,7 @@ def _scatter_mul_jvp_rhs(g, x, i, y, *, dimension_numbers,
_scatter_mul_jvp_rhs)
ad.primitive_transposes[scatter_mul_p] = _scatter_mul_transpose_rule
batching.primitive_batchers[scatter_mul_p] = (
- partial(_scatter_batching_rule, scatter_mul))
+ partial(_scatter_batching_rule, scatter_mul_p))
def _scatter_extremal_jvp(scatter_op, primals, tangents, update_jaxpr,
update_consts, dimension_numbers,
@@ -2248,14 +2248,14 @@ def _scatter_extremal_jvp(scatter_op, primals, tangents, update_jaxpr,
_scatter_shape_rule, _scatter_dtype_rule, 'scatter-min',
weak_type_rule=_argnum_weak_type(0))
batching.primitive_batchers[scatter_min_p] = (
- partial(_scatter_batching_rule, scatter_min))
+ partial(_scatter_batching_rule, scatter_min_p))
ad.primitive_jvps[scatter_min_p] = partial(_scatter_extremal_jvp, scatter_min_p)
scatter_max_p = standard_primitive(
_scatter_shape_rule, _scatter_dtype_rule, 'scatter-max',
weak_type_rule=_argnum_weak_type(0))
batching.primitive_batchers[scatter_max_p] = (
- partial(_scatter_batching_rule, scatter_max))
+ partial(_scatter_batching_rule, scatter_max_p))
ad.primitive_jvps[scatter_max_p] = partial(_scatter_extremal_jvp, scatter_max_p)
def _scatter_jvp(primals, tangents, *, update_jaxpr, update_consts,
@@ -2401,7 +2401,7 @@ def _scatter_transpose_rule(t, operand, indices, updates, *,
ad.primitive_jvps[scatter_p] = _scatter_jvp
ad.primitive_transposes[scatter_p] = _scatter_transpose_rule
batching.primitive_batchers[scatter_p] = (
- partial(_scatter_batching_rule, scatter))
+ partial(_scatter_batching_rule, scatter_p))
def _scatter_lower_opaque(ctx, operand, indices, updates, *,
| diff --git a/tests/lax_numpy_indexing_test.py b/tests/lax_numpy_indexing_test.py
--- a/tests/lax_numpy_indexing_test.py
+++ b/tests/lax_numpy_indexing_test.py
@@ -466,6 +466,15 @@ def jnp_op(x, idx):
self._CheckAgainstNumpy(np_op_idx, jnp_op_idx, args_maker)
self._CompileAndCheck(jnp_op_idx, args_maker)
+ def testIndexApplyBatchingBug(self):
+ # https://github.com/google/jax/issues/16655
+ arr = jnp.array([[1, 2, 3, 4, 5, 6]])
+ ind = jnp.array([3])
+ func = lambda a, i: a.at[i].apply(lambda x: x - 1)
+ expected = jnp.array(list(map(func, arr, ind)))
+ out = jax.vmap(func)(arr, ind)
+ self.assertArraysEqual(out, expected)
+
def testIndexUpdateScalarBug(self):
# https://github.com/google/jax/issues/14923
a = jnp.arange(10.)
diff --git a/tests/lax_test.py b/tests/lax_test.py
--- a/tests/lax_test.py
+++ b/tests/lax_test.py
@@ -2302,6 +2302,29 @@ def testScatterMax(self, arg_shape, dtype, idxs, update_shape, dnums):
fun = partial(lax.scatter_max, dimension_numbers=dnums)
self._CompileAndCheck(fun, args_maker)
+ @jtu.sample_product(
+ [dict(arg_shape=arg_shape, idxs=idxs, update_shape=update_shape, dnums=dnums)
+ for arg_shape, idxs, update_shape, dnums in [
+ ((5,), np.array([[0], [2]]), (2,), lax.ScatterDimensionNumbers(
+ update_window_dims=(), inserted_window_dims=(0,),
+ scatter_dims_to_operand_dims=(0,))),
+ ((10,), np.array([[0], [0], [0]]), (3, 2), lax.ScatterDimensionNumbers(
+ update_window_dims=(1,), inserted_window_dims=(),
+ scatter_dims_to_operand_dims=(0,))),
+ ((10, 5,), np.array([[0], [2], [1]]), (3, 3), lax.ScatterDimensionNumbers(
+ update_window_dims=(1,), inserted_window_dims=(0,),
+ scatter_dims_to_operand_dims=(0,))),
+ ]],
+ dtype=lax_test_util.float_dtypes,
+ )
+ def testScatterApply(self, arg_shape, dtype, idxs, update_shape, dnums):
+ rng = jtu.rand_default(self.rng())
+ rng_idx = jtu.rand_int(self.rng(), high=max(arg_shape))
+ rand_idxs = lambda: rng_idx(idxs.shape, idxs.dtype)
+ args_maker = lambda: [rng(arg_shape, dtype), rand_idxs()]
+ fun = partial(lax.scatter_apply, func=jnp.sin, update_shape=update_shape, dimension_numbers=dnums)
+ self._CompileAndCheck(fun, args_maker)
+
@jtu.sample_product(
[dict(arg_shape=arg_shape, idxs=idxs, update_shape=update_shape,
dnums=dnums)
diff --git a/tests/lax_vmap_test.py b/tests/lax_vmap_test.py
--- a/tests/lax_vmap_test.py
+++ b/tests/lax_vmap_test.py
@@ -601,6 +601,29 @@ def testScatterAdd(self, arg_shape, dtype, idxs, update_shape, dnums, bdims):
[dtype, idxs.dtype, dtype], jtu.rand_default(self.rng()),
rtol={np.float16: 5e-3, dtypes.bfloat16: 7e-2})
+ @jtu.sample_product(
+ [dict(arg_shape=arg_shape, idxs=idxs, update_shape=update_shape,
+ dnums=dnums, bdims=bdims)
+ for arg_shape, idxs, update_shape, dnums in [
+ ((5,), np.array([[0], [2]]), (2,), lax.ScatterDimensionNumbers(
+ update_window_dims=(), inserted_window_dims=(0,),
+ scatter_dims_to_operand_dims=(0,))),
+ ((10,), np.array([[0], [0], [0]]), (3, 2), lax.ScatterDimensionNumbers(
+ update_window_dims=(1,), inserted_window_dims=(),
+ scatter_dims_to_operand_dims=(0,))),
+ ((10, 5,), np.array([[0], [2], [1]]), (3, 3), lax.ScatterDimensionNumbers(
+ update_window_dims=(1,), inserted_window_dims=(0,),
+ scatter_dims_to_operand_dims=(0,))),
+ ]
+ for bdims in lax_test_util.all_bdims(arg_shape, idxs.shape)],
+ dtype=lax_test_util.float_dtypes,
+ )
+ def testScatterApply(self, arg_shape, dtype, idxs, update_shape, dnums, bdims):
+ fun = partial(lax.scatter_apply, func=jnp.sin, update_shape=update_shape, dimension_numbers=dnums)
+ self._CheckBatching(fun, 5, bdims, [arg_shape, idxs.shape],
+ [dtype, idxs.dtype], jtu.rand_default(self.rng()),
+ rtol={np.float16: 5e-3, dtypes.bfloat16: 7e-2})
+
def testShapeUsesBuiltinInt(self):
x = lax.iota(np.int32, 3) + 1
self.assertIsInstance(x.shape[0], int) # not np.int64
| Different result while using ``at[].apply`` inside jax.vmap
### Description
Jax's Array index helper `at` potentially has a bug when used inside `jax.vmap`. Using ``apply(lambda...)`` inside ``jax.vmap`` gives me a different result than when it is run sequentially or inside jit.
Here's the code that I'm trying to run:
```python
import jax
import jax.numpy as np
a = np.array([1,2,3,4,5,6])
def adder(a,val):
a = a.at[val].apply(lambda a:a-1)
return a
ind = np.array([3])
print(adder(a,ind))
```
I get the expected result;
``[1 2 3 4 5 6]``
However, when the same function is used inside jax.vmap I get a different result
```python
v_adder = jax.vmap(adder,in_axes=(0,None))
a = np.array([[1,2,3,4,5,6]])
ind = np.array([3])
print(v_adder(a,ind))
```
``[[1 2 3 0 5 6]]``
Note that I had to add another dimension in order to use it inside vmap which I don't think is the root cause.
Took me a day of debugging to hunt down this bug 😭.
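For anyone hitting this before a fix lands, a workaround sketch that batches correctly for this single-index update (reusing the arrays defined above; not a fix for `.apply` itself):
```python
def adder_workaround(a, val):
    # Express the update with set/add instead of apply; these batch correctly.
    return a.at[val].set(a[val] - 1)

v_adder_workaround = jax.vmap(adder_workaround, in_axes=(0, None))
print(v_adder_workaround(a, ind))  # [[1 2 3 3 5 6]]
```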
### What jax/jaxlib version are you using?
jax==0.4.13 jaxlib==0.4.13
### Which accelerator(s) are you using?
CPU
### Additional system info
WSL
### NVIDIA GPU info
_No response_
| Thanks for the report, and sorry for the difficulty in tracking down the bug. We really appreciate it though, especially with the minimal repro.
> I get the expected result;
> [1 2 3 4 5 6]
Did you mean `[1 2 3 3 5 6]`?
This certainly looks like a bug to me! It reproduces on TPU too. Here are the jaxprs:
```python
print(jax.make_jaxpr(adder)(a,ind))
```
```
{ lambda ; a:i32[6] b:i32[1]. let
c:bool[1] = lt b 0
d:i32[1] = add b 6
e:i32[1] = select_n c b d
f:i32[1,1] = broadcast_in_dim[broadcast_dimensions=(0,) shape=(1, 1)] e
_:i32[1] = broadcast_in_dim[broadcast_dimensions=() shape=(1,)] 0
g:i32[1] = broadcast_in_dim[broadcast_dimensions=() shape=(1,)] 0
h:i32[6] = scatter[
dimension_numbers=ScatterDimensionNumbers(update_window_dims=(), inserted_window_dims=(0,), scatter_dims_to_operand_dims=(0,))
indices_are_sorted=False
mode=GatherScatterMode.FILL_OR_DROP
unique_indices=False
update_consts=()
update_jaxpr={ lambda ; i:i32[] j:i32[]. let k:i32[] = sub i 1 in (k,) }
] a f g
in (h,) }
```
```python
print(jax.make_jaxpr(v_adder)(a,ind))
```
```
{ lambda ; a:i32[1,6] b:i32[1]. let
c:bool[1] = lt b 0
d:i32[1] = add b 6
e:i32[1] = select_n c b d
f:i32[1,1] = broadcast_in_dim[broadcast_dimensions=(0,) shape=(1, 1)] e
_:i32[1] = broadcast_in_dim[broadcast_dimensions=() shape=(1,)] 0
g:i32[1] = broadcast_in_dim[broadcast_dimensions=() shape=(1,)] 0
h:i32[1,1] = broadcast_in_dim[broadcast_dimensions=(1,) shape=(1, 1)] g
i:i32[1,6] = scatter[
dimension_numbers=ScatterDimensionNumbers(update_window_dims=(0,), inserted_window_dims=(1,), scatter_dims_to_operand_dims=(1,))
indices_are_sorted=False
mode=GatherScatterMode.FILL_OR_DROP
unique_indices=False
update_consts=()
update_jaxpr={ lambda ; j:i32[] k:i32[]. let in (k,) }
] a f h
in (i,) }
```
What's up with that second `update_jaxpr`!?
Yeah sorry, scatter-apply just isn't batchable; there's a rule defined but it doesn't work. In particular, it [ignores the `update_jaxpr` (and hence the user-defined function)](https://github.com/google/jax/blob/1795b12a9f7bca118d75c981fadd61331680de57/jax/_src/lax/slicing.py#L2070) it's given. | 2023-07-10T19:06:18 |
google/jax | 16,735 | google__jax-16735 | [
"16186"
]
| 2e35f25b4b89089526b003fce348cdc372aeeeed | diff --git a/jax/_src/debugger/core.py b/jax/_src/debugger/core.py
--- a/jax/_src/debugger/core.py
+++ b/jax/_src/debugger/core.py
@@ -19,8 +19,6 @@
import threading
from typing import Any, Protocol
-import numpy as np
-
import jax
from jax import tree_util
from jax._src import core
@@ -84,10 +82,7 @@ def tree_flatten(self):
flat_locals, locals_tree = _safe_flatten_dict(self.locals)
flat_globals, globals_tree = _safe_flatten_dict(self.globals)
flat_vars = flat_locals + flat_globals
- is_valid = [
- isinstance(l, (core.Tracer, jax.Array, np.ndarray))
- for l in flat_vars
- ]
+ is_valid = [isinstance(l, core.Tracer) for l in flat_vars]
invalid_vars, valid_vars = util.partition_list(is_valid, flat_vars)
return valid_vars, (is_valid, invalid_vars, locals_tree, globals_tree,
len(flat_locals), self.filename, self.code_context,
@@ -158,7 +153,7 @@ def register_debugger(name: str, debugger: Debugger, priority: int) -> None:
def breakpoint(*, backend: str | None = None, filter_frames: bool = True,
num_frames: int | None = None, ordered: bool = False,
- **kwargs): # pylint: disable=redefined-builtin
+ token = None, **kwargs): # pylint: disable=redefined-builtin
"""Enters a breakpoint at a point in a program.
Args:
@@ -172,17 +167,26 @@ def breakpoint(*, backend: str | None = None, filter_frames: bool = True,
num_frames: The number of frames above the current stack frame to make
available for inspection in the interactive debugger.
ordered: A keyword only argument used to indicate whether or not the
- staged out computation will enforce ordering of this ``debug_print``
- with respect to other ordered ``debug_print`` calls.
+ staged out computation will enforce ordering of this ``jax.debug.breakpoint``
+ with respect to other ordered ``jax.debug.breakpoint`` and ``jax.debug.print``
+ calls.
+ token: A keyword only argument; an alternative to ``ordered``. If used then a JAX
+ array (or pytree of JAX arrays) should be passed, and the breakpoint will be run
+ once its value is computed.
+ This is returned unchanged, and should be passed back to the computation.
+ If the return value is unused in the later computation, then the whole computation
+ will be pruned and this breakpoint will not be run.
Returns:
- None.
+ If `token` is passed, then its value is returned unchanged. Otherwise, returns
+ `None`.
"""
+ if token is not None:
+ if ordered:
+ raise ValueError("`ordered` and `token` are mutually exclusive arguments.")
frame_infos = inspect.stack()
# Throw out first frame corresponding to this function
frame_infos = frame_infos[1:]
- if num_frames is not None:
- frame_infos = frame_infos[:num_frames]
# Filter out internal frames
if filter_frames:
frames = [
@@ -195,6 +199,8 @@ def breakpoint(*, backend: str | None = None, filter_frames: bool = True,
DebuggerFrame.from_frameinfo(frame_info)
for frame_info in frame_infos
]
+ if num_frames is not None:
+ frames = frames[:num_frames]
flat_args, frames_tree = tree_util.tree_flatten(frames)
def _breakpoint_callback(*flat_args):
@@ -208,4 +214,11 @@ def _breakpoint_callback(*flat_args):
with debug_lock:
debugger(frames, thread_id, **kwargs)
- debugging.debug_callback(_breakpoint_callback, *flat_args, ordered=ordered)
+ if token is None:
+ debugging.debug_callback(_breakpoint_callback, *flat_args, ordered=ordered)
+ else:
+ def _breakpoint_callback_wrapper(x, *flat_args):
+ _breakpoint_callback(*flat_args)
+ return x
+ token, flat_args = jax.lax.stop_gradient((token, flat_args))
+ return jax.pure_callback(_breakpoint_callback_wrapper, token, token, *flat_args)
| `jax.debug.breakpoint` crashes with numpy arrays of objects.
### Description
```python
import jax
import numpy as np
@jax.jit
def f():
y = np.array([object(), object()])
jax.debug.breakpoint()
f()
# AssertionError: [<object object at 0x7d3e8ddab2c0> <object object at 0x7d3e8cbc8510>]
```
It looks like `jax.debug.breakpoint` is grabbing all the arraylikes from the stack, but is falsely assuming that all NumPy arrays can be treated as a jaxtype.
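A minimal illustration of a stricter check that would avoid this (only tracers treated as debugger-visible JAX values; names here are illustrative, not the exact fix):
```python
import numpy as np
from jax._src import core

def _is_jax_value(leaf):
    # An object-dtype numpy array is not a valid JAX type, so only treat
    # tracers as values worth flattening for the debugger.
    return isinstance(leaf, core.Tracer)

print(_is_jax_value(np.array([object(), object()])))  # False
```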
### What jax/jaxlib version are you using?
HEAD
| 2023-07-14T18:49:59 |
||
google/jax | 16,826 | google__jax-16826 | [
"16805"
]
| 83d99bbb17fb0a03e14b190485c7e264481790c1 | diff --git a/jax/_src/lax/lax.py b/jax/_src/lax/lax.py
--- a/jax/_src/lax/lax.py
+++ b/jax/_src/lax/lax.py
@@ -2608,7 +2608,7 @@ def _dot_general_batch_rule(batched_args, batch_dims, *, dimension_numbers,
left_stack_dim = lbd.stacked_axis if type(lbd) is RaggedAxis else lbd
right_stack_dim = rbd.stacked_axis if type(rbd) is RaggedAxis else rbd
new_dimension_numbers, result_stack_dim = _dot_general_batch_dim_nums(
- (lhs.ndim, rhs.ndim), (left_stack_dim, right_stack_dim),
+ (np.ndim(lhs), np.ndim(rhs)), (left_stack_dim, right_stack_dim),
dimension_numbers)
# TODO Should probably check that any ragged dimensions have corresponding
# sizes, because otherwise the dot product is technically undefined.
@@ -2619,12 +2619,12 @@ def _dot_general_batch_rule(batched_args, batch_dims, *, dimension_numbers,
lhs = batching.mask_ragged_axes(lhs, _get_sum_identity, lbd)
lhs_shape = batching.bdim_as_shape(lbd, lhs.shape)
else:
- lhs_shape = lhs.shape
+ lhs_shape = np.shape(lhs)
if type(rbd) is RaggedAxis:
rhs = batching.mask_ragged_axes(rhs, _get_sum_identity, rbd)
rhs_shape = batching.bdim_as_shape(rbd, rhs.shape)
else:
- rhs_shape = rhs.shape
+ rhs_shape = np.shape(rhs)
batched_out = dot_general(lhs, rhs, new_dimension_numbers,
precision=precision,
preferred_element_type=preferred_element_type)
| diff --git a/tests/lax_test.py b/tests/lax_test.py
--- a/tests/lax_test.py
+++ b/tests/lax_test.py
@@ -2664,6 +2664,14 @@ def testDynamicSliceU8Index(self):
np.testing.assert_equal(
np.array(lax.dynamic_slice(x, np.uint8([128]), (1,))), [128])
+ def test_dot_general_batching_python_builtin_arg(self):
+ # https://github.com/google/jax/issues/16805
+ @jax.remat
+ def f(x):
+ return jax.lax.dot_general(x, x, (([], []), ([], [])))
+
+ jax.hessian(f)(1.0) # don't crash
+
class LazyConstantTest(jtu.JaxTestCase):
def _Check(self, make_const, expected):
| 2nd-order autodiff of remat of `dot_general` with `float` input leads to error
This came up in the process of creating #16721
Here's a minimal repro:
```python
import jax
@jax.remat
def f(x):
return jax.lax.dot_general(x, x, (([], []), ([], [])))
jax.hessian(f)(1.0)
```
```pytb
Traceback (most recent call last):
File "/Users/vanderplas/github/google/jax/tmp.py", line 7, in <module>
jax.hessian(f)(1.0)
File "/Users/vanderplas/github/google/jax/tmp.py", line 5, in f
return jax.lax.dot_general(x, x, (([], []), ([], [])))
jax._src.source_info_util.JaxStackTraceBeforeTransformation: AttributeError: 'float' object has no attribute 'ndim'
```
<details>
<summary>Show full traceback</summary>
```pytb
The preceding stack trace is the source of the JAX operation that, once transformed by JAX, triggered the following exception.
--------------------
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/Users/vanderplas/github/google/jax/tmp.py", line 7, in <module>
jax.hessian(f)(1.0)
File "/Users/vanderplas/github/google/jax/jax/_src/traceback_util.py", line 166, in reraise_with_filtered_traceback
return fun(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^
File "/Users/vanderplas/github/google/jax/jax/_src/api.py", line 1257, in vmap_f
out_flat = batching.batch(
^^^^^^^^^^^^^^^
File "/Users/vanderplas/github/google/jax/jax/_src/linear_util.py", line 188, in call_wrapped
ans = self.f(*args, **dict(self.params, **kwargs))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/vanderplas/github/google/jax/jax/_src/api.py", line 1986, in _jvp
out_primals, out_tangents = ad.jvp(flat_fun).call_wrapped(ps_flat, ts_flat)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/vanderplas/github/google/jax/jax/_src/linear_util.py", line 188, in call_wrapped
ans = self.f(*args, **dict(self.params, **kwargs))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/vanderplas/github/google/jax/jax/_src/api.py", line 946, in jacfun
y, pullback = _vjp(f_partial, *dyn_args)
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/vanderplas/github/google/jax/jax/_src/api.py", line 2235, in _vjp
out_primal, out_vjp = ad.vjp(
^^^^^^^
File "/Users/vanderplas/github/google/jax/jax/_src/interpreters/ad.py", line 139, in vjp
out_primals, pvals, jaxpr, consts = linearize(traceable, *primals)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/vanderplas/github/google/jax/jax/_src/interpreters/ad.py", line 128, in linearize
jaxpr, out_pvals, consts = pe.trace_to_jaxpr_nounits(jvpfun_flat, in_pvals)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/vanderplas/github/google/jax/jax/_src/profiler.py", line 314, in wrapper
return func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
File "/Users/vanderplas/github/google/jax/jax/_src/interpreters/partial_eval.py", line 776, in trace_to_jaxpr_nounits
jaxpr, (out_pvals, consts, env) = fun.call_wrapped(pvals)
^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/vanderplas/github/google/jax/jax/_src/linear_util.py", line 188, in call_wrapped
ans = self.f(*args, **dict(self.params, **kwargs))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/vanderplas/github/google/jax/jax/_src/traceback_util.py", line 166, in reraise_with_filtered_traceback
return fun(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^
File "/Users/vanderplas/github/google/jax/jax/_src/ad_checkpoint.py", line 285, in fun_remat
out_flat = remat_p.bind(
^^^^^^^^^^^^^
File "/Users/vanderplas/github/google/jax/jax/_src/core.py", line 379, in bind
return self.bind_with_trace(find_top_trace(args), args, params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/vanderplas/github/google/jax/jax/_src/core.py", line 382, in bind_with_trace
out = trace.process_primitive(self, map(trace.full_raise, args), params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/vanderplas/github/google/jax/jax/_src/interpreters/ad.py", line 315, in process_primitive
primal_out, tangent_out = jvp(primals_in, tangents_in, **params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/vanderplas/github/google/jax/jax/_src/ad_checkpoint.py", line 475, in remat_jvp
outs = remat_p.bind(
^^^^^^^^^^^^^
File "/Users/vanderplas/github/google/jax/jax/_src/core.py", line 379, in bind
return self.bind_with_trace(find_top_trace(args), args, params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/vanderplas/github/google/jax/jax/_src/core.py", line 382, in bind_with_trace
out = trace.process_primitive(self, map(trace.full_raise, args), params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/vanderplas/github/google/jax/jax/_src/interpreters/partial_eval.py", line 212, in process_primitive
return custom_partial_eval_rules[primitive](self, *tracers, **params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/vanderplas/github/google/jax/jax/_src/ad_checkpoint.py", line 513, in remat_partial_eval
out_consts = core.eval_jaxpr(jaxpr_known, (), *in_consts)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/vanderplas/github/google/jax/jax/_src/core.py", line 447, in eval_jaxpr
ans = eqn.primitive.bind(*subfuns, *map(read, eqn.invars), **bind_params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/vanderplas/github/google/jax/jax/_src/core.py", line 379, in bind
return self.bind_with_trace(find_top_trace(args), args, params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/vanderplas/github/google/jax/jax/_src/core.py", line 382, in bind_with_trace
out = trace.process_primitive(self, map(trace.full_raise, args), params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/vanderplas/github/google/jax/jax/_src/interpreters/ad.py", line 315, in process_primitive
primal_out, tangent_out = jvp(primals_in, tangents_in, **params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/vanderplas/github/google/jax/jax/_src/interpreters/ad.py", line 531, in standard_jvp
tangents_out = [rule(t, *primals, **params) for rule, t in zip(jvprules, tangents)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/vanderplas/github/google/jax/jax/_src/interpreters/ad.py", line 531, in <listcomp>
tangents_out = [rule(t, *primals, **params) for rule, t in zip(jvprules, tangents)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/vanderplas/github/google/jax/jax/_src/interpreters/ad.py", line 557, in <lambda>
lhs_jvp = lambda g, x, y, **kwargs: prim.bind(g, y, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/vanderplas/github/google/jax/jax/_src/core.py", line 379, in bind
return self.bind_with_trace(find_top_trace(args), args, params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/vanderplas/github/google/jax/jax/_src/core.py", line 382, in bind_with_trace
out = trace.process_primitive(self, map(trace.full_raise, args), params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/vanderplas/github/google/jax/jax/_src/interpreters/batching.py", line 430, in process_primitive
val_out, dim_out = batched_primitive(vals_in, dims_in, **params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/vanderplas/github/google/jax/jax/_src/lax/lax.py", line 2610, in _dot_general_batch_rule
(lhs.ndim, rhs.ndim), (left_stack_dim, right_stack_dim),
^^^^^^^^
jax._src.traceback_util.UnfilteredStackTrace: AttributeError: 'float' object has no attribute 'ndim'
The stack trace below excludes JAX-internal frames.
The preceding is the original exception that occurred, unmodified.
--------------------
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/Users/vanderplas/github/google/jax/tmp.py", line 7, in <module>
jax.hessian(f)(1.0)
File "/Users/vanderplas/github/google/jax/jax/_src/api.py", line 859, in jacfun
y, jac = vmap(pushfwd, out_axes=(None, -1))(_std_basis(dyn_args))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
AttributeError: 'float' object has no attribute 'ndim'
```
</details>
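For context on the fix direction discussed below, `np.ndim` handles plain Python scalars where the `.ndim` attribute does not — a small illustration:
```python
import numpy as np

print(np.ndim(1.0))              # 0 -- works for Python scalars
print(np.ndim(np.ones((2, 3))))  # 2 -- and for arrays
# (1.0).ndim would raise AttributeError, which is the failure seen above.
```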
| I think `_dot_general_batch_rule` should accept Python numeric types for `lhs` and `rhs`, i.e. it shouldn't expect the `ndim` attribute to exist on them and should instead use `np.ndim` to handle the polymorphism. | 2023-07-24T21:01:38 |
google/jax | 16,938 | google__jax-16938 | [
"16935"
]
| 4fb8cdb019bbd2d0761acfeb89b4debd34996696 | diff --git a/jax/experimental/sparse/linalg.py b/jax/experimental/sparse/linalg.py
--- a/jax/experimental/sparse/linalg.py
+++ b/jax/experimental/sparse/linalg.py
@@ -20,10 +20,12 @@
import jax
import jax.numpy as jnp
+from jax.experimental import sparse
from jax.interpreters import mlir
from jax.interpreters import xla
from jax._src import core
+from jax._src.interpreters import ad
from jax._src.lib import gpu_solver
import numpy as np
@@ -549,9 +551,48 @@ def _callback(data, indices, indptr, b, **kwargs):
return result
+def _spsolve_jvp_lhs(data_dot, data, indices, indptr, b, **kwds):
+ # d/dM M^-1 b = M^-1 M_dot M^-1 b
+ p = spsolve(data, indices, indptr, b, **kwds)
+ q = sparse.csr_matvec_p.bind(data_dot, indices, indptr, p,
+ shape=(indptr.size - 1, len(b)),
+ transpose=False)
+ return -spsolve(data, indices, indptr, q, **kwds)
+
+
+def _spsolve_jvp_rhs(b_dot, data, indices, indptr, b, **kwds):
+ # d/db M^-1 b = M^-1 b_dot
+ return spsolve(data, indices, indptr, b_dot, **kwds)
+
+
+def _csr_transpose(data, indices, indptr):
+ # Transpose of a square CSR matrix
+ m = indptr.size - 1
+ row = jnp.cumsum(jnp.zeros_like(indices).at[indptr].add(1)) - 1
+ row_T, indices_T, data_T = jax.lax.sort((indices, row, data), num_keys=2)
+ indptr_T = jnp.zeros_like(indptr).at[1:].set(
+ jnp.cumsum(jnp.bincount(row_T, length=m)).astype(indptr.dtype))
+ return data_T, indices_T, indptr_T
+
+
+def _spsolve_transpose(ct, data, indices, indptr, b, **kwds):
+ assert not ad.is_undefined_primal(indices)
+ assert not ad.is_undefined_primal(indptr)
+ if ad.is_undefined_primal(b):
+ # TODO(jakevdp): can we do this without an explicit transpose?
+ data_T, indices_T, indptr_T = _csr_transpose(data, indices, indptr)
+ ct_out = spsolve(data_T, indices_T, indptr_T, ct, **kwds)
+ return data, indices, indptr, ct_out
+ else:
+ # Should never reach here, because JVP is linear wrt data.
+ raise NotImplementedError("spsolve transpose with respect to data")
+
+
spsolve_p = core.Primitive('spsolve')
spsolve_p.def_impl(functools.partial(xla.apply_primitive, spsolve_p))
spsolve_p.def_abstract_eval(_spsolve_abstract_eval)
+ad.defjvp(spsolve_p, _spsolve_jvp_lhs, None, None, _spsolve_jvp_rhs)
+ad.primitive_transposes[spsolve_p] = _spsolve_transpose
mlir.register_lowering(spsolve_p, _spsolve_gpu_lowering, platform='cuda')
mlir.register_lowering(spsolve_p, _spsolve_cpu_lowering, platform='cpu')
| diff --git a/tests/sparse_test.py b/tests/sparse_test.py
--- a/tests/sparse_test.py
+++ b/tests/sparse_test.py
@@ -2799,6 +2799,28 @@ def sparse_solve(data, indices, indptr, b):
self.assertAllClose(a @ x, b, rtol=1e-2, atol=1e-3)
self._CompileAndCheck(sparse_solve, args_maker)
+ @jtu.sample_product(
+ size=[10, 20, 50],
+ dtype=jtu.dtypes.floating,
+ )
+ @unittest.skipIf(jtu.device_under_test() == "tpu", "test requires CPU or GPU")
+ @unittest.skipIf(jtu.device_under_test() == "cuda" and not GPU_LOWERING_ENABLED,
+ "test requires cusparse/cusolver")
+ @jtu.skip_on_devices("rocm", "test requires cusolver")
+ def test_sparse_qr_linear_solver_grads(self, size, dtype):
+ rng = rand_sparse(self.rng())
+ a = rng((size, size), dtype)
+ nse = (a != 0).sum()
+ data, indices, indptr = sparse_csr._csr_fromdense(a, nse=nse)
+
+ rng_k = jtu.rand_default(self.rng())
+ b = rng_k([size], dtype)
+
+ def sparse_solve(data, b, tol=1e-8):
+ return sparse.linalg.spsolve(data, indices, indptr, b, tol=tol)
+
+ jtu.check_grads(sparse_solve, (data, b), order=1, rtol=0.05, atol=0.05)
+
class SparseUtilTest(sptu.SparseTestCase):
| [sparse] Feature request: add grad support to experimental.sparse.linalg.spsolve
`experimental.sparse.linalg.spsolve` is implemented via the [`cuda_csrlsvqr` routine](https://github.com/google/jax/blob/7708cf5c21e3296d0a607125f8686d5a9712c1ca/jaxlib/gpu_solver.py#L258). If I am not mistaken, the gradient should be computable (GPU only) with another call to `spsolve`.
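For reference, the standard matrix-calculus identity underlying this: differentiating the solve only needs further solves with the same matrix (plus one solve against its transpose for reverse mode):
```math
\partial\left(A^{-1}b\right) = -A^{-1}\,(\partial A)\,A^{-1}b \;+\; A^{-1}\,\partial b
```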
| FWIW you should be able to do this by wrapping into a `lineax.AbstractSolver`. ([Lineax is here](https://github.com/google/lineax).) This automatically synthesises the backward pass given just the forward solve.
Out of curiosity, how do you automatically construct the transpose rule for an abstract solver if no transpose solver is available?
Transposition is handled as a property of the operator, rather than the solver. I.e. we basically just do `linear_solve(operator.transpose(), solver, ...)`.
So yeah, you still have to define transposition somewhere! But this way (a) we can do single-dispatch on the operator type (e.g. efficiently transpose a tridiagonal matrix), and (b) it doesn't come up when adding new solvers.
Makes sense, thanks! Since `spsolve_p` does not provide any built-in transpose solve, it sounds like a simple `lineax` wrapper would not be sufficient to make it work with a backward pass.
@Alessio-Amaolo hopefully #16938 will enable the things you want to do, even though it has to resort to using a manual CSR transpose for the backward pass.
> Makes sense, thanks! Since spsolve_p does not provide any built-in transpose solve, it sounds like a simple lineax wrapper would not be sufficient to make it work with a backward pass.
I think it should. It's the *matrix* that is transposed, not the solver.
Agreed, I used "transpose solve" as a shorthand for "transpose the matrix and then solve", sorry that wasn't clear.
The issue is that this primitive takes a CSR representation as the input, and unlike COO there's no trivial way to transpose a CSR representation without calling out to some other kernel. Some solvers have a transpose flag built-in to handle this case, but `spsolve_p` does not, because `csrlsvqr` doesn't provide any such flag.
Right, I see! I hadn't fully appreciated that (a) `spsolve` only accepts CSR, and that (b) the transpose rule for a `CSR` matrix wasn't already defined (due to it being so inefficient). | 2023-08-03T00:04:40 |
google/jax | 17,080 | google__jax-17080 | [
"17075"
]
| cd24a15188199ec85398accae1ed58235c150f98 | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -85,8 +85,11 @@ def generate_proto(source):
# Cloud TPU VM jaxlib can be installed via:
# $ pip install jax[tpu] -f https://storage.googleapis.com/jax-releases/libtpu_releases.html
- 'tpu': [f'jaxlib=={_current_jaxlib_version}',
- f'libtpu-nightly=={_libtpu_version}'],
+ 'tpu': [
+ f'jaxlib=={_current_jaxlib_version}',
+ f'libtpu-nightly=={_libtpu_version}',
+ 'requests', # necessary for jax.distributed.initialize
+ ],
# $ pip install jax[australis]
'australis': ['protobuf>=3.13,<4'],
| ModuleNotFoundError: No module named 'requests' when executing `jax.distributed.initialize()` on TPU Pods
### Description
```python
import jax
jax.distributed.initialize()
```
Error:
```
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/home/ayaka/nfs_share/venv/lib/python3.11/site-packages/jax/_src/distributed.py", line 168, in initialize
global_state.initialize(coordinator_address, num_processes, process_id, local_device_ids)
File "/home/ayaka/nfs_share/venv/lib/python3.11/site-packages/jax/_src/distributed.py", line 47, in initialize
clusters.ClusterEnv.auto_detect_unset_distributed_params(
File "/home/ayaka/nfs_share/venv/lib/python3.11/site-packages/jax/_src/clusters/cluster.py", line 55, in auto_detect_unset_distributed_params
coordinator_address = env.get_coordinator_address()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/ayaka/nfs_share/venv/lib/python3.11/site-packages/jax/_src/clusters/cloud_tpu_cluster.py", line 54, in get_coordinator_address
return cls._get_worker_endpoints()[0].split(':')[2] + ':8476'
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/ayaka/nfs_share/venv/lib/python3.11/site-packages/jax/_src/clusters/cloud_tpu_cluster.py", line 74, in _get_worker_endpoints
return get_metadata('worker-network-endpoints').split(',')
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/ayaka/nfs_share/venv/lib/python3.11/site-packages/jax/_src/clusters/cloud_tpu_cluster.py", line 23, in get_metadata
import requests # pytype: disable=import-error
^^^^^^^^^^^^^^^
ModuleNotFoundError: No module named 'requests'
```
I think `requests` should be automatically installed when installing JAX.
### What jax/jaxlib version are you using?
jax 0.4.14
### Which accelerator(s) are you using?
TPU
### Additional system info
_No response_
### NVIDIA GPU info
_No response_
| Thanks - I think that makes sense. It looks like requests is only imported in `jax/_src/clusters/cloud_tpu_cluster.py`, so it would probably be better as a TPU-only requirement. Perhaps we could include it in the optional requirements for `jax[tpu]`?
Tagging @skye who knows a lot about cloud TPU runtimes – what do you think?
I agree it should be required for `jax[tpu]`. I think it used to be at some point actually, but was removed because we didn't need it anymore (but now we do!). | 2023-08-11T18:40:19 |
|
google/jax | 17,204 | google__jax-17204 | [
"17199"
]
| 9f5999d5456d7f9e3d7f1ea5972a161424857bf8 | diff --git a/jax/_src/scipy/stats/norm.py b/jax/_src/scipy/stats/norm.py
--- a/jax/_src/scipy/stats/norm.py
+++ b/jax/_src/scipy/stats/norm.py
@@ -57,10 +57,16 @@ def ppf(q: ArrayLike, loc: ArrayLike = 0, scale: ArrayLike = 1) -> Array:
return jnp.asarray(special.ndtri(q) * scale + loc, float)
+@_wraps(osp_stats.norm.logsf, update_doc=False)
+def logsf(x: ArrayLike, loc: ArrayLike = 0, scale: ArrayLike = 1) -> Array:
+ x, loc, scale = promote_args_inexact("norm.logsf", x, loc, scale)
+ return logcdf(-x, -loc, scale)
+
+
@_wraps(osp_stats.norm.sf, update_doc=False)
def sf(x: ArrayLike, loc: ArrayLike = 0, scale: ArrayLike = 1) -> Array:
- cdf_result = cdf(x, loc, scale)
- return lax.sub(_lax_const(cdf_result, 1), cdf_result)
+ x, loc, scale = promote_args_inexact("norm.sf", x, loc, scale)
+ return cdf(-x, -loc, scale)
@_wraps(osp_stats.norm.isf, update_doc=False)
diff --git a/jax/scipy/stats/norm.py b/jax/scipy/stats/norm.py
--- a/jax/scipy/stats/norm.py
+++ b/jax/scipy/stats/norm.py
@@ -19,6 +19,7 @@
cdf as cdf,
logcdf as logcdf,
logpdf as logpdf,
+ logsf as logsf,
pdf as pdf,
ppf as ppf,
sf as sf,
| diff --git a/tests/scipy_stats_test.py b/tests/scipy_stats_test.py
--- a/tests/scipy_stats_test.py
+++ b/tests/scipy_stats_test.py
@@ -693,6 +693,23 @@ def args_maker():
tol=1e-6)
self._CompileAndCheck(lax_fun, args_maker)
+ @genNamedParametersNArgs(3)
+ def testNormLogSf(self, shapes, dtypes):
+ rng = jtu.rand_default(self.rng())
+ scipy_fun = osp_stats.norm.logsf
+ lax_fun = lsp_stats.norm.logsf
+
+ def args_maker():
+ x, loc, scale = map(rng, shapes, dtypes)
+ # clipping to ensure that scale is not too low
+ scale = np.clip(np.abs(scale), a_min=0.1, a_max=None).astype(scale.dtype)
+ return [x, loc, scale]
+
+ with jtu.strict_promotion_if_dtypes_match(dtypes):
+ self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
+ tol=1e-4)
+ self._CompileAndCheck(lax_fun, args_maker)
+
@genNamedParametersNArgs(3)
def testNormSf(self, shapes, dtypes):
rng = jtu.rand_default(self.rng())
@@ -710,6 +727,13 @@ def args_maker():
tol=1e-6)
self._CompileAndCheck(lax_fun, args_maker)
+ def testNormSfNearZero(self):
+ # Regression test for https://github.com/google/jax/issues/17199
+ value = np.array(10, np.float32)
+ self.assertAllClose(osp_stats.norm.sf(value).astype('float32'),
+ lsp_stats.norm.sf(value),
+ atol=0, rtol=1E-5)
+
@genNamedParametersNArgs(3)
def testNormPpf(self, shapes, dtypes):
rng = jtu.rand_default(self.rng())
| Numerically inaccurate `scipy.stats.norm.sf`
### Description
Since a distribution's cdf goes asymptotically to 1 as $x \to \infty$, computing it numerically rounds to 1 at some point. For this reason, distributions in `scipy.stats` also provide the method `sf = 1 - cdf`, implemented so that it retains precision.
I was about to use `jax.scipy.stats.norm.sf`, but I noticed that it is not accurate:
```python
from jax.scipy import stats as jstats
from scipy import stats
import numpy as np
value = np.array(10, np.float32)
print('scipy:', stats.norm.sf(value))
print('jax:', jstats.norm.sf(value))
```
```
scipy: 7.61985302416047e-24
jax: 0.0
```
Indeed, looking at the code, the implementation just does `1 - cdf`:
```python
@_wraps(osp_stats.norm.sf, update_doc=False)
def sf(x: ArrayLike, loc: ArrayLike = 0, scale: ArrayLike = 1) -> Array:
cdf_result = cdf(x, loc, scale)
return lax.sub(_lax_const(cdf_result, 1), cdf_result)
```
For the normal distribution, one can use `sf(x) = cdf(-x)`. I have not looked at what jax is doing for other distributions.
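A sketch of the symmetry-based computation (illustrative; the helper name is arbitrary):
```python
import jax.numpy as jnp
from jax.scipy import stats as jstats

def norm_sf(x, loc=0, scale=1):
    # sf(x; loc, scale) = cdf(-x; -loc, scale), avoiding the catastrophic
    # cancellation of 1 - cdf(x) for large x.
    return jstats.norm.cdf(-x, -loc, scale)

print(norm_sf(jnp.float32(10.0)))  # ~7.6e-24 rather than 0.0
```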
### What jax/jaxlib version are you using?
0.4.14
### Which accelerator(s) are you using?
CPU
### Additional system info
Python 3.11.2, macOS 13.4
### NVIDIA GPU info
_No response_
| Thanks for the report! #17204 fixes this for `norm.sf`; I'll look into similar fixes for the other distributions as well. | 2023-08-21T18:38:24 |
google/jax | 17,228 | google__jax-17228 | [
"5587"
]
| 36cdafdcf400e5f201ebffe94c9d048c5d2ae952 | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -85,6 +85,8 @@
'ref.citation', # Many duplicated citations in numpy/scipy docstrings.
'ref.footnote', # Many unreferenced footnotes in numpy/scipy docstrings
'myst.header',
+ # TODO(jakevdp): remove this suppression once issue is fixed.
+ 'misc.highlighting_failure', # https://github.com/ipython/ipython/issues/14142
]
# Add any paths that contain templates here, relative to this directory.
@@ -199,8 +201,6 @@
'notebooks/neural_network_with_tfds_data.*',
# Slow notebook
'notebooks/Neural_Network_and_Data_Loading.*',
- # Strange error apparently due to asynchronous cell execution
- 'notebooks/thinking_in_jax.*',
# Has extra requirements: networkx, pandas, pytorch, tensorflow, etc.
'jep/9407-type-promotion.*',
# TODO(jakevdp): enable execution on the following if possible:
| RTD build failure executing thinking_in_jax.ipynb
#5426 added a new notebook to our docs, and ever since there have been unpredictable failures in the readthedocs build. An example is here: https://readthedocs.org/projects/jax/builds/12905004/
This is the error:
```pytb
---------------------------------------------------------------------------
NameError Traceback (most recent call last)
<ipython-input-1-1cefe89b730a> in <module>
1 x = np.random.randn(3, 4)
2 y = np.random.randn(4)
----> 3 f(x, y)
NameError: name 'f' is not defined
```
<details><summary>(Show full traceback)</summary>
<p>
```pytb
Traceback (most recent call last):
File "/home/docs/checkouts/readthedocs.org/user_builds/jax/envs/5573/lib/python3.7/site-packages/nbsphinx.py", line 1021, in parse
rststring, resources = exporter.from_notebook_node(nb, resources)
File "/home/docs/checkouts/readthedocs.org/user_builds/jax/envs/5573/lib/python3.7/site-packages/nbsphinx.py", line 832, in from_notebook_node
nb, resources = pp.preprocess(nb, resources)
File "/home/docs/checkouts/readthedocs.org/user_builds/jax/envs/5573/lib/python3.7/site-packages/nbconvert/preprocessors/execute.py", line 79, in preprocess
self.execute()
File "/home/docs/checkouts/readthedocs.org/user_builds/jax/envs/5573/lib/python3.7/site-packages/nbclient/util.py", line 74, in wrapped
return just_run(coro(*args, **kwargs))
File "/home/docs/checkouts/readthedocs.org/user_builds/jax/envs/5573/lib/python3.7/site-packages/nbclient/util.py", line 53, in just_run
return loop.run_until_complete(coro)
File "/home/docs/checkouts/readthedocs.org/user_builds/jax/envs/5573/lib/python3.7/site-packages/nest_asyncio.py", line 70, in run_until_complete
return f.result()
File "/home/docs/.pyenv/versions/3.7.9/lib/python3.7/asyncio/futures.py", line 181, in result
raise self._exception
File "/home/docs/.pyenv/versions/3.7.9/lib/python3.7/asyncio/tasks.py", line 249, in __step
result = coro.send(None)
File "/home/docs/checkouts/readthedocs.org/user_builds/jax/envs/5573/lib/python3.7/site-packages/nbclient/client.py", line 541, in async_execute
cell, index, execution_count=self.code_cells_executed + 1
File "/home/docs/checkouts/readthedocs.org/user_builds/jax/envs/5573/lib/python3.7/site-packages/nbconvert/preprocessors/execute.py", line 123, in async_execute_cell
cell, resources = self.preprocess_cell(cell, self.resources, cell_index)
File "/home/docs/checkouts/readthedocs.org/user_builds/jax/envs/5573/lib/python3.7/site-packages/nbconvert/preprocessors/execute.py", line 146, in preprocess_cell
cell = run_sync(NotebookClient.async_execute_cell)(self, cell, index, store_history=self.store_history)
File "/home/docs/checkouts/readthedocs.org/user_builds/jax/envs/5573/lib/python3.7/site-packages/nbclient/util.py", line 74, in wrapped
return just_run(coro(*args, **kwargs))
File "/home/docs/checkouts/readthedocs.org/user_builds/jax/envs/5573/lib/python3.7/site-packages/nbclient/util.py", line 53, in just_run
return loop.run_until_complete(coro)
File "/home/docs/checkouts/readthedocs.org/user_builds/jax/envs/5573/lib/python3.7/site-packages/nest_asyncio.py", line 70, in run_until_complete
return f.result()
File "/home/docs/.pyenv/versions/3.7.9/lib/python3.7/asyncio/futures.py", line 181, in result
raise self._exception
File "/home/docs/.pyenv/versions/3.7.9/lib/python3.7/asyncio/tasks.py", line 249, in __step
result = coro.send(None)
File "/home/docs/checkouts/readthedocs.org/user_builds/jax/envs/5573/lib/python3.7/site-packages/nbclient/client.py", line 832, in async_execute_cell
self._check_raise_for_error(cell, exec_reply)
File "/home/docs/checkouts/readthedocs.org/user_builds/jax/envs/5573/lib/python3.7/site-packages/nbclient/client.py", line 740, in _check_raise_for_error
raise CellExecutionError.from_cell_and_msg(cell, exec_reply['content'])
nbclient.exceptions.CellExecutionError: An error occurred while executing the following cell:
------------------
x = np.random.randn(3, 4)
y = np.random.randn(4)
f(x, y)
------------------
---------------------------------------------------------------------------
NameError Traceback (most recent call last)
<ipython-input-1-1cefe89b730a> in <module>
1 x = np.random.randn(3, 4)
2 y = np.random.randn(4)
----> 3 f(x, y)
NameError: name 'f' is not defined
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/docs/checkouts/readthedocs.org/user_builds/jax/envs/5573/lib/python3.7/site-packages/sphinx/cmd/build.py", line 276, in build_main
app.build(args.force_all, filenames)
File "/home/docs/checkouts/readthedocs.org/user_builds/jax/envs/5573/lib/python3.7/site-packages/sphinx/application.py", line 349, in build
self.builder.build_update()
File "/home/docs/checkouts/readthedocs.org/user_builds/jax/envs/5573/lib/python3.7/site-packages/sphinx/builders/__init__.py", line 299, in build_update
len(to_build))
File "/home/docs/checkouts/readthedocs.org/user_builds/jax/envs/5573/lib/python3.7/site-packages/sphinx/builders/__init__.py", line 311, in build
updated_docnames = set(self.read())
File "/home/docs/checkouts/readthedocs.org/user_builds/jax/envs/5573/lib/python3.7/site-packages/sphinx/builders/__init__.py", line 418, in read
self._read_serial(docnames)
File "/home/docs/checkouts/readthedocs.org/user_builds/jax/envs/5573/lib/python3.7/site-packages/sphinx/builders/__init__.py", line 439, in _read_serial
self.read_doc(docname)
File "/home/docs/checkouts/readthedocs.org/user_builds/jax/envs/5573/lib/python3.7/site-packages/sphinx/builders/__init__.py", line 479, in read_doc
doctree = read_doc(self.app, self.env, self.env.doc2path(docname))
File "/home/docs/checkouts/readthedocs.org/user_builds/jax/envs/5573/lib/python3.7/site-packages/sphinx/io.py", line 316, in read_doc
pub.publish()
File "/home/docs/checkouts/readthedocs.org/user_builds/jax/envs/5573/lib/python3.7/site-packages/docutils/core.py", line 218, in publish
self.settings)
File "/home/docs/checkouts/readthedocs.org/user_builds/jax/envs/5573/lib/python3.7/site-packages/sphinx/io.py", line 130, in read
self.parse()
File "/home/docs/checkouts/readthedocs.org/user_builds/jax/envs/5573/lib/python3.7/site-packages/docutils/readers/__init__.py", line 77, in parse
self.parser.parse(self.input, document)
File "/home/docs/checkouts/readthedocs.org/user_builds/jax/envs/5573/lib/python3.7/site-packages/nbsphinx.py", line 1028, in parse
raise NotebookError('\n'.join(lines))
nbsphinx.NotebookError: CellExecutionError in notebooks/thinking_in_jax.ipynb:
------------------
x = np.random.randn(3, 4)
y = np.random.randn(4)
f(x, y)
------------------
---------------------------------------------------------------------------
NameError Traceback (most recent call last)
<ipython-input-1-1cefe89b730a> in <module>
1 x = np.random.randn(3, 4)
2 y = np.random.randn(4)
----> 3 f(x, y)
NameError: name 'f' is not defined
You can ignore this error by setting the following in conf.py:
nbsphinx_allow_errors = True
Notebook error:
CellExecutionError in notebooks/thinking_in_jax.ipynb:
------------------
x = np.random.randn(3, 4)
y = np.random.randn(4)
f(x, y)
------------------
---------------------------------------------------------------------------
NameError                                 Traceback (most recent call last)
<ipython-input-1-1cefe89b730a> in <module>
      1 x = np.random.randn(3, 4)
      2 y = np.random.randn(4)
----> 3 f(x, y)

NameError: name 'f' is not defined
NameError: name 'f' is not defined
You can ignore this error by setting the following in conf.py:
nbsphinx_allow_errors = True
```
</p>
</details>
The notebook in question is here: https://github.com/google/jax/blob/967f3ac7a335c09739469a18e7f2ad45bbc193df/docs/notebooks/thinking_in_jax.ipynb
Looking at the notebook, it's clear that the name `f` is defined in the previous cell. This appears to be some sort of race condition in cell execution: the cells are nominally executed synchronously, but under the hood nbclient is running cells using [asynchronous calls](https://github.com/jupyter/nbclient/blob/c3898b6df19b1f2fbbf88a6dcf7262c898d71df5/nbclient/client.py#L497-L547) that are synchronized via this utility: https://github.com/jupyter/nbclient/blob/c3898b6df19b1f2fbbf88a6dcf7262c898d71df5/nbclient/util.py#L56-L76
Something about the RTD environment is making this particular cell start running before the previous cell has populated the namespace.
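One way to poke at this outside of Sphinx is to drive nbclient directly; this is only a sketch (the notebook path is assumed from the repo layout, and it may well pass locally if the failure is specific to the RTD environment):

```python
import nbformat
from nbclient import NotebookClient

nb = nbformat.read("docs/notebooks/thinking_in_jax.ipynb", as_version=4)
# NotebookClient executes the cells in order, but schedules them through asyncio under
# the hood, which is where the suspected race would have to occur.
NotebookClient(nb, timeout=600).execute()
```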
| #5635 side-steps the issue by not executing the problematic notebook. I'm going to leave this issue open, though, to track re-enabling the execution if possible. | 2023-08-22T18:24:42 |
|
google/jax | 17,295 | google__jax-17295 | [
"17294"
]
| 3ea0a74fcc8057a20d7314eee8a829d12249ea0c | diff --git a/jax/_src/lax/lax.py b/jax/_src/lax/lax.py
--- a/jax/_src/lax/lax.py
+++ b/jax/_src/lax/lax.py
@@ -1989,6 +1989,10 @@ def _pow_jvp_lhs(g, ans, x, y):
y_dtype = dtypes.dtype(y)
x, y = jax._src.numpy.util.promote_dtypes_numeric(x, y) # TODO replace this
if dtypes.issubdtype(y_dtype, np.integer):
+ if x.shape != y.shape:
+ shape = broadcast_shapes(x.shape, y.shape)
+ x = _maybe_broadcast(shape, x)
+ y = _maybe_broadcast(shape, y)
jac = select(eq(y, _const(y, 0)), _ones(y),
mul(_replace_zero(y), pow(x, sub(y, _ones(y)))))
else:
| diff --git a/tests/lax_autodiff_test.py b/tests/lax_autodiff_test.py
--- a/tests/lax_autodiff_test.py
+++ b/tests/lax_autodiff_test.py
@@ -1140,6 +1140,14 @@ def f(x):
with self.assertRaises(NotImplementedError):
jax.jacrev(f)(x)
+ def testPowShapeMismatch(self):
+ # Regression test for https://github.com/google/jax/issues/17294
+ x = lax.iota('float32', 4)
+ y = 2
+ actual = jax.jacrev(jax.jit(jax.lax.pow))(x, y) # no error
+ expected = jax.numpy.diag(y * x ** (y - 1))
+ self.assertArraysEqual(actual, expected)
+
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
| jvp of jnp.power(arr, scalar) fails
Repro:
```python
>>> import jax
>>> jax.jacrev(jax.jit(jax.lax.pow))(jax.numpy.arange(4.0), 2)
...
TypeError: select cases must have the same shapes, got [(4,), ()].
```
It looks like this was introduced by https://github.com/google/jax/pull/16419.
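A hedged note (not from the original report): the failure appears specific to the integer-exponent branch of the pow JVP rule, and the expected result is straightforward to write down:

```python
import jax
import jax.numpy as jnp

x = jnp.arange(4.0)
y = 2
# expected Jacobian of x ** y with respect to x: a diagonal matrix with entries y * x ** (y - 1)
expected = jnp.diag(y * x ** (y - 1))
print(expected)
# passing the exponent as a float (2.0) appears to take the non-integer JVP branch and avoid the error
```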
| 2023-08-25T17:06:45 |
|
google/jax | 17,389 | google__jax-17389 | [
"17366"
]
| 24c3a9dc790c3102c9cbe486e11993372f8b26d1 | diff --git a/jax/_src/random.py b/jax/_src/random.py
--- a/jax/_src/random.py
+++ b/jax/_src/random.py
@@ -325,7 +325,7 @@ def bits(key: KeyArray,
def uniform(key: KeyArray,
shape: Union[Shape, NamedShape] = (),
- dtype: DTypeLikeFloat = dtypes.float_,
+ dtype: DTypeLikeFloat = float,
minval: RealArray = 0.,
maxval: RealArray = 1.) -> Array:
"""Sample uniform random values in [minval, maxval) with given shape/dtype.
@@ -343,6 +343,8 @@ def uniform(key: KeyArray,
A random array with the specified shape and dtype.
"""
key, _ = _check_prng_key(key)
+ dtypes.check_user_dtype_supported(dtype)
+
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `uniform` must be a float dtype, "
f"got {dtype}")
@@ -393,7 +395,7 @@ def randint(key: KeyArray,
shape: Shape,
minval: IntegerArray,
maxval: IntegerArray,
- dtype: DTypeLikeInt = dtypes.int_) -> Array:
+ dtype: DTypeLikeInt = int) -> Array:
"""Sample uniform random values in [minval, maxval) with given shape/dtype.
Args:
@@ -410,6 +412,7 @@ def randint(key: KeyArray,
A random array with the specified shape and dtype.
"""
key, _ = _check_prng_key(key)
+ dtypes.check_user_dtype_supported(dtype)
dtype = dtypes.canonicalize_dtype(dtype)
shape = core.canonicalize_shape(shape)
return _randint(key, shape, minval, maxval, dtype)
@@ -633,7 +636,7 @@ def choice(key: KeyArray,
def normal(key: KeyArray,
shape: Union[Shape, NamedShape] = (),
- dtype: DTypeLikeFloat = dtypes.float_) -> Array:
+ dtype: DTypeLikeFloat = float) -> Array:
r"""Sample standard normal random values with given shape and float dtype.
The values are returned according to the probability density function:
@@ -654,6 +657,7 @@ def normal(key: KeyArray,
A random array with the specified shape and dtype.
"""
key, _ = _check_prng_key(key)
+ dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.inexact):
raise ValueError(f"dtype argument to `normal` must be a float or complex dtype, "
f"got {dtype}")
@@ -720,6 +724,7 @@ def multivariate_normal(key: KeyArray,
``broadcast_shapes(mean.shape[:-1], cov.shape[:-2]) + mean.shape[-1:]``.
"""
key, _ = _check_prng_key(key)
+ dtypes.check_user_dtype_supported(dtype)
mean, cov = promote_dtypes_inexact(mean, cov)
if method not in {'svd', 'eigh', 'cholesky'}:
raise ValueError("method must be one of {'svd', 'eigh', 'cholesky'}")
@@ -769,7 +774,7 @@ def truncated_normal(key: KeyArray,
lower: RealArray,
upper: RealArray,
shape: Optional[Union[Shape, NamedShape]] = None,
- dtype: DTypeLikeFloat = dtypes.float_) -> Array:
+ dtype: DTypeLikeFloat = float) -> Array:
r"""Sample truncated standard normal random values with given shape and dtype.
The values are returned according to the probability density function:
@@ -798,6 +803,7 @@ def truncated_normal(key: KeyArray,
Returns values in the open interval ``(lower, upper)``.
"""
key, _ = _check_prng_key(key)
+ dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `truncated_normal` must be a float "
f"dtype, got {dtype}")
@@ -879,7 +885,7 @@ def beta(key: KeyArray,
a: RealArray,
b: RealArray,
shape: Optional[Shape] = None,
- dtype: DTypeLikeFloat = dtypes.float_) -> Array:
+ dtype: DTypeLikeFloat = float) -> Array:
r"""Sample Beta random values with given shape and float dtype.
The values are distributed according to the probability density function:
@@ -906,6 +912,7 @@ def beta(key: KeyArray,
``shape`` is not None, or else by broadcasting ``a`` and ``b``.
"""
key, _ = _check_prng_key(key)
+ dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `beta` must be a float "
f"dtype, got {dtype}")
@@ -937,7 +944,7 @@ def _beta(key, a, b, shape, dtype) -> Array:
def cauchy(key: KeyArray,
shape: Shape = (),
- dtype: DTypeLikeFloat = dtypes.float_) -> Array:
+ dtype: DTypeLikeFloat = float) -> Array:
r"""Sample Cauchy random values with given shape and float dtype.
The values are distributed according to the probability density function:
@@ -958,6 +965,7 @@ def cauchy(key: KeyArray,
A random array with the specified shape and dtype.
"""
key, _ = _check_prng_key(key)
+ dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `cauchy` must be a float "
f"dtype, got {dtype}")
@@ -976,7 +984,7 @@ def _cauchy(key, shape, dtype) -> Array:
def dirichlet(key: KeyArray,
alpha: RealArray,
shape: Optional[Shape] = None,
- dtype: DTypeLikeFloat = dtypes.float_) -> Array:
+ dtype: DTypeLikeFloat = float) -> Array:
r"""Sample Dirichlet random values with given shape and float dtype.
The values are distributed according the the probability density function:
@@ -1009,6 +1017,7 @@ def dirichlet(key: KeyArray,
``alpha.shape``.
"""
key, _ = _check_prng_key(key)
+ dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `dirichlet` must be a float "
f"dtype, got {dtype}")
@@ -1046,7 +1055,7 @@ def _softmax(x, axis) -> Array:
def exponential(key: KeyArray,
shape: Shape = (),
- dtype: DTypeLikeFloat = dtypes.float_) -> Array:
+ dtype: DTypeLikeFloat = float) -> Array:
r"""Sample Exponential random values with given shape and float dtype.
The values are distributed according the the probability density function:
@@ -1067,6 +1076,7 @@ def exponential(key: KeyArray,
A random array with the specified shape and dtype.
"""
key, _ = _check_prng_key(key)
+ dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `exponential` must be a float "
f"dtype, got {dtype}")
@@ -1213,7 +1223,7 @@ def _gamma_batching_rule(batched_args, batch_dims, *, log_space):
def gamma(key: KeyArray,
a: RealArray,
shape: Optional[Shape] = None,
- dtype: DTypeLikeFloat = dtypes.float_) -> Array:
+ dtype: DTypeLikeFloat = float) -> Array:
r"""Sample Gamma random values with given shape and float dtype.
The values are distributed according the the probability density function:
@@ -1247,6 +1257,7 @@ def gamma(key: KeyArray,
accuracy for small values of ``a``.
"""
key, _ = _check_prng_key(key)
+ dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `gamma` must be a float "
f"dtype, got {dtype}")
@@ -1259,7 +1270,7 @@ def gamma(key: KeyArray,
def loggamma(key: KeyArray,
a: RealArray,
shape: Optional[Shape] = None,
- dtype: DTypeLikeFloat = dtypes.float_) -> Array:
+ dtype: DTypeLikeFloat = float) -> Array:
"""Sample log-gamma random values with given shape and float dtype.
This function is implemented such that the following will hold for a
@@ -1288,6 +1299,7 @@ def loggamma(key: KeyArray,
gamma : standard gamma sampler.
"""
key, _ = _check_prng_key(key)
+ dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `gamma` must be a float "
f"dtype, got {dtype}")
@@ -1400,7 +1412,7 @@ def _poisson(key, lam, shape, dtype) -> Array:
def poisson(key: KeyArray,
lam: RealArray,
shape: Optional[Shape] = None,
- dtype: DTypeLikeInt = dtypes.int_) -> Array:
+ dtype: DTypeLikeInt = int) -> Array:
r"""Sample Poisson random values with given shape and integer dtype.
The values are distributed according to the probability mass function:
@@ -1423,6 +1435,7 @@ def poisson(key: KeyArray,
``shape is not None, or else by ``lam.shape``.
"""
key, _ = _check_prng_key(key)
+ dtypes.check_user_dtype_supported(dtype)
# TODO(frostig): generalize underlying poisson implementation and
# remove this check
key_impl = key.dtype.impl # type: ignore[union-attr]
@@ -1442,7 +1455,7 @@ def poisson(key: KeyArray,
def gumbel(key: KeyArray,
shape: Shape = (),
- dtype: DTypeLikeFloat = dtypes.float_) -> Array:
+ dtype: DTypeLikeFloat = float) -> Array:
"""Sample Gumbel random values with given shape and float dtype.
The values are distributed according to the probability density function:
@@ -1461,6 +1474,7 @@ def gumbel(key: KeyArray,
A random array with the specified shape and dtype.
"""
key, _ = _check_prng_key(key)
+ dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `gumbel` must be a float "
f"dtype, got {dtype}")
@@ -1519,7 +1533,7 @@ def categorical(key: KeyArray,
def laplace(key: KeyArray,
shape: Shape = (),
- dtype: DTypeLikeFloat = dtypes.float_) -> Array:
+ dtype: DTypeLikeFloat = float) -> Array:
r"""Sample Laplace random values with given shape and float dtype.
The values are distributed according to the probability density function:
@@ -1538,6 +1552,7 @@ def laplace(key: KeyArray,
A random array with the specified shape and dtype.
"""
key, _ = _check_prng_key(key)
+ dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `laplace` must be a float "
f"dtype, got {dtype}")
@@ -1555,7 +1570,7 @@ def _laplace(key, shape, dtype) -> Array:
def logistic(key: KeyArray,
shape: Shape = (),
- dtype: DTypeLikeFloat = dtypes.float_) -> Array:
+ dtype: DTypeLikeFloat = float) -> Array:
r"""Sample logistic random values with given shape and float dtype.
The values are distributed according to the probability density function:
@@ -1574,6 +1589,7 @@ def logistic(key: KeyArray,
A random array with the specified shape and dtype.
"""
key, _ = _check_prng_key(key)
+ dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `logistic` must be a float "
f"dtype, got {dtype}")
@@ -1591,7 +1607,7 @@ def _logistic(key, shape, dtype):
def pareto(key: KeyArray,
b: RealArray,
shape: Optional[Shape] = None,
- dtype: DTypeLikeFloat = dtypes.float_) -> Array:
+ dtype: DTypeLikeFloat = float) -> Array:
r"""Sample Pareto random values with given shape and float dtype.
The values are distributed according to the probability density function:
@@ -1616,6 +1632,7 @@ def pareto(key: KeyArray,
``shape`` is not None, or else by ``b.shape``.
"""
key, _ = _check_prng_key(key)
+ dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `pareto` must be a float "
f"dtype, got {dtype}")
@@ -1639,7 +1656,7 @@ def _pareto(key, b, shape, dtype) -> Array:
def t(key: KeyArray,
df: RealArray,
shape: Shape = (),
- dtype: DTypeLikeFloat = dtypes.float_) -> Array:
+ dtype: DTypeLikeFloat = float) -> Array:
r"""Sample Student's t random values with given shape and float dtype.
The values are distributed according to the probability density function:
@@ -1664,6 +1681,7 @@ def t(key: KeyArray,
``shape`` is not None, or else by ``df.shape``.
"""
key, _ = _check_prng_key(key)
+ dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `t` must be a float "
f"dtype, got {dtype}")
@@ -1690,7 +1708,7 @@ def _t(key, df, shape, dtype) -> Array:
def chisquare(key: KeyArray,
df: RealArray,
shape: Optional[Shape] = None,
- dtype: DTypeLikeFloat = dtypes.float_) -> Array:
+ dtype: DTypeLikeFloat = float) -> Array:
r"""Sample Chisquare random values with given shape and float dtype.
The values are distributed according to the probability density function:
@@ -1716,6 +1734,7 @@ def chisquare(key: KeyArray,
``shape`` is not None, or else by ``df.shape``.
"""
key, _ = _check_prng_key(key)
+ dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError("dtype argument to `chisquare` must be a float "
f"dtype, got {dtype}")
@@ -1742,7 +1761,7 @@ def f(key: KeyArray,
dfnum: RealArray,
dfden: RealArray,
shape: Optional[Shape] = None,
- dtype: DTypeLikeFloat = dtypes.float_) -> Array:
+ dtype: DTypeLikeFloat = float) -> Array:
r"""Sample F-distribution random values with given shape and float dtype.
The values are distributed according to the probability density function:
@@ -1773,6 +1792,7 @@ def f(key: KeyArray,
``shape`` is not None, or else by ``df.shape``.
"""
key, _ = _check_prng_key(key)
+ dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError("dtype argument to `f` must be a float "
f"dtype, got {dtype}")
@@ -1803,7 +1823,7 @@ def _f(key, dfnum, dfden, shape, dtype) -> Array:
def rademacher(key: KeyArray,
shape: Shape,
- dtype: DTypeLikeInt = dtypes.int_) -> Array:
+ dtype: DTypeLikeInt = int) -> Array:
r"""Sample from a Rademacher distribution.
The values are distributed according to the probability mass function:
@@ -1824,6 +1844,7 @@ def rademacher(key: KeyArray,
"""
key, _ = _check_prng_key(key)
+ dtypes.check_user_dtype_supported(dtype)
dtype = dtypes.canonicalize_dtype(dtype)
shape = core.canonicalize_shape(shape)
return _rademacher(key, shape, dtype)
@@ -1837,7 +1858,7 @@ def _rademacher(key, shape, dtype) -> Array:
def maxwell(key: KeyArray,
shape: Shape = (),
- dtype: DTypeLikeFloat = dtypes.float_) -> Array:
+ dtype: DTypeLikeFloat = float) -> Array:
r"""Sample from a one sided Maxwell distribution.
The values are distributed according to the probability density function:
@@ -1859,6 +1880,7 @@ def maxwell(key: KeyArray,
# Generate samples using:
# sqrt(X^2 + Y^2 + Z^2), X,Y,Z ~N(0,1)
key, _ = _check_prng_key(key)
+ dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `maxwell` must be a float "
f"dtype, got {dtype}")
@@ -1878,7 +1900,7 @@ def double_sided_maxwell(key: KeyArray,
loc: RealArray,
scale: RealArray,
shape: Shape = (),
- dtype: DTypeLikeFloat = dtypes.float_) -> Array:
+ dtype: DTypeLikeFloat = float) -> Array:
r"""Sample from a double sided Maxwell distribution.
The values are distributed according to the probability density function:
@@ -1901,6 +1923,7 @@ def double_sided_maxwell(key: KeyArray,
"""
key, _ = _check_prng_key(key)
+ dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `double_sided_maxwell` must be a float"
f" dtype, got {dtype}")
@@ -1929,7 +1952,7 @@ def weibull_min(key: KeyArray,
scale: RealArray,
concentration: RealArray,
shape: Shape = (),
- dtype: DTypeLikeFloat = dtypes.float_) -> Array:
+ dtype: DTypeLikeFloat = float) -> Array:
r"""Sample from a Weibull distribution.
The values are distributed according to the probability density function:
@@ -1952,6 +1975,7 @@ def weibull_min(key: KeyArray,
"""
key, _ = _check_prng_key(key)
+ dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `weibull_min` must be a float "
f"dtype, got {dtype}")
@@ -1982,7 +2006,7 @@ def orthogonal(
key: KeyArray,
n: int,
shape: Shape = (),
- dtype: DTypeLikeFloat = dtypes.float_
+ dtype: DTypeLikeFloat = float
) -> Array:
"""Sample uniformly from the orthogonal group O(n).
@@ -1999,6 +2023,7 @@ def orthogonal(
A random array of shape `(*shape, n, n)` and specified dtype.
"""
key, _ = _check_prng_key(key)
+ dtypes.check_user_dtype_supported(dtype)
_check_shape("orthogonal", shape)
n = core.concrete_or_error(index, n, "The error occurred in jax.random.orthogonal()")
z = normal(key, (*shape, n, n), dtype)
@@ -2010,7 +2035,7 @@ def generalized_normal(
key: KeyArray,
p: float,
shape: Shape = (),
- dtype: DTypeLikeFloat = dtypes.float_
+ dtype: DTypeLikeFloat = float
) -> Array:
r"""Sample from the generalized normal distribution.
@@ -2033,6 +2058,7 @@ def generalized_normal(
A random array with the specified shape and dtype.
"""
key, _ = _check_prng_key(key)
+ dtypes.check_user_dtype_supported(dtype)
_check_shape("generalized_normal", shape)
keys = split(key)
g = gamma(keys[0], 1/p, shape, dtype)
@@ -2044,7 +2070,7 @@ def ball(
d: int,
p: float = 2,
shape: Shape = (),
- dtype: DTypeLikeFloat = dtypes.float_
+ dtype: DTypeLikeFloat = float
):
"""Sample uniformly from the unit Lp ball.
@@ -2062,6 +2088,7 @@ def ball(
A random array of shape `(*shape, d)` and specified dtype.
"""
key, _ = _check_prng_key(key)
+ dtypes.check_user_dtype_supported(dtype)
_check_shape("ball", shape)
d = core.concrete_or_error(index, d, "The error occurred in jax.random.ball()")
k1, k2 = split(key)
@@ -2073,7 +2100,7 @@ def ball(
def rayleigh(key: KeyArray,
scale: RealArray,
shape: Optional[Shape] = None,
- dtype: DTypeLikeFloat = dtypes.float_) -> Array:
+ dtype: DTypeLikeFloat = float) -> Array:
r"""Sample Rayleigh random values with given shape and float dtype.
The values are returned according to the probability density function:
@@ -2099,6 +2126,7 @@ def rayleigh(key: KeyArray,
``shape`` is not None, or else by ``scale.shape``.
"""
key, _ = _check_prng_key(key)
+ dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError("dtype argument to `rayleigh` must be a float "
f"dtype, got {dtype}")
@@ -2125,7 +2153,7 @@ def _rayleigh(key, scale, shape, dtype) -> Array:
def wald(key: KeyArray,
mean: RealArray,
shape: Optional[Shape] = None,
- dtype: DTypeLikeFloat = dtypes.float_) -> Array:
+ dtype: DTypeLikeFloat = float) -> Array:
r"""Sample Wald random values with given shape and float dtype.
The values are returned according to the probability density function:
@@ -2152,6 +2180,7 @@ def wald(key: KeyArray,
``shape`` is not None, or else by ``mean.shape``.
"""
key, _ = _check_prng_key(key)
+ dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError("dtype argument to `wald` must be a float "
f"dtype, got {dtype}")
@@ -2182,7 +2211,7 @@ def _wald(key, mean, shape, dtype) -> Array:
def geometric(key: KeyArray,
p: RealArray,
shape: Optional[Shape] = None,
- dtype: DTypeLikeInt = dtypes.int_) -> Array:
+ dtype: DTypeLikeInt = int) -> Array:
r"""Sample Geometric random values with given shape and float dtype.
The values are returned according to the probability mass function:
@@ -2207,6 +2236,7 @@ def geometric(key: KeyArray,
``shape`` is not None, or else by ``p.shape``.
"""
key, _ = _check_prng_key(key)
+ dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.integer):
raise ValueError("dtype argument to `geometric` must be an int "
f"dtype, got {dtype}")
@@ -2236,7 +2266,7 @@ def triangular(key: KeyArray,
mode: RealArray,
right: RealArray,
shape: Optional[Shape] = None,
- dtype: DTypeLikeFloat = dtypes.float_) -> Array:
+ dtype: DTypeLikeFloat = float) -> Array:
r"""Sample Triangular random values with given shape and float dtype.
The values are returned according to the probability density function:
@@ -2299,7 +2329,7 @@ def _triangular(key, left, mode, right, shape, dtype) -> Array:
def lognormal(key: KeyArray,
sigma: RealArray = np.float32(1),
shape: Optional[Shape] = None,
- dtype: DTypeLikeFloat = dtypes.float_) -> Array:
+ dtype: DTypeLikeFloat = float) -> Array:
r""" Sample lognormal random values with given shape and float dtype.
The values are distributed according to the probability density function:
| diff --git a/tests/random_test.py b/tests/random_test.py
--- a/tests/random_test.py
+++ b/tests/random_test.py
@@ -1085,7 +1085,7 @@ def testGammaGradType(self):
@jtu.sample_product(
lam=[0.5, 3, 9, 11, 50, 500],
- dtype=[np.int16, np.int32, np.int64],
+ dtype=jtu.dtypes.supported([np.int16, np.int32, np.int64]),
)
def testPoisson(self, lam, dtype):
key = self.make_key(0)
@@ -1662,8 +1662,8 @@ def testWald(self, mean, dtype):
self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.invgauss(mu=mean).cdf)
@jtu.sample_product(
- p= [0.2, 0.3, 0.4, 0.5 ,0.6],
- dtype= [np.int16, np.int32, np.int64])
+ p=[0.2, 0.3, 0.4, 0.5 ,0.6],
+ dtype=jtu.dtypes.supported([np.int16, np.int32, np.int64]))
def testGeometric(self, p, dtype):
key = self.make_key(1)
rand = lambda key: random.geometric(key, p, shape=(10000, ), dtype=dtype)
diff --git a/tests/x64_context_test.py b/tests/x64_context_test.py
--- a/tests/x64_context_test.py
+++ b/tests/x64_context_test.py
@@ -112,6 +112,8 @@ def func_x64():
self.assertEqual(x32.result(), jnp.int32)
@jax.legacy_prng_key('allow')
+ @jtu.ignore_warning(category=UserWarning,
+ message="Explicitly requested dtype float64 is not available")
def test_jit_cache(self):
if jtu.device_under_test() == "tpu":
self.skipTest("64-bit random not available on TPU")
| Missing warning on requesting float64 in random.uniform
### Description
It's a known gotcha that by default JAX very aggressively uses fp32, overriding fp64 even when requested explicitly. Warnings about this are very helpful but are not displayed consistently.
`jax.numpy.zeros`:
```python
In [1]: import jax.numpy as jnp
In [2]: jnp.zeros(1, dtype=jnp.float64).dtype
<ipython-input-2-cf3e83ec7758>:1: UserWarning: Explicitly requested dtype <class 'jax.numpy.float64'> requested in zeros is not available, and will be truncated to dtype float32. To enable more dtypes, set the jax_enable_x64 configuration option or the JAX_ENABLE_X64 shell environment variable. See https://github.com/google/jax#current-gotchas for more.
jnp.zeros(1, dtype=jnp.float64).dtype
Out[2]: dtype('float32')
```
`jax.random.uniform`:
```python
In [3]: from jax import random
In [4]: random.uniform(random.PRNGKey(0), dtype=jnp.float64).dtype
Out[4]: dtype('float32')
```
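For context (my addition, not the reporter's): `jnp.zeros` gets its warning from a dtype-check helper, and the change here routes the random samplers through that same helper. A minimal sketch, assuming the private module layout stays as it is today:

```python
import jax.numpy as jnp
from jax._src import dtypes

# warns "Explicitly requested dtype float64 ... is not available" when x64 is disabled
dtypes.check_user_dtype_supported(jnp.float64, "uniform")
```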
### What jax/jaxlib version are you using?
0.4.14
### Which accelerator(s) are you using?
_No response_
### Additional system info
_No response_
### NVIDIA GPU info
_No response_
| Thanks! We're aware of this issue. I have a work-in-progress PR that cleans up dtype handing in `jax.random`, but it stalled because it led to breakages in so many downstream projects: #15177
Good to know, thanks. | 2023-08-31T17:50:32 |
google/jax | 17,709 | google__jax-17709 | [
"17699"
]
| 0ce97c2f46be67cf2a495c9ce4c965dd9f57b34c | diff --git a/jax/_src/dtypes.py b/jax/_src/dtypes.py
--- a/jax/_src/dtypes.py
+++ b/jax/_src/dtypes.py
@@ -327,13 +327,15 @@ def issubdtype(a: DTypeLike, b: DTypeLike) -> bool:
This is like :func:`numpy.issubdtype`, but can handle dtype extensions such as
:obj:`jax.dtypes.bfloat16`.
"""
+ # Handle extended types & canonicalizes all concrete types to np.dtype instances.
if isinstance(a, ExtendedDType):
return _issubclass(a.type, b)
- elif _issubclass(b, extended):
- return False
- # Canonicalizes all concrete types to np.dtype instances
a = a if _is_typeclass(a) else np.dtype(a)
+
+ if _issubclass(b, extended):
+ return False
b = b if _is_typeclass(b) else np.dtype(b)
+
if isinstance(a, np.dtype):
if a in _custom_float_dtypes:
# Avoid implicitly casting list elements below to a dtype.
@@ -584,6 +586,8 @@ def dtype(x: Any, *, canonicalize: bool = False) -> DType:
dt = python_scalar_dtypes[x]
elif type(x) in python_scalar_dtypes:
dt = python_scalar_dtypes[type(x)]
+ elif _issubclass(x, np.generic):
+ return np.dtype(x)
elif issubdtype(getattr(x, 'dtype', None), extended):
dt = x.dtype
else:
diff --git a/jax/random.py b/jax/random.py
--- a/jax/random.py
+++ b/jax/random.py
@@ -194,12 +194,12 @@
# Added September 13, 2023:
"PRNGKeyArray": (
"jax.random.PRNGKeyArray is deprecated. Use jax.Array for annotations, and "
- "jax.dtypes.issubdtype(arr, jax.dtypes.prng_key) for runtime detection of "
+ "jax.dtypes.issubdtype(arr.dtype, jax.dtypes.prng_key) for runtime detection of "
"typed prng keys.", _PRNGKeyArray
),
"KeyArray": (
"jax.random.KeyArray is deprecated. Use jax.Array for annotations, and "
- "jax.dtypes.issubdtype(arr, jax.dtypes.prng_key) for runtime detection of "
+ "jax.dtypes.issubdtype(arr.dtype, jax.dtypes.prng_key) for runtime detection of "
"typed prng keys.", _PRNGKeyArray
),
# Added September 21, 2023
| diff --git a/tests/dtypes_test.py b/tests/dtypes_test.py
--- a/tests/dtypes_test.py
+++ b/tests/dtypes_test.py
@@ -638,6 +638,7 @@ def testArrayRepr(self, dtype, weak_type):
numpy_dtype_promotion=['strict', 'standard']
)
def testSafeToCast(self, input_dtype, output_dtype, numpy_dtype_promotion):
+ print(input_dtype, output_dtype)
with jax.numpy_dtype_promotion(numpy_dtype_promotion):
# First the special cases which are always safe:
always_safe = (
diff --git a/tests/random_test.py b/tests/random_test.py
--- a/tests/random_test.py
+++ b/tests/random_test.py
@@ -62,7 +62,7 @@ def _prng_key_as_array(key):
def _maybe_unwrap(key):
# TODO(frostig): remove some day when we deprecate "raw" key arrays
unwrap = prng_internal.random_unwrap
- return unwrap(key) if jnp.issubdtype(key, dtypes.prng_key) else key
+ return unwrap(key) if jnp.issubdtype(key.dtype, dtypes.prng_key) else key
PRNG_IMPLS = [('threefry2x32', prng_internal.threefry_prng_impl),
@@ -1742,6 +1742,8 @@ def test_issubdtype(self):
key = random.key(42)
self.assertTrue(jnp.issubdtype(key.dtype, dtypes.prng_key))
self.assertFalse(jnp.issubdtype(key.dtype, np.integer))
+ with self.assertRaisesRegex(TypeError, "Cannot interpret"):
+ jnp.issubdtype(key, dtypes.prng_key)
@skipIf(not config.jax_enable_custom_prng, 'relies on typed key upgrade flag')
def test_construction_upgrade_flag(self):
| Type checking behaviour for RBG PRNG different when upgrading to jax v0.4.16
### Description
When upgrading to jax v0.4.16, the following `DeprecationWarning` is shown:
```
DeprecationWarning: jax.random.PRNGKeyArray is deprecated. Use jax.Array for annotations, and jax.dtypes.issubdtype(arr, jax.dtypes.prng_key) for runtime detection of typed prng keys.
```
This implies that the behaviour of `isinstance(key, jax.random.PRNGKeyArray)` and `jax.dtypes.issubdtype(key, jax.dtypes.prng_key)` should be the same. However, they are not the same when using RBG PRNG.
Code to reproduce:
```python
import os; os.environ['JAX_PLATFORMS'] = 'cpu'
import jax
import jax.random as rand
jax.config.update('jax_enable_custom_prng', True)
jax.config.update('jax_default_prng_impl', 'rbg')
key = rand.PRNGKey(3407)
print(isinstance(key, jax.random.PRNGKeyArray))
print(jax.dtypes.issubdtype(key, jax.dtypes.prng_key))
```
Output:
```
True
False
```
### What jax/jaxlib version are you using?
jax 0.4.16
### Which accelerator(s) are you using?
CPU
### Additional system info
Python 3.11.5, Ubuntu 20.04.6 LTS x86_64
### NVIDIA GPU info
_No response_
| I think you meant this:
```python
jax.dtypes.issubdtype(key.dtype, jax.dtypes.prng_key)
```
With that change, the code should work as expected.
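A short end-to-end check of the corrected snippet (illustration only; `jax.random.key` constructs a typed key directly, without any flags):

```python
import jax

key = jax.random.key(0)  # new-style typed key
print(jax.dtypes.issubdtype(key.dtype, jax.dtypes.prng_key))   # True
print(jax.dtypes.issubdtype(key.dtype, jax.numpy.integer))     # False
```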
Looks like the snippet in the warning is wrong – I'll correct that.
You can read more about this in https://jax.readthedocs.io/en/latest/jep/9263-typed-keys.html
Fixed in #17709 – thanks for the report! | 2023-09-21T15:56:14 |
google/jax | 17,766 | google__jax-17766 | [
"17761"
]
| 5aaa15df845aeedf999e6197d6d7bbdcf1f4e5c4 | diff --git a/jax/_src/prng.py b/jax/_src/prng.py
--- a/jax/_src/prng.py
+++ b/jax/_src/prng.py
@@ -167,6 +167,10 @@ def size(self) -> int: ...
@abc.abstractmethod
def dtype(self): ...
+ @property
+ @abc.abstractmethod
+ def itemsize(self): ...
+
@property
@abc.abstractmethod
def sharding(self): ...
@@ -280,6 +284,10 @@ def ndim(self):
def dtype(self):
return KeyTy(self.impl)
+ @property
+ def itemsize(self):
+ return self.dtype.itemsize
+
_device = property(op.attrgetter('_base_array._device'))
_committed = property(op.attrgetter('_base_array._committed'))
device = property(op.attrgetter('_base_array.device')) # type: ignore[assignment]
| diff --git a/tests/random_test.py b/tests/random_test.py
--- a/tests/random_test.py
+++ b/tests/random_test.py
@@ -1805,6 +1805,12 @@ def test_key_dtype_attributes(self):
self.assertEqual(key.size * key.dtype.itemsize,
key_raw.size * key_raw.dtype.itemsize)
+ def test_key_attributes(self):
+ key = self.make_keys()
+ self.assertEqual(key.itemsize, key.dtype.itemsize)
+ self.assertEqual(key.size, math.prod(key.shape))
+ self.assertEqual(key.ndim, len(key.shape))
+
def test_isinstance(self):
@jax.jit
def f(k):
| jax.random.key(0).itemsize crashes
### Description
I would expect the following to work, as it works for numpy arrays.
```
In [13]: jax.random.key(3).itemsize
---------------------------------------------------------------------------
NotImplementedError Traceback (most recent call last)
Cell In[13], line 1
----> 1 jax.random.key(3).itemsize
File ~/Documents/pythonenvs/mpi4jax/python-3.11.1/lib/python3.11/site-packages/jax/_src/numpy/array_methods.py:779, in _make_abstract_method.<locals>.method(*args, **kwargs)
776 @abc.abstractmethod
777 @wraps(func)
778 def method(*args, **kwargs):
--> 779 raise NotImplementedError(f"Cannot call abstract method {name}")
NotImplementedError: Cannot call abstract method itemsize
```
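For what it's worth (my note, not the reporter's), the size is already reachable through the dtype, which is exactly what the fix forwards to:

```python
import jax

key = jax.random.key(3)
print(key.dtype.itemsize)  # works today; after the fix, key.itemsize simply returns this value
```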
### What jax/jaxlib version are you using?
_No response_
### Which accelerator(s) are you using?
_No response_
### Additional system info
_No response_
### NVIDIA GPU info
_No response_
| 2023-09-25T15:52:57 |
|
google/jax | 17,925 | google__jax-17925 | [
"17922"
]
| 59d4f4462a27932bb17f10ac44b0f6a9d77fdd55 | diff --git a/jax/_src/random.py b/jax/_src/random.py
--- a/jax/_src/random.py
+++ b/jax/_src/random.py
@@ -1169,7 +1169,7 @@ def _next_kxv(kxv):
_, _, V, _ = lax.while_loop(_cond_fn, _body_fn, (key, zero, one, _lax_const(alpha, 2)))
if log_space:
log_samples = lax.neg(exponential(subkey, (), dtype=dtype))
- log_boost = lax.select(boost_mask, zero, lax.mul(log_samples, lax.div(one, alpha_orig)))
+ log_boost = lax.select(boost_mask | (log_samples == 0), zero, lax.mul(log_samples, lax.div(one, alpha_orig)))
return lax.add(lax.add(lax.log(d), lax.log(V)), log_boost)
else:
samples = 1 - uniform(subkey, (), dtype=dtype)
| diff --git a/tests/random_test.py b/tests/random_test.py
--- a/tests/random_test.py
+++ b/tests/random_test.py
@@ -451,6 +451,20 @@ def test_threefry_split_vmapped_fold_in_symmetry(self, make_key):
self.assertArraysEqual(f2, s2)
self.assertArraysEqual(f3, s3)
+ @skipIf(config.jax_threefry_partitionable, 'changed random bit values')
+ def test_loggamma_nan_corner_case(self):
+ # regression test for https://github.com/google/jax/issues/17922
+ # This particular key previously led to NaN output.
+ # If the underlying implementation ever changes, this test will no longer
+ # exercise this corner case, so we compare to a particular output value
+ # rather than just checking for lack of NaNs.
+ expected = jnp.float32(-4.595436)
+ key = random.wrap_key_data(
+ jnp.array([3200590325, 713258242], dtype='uint32'))
+ actual = random.loggamma(key, 0.0, dtype='float32')
+ rtol = 1E-4 if jtu.test_device_matches(["tpu"]) else 1E-6
+ self.assertAllClose(expected, actual, rtol=rtol)
+
@parameterized.parameters([params
for d in [
{"seed": 0, "typ": int, "jit": True, "key": [0, 0]},
| `random.dirichlet` sometimes samples `nan` for sparse alpha
### Description
In very particular cases, `jax.random.dirichlet` produces unexpected `nan`s. I found that these sometimes occur when some, but not all, values of `alpha` are zero. I've managed to track down one specific case.
```python
import jax.numpy as jnp
from jax import random
# Specific key to reproduce error.
key = jnp.array([1328709358, 4232441613], dtype=jnp.uint32)
# Specific alpha, inside the zip-file attached to this issue.
# (I was unable to reproduce the error with a smaller matrix alpha, sorry!).
alpha = jnp.load('alpha.npy')
theta = random.dirichlet(key, alpha)
jnp.isnan(theta).any() # Is True.
```
You can download `alpha.npy` from this zip: [alpha.zip](https://github.com/google/jax/files/12803309/alpha.zip)
For this specific case, `random.loggamma` inside `random.dirichlet` is the culprit. Specifically, the element corresponding to `alpha[153, 38]` produces the `nan`, but only when I use the entire matrix `alpha` (not just `alpha[153]`).
```python
random.loggamma(key, alpha)[153]
```
I think this is a bug, right?
Let me know if there is anything that I can do to further clarify or help out!
Thanks in advance,
Hylke
### What jax/jaxlib version are you using?
jax-0.4.17/jaxlib-0.4.17
### Which accelerator(s) are you using?
The problem occurs both CPU & GPU.
### Additional system info
Ubuntu 23.04, Python 3.11
### NVIDIA GPU info
I don't think this is GPU specific. At any rate, the output of `nvidia-smi` on the `nvidia/cuda:12.1.1-cudnn8-devel-ubuntu22.04` Docker container:
```
+---------------------------------------------------------------------------------------+
| NVIDIA-SMI 535.54.03 Driver Version: 535.54.03 CUDA Version: 12.2 |
|-----------------------------------------+----------------------+----------------------+
| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|=========================================+======================+======================|
| 0 NVIDIA A40 On | 00000000:00:08.0 Off | 0 |
| 0% 42C P0 78W / 300W | 34334MiB / 46068MiB | 0% Default |
| | | N/A |
+-----------------------------------------+----------------------+----------------------+
| 1 NVIDIA A40 On | 00000000:00:09.0 Off | 0 |
| 0% 41C P0 90W / 300W | 17118MiB / 46068MiB | 0% Default |
| | | N/A |
+-----------------------------------------+----------------------+----------------------+
| 2 NVIDIA A40 On | 00000000:00:0A.0 Off | 0 |
| 0% 45C P0 115W / 300W | 17142MiB / 46068MiB | 68% Default |
| | | N/A |
+-----------------------------------------+----------------------+----------------------+
| 3 NVIDIA A40 On | 00000000:00:0B.0 Off | 0 |
| 0% 41C P0 112W / 300W | 33510MiB / 46068MiB | 0% Default |
| | | N/A |
+-----------------------------------------+----------------------+----------------------+
| 4 NVIDIA A40 On | 00000000:00:0C.0 Off | 0 |
| 0% 29C P8 21W / 300W | 4MiB / 46068MiB | 0% Default |
| | | N/A |
+-----------------------------------------+----------------------+----------------------+
| 5 NVIDIA A40 On | 00000000:00:0D.0 Off | 0 |
| 0% 29C P8 21W / 300W | 4MiB / 46068MiB | 0% Default |
| | | N/A |
+-----------------------------------------+----------------------+----------------------+
| 6 NVIDIA A40 On | 00000000:00:0E.0 Off | 0 |
| 0% 31C P8 21W / 300W | 4MiB / 46068MiB | 0% Default |
| | | N/A |
+-----------------------------------------+----------------------+----------------------+
| 7 NVIDIA A40 On | 00000000:00:0F.0 Off | 0 |
| 0% 29C P8 21W / 300W | 4MiB / 46068MiB | 0% Default |
| | | N/A |
+-----------------------------------------+----------------------+----------------------+
+---------------------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=======================================================================================|
| 0 N/A N/A 19887 C python 34322MiB |
| 1 N/A N/A 4804 C python 17106MiB |
| 2 N/A N/A 11715 C python 17130MiB |
| 3 N/A N/A 23920 C python 33498MiB |
+---------------------------------------------------------------------------------------+
```
| Thanks for the report! It turns out this also reproduces if you use `alpha = jnp.zeros((1797, 50))`, so no need to load your specific data.
I managed to reduce the problematic case to this:
```python
import jax
from jax._src.random import _gamma_one
key=jax.random.wrap_key_data(jax.numpy.array([1057748167, 1356978999], dtype='uint32'))
print(_gamma_one(key, 0.0, log_space=True))
# nan
```
The problem is this:
```python
_, subkey = jax.random.split(key)
print(jax.random.exponential(subkey))
# 0.0
```
This happens to be a key that returns exactly zero from the exponential distribution (because it returns exactly zero from the underlying uniform draw), so `log_samples` in the loggamma implementation is zero, leading to a NaN in the following statement: https://github.com/google/jax/blob/2fe00f88a0fb336ca76cbf87e335b18e5dae1c70/jax/_src/random.py#L1171-L1172
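Concretely, with `alpha_orig == 0` the factor `lax.div(one, alpha_orig)` is `inf`, so a zero exponential draw turns the product into `0 * inf`. A tiny sketch of that arithmetic (my illustration, using plain `jnp` values rather than the internal code):

```python
import jax.numpy as jnp

log_samples = jnp.float32(0.0)                    # the unlucky exponential sample
inv_alpha = jnp.float32(1.0) / jnp.float32(0.0)   # inf when alpha == 0
print(log_samples * inv_alpha)                    # nan, which the extra `log_samples == 0` mask avoids
```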
I'll have to think about whether this is fundamentally a problem with `random.exponential`, or fundamentally a problem with the `random.loggamma` implementation.
#17925 should fix this - thanks for the report!
Wow, thanks for the incredibly fast fix! Happy I could help. | 2023-10-04T17:28:15 |
google/jax | 17,959 | google__jax-17959 | [
"17958"
]
| 68c84a6c5c67b104ec3bf0df2cc32bd644d5414c | diff --git a/jax/_src/lax/lax.py b/jax/_src/lax/lax.py
--- a/jax/_src/lax/lax.py
+++ b/jax/_src/lax/lax.py
@@ -1710,6 +1710,7 @@ def _nary_lower_hlo(op: Callable, ctx,
_complex_elem_types = {np.float32, np.float64}
_int = {np.integer}
_bool = {np.bool_}
+_signedint = {np.signedinteger}
_num = _int | _float | _complex
_any = _int | _float | _complex | _bool
@@ -1944,7 +1945,7 @@ def _conj_transpose_rule(t, x, *, input_dtype):
ad.primitive_jvps[conj_p] = partial(ad.linear_jvp, conj_p)
ad.primitive_transposes[conj_p] = _conj_transpose_rule
-abs_p = unop(_complex_basetype, _num, 'abs')
+abs_p = unop(_complex_basetype, _signedint | _float | _complex, 'abs')
mlir.register_lowering(abs_p, partial(_nary_lower_hlo, hlo.AbsOp))
def _abs_jvp_rule(g, ans, x):
| lax.abs crashes on unsigned ints
Repro:
```python
import jax
jax.lax.abs(jax.numpy.uint32(1))
```
```pytb
Traceback (most recent call last):
File "/Users/vanderplas/github/google/jax/jax/_src/interpreters/mlir.py", line 814, in lower_jaxpr_to_module
if not ctx.module.operation.verify():
jaxlib.mlir._mlir_libs.MLIRError: Verification failed:
error: "jit(abs)/jit(main)/abs"("/Users/vanderplas/github/google/jax/tmp.py":2:0): 'stablehlo.abs' op operand #0 must be tensor of 4/8/16/32/64-bit signless integer or f8E4M3B11FNUZ type or f8E4M3FN type or f8E4M3FNUZ type or f8E5M2 type or f8E5M2FNUZ type or 16-bit float or 32-bit float or 64-bit float or bfloat16 type or complex type with 32-bit float or 64-bit float elements or 4/8/16/32-bit uniform quantized signed integer or 4/8/16/32-bit uniform quantized unsigned integer values, but got 'tensor<ui32>'
note: "jit(abs)/jit(main)/abs"("/Users/vanderplas/github/google/jax/tmp.py":2:0): see current operation: %0 = "stablehlo.abs"(%arg0) : (tensor<ui32>) -> tensor<ui32>
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/Users/vanderplas/github/google/jax/tmp.py", line 2, in <module>
jax.lax.abs(jax.numpy.uint32(1))
File "/Users/vanderplas/github/google/jax/jax/_src/lax/lax.py", line 368, in abs
return abs_p.bind(x)
File "/Users/vanderplas/github/google/jax/jax/_src/core.py", line 386, in bind
return self.bind_with_trace(find_top_trace(args), args, params)
File "/Users/vanderplas/github/google/jax/jax/_src/core.py", line 389, in bind_with_trace
out = trace.process_primitive(self, map(trace.full_raise, args), params)
File "/Users/vanderplas/github/google/jax/jax/_src/core.py", line 869, in process_primitive
return primitive.impl(*tracers, **params)
File "/Users/vanderplas/github/google/jax/jax/_src/dispatch.py", line 128, in apply_primitive
compiled_fun = xla_primitive_callable(
File "/Users/vanderplas/github/google/jax/jax/_src/util.py", line 263, in wrapper
return cached(config._trace_context(), *args, **kwargs)
File "/Users/vanderplas/github/google/jax/jax/_src/util.py", line 256, in cached
return f(*args, **kwargs)
File "/Users/vanderplas/github/google/jax/jax/_src/dispatch.py", line 157, in xla_primitive_callable
computation = sharded_lowering(
File "/Users/vanderplas/github/google/jax/jax/_src/dispatch.py", line 188, in sharded_lowering
return pxla.lower_sharding_computation(
File "/Users/vanderplas/github/google/jax/jax/_src/profiler.py", line 314, in wrapper
return func(*args, **kwargs)
File "/Users/vanderplas/github/google/jax/jax/_src/interpreters/pxla.py", line 2049, in lower_sharding_computation
nreps, tuple_args, shape_poly_state) = _cached_lowering_to_hlo(
File "/Users/vanderplas/github/google/jax/jax/_src/interpreters/pxla.py", line 1850, in _cached_lowering_to_hlo
lowering_result = mlir.lower_jaxpr_to_module(
File "/Users/vanderplas/github/google/jax/jax/_src/interpreters/mlir.py", line 829, in lower_jaxpr_to_module
raise ValueError("\n".join(msg_lines)) from e
ValueError: Cannot lower jaxpr with verifier errors:
'stablehlo.abs' op operand #0 must be tensor of 4/8/16/32/64-bit signless integer or f8E4M3B11FNUZ type or f8E4M3FN type or f8E4M3FNUZ type or f8E5M2 type or f8E5M2FNUZ type or 16-bit float or 32-bit float or 64-bit float or bfloat16 type or complex type with 32-bit float or 64-bit float elements or 4/8/16/32-bit uniform quantized signed integer or 4/8/16/32-bit uniform quantized unsigned integer values, but got 'tensor<ui32>'
at loc("jit(abs)/jit(main)/abs"("/Users/vanderplas/github/google/jax/tmp.py":2:0))
see current operation: %0 = "stablehlo.abs"(%arg0) : (tensor<ui32>) -> tensor<ui32>
at loc("jit(abs)/jit(main)/abs"("/Users/vanderplas/github/google/jax/tmp.py":2:0))
Module string:
#loc = loc(unknown)
"builtin.module"() <{sym_name = "jit_abs"}> ({
"func.func"() <{arg_attrs = [{mhlo.sharding = "{replicated}"}], function_type = (tensor<ui32>) -> tensor<ui32>, res_attrs = [{}], sym_name = "main", sym_visibility = "public"}> ({
^bb0(%arg0: tensor<ui32> loc(unknown)):
%0 = "stablehlo.abs"(%arg0) : (tensor<ui32>) -> tensor<ui32> loc(#loc2)
"func.return"(%0) : (tensor<ui32>) -> () loc(#loc)
}) : () -> () loc(#loc)
}) {mhlo.num_partitions = 1 : i32, mhlo.num_replicas = 1 : i32} : () -> () loc(#loc)
#loc1 = loc("/Users/vanderplas/github/google/jax/tmp.py":2:0)
#loc2 = loc("jit(abs)/jit(main)/abs"(#loc1))
```
I think the fix is to avoid binding the primitive for unsigned inputs.
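Since abs is the identity on unsigned integers, the array-level API should not need the primitive at all; a hedged sketch of what one would expect to keep working however the primitive itself is restricted:

```python
import jax.numpy as jnp

x = jnp.uint32(1)
print(jnp.abs(x))  # abs of an unsigned value is a no-op, so this should simply return 1
```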
| 2023-10-05T17:34:50 |
||
google/jax | 17,962 | google__jax-17962 | [
"17949"
]
| 61bd34cddf0041fc28cd85d0bc3db49bfc9d56c9 | diff --git a/jax/_src/numpy/ufuncs.py b/jax/_src/numpy/ufuncs.py
--- a/jax/_src/numpy/ufuncs.py
+++ b/jax/_src/numpy/ufuncs.py
@@ -201,7 +201,7 @@ def arccosh(x: ArrayLike, /) -> Array:
@jit
def bitwise_count(x: ArrayLike, /) -> Array:
# Following numpy we take the absolute value and return uint8.
- return lax.population_count(lax.abs(x)).astype('uint8')
+ return lax.population_count(abs(x)).astype('uint8')
@_wraps(np.right_shift, module='numpy')
@partial(jit, inline=True)
| diff --git a/tests/lax_numpy_operators_test.py b/tests/lax_numpy_operators_test.py
--- a/tests/lax_numpy_operators_test.py
+++ b/tests/lax_numpy_operators_test.py
@@ -576,7 +576,7 @@ def testBitwiseOp(self, name, rng_factory, shapes, dtypes):
@jtu.sample_product(
shape=array_shapes,
- dtype=int_dtypes,
+ dtype=int_dtypes + unsigned_dtypes,
)
def testBitwiseCount(self, shape, dtype):
# np.bitwise_count added after numpy 1.26, but
| ⚠️ Nightly upstream-dev CI failed ⚠️
[Workflow Run URL](https://github.com/google/jax/actions/runs/6431383221)
<details><summary>Summary of Failures</summary>
```
tests/lax_numpy_test.py::NumpyUfuncTests::testUfuncInputTypes11: jaxlib.mlir._mlir_libs._site_initialize.<locals>.MLIRError: Verification failed:
error: "jit(bitwise_count)/jit(main)/abs"("/home/runner/work/jax/jax/tests/lax_numpy_test.py":5461:8): 'stablehlo.abs' op operand #0 must be tensor of 4/8/16/32/64-bit signless integer or f8E4M3B11FNUZ type or f8E4M3FN type or f8E4M3FNUZ type or f8E5M2 type or f8E5M2FNUZ type or 16-bit float or 32-bit float or 64-bit float or bfloat16 type or complex type with 32-bit float or 64-bit float elements or 4/8/16/32-bit uniform quantized signed integer or 4/8/16/32-bit uniform quantized unsigned integer values, but got 'tensor<1xui64>'
note: "jit(bitwise_count)/jit(main)/abs"("/home/runner/work/jax/jax/tests/lax_numpy_test.py":5461:8): see current operation: %0 = "stablehlo.abs"(%arg0) : (tensor<1xui64>) -> tensor<1xui64>
```
</details>
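For completeness (my addition): the failing lowering comes from routing unsigned inputs through `lax.abs`, and with the one-line change the unsigned path should behave like this sketch:

```python
import jax.numpy as jnp

x = jnp.uint32(255)
# population count of 0b11111111; the result dtype is uint8, following NumPy's bitwise_count
print(jnp.bitwise_count(x))  # expected: 8
```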
| 2023-10-05T17:56:07 |
|
google/jax | 17,996 | google__jax-17996 | [
"17995"
]
| f5dc745d715ede6a9db0d2ecf90f7ac5c02bba84 | diff --git a/jax/_src/lax/lax.py b/jax/_src/lax/lax.py
--- a/jax/_src/lax/lax.py
+++ b/jax/_src/lax/lax.py
@@ -1993,7 +1993,7 @@ def _pow_jvp_lhs(g, ans, x, y):
shape = broadcast_shapes(x.shape, y.shape)
x = _maybe_broadcast(shape, x)
y = _maybe_broadcast(shape, y)
- jac = select(eq(y, _const(y, 0)), _ones(y),
+ jac = select(eq(y, _const(y, 0)), _zeros(y),
mul(_replace_zero(y), pow(x, sub(y, _ones(y)))))
else:
jac = mul(y, pow(x, sub(y, _ones(y))))
| diff --git a/tests/lax_autodiff_test.py b/tests/lax_autodiff_test.py
--- a/tests/lax_autodiff_test.py
+++ b/tests/lax_autodiff_test.py
@@ -539,7 +539,22 @@ def testPowSecondDerivative(self):
def testPowIntPowerAtZero(self):
# https://github.com/google/jax/issues/14397
ans = jax.grad(jax.jit(lambda x, n: x ** n))(0., 0)
- self.assertAllClose(ans, 1., check_dtypes=False)
+ self.assertAllClose(ans, 0., check_dtypes=False)
+
+ @jax.numpy_dtype_promotion('standard') # This test explicitly exercises mixed type promotion
+ def testPowIntPowerAtZero2(self):
+ # https://github.com/google/jax/issues/17995
+ a = lambda z: jax.numpy.sum(z**jax.numpy.arange(0, 2, dtype=int))
+ b = lambda z: jax.numpy.sum(z**jax.numpy.arange(0, 2, dtype=float))
+ c = lambda z: 1 + z
+ d = lambda z: z ** 0 + z
+ e = lambda z: z ** 0. + z
+
+ self.assertAllClose(jax.grad(a)(3.14), 1., check_dtypes=False)
+ self.assertAllClose(jax.grad(b)(3.14), 1., check_dtypes=False)
+ self.assertAllClose(jax.grad(c)(3.14), 1., check_dtypes=False)
+ self.assertAllClose(jax.grad(d)(3.14), 1., check_dtypes=False)
+ self.assertAllClose(jax.grad(e)(3.14), 1., check_dtypes=False)
@jtu.sample_product(
[dict(arg_shape=arg_shape, pred_shape=pred_shape)
| pow grad has surprising behavior based on dtype
> Hi @mattjj, thanks for this. I'm finding this choice very confusing, since at the back of my head, I'm dealing with a function in one variable, x, and so it's unexpected that for every expression I write down inside such a function, I need to check to make sure what I think are constants are well-behaved if they were to become variables. But perhaps there are good reasons why this should be.
>
> I'm still running into trouble along these lines though, I think when `arange` is used to build the polynomial. Here is a simple example that breaks:
```
import jax
import jax.numpy as jnp
b = lambda z: jnp.sum(z**jnp.arange(0, 2))
d = lambda z: 1. + z
print(b(2.543), d(2.543)) # check they are the same and I'm not crazy
print(jax.grad(d)(1.)) # gives 1. as expected
print(jax.grad(b)(1.)) # gives 2.
```
_Originally posted by @hongwanliu in https://github.com/google/jax/issues/14397#issuecomment-1703838855_
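For concreteness, here is a small sketch of what the one-line JVP fix and the added regression test assert (the names `f_int`/`f_float` are mine; the expected values come from the test added in this PR):
```python
import jax
import jax.numpy as jnp

# After the JVP fix above, integer- and float-dtype exponents give the same gradient.
f_int = lambda z: jnp.sum(z ** jnp.arange(0, 2))                 # the report's `b`
f_float = lambda z: jnp.sum(z ** jnp.arange(0, 2, dtype=float))

print(jax.grad(f_int)(1.))    # 1.0 with the fix (the report above shows 2.0)
print(jax.grad(f_float)(1.))  # 1.0
```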
| Copying a comment from #14397
> If you change the `jnp.arange` to use float dtype, then things agree:
>
> ```python
> b = lambda z: jnp.sum(z**jnp.arange(0, 2, dtype=float))
> d = lambda z: 1 + z
>
> print(jax.grad(d)(1.)) # gives 1. as expected
> print(jax.grad(b)(1.)) # gives 1. also
> ```
>
> I'd say that's pretty surprising though. We probably need to revise this; either there's some bug, or this dtype-based-resolving-of-ambiguities is too subtle and we should raise an error instead (asking the user to be explicit).
Wow, this was just a bug in the JVP rule, and the test I added had the same bug. Sorry! This is what I get for context switching on that PR so many times. | 2023-10-07T00:56:19 |
google/jax | 18,017 | google__jax-18017 | [
"18004"
]
| 84b58ec7f3e0c352f2a5778196999d2ae3a6f1c1 | diff --git a/jax/_src/numpy/ufunc_api.py b/jax/_src/numpy/ufunc_api.py
--- a/jax/_src/numpy/ufunc_api.py
+++ b/jax/_src/numpy/ufunc_api.py
@@ -20,6 +20,7 @@
>>> arr = jnp.add.at(arr, ind, val, inplace=False)
"""
from functools import partial
+import math
import operator
from typing import Any, Callable, Optional
@@ -264,6 +265,7 @@ def at(self, a: ArrayLike, indices: Any, b: Optional[ArrayLike] = None, /, *,
return self._at_via_scan(a, indices, b)
def _at_via_scan(self, a: ArrayLike, indices: Any, *args: Any) -> Array:
+ assert len(args) in {0, 1}
check_arraylike(f"{self.__name__}.at", a, *args)
dtype = jax.eval_shape(self._func, lax_internal._one(a), *(lax_internal._one(arg) for arg in args)).dtype
a = lax_internal.asarray(a).astype(dtype)
@@ -277,7 +279,9 @@ def _at_via_scan(self, a: ArrayLike, indices: Any, *args: Any) -> Array:
if not shape:
return a.at[indices].set(self._call(a.at[indices].get(), *args))
- args = tuple(_broadcast_to(arg, shape).ravel() for arg in args)
+ if args:
+ arg = _broadcast_to(args[0], (*shape, *args[0].shape[len(shape):]))
+ args = (arg.reshape(math.prod(shape), *args[0].shape[len(shape):]),)
indices = [idx if isinstance(idx, slice) else _broadcast_to(idx, shape).ravel() for idx in indices]
def scan_fun(carry, x):
| diff --git a/tests/lax_numpy_ufuncs_test.py b/tests/lax_numpy_ufuncs_test.py
--- a/tests/lax_numpy_ufuncs_test.py
+++ b/tests/lax_numpy_ufuncs_test.py
@@ -289,6 +289,19 @@ def np_fun(x, idx, y):
self._CheckAgainstNumpy(jnp_fun, np_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
+ def test_at_broadcasting(self):
+ # Regression test for https://github.com/google/jax/issues/18004
+ args_maker = lambda: [np.ones((5, 3)), np.array([0, 4, 2]),
+ np.arange(9.0).reshape(3, 3)]
+ def np_fun(x, idx, y):
+ x_copy = np.copy(x)
+ np.add.at(x_copy, idx, y)
+ return x_copy
+ jnp_fun = partial(jnp.frompyfunc(jnp.add, nin=2, nout=1, identity=0).at, inplace=False)
+
+ self._CheckAgainstNumpy(jnp_fun, np_fun, args_maker)
+ self._CompileAndCheck(jnp_fun, args_maker)
+
@jtu.sample_product(
SCALAR_FUNCS,
[{'shape': shape, 'axis': axis}
| Issue with JAX's frompyfunc and at methods when compared to NumPy
### Description
I encountered an issue when trying to use JAX's frompyfunc and at methods for a specific use case that works fine in NumPy. Below is the code snippet that demonstrates the issue:
```
import jax.numpy as jnp
def scalar_add(x, y):
    # emphasize that only scalar tracers will be passed to this function.
    assert jnp.shape(x) == jnp.shape(y) == ()
    return x + y
add = jnp.frompyfunc(scalar_add, nin=2, nout=1, identity=0)
x = jnp.ones((5, 3))
indices = jnp.array([0,4,2])
t = jnp.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
x = add.at(x, indices, t, inplace=False)
```
Expected Behavior:
The code above is expected to perform an addition operation using JAX's frompyfunc and at methods, similar to the behavior in NumPy.
Actual Behavior:
However, running this code results in the following error:
```
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
[<ipython-input-139-8af5f01fcded>](https://localhost:8080/#) in <cell line: 19>()
17
18 # add.accumulate(x) # accumulate() method is cumulative reduction
---> 19 z = add.at(x, indices, t, inplace=False) # at() method is similar to JAX's ndarray.at
[... skipping hidden 12 frame]
3 frames
[/usr/local/lib/python3.10/dist-packages/jax/_src/numpy/ufunc_api.py](https://localhost:8080/#) in at(self, a, indices, b, inplace)
238 return self._at_via_scan(a, indices)
239 else:
--> 240 return self._at_via_scan(a, indices, b)
241
242 def _at_via_scan(self, a, indices, *args):
[/usr/local/lib/python3.10/dist-packages/jax/_src/numpy/ufunc_api.py](https://localhost:8080/#) in _at_via_scan(self, a, indices, *args)
254 return a.at[indices].set(self._call(a.at[indices].get(), *args))
255
--> 256 args = tuple(_broadcast_to(arg, shape).ravel() for arg in args)
257 indices = [idx if isinstance(idx, slice) else _broadcast_to(idx, shape).ravel() for idx in indices]
258
[/usr/local/lib/python3.10/dist-packages/jax/_src/numpy/ufunc_api.py](https://localhost:8080/#) in <genexpr>(.0)
254 return a.at[indices].set(self._call(a.at[indices].get(), *args))
255
--> 256 args = tuple(_broadcast_to(arg, shape).ravel() for arg in args)
257 indices = [idx if isinstance(idx, slice) else _broadcast_to(idx, shape).ravel() for idx in indices]
258
[/usr/local/lib/python3.10/dist-packages/jax/_src/numpy/util.py](https://localhost:8080/#) in _broadcast_to(arr, shape)
397 shape_tail = shape[nlead:]
398 compatible = all(core.definitely_equal_one_of_dim(arr_d, [1, shape_d])
--> 399 for arr_d, shape_d in safe_zip(arr_shape, shape_tail))
400 if nlead < 0 or not compatible:
401 msg = "Incompatible shapes for broadcasting: {} and requested shape {}"
ValueError: safe_zip() argument 2 is shorter than argument 1
```
Additional Information:
This code works as expected with the equivalent NumPy functions, so the issue appears to be specific to JAX's implementation. NumPy produces the following result:
```
array([[ 2., 3., 4.],
[ 1., 1., 1.],
[ 8., 9., 10.],
[ 1., 1., 1.],
[ 5., 6., 7.]])
```
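For reference, the same update can already be expressed with JAX's indexed add, which reproduces the NumPy result above (a workaround sketch, not the `frompyfunc`/`at` path under discussion):
```python
import jax.numpy as jnp

x = jnp.ones((5, 3))
indices = jnp.array([0, 4, 2])
t = jnp.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print(x.at[indices].add(t))
# [[ 2.  3.  4.]
#  [ 1.  1.  1.]
#  [ 8.  9. 10.]
#  [ 1.  1.  1.]
#  [ 5.  6.  7.]]
```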
Environment:
JAX version: 0.4.16
Python version: 0.4.16+cuda11.cudnn86
Operating System: Colab
Please let me know if any further information or clarification is needed to address this issue.
### What jax/jaxlib version are you using?
jax: 0.4.16, jaxlib: 0.4.16+cuda11.cudnn86
### Which accelerator(s) are you using?
GPU T4
### Additional system info
On Colab
### NVIDIA GPU info
nvidia-smi
| Thanks for the report! I'm looking into it | 2023-10-09T16:44:25 |
google/jax | 18,075 | google__jax-18075 | [
"18074"
]
| 9ae5a43341a3d081ebfed68e0c35dbd3853a86d3 | diff --git a/jax/_src/callback.py b/jax/_src/callback.py
--- a/jax/_src/callback.py
+++ b/jax/_src/callback.py
@@ -234,6 +234,7 @@ def pure_callback(
may behave in unexpected ways, particularly under transformation.
result_shape_dtypes: pytree whose leaves have ``shape`` and ``dtype`` attributes,
whose structure matches the expected output of the callback function at runtime.
+ :class:`jax.ShapeDtypeStruct` is often used to define leaf values.
*args: arguments to be passed to the callback function
sharding: optional sharding that specifies the device from which the callback should
be invoked.
@@ -480,6 +481,7 @@ def io_callback(
more efficient execution.
result_shape_dtypes: pytree whose leaves have ``shape`` and ``dtype`` attributes,
whose structure matches the expected output of the callback function at runtime.
+ :class:`jax.ShapeDtypeStruct` is often used to define leaf values.
*args: arguments to be passed to the callback function
sharding: optional sharding that specifies the device from which the callback should
be invoked.
| Documentation for jax.pure_callback does not mention ShapeDtypeStruct
### Description
The documentation page for `jax.pure_callback` states that `result_shape_dtypes` needs to be a PyTree whose leaves have `shape` and `dtype` members, but it does not mention `jax.ShapeDtypeStruct`, which already has those fields. An example in that documentation page would also be helpful, as would better errors for users who pass a raw shape instead of a structure as that argument.
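For illustration, a minimal sketch of the pattern such an example could show (the host-side function and printed values here are made up for the example; `jax.pure_callback` and `jax.ShapeDtypeStruct` are the real APIs):
```python
import numpy as np
import jax
import jax.numpy as jnp

def host_log1p(x):  # hypothetical host-side (NumPy) function
  return np.log1p(x)

@jax.jit
def f(x):
  out_spec = jax.ShapeDtypeStruct(x.shape, x.dtype)  # a leaf with .shape and .dtype
  return jax.pure_callback(host_log1p, out_spec, x)

print(f(jnp.arange(4.0)))  # [0.        0.6931472 1.0986123 1.3862944]
```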
### What jax/jaxlib version are you using?
_No response_
### Which accelerator(s) are you using?
_No response_
### Additional system info
_No response_
### NVIDIA GPU info
_No response_
| 2023-10-12T02:56:28 |
||
google/jax | 18,324 | google__jax-18324 | [
"18313"
]
| b093c4c99b9dff1ac5d1ece48fcfba4167ebda78 | diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -3296,6 +3296,9 @@ def tensordot(a: ArrayLike, b: ArrayLike,
A tuple ``precision`` does not necessarily map to multiple arguments of ``einsum()``;
rather, the specified ``precision`` is forwarded to each ``dot_general`` call used in
the implementation.
+
+:func:`jax.numpy.einsum` also differs from :func:`numpy.einsum` in that the ``optimize``
+keyword defaults to ``"optimal"`` rather than ``False``.
"""
@overload
| The default argument optimize of einsum in the document is 'optimal', not False.
### Description
The default argument optimize of einsum in the document is 'optimal', not False.
It seems like the documentation comes directly from numpy and jax changes the defaults.
<img width="930" alt="Screenshot 2023-10-29 at 1 57 19 PM" src="https://github.com/google/jax/assets/5007613/ee7adac3-9086-4caf-a99b-3816764be545">
<img width="843" alt="Screenshot 2023-10-29 at 2 00 26 PM" src="https://github.com/google/jax/assets/5007613/8bbee68c-fcb6-4dea-9f11-38aa0ccfd88d">
### What jax/jaxlib version are you using?
_No response_
### Which accelerator(s) are you using?
_No response_
### Additional system info
_No response_
### NVIDIA GPU info
_No response_
| Thanks - you're correct that the documentation comes straight from NumPy (we note that with *original documentation below* at the top of the docstring). We can add a note about the different default value at the top of the JAX docstring. | 2023-10-30T16:19:05 |
|
google/jax | 18,373 | google__jax-18373 | [
"16816"
]
| 011d49c5187ba5d98547effc6c665908c851094e | diff --git a/jax/_src/basearray.py b/jax/_src/basearray.py
--- a/jax/_src/basearray.py
+++ b/jax/_src/basearray.py
@@ -16,7 +16,11 @@
import abc
import numpy as np
-from typing import Union
+from typing import Any, Sequence, Union
+
+# TODO(jakevdp): fix import cycles and define these.
+Shard = Any
+Sharding = Any
# Array is a type annotation for standard JAX arrays and tracers produced by
# core functions in jax.lax and jax.numpy; it is not meant to include
@@ -46,11 +50,64 @@ def f(x: Array) -> Array: # type annotations are valid for traced and non-trace
__slots__ = ['__weakref__']
- # at property must be defined because we overwrite its docstring in
- # lax_numpy.py
@property
- def at(self):
- raise NotImplementedError("property must be defined in subclasses")
+ @abc.abstractmethod
+ def dtype(self) -> np.dtype:
+ """The data type (:class:`numpy.dtype`) of the array."""
+
+ @property
+ @abc.abstractmethod
+ def ndim(self) -> int:
+ """The number of dimensions in the array."""
+
+ @property
+ @abc.abstractmethod
+ def size(self) -> int:
+ """The total number of elements in the array."""
+
+ @property
+ @abc.abstractmethod
+ def shape(self) -> tuple[int, ...]:
+ """The shape of the array."""
+
+ # Documentation for sharding-related methods and properties defined on ArrayImpl:
+ @abc.abstractmethod
+ def addressable_data(self, index: int) -> "Array":
+ """Return an array of the addressable data at a particular index."""
+
+ @property
+ @abc.abstractmethod
+ def addressable_shards(self) -> Sequence[Shard]:
+ """List of addressable shards."""
+
+ @property
+ @abc.abstractmethod
+ def global_shards(self) -> Sequence[Shard]:
+ """List of global shards."""
+
+ @property
+ @abc.abstractmethod
+ def is_fully_addressable(self) -> bool:
+ """Is this Array fully addressable?
+
+ A jax.Array is fully addressable if the current process can address all of
+ the devices named in the :class:`Sharding`. ``is_fully_addressable`` is
+ equivalent to "is_local" in multi-process JAX.
+
+ Note that fully replicated is not equal to fully addressable i.e.
+ a jax.Array which is fully replicated can span across multiple hosts and is
+ not fully addressable.
+ """
+
+ @property
+ @abc.abstractmethod
+ def is_fully_replicated(self) -> bool:
+ """Is this Array fully replicated?"""
+
+ @property
+ @abc.abstractmethod
+ def sharding(self) -> Sharding:
+ """The sharding for the array."""
Array.__module__ = "jax"
diff --git a/jax/_src/core.py b/jax/_src/core.py
--- a/jax/_src/core.py
+++ b/jax/_src/core.py
@@ -626,11 +626,22 @@ def check_bool_conversion(arr: Array, warn_on_empty=False):
"ambiguous. Use a.any() or a.all()")
+def _aval_property(name):
+ return property(lambda self: getattr(self.aval, name))
+
class Tracer(typing.Array):
__array_priority__ = 1000
__slots__ = ['_trace', '_line_info']
+ dtype = _aval_property('dtype')
+ ndim = _aval_property('ndim')
+ size = _aval_property('size')
+ shape = _aval_property('shape')
+
+ def __init__(self, trace: Trace):
+ self._trace = trace
+
def _error_repr(self):
if self.aval is None:
return f"traced array with aval {self.aval}"
@@ -655,9 +666,6 @@ def tobytes(self, order="C"):
f"The tobytes() method was called on {self._error_repr()}."
f"{self._origin_msg()}")
- def __init__(self, trace: Trace):
- self._trace = trace
-
def __iter__(self):
return iter(self.aval._iter(self))
diff --git a/jax/_src/numpy/array_methods.py b/jax/_src/numpy/array_methods.py
--- a/jax/_src/numpy/array_methods.py
+++ b/jax/_src/numpy/array_methods.py
@@ -240,6 +240,7 @@ def _view(arr: Array, dtype: Optional[DTypeLike] = None, type: None = None) -> A
def _notimplemented_flat(self):
+ """Not implemented: Use :meth:`~jax.Array.flatten` instead."""
raise NotImplementedError("JAX Arrays do not implement the arr.flat property: "
"consider arr.flatten() instead.")
@@ -800,5 +801,3 @@ def register_jax_array_methods():
_set_array_attributes(ArrayImpl)
_set_array_abstract_methods(Array)
-
- Array.at.__doc__ = _IndexUpdateHelper.__doc__
| Multihost gather operation
Currently there exists `jax.experimental.multihost_utils.process_allgather`, but no `process_gather`.
I am running the script on TPU v4-16.
Example:
```python
import jax
from jax import Array
import jax.numpy as jnp
from jax.sharding import Mesh, NamedSharding, PartitionSpec as P
import numpy as np
from types import EllipsisType
def shard_array_to_multihost(arr: Array, axis: int | EllipsisType) -> Array:
    shape = arr.shape
    devices: np.ndarray = np.array(jax.devices())
    if axis is ...:
        mesh = Mesh(devices, ('a',))
        sharding = NamedSharding(mesh, P(None))
    else:
        sharding_tuple_ = [1] * len(shape)
        sharding_tuple_[axis] = -1
        sharding_tuple = tuple(sharding_tuple_)
        name_tuple = tuple('abcdefghijklmnopqrstuvwxyz'[:len(shape)])
        mesh = Mesh(devices.reshape(sharding_tuple), name_tuple)
        sharding = NamedSharding(mesh, P(*name_tuple))
    xs = [jax.device_put(arr[i], device) for device, i in sharding.addressable_devices_indices_map(shape).items()]
    return jax.make_array_from_single_device_arrays(shape, sharding, xs)

arr = jnp.arange(2 * 16 * 8).reshape((2, 16, 8))
arr = shard_array_to_multihost(arr, axis=2)
arr = jax.experimental.multihost_utils.process_allgather(arr)

if jax.process_index() == 0:
    print(arr)
```
However, what I want to achieve is:
```python
if jax.process_index() == 0:
    print(process_gather(arr))
```
| Besides, `jax.experimental.multihost_utils.process_allgather` returns a Numpy array instead of a JAX array. Why?
What does process_gather mean? If it means to gather all the local shards of the array then you can just write that on your own using `arr.addressable_shards`.
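As a rough illustration of that suggestion (not an official API; shard objects expose `.index` and `.data`), a process-local gather could look roughly like:
```python
import numpy as np
import jax

def process_gather(arr: jax.Array) -> np.ndarray:
  """Assemble the shards addressable by this process; unaddressable regions stay zero."""
  out = np.zeros(arr.shape, dtype=arr.dtype)
  for shard in arr.addressable_shards:
    out[shard.index] = np.asarray(shard.data)
  return out
```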
Thank you! However, I cannot find the documentation for `arr.addressable_shards`. It is not included in https://jax.readthedocs.io/en/latest/_autosummary/jax.Array.html
I guess we need to fix that! Thanks for letting me know. | 2023-11-03T16:40:08 |
|
google/jax | 18,379 | google__jax-18379 | [
"13033"
]
| 953f4670d88d2a1c168a4ad0b44ed940f6c58829 | diff --git a/jax/_src/numpy/reductions.py b/jax/_src/numpy/reductions.py
--- a/jax/_src/numpy/reductions.py
+++ b/jax/_src/numpy/reductions.py
@@ -587,16 +587,17 @@ def nanprod(a: ArrayLike, axis: Axis = None, dtype: Optional[DTypeLike] = None,
def nanmean(a: ArrayLike, axis: Axis = None, dtype: Optional[DTypeLike] = None, out: None = None,
keepdims: bool = False, where: Optional[ArrayLike] = None) -> Array:
check_arraylike("nanmean", a)
- dtypes.check_user_dtype_supported(dtype, "nanmean")
if out is not None:
raise NotImplementedError("The 'out' argument to jnp.nanmean is not supported.")
if dtypes.issubdtype(dtypes.dtype(a), np.bool_) or dtypes.issubdtype(dtypes.dtype(a), np.integer):
return mean(a, axis, dtype, out, keepdims, where=where)
if dtype is None:
- dtype = dtypes.dtype(a)
+ dtype = dtypes.to_inexact_dtype(dtypes.dtype(a, canonicalize=True))
+ else:
+ dtypes.check_user_dtype_supported(dtype, "mean")
+ dtype = dtypes.canonicalize_dtype(dtype)
nan_mask = lax_internal.bitwise_not(lax_internal._isnan(a))
- normalizer = sum(nan_mask, axis=axis, dtype=np.int32, keepdims=keepdims, where=where)
- normalizer = lax.convert_element_type(normalizer, dtype)
+ normalizer = sum(nan_mask, axis=axis, dtype=dtype, keepdims=keepdims, where=where)
td = lax.div(nansum(a, axis, dtype=dtype, keepdims=keepdims, where=where), normalizer)
return td
| jnp.nanmean can overflow in the denominator for large arguments
### Description
The following reproduces a phenomenon where the mean of a positive tensor can be negative:
```python
logits = np.random.uniform(size=[2048,64, 32000], low=-4, high=4)
jnp.nanmean(jnp.abs(logits))
=> DeviceArray(-83.333984, dtype=float32)
```
The reason is that the mean normalizer is summed using `np.int32` here: https://github.com/google/jax/blob/540835f979fe13a968f69b894dbafc981f122b36/jax/_src/numpy/reductions.py#L586
The error is silent.
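A quick back-of-the-envelope check of the numbers above (plain Python arithmetic, only to show the wraparound):
```python
n = 2048 * 64 * 32000                  # elements summed into the int32 normalizer
wrapped = (n + 2**31) % 2**32 - 2**31  # value after int32 wraparound
print(n, wrapped)                      # 4194304000 -100663296
# E[|uniform(-4, 4)|] == 2, so nansum(|logits|) is roughly 2 * n and the mean becomes
print(2 * n / wrapped)                 # -83.33..., consistent with the -83.333984 above
```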
A workaround is to split the mean along two or more axes.
A stopgap fix would be to throw an error if the tensor is too large, so that the user can decide how to deal with it.
Another fix would be to sum in int64.
### What jax/jaxlib version are you using?
_No response_
### Which accelerator(s) are you using?
_No response_
### Additional system info
_No response_
### NVIDIA GPU info
_No response_
| Thanks for the report – I vaguely recall this coming up recently in the context of `var` as well.
Perhaps since we end up converting to float in the end anyway, a better fix would be to accumulate the normalizer in float to avoid this kind of overflow? I'm trying to think whether there would be any downsides to that. | 2023-11-03T20:20:32 |
|
google/jax | 18,382 | google__jax-18382 | [
"17904"
]
| 1c1dd7c8c7ff2e5790159d9cdbbbb1f029a92d4b | diff --git a/jax/_src/numpy/util.py b/jax/_src/numpy/util.py
--- a/jax/_src/numpy/util.py
+++ b/jax/_src/numpy/util.py
@@ -356,16 +356,33 @@ def check_no_float0s(fun_name: str, *args: Any):
_check_no_float0s = check_no_float0s
+def check_for_prngkeys(fun_name: str, *args: Any):
+ """Check if args don't match and none of the args have typed prng dtype"""
+ arg_dtypes = [dtypes.dtype(arg) for arg in args]
+ if len(set(arg_dtypes)) < 2:
+ return # Will be caught by extended dtype impl rules.
+ if any(dtypes.issubdtype(dt, dtypes.prng_key) for dt in arg_dtypes):
+ if len(arg_dtypes) == 1:
+ raise TypeError(
+ f"{fun_name} does not accept dtype {str(arg_dtypes[0])}.")
+ else:
+ raise TypeError(
+ f"{fun_name} does not accept dtypes {', '.join(map(str, arg_dtypes))}."
+ )
+
+
def promote_args(fun_name: str, *args: ArrayLike) -> list[Array]:
"""Convenience function to apply Numpy argument shape and dtype promotion."""
check_arraylike(fun_name, *args)
_check_no_float0s(fun_name, *args)
+ check_for_prngkeys(fun_name, *args)
return promote_shapes(fun_name, *promote_dtypes(*args))
def promote_args_numeric(fun_name: str, *args: ArrayLike) -> list[Array]:
check_arraylike(fun_name, *args)
_check_no_float0s(fun_name, *args)
+ check_for_prngkeys(fun_name, *args)
return promote_shapes(fun_name, *promote_dtypes_numeric(*args))
@@ -375,6 +392,7 @@ def promote_args_inexact(fun_name: str, *args: ArrayLike) -> list[Array]:
Promotes non-inexact types to an inexact type."""
check_arraylike(fun_name, *args)
_check_no_float0s(fun_name, *args)
+ check_for_prngkeys(fun_name, *args)
return promote_shapes(fun_name, *promote_dtypes_inexact(*args))
| diff --git a/tests/random_lax_test.py b/tests/random_lax_test.py
--- a/tests/random_lax_test.py
+++ b/tests/random_lax_test.py
@@ -1250,7 +1250,7 @@ def test_vmap_split_mapped_key(self):
def test_cannot_add(self):
key = self.make_key(73)
self.assertRaisesRegex(
- ValueError, r'dtype=key<.*> is not a valid dtype for JAX type promotion.',
+ TypeError, r'add does not accept dtypes key<.*>, int.*',
lambda: key + 47)
def test_grad_of_prng_key(self):
@@ -1319,7 +1319,7 @@ def test_cannot_add(self):
if not jnp.issubdtype(key.dtype, dtypes.prng_key):
raise SkipTest('relies on typed key arrays')
self.assertRaisesRegex(
- ValueError, r'dtype=key<.*> is not a valid dtype for JAX type promotion.',
+ TypeError, r'add does not accept dtypes key<.*>, int.*',
lambda: key + 47)
def test_grad_of_prng_key(self):
diff --git a/tests/random_test.py b/tests/random_test.py
--- a/tests/random_test.py
+++ b/tests/random_test.py
@@ -1348,9 +1348,9 @@ def test_scatter(self, idx):
def test_errors(self):
key = random.key(123)
- with self.assertRaisesRegex(ValueError, "dtype=key<fry> is not a valid dtype"):
+ with self.assertRaisesRegex(TypeError, "add does not accept dtypes key<fry>, int.*"):
jnp.add(key, 1)
- with self.assertRaisesRegex(ValueError, "dtype=key<fry> is not a valid dtype"):
+ with self.assertRaisesRegex(TypeError, "add does not accept dtypes key<fry>, int.*"):
key + 1
with self.assertRaisesRegex(TypeError, "add does not accept dtype key<fry>"):
jnp.add(key, key)
| better errors for n-ary ops on incompatible types, especially key arrays
Operations on typed key arrays now err when we want them to, e.g. on `key + 7`. This guard's implementation relies on the failure case of general dtype promotion logic. The resulting error message can be confusing. Example:
```python
>>> jax.random.key(73) + 4
...
ValueError: dtype=key<fry> is not a valid dtype for JAX type promotion.
```
Here is another example, involving only keys, and an operation that's allowed on them when they match, but not on keys of different types:
```python
>>> jax.random.key(73) == jax.random.key(14)
Array(False, dtype=bool)
>>> jax.random.key(73) == jax.random.key(14, impl='rbg')
...
ValueError: dtype=key<rbg> is not a valid dtype for JAX type promotion.
```
It'd be nice to improve these error messages, maybe to something along the lines of "`<op>` is not defined on arguments of type `<A>` and `<B>`"
Would this also be an improvement to other cases where dtype promotion fails, more generally than with key arrays? Nice if so.
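For reference, the regression tests added in this PR pin the intended behavior down to something along these lines (a sketch; the exact wording comes from the tests):
```python
import jax

key = jax.random.key(73)
try:
  key + 47
except TypeError as e:
  print(e)  # e.g. "add does not accept dtypes key<fry>, int32."
```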
| 2023-11-03T21:45:27 |
|
google/jax | 18,413 | google__jax-18413 | [
"18245"
]
| 5f4d4797b2a41b469f52d06d4ea4d7cc8715fee8 | diff --git a/jax/experimental/sparse/bcoo.py b/jax/experimental/sparse/bcoo.py
--- a/jax/experimental/sparse/bcoo.py
+++ b/jax/experimental/sparse/bcoo.py
@@ -20,7 +20,7 @@
from functools import partial
import math
import operator
-from typing import Any, NamedTuple, Optional, Protocol, Union
+from typing import Any, NamedTuple, Protocol
import warnings
import numpy as np
@@ -262,9 +262,10 @@ def bcoo_fromdense(mat: Array, *, nse: int | None = None, n_batch: int = 0,
mat_bcoo: BCOO representation of the matrix.
"""
mat = jnp.asarray(mat)
- if nse is None:
- nse = _count_stored_elements(mat, n_batch, n_dense)
- nse_int = core.concrete_or_error(operator.index, nse, _TRACED_NSE_ERROR)
+ nse_arr: int | Array | None = nse
+ if nse_arr is None:
+ nse_arr = _count_stored_elements(mat, n_batch, n_dense)
+ nse_int = core.concrete_or_error(operator.index, nse_arr, _TRACED_NSE_ERROR)
return BCOO(_bcoo_fromdense(mat, nse=nse_int, n_batch=n_batch, n_dense=n_dense,
index_dtype=index_dtype),
shape=mat.shape, indices_sorted=True, unique_indices=True)
diff --git a/jax/experimental/sparse/bcsr.py b/jax/experimental/sparse/bcsr.py
--- a/jax/experimental/sparse/bcsr.py
+++ b/jax/experimental/sparse/bcsr.py
@@ -194,9 +194,10 @@ def bcsr_fromdense(mat: ArrayLike, *, nse: int | None = None, n_batch: int = 0,
mat_bcsr: BCSR representation of the matrix.
"""
mat_array = jnp.asarray(mat)
- if nse is None:
- nse = _count_stored_elements(mat_array, n_batch, n_dense)
- nse_int: int = core.concrete_or_error(operator.index, nse, _TRACED_NSE_ERROR)
+ nse_arr: int | Array | None = nse
+ if nse_arr is None:
+ nse_arr = _count_stored_elements(mat_array, n_batch, n_dense)
+ nse_int: int = core.concrete_or_error(operator.index, nse_arr, _TRACED_NSE_ERROR)
return BCSR(_bcsr_fromdense(mat_array, nse=nse_int, n_batch=n_batch,
n_dense=n_dense, index_dtype=index_dtype),
shape=mat_array.shape)
diff --git a/jax/experimental/sparse/util.py b/jax/experimental/sparse/util.py
--- a/jax/experimental/sparse/util.py
+++ b/jax/experimental/sparse/util.py
@@ -101,9 +101,9 @@ def _count_stored_elements_per_batch(mat: Array, n_batch: int = 0, n_dense: int
mask = mask.sum(tuple(range(n_batch, mask.ndim)))
return mask
-def _count_stored_elements(mat: Array, n_batch: int = 0, n_dense: int = 0) -> int:
+def _count_stored_elements(mat: Array, n_batch: int = 0, n_dense: int = 0) -> Array:
"""Return the number of stored elements (nse) of the given dense matrix."""
- return int(_count_stored_elements_per_batch(mat, n_batch, n_dense).max(initial=0))
+ return _count_stored_elements_per_batch(mat, n_batch, n_dense).max(initial=0)
def _dot_general_validated_shape(
lhs_shape: tuple[int, ...], rhs_shape: tuple[int, ...],
| `BCOO.fromdense` is not compatible with `jax.vmap`
### Description
Something unrelated that I bumped into whilst investigating #18244:
```python
import jax
import jax.numpy as jnp
from jax.experimental import sparse
matrix = jax.vmap(sparse.BCOO.fromdense)(jnp.arange(16.).reshape(1, 4, 4))
# File ".../jax/experimental/sparse/util.py", line 106, in _count_stored_elements
# return int(_count_stored_elements_per_batch(mat, n_batch, n_dense).max(initial=0))
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# jax.errors.ConcretizationTypeError: Abstract tracer value encountered where concrete value is expected: traced array with # shape int32[].
# The problem arose with the `int` function. If trying to convert the data type of a value, try using `x.astype(int)` or # `jnp.array(x, int)` instead.
```
I know about the `fromdense(..., n_batch=...)` argment, but I think it'd be reasonable for `fromdense` to occur within traced code.
### What jax/jaxlib version are you using?
JAX 0.4.19
### Which accelerator(s) are you using?
_No response_
### Additional system info
_No response_
### NVIDIA GPU info
_No response_
| This is working as intended. You need to pass a static value to `nse` in order to use `BCOO.fromdense` within `vmap` and other JAX transformations, because otherwise the output arrays have data-dependent size.
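For example (a sketch assuming `nse=16` is an acceptable static upper bound for this input):
```python
import jax
import jax.numpy as jnp
from jax.experimental import sparse

dense = jnp.arange(16.).reshape(1, 4, 4)
# A static nse makes the output size data-independent, so vmap can trace it.
mats = jax.vmap(lambda m: sparse.BCOO.fromdense(m, nse=16))(dense)
print(mats.shape)  # (1, 4, 4)
```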
Ah, gotcha. In that case I think treat this as a report that the error message could be improved.
Thanks - I think the intent is that it would hit this line and raise a useful error, but it's clearly not doing that: https://github.com/google/jax/blob/20e583834ee8e8dc6e4d0c43d2eb86cfe9428f58/jax/experimental/sparse/bcoo.py#L289
This is the intended error message: https://github.com/google/jax/blob/20e583834ee8e8dc6e4d0c43d2eb86cfe9428f58/jax/experimental/sparse/bcoo.py#L244-L248 | 2023-11-06T22:01:10 |
|
google/jax | 18,415 | google__jax-18415 | [
"18399"
]
| 7e372944f9f9a1bce2e267a87087f2da332abbd0 | diff --git a/jax/_src/numpy/reductions.py b/jax/_src/numpy/reductions.py
--- a/jax/_src/numpy/reductions.py
+++ b/jax/_src/numpy/reductions.py
@@ -650,9 +650,16 @@ def __call__(self, a: ArrayLike, axis: Axis = None,
dtype: Optional[DTypeLike] = None, out: None = None) -> Array: ...
+# TODO(jakevdp): should we change these semantics to match those of numpy?
+CUML_REDUCTION_LAX_DESCRIPTION = """
+Unlike the numpy counterpart, when ``dtype`` is not specified the output dtype will always
+match the dtype of the input.
+"""
+
def _make_cumulative_reduction(np_reduction: Any, reduction: Callable[..., Array],
fill_nan: bool = False, fill_value: ArrayLike = 0) -> CumulativeReduction:
- @_wraps(np_reduction, skip_params=['out'])
+ @_wraps(np_reduction, skip_params=['out'],
+ lax_description=CUML_REDUCTION_LAX_DESCRIPTION)
def cumulative_reduction(a: ArrayLike, axis: Axis = None,
dtype: Optional[DTypeLike] = None, out: None = None) -> Array:
return _cumulative_reduction(a, _ensure_optional_axes(axis), dtype, out)
| cumsum / cumprod int dtype documentation inconsistent with behaviour
### Description
Both `cumsum` and `cumprod` have `dtype` arguments that are documented as:
>dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults
to the dtype of `a`, unless `a` has an integer dtype with a
precision less than that of the default platform integer. In
that case, the default platform integer is used.
However, integer types are not raised when no dtype is specified
```python
import jax.numpy as jnp
print(jnp.cumsum(jnp.asarray([2, 3], dtype=jnp.uint8)).dtype) # uint8
print(jnp.cumprod(jnp.asarray([2, 3], dtype=jnp.uint8)).dtype) # uint8
```
Demo on [colab](https://colab.research.google.com/drive/19hJ6am4cRpMPG29IHFKnpmpce1O-eMKx?usp=sharing)
This behaviour (returning the same dtype as the input) makes sense but isn't what is documented.
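A small example of why the distinction matters, and of requesting a wider accumulator explicitly (values are illustrative):
```python
import jax.numpy as jnp

x = jnp.asarray([200, 100], dtype=jnp.uint8)
print(jnp.cumsum(x))                   # stays uint8 and wraps: [200  44]
print(jnp.cumsum(x, dtype=jnp.int32))  # explicit wider dtype:  [200 300]
```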
### What jax/jaxlib version are you using?
jax 0.4.16, jaxlib 0.4.16
### Which accelerator(s) are you using?
CPU and T4 GPU (colab)
### Additional system info
colab
### NVIDIA GPU info
```
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 525.105.17 Driver Version: 525.105.17 CUDA Version: 12.0 |
|-------------------------------+----------------------+----------------------+
| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|===============================+======================+======================|
| 0 Tesla T4 Off | 00000000:00:04.0 Off | 0 |
| N/A 48C P0 27W / 70W | 11743MiB / 15360MiB | 0% Default |
| | | N/A |
+-------------------------------+----------------------+----------------------+
+-----------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=============================================================================|
+-----------------------------------------------------------------------------+
```
| Hi - thanks for the report! It's easy to miss, but the per-parameter documentation does not come from JAX, but from NumPy (look for *original docstring below* at the top of the function documentation).
That said, we do try to note the differences between numpy and JAX in the description, so we should probably add some verbiage about that here. | 2023-11-06T22:31:53 |
|
google/jax | 18,431 | google__jax-18431 | [
"16258"
]
| 7f08a0d0ce9740f17664663a28d72e3c089ea13d | diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -423,7 +423,7 @@ def histogram_bin_edges(a: ArrayLike, bins: ArrayLike = 10,
if isinstance(bins, str):
raise NotImplementedError("string values for `bins` not implemented.")
util.check_arraylike("histogram_bin_edges", a, bins)
- arr = ravel(a)
+ arr = asarray(a)
dtype = dtypes.to_inexact_dtype(arr.dtype)
if _ndim(bins) == 1:
return asarray(bins, dtype=dtype)
@@ -448,18 +448,18 @@ def histogram(a: ArrayLike, bins: ArrayLike = 10,
density: bool | None = None) -> tuple[Array, Array]:
if weights is None:
util.check_arraylike("histogram", a, bins)
- a = ravel(*util.promote_dtypes_inexact(a))
+ a, = util.promote_dtypes_inexact(a)
weights = ones_like(a)
else:
util.check_arraylike("histogram", a, bins, weights)
if shape(a) != shape(weights):
raise ValueError("weights should have the same shape as a.")
- a, weights = map(ravel, util.promote_dtypes_inexact(a, weights))
+ a, weights = util.promote_dtypes_inexact(a, weights)
bin_edges = histogram_bin_edges(a, bins, range, weights)
bin_idx = searchsorted(bin_edges, a, side='right')
bin_idx = where(a == bin_edges[-1], len(bin_edges) - 1, bin_idx)
- counts = bincount(bin_idx, weights, length=len(bin_edges))[1:]
+ counts = zeros(len(bin_edges), weights.dtype).at[bin_idx].add(weights)[1:]
if density:
bin_widths = diff(bin_edges)
counts = counts / bin_widths / counts.sum()
| diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py
--- a/tests/lax_numpy_test.py
+++ b/tests/lax_numpy_test.py
@@ -2808,7 +2808,7 @@ def testHistogramBinEdges(self, shape, dtype, bins, range, weights):
atol=tol, rtol=tol)
@jtu.sample_product(
- shape=[(5,), (5, 5)],
+ shape=[(5,), (4, 5)],
dtype=default_dtypes,
# We only test explicit integer-valued bin edges because in other cases
# rounding errors lead to flaky tests.
@@ -2819,17 +2819,17 @@ def testHistogramBinEdges(self, shape, dtype, bins, range, weights):
def testHistogram(self, shape, dtype, bins, density, weights):
rng = jtu.rand_default(self.rng())
_weights = lambda w: abs(w) if weights else None
- np_fun = lambda a, w: np.histogram(a, bins=bins, density=density,
- weights=_weights(w))
+ def np_fun(a, w):
+ # Numpy can't handle bfloat16
+ a = a.astype('float32') if a.dtype == jnp.bfloat16 else a
+ w = w.astype('float32') if w.dtype == jnp.bfloat16 else w
+ return np.histogram(a, bins=bins, density=density, weights=_weights(w))
jnp_fun = lambda a, w: jnp.histogram(a, bins=bins, density=density,
weights=_weights(w))
args_maker = lambda: [rng(shape, dtype), rng(shape, dtype)]
tol = {jnp.bfloat16: 2E-2, np.float16: 1E-1}
- # np.searchsorted errors on bfloat16 with
- # "TypeError: invalid type promotion with custom data type"
- if dtype != jnp.bfloat16:
- self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,
- tol=tol)
+ self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,
+ tol=tol)
self._CompileAndCheck(jnp_fun, args_maker)
@jtu.sample_product(
| eliminate use of `jnp.ravel` in `jnp.histogram`
In general, `jnp.ravel` tends to behave extremely badly in a distributed context, and should probably be avoided wherever possible - it will at least create large amounts of ICI traffic, and at worst produce giant replicated tensors on every chip.
I've got a draft implementation that isn't quite equivalent enough for a PR.
```py
import jax
import jax.numpy as jnp
import numpy as np
from jax import Array

def bincount(
    x,
    *,
    length: int,
    weights=None,
) -> Array:
    if not jnp.issubdtype(x.dtype, jnp.integer):
        raise TypeError(
            f"x argument to bincount must have an integer type; got {x.dtype}"
        )
    if weights is None:
        weights = np.array(1, dtype=jnp.int_)
    return jnp.zeros(length, weights.dtype).at[jnp.clip(x, 0)].add(weights)

def efficient_hist(
    x: jax.Array,
    bins: int = 100,
    density: bool = False,
):
    min = jnp.min(x)
    max = jnp.max(x)
    bin_edges = jnp.linspace(min, max, bins + 1)
    bin_idx = jnp.searchsorted(bin_edges, x, side="right")
    bin_idx = jnp.clip(bin_idx, 0, len(bin_edges) - 1)
    counts = bincount(bin_idx, length=len(bin_edges))[1:]
    if density:
        bin_widths = jnp.diff(bin_edges)
        counts = counts / bin_widths / jnp.sum(counts)
    return counts, bin_edges
```
I needed the new `bincount` function because the current implementation has an assertion that `x` is one-dimensional, which is entirely unnecessary.
I've tested and this implementation works without issue in a distributed setting where the intermediate tensors are too large to construct on any single chip.
| Thanks - the reason that `jax.numpy.bincount` raises an error for two-dimensional inputs is that `numpy.bincount` also raises an error in such cases, and the semantics of `jax.numpy` functions by design match the semantics of `numpy` functions.
That said, if this more efficient histogram implementation passes existing tests, we'd be happy to change the implementation. Are you interested in putting together a pull request?
Since `efficient_hist` accepts only evenly spaced bins, you can replace `searchsorted` with `bin_idx = (x - min) // ((max - min) / bins)` | 2023-11-08T00:14:52 |
google/jax | 18,441 | google__jax-18441 | [
"17020"
]
| ba776269342703377415959e0d8c95eb1ef3262d | diff --git a/jax/_src/numpy/vectorize.py b/jax/_src/numpy/vectorize.py
--- a/jax/_src/numpy/vectorize.py
+++ b/jax/_src/numpy/vectorize.py
@@ -263,7 +263,6 @@ def wrapped(*args):
error_context = ("on vectorized function with excluded={!r} and "
"signature={!r}".format(excluded, signature))
excluded_func, args = _apply_excluded(pyfunc, excluded, args)
- args = tuple(map(jnp.asarray, args))
if signature is not None:
input_core_dims, output_core_dims = _parse_gufunc_signature(signature)
@@ -271,6 +270,15 @@ def wrapped(*args):
input_core_dims = [()] * len(args)
output_core_dims = None
+ none_args = {i for i, arg in enumerate(args) if arg is None}
+ if any(none_args):
+ if any(input_core_dims[i] != () for i in none_args):
+ raise ValueError(f"Cannot pass None at locations {none_args} with {signature=}")
+ excluded_func, args = _apply_excluded(excluded_func, none_args, args)
+ input_core_dims = [dim for i, dim in enumerate(input_core_dims) if i not in none_args]
+
+ args = tuple(map(jnp.asarray, args))
+
broadcast_shape, dim_sizes = _parse_input_dimensions(
args, input_core_dims, error_context)
| diff --git a/tests/lax_numpy_vectorize_test.py b/tests/lax_numpy_vectorize_test.py
--- a/tests/lax_numpy_vectorize_test.py
+++ b/tests/lax_numpy_vectorize_test.py
@@ -225,6 +225,22 @@ def test_expand_dims_multiple_outputs_no_signature(self):
self.assertAllClose(xx[1], x)
self.assertIsInstance(xx, tuple)
+ def test_none_arg(self):
+ f = jnp.vectorize(lambda x, y: x if y is None else x + y)
+ x = jnp.arange(10)
+ self.assertAllClose(f(x, None), x)
+
+ y = jnp.arange(10, 20)
+ self.assertAllClose(f(x, y), x + y)
+
+ def test_none_arg_bad_signature(self):
+ f = jnp.vectorize(lambda x, y: x if y is None else x + y,
+ signature='(k),(k)->(k)')
+ args = jnp.arange(10), None
+ msg = r"Cannot pass None at locations \{1\} with signature='\(k\),\(k\)->\(k\)'"
+ with self.assertRaisesRegex(ValueError, msg):
+ f(*args)
+
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| Vectorizing over None argument returns NaNs
### Description
Hi,
I would like to do the following thing:
```python
import jax.numpy as jnp
def f(x, y):
    if y is None:
        return x
    return x * y
f = jnp.vectorize(f, signature="(),()->()")
```
however, right now, if you pass it a `None` as the second argument, the returned value is `NaN` (or an array thereof).
```python
f(jnp.ones(6), None) # Returns Array([nan, nan, nan, nan, nan, nan], dtype=float32)
f(1., None) # Returns Array(nan, dtype=float32)
```
I don't think this is a valid behaviour and the call should probably either fail explicitly (like `jax.vmap`) or return the "expected" value.
WDYT?
### What jax/jaxlib version are you using?
jax v0.4.13
### Which accelerator(s) are you using?
CPU
### Additional system info
Google Colab
### NVIDIA GPU info
_No response_
`y is None` will always be false for traced values, because `None` is a Python object and traced values will be `jax.Array` objects. When you vectorize a function, ~~it works via `vmap`, which converts all mapped inputs to arrays.~~ it converts all inputs to arrays before passing them to `vmap`.
If you want to use statements like `y is None` in your code, you need to make sure that `y` never participates in a JAX transformation. In your case, it might look like this:
```python
f_wrapped = lambda x, y: jnp.vectorize(partial(f, y=y), signature="()->()")(x)
f_wrapped(jnp.ones(6), None)
# Array([1., 1., 1., 1., 1., 1.], dtype=float32)
```
@jakevdp I'm surprised to hear this, since the following code works:
```python
@jax.jit
@partial(jax.vmap, in_axes=(0, 0))
def f(x, y):
    if y is None:
        return x
    return x + y
print(f(jnp.arange(3), None)) # [0, 1, 2]
print(f(jnp.arange(3), jnp.arange(3))) # [0, 2, 4]
```
My mental model had been that `None` is an empty pytree, so when it needs to be traced it gets converted to an empty pytree of tracers. Is this officially unsupported behavior that might change at some point?
Oh, thanks for pointing that out – I think the `None` conversion is not a feature of `jnp.vmap`, but rather a feature of `jnp.vectorize`, which does not accept general pytree arguments as inputs. The reason for this difference is that the `jnp.vectorize` API is modeled after `np.vectorize` and NumPy does not have any notion of pytrees, so it treats all inputs to a vectorized function as arrays.
Ah okay, makes sense. I was worried I was about to need to change a _lot_ of code :sweat_smile:
For what it's worth, your code doesn't work with `np.vectorize` either, because it also converts `y` to an array, so that `y is None` will return False:
```python
In [1]: import numpy as np
...:
...: def f(x, y):
...:     if y is None:
...:         return x
...:     return x * y
...:
In [2]: f = np.vectorize(f, signature="(),()->()")
In [3]: f(None, 1)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
Cell In[3], line 1
----> 1 f(None, 1)
File ~/.local/share/virtualenvs/jax-LBbfM5ix/lib/python3.9/site-packages/numpy/lib/function_base.py:2372, in vectorize.__call__(self, *args, **kwargs)
2369 self._init_stage_2(*args, **kwargs)
2370 return self
-> 2372 return self._call_as_normal(*args, **kwargs)
File ~/.local/share/virtualenvs/jax-LBbfM5ix/lib/python3.9/site-packages/numpy/lib/function_base.py:2365, in vectorize._call_as_normal(self, *args, **kwargs)
2362 vargs = [args[_i] for _i in inds]
2363 vargs.extend([kwargs[_n] for _n in names])
-> 2365 return self._vectorize_call(func=func, args=vargs)
File ~/.local/share/virtualenvs/jax-LBbfM5ix/lib/python3.9/site-packages/numpy/lib/function_base.py:2446, in vectorize._vectorize_call(self, func, args)
2444 """Vectorized call to `func` over positional `args`."""
2445 if self.signature is not None:
-> 2446 res = self._vectorize_call_with_signature(func, args)
2447 elif not args:
2448 res = func()
File ~/.local/share/virtualenvs/jax-LBbfM5ix/lib/python3.9/site-packages/numpy/lib/function_base.py:2486, in vectorize._vectorize_call_with_signature(self, func, args)
2483 nout = len(output_core_dims)
2485 for index in np.ndindex(*broadcast_shape):
-> 2486 results = func(*(arg[index] for arg in args))
2488 n_results = len(results) if isinstance(results, tuple) else 1
2490 if nout != n_results:
Cell In[29], line 6, in f(x, y)
4 if y is None:
5 return x
----> 6 return x * y
TypeError: unsupported operand type(s) for *: 'NoneType' and 'int'
```
So I think `jax.numpy.vectorize` is arguably working as expected here.
You got the order of the arguments wrong, it would be f(1, None), which works fine with numpy.
Sorry I lost track of this, I think #18441 should fix the issue. Here are the results with that change:
```python
In [1]: import jax.numpy as jnp
...:
...: def f(x, y):
...: if y is None:
...: return x
...: return x * y
...:
...: f = jnp.vectorize(f, signature="(),()->()")
In [2]: f(jnp.ones(6), None)
Out[2]: Array([1., 1., 1., 1., 1., 1.], dtype=float32)
In [3]: f(1., None)
Out[3]: Array(1., dtype=float32, weak_type=True)
``` | 2023-11-08T19:43:23 |
google/jax | 18,444 | google__jax-18444 | [
"18442"
]
| 6cc6d093643c0265c7de4027f79879f6945e0342 | diff --git a/jax/_src/interpreters/ad.py b/jax/_src/interpreters/ad.py
--- a/jax/_src/interpreters/ad.py
+++ b/jax/_src/interpreters/ad.py
@@ -586,19 +586,19 @@ def zero_jvp(primitive, primals, tangents, **params):
deflinear2(add_jaxvals_p, lambda t, *args: (t, t))
def instantiate_zeros(tangent):
- if type(tangent) is Zero:
- return zeros_like_aval(tangent.aval)
- else:
+ if type(tangent) is not Zero:
return tangent
+ return instantiate_zeros_aval(tangent.aval, tangent)
# This function seems similar to instantiate_zeros, but it is sometimes used
# to instantiate zero abstract units with a different aval
def instantiate_zeros_aval(aval, tangent):
- if type(tangent) is Zero:
- assert tangent.aval == aval
- return zeros_like_aval(aval)
- else:
+ if type(tangent) is not Zero:
return tangent
+ assert tangent.aval == aval
+ if jax.dtypes.issubdtype(aval.dtype, jax.dtypes.extended):
+ return aval.dtype._rules.make_tangent(aval.shape, aval.dtype)
+ return zeros_like_aval(aval)
@lu.transformation_with_aux
def traceable(in_tree, *primals_and_tangents):
diff --git a/jax/_src/prng.py b/jax/_src/prng.py
--- a/jax/_src/prng.py
+++ b/jax/_src/prng.py
@@ -472,6 +472,23 @@ def full(shape, fill_value, dtype):
# the outset.
return random_wrap(key_data, impl=dtype._impl)
+ @staticmethod
+ def make_tangent(shape, dtype):
+ physical_shape = (*shape, *dtype._impl.key_shape)
+ def not_implemented(name):
+ def func(*args):
+ raise NotImplementedError(f"Cannot call {name} on tangent of PRNG key.")
+ return func
+ impl = PRNGImpl(
+ key_shape=dtype._impl.key_shape,
+ seed=not_implemented('seed'),
+ split=not_implemented('split'),
+ random_bits=not_implemented('random_bits'),
+ fold_in=not_implemented('fold_in'),
+ name=f"{dtype._impl.name}_tangent",
+ tag=f"{dtype._impl.tag}_t")
+ return random_wrap(jnp.zeros(physical_shape, dtype='uint32'), impl=impl)
+
@staticmethod
def physical_element_aval(dtype) -> core.ShapedArray:
return core.ShapedArray(dtype._impl.key_shape, jnp.dtype('uint32'))
@@ -594,6 +611,20 @@ def device_put_replicated(val, aval, sharding, devices):
return random_wrap(physical_result, impl=aval.dtype._impl)
+class KeyTangentTy(dtypes.ExtendedDType):
+ """A dtype to use for the tangent of a PRNGKey"""
+ _impl: PRNGImpl
+ type = dtypes.prng_key
+
+ @property
+ def _rules(self):
+ raise ValueError("Cannot perform operations on the tangent of a PRNGKey.")
+
+ @property
+ def name(self) -> str:
+ return f'key_tangent<{self._impl.tag}>'
+
+
class KeyTy(dtypes.ExtendedDType):
_impl: PRNGImpl # TODO(mattjj,frostig): protocol really
_rules = KeyTyRules
| diff --git a/tests/random_test.py b/tests/random_test.py
--- a/tests/random_test.py
+++ b/tests/random_test.py
@@ -1107,6 +1107,36 @@ class A: pass
with self.assertRaisesRegex(TypeError, 'unrecognized type .* PRNG'):
jax.random.key(42, impl=A())
+ def test_keyarray_custom_vjp(self):
+ # Regression test for https://github.com/google/jax/issues/18442
+ @jax.custom_vjp
+ def f(_, state):
+ return state
+ def _f_fwd(_, state):
+ return state, None
+ def _f_bwd(_, state_bar):
+ assert state_bar[1].dtype.name == "key<fry_t>" # key tangent type
+ return state_bar
+ f.defvjp(_f_fwd, _f_bwd)
+ state = (8.0, jax.random.key(123))
+ result = jax.grad(lambda theta: f(theta, state)[0])(3.0)
+ self.assertEqual(result, 1.0)
+
+ def test_keyarray_custom_vjp_symbolic_zeros(self):
+ @jax.custom_vjp
+ def f(_, state):
+ return state
+ def _f_fwd(_, state):
+ return tree_util.tree_map(lambda x: x.value, state), None
+ def _f_bwd(_, state_bar):
+ self.assertTrue(dtypes.issubdtype(state_bar[1].dtype, dtypes.prng_key))
+ self.assertIsInstance(state_bar[1], jax.custom_derivatives.SymbolicZero)
+ return state_bar
+ f.defvjp(_f_fwd, _f_bwd, symbolic_zeros=True)
+ state = (8.0, jax.random.key(123))
+ result = jax.grad(lambda theta: f(theta, state)[0])(3.0)
+ self.assertEqual(result, 1.0)
+
# TODO(frostig,mattjj): more polymorphic primitives tests
| Cannot take gradient of VJP function involving new-style RNG key
### Description
```python
from jax import custom_vjp, grad
from jax.random import key
@custom_vjp
def find_fixed_point(theta, state):
    return state

def _ffp_fwd(theta, state):
    return state, None

def _ffp_bwd(residuals, state_bar):
    assert False

find_fixed_point.defvjp(_ffp_fwd, _ffp_bwd)

def fixed_point_using_while_of_theta(theta) -> float:
    state = (8.0, key(123))
    x, _ = find_fixed_point(theta, state)
    return x
grad(fixed_point_using_while_of_theta)(3.0)
```
gives
```
ValueError: Cannot convert_element_type to dtype=key<fry>
```
@froystig
### What jax/jaxlib version are you using?
0.4.20
### Which accelerator(s) are you using?
_No response_
### Additional system info
_No response_
### NVIDIA GPU info
_No response_
| Thanks for the report – if you could figure out where the `convert_element_type` primitive is being called, that would help. You might start by setting `JAX_TRACEBACK_FILTERING='off'` to see the full traceback.
I tried running locally to set a breakpoint, but I'm not able to import `tjax` because of Python version incompatibilities.
@jakevdp Yeah, I tried turning off the filtering, but I'm not so good at understanding the Jax internals :smile: . I've edited the issue and removed the TJax dependency if you have time to look at it again.
Okay, I think it's close to minimal now.
It looks like RNG keys can no longer be primals to custom VJPs?
Thanks for the repro! Definitely an oversight. I think #18444 should fix it (I hit the `assert False` in your minimal example)
I just ran my full TJax tests and they all pass now. Thanks again for the quick fix! | 2023-11-08T21:59:22 |
google/jax | 18,499 | google__jax-18499 | [
"15670"
]
| 6b6d5a9042b594805cd0832ba3ec4b6808edccf2 | diff --git a/jax/_src/scipy/special.py b/jax/_src/scipy/special.py
--- a/jax/_src/scipy/special.py
+++ b/jax/_src/scipy/special.py
@@ -1719,3 +1719,183 @@ def bernoulli(n: int) -> Array:
k = jnp.arange(2, 50, dtype=bn.dtype) # Choose 50 because 2 ** -50 < 1E-15
q2 = jnp.sum(k[:, None] ** -m[None, :], axis=0)
return bn.at[4::2].set(q1 * (1 + q2))
+
+
+@custom_derivatives.custom_jvp
+@_wraps(osp_special.poch, module='scipy.special', lax_description="""\
+The JAX version only accepts positive and real inputs.""")
+def poch(z: ArrayLike, m: ArrayLike) -> Array:
+ # Factorial definition when m is close to an integer, otherwise gamma definition.
+ z, m = promote_args_inexact("poch", z, m)
+
+ return jnp.where(m == 0., jnp.array(1, dtype=z.dtype), gamma(z + m) / gamma(z))
+
+
+def _poch_z_derivative(z, m):
+ """
+ Defined in :
+ https://functions.wolfram.com/GammaBetaErf/Pochhammer/20/01/01/
+ """
+
+ return (digamma(z + m) - digamma(z)) * poch(z, m)
+
+
+def _poch_m_derivative(z, m):
+ """
+ Defined in :
+ https://functions.wolfram.com/GammaBetaErf/Pochhammer/20/01/02/
+ """
+
+ return digamma(z + m) * poch(z, m)
+
+
+poch.defjvps(
+ lambda z_dot, primal_out, z, m: _poch_z_derivative(z, m) * z_dot,
+ lambda m_dot, primal_out, z, m: _poch_m_derivative(z, m) * m_dot,
+)
+
+
+def _hyp1f1_serie(a, b, x):
+ """
+ Compute the 1F1 hypergeometric function using the taylor expansion
+ See Eq. 3.2 and associated method (a) from PEARSON, OLVER & PORTER 2014
+ https://doi.org/10.48550/arXiv.1407.7786
+ """
+
+ def body(state):
+ serie, k, term = state
+ serie += term
+ term *= (a + k) / (b + k) * x / (k + 1)
+ k += 1
+
+ return serie, k, term
+
+ def cond(state):
+ serie, k, term = state
+
+ return (k < 250) & (lax.abs(term) / lax.abs(serie) > 1e-8)
+
+ init = 1, 1, a / b * x
+
+ return lax.while_loop(cond, body, init)[0]
+
+
+def _hyp1f1_asymptotic(a, b, x):
+ """
+ Compute the 1F1 hypergeometric function using asymptotic expansion
+ See Eq. 3.8 and simplification for real inputs from PEARSON, OLVER & PORTER 2014
+ https://doi.org/10.48550/arXiv.1407.7786
+ """
+
+ def body(state):
+ serie, k, term = state
+ serie += term
+ term *= (b - a + k) * (1 - a + k) / (k + 1) / x
+ k += 1
+
+ return serie, k, term
+
+ def cond(state):
+ serie, k, term = state
+
+ return (k < 250) & (lax.abs(term) / lax.abs(serie) > 1e-8)
+
+ init = 1, 1, (b - a) * (1 - a) / x
+ serie = lax.while_loop(cond, body, init)[0]
+
+ return gamma(b) / gamma(a) * lax.exp(x) * x ** (a - b) * serie
+
+
+@jit
[email protected]
+def _hyp1f1_a_derivative(a, b, x):
+ """
+ Define it as a serie using :
+ https://functions.wolfram.com/HypergeometricFunctions/Hypergeometric1F1/20/01/01/
+ """
+
+ def body(state):
+ serie, k, term = state
+ serie += term * (digamma(a + k) - digamma(a))
+ term *= (a + k) / (b + k) * x / (k + 1)
+ k += 1
+
+ return serie, k, term
+
+ def cond(state):
+ serie, k, term = state
+
+ return (k < 250) & (lax.abs(term) / lax.abs(serie) > 1e-15)
+
+ init = 0, 1, a / b * x
+
+ return lax.while_loop(cond, body, init)[0]
+
+
+@jit
[email protected]
+def _hyp1f1_b_derivative(a, b, x):
+ """
+ Define it as a serie using :
+ https://functions.wolfram.com/HypergeometricFunctions/Hypergeometric1F1/20/01/02/
+ """
+
+ def body(state):
+ serie, k, term = state
+ serie += term * (digamma(b) - digamma(b + k))
+ term *= (a + k) / (b + k) * x / (k + 1)
+ k += 1
+
+ return serie, k, term
+
+ def cond(state):
+ serie, k, term = state
+
+ return (k < 250) & (lax.abs(term) / lax.abs(serie) > 1e-15)
+
+ init = 0, 1, a / b * x
+
+ return lax.while_loop(cond, body, init)[0]
+
+
+@jit
+def _hyp1f1_x_derivative(a, b, x):
+ """
+ Define it as a serie using :
+ https://functions.wolfram.com/HypergeometricFunctions/Hypergeometric1F1/20/01/04/
+ """
+
+ return a / b * hyp1f1(a + 1, b + 1, x)
+
+
+@custom_derivatives.custom_jvp
+@jit
[email protected]
+@_wraps(osp_special.hyp1f1, module='scipy.special', lax_description="""\
+The JAX version only accepts positive and real inputs. Values of a, b and x
+leading to high values of 1F1 might be erroneous, considering enabling double
+precision. Convention for a = b = 0 is 1, unlike in scipy's implementation.""")
+def hyp1f1(a, b, x):
+ """
+ Implementation of the 1F1 hypergeometric function for real valued inputs
+ Backed by https://doi.org/10.48550/arXiv.1407.7786
+ There is room for improvement in the implementation using recursion to
+ evaluate lower values of hyp1f1 when a or b or both are > 60-80
+ """
+ a, b, x = promote_args_inexact('hyp1f1', a, b, x)
+
+ result = lax.cond(lax.abs(x) < 100, _hyp1f1_serie, _hyp1f1_asymptotic, a, b, x)
+ index = (a == 0) * 1 + ((a == b) & (a != 0)) * 2 + ((b == 0) & (a != 0)) * 3
+
+ return lax.select_n(index,
+ result,
+ jnp.array(1, dtype=x.dtype),
+ jnp.exp(x),
+ jnp.array(jnp.inf, dtype=x.dtype))
+
+
+hyp1f1.defjvps(
+ lambda a_dot, primal_out, a, b, x: _hyp1f1_a_derivative(a, b, x) * a_dot,
+ lambda b_dot, primal_out, a, b, x: _hyp1f1_b_derivative(a, b, x) * b_dot,
+ lambda x_dot, primal_out, a, b, x: _hyp1f1_x_derivative(a, b, x) * x_dot
+)
diff --git a/jax/scipy/special.py b/jax/scipy/special.py
--- a/jax/scipy/special.py
+++ b/jax/scipy/special.py
@@ -54,4 +54,6 @@
zeta as zeta,
kl_div as kl_div,
rel_entr as rel_entr,
+ poch as poch,
+ hyp1f1 as hyp1f1,
)
| diff --git a/tests/lax_scipy_special_functions_test.py b/tests/lax_scipy_special_functions_test.py
--- a/tests/lax_scipy_special_functions_test.py
+++ b/tests/lax_scipy_special_functions_test.py
@@ -141,7 +141,8 @@ def op_record(name, nargs, dtypes, rng_factory, test_grad, nondiff_argnums=(), t
op_record(
"rel_entr", 2, float_dtypes, jtu.rand_positive, True,
),
-
+ op_record("poch", 2, float_dtypes, jtu.rand_positive, True),
+ op_record("hyp1f1", 3, float_dtypes, functools.partial(jtu.rand_uniform, low=0.5, high=30), True)
]
| feat (scipy.special): Add hyp1f1 confluent hypergeometric function
I would like to implement a logpdf for the sum of n=2 independent gamma distributions for pymc.
For the special case of n=2, Mathai's method can be used, as in Eqn. 3 of [https://doi.org/10.1007/s00180-019-00924-9](https://doi.org/10.1007/s00180-019-00924-9), which involves the confluent hypergeometric function ${}_1F_1(a,b,z)$.
- scipy.special ships with the hyp1f1 function.
- The C source code can be found [here](https://github.com/scipy/scipy/blob/main/scipy/special/cephes/hyperg.c).
What would be the way to go? Translate the code into pure Python/Numpy and let jax handle it with jit?
I think TensorFlow's XLA has no variant of the function, but there is one for the (~~confluent~~) hypergeometric function ${}_2F_1(a,b,z)$ [cf. here](https://www.tensorflow.org/probability/api_docs/python/tfp/math/hypergeometric/hyp2f1_small_argument)
Are there any plans (maybe also considering XLA development) to add such functionality?
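The patch in this record adds `jax.scipy.special.hyp1f1` (and `poch`) for real, positive inputs; a minimal usage sketch, with SciPy as a reference (the arguments are illustrative):
```python
import scipy.special
from jax.scipy.special import hyp1f1

a, b, x = 1.5, 3.25, 2.0
print(hyp1f1(a, b, x))                # JAX value (series / asymptotic expansion)
print(scipy.special.hyp1f1(a, b, x))  # SciPy reference for comparison
```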
| This would be useful for me too! Did you find an alternative in the meantime?
For me as well! +1
(It's also related to [#2991](https://github.com/google/jax/issues/2991), given that the two functions (and the way their derivatives are computed) are quite similar.)
I think that the implementation proposed by [Gil et al. 2023](https://link.springer.com/article/10.1007/s11075-023-01515-y) might be suited for JAX. I'll try to translate it into LAX/JAX in the next few days. | 2023-11-13T14:37:28 |
google/jax | 18,546 | google__jax-18546 | [
"18542"
]
| 5c3da219c032345dc0c16a5dc5486d79ddf4e77e | diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -4774,6 +4774,7 @@ def _expand_bool_indices(idx, shape):
total_dims = sum(_ndim(e) if _is_boolean_index(e) else 1 for e in idx
if e is not None and e is not Ellipsis)
ellipsis_offset = 0
+ newaxis_offset = 0
for dim_number, i in enumerate(idx):
try:
abstract_i = core.get_aval(i)
@@ -4791,7 +4792,7 @@ def _expand_bool_indices(idx, shape):
raise TypeError("JAX arrays do not support boolean scalar indices")
else:
i_shape = _shape(i)
- start = len(out) + ellipsis_offset
+ start = len(out) + ellipsis_offset - newaxis_offset
expected_shape = shape[start: start + _ndim(i)]
if i_shape != expected_shape:
raise IndexError("boolean index did not match shape of indexed array in index "
@@ -4801,6 +4802,8 @@ def _expand_bool_indices(idx, shape):
out.append(i)
if i is Ellipsis:
ellipsis_offset = len(shape) - total_dims - 1
+ if i is None:
+ newaxis_offset += 1
return tuple(out)
| diff --git a/tests/lax_numpy_indexing_test.py b/tests/lax_numpy_indexing_test.py
--- a/tests/lax_numpy_indexing_test.py
+++ b/tests/lax_numpy_indexing_test.py
@@ -974,6 +974,30 @@ def testBooleanIndexingShapeMismatch(self):
with self.assertRaisesRegex(IndexError, "boolean index did not match shape.*"):
x[idx]
+ def testBooleanIndexingWithNone(self):
+ # Regression test for https://github.com/google/jax/issues/18542
+ x = jnp.arange(6).reshape(2, 3)
+ idx = (None, jnp.array([True, False]))
+ ans = x[idx]
+ expected = jnp.arange(3).reshape(1, 1, 3)
+ self.assertAllClose(ans, expected)
+
+ def testBooleanIndexingWithNoneAndEllipsis(self):
+ # Regression test for https://github.com/google/jax/issues/18542
+ x = jnp.arange(6).reshape(2, 3)
+ mask = jnp.array([True, False, False])
+ ans = x[None, ..., mask]
+ expected = jnp.array([0, 3]).reshape(1, 2, 1)
+ self.assertAllClose(ans, expected)
+
+ def testBooleanIndexingWithEllipsisAndNone(self):
+ # Regression test for https://github.com/google/jax/issues/18542
+ x = jnp.arange(6).reshape(2, 3)
+ mask = jnp.array([True, False, False])
+ ans = x[..., None, mask]
+ expected = jnp.array([0, 3]).reshape(2, 1, 1)
+ self.assertAllClose(ans, expected)
+
def testNontrivialBooleanIndexing(self):
# Test nontrivial corner case in boolean indexing shape validation
rng = jtu.rand_default(self.rng())
| Array indexing: boolean mask after `newaxis` leads to error
Working example in NumPy:
```python
import numpy as np
x = np.arange(6).reshape(2, 3)
mask = np.array([True, True])
x[np.newaxis, mask]
# array([[[0, 1, 2],
# [3, 4, 5]]])
```
Error in JAX:
```python
import jax.numpy as jnp
x = jnp.arange(6).reshape(2, 3)
mask = jnp.array([True, True])
x[jnp.newaxis, mask]
# IndexError: boolean index did not match shape of indexed array in index 1: got (2,), expected (3,)
```
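A possible workaround on affected versions (not part of the original report, just a sketch): apply the boolean mask first and add the new axis afterwards, which sidesteps the index-offset bookkeeping that the error comes from:
```python
import jax.numpy as jnp

x = jnp.arange(6).reshape(2, 3)
mask = jnp.array([True, True])
out = x[mask][jnp.newaxis]  # same result NumPy gives for x[np.newaxis, mask]
print(out.shape)  # (1, 2, 3)
```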
| 2023-11-15T17:03:40 |
|
google/jax | 18,550 | google__jax-18550 | [
"18548"
]
| 946819fc0e54163b8859a5e6d7f64eecbc6e6fe1 | diff --git a/jax/_src/api.py b/jax/_src/api.py
--- a/jax/_src/api.py
+++ b/jax/_src/api.py
@@ -1089,8 +1089,8 @@ def vmap(fun: F,
Args:
fun: Function to be mapped over additional axes.
- in_axes: An integer, None, or (nested) standard Python container
- (tuple/list/dict) thereof specifying which input array axes to map over.
+ in_axes: An integer, None, or sequence of values specifying which input
+ array axes to map over.
If each positional argument to ``fun`` is an array, then ``in_axes`` can
be an integer, a None, or a tuple of integers and Nones with length equal
@@ -1101,11 +1101,12 @@ def vmap(fun: F,
range ``[-ndim, ndim)`` for each array, where ``ndim`` is the number of
dimensions (axes) of the corresponding input array.
- If the positional arguments to ``fun`` are container (pytree) types, the
- corresponding element of ``in_axes`` can itself be a matching container,
- so that distinct array axes can be mapped for different container
- elements. ``in_axes`` must be a container tree prefix of the positional
- argument tuple passed to ``fun``. See this link for more detail:
+ If the positional arguments to ``fun`` are container (pytree) types, ``in_axes``
+ must be a sequence with length equal to the number of positional arguments to
+ ``fun``, and for each argument the corresponding element of ``in_axes`` can
+ be a container with a matching pytree structure specifying the mapping of its
+ container elements. In other words, ``in_axes`` must be a container tree prefix
+ of the positional argument tuple passed to ``fun``. See this link for more detail:
https://jax.readthedocs.io/en/latest/pytrees.html#applying-optional-parameters-to-pytrees
Either ``axis_size`` must be provided explicitly, or at least one
@@ -1233,18 +1234,23 @@ def vmap(fun: F,
# rather than raising an error. https://github.com/google/jax/issues/2367
in_axes = tuple(in_axes)
- if not all(type(l) is int or type(l) in batching.spec_types
- for l in tree_leaves(in_axes)):
+ if not (in_axes is None or type(in_axes) in {int, tuple, *batching.spec_types}):
+ raise TypeError("vmap in_axes must be an int, None, or a tuple of entries corresponding "
+ f"to the positional arguments passed to the function, but got {in_axes}.")
+ if not all(type(l) in {int, *batching.spec_types} for l in tree_leaves(in_axes)):
raise TypeError("vmap in_axes must be an int, None, or (nested) container "
f"with those types as leaves, but got {in_axes}.")
- if not all(type(l) is int or type(l) in batching.spec_types
- for l in tree_leaves(out_axes)):
+ if not all(type(l) in {int, *batching.spec_types} for l in tree_leaves(out_axes)):
raise TypeError("vmap out_axes must be an int, None, or (nested) container "
f"with those types as leaves, but got {out_axes}.")
@wraps(fun, docstr=docstr)
@api_boundary
def vmap_f(*args, **kwargs):
+ if isinstance(in_axes, tuple) and len(in_axes) != len(args):
+ raise ValueError("vmap in_axes must be an int, None, or a tuple of entries corresponding "
+ "to the positional arguments passed to the function, "
+ f"but got {len(in_axes)=}, {len(args)=}")
args_flat, in_tree = tree_flatten((args, kwargs), is_leaf=batching.is_vmappable)
f = lu.wrap_init(fun)
flat_fun, out_tree = batching.flatten_fun_for_vmap(f, in_tree)
diff --git a/jax/_src/api_util.py b/jax/_src/api_util.py
--- a/jax/_src/api_util.py
+++ b/jax/_src/api_util.py
@@ -404,7 +404,6 @@ def flatten_axes(name, treedef, axis_tree, *, kws=False, tupled_args=False):
# the given treedef, build a complete axis spec tree with the same structure
# and return the flattened result
# TODO(mattjj,phawkins): improve this implementation
-
proxy = object()
dummy = tree_unflatten(treedef, [object()] * treedef.num_leaves)
axes = []
| diff --git a/tests/api_test.py b/tests/api_test.py
--- a/tests/api_test.py
+++ b/tests/api_test.py
@@ -2981,15 +2981,31 @@ def f(dct, x, y):
out2 = api.vmap(f, [None, 0, 0])(dictionary, x, y)
self.assertAllClose(out1, out2)
+ def test_vmap_in_axes_non_tuple_error(self):
+ # https://github.com/google/jax/issues/18548
+ with self.assertRaisesRegex(
+ TypeError,
+ re.escape("vmap in_axes must be an int, None, or a tuple of entries corresponding "
+ "to the positional arguments passed to the function, but got {'a': 0}.")):
+ jax.vmap(lambda x: x['a'], in_axes={'a': 0})
+
+ def test_vmap_in_axes_wrong_length_tuple_error(self):
+ # https://github.com/google/jax/issues/18548
+ with self.assertRaisesRegex(
+ ValueError,
+ re.escape("vmap in_axes must be an int, None, or a tuple of entries corresponding to the "
+ "positional arguments passed to the function, but got len(in_axes)=2, len(args)=1")):
+ jax.vmap(lambda x: x['a'], in_axes=(0, {'a': 0}))({'a': jnp.zeros((3, 3))})
+
def test_vmap_in_axes_tree_prefix_error(self):
# https://github.com/google/jax/issues/795
value_tree = jnp.ones(3)
self.assertRaisesRegex(
ValueError,
"vmap in_axes specification must be a tree prefix of the corresponding "
- r"value, got specification \(0, 0\) for value tree "
+ r"value, got specification \(\[0\],\) for value tree "
+ re.escape(f"{tree_util.tree_structure((value_tree,))}."),
- lambda: api.vmap(lambda x: x, in_axes=(0, 0))(value_tree)
+ lambda: api.vmap(lambda x: x, in_axes=([0],))(value_tree)
)
def test_vmap_in_axes_leaf_types(self):
| vmap in_axes seems to be failing with most pytrees
### Description
According to the doc, I should be able to specify a pytree matching the inputs in the argument in_axes. But even simple pytrees such as native dicts give an error:
```
import jax
import jax.numpy as jnp
def f(x):
return x['a']
input = {'a': jnp.zeros((3, 3))}
jax.vmap(f, in_axes={'a': 0})(input)
```
gives the following error
```
ValueError Traceback (most recent call last)
in <module>()
2 return x['a']
3 input = {'a': jnp.zeros((3, 3))}
----> 4 jax.vmap(f, in_axes={'a': 0})(input)
ValueError: vmap in_axes specification must be a tree prefix of the corresponding value, got specification {'a': 0} for value tree PyTreeDef(({'a': *},)).
```
I believe this is a bug, unless I'm missing something?
### What jax/jaxlib version are you using?
jax 0.4.20, jaxlib 0.4.20
### Which accelerator(s) are you using?
cpu
### Additional system info
on a public google colab
### NVIDIA GPU info
_No response_
| 2023-11-15T19:57:37 |
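For reference, the form the API expects here (a sketch based on the error message above): `in_axes` needs one entry per positional argument, and that per-argument entry may then be a pytree matching the argument's structure:
```python
import jax
import jax.numpy as jnp

def f(x):
    return x['a']

inputs = {'a': jnp.zeros((3, 3))}
# Note the single-element tuple: one entry for the one positional argument.
out = jax.vmap(f, in_axes=({'a': 0},))(inputs)
print(out.shape)  # (3, 3)
```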
|
google/jax | 18,682 | google__jax-18682 | [
"18680"
]
| 523f36153f7e39f5bc87a4d9fb346fdeb5122f22 | diff --git a/jax/_src/lax/lax.py b/jax/_src/lax/lax.py
--- a/jax/_src/lax/lax.py
+++ b/jax/_src/lax/lax.py
@@ -4055,7 +4055,8 @@ def _canonicalize_float_for_sort(x):
# and NaNs in the output.
result = select(eq(x, _zero(x)), _zeros(x), x)
- result = select(_isnan(x), full_like(result, np.nan), result)
+ with jax.debug_nans(False):
+ result = select(_isnan(x), full_like(result, np.nan), result)
return result
| NaN Error only in combination of `disable_jit` and `debug_nans`
Is this expected behavior?
(jax+jaxlib 0.4.20)
```python
from jax import config
config.update("jax_debug_nans", True)
config.update('jax_disable_jit', True)
import jax.numpy as jnp
import jax
xp = jnp.array([0. , 0.16 , 0.35 , 0.39999998, 0.53999996,
0.62 , 0.78999996, 0.95 , 1.25 , 1.36 ,
1.43 , 1.5799999 , 1.77 , 1.86 , 1.9499999 ,
2.02 , 2.09 , 2.29 , 2.52 , 2.74 ,
2.8899999 , 3.1499999 , 3.35 , 3.4499998 , 3.59 ,
3.8 , 3.86 , 4.06 , 4.2 , 4.46 ,
4.62 , 4.8599997 , 4.97 , 5.15 , 5.15 ,
])
x = jnp.arange(0, 5.0, step=0.01)
@jax.jit
def f(x, xp):
return jnp.searchsorted(xp, x, side="right")
# throws error only if both config flags are enabled
f(x, xp)
```
throws `FloatingPointError: invalid value (nan) encountered in jit(convert_element_type)`
Edit: Oops, sorry, I must have clicked on enhancement, but this is a potential bug report.
| As is stated in `jax_debug_nans`'s [limitations](https://jax.readthedocs.io/en/latest/debugging/flags.html#limitations), `jax_debug_nans` could produce
> Errors on false positives (e.g. intentionally created NaNs)
This might be the case for `searchsorted`: IEEE 754 floating-point numbers do not have a total ordering, so a correct `searchsorted` has to handle `nan` explicitly, which is done by sorting `nan` values to the end. As a result, some intentionally created `nan` values can be involved in this process. | 2023-11-27T16:42:08
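A user-level sketch of the same idea the fix applies inside the library: temporarily disable the NaN check around the call that creates NaNs on purpose.
```python
import jax
import jax.numpy as jnp

jax.config.update("jax_debug_nans", True)
jax.config.update("jax_disable_jit", True)

xp = jnp.array([0.0, 0.5, 1.0, 2.0])
x = jnp.arange(0.0, 2.0, 0.25)

# Locally suppress the false positive; the flag is restored on exit.
with jax.debug_nans(False):
    idx = jnp.searchsorted(xp, x, side="right")
print(idx)
```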
|
google/jax | 18,706 | google__jax-18706 | [
"11479"
]
| c855bb0371fd7df3e2c33c0d153a23299b4f1988 | diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -49,6 +49,7 @@
from jax._src import api_util
from jax._src import config
from jax._src import core
+from jax._src.custom_derivatives import custom_jvp
from jax._src import dispatch
from jax._src import dtypes
from jax._src.api_util import _ensure_index_tuple
@@ -2644,6 +2645,7 @@ def meshgrid(*xi: ArrayLike, copy: bool = True, sparse: bool = False,
return output
+@custom_jvp
@util._wraps(np.i0)
@jit
def i0(x: ArrayLike) -> Array:
@@ -2653,6 +2655,11 @@ def i0(x: ArrayLike) -> Array:
x_arr = lax.abs(x_arr)
return lax.mul(lax.exp(x_arr), lax.bessel_i0e(x_arr))
[email protected]
+def _i0_jvp(primals, tangents):
+ primal_out, tangent_out = jax.jvp(i0.fun, primals, tangents)
+ return primal_out, where(primals[0] == 0, 0.0, tangent_out)
+
@util._wraps(np.ix_)
def ix_(*args: ArrayLike) -> tuple[Array, ...]:
| diff --git a/tests/lax_numpy_operators_test.py b/tests/lax_numpy_operators_test.py
--- a/tests/lax_numpy_operators_test.py
+++ b/tests/lax_numpy_operators_test.py
@@ -641,6 +641,11 @@ def __rmul__(self, other):
self.assertIsInstance(b * a, MyArray)
self.assertIsInstance(jax.jit(operator.mul)(b, a), MyArray)
+ def testI0Grad(self):
+ # Regression test for https://github.com/google/jax/issues/11479
+ dx = jax.grad(jax.numpy.i0)(0.0)
+ self.assertArraysEqual(dx, 0.0)
+
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| The gradient of `i0` for input 0 should be 0
Based on the definition of `i0`, its gradient for input 0 should be 0. It is differentiable at the point 0, unlike `abs`.
```py
import jax
a = jax.numpy.array([0.])
print(jax.jacrev(jax.numpy.i0)(a))
# [[1.]]
# It should be 0
```
| Any update on this bug?
Thanks - sorry this fell through the cracks. It looks like this could be fixed by adding a custom JVP rule to `jax.numpy.i0`. Is this something you'd be interested in contributing? If not we can have someone on the team take care of it. | 2023-11-28T20:24:51 |
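A minimal sketch of that suggestion (the wrapper name `_i0` is hypothetical; it mirrors the shape of the eventual fix rather than being the exact library code):
```python
import jax
import jax.numpy as jnp

@jax.custom_jvp
def _i0(x):
    x = jnp.abs(x)
    return jnp.exp(x) * jax.lax.bessel_i0e(x)

@_i0.defjvp
def _i0_jvp(primals, tangents):
    # Fall back to the ordinary JVP, but force the tangent to 0 at x == 0.
    y, ty = jax.jvp(_i0.fun, primals, tangents)
    return y, jnp.where(primals[0] == 0, 0.0, ty)

print(jax.grad(_i0)(0.0))  # 0.0 instead of 1.0
```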
google/jax | 18,708 | google__jax-18708 | [
"14901"
]
| 1d269ed364ba34beb001b2a3ba8cfb0683573d79 | diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -2298,7 +2298,12 @@ def _check_forgot_shape_tuple(name, shape, dtype) -> str | None: # type: ignore
def array_equal(a1: ArrayLike, a2: ArrayLike, equal_nan: bool = False) -> Array:
try:
a1, a2 = asarray(a1), asarray(a2)
- except Exception:
+ except Exception as err:
+ # TODO(jakevdp): Deprecated 2023-11-23; change to error.
+ warnings.warn("Inputs to array_equal() cannot be coerced to array. "
+ "Returning False; in the future this will raise an exception.\n"
+ f"{err!r}",
+ DeprecationWarning, stacklevel=2)
return bool_(False)
if shape(a1) != shape(a2):
return bool_(False)
@@ -2312,7 +2317,12 @@ def array_equal(a1: ArrayLike, a2: ArrayLike, equal_nan: bool = False) -> Array:
def array_equiv(a1: ArrayLike, a2: ArrayLike) -> Array:
try:
a1, a2 = asarray(a1), asarray(a2)
- except Exception:
+ except Exception as err:
+ # TODO(jakevdp): Deprecated 2023-11-23; change to error.
+ warnings.warn("Inputs to array_equiv() cannot be coerced to array. "
+ "Returning False; in the future this will raise an exception.\n"
+ f"{err!r}",
+ DeprecationWarning, stacklevel=2)
return bool_(False)
try:
eq = ufuncs.equal(a1, a2)
| Should `array_equal` and `array_equiv` enforce array like for their arguments?
The type hints say yes, yet the implementations are permissive about their arguments.
```python
jnp.array_equal(np.array([1, 2, 3]), [1, 2, 3])
# Array(True)
```
Does #7737 apply here?
| Yes, it probably should. We’ve been slow to implement #7737 because every change is an API breakage that needs a deprecation cycle, and that can be difficult to land due to necessary downstream changes. | 2023-11-28T20:56:27 |
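A sketch of what a deprecation-cycle-friendly version of that change could look like (hypothetical wrapper, not the library code): warn instead of silently returning False when an input cannot be coerced to an array.
```python
import warnings
import jax.numpy as jnp

def array_equal_checked(a1, a2, equal_nan=False):
    try:
        a1, a2 = jnp.asarray(a1), jnp.asarray(a2)
    except Exception as err:
        warnings.warn("Inputs to array_equal() cannot be coerced to array; "
                      f"returning False for now.\n{err!r}",
                      DeprecationWarning, stacklevel=2)
        return jnp.bool_(False)
    return jnp.array_equal(a1, a2, equal_nan=equal_nan)
```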
|
google/jax | 18,714 | google__jax-18714 | [
"17448"
]
| cb7c2ed848853ef75d7044e395f8b8ee5a737a28 | diff --git a/jax/experimental/sparse/_base.py b/jax/experimental/sparse/_base.py
--- a/jax/experimental/sparse/_base.py
+++ b/jax/experimental/sparse/_base.py
@@ -16,6 +16,7 @@
import abc
from collections.abc import Sequence
import math
+import operator
import jax
from jax._src import core
@@ -44,7 +45,7 @@ def ndim(self) -> int:
return len(self.shape)
def __init__(self, args: tuple[Array, ...], *, shape: Sequence[int]):
- self.shape = tuple(shape)
+ self.shape = core.canonicalize_shape(shape)
def __repr__(self):
name = self.__class__.__name__
| BCOO matrix shape behaves differently than dense matrices
### Description
Not a big issue... but this behavior confused me.
```
import jax.numpy as jnp
from jax import jit
i = jnp.int32(1)
# I was able to fix my issue with BCOO by casting to a python int dtype
# i = int(i)
array_shape = (i,)
m_dense = jnp.ones(shape=array_shape)
m_sparse = BCOO((jnp.array([1]), jnp.array([[0]])), shape=array_shape)
# Should be the same?
print(m_dense.shape) # --> (1,)
print(m_sparse.shape) # --> (Array(1, dtype=int32),)
print(m_sparse.todense().shape) # --> (1,)
jit(lambda _: m_dense[0])(None) # Works fine
jit(lambda _: m_sparse[0])(None) # Throws TracerBoolConversionError
```
### What jax/jaxlib version are you using?
jax==0.4.14 jaxlib==0.4.14+cuda12.cudnn89
### Which accelerator(s) are you using?
GPU
### Additional system info
_No response_
### NVIDIA GPU info
_No response_
| Thanks for the report – the shape is meant to be a sequence of integers, not a sequence of arrays: https://github.com/google/jax/blob/7224c24521b4bbd25af3ed3619cd59d377d6828e/jax/experimental/sparse/bcoo.py#L2469
It seems the implementation never validates this, but you may encounter errors if you pass unexpected values! | 2023-11-28T22:39:19 |
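Until validation lands, a workaround consistent with the note above is to make sure the shape entries are plain Python integers (sketch):
```python
import jax.numpy as jnp
from jax.experimental.sparse import BCOO

i = jnp.int32(1)
m_sparse = BCOO((jnp.array([1]), jnp.array([[0]])), shape=(int(i),))
print(m_sparse.shape)  # (1,), a plain tuple of ints, as with dense arrays
```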
|
google/jax | 18,740 | google__jax-18740 | [
"18737"
]
| 57e19db104d545f68ff06df5a5069ea727262ada | diff --git a/jax/experimental/shard_map.py b/jax/experimental/shard_map.py
--- a/jax/experimental/shard_map.py
+++ b/jax/experimental/shard_map.py
@@ -46,7 +46,8 @@
from jax._src.core import Tracer
from jax._src.api import _shared_code_pmap, _prepare_pmap
from jax._src.lax import (lax, parallel as lax_parallel, slicing,
- windowed_reductions, fft, linalg, control_flow)
+ windowed_reductions, convolution, fft, linalg,
+ control_flow)
from jax._src.util import (HashableFunction, HashablePartial, unzip2, unzip3,
as_hashable_function, memoize, partition_list,
merge_lists, split_list, subs_list2)
@@ -966,7 +967,8 @@ def _standard_collective_rewrite(prim, mesh, in_rep, x, axis_name, **params):
for o in it.chain(lax.__dict__.values(), slicing.__dict__.values(),
- windowed_reductions.__dict__.values(), fft.__dict__.values(),
+ windowed_reductions.__dict__.values(),
+ convolution.__dict__.values(), fft.__dict__.values(),
linalg.__dict__.values(), ops.__dict__.values(),
ad_util.__dict__.values(), prng.__dict__.values()):
if isinstance(o, core.Primitive):
| diff --git a/tests/shard_map_test.py b/tests/shard_map_test.py
--- a/tests/shard_map_test.py
+++ b/tests/shard_map_test.py
@@ -1264,6 +1264,22 @@ def f(x):
jax.grad(loss)(3.0, jnp.arange(8.)) # don't crash
+ def test_conv_general_dilated(self):
+ mesh = jtu.create_global_mesh((4,), ('i',))
+
+ dot = partial(lax.conv_general_dilated, window_strides=(),
+ padding='VALID', dimension_numbers=('NC', 'IO', 'NC'))
+
+ @partial(shard_map, mesh=mesh, in_specs=(P(None, 'i'), P('i', None)),
+ out_specs=P(None, None))
+ def f(x, y):
+ return lax.psum(dot(x, y), 'i')
+
+ a = jnp.ones((16, 32))
+ b = jnp.ones((32, 8))
+ y = f(a, b) # don't crash
+ self.assertAllClose(y, a @ b, check_dtypes=False, atol=1e-2, rtol=1e-2)
+
class FunSpec(NamedTuple):
name: str
| Add replication rules for convolutions
### Description
I'm running with shard_map in an attempt to replicate my previous pmap setup and am getting the following:
```
error:NotImplementedError: No replication rule for conv_general_dilated. As a workaround, pass the `check_rep=False` argument to `shard_map`. To get this fixed, open an issue at https://github.com/google/jax/issues
```
### What jax/jaxlib version are you using?
0.4.20 0.4.18
### Which accelerator(s) are you using?
CPU/GPU/TPU
### Additional system info?
_No response_
### NVIDIA GPU info
_No response_
| 2023-11-30T00:59:16 |
|
google/jax | 18,797 | google__jax-18797 | [
"18792"
]
| a137edc4f30d2abecd8c6f095b848e6caf2a1de2 | diff --git a/jax/_src/scipy/special.py b/jax/_src/scipy/special.py
--- a/jax/_src/scipy/special.py
+++ b/jax/_src/scipy/special.py
@@ -58,6 +58,14 @@ def gamma(x: ArrayLike) -> Array:
)(_betaln_impl)
+@_wraps(osp_special.factorial, module='scipy.special')
+def factorial(n: ArrayLike, exact: bool = False) -> Array:
+ if exact:
+ raise NotImplementedError("factorial with exact=True")
+ n, = promote_args_inexact("factorial", n)
+ return jnp.where(n < 0, 0, lax.exp(lax.lgamma(n + 1)))
+
+
@_wraps(osp_special.beta, module='scipy.special')
def beta(x: ArrayLike, y: ArrayLike) -> Array:
x, y = promote_args_inexact("beta", x, y)
diff --git a/jax/scipy/special.py b/jax/scipy/special.py
--- a/jax/scipy/special.py
+++ b/jax/scipy/special.py
@@ -30,6 +30,7 @@
expi as expi,
expit as expit,
expn as expn,
+ factorial as factorial,
gammainc as gammainc,
gammaincc as gammaincc,
gammaln as gammaln,
| diff --git a/tests/lax_scipy_special_functions_test.py b/tests/lax_scipy_special_functions_test.py
--- a/tests/lax_scipy_special_functions_test.py
+++ b/tests/lax_scipy_special_functions_test.py
@@ -89,6 +89,9 @@ def op_record(name, nargs, dtypes, rng_factory, test_grad, nondiff_argnums=(), t
op_record(
"gammaln", 1, float_dtypes, jtu.rand_positive, False
),
+ op_record(
+ "factorial", 1, float_dtypes, jtu.rand_default, True
+ ),
op_record(
"i0", 1, float_dtypes, jtu.rand_default, True
),
| Add factorial to `jax.scipy.special`
It would be useful to have the `scipy.special.factorial` function, even just to have the floating point approximate form from the gamma function.
```
def factorial(n):
    return gamma(n + 1)
```
Thanks!
Please:
- [x] Check for duplicate requests.
- [ ] Describe your goal, and if possible provide a code snippet with a motivating example.
| Makes sense – we could probably implement `exact=True` as well by pre-computing the 13 or so factorials that are representable in int32. | 2023-12-04T14:14:22 |
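A sketch of that lookup-table idea (hypothetical helper; integer input assumed, and values outside 0..12, which would overflow int32, are mapped to 0 here only as a placeholder):
```python
import jax.numpy as jnp

# 12! = 479001600 is the largest factorial representable in int32.
_FACTORIALS_INT32 = jnp.array(
    [1, 1, 2, 6, 24, 120, 720, 5040, 40320, 362880,
     3628800, 39916800, 479001600], dtype=jnp.int32)

def factorial_exact(n):
    n = jnp.asarray(n)
    return jnp.where((n < 0) | (n > 12), 0,
                     _FACTORIALS_INT32[jnp.clip(n, 0, 12)])

print(factorial_exact(jnp.arange(5)))  # [ 1  1  2  6 24]
```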
google/jax | 18,845 | google__jax-18845 | [
"18802"
]
| eba08ed74de2a28bf8692aad50d322702bf88fec | diff --git a/jax/_src/core.py b/jax/_src/core.py
--- a/jax/_src/core.py
+++ b/jax/_src/core.py
@@ -3086,7 +3086,10 @@ def _pp_eqn(eqn, context, settings) -> pp.Doc:
rhs = [pp.text(eqn.primitive.name, annotation=name_stack_annotation),
pp_kv_pairs(sorted(eqn.params.items()), context, settings),
pp.text(" ") + pp_vars(eqn.invars, context)]
- return pp.concat([lhs, pp.text(" = ", annotation=annotation), *rhs])
+ if lhs.format():
+ return pp.concat([lhs, pp.text(" = ", annotation=annotation), *rhs])
+ else:
+ return pp.concat(rhs)
CustomPpEqnRule = Callable[[JaxprEqn, JaxprPpContext, JaxprPpSettings], pp.Doc]
pp_eqn_rules: dict[Primitive, CustomPpEqnRule] = {}
| diff --git a/tests/api_test.py b/tests/api_test.py
--- a/tests/api_test.py
+++ b/tests/api_test.py
@@ -6197,6 +6197,14 @@ def test_convert_element_type_literal_constant_folding(self):
jaxpr = api.make_jaxpr(lambda: cet(3.))()
self.assertLen(jaxpr.eqns, 0)
+ def test_eqn_repr_with_no_lhs(self):
+ def f(x):
+ jax.debug.print("{}", x)
+ return x
+ jaxpr = jax.make_jaxpr(f)(np.int32(0))
+ self.assertEqual(jaxpr.eqns[0].primitive, jax._src.debugging.debug_callback_p)
+ self.assertStartsWith(str(jaxpr.eqns[0]), "debug_callback[", )
+
class DCETest(jtu.JaxTestCase):
| `debug.print` looks strange in jaxpr representation
```python
import jax
def f(x):
jax.debug.print("{}", x)
return x
jax.make_jaxpr(f)(1)
```
```
{ lambda ; a:i32[]. let
= debug_callback[
callback=<function debug_callback.<locals>._flat_callback at 0x7c30cf833ac0>
effect=Debug
] a
in (a,) }
```
Should there be a variable (or a `_`) in front of the `=` sign? Or should the `=` sign be removed?
| 2023-12-06T18:45:59 |
|
google/jax | 18,854 | google__jax-18854 | [
"18851"
]
| 5bdc3035706887ba5dd3e6244d815cc27654dd72 | diff --git a/jax/experimental/shard_map.py b/jax/experimental/shard_map.py
--- a/jax/experimental/shard_map.py
+++ b/jax/experimental/shard_map.py
@@ -48,7 +48,7 @@
from jax._src.api import _shared_code_pmap, _prepare_pmap
from jax._src.lax import (lax, parallel as lax_parallel, slicing,
windowed_reductions, convolution, fft, linalg,
- control_flow)
+ special, control_flow)
from jax._src.util import (HashableFunction, HashablePartial, unzip2, unzip3,
as_hashable_function, memoize, partition_list,
merge_lists, split_list, subs_list2,
@@ -909,9 +909,10 @@ def _standard_collective_rewrite(prim, mesh, in_rep, x, axis_name, **params):
for o in it.chain(lax.__dict__.values(), slicing.__dict__.values(),
windowed_reductions.__dict__.values(),
- convolution.__dict__.values(), fft.__dict__.values(),
- linalg.__dict__.values(), ops.__dict__.values(),
- ad_util.__dict__.values(), prng.__dict__.values()):
+ special.__dict__.values(), convolution.__dict__.values(),
+ fft.__dict__.values(), linalg.__dict__.values(),
+ ops.__dict__.values(), ad_util.__dict__.values(),
+ prng.__dict__.values()):
if isinstance(o, core.Primitive):
register_standard_check(o)
register_standard_rewrite(o)
| diff --git a/tests/shard_map_test.py b/tests/shard_map_test.py
--- a/tests/shard_map_test.py
+++ b/tests/shard_map_test.py
@@ -1298,6 +1298,12 @@ def test_custom_jvp_inside_jit(self):
mesh=mesh, in_specs=P('batch'),
out_specs=P('batch'))(jnp.arange(16.)) # don't crash
+ def test_random_normal_rules(self):
+ mesh = jtu.create_global_mesh((4,), ('i',))
+ keys = jax.random.split(jax.random.key(0), 4)
+ shard_map(lambda k: jax.random.normal(k[0], (1,)),
+ mesh=mesh, in_specs=P('i'), out_specs=P('i'))(keys) # don't crash
+
class FunSpec(NamedTuple):
name: str
| `shard_map` throws a `NotImplementedError: No replication rule for erf_inv`
### Description
`shard_map` throws a `NotImplementedError: No replication rule for erf_inv`
Repro:
```
# Create a mesh and annotate the axis with a name.
device_mesh = mesh_utils.create_device_mesh((8,))
mesh = Mesh(devices=device_mesh, axis_names=('data',))
data_sharding = NamedSharding(mesh, PartitionSpec('data',))
class Model(nn.Module):
@nn.compact
def __call__(self, x, add_noise):
x = nn.Dense(1)(x)
# use jnp.where for control flow; for more details see: https://jax.readthedocs.io/en/latest/errors.html#jax.errors.TracerBoolConversionError
return jnp.where(
add_noise, x + jax.random.normal(self.make_rng('params'), x.shape), x
)
module = Model()
init_rng, apply_rng = jax.random.split(jax.random.key(0))
# get 8 different rng's that will be used by the 8 devices when doing forward inference
apply_rng = jax.random.split(apply_rng, 8)
x = jnp.ones((8, 1))
variables = module.init(init_rng, x, False)
def forward(variables, x, add_noise, rng_key_batch):
# rng_key_batch is a batch of size 1 containing 1 PRNG key
# index slice into the rng_key_batch to access the PRNG key
return module.apply(
variables, x, add_noise, rngs={'params': rng_key_batch[0]}
)
# define partition specifications
data_pspec = PartitionSpec('data')
no_pspec = PartitionSpec()
# shard the inputs x and rng keys across devices
# replicate the variables and add_noise boolean across devices
# shard the output across devices
# set `check_rep=False`, otherwise we get a Jax error
shmap_forward = shard_map(
forward,
mesh=mesh,
in_specs=(no_pspec, data_pspec, no_pspec, data_pspec),
out_specs=data_pspec,
)
out = shmap_forward(variables, x, True, apply_rng)
```
If we add `check_rep=False`, we don't get an error:
```
shmap_forward = shard_map(
forward,
mesh=mesh,
in_specs=(no_pspec, data_pspec, no_pspec, data_pspec),
out_specs=data_pspec,
check_rep=False,
)
```
### What jax/jaxlib version are you using?
0.4.21
### Which accelerator(s) are you using?
CPU
### Additional system info?
_No response_
### NVIDIA GPU info
_No response_
| 2023-12-06T23:06:22 |
|
google/jax | 18,946 | google__jax-18946 | [
"18937"
]
| f210b0f95a15181d37803c669fa91fe0fc4f9cb7 | diff --git a/jax/_src/core.py b/jax/_src/core.py
--- a/jax/_src/core.py
+++ b/jax/_src/core.py
@@ -2102,6 +2102,9 @@ def _canonicalize_dimension(dim: DimSize) -> DimSize:
except TypeError as e:
type_error = e
if isinstance(dim, Tracer) and config.dynamic_shapes.value:
+ if not (dim.ndim == 0 and (dtypes.issubdtype(dim.dtype, np.integer)
+ or isinstance(dim.dtype, bint))):
+ raise TypeError(f"Dimensions must be integer scalars; got {dim.ndim=} {dim.dtype=}")
return dim
elif (config.dynamic_shapes.value and isinstance(dim, DArray) and
type(dim._aval.dtype) is bint and not dim._aval.shape):
@@ -2138,11 +2141,16 @@ def canonicalize_dim(d: DimSize, context: str="") -> DimSize:
return canonicalize_shape((d,), context)[0]
def _invalid_shape_error(shape: Shape, context: str=""):
- msg = ("Shapes must be 1D sequences of concrete values of integer type, "
- f"got {shape}.")
+ if config.dynamic_shapes.value:
+ msg = ("Shapes must be 1D sequences of integer scalars, "
+ f"got {shape}")
+ else:
+ msg = ("Shapes must be 1D sequences of concrete values of integer type, "
+ f"got {shape}.")
if context:
msg += f" {context}."
- if any(isinstance(x, Tracer) and isinstance(get_aval(x), ShapedArray)
+ if not config.dynamic_shapes.value and any(
+ isinstance(x, Tracer) and isinstance(get_aval(x), ShapedArray)
and not isinstance(get_aval(x), ConcreteArray) for x in shape):
msg += ("\nIf using `jit`, try using `static_argnums` or applying `jit` to "
"smaller subfunctions.")
| diff --git a/tests/dynamic_api_test.py b/tests/dynamic_api_test.py
--- a/tests/dynamic_api_test.py
+++ b/tests/dynamic_api_test.py
@@ -621,6 +621,14 @@ def test_flattening_basic(self):
jaxpr = jax.make_jaxpr(lambda x: x.reshape(-1, 12), abstracted_axes={0: 'n'})(x)
self.assertLessEqual(len(jaxpr.jaxpr.eqns), 3)
+ def test_shape_validation(self):
+ # Regression test for https://github.com/google/jax/issues/18937
+ msg = r"Shapes must be 1D sequences of integer scalars, got .+"
+ with self.assertRaisesRegex(TypeError, msg):
+ jax.make_jaxpr(jnp.ones)(5.0)
+ with self.assertRaisesRegex(TypeError, msg):
+ jax.make_jaxpr(jnp.ones)(jnp.ones((2, 2)))
+
@unittest.skip("Test does not work with jax.Array")
@jtu.with_config(jax_dynamic_shapes=True, jax_numpy_rank_promotion="allow")
class DynamicShapeAutodiffTest(jtu.JaxTestCase):
| [jax_dynamic_shapes] specifying shapes with floating point abstract values does not raise a TypeError
### Description
The following example produces valid StableHLO but should fail during tracing. Note that the StableHLO is valid for alternative lowering strategies; XLA's lowering strategy will raise an error during compilation.
```python
import jax
jax.config.update("jax_enable_x64", True)
jax.config.update("jax_platform_name", "cpu")
jax.config.update("jax_dynamic_shapes", True)
@jax.jit
def test_this(size):
return jax.numpy.ones([size]) # <-- size is a float
print(test_this(1.0))
```
Normally without dynamic_shapes the following error is raised:
```python
import jax
jax.config.update("jax_enable_x64", True)
jax.config.update("jax_platform_name", "cpu")
@jax.jit
def test_this(size):
return jax.numpy.ones([size]) # <-- size is a float
print(test_this(1.0))
```
```
Traceback (most recent call last):
File "/home/erick.ochoalopez/Code/cataliist/test.py", line 10, in <module>
print(test_this(1.0))
File "/home/erick.ochoalopez/Code/env/lib/python3.10/site-packages/jax/_src/traceback_util.py", line 177, in reraise_with_filtered_traceback
return fun(*args, **kwargs)
File "/home/erick.ochoalopez/Code/env/lib/python3.10/site-packages/jax/_src/pjit.py", line 255, in cache_miss
outs, out_flat, out_tree, args_flat, jaxpr = _python_pjit_helper(
File "/home/erick.ochoalopez/Code/env/lib/python3.10/site-packages/jax/_src/pjit.py", line 161, in _python_pjit_helper
args_flat, _, params, in_tree, out_tree, _, _, _ = infer_params_fn(
File "/home/erick.ochoalopez/Code/env/lib/python3.10/site-packages/jax/_src/api.py", line 317, in infer_params
return pjit.common_infer_params(pjit_info_args, *args, **kwargs)
File "/home/erick.ochoalopez/Code/env/lib/python3.10/site-packages/jax/_src/pjit.py", line 491, in common_infer_params
jaxpr, consts, canonicalized_out_shardings_flat, out_layouts_flat = _pjit_jaxpr(
File "/home/erick.ochoalopez/Code/env/lib/python3.10/site-packages/jax/_src/pjit.py", line 989, in _pjit_jaxpr
jaxpr, final_consts, out_type = _create_pjit_jaxpr(
File "/home/erick.ochoalopez/Code/env/lib/python3.10/site-packages/jax/_src/linear_util.py", line 349, in memoized_fun
ans = call(fun, *args)
File "/home/erick.ochoalopez/Code/env/lib/python3.10/site-packages/jax/_src/pjit.py", line 934, in _create_pjit_jaxpr
jaxpr, global_out_avals, consts = pe.trace_to_jaxpr_dynamic(
File "/home/erick.ochoalopez/Code/env/lib/python3.10/site-packages/jax/_src/profiler.py", line 334, in wrapper
return func(*args, **kwargs)
File "/home/erick.ochoalopez/Code/env/lib/python3.10/site-packages/jax/_src/interpreters/partial_eval.py", line 2283, in trace_to_jaxpr_dynamic
jaxpr, out_avals, consts = trace_to_subjaxpr_dynamic(
File "/home/erick.ochoalopez/Code/env/lib/python3.10/site-packages/jax/_src/interpreters/partial_eval.py", line 2305, in trace_to_subjaxpr_dynamic
ans = fun.call_wrapped(*in_tracers_)
File "/home/erick.ochoalopez/Code/env/lib/python3.10/site-packages/jax/_src/linear_util.py", line 191, in call_wrapped
ans = self.f(*args, **dict(self.params, **kwargs))
File "/home/erick.ochoalopez/Code/cataliist/test.py", line 9, in test_this
return jax.numpy.ones([size])
File "/home/erick.ochoalopez/Code/env/lib/python3.10/site-packages/jax/_src/numpy/lax_numpy.py", line 2291, in ones
shape = canonicalize_shape(shape)
File "/home/erick.ochoalopez/Code/env/lib/python3.10/site-packages/jax/_src/numpy/lax_numpy.py", line 82, in canonicalize_shape
return core.canonicalize_shape(shape, context) # type: ignore
File "/home/erick.ochoalopez/Code/env/lib/python3.10/site-packages/jax/_src/core.py", line 2077, in canonicalize_shape
raise _invalid_shape_error(shape, context)
TypeError: Shapes must be 1D sequences of concrete values of integer type, got [Traced<ShapedArray(float64[], weak_type=True)>with<DynamicJaxprTrace(level=1/0)>].
If using `jit`, try using `static_argnums` or applying `jit` to smaller subfunctions.
The error occurred while tracing the function test_this at /home/erick.ochoalopez/Code/cataliist/test.py:7 for jit. This concrete value was not available in Python because it depends on the value of the argument size.
```
I've narrowed down the error to [`_canonicalize_dimension`](https://github.com/google/jax/blob/b077483bfaaf197b79717a86bee3e626474e93f2/jax/_src/core.py#L2056C1-L2070C21) inlined below:
```python
def _canonicalize_dimension(dim: DimSize) -> DimSize:
# Dimensions are most commonly integral (by far), so we check that first.
try:
return operator.index(dim)
except TypeError as e:
type_error = e
if isinstance(dim, Tracer) and config.dynamic_shapes.value: # This check is not sufficient. It should also check that the tracer is of integer type.
return dim
elif (config.dynamic_shapes.value and isinstance(dim, DArray) and
type(dim._aval.dtype) is bint and not dim._aval.shape):
return dim
elif is_dim(dim):
return dim
else:
raise type_error
```
Happy to submit a PR, perhaps next year.
### What jax/jaxlib version are you using?
0.4.21
### Which accelerator(s) are you using?
CPU
### Additional system info?
1.26.1 3.10.11 (main, May 13 2023, 12:07:51) [GCC 9.4.0] uname_result(system='Linux', node='DL7420-GS4N1J3', release='5.15.0-88-generic', version='#98~20.04.1-Ubuntu SMP Mon Oct 9 16:43:45 UTC 2023', machine='x86_64')
### NVIDIA GPU info
_No response_
| 2023-12-12T20:57:06 |
|
google/jax | 18,961 | google__jax-18961 | [
"18955"
]
| 77c08b4adaedf8404997e5ee76cd0f5ed751a1df | diff --git a/jax/experimental/shard_map.py b/jax/experimental/shard_map.py
--- a/jax/experimental/shard_map.py
+++ b/jax/experimental/shard_map.py
@@ -917,6 +917,12 @@ def _standard_collective_rewrite(prim, mesh, in_rep, x, axis_name, **params):
register_standard_check(o)
register_standard_rewrite(o)
+for p in [control_flow.loops.cumsum_p, control_flow.loops.cumlogsumexp_p,
+ control_flow.loops.cumprod_p, control_flow.loops.cummax_p,
+ control_flow.loops.cummin_p]:
+ register_standard_check(p)
+ register_standard_rewrite(p)
+
@register_check(lax_parallel.psum_p)
def _psum_check(_, *in_rep, axes, axis_index_groups):
| diff --git a/tests/shard_map_test.py b/tests/shard_map_test.py
--- a/tests/shard_map_test.py
+++ b/tests/shard_map_test.py
@@ -1294,6 +1294,12 @@ def f(x, y):
y = f(a, b) # don't crash
self.assertAllClose(y, a @ b, check_dtypes=False, atol=1e-2, rtol=1e-2)
+ def test_cumsum(self):
+ mesh = jtu.create_global_mesh((4,), ('i',))
+ x = jnp.arange(8.)
+ shard_map(jnp.cumsum, mesh=mesh, in_specs=P('i'), out_specs=P('i')
+ )(x) # don't crash
+
def test_custom_jvp_inside_jit(self):
mesh = jtu.create_global_mesh((4,), ('batch',))
x = shard_map(jax.jit(jax.nn.relu),
| Shard map + CumSum/conv_general_dilated missing replication rule
I stumbled upon
> NotImplementedError: No replication rule for cumsum. As a workaround, pass the `check_rep=False` argument to `shard_map`. To get this fixed, open an issue at https://github.com/google/jax/issues (edited)
And in another piece of code:
> No replication rule for conv_general_dilated. As a workaround, pass the `check_rep=False` argument to `shard_map`. To get this fixed, open an issue at
Do you need reproducers?
| Nope, I suspect the error is self-explanatory to @mattjj
Thanks for reporting this! No need for a reproducer. I'm surprised I missed conv_general_dilated...
What's your JAX version? I think #18740 added `conv_general_dilated` two weeks ago on HEAD. | 2023-12-13T17:54:33 |
google/jax | 19,034 | google__jax-19034 | [
"19011"
]
| cc2a3eb5641a1dc65f958efba819d3c22519d4eb | diff --git a/jax/_src/dtypes.py b/jax/_src/dtypes.py
--- a/jax/_src/dtypes.py
+++ b/jax/_src/dtypes.py
@@ -100,6 +100,13 @@ def type(self) -> type: ...
_float8_e5m2_dtype: np.dtype = np.dtype(float8_e5m2)
_float8_e5m2fnuz_dtype: np.dtype = np.dtype(float8_e5m2fnuz)
+def supports_inf(dtype: DTypeLike) -> bool:
+ """Return true if the dtype supports infinity, else return False."""
+ typ = np.dtype(dtype).type
+ if typ in {float8_e4m3b11fnuz, float8_e4m3fn, float8_e4m3fnuz, float8_e5m2fnuz}:
+ return False
+ return issubdtype(dtype, np.inexact)
+
# bfloat16 support
bfloat16: type[np.generic] = ml_dtypes.bfloat16
_bfloat16_dtype: np.dtype = np.dtype(bfloat16)
diff --git a/jax/_src/lax/lax.py b/jax/_src/lax/lax.py
--- a/jax/_src/lax/lax.py
+++ b/jax/_src/lax/lax.py
@@ -1085,7 +1085,8 @@ def _get_prod_identity(dtype: DTypeLike) -> np.ndarray:
def _get_max_identity(dtype: DTypeLike) -> np.ndarray:
if dtypes.issubdtype(dtype, np.inexact):
- return np.array(-np.inf, dtype)
+ return np.array(-np.inf if dtypes.supports_inf(dtype) else dtypes.finfo(dtype).min,
+ dtype=dtype)
elif dtypes.issubdtype(dtype, np.integer):
return np.array(dtypes.iinfo(dtype).min, dtype)
elif dtypes.issubdtype(dtype, np.bool_):
@@ -1095,7 +1096,8 @@ def _get_max_identity(dtype: DTypeLike) -> np.ndarray:
def _get_min_identity(dtype: DTypeLike) -> np.ndarray:
if dtypes.issubdtype(dtype, np.inexact):
- return np.array(np.inf, dtype)
+ return np.array(np.inf if dtypes.supports_inf(dtype) else dtypes.finfo(dtype).max,
+ dtype=dtype)
elif dtypes.issubdtype(dtype, np.integer):
return np.array(dtypes.iinfo(dtype).max, dtype)
elif dtypes.issubdtype(dtype, np.bool_):
diff --git a/jax/_src/numpy/reductions.py b/jax/_src/numpy/reductions.py
--- a/jax/_src/numpy/reductions.py
+++ b/jax/_src/numpy/reductions.py
@@ -167,6 +167,10 @@ def _reduction_init_val(a: ArrayLike, init_val: Any) -> np.ndarray:
a_dtype = dtypes.canonicalize_dtype(dtypes.dtype(a))
if a_dtype == 'bool':
return np.array(init_val > 0, dtype=a_dtype)
+ if (np.isinf(init_val) and dtypes.issubdtype(a_dtype, np.floating)
+ and not dtypes.supports_inf(a_dtype)):
+ init_val = np.array(dtypes.finfo(a_dtype).min if np.isneginf(init_val)
+ else dtypes.finfo(a_dtype).max, dtype=a_dtype)
try:
return np.array(init_val, dtype=a_dtype)
except OverflowError:
| diff --git a/jax/_src/public_test_util.py b/jax/_src/public_test_util.py
--- a/jax/_src/public_test_util.py
+++ b/jax/_src/public_test_util.py
@@ -93,8 +93,14 @@ def _assert_numpy_allclose(a, b, atol=None, rtol=None, err_msg=''):
np.testing.assert_array_equal(a, b, err_msg=err_msg)
return
- custom_float_dtypes = [_dtypes.float8_e4m3b11fnuz, _dtypes.float8_e4m3fn,
- _dtypes.float8_e5m2, _dtypes.bfloat16]
+ custom_float_dtypes = [
+ _dtypes.float8_e4m3b11fnuz,
+ _dtypes.float8_e4m3fn,
+ _dtypes.float8_e4m3fnuz,
+ _dtypes.float8_e5m2,
+ _dtypes.float8_e5m2fnuz,
+ _dtypes.bfloat16,
+ ]
def maybe_upcast(x):
if x.dtype in custom_float_dtypes:
return x.astype(np.float32)
diff --git a/tests/lax_numpy_reducers_test.py b/tests/lax_numpy_reducers_test.py
--- a/tests/lax_numpy_reducers_test.py
+++ b/tests/lax_numpy_reducers_test.py
@@ -46,6 +46,7 @@
nonempty_shapes = scalar_shapes + nonempty_array_shapes
all_shapes = scalar_shapes + array_shapes
+custom_float_dtypes = jtu.dtypes.custom_floats
float_dtypes = jtu.dtypes.all_floating
complex_dtypes = jtu.dtypes.complex
int_dtypes = jtu.dtypes.all_integer
@@ -113,8 +114,8 @@ def op_record(name, nargs, dtypes, shapes, rng_factory, diff_modes,
op_record("prod", 1, all_dtypes, all_shapes, jtu.rand_small_positive, []),
op_record("sum", 1, all_dtypes, all_shapes, jtu.rand_default, [],
tolerance={jnp.bfloat16: 2e-2}),
- op_record("max", 1, all_dtypes, all_shapes, jtu.rand_default, []),
- op_record("min", 1, all_dtypes, all_shapes, jtu.rand_default, []),
+ op_record("max", 1, all_dtypes + custom_float_dtypes, all_shapes, jtu.rand_default, []),
+ op_record("min", 1, all_dtypes + custom_float_dtypes, all_shapes, jtu.rand_default, []),
op_record("nanprod", 1, inexact_dtypes, all_shapes, jtu.rand_small_positive, []),
op_record("nansum", 1, inexact_dtypes, all_shapes, jtu.rand_default, [],
tolerance={jnp.bfloat16: 3e-2}),
| jnp.min or jnp.max returns nan if input array dtype is fp8
### Description
Below is a toy example to reproduce; it reproduces on CPU, TPU, and GPU. Tested on Colab.
```python
import jax
import jax.numpy as jnp
import jax.lax as lax
# Define a function to generate random arrays
def random_array(shape):
  return jax.random.uniform(jax.random.PRNGKey(0), shape, minval=-1, maxval=1)
test = random_array((2,))
print(test, test.astype(jnp.float8_e4m3fn))
print(jnp.isnan(test))
print(jnp.min(test), jnp.max(test))
print(jnp.isnan(test.astype(jnp.float8_e4m3fn)))
print(jnp.min(test.astype(jnp.float8_e4m3fn)), jnp.max(test.astype(jnp.float8_e4m3fn)))
[0.21629536 0.8041241 ] [0.21875 0.8125]
[False False]
0.21629536 0.8041241
[False False]
nan nan
```
### What jax/jaxlib version are you using?
jax v0.4.22
### Which accelerator(s) are you using?
CPU/GPU/TPU
### Additional system info?
1.26.2 3.11.6 (stable, redacted, redacted) [Clang google3-trunk (af7a1453526a88a0e242baf156244aa4ae42ae4b)] uname_result(system='Linux', node='11a0354b588110fa-30029b6032f.borgtask.google.com', release='5.10.0-smp-1100.464.0.0', version='#1 [v5.10.0-1100.464.0.0] SMP @1700037056', machine='x86_64')
### NVIDIA GPU info
```
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 525.85.12 Driver Version: 525.85.12 CUDA Version: 12.0 |
|-------------------------------+----------------------+----------------------+
| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|===============================+======================+======================|
| 0 Tesla V100-SXM2... Off | 00000000:B3:00.0 Off | 0 |
| N/A 40C P0 72W / 300W | 12449MiB / 16384MiB | 0% Default |
| | | N/A |
+-------------------------------+----------------------+----------------------+
+-----------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=============================================================================|
| 0 N/A N/A 88841 C .../mount/server/ml_notebook 12446MiB |
+-----------------------------------------------------------------------------+
```
| Hi, thanks for the question! 8-bit floating point types are experimental and in general you should not expect them to be compatible with most operations in JAX (we focus on matrix multiplication and a few related operations that `float8*` were designed for).
I believe the particular behavior here is caused by the fact that `float8_e4m3fn` cannot represent infinities (the "`f`" in the type name stands for "finite only") and the `min` and `max` computation assume the presence of $\pm\infty$ as the identity for the reduction.
With that in mind, this working as expected for the time being – we could certainly modify the `reduce_min`/`reduce_max` lowering code to account for this special case, but it's only one of many examples where `float8` inputs will not be correctly handled in JAX, and it's not clear at the moment that addressing those cases more comprehensively is a goal for the project.
If you are doing general floating point operations, you should stick with non-experimental dtypes. For this particular application, you might try `float8_e5m2` which is able to represent infinite values.
So thinking more about this, we probably should make `min` and `max` do something sane in this case, and we need to do a better job of explicitly erroring for unsupported fp8 operations. I'm going to take a look | 2023-12-18T21:38:25 |
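Until then, a simple workaround consistent with the explanation above is to upcast before reducing (sketch):
```python
import jax.numpy as jnp

x8 = jnp.array([0.21875, 0.8125], dtype=jnp.float8_e4m3fn)
# The reduction identity (+/-inf) is not representable in float8_e4m3fn,
# so do the min/max in a wider dtype.
print(jnp.min(x8.astype(jnp.float32)), jnp.max(x8.astype(jnp.float32)))
# 0.21875 0.8125 instead of nan nan
```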
google/jax | 19,061 | google__jax-19061 | [
"19059"
]
| 9d493997a9cf1493dd4a91f3c8e499711182c95c | diff --git a/jax/_src/lax/lax.py b/jax/_src/lax/lax.py
--- a/jax/_src/lax/lax.py
+++ b/jax/_src/lax/lax.py
@@ -2382,7 +2382,9 @@ def _convert_elt_type_folding_rule(consts, eqn):
if (type(c) in {np.ndarray, *dtypes.python_scalar_dtypes} and
isinstance(o.aval, core.UnshapedArray) and not np.shape(c) and
not dtypes.issubdtype(eqn.params['new_dtype'], dtypes.extended)):
- out = np.array(c, eqn.params['new_dtype'])
+ with warnings.catch_warnings():
+ warnings.simplefilter('ignore', np.ComplexWarning)
+ out = np.array(c).astype(eqn.params['new_dtype'])
if not o.aval.weak_type:
return [out], None
out = out.item()
| diff --git a/tests/lax_test.py b/tests/lax_test.py
--- a/tests/lax_test.py
+++ b/tests/lax_test.py
@@ -2712,6 +2712,27 @@ def f(x):
jax.hessian(f)(1.0) # don't crash
+ def test_constant_folding_complex_to_real_scan_regression(self):
+ # regression test for github.com/google/jax/issues/19059
+ def g(hiddens):
+ hiddens_aug = jnp.vstack((hiddens[0], hiddens))
+ new_hiddens = hiddens_aug.copy()
+ diff = new_hiddens[:-1] - hiddens
+ diff = new_hiddens[:-1] - hiddens
+ out = jnp.trace(jnp.conj(diff).T @ diff).real
+ return jnp.array(out, dtype=jnp.complex64)
+
+
+ def _step(carry, arg):
+ primals, f_vjp = jax.vjp(
+ g,
+ jax.random.normal(jax.random.key(0), (9, 8), dtype=jnp.complex64),
+ )
+ out = f_vjp(np.array(1.0 + 0j, 'complex64'))[0]
+ return carry, carry
+
+ a, b = jax.lax.scan(_step, 0, jnp.arange(4, dtype=jnp.complex64))
+
class LazyConstantTest(jtu.JaxTestCase):
def _Check(self, make_const, expected):
| `float()` argument `TypeError` in a scan.
### Discussed in https://github.com/google/jax/discussions/19057
<div type='discussions-op-text'>
<sup>Originally posted by **xaviergonzalez** December 19, 2023</sup>
I have a really weird jax bug with scan, I'm wondering if anyone else has seen similar issues?
Here's a minimal reproducible example:
```
import jax
import jax.numpy as jnp
import jax.random as jr
def g(hiddens):
hiddens_aug = jnp.vstack((hiddens[0], hiddens))
new_hiddens = hiddens_aug.copy()
diff = new_hiddens[:-1] - hiddens
diff = new_hiddens[:-1] - hiddens
out = jnp.trace(jnp.conj(diff).T @ diff).real
return jnp.array(out, dtype=jnp.complex64)
def _step(carry, arg):
primals, f_vjp = jax.vjp(
g,
jr.normal(jr.PRNGKey(0), (9, 8), dtype=jnp.complex64),
)
out = f_vjp(1.0 + 0j)[0]
return carry, carry
```
So far, so good. I've defined a function g that basically maps from $\mathbb{C}^8$ to $\mathbb{R}$ (cast to be complex for typing reasons), and used vjp to differentiate it (grad behaves a bit strangely with these functions). Now, I'd just like to do a lot of these iterations with a scan.
The following code works completely fine
```
primals, f_vjp = jax.vjp(g, jr.normal(jr.PRNGKey(0), (9,8), dtype=jnp.complex64))
out = f_vjp(1.0 + 0j)[0]
```
out has shape (9,8), and dtype complex64, as desired.
Note that this code is basically the two lines of `_step`, except the results aren't even returned.
However, the following code returns a TypeError
```
a, b = jax.lax.scan(_step, 0, jnp.arange(4, dtype=jnp.complex64))
```
Gives error:
```
TypeError Traceback (most recent call last)
<ipython-input-4-e5de119df33f> in <cell line: 1>()
----> 1 a, b = jax.lax.scan(_step, 0, jnp.arange(4, dtype=jnp.complex64))
[... skipping hidden 10 frame]
/usr/local/lib/python3.10/dist-packages/jax/_src/lax/lax.py in _convert_elt_type_folding_rule(consts, eqn)
2375 isinstance(o.aval, core.UnshapedArray) and not np.shape(c) and
2376 not dtypes.issubdtype(eqn.params['new_dtype'], dtypes.extended)):
-> 2377 out = np.array(c, eqn.params['new_dtype'])
2378 if not o.aval.weak_type:
2379 return [out], None
TypeError: float() argument must be a string or a real number, not 'complex'
```
Notice that `_step` doesn't even return anything relating to its intermediate computations! Moreover, I can see that when I comment out the line
```
out = f_vjp(1.0 + 0j)[0]
```
the error goes away.
I'm really at a loss as to why this line is completely fine outside of a scan, but crashes the scan, even though it is unrelated to either the input or output of `_step`. I've really been struggling to debug inside of the scan.
If anyone has seen this type of error before, or even just has tips about how to debug inside of a scan, I'd be super grateful to hear them.
| We're basically doing this inside a constant folding step:
```python
import numpy as np
np.array(1+0j, np.dtype('float32'))
```
which gives
```
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
Cell In[2], line 1
----> 1 np.array(1+0j, np.dtype('float32'))
TypeError: float() argument must be a string or a real number, not 'complex'
```
So our constant folding logic is wrong, just in that apparently NumPy doesn't like when we try to ask for a float32 array containing a complex (builtin) type. | 2023-12-20T04:56:10 |
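For reference, the cast NumPy does accept, which also matches what the folding rule wants here (drop the imaginary part):
```python
import numpy as np

# np.array(1+0j, np.dtype('float32'))    # TypeError, as shown above
out = np.array(1+0j).astype(np.float32)  # works; emits a ComplexWarning
print(out)  # 1.0
```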
google/jax | 19,072 | google__jax-19072 | [
"19066"
]
| 965fefdbbf040581b09490b2fa1fed31ac89bb50 | diff --git a/jax/_src/numpy/vectorize.py b/jax/_src/numpy/vectorize.py
--- a/jax/_src/numpy/vectorize.py
+++ b/jax/_src/numpy/vectorize.py
@@ -11,7 +11,9 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+from __future__ import annotations
+from collections.abc import Collection, Sequence
import functools
import re
from typing import Any, Callable
@@ -158,25 +160,27 @@ def wrapped(*args):
return wrapped
-def _apply_excluded(func, excluded, args):
+def _apply_excluded(func: Callable[..., Any],
+ excluded: Collection[int | str],
+ args: Sequence[Any],
+ kwargs: dict[str, Any]) -> tuple[Callable[..., Any], Sequence[Any], dict[str, Any]]:
"""Partially apply positional arguments in `excluded` to a function."""
if not excluded:
- return func, args
-
- if max(excluded) >= len(args):
- raise ValueError("excluded={!r} is invalid for {!r} argument(s)"
- .format(excluded, len(args)))
+ return func, args, kwargs
dynamic_args = [arg for i, arg in enumerate(args) if i not in excluded]
- static_args = [(i, args[i]) for i in sorted(excluded)]
+ dynamic_kwargs = {key: val for key, val in kwargs.items() if key not in excluded}
+ static_args = [(i, args[i]) for i in sorted(e for e in excluded if isinstance(e, int))
+ if i < len(args)]
+ static_kwargs = {key: val for key, val in kwargs.items() if key in excluded}
- def new_func(*args):
+ def new_func(*args, **kwargs):
args = list(args)
for i, arg in static_args:
args.insert(i, arg)
- return func(*args)
+ return func(*args, **kwargs, **static_kwargs)
- return new_func, dynamic_args
+ return new_func, dynamic_args, dynamic_kwargs
def vectorize(pyfunc, *, excluded=frozenset(), signature=None):
@@ -252,17 +256,17 @@ def vectorize(pyfunc, *, excluded=frozenset(), signature=None):
Traceback (most recent call last):
TypeError: dot_general requires contracting dimensions to have the same shape, got [3] and [4].
"""
- if any(not isinstance(exclude, int) for exclude in excluded):
- raise TypeError("jax.numpy.vectorize can only exclude integer arguments, "
+ if any(not isinstance(exclude, (str, int)) for exclude in excluded):
+ raise TypeError("jax.numpy.vectorize can only exclude integer or string arguments, "
"but excluded={!r}".format(excluded))
- if excluded and min(excluded) < 0:
+ if any(isinstance(e, int) and e < 0 for e in excluded):
raise ValueError(f"excluded={excluded!r} contains negative numbers")
@functools.wraps(pyfunc)
- def wrapped(*args):
+ def wrapped(*args, **kwargs):
error_context = ("on vectorized function with excluded={!r} and "
"signature={!r}".format(excluded, signature))
- excluded_func, args = _apply_excluded(pyfunc, excluded, args)
+ excluded_func, args, kwargs = _apply_excluded(pyfunc, excluded, args, kwargs)
if signature is not None:
input_core_dims, output_core_dims = _parse_gufunc_signature(signature)
@@ -274,7 +278,7 @@ def wrapped(*args):
if any(none_args):
if any(input_core_dims[i] != () for i in none_args):
raise ValueError(f"Cannot pass None at locations {none_args} with {signature=}")
- excluded_func, args = _apply_excluded(excluded_func, none_args, args)
+ excluded_func, args, _ = _apply_excluded(excluded_func, none_args, args, {})
input_core_dims = [dim for i, dim in enumerate(input_core_dims) if i not in none_args]
args = tuple(map(jnp.asarray, args))
| diff --git a/tests/lax_numpy_vectorize_test.py b/tests/lax_numpy_vectorize_test.py
--- a/tests/lax_numpy_vectorize_test.py
+++ b/tests/lax_numpy_vectorize_test.py
@@ -15,6 +15,7 @@
from functools import partial
from absl.testing import absltest
+import numpy as np
import jax
from jax import numpy as jnp
@@ -166,20 +167,33 @@ def f(x, y):
self.assertAllClose(x, f(x, 'foo'))
self.assertAllClose(x, jax.jit(f, static_argnums=1)(x, 'foo'))
+ def test_exclude_kwargs(self):
+ @partial(np.vectorize, excluded=(2, 'func'))
+ def f_np(x, y, func=np.add):
+ assert np.ndim(x) == np.ndim(y) == 0
+ return func(x, y)
+
+ @partial(jnp.vectorize, excluded=(2, 'func'))
+ def f_jnp(x, y, func=jnp.add):
+ assert x.ndim == y.ndim == 0
+ return func(x, y)
+
+ x = np.arange(4, dtype='int32')
+ y = np.int32(2)
+
+ self.assertArraysEqual(f_np(x, y), f_jnp(x, y))
+ self.assertArraysEqual(f_np(x, y, np.power), f_jnp(x, y, jnp.power))
+ self.assertArraysEqual(f_np(x, y, func=np.power), f_jnp(x, y, func=jnp.power))
+
def test_exclude_errors(self):
with self.assertRaisesRegex(
TypeError, "jax.numpy.vectorize can only exclude"):
- jnp.vectorize(lambda x: x, excluded={'foo'})
+ jnp.vectorize(lambda x: x, excluded={1.5})
with self.assertRaisesRegex(
ValueError, r"excluded=\{-1\} contains negative numbers"):
jnp.vectorize(lambda x: x, excluded={-1})
- f = jnp.vectorize(lambda x: x, excluded={1})
- with self.assertRaisesRegex(
- ValueError, r"excluded=\{1\} is invalid for 1 argument\(s\)"):
- f(1.0)
-
def test_bad_inputs(self):
matmat = jnp.vectorize(jnp.dot, signature='(n,m),(m,k)->(n,k)')
with self.assertRaisesRegex(
| `jnp.vectorize` should support keyword arguments in `excluded`
Requested in #19064
For example, this does not work:
```python
import jax.numpy as jnp
from functools import partial
@partial(jnp.vectorize, signature="(),()->()", excluded=[2, "func"])
def f(x, y, func=jnp.add):
return func(x, y)
print(f(jnp.arange(4), 2))
# TypeError: jax.numpy.vectorize can only exclude integer arguments, but excluded=[2, 'func']
```
But the equivalent does in numpy:
```python
import numpy as np
from functools import partial
@partial(np.vectorize, signature="(),()->()", excluded=[2, "func"])
def f(x, y, func=np.add):
return func(x, y)
print(f(np.arange(4), 2))
# [2 3 4 5]
print(f(np.arange(4), 2, np.subtract))
# [-2 -1 0 1]
print(f(np.arange(4), 2, func=np.subtract))
# [-2 -1 0 1]
```
| 2023-12-20T23:18:56 |
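A workaround on versions without keyword support in `excluded` (sketch): bind the keyword argument with `functools.partial` before vectorizing, so only array arguments are left for `jnp.vectorize` to handle.
```python
from functools import partial
import jax.numpy as jnp

def f(x, y, func=jnp.add):
    return func(x, y)

f_sub = jnp.vectorize(partial(f, func=jnp.subtract), signature="(),()->()")
print(f_sub(jnp.arange(4), 2))  # [-2 -1  0  1]
```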