| column | dtype | range / classes |
| --- | --- | --- |
| repo | stringclasses | 856 values |
| pull_number | int64 | 3 to 127k |
| instance_id | stringlengths | 12 to 58 |
| issue_numbers | sequencelengths | 1 to 5 |
| base_commit | stringlengths | 40 to 40 |
| patch | stringlengths | 67 to 1.54M |
| test_patch | stringlengths | 0 to 107M |
| problem_statement | stringlengths | 3 to 307k |
| hints_text | stringlengths | 0 to 908k |
| created_at | timestamp[s] | |
google/jax
976
google__jax-976
[ "969" ]
b9b49a3b3abc1cd1cc65174433abee2a9cd05df8
diff --git a/jax/api.py b/jax/api.py --- a/jax/api.py +++ b/jax/api.py @@ -110,6 +110,9 @@ def _jit(fun, static_argnums, device_values=True): def f_jitted(*args, **kwargs): if _jit_is_disabled or config.read('jax_disable_jit'): return fun(*args, **kwargs) + if isinstance(static_argnums, int): + # static_argnums is a tuple of ints + static_argnums = (static_argnums,) if static_argnums and max(static_argnums) >= len(args): msg = ("Jitted function has static_argnums={} but was called with only {}" " positional arguments.")
convert integer for `static_argnums` to tuple automatically I've made the same mistake a few times with `static_argnums` on the `jit` function by not passing a tuple, e.g. `jit(f, static_argnums=1)`. Would it be possible / reasonable to add a simple `if` statement to convert it to a tuple? The correct spelling is `jit(f, static_argnums=(1,))`; note also that `(1)` is not a tuple, but `(1,)` is.
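For illustration, a minimal usage sketch of what the request amounts to, assuming a JAX version that normalizes the integer as in the patch above; the function `apply_n_times` is a made-up example, not code from this PR.
```python
# Hypothetical usage sketch: with the int-to-tuple normalization in place,
# both spellings of static_argnums mark the second argument as static.
from jax import jit
import jax.numpy as np

def apply_n_times(x, n):
  # n drives Python-level control flow, so it must be a static argument.
  for _ in range(n):
    x = np.sin(x)
  return x

f_tuple = jit(apply_n_times, static_argnums=(1,))  # documented form
f_int = jit(apply_n_times, static_argnums=1)       # accepted after the change

print(f_tuple(np.ones(3), 3))
print(f_int(np.ones(3), 3))
```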
2019-07-04T17:30:33
google/jax
981
google__jax-981
[ "979" ]
527fe14838eadc48fede68bd42ce4364e269fed3
diff --git a/jax/lax/lax.py b/jax/lax/lax.py --- a/jax/lax/lax.py +++ b/jax/lax/lax.py @@ -1608,12 +1608,12 @@ def _brcast_to(x, shape): ad.deflinear(real_p, lambda t: [complex(t, onp.zeros((), _dtype(t)))]) imag_p = unop(_complex_basetype, _complex, 'imag') -ad.deflinear(imag_p, lambda t: [complex(onp.zeros((), _dtype(t)), t)]) +ad.defjvp(imag_p, lambda g, _: real(mul(_const(g, -1j), g))) _complex_dtype = lambda dtype, *args: (onp.zeros((), dtype) + onp.zeros((), onp.complex64)).dtype complex_p = binop(_complex_dtype, [_complex_elem_types, _complex_elem_types], 'complex') -ad.deflinear(complex_p, lambda t: [real(t), imag(t)]) +ad.deflinear(complex_p, lambda t: [real(t), imag(neg(t))]) conj_p = unop(_complex_dtype, _float | _complex, 'conj')
diff --git a/tests/lax_test.py b/tests/lax_test.py --- a/tests/lax_test.py +++ b/tests/lax_test.py @@ -1523,10 +1523,10 @@ def grad_test_spec(op, nargs, order, rng, dtypes, name=None, tol=None): grad_test_spec(lax.real, nargs=1, order=2, rng=jtu.rand_default(), dtypes=[onp.complex64]), - # grad_test_spec(lax.imag, nargs=1, order=2, rng=jtu.rand_default(), - # dtypes=[onp.complex64]), # TODO(mattjj): enable - # grad_test_spec(lax.complex, nargs=2, order=2, rng=jtu.rand_default(), - # dtypes=[onp.float32]), # TODO(mattjj): enable + grad_test_spec(lax.imag, nargs=1, order=2, rng=jtu.rand_default(), + dtypes=[onp.complex64]), + grad_test_spec(lax.complex, nargs=2, order=2, rng=jtu.rand_default(), + dtypes=[onp.float32]), grad_test_spec(lax.conj, nargs=1, order=2, rng=jtu.rand_default(), dtypes=[onp.float32, onp.complex64]), grad_test_spec(lax.abs, nargs=1, order=2, rng=jtu.rand_positive(),
Inconsistent convention for complex derivatives The convention for handling complex derivatives in JAX appears to be inconsistent. Consider the following two implementations of the absolute value function.
```py
import jax.numpy as np
from jax import grad

def f(x):
  return np.abs(x)

def g(x):
  return np.sqrt(np.real(x)**2 + np.imag(x)**2)

df = grad(f)
dg = grad(g)

print('gradient by df: ', df(1.0+0.5j))
print('gradient by dg: ', dg(1.0+0.5j))
```
The respective results differ by complex conjugation:
```
gradient by df:  (0.8944272-0.4472136j)
gradient by dg:  (0.8944272+0.4472136j)
```
Thanks for catching this! To check against our [Autograd](https://github.com/hips/autograd) implementation, I changed the two import lines at the top to ```python import autograd.numpy as np from autograd import grad ``` And got this: ``` ('gradient by df: ', (0.8944271909999159-0.4472135954999579j)) ('gradient by dg: ', (0.8944271909999159-0.4472135954999579j)) ``` So we've got a bug here, but at least we got it right when we wrote this in Autograd, and can perhaps compare to Autograd's implementation to figure out where we're going wrong. The same issue appears if we get rid of the `np.sqrt` and evaluate `np.abs(x) ** 2`. This may be a discrepancy: for `imag`, Autograd's [forward-mode](https://github.com/HIPS/autograd/blob/387c373115ddd54cff2c8ba6a9fc619f28639cfb/autograd/numpy/numpy_jvps.py#L119) and [reverse-mode](https://github.com/HIPS/autograd/blob/387c373115ddd54cff2c8ba6a9fc619f28639cfb/autograd/numpy/numpy_vjps.py#L126) rules both multiply by `-1j`, whereas in [JAX's rules](https://github.com/google/jax/blob/527fe14838eadc48fede68bd42ce4364e269fed3/jax/lax/lax.py#L1611) we don't and instead just treat it as a linear projection on R^2. That's a lead on what's going wrong. Separately I want to figure out why our tests didn't catch this. I think Autograd's JVP effectively just takes the imaginary part (because the Autograd JVP effectively evaluates `real(-1j * g)` and so if `g = a + bj` then `-1j * (a + bj) = b - aj`), which matches [JAX's JVP](https://github.com/google/jax/blob/master/jax/lax/lax.py#L1611) because `ad.deflinear` just means apply the primitive to the tangent. But Autograd's VJP rule, which corresponds to composing JAX's JVP rule with its transpose rule, introduces a negative sign: it evaluates `-1j * g`, whereas [JAX's transpose rule doesn't include a negative sign](https://github.com/google/jax/blob/527fe14838eadc48fede68bd42ce4364e269fed3/jax/lax/lax.py#L1611). By replacing JAX's transpose rule with this, all our tests pass _and_ the code in this thread works as expected: ```python ad.deflinear(imag_p, lambda t: [complex(onp.zeros((), _dtype(t)), neg(t))]) ``` But I still don't have a clear view of what's going on. The transpose rules are meant to be transposes rather than adjoints, and so I'd thought we could model `imag` as the action of the matrix `[0 1]` on R^2... Finally getting back to this! And I paged back in the [complex number convention](https://github.com/HIPS/autograd/blob/master/docs/tutorial.md#complex-numbers). We define JVPs and VJPs for a function f : C -> C in terms of this matrix as a linear map R^2 -> R^2: ![image](https://user-images.githubusercontent.com/1458824/60742793-9e4c8f00-9f23-11e9-9aa4-0bc2c1cf2839.png) where f(x, y) = u(x, y) + i * v(x, y). (We do this because if f is holomorphic then this matrix recovers the complex derivative, i.e. it's the real matrix representation of a single complex number, and because it looks just like the Jacobian of f up to flipping the signs of the off-diagonals, i.e. up to complex conjugation of the input and output.) In particular, to model the `imag` function we have f(x, y) = (y, 0) and so the matrix above has the form ![image](https://user-images.githubusercontent.com/1458824/60742495-70b31600-9f22-11e9-9afa-da5c20187705.png) That's where the negative comes from! As to why the tests didn't catch it, it seems [we left some complex derivative tests commented out](https://github.com/google/jax/blob/527fe14838eadc48fede68bd42ce4364e269fed3/tests/lax_test.py#L1526-L1527) and then forgot about them! Oops.
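A quick worked check, using only the numbers quoted in this thread: write $z = x + iy$ and $f(z) = |z| = \sqrt{x^2 + y^2}$, so

$$\frac{\partial |z|}{\partial x} = \frac{x}{|z|}, \qquad \frac{\partial |z|}{\partial y} = \frac{y}{|z|}.$$

At $z = 1 + 0.5j$ we have $|z| = \sqrt{1.25} \approx 1.1180$, giving partial derivatives of approximately $(0.8944, 0.4472)$. The Autograd result quoted above, $0.8944 - 0.4472j$, is therefore $\partial_x f - i\,\partial_y f$, i.e. the conjugate of the $\mathbb{R}^2$ gradient. The fixed rules reproduce this value for both `df` and `dg`, while the pre-fix `dg` path returned the un-conjugated value instead.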
2019-07-05T21:32:48
google/jax
1002
google__jax-1002
[ "1000" ]
29039bf155cbeedef72f720324c0dab4a9ad589b
diff --git a/jax/interpreters/partial_eval.py b/jax/interpreters/partial_eval.py --- a/jax/interpreters/partial_eval.py +++ b/jax/interpreters/partial_eval.py @@ -116,8 +116,7 @@ def process_call(self, call_primitive, f, tracers, params): out_pv_const, consts = call_primitive.bind(fun, *in_consts, **params) out_pv, jaxpr, env = aux() const_tracers = map(self.new_instantiated_const, consts) - env_tracers = map(self.full_raise, env) - bound_subjaxpr = (jaxpr, const_tracers, env_tracers) + bound_subjaxpr = (jaxpr, const_tracers, map(self.full_raise, env)) eqn = JaxprEqn(tracers, None, call_primitive, (bound_subjaxpr,), False, False, params) return JaxprTracer(self, PartialVal((out_pv, out_pv_const)), eqn) @@ -130,12 +129,11 @@ def process_map(self, map_primitive, f, tracers, params): out_pv_reduced, jaxpr, env = aux() out_pv = add_axis_to_pv(params['axis_size'], out_pv_reduced) const_tracers = map(self.new_instantiated_const, consts) - env_tracers = map(self.full_raise, env) jaxpr_converted = jaxpr.copy() jaxpr_converted.constvars = [] jaxpr_converted.invars = list(it.chain(jaxpr.constvars, jaxpr.invars)) invars = tuple(it.chain(const_tracers, tracers)) - bound_subjaxpr = (jaxpr_converted, (), env) + bound_subjaxpr = (jaxpr_converted, (), map(self.full_raise, env)) eqn = JaxprEqn(invars, None, map_primitive, (bound_subjaxpr,), False, False, params) return JaxprTracer(self, PartialVal((out_pv, out_const)), eqn) @@ -465,6 +463,7 @@ def tracers_to_jaxpr(in_tracers, out_tracer): if isinstance(recipe, JaxprEqn): eqns.append(eqn_tracer_to_var(var, [var(t)], recipe)) elif isinstance(recipe, LambdaBinding): + assert any(t is in_tracer for in_tracer in in_tracers) assert in_tracers, "Lambda binding with no args" elif isinstance(recipe, FreeVar): env[var(t)] = recipe.val
invalid jaxpr from composition of soft_pmap, _papply, jit, and jit The following: ``` @jit def f(x): y = 2 * x @jit def g(z): h, axis_name =_papply(lambda x: x * y) return soft_pmap(h, axis_name)(z) return g(x) f(onp.arange(4.).reshape((2, 2))) ``` results in the following invalid jaxpr: ``` { lambda b ; ; a. let c = reshape[ new_sizes=(1, 2, 2) dimensions=None old_sizes=(2, 2) ] a d = mul e 2.0 f = xla_pmap[ axis_size=1 axis_name=<axis 0x7fa985eaa890> ] b c { lambda ; b ; c a. let d = gather[ operand_shape=(2, 2) slice_sizes=(1, 2) dimension_numbers=GatherDimensionNumbers(offset_dims=(1, 2), collapsed_slice_dims=(), start_index_map=(0, 1)) ] b c e = reshape[ new_sizes=(2, 2) dimensions=None old_sizes=(2, 1, 2) ] d f = mul a e in f } [ ; d ] g = reshape[ new_sizes=(2, 2) dimensions=None old_sizes=(1L, 2, 2) ] f in g } ```
Simplified to the following: ``` @jit def f(x): y = 2 * x @jit def g(z): return pmap(lambda x: x * y)(z) return g(x) f(onp.arange(1.).reshape((1, 1))) ``` which results in: ``` { lambda ; ; a. let b = mul c 2.0 d = xla_pmap[ axis_size=1 axis_name=<axis 0x7f699c8ea050> ] a { lambda ; c ; a. let b = reshape[ new_sizes=(1, 1) dimensions=None old_sizes=(1,) ] a d = mul b c in d } [ ; b ] in d } ```
2019-07-09T01:35:20
google/jax
1015
google__jax-1015
[ "1011" ]
a12161435f4327cc4578c00ff0842e66c541a7dc
diff --git a/jax/lax/lax.py b/jax/lax/lax.py --- a/jax/lax/lax.py +++ b/jax/lax/lax.py @@ -3397,9 +3397,27 @@ def _reduce_window_translation_rule(c, operand, init_value, jaxpr, consts, return c.ReduceWindow(operand, init_value, xla_computation, window_dimensions, window_strides, padding) +def _generic_reduce_window_batch_rule( + batched_args, batch_dims, jaxpr, consts, window_dimensions, window_strides, + padding): + operand, init = batched_args + bdim, init_bdim = batch_dims + if init_bdim is not None: + raise NotImplementedError("reduce_window batching is not implemented for " + "initial values") + + def reduce_window(x, window_dimensions, window_strides, padding): + return reduce_window_p.bind( + x, init, jaxpr=jaxpr, consts=consts, window_dimensions=window_dimensions, + window_strides=window_strides, padding=padding) + return _reduce_window_batch_rule(reduce_window, (operand,), (bdim,), + window_dimensions, window_strides, padding) + + reduce_window_p = standard_primitive( _reduce_window_shape_rule, _input_dtype, 'reduce_window', _reduce_window_translation_rule) +batching.primitive_batchers[reduce_window_p] = _generic_reduce_window_batch_rule def _reduce_window_sum_shape_rule(operand, window_dimensions, window_strides,
diff --git a/tests/batching_test.py b/tests/batching_test.py --- a/tests/batching_test.py +++ b/tests/batching_test.py @@ -548,6 +548,11 @@ def f(params, x): per_example_direct = np.concatenate(per_example_direct, axis=0) self.assertAllClose(per_example, per_example_direct, check_dtypes=True) + def testCumProd(self): + x = np.arange(9).reshape(3, 3) + 1 + y = vmap(lambda x: np.cumprod(x, axis=-1))(x) + self.assertAllClose(onp.cumprod(x, axis=1), y, check_dtypes=True) + def testSelect(self): pred = onp.array([True, False]) on_true = onp.array([0, 1])
[FR] support batching rule for np.cumprod It is strange to me that we have a batching rule for `reduce_window`, but `np.cumprod` raises `NotImplementedError: Batching rule for 'reduce_window' not implemented`. Here is a simple repro:
```python
from jax import vmap
import jax.numpy as np

def f(x):
  return np.cumprod(x, axis=-1)

vmap(f)(np.ones((3, 3)))
```
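As a point of reference (a minimal sketch, assuming a JAX build where the `vmap` above no longer errors): `cumprod` along the last axis acts row by row, so the vmapped version should agree with applying `np.cumprod` to the stacked input directly, which is also what the test added in the patch checks.
```python
# Minimal consistency sketch: cumprod over the trailing axis is per-row, so
# vmap over the leading axis should match the direct call on the 2D array.
# Assumes a JAX version with the reduce_window batching rule from this PR.
import numpy as onp
import jax.numpy as np
from jax import vmap

x = np.arange(1.0, 10.0).reshape(3, 3)
batched = vmap(lambda row: np.cumprod(row, axis=-1))(x)
direct = np.cumprod(x, axis=-1)
assert onp.allclose(batched, direct)
```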
2019-07-13T14:23:16
google/jax
1020
google__jax-1020
[ "988" ]
d2c5fd2c8c373f37b67e79000d64f35fd91c96e3
diff --git a/jax/lax/lax.py b/jax/lax/lax.py --- a/jax/lax/lax.py +++ b/jax/lax/lax.py @@ -563,10 +563,13 @@ def broadcast(operand, sizes): def broadcast_in_dim(operand, shape, broadcast_dimensions): if operand.ndim == len(shape) and not len(broadcast_dimensions): return operand - else: - return broadcast_in_dim_p.bind( - operand, shape=tuple(shape), - broadcast_dimensions=tuple(broadcast_dimensions)) + if any(x < 0 or x >= len(shape) for x in broadcast_dimensions): + msg = ("broadcast dimensions must be >= 0 and < ndim(shape), got {} for " + "shape {}") + raise ValueError(msg.format(broadcast_dimensions, shape)) + return broadcast_in_dim_p.bind( + operand, shape=tuple(shape), + broadcast_dimensions=tuple(broadcast_dimensions)) def reshape(operand, new_sizes, dimensions=None): """Wraps XLA's `Reshape diff --git a/jax/ops/scatter.py b/jax/ops/scatter.py --- a/jax/ops/scatter.py +++ b/jax/ops/scatter.py @@ -18,6 +18,8 @@ from __future__ import division from __future__ import print_function +import collections + import numpy as onp from ..abstract_arrays import ShapedArray, ConcreteArray @@ -39,6 +41,10 @@ def _is_advanced_int_indexer(idx): isinstance(idx, tuple) and all(onp.ndim(elt) == 0 for elt in idx)) return out and np._is_advanced_int_indexer(idx) +def _triggers_unpack(x): + return (isinstance(x, np.ndarray) or isinstance(x, collections.Sequence) + or isinstance(x, slice) or x is Ellipsis or x is None) + def _scatter_update(x, idx, y, scatter_op): """Helper for indexed updates. @@ -67,51 +73,48 @@ def _scatter_update(x, idx, y, scatter_op): y_shape = np.shape(y) y = lax.convert_element_type(y, lax.dtype(x)) - # Check if there's advanced indexing going on, and handle differently based on - # whether it is or isn't mixed with basic indexing. - if _is_advanced_int_indexer(idx): - if np._is_advanced_int_indexer_without_slices(idx): - if isinstance(idx, (tuple, list)): - if any(onp.shape(e) for e in idx): - # At least one sequence element in the index list means broadcasting. - idx = np.broadcast_arrays(*idx) - else: - # The index list is a flat list of integers. - idx = [lax.concatenate([lax.reshape(e, (1,)) for e in idx], 0)] + # "Basic slicing is initiated if the selection object is a non-array, + # non-tuple sequence containing slice objects, [Ellipses, or newaxis + # objects]". Detects this case and canonicalizes to a tuple. + if not isinstance(idx, tuple): + if isinstance(idx, collections.Sequence) and not isinstance(idx, np.ndarray): + if any(_triggers_unpack(i) for i in idx): + idx = tuple(idx) else: - # The indexer is just a single integer array. - idx = [idx] - - stacked_idx = np.concatenate( - [np.mod(np.reshape(a, (-1, 1)), np._constant_like(a, x.shape[i])) - for i, a in enumerate(idx)], axis=1) - - y = np.broadcast_to(y, idx[0].shape + onp.shape(x)[len(idx):]) - y = lax.reshape(y, (stacked_idx.shape[0],) + onp.shape(x)[len(idx):]) - - dnums = lax.ScatterDimensionNumbers( - update_window_dims=tuple(range(1, y.ndim)), - inserted_window_dims=tuple(range(len(idx))), - scatter_dims_to_operand_dims=tuple(range(len(idx)))) - return scatter_op(x, stacked_idx, y, dnums) - elif np._is_advanced_int_indexer(idx): - # TODO(mattjj, phawkins): one of us is going to implement this case someday - msg = "Unimplemented case for indexed update. Open a feature request!" - raise NotImplementedError(msg) + idx = (idx,) else: - assert False # unreachable - - # At this point there's no advanced indexing going on, so we process each - # element of the index one at a time to build up a scatter. 
- if not isinstance(idx, tuple): - idx = (idx,) + idx = (idx,) # Remove ellipses and add trailing slice(None)s. idx = np._canonicalize_tuple_index(x, idx) + # Check for advanced indexing. + + # Do the advanced indexing axes appear contiguously? If not, NumPy semantics + # move the advanced axes to the front. + advanced_axes_are_contiguous = False + + advanced_indexes = None + + # The positions of the advanced indexes in `idx`. + idx_advanced_axes = [] + + # The positions of the advanced indexes in x's shape. + x_advanced_axes = None + + if _is_advanced_int_indexer(idx): + idx_no_nones = [(i, d) for i, d in enumerate(idx) if d is not None] + advanced_pairs = ( + (np.asarray(e), i, j) for j, (i, e) in enumerate(idx_no_nones) + if (isinstance(e, collections.Sequence) or isinstance(e, np.ndarray))) + advanced_pairs = ((np.mod(e, np._constant_like(e, x_shape[j])), i, j) + for e, i, j in advanced_pairs) + advanced_indexes, idx_advanced_axes, x_advanced_axes = zip(*advanced_pairs) + advanced_axes_are_contiguous = onp.all(onp.diff(idx_advanced_axes) == 1) + _int = lambda aval: not aval.shape and onp.issubdtype(aval.dtype, onp.integer) - x_axis = 0 + x_axis = 0 # Current axis in x. y_axis = 0 # Current axis in y, before collapsing. See below. collapsed_y_axis = 0 # Current axis in y, after collapsing. @@ -132,7 +135,35 @@ def _scatter_update(x, idx, y, scatter_op): # Finally, we reverse reversed_y_dims to handle slices with negative strides. reversed_y_dims = [] - for i in idx: + + for idx_pos, i in enumerate(idx): + # If the advanced indices are not contiguous they are moved to the front + # of the slice. Otherwise, they replace the chunk of advanced indices. + if (advanced_indexes is not None and + (advanced_axes_are_contiguous and idx_pos == idx_advanced_axes[0] or + not advanced_axes_are_contiguous and idx_pos == 0)): + advanced_indexes = np.broadcast_arrays(*advanced_indexes) + shape = advanced_indexes[0].shape + ndim = len(shape) + advanced_indexes = [ + lax.convert_element_type(lax.reshape(a, shape + (1,)), np.int32) + for a in advanced_indexes] + + scatter_indices = lax.broadcast_in_dim( + scatter_indices, onp.insert(scatter_indices.shape, -1, shape), + tuple(range(scatter_indices.ndim - 1)) + (scatter_indices.ndim + ndim - 1,)) + scatter_indices = np.concatenate([scatter_indices] + advanced_indexes, -1) + scatter_dims_to_operand_dims.extend(x_advanced_axes) + inserted_window_dims.extend(x_advanced_axes) + slice_shape.extend(shape) + collapsed_slice_shape.extend(shape) + y_axis += ndim + collapsed_y_axis += ndim + + if idx_pos in idx_advanced_axes: + x_axis += 1 + continue + try: abstract_i = core.get_aval(i) except TypeError: @@ -201,7 +232,7 @@ def _scatter_update(x, idx, y, scatter_op): dnums = lax.ScatterDimensionNumbers( update_window_dims = tuple(update_window_dims), - inserted_window_dims = tuple(inserted_window_dims), + inserted_window_dims = tuple(sorted(inserted_window_dims)), scatter_dims_to_operand_dims = tuple(scatter_dims_to_operand_dims) ) return scatter_op(x, scatter_indices, y, dnums)
diff --git a/tests/lax_numpy_indexing_test.py b/tests/lax_numpy_indexing_test.py --- a/tests/lax_numpy_indexing_test.py +++ b/tests/lax_numpy_indexing_test.py @@ -316,7 +316,7 @@ def check_grads(f, args, order, atol=None, rtol=None, eps=None): ]), ] -MIXED_ADVANCED_INDEXING_TESTS = [ +MIXED_ADVANCED_INDEXING_TESTS_NO_REPEATS = [ ("SlicesAndOneIntArrayIndex", [IndexSpec(shape=(2, 3), indexer=(onp.array([0, 1]), slice(1, 2))), IndexSpec(shape=(2, 3), indexer=(slice(0, 2), @@ -325,7 +325,7 @@ def check_grads(f, args, order, atol=None, rtol=None, eps=None): onp.array([0, 2]), slice(None))), IndexSpec(shape=(3, 4, 5), indexer=(Ellipsis, - onp.array([[0, 2], [1, 1]]), + onp.array([[0, 2], [1, 3]]), slice(None))), ]), ("SlicesAndTwoIntArrayIndices", @@ -346,10 +346,7 @@ def check_grads(f, args, order, atol=None, rtol=None, eps=None): onp.array([-1, 2]))), IndexSpec(shape=(3, 4, 5), indexer=(onp.array([0, 2, -2]), slice(None, None, 2), - onp.array([-1, 2, -1]))), - IndexSpec(shape=(3, 4, 5), indexer=(onp.array([[0, 2], [2, 0]]), - Ellipsis, - onp.array([[1, 0], [1, 0]]))), + onp.array([-1, 2, 1]))), ]), ("NonesAndIntArrayIndices", [IndexSpec(shape=(3, 4, 5), indexer=[onp.array([0, 2]), @@ -370,6 +367,22 @@ def check_grads(f, args, order, atol=None, rtol=None, eps=None): ]), ] +MIXED_ADVANCED_INDEXING_TESTS = MIXED_ADVANCED_INDEXING_TESTS_NO_REPEATS + [ + ("SlicesAndOneIntArrayIndex", + [ + IndexSpec(shape=(3, 4, 5), indexer=(Ellipsis, + onp.array([[0, 2], [1, 1]]), + slice(None))), + ]), + ("SlicesAndTwoIntArrayIndices", + [IndexSpec(shape=(3, 4, 5), indexer=(onp.array([0, 2, -2]), + slice(None, None, 2), + onp.array([-1, 2, -1]))), + IndexSpec(shape=(3, 4, 5), indexer=(onp.array([[0, 2], [2, 0]]), + Ellipsis, + onp.array([[1, 0], [1, 0]]))), + ]),] + class IndexingTest(jtu.JaxTestCase): """Tests for Numpy indexing translation rules.""" @@ -794,6 +807,28 @@ def testAdvancedIndexing(self, shape, dtype, update_shape, update_dtype, self._CheckAgainstNumpy(onp_fn, jax_fn, args_maker, check_dtypes=True) self._CompileAndCheck(jax_fn, args_maker, check_dtypes=True) + @parameterized.named_parameters(jtu.cases_from_list({ + "testcase_name": "{}_inshape={}_indexer={}_update={}_op={}".format( + name, jtu.format_shape_dtype_string(shape, dtype), indexer, + jtu.format_shape_dtype_string(update_shape, update_dtype), op.name), + "shape": shape, "dtype": dtype, "rng": rng, "indexer": indexer, + "update_shape": update_shape, "update_dtype": update_dtype, + "op": op + } for name, index_specs in MIXED_ADVANCED_INDEXING_TESTS_NO_REPEATS + for shape, indexer in index_specs + for op in UpdateOps + for dtype in (all_dtypes if op == UpdateOps.UPDATE else default_dtypes) + for update_shape in _broadcastable_shapes(_update_shape(shape, indexer)) + for update_dtype in ([dtype] if op == UpdateOps.ADD else all_dtypes) + for rng in [jtu.rand_default()])) + def testMixedAdvancedIndexing(self, shape, dtype, update_shape, update_dtype, + rng, indexer, op): + args_maker = lambda: [rng(shape, dtype), rng(update_shape, update_dtype)] + onp_fn = lambda x, y: UpdateOps.onp_fn(op, indexer, x, y) + jax_fn = lambda x, y: UpdateOps.jax_fn(op, indexer, x, y) + self._CheckAgainstNumpy(onp_fn, jax_fn, args_maker, check_dtypes=True) + self._CompileAndCheck(jax_fn, args_maker, check_dtypes=True) + @parameterized.named_parameters(jtu.cases_from_list({ "testcase_name": "{}_inshape={}_indexer={}_update={}_op={}".format( name, jtu.format_shape_dtype_string(shape, dtype), indexer,
Implement indexed updates that mix basic and advanced indexing I'm trying to update certain rows of a matrix with the corresponding gradients. I tried `index_add`, but it currently raises `NotImplementedError` from `_scatter_update`. This is crucial for my use case, where the weight-matrix selection is data-dependent.
```python
new_weights = index_add(weights,
                        ops.index[neuron_indices + context_values, :],
                        -learning_rate * gradient)
```
Thanks for the issue report! In general it's very helpful to me if you can give reproductions that are self-contained so I can run them without guessing at the shapes of things like `weights`. The issue here is that we don't yet implement a mixture of NumPy advanced and basic indexing at the same time in the same update expression. We should do that. Fortunately, it turns out your use of basic indexing probably isn't necessary. Try this: ``` new_weights = index_add(weights, ops.index[neuron_indices + context_values], - learning_rate * gradient) ``` i.e., no `, :` in the index expression. Numpy indexing automatically applies to the leading dimensions, so there's no need for the explicit `:`. That said, we should definitely implement this case! Yeah dropping `:` definitely works for me, thanks ! :+1:
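A small plain-NumPy check of the point above, that indexing already applies to the leading axis so the trailing `:` is redundant; the array names here are made-up stand-ins for the ones in the report.
```python
# Stand-in shapes only: `rows` plays the role of neuron_indices + context_values
# and `update` the role of -learning_rate * gradient.
import numpy as onp

weights = onp.zeros((5, 3))
rows = onp.array([1, 3])
update = onp.ones((2, 3))

a, b = weights.copy(), weights.copy()
a[rows] += update      # no trailing ':'
b[rows, :] += update   # explicit trailing ':'
assert (a == b).all()
```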
2019-07-15T22:03:19
google/jax
1025
google__jax-1025
[ "1006" ]
d90993f798ff00404e2d5869f78d3642d99b6a33
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -2207,160 +2207,288 @@ def take_along_axis(arr, indices, axis): ### Indexing +def _rewriting_take(arr, idx): + # Computes arr[idx]. + # All supported cases of indexing can be implemented as an XLA gather, + # followed by an optional reverse and a reshape. + arr = asarray(arr) + indexer = _index_to_gather(shape(arr), idx) # shared with _scatter_update -def _rewriting_take(arr, idx, axis=0): - """A function like numpy.take that handles boxes and rewrites to LAX.""" + y = lax.gather(arr, indexer.gather_indices, indexer.dnums, + indexer.gather_slice_shape) - # Handle special indexers: (), Ellipsis, slice(None), and None. - # TODO(mattjj): don't compare empty tuple identity (though works for CPython) - if idx is () or idx is Ellipsis or _is_slice_none(idx): # pylint: disable=literal-comparison - return arr - elif idx is None: - return expand_dims(arr, 0) + # Reverses axes with negative strides. + if indexer.reversed_y_dims: + y = lax.rev(y, indexer.reversed_y_dims) + + # This adds np.newaxis/None dimensions. + return lax.reshape(y, indexer.slice_shape) + +_Indexer = collections.namedtuple("_Indexer", [ + # The expected shape of the slice output. + "slice_shape", + + # The slice shape to pass to lax.gather(). + "gather_slice_shape", + + # The gather indices to use. + "gather_indices", + + # A GatherDimensionNumbers object describing the gather to perform. + "dnums", + + # Slice dimensions that have negative strides, and so must be reversed after + # the gather. + "reversed_y_dims", + + # For scatters, we must eliminate any axes created by `newaxis`, which + # are the following dimensions, which must be of size 1. For gathers, we + # simply reshape to `slice_shape` to introduce the new axes. + "newaxis_dims", +]) + +def _index_to_gather(x_shape, idx): + # Convert list indices to tuples in cases (deprecated by NumPy.) + idx = _eliminate_deprecated_list_indexing(idx) + + # Expand any (concrete) boolean indices. We can then use advanced integer + # indexing logic to handle them. + idx = _expand_bool_indices(idx) + + # Remove ellipses and add trailing slice(None)s. + idx = _canonicalize_tuple_index(len(x_shape), idx) + + # Check for advanced indexing: + # https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing + # Do the advanced indexing axes appear contiguously? If not, NumPy semantics + # move the advanced axes to the front. + advanced_axes_are_contiguous = False + + advanced_indexes = None + + # The positions of the advanced indexing axes in `idx`. + idx_advanced_axes = [] + + # The positions of the advanced indexes in x's shape. + # collapsed, after None axes have been removed. See below. 
+ x_advanced_axes = None + + if _is_advanced_int_indexer(idx): + idx_no_nones = [(i, d) for i, d in enumerate(idx) if d is not None] + advanced_pairs = ( + (asarray(e), i, j) for j, (i, e) in enumerate(idx_no_nones) + if (isinstance(e, collections.Sequence) or isinstance(e, ndarray))) + advanced_pairs = ((mod(e, _constant_like(e, x_shape[j])), i, j) + for e, i, j in advanced_pairs) + advanced_indexes, idx_advanced_axes, x_advanced_axes = zip(*advanced_pairs) + advanced_axes_are_contiguous = onp.all(onp.diff(idx_advanced_axes) == 1) - # Handle int index _int = lambda aval: not aval.shape and onp.issubdtype(aval.dtype, onp.integer) - try: - abstract_idx = core.get_aval(idx) - except TypeError: - abstract_idx = None - - if isinstance(abstract_idx, ConcreteArray) and _int(abstract_idx): - return lax.index_in_dim(arr, idx, axis, False) - elif isinstance(abstract_idx, ShapedArray) and _int(abstract_idx): - idx = mod(idx, _constant_like(idx, arr.shape[axis])) - return lax.dynamic_index_in_dim(arr, idx, axis, False) - - # Handle slice index (only static, otherwise an error is raised) - elif isinstance(idx, slice): - if not _all(elt is None or type(core.get_aval(elt)) is ConcreteArray - for elt in (idx.start, idx.stop, idx.step)): - msg = ("Array slice indices must have static start/stop/step to be used " - "with Numpy indexing syntax. Try lax.dynamic_slice instead.") - raise IndexError(msg) - else: - start, limit, stride, needs_rev = _static_idx(idx, arr.shape[axis]) - result = lax.slice_in_dim(arr, start, limit, stride, axis=axis) - return lax.rev(result, [axis]) if needs_rev else result - - # Handle non-advanced bool index (only static, otherwise an error is raised) - elif (isinstance(abstract_idx, ShapedArray) and onp.issubdtype(abstract_idx.dtype, onp.bool_) - or isinstance(idx, list) and _all(not _shape(e) and onp.issubdtype(_dtype(e), onp.bool_) - for e in idx)): - if isinstance(idx, list): - idx = array(idx) - abstract_idx = core.get_aval(idx) - - if not type(abstract_idx) is ConcreteArray: - msg = ("Array boolean indices must be static (e.g. no dependence on an " - "argument to a jit or vmap function).") - raise IndexError(msg) - else: - if idx.ndim > arr.ndim or idx.shape != arr.shape[:idx.ndim]: - msg = "Boolean index shape did not match indexed array shape prefix." + + x_axis = 0 # Current axis in x. + y_axis = 0 # Current axis in y, before collapsing. See below. + collapsed_y_axis = 0 # Current axis in y, after collapsing. + + # Scatter dimension numbers. + offset_dims = [] + collapsed_slice_dims = [] + start_index_map = [] + + gather_indices = zeros((0,), dtype=int32) + + # We perform three transformations to y before the scatter op, in order: + # First, y is broadcast to slice_shape. In general `y` only need broadcast to + # the right shape. + slice_shape = [] + + # Next, y is squeezed to remove newaxis_dims. This removes np.newaxis/`None` + # indices, which the scatter cannot remove itself. + newaxis_dims = [] + + # Finally, we reverse reversed_y_dims to handle slices with negative strides. + reversed_y_dims = [] + + gather_slice_shape = [] + + for idx_pos, i in enumerate(idx): + # Handle the advanced indices here if: + # * the advanced indices were not contiguous and we are the start. + # * we are at the position of the first advanced index. 
+ if (advanced_indexes is not None and + (advanced_axes_are_contiguous and idx_pos == idx_advanced_axes[0] or + not advanced_axes_are_contiguous and idx_pos == 0)): + advanced_indexes = broadcast_arrays(*advanced_indexes) + shape = advanced_indexes[0].shape + ndim = len(shape) + advanced_indexes = [ + lax.convert_element_type(lax.reshape(a, shape + (1,)), int32) + for a in advanced_indexes] + + # Broadcast gather_indices from [..., k] to [..., 1, 1, ..., 1, k]. + gather_indices = lax.broadcast_in_dim( + gather_indices, onp.insert(gather_indices.shape, -1, shape), + tuple(range(gather_indices.ndim - 1)) + (gather_indices.ndim + ndim - 1,)) + gather_indices = concatenate([gather_indices] + advanced_indexes, -1) + start_index_map.extend(x_advanced_axes) + collapsed_slice_dims.extend(x_advanced_axes) + slice_shape.extend(shape) + y_axis += ndim + collapsed_y_axis += ndim + + # Per-index bookkeeping for advanced indexes. + if idx_pos in idx_advanced_axes: + x_axis += 1 + gather_slice_shape.append(1) + continue + + try: + abstract_i = core.get_aval(i) + except TypeError: + abstract_i = None + # Handle basic int indexes. + if (isinstance(abstract_i, ConcreteArray) or + isinstance(abstract_i, ShapedArray)) and _int(abstract_i): + i = mod(i, _constant_like(i, x_shape[x_axis])) + i = lax.convert_element_type(i, int32) + i = broadcast_to(i, tuple(gather_indices.shape[:-1]) + (1,)) + gather_indices = concatenate((gather_indices, i), -1) + collapsed_slice_dims.append(x_axis) + gather_slice_shape.append(1) + start_index_map.append(x_axis) + x_axis += 1 + # Handle np.newaxis (None) + elif i is None: + slice_shape.append(1) + newaxis_dims.append(y_axis) + y_axis += 1 + # Handle slice(None) + elif _is_slice_none(i): + slice_shape.append(x_shape[x_axis]) + gather_slice_shape.append(x_shape[x_axis]) + offset_dims.append(collapsed_y_axis) + collapsed_y_axis += 1 + y_axis += 1 + x_axis += 1 + # Handle slice index (only static, otherwise an error is raised) + elif isinstance(i, slice): + if not _all(elt is None or type(core.get_aval(elt)) is ConcreteArray + for elt in (i.start, i.stop, i.step)): + msg = ("Array slice indices must have static start/stop/step to be used " + "with Numpy indexing syntax. Try lax.dynamic_slice/" + "dynamic_update_slice instead.") raise IndexError(msg) + start, limit, stride, needs_rev = _static_idx(i, x_shape[x_axis]) + if needs_rev: + reversed_y_dims.append(collapsed_y_axis) + if stride == 1: + i = lax.convert_element_type(start, int32) + i = broadcast_to(i, tuple(gather_indices.shape[:-1]) + (1,)) + gather_indices = concatenate((gather_indices, i), -1) + slice_shape.append(limit - start) + gather_slice_shape.append(limit - start) + offset_dims.append(collapsed_y_axis) + start_index_map.append(x_axis) else: - reshaped_arr = arr.reshape((-1,) + arr.shape[idx.ndim:]) - int_idx, = onp.where(idx.ravel()) - return lax.index_take(reshaped_arr, (int_idx,), (0,)) - - # Handle non-advanced tuple indices by recursing once - elif isinstance(idx, tuple) and _all(onp.ndim(elt) == 0 for elt in idx): - canonical_idx = _canonicalize_tuple_index(arr, idx) - result, axis = arr, 0 - # TODO(mattjj): could generate a single HLO here, rather than one for each - # elt in canonical idx. For example, x[0, :, 0] generates three HLOs now. 
- for elt in (elt for elt in canonical_idx if elt is not None): - result = _rewriting_take(result, elt, axis=axis) - axis += isinstance(elt, slice) # advance axis index if not eliminated - unexpanded_shape_itr = iter(result.shape) - result_shape = tuple(1 if elt is None else next(unexpanded_shape_itr) - for elt in canonical_idx if isinstance(elt, (type(None), slice))) - return lax.reshape(result, result_shape) if result_shape else result - - # Handle advanced indexing (non-tuple sequence, ndarray of dtype int or bool, - # or a tuple with at least one sequence object). - # https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing - # https://gist.github.com/seberg/976373b6a2b7c4188591 - - # Handle integer array indexing *without* ellipsis/slices/nones - # https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#integer-array-indexing - if _is_advanced_int_indexer_without_slices(idx): - if isinstance(idx, (tuple, list)): - if _any(_shape(e) for e in idx): - # At least one sequence element in the index list means broadcasting. - idx = broadcast_arrays(*idx) - else: - # The index list is a flat list of integers. - idx = [lax.concatenate([lax.reshape(e, (1,)) for e in idx], 0)] + i = arange(start, limit, stride, dtype=int32) + size = i.shape[0] + slice_shape.append(size) + gather_slice_shape.append(1) + gather_indices_shape = tuple(gather_indices.shape[:-1]) + (size,) + i = lax.broadcast_in_dim( + i, shape=gather_indices_shape + (1,), + broadcast_dimensions=(len(gather_indices_shape) - 1,)) + gather_indices = lax.broadcast_in_dim( + gather_indices, + shape=gather_indices_shape + (len(start_index_map),), + broadcast_dimensions=( + tuple(range(len(gather_indices_shape) - 1)) + + (len(gather_indices_shape),))) + gather_indices = concatenate( + (gather_indices, i), len(gather_indices_shape)) + start_index_map.append(x_axis) + collapsed_slice_dims.append(x_axis) + + collapsed_y_axis += 1 + y_axis += 1 + x_axis += 1 else: - # The indexer is just a single integer array. - idx = [idx] - - flat_idx = tuple([mod(ravel(x), _constant_like(x, arr.shape[i])) - for i, x in enumerate(idx)]) - # TODO(mattjj): if we instead lower directly to lax.gather, we can probably - # eliminate the reshape here. - out = lax.index_take(arr, flat_idx, tuple(range(len(idx)))) - return lax.reshape(out, idx[0].shape + _shape(arr)[len(idx):]) - - # Handle integer array indexing *with* ellipsis/slices/nones by recursing once - # https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#combining-advanced-and-basic-indexing - elif _is_advanced_int_indexer(idx): - canonical_idx = _canonicalize_tuple_index(arr, tuple(idx)) - idx_noadvanced = [slice(None) if _is_int_arraylike(e) else e - for e in canonical_idx] - arr_sliced = _rewriting_take(arr, tuple(idx_noadvanced)) - - advanced_pairs = ((e, i) for i, e in enumerate(canonical_idx) if _is_int_arraylike(e)) - idx_advanced, axes = zip(*advanced_pairs) - idx_advanced = broadcast_arrays(*idx_advanced) - - flat_idx = tuple(mod(ravel(x), _constant_like(x, arr_sliced.shape[i])) - for i, x in zip(axes, idx_advanced)) - # TODO(mattjj): if we instead lower directly to lax.gather, we can probably - # eliminate the reshape here. 
- out = lax.index_take(arr_sliced, flat_idx, axes) - shape_suffix = tuple(onp.delete(_shape(arr_sliced), axes)) - out = lax.reshape(out, idx_advanced[0].shape + shape_suffix) - - axes_are_contiguous = onp.all(onp.diff(axes) == 1) - if axes_are_contiguous: - start = axes[0] - naxes = idx_advanced[0].ndim - out = moveaxis(out, list(range(naxes)), list(range(start, start + naxes))) - return out + msg = "Indexing mode not yet supported. Open a feature request!\n{}" + raise IndexError(msg.format(idx)) - msg = "Indexing mode not yet supported. Open a feature request!\n{}" - raise IndexError(msg.format(idx)) + dnums = lax.GatherDimensionNumbers( + offset_dims = tuple(offset_dims), + collapsed_slice_dims = tuple(sorted(collapsed_slice_dims)), + start_index_map = tuple(start_index_map) + ) + return _Indexer( + slice_shape=slice_shape, + newaxis_dims=tuple(newaxis_dims), + gather_slice_shape=gather_slice_shape, + reversed_y_dims=reversed_y_dims, + dnums=dnums, + gather_indices=gather_indices) + +def _should_unpack_list_index(x): + """Helper for _eliminate_deprecated_list_indexing.""" + return (isinstance(x, ndarray) and onp.ndim(x) != 0 + or isinstance(x, collections.Sequence) + or isinstance(x, slice) or x is Ellipsis or x is None) + +def _eliminate_deprecated_list_indexing(idx): + # "Basic slicing is initiated if the selection object is a non-array, + # non-tuple sequence containing slice objects, [Ellipses, or newaxis + # objects]". Detects this case and canonicalizes to a tuple. This case is + # deprecated by NumPy and exists for backward compatibility. + if not isinstance(idx, tuple): + if isinstance(idx, collections.Sequence) and not isinstance(idx, ndarray): + if _any(_should_unpack_list_index(i) for i in idx): + idx = tuple(idx) + else: + idx = (idx,) + else: + idx = (idx,) + return idx +def _expand_bool_indices(idx): + """Converts concrete bool indexes into advanced integer indexes.""" + out = [] + for i in idx: + try: + abstract_i = core.get_aval(i) + except TypeError: + abstract_i = None + if (isinstance(abstract_i, ShapedArray) and onp.issubdtype(abstract_i.dtype, onp.bool_) + or isinstance(i, list) and _all(not _shape(e) and onp.issubdtype(_dtype(e), onp.bool_) + for e in i)): + if isinstance(i, list): + i = array(i) + abstract_i = core.get_aval(i) + + if not type(abstract_i) is ConcreteArray: + msg = ("Array boolean indices must be static (e.g. 
no dependence on an " + "argument to a jit or vmap function).") + raise IndexError(msg) + else: + out.extend(onp.where(i)) + else: + out.append(i) + return tuple(out) def _is_slice_none(idx): """Return True if idx is equal to slice(None), False otherwise.""" if isinstance(idx, slice): return idx.start is None and idx.stop is None and idx.step is None - +# TODO(mattjj): clean up this logic def _is_advanced_int_indexer(idx): """Returns True if idx should trigger int array indexing, False otherwise.""" # https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing - if isinstance(idx, (tuple, list)) and _any(onp.ndim(elt) != 0 for elt in idx): - return _all(e is None or e is Ellipsis or isinstance(e, slice) - or _is_int_arraylike(e) for e in idx) - else: - return _is_int_arraylike(idx) - - -def _is_advanced_int_indexer_without_slices(idx): - """Returns True iff idx is an advanced int idx without slice/ellipsis/none.""" - if _is_advanced_int_indexer(idx): - if isinstance(idx, (tuple, list)): - return not _any(e is None or e is Ellipsis or isinstance(e, slice) - for e in idx) - else: - return True - + assert isinstance(idx, tuple) + if _all(onp.ndim(elt) == 0 for elt in idx): + return False + return _all(e is None or e is Ellipsis or isinstance(e, slice) + or _is_int_arraylike(e) for e in idx) def _is_int_arraylike(x): """Returns True if x is array-like with integer dtype, False otherwise.""" @@ -2369,22 +2497,22 @@ def _is_int_arraylike(x): or isinstance(x, (list, tuple)) and _all(_is_int_arraylike(e) for e in x)) -def _canonicalize_tuple_index(arr, idx): +def _canonicalize_tuple_index(arr_ndim, idx): """Helper to remove Ellipsis and add in the implicit trailing slice(None).""" len_without_none = _sum(1 for e in idx if e is not None and e is not Ellipsis) - if len_without_none > arr.ndim: + if len_without_none > arr_ndim: msg = "Too many indices for array: {} non-None/Ellipsis indices for dim {}." - raise IndexError(msg.format(len_without_none, arr.ndim)) + raise IndexError(msg.format(len_without_none, arr_ndim)) ellipses = (i for i, elt in enumerate(idx) if elt is Ellipsis) ellipsis_index = next(ellipses, None) if ellipsis_index is not None: if next(ellipses, None) is not None: msg = "Multiple ellipses (...) not supported: {}." raise IndexError(msg.format(list(map(type, idx)))) - colons = (slice(None),) * (arr.ndim - len_without_none) + colons = (slice(None),) * (arr_ndim - len_without_none) idx = idx[:ellipsis_index] + colons + idx[ellipsis_index + 1:] - elif len_without_none < arr.ndim: - colons = (slice(None),) * (arr.ndim - len_without_none) + elif len_without_none < arr_ndim: + colons = (slice(None),) * (arr_ndim - len_without_none) idx = tuple(idx) + colons return idx diff --git a/jax/ops/scatter.py b/jax/ops/scatter.py --- a/jax/ops/scatter.py +++ b/jax/ops/scatter.py @@ -22,29 +22,10 @@ import numpy as onp -from ..abstract_arrays import ShapedArray, ConcreteArray -from .. import core from .. 
import lax from ..numpy import lax_numpy as np -# TODO(mattjj): clean up this logic -def _is_advanced_int_indexer(idx): - _int = lambda aval: not aval.shape and onp.issubdtype(aval.dtype, onp.integer) - try: - abstract_idx = core.get_aval(idx) - except TypeError: - abstract_idx = None - out = not (isinstance(abstract_idx, ConcreteArray) and _int(abstract_idx) or - isinstance(abstract_idx, ShapedArray) and _int(abstract_idx) or - isinstance(idx, slice) or - isinstance(idx, tuple) and all(onp.ndim(elt) == 0 for elt in idx)) - return out and np._is_advanced_int_indexer(idx) - -def _triggers_unpack(x): - return (isinstance(x, np.ndarray) or isinstance(x, collections.Sequence) - or isinstance(x, slice) or x is Ellipsis or x is None) - def _scatter_update(x, idx, y, scatter_op): """Helper for indexed updates. @@ -64,178 +45,31 @@ def _scatter_update(x, idx, y, scatter_op): Returns: An ndarray representing an updated `x` after performing the scatter-update. """ - # For more clues on the logic of this implementation, see the code for - # jax.numpy._rewriting_take (which has links to NumPy docs). x = np.asarray(x) y = np.asarray(y) - x_shape = np.shape(x) - y_shape = np.shape(y) y = lax.convert_element_type(y, lax.dtype(x)) - # "Basic slicing is initiated if the selection object is a non-array, - # non-tuple sequence containing slice objects, [Ellipses, or newaxis - # objects]". Detects this case and canonicalizes to a tuple. - if not isinstance(idx, tuple): - if isinstance(idx, collections.Sequence) and not isinstance(idx, np.ndarray): - if any(_triggers_unpack(i) for i in idx): - idx = tuple(idx) - else: - idx = (idx,) - else: - idx = (idx,) - - # Remove ellipses and add trailing slice(None)s. - idx = np._canonicalize_tuple_index(x, idx) - - # Check for advanced indexing. - - # Do the advanced indexing axes appear contiguously? If not, NumPy semantics - # move the advanced axes to the front. - advanced_axes_are_contiguous = False - - advanced_indexes = None - - # The positions of the advanced indexes in `idx`. - idx_advanced_axes = [] - - # The positions of the advanced indexes in x's shape. - x_advanced_axes = None - - if _is_advanced_int_indexer(idx): - idx_no_nones = [(i, d) for i, d in enumerate(idx) if d is not None] - advanced_pairs = ( - (np.asarray(e), i, j) for j, (i, e) in enumerate(idx_no_nones) - if (isinstance(e, collections.Sequence) or isinstance(e, np.ndarray))) - advanced_pairs = ((np.mod(e, np._constant_like(e, x_shape[j])), i, j) - for e, i, j in advanced_pairs) - advanced_indexes, idx_advanced_axes, x_advanced_axes = zip(*advanced_pairs) - advanced_axes_are_contiguous = onp.all(onp.diff(idx_advanced_axes) == 1) - - _int = lambda aval: not aval.shape and onp.issubdtype(aval.dtype, onp.integer) - - x_axis = 0 # Current axis in x. - y_axis = 0 # Current axis in y, before collapsing. See below. - collapsed_y_axis = 0 # Current axis in y, after collapsing. - - # Scatter dimension numbers. - update_window_dims = [] - inserted_window_dims = [] - scatter_dims_to_operand_dims = [] - - scatter_indices = np.zeros((0,), dtype=np.int32) - - # We perform three transformations to y before the scatter op, in order: - # First, y is broadcast to slice_shape. In general `y` only need broadcast to - # the right shape. - slice_shape = [] - # Next, y is reshaped to collapsed_slice_shape. This is to handle `None` - # indices, which the scatter cannot remove itself. - collapsed_slice_shape = [] - # Finally, we reverse reversed_y_dims to handle slices with negative strides. 
- reversed_y_dims = [] - - - for idx_pos, i in enumerate(idx): - # If the advanced indices are not contiguous they are moved to the front - # of the slice. Otherwise, they replace the chunk of advanced indices. - if (advanced_indexes is not None and - (advanced_axes_are_contiguous and idx_pos == idx_advanced_axes[0] or - not advanced_axes_are_contiguous and idx_pos == 0)): - advanced_indexes = np.broadcast_arrays(*advanced_indexes) - shape = advanced_indexes[0].shape - ndim = len(shape) - advanced_indexes = [ - lax.convert_element_type(lax.reshape(a, shape + (1,)), np.int32) - for a in advanced_indexes] - - scatter_indices = lax.broadcast_in_dim( - scatter_indices, onp.insert(scatter_indices.shape, -1, shape), - tuple(range(scatter_indices.ndim - 1)) + (scatter_indices.ndim + ndim - 1,)) - scatter_indices = np.concatenate([scatter_indices] + advanced_indexes, -1) - scatter_dims_to_operand_dims.extend(x_advanced_axes) - inserted_window_dims.extend(x_advanced_axes) - slice_shape.extend(shape) - collapsed_slice_shape.extend(shape) - y_axis += ndim - collapsed_y_axis += ndim - - if idx_pos in idx_advanced_axes: - x_axis += 1 - continue - - try: - abstract_i = core.get_aval(i) - except TypeError: - abstract_i = None - if (isinstance(abstract_i, ConcreteArray) or - isinstance(abstract_i, ShapedArray)) and _int(abstract_i): - i = np.mod(i, np._constant_like(i, x.shape[x_axis])) - i = lax.convert_element_type(i, np.int32) - i = np.broadcast_to(i, tuple(scatter_indices.shape[:-1]) + (1,)) - scatter_indices = np.concatenate((scatter_indices, i), -1) - inserted_window_dims.append(x_axis) - scatter_dims_to_operand_dims.append(x_axis) - x_axis += 1 - elif i is None: - slice_shape.append(1) - y_axis += 1 - elif np._is_slice_none(i): - slice_shape.append(x_shape[x_axis]) - collapsed_slice_shape.append(x_shape[x_axis]) - update_window_dims.append(collapsed_y_axis) - collapsed_y_axis += 1 - y_axis += 1 - x_axis += 1 - elif isinstance(i, slice): - start, limit, stride, needs_rev = np._static_idx(i, x.shape[x_axis]) - if needs_rev: - reversed_y_dims.append(collapsed_y_axis) - if stride == 1: - i = lax.convert_element_type(start, np.int32) - i = np.broadcast_to(i, tuple(scatter_indices.shape[:-1]) + (1,)) - scatter_indices = np.concatenate((scatter_indices, i), -1) - slice_shape.append(limit - start) - collapsed_slice_shape.append(limit - start) - update_window_dims.append(collapsed_y_axis) - scatter_dims_to_operand_dims.append(x_axis) - else: - i = np.arange(start, limit, stride, dtype=np.int32) - size = i.shape[0] - slice_shape.append(size) - collapsed_slice_shape.append(size) - scatter_indices_shape = tuple(scatter_indices.shape[:-1]) + (size,) - i = lax.broadcast_in_dim( - i, shape=scatter_indices_shape + (1,), - broadcast_dimensions=(len(scatter_indices_shape) - 1,)) - scatter_indices = lax.broadcast_in_dim( - scatter_indices, - shape=scatter_indices_shape + (len(scatter_dims_to_operand_dims),), - broadcast_dimensions=( - tuple(range(len(scatter_indices_shape) - 1)) + - (len(scatter_indices_shape),))) - scatter_indices = np.concatenate( - (scatter_indices, i), len(scatter_indices_shape)) - scatter_dims_to_operand_dims.append(x_axis) - inserted_window_dims.append(x_axis) - - collapsed_y_axis += 1 - y_axis += 1 - x_axis += 1 - else: - raise IndexError("Unknown index type ", i) - - y = np.broadcast_to(y, tuple(slice_shape)) - y = lax.reshape(y, collapsed_slice_shape) - if reversed_y_dims: - y = lax.rev(y, reversed_y_dims) + # XLA gathers and scatters are very similar in structure; the scatter logic + # is 
more or less a transpose of the gather equivalent. + indexer = np._index_to_gather(np.shape(x), idx) + + # Broadcast `y` to the slice output shape. + y = np.broadcast_to(y, tuple(indexer.slice_shape)) + # Collapse any `None`/`np.newaxis` dimensions. + y = np.squeeze(y, axis=indexer.newaxis_dims) + if indexer.reversed_y_dims: + y = lax.rev(y, indexer.reversed_y_dims) + + # Transpose the gather dimensions into scatter dimensions (cf. + # lax._gather_transpose_rule) dnums = lax.ScatterDimensionNumbers( - update_window_dims = tuple(update_window_dims), - inserted_window_dims = tuple(sorted(inserted_window_dims)), - scatter_dims_to_operand_dims = tuple(scatter_dims_to_operand_dims) + update_window_dims=indexer.dnums.offset_dims, + inserted_window_dims=indexer.dnums.collapsed_slice_dims, + scatter_dims_to_operand_dims=indexer.dnums.start_index_map ) - return scatter_op(x, scatter_indices, y, dnums) + return scatter_op(x, indexer.gather_indices, y, dnums) class _Indexable(object):
diff --git a/tests/parallel_test.py b/tests/parallel_test.py --- a/tests/parallel_test.py +++ b/tests/parallel_test.py @@ -17,6 +17,7 @@ from __future__ import print_function import itertools +import unittest from unittest import SkipTest import numpy as onp @@ -181,6 +182,7 @@ def f(x, y): ans = _parallelize(f)(x, y) self.assertAllClose(ans, expected, check_dtypes=False) + @unittest.skip("Missing cases in gather papply rule") def testOuter(self): x = onp.arange(10) y = 2 * onp.arange(10) @@ -197,6 +199,7 @@ def f(y): return x[:, None] * y ans = _parallelize(f)(y) self.assertAllClose(ans, expected, check_dtypes=False) + @unittest.skip("Missing cases in gather papply rule") def testOuter3(self): x = onp.arange(10) y = 2 * onp.arange(10)
Load from 2d array within vmap results in two gathers rather than just one
```
import jax
import jax.numpy as np
import numpy as onp

def test(arr, x, y):
  return arr[x, y]

rand_x = onp.random.randint(0, 200, size=1024)
rand_y = onp.random.randint(0, 200, size=1024)
rand_arr = onp.random.uniform(0, 1, size=(200, 200))

test_vmap = jax.vmap(test, in_axes=(None, 0, 0))
jax.jit(test_vmap)(rand_arr, rand_x, rand_y)
```
The `jax.jit` results in an XLA computation with two gathers, but the expression can be implemented with just one. Two gathers is quite bad because it unnecessarily materializes a large intermediate array; you really don't want that. I am not blocked on this, because I've worked around it by flattening the array and indexing with `x * width + y`, but that is not ideal because it forces a layout upon XLA. It is also something that nobody other than an XLA expert is going to notice and work around in their code.
Just adding more info to this: this behavior is because of how we implemented NumPy indexing in [`jax.numpy._rewriting_take`](https://github.com/google/jax/blob/8e794ad98806d61fdfed4bf243d81c8545f741cc/jax/numpy/lax_numpy.py#L2167), in that we basically handle one index at a time just for simplicity's sake (and because that code predates the Scatter and Gather HLOs!). We have slightly "better" logic in [other places](https://github.com/google/jax/blob/8e794ad98806d61fdfed4bf243d81c8545f741cc/jax/ops/scatter.py#L42) in that it handles NumPy indexing and produces a single Gather/Scatter, but [it's not as complete yet](https://github.com/google/jax/blob/8e794ad98806d61fdfed4bf243d81c8545f741cc/jax/ops/scatter.py#L98). I think we always hoped that XLA would fuse multiple Gathers / slices together under a `jit`. How tricky is that? > I think we always hoped that XLA would fuse multiple Gathers / slices together under a jit. How tricky is that? It's certainly possible, but not trivial. If it was simply gather-of-gather that would be one thing (still nontrivial), but I'm observing it's gather-of-reshape-of-gather, and now this is "interesting". I generally err on implementing these kinds of optimizations in the compiler even if it's somewhat complicated, because that's what it's there for. But OTOH I don't know how hard it would be to change this on your end.
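For reference, a sketch of the flattening workaround the reporter describes (index the raveled array with `x * width + y`), which avoids the second gather per the report above; it reuses the names from the repro.
```python
# Workaround sketch from the report: flatten the array and compute a single
# linear index so the lookup becomes one gather instead of two.
import jax
import jax.numpy as np
import numpy as onp

def test_flat(arr, x, y):
  width = arr.shape[1]
  return arr.ravel()[x * width + y]

rand_x = onp.random.randint(0, 200, size=1024)
rand_y = onp.random.randint(0, 200, size=1024)
rand_arr = onp.random.uniform(0, 1, size=(200, 200))

flat_vmap = jax.vmap(test_flat, in_axes=(None, 0, 0))
jax.jit(flat_vmap)(rand_arr, rand_x, rand_y)
```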
2019-07-16T18:26:59
google/jax
1030
google__jax-1030
[ "1024" ]
7c4cf2d942dd3db9cdc9917088818a15874c42b2
diff --git a/jax/lax/lax_fft.py b/jax/lax/lax_fft.py --- a/jax/lax/lax_fft.py +++ b/jax/lax/lax_fft.py @@ -20,11 +20,15 @@ from jax.core import Primitive from jax.interpreters import xla from ..interpreters import ad +from ..interpreters import batching def fft(x, fft_type, fft_lengths=None): if fft_lengths is None: fft_lengths = x.shape + elif len(fft_lengths) == 0: + # XLA FFT doesn't support 0-rank. + return x else: fft_lengths = tuple(fft_lengths) return fft_p.bind(x, fft_type=fft_type, fft_lengths=fft_lengths) @@ -41,8 +45,15 @@ def fft_translation_rule(c, x, fft_type, fft_lengths): def fft_transpose_rule(t, fft_type, fft_lengths): return fft(t, fft_type, fft_lengths), +def fft_batching_rule(batched_args, batch_dims, fft_type, fft_lengths): + x, = batched_args + bd, = batch_dims + x = batching.bdim_at_front(x, bd) + return fft(x, fft_type, fft_lengths), 0 + fft_p = Primitive('fft') fft_p.def_impl(fft_impl) fft_p.def_abstract_eval(fft_abstract_eval) xla.translations[fft_p] = fft_translation_rule ad.deflinear(fft_p, fft_transpose_rule) +batching.primitive_batchers[fft_p] = fft_batching_rule
diff --git a/tests/lax_test.py b/tests/lax_test.py --- a/tests/lax_test.py +++ b/tests/lax_test.py @@ -2645,6 +2645,21 @@ def fun(operand): for bdims in all_bdims(shape): self._CheckBatching(fun, 3, bdims, (shape,), dtype, rng) + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": "_shape={}_bdims={}_fft_ndims={}" + .format(shape, bdims, fft_ndims), + "shape": shape, "bdims": bdims, "fft_ndims": fft_ndims, "rng": rng} + for shape in [(5,), (3, 4, 5), (2, 3, 4, 5)] + for bdims in all_bdims(shape) + for fft_ndims in range(0, min(3, len(shape)) + 1) + for rng in [jtu.rand_default()])) + def testFft(self, fft_ndims, shape, bdims, rng): + ndims = len(shape) + axes = range(ndims - fft_ndims, ndims) + fft_lengths = [shape[axis] for axis in axes] + op = lambda x: lax.fft(x, xla_bridge.xla_client.FftType.FFT, fft_lengths) + self._CheckBatching(op, 5, bdims, [shape], onp.complex64, rng) + # TODO Concatenate # TODO Reverse # TODO DynamicSlice
FFT Hessian broken Been writing some FFT code lately so hitting some bumps in the road--it looks like Hessians are broken for FFTs, for now. It's blocking my research project, so I'd be happy to work on this, with some guidance for what to add (not sure if it's adding a batching rule or something tricker related to #1021). But, it would probably take less time for someone already familiar with the codebase. Reproducing example: ```python import jax import jax.numpy as np fft_hess = jax.hessian(np.fft.fftn) fft_hess(np.array([1.0, 2.0])) # Errors ``` This produces the following stack trace: ``` --------------------------------------------------------------------------- KeyError Traceback (most recent call last) ~/.pyenv/versions/graf/lib/python3.7/site-packages/jax/interpreters/batching.py in get_primitive_batcher(p) 231 try: --> 232 return primitive_batchers[p] 233 except KeyError: KeyError: fft During handling of the above exception, another exception occurred: NotImplementedError Traceback (most recent call last) in ----> 1 fft_hess(np.array([1.0, 2.0])) ~/.pyenv/versions/graf/lib/python3.7/site-packages/jax/api.py in jacfun(*args, **kwargs) 342 holomorphic or tree_map(_check_real_input_jacfwd, dyn_args) 343 pushfwd = partial(jvp, f_partial, dyn_args) --> 344 y, jac = vmap(pushfwd, out_axes=(None, -1))(_std_basis(dyn_args)) 345 example_args = dyn_args[0] if isinstance(argnums, int) else dyn_args 346 return tree_map(partial(_unravel_array_into_pytree, example_args, -1), jac) ~/.pyenv/versions/graf/lib/python3.7/site-packages/jax/api.py in batched_fun(*args, **kwargs) 491 in_flat, in_trees = unzip2(map(pytree_to_jaxtupletree, args)) 492 jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(f, in_trees) --> 493 out_flat = batching.batch(jaxtree_fun, in_flat, in_axes_, out_axes) 494 return build_tree(out_tree(), out_flat) 495 ~/.pyenv/versions/graf/lib/python3.7/site-packages/jax/interpreters/batching.py in batch(fun, in_vals, in_dims, out_dim_dst) 43 elif len(sizes) == 1: 44 sz = sizes.pop() ---> 45 return batch_transform(fun, sz, in_dims, out_dim_dst).call_wrapped(in_vals) 46 else: 47 raise TypeError("got inconsistent map dimension sizes: {}".format(sizes)) ~/.pyenv/versions/graf/lib/python3.7/site-packages/jax/linear_util.py in call_wrapped(self, *args, **kwargs) 145 146 del gen --> 147 ans = self.f(*args, **dict(self.params, **kwargs)) 148 del args 149 while stack: ~/.pyenv/versions/graf/lib/python3.7/site-packages/jax/api.py in jvp(fun, primals, tangents) 761 ps_flat, ts_flat, in_trees = unzip3(map(trim_arg, primals, tangents)) 762 jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(fun, in_trees) --> 763 out_primal, out_tangent = ad.jvp(jaxtree_fun).call_wrapped(ps_flat, ts_flat) 764 return (build_tree(out_tree(), out_primal), build_tree(out_tree(), out_tangent)) 765 ~/.pyenv/versions/graf/lib/python3.7/site-packages/jax/linear_util.py in call_wrapped(self, *args, **kwargs) 145 146 del gen --> 147 ans = self.f(*args, **dict(self.params, **kwargs)) 148 del args 149 while stack: ~/.pyenv/versions/graf/lib/python3.7/site-packages/jax/api.py in jacfun(*args, **kwargs) 383 f = lu.wrap_init(fun, kwargs) 384 f_partial, dyn_args = _argnums_partial(f, argnums, args) --> 385 y, pullback = vjp(f_partial, *dyn_args) 386 holomorphic or tree_map(_check_real_output_jacrev, y) 387 jac = vmap(pullback)(_std_basis(y)) ~/.pyenv/versions/graf/lib/python3.7/site-packages/jax/api.py in vjp(fun, *primals, **kwargs) 896 jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(fun, in_trees) 897 if not 
has_aux: --> 898 out_primal, out_vjp = ad.vjp(jaxtree_fun, primals_flat) 899 else: 900 out_primal, out_vjp, aux = ad.vjp(jaxtree_fun, primals_flat, has_aux=True) ~/.pyenv/versions/graf/lib/python3.7/site-packages/jax/interpreters/ad.py in vjp(traceable, primals, has_aux) 103 def vjp(traceable, primals, has_aux=False): 104 if not has_aux: --> 105 out_primal, pval, jaxpr, consts = linearize(traceable, *primals) 106 else: 107 out_primal, pval, jaxpr, consts, aux = linearize(traceable, *primals, has_aux=True) ~/.pyenv/versions/graf/lib/python3.7/site-packages/jax/interpreters/ad.py in linearize(traceable, *primals, **kwargs) 92 in_pvals = (pe.PartialVal((None, pack(primals))), 93 pe.PartialVal((core.AbstractTuple(tangent_avals), core.unit))) ---> 94 jaxpr, out_pval, consts = pe.trace_to_jaxpr(jvpfun, in_pvals) 95 pval_primal, pval_tangent = unpair_pval(out_pval) 96 aval_primal, const_primal = pval_primal ~/.pyenv/versions/graf/lib/python3.7/site-packages/jax/interpreters/partial_eval.py in trace_to_jaxpr(fun, pvals, **kwargs) 399 with new_master(JaxprTrace) as master: 400 fun = trace_to_subjaxpr(fun, master, instantiate) --> 401 jaxpr, (out_pval, consts, env) = fun.call_wrapped(pvals) 402 assert not env 403 del master ~/.pyenv/versions/graf/lib/python3.7/site-packages/jax/linear_util.py in call_wrapped(self, *args, **kwargs) 145 146 del gen --> 147 ans = self.f(*args, **dict(self.params, **kwargs)) 148 del args 149 while stack: ~/.pyenv/versions/graf/lib/python3.7/site-packages/jax/numpy/fft.py in fftn(a, s, axes, norm) 68 s = [a.shape[axis] for axis in axes] 69 a = _promote_to_complex(a) ---> 70 return lax.fft(a, xla_client.FftType.FFT, s) 71 72 for func in get_module_functions(onp.fft): ~/.pyenv/versions/graf/lib/python3.7/site-packages/jax/lax/lax_fft.py in fft(x, fft_type, fft_lengths) 28 else: 29 fft_lengths = tuple(fft_lengths) ---> 30 return fft_p.bind(x, fft_type=fft_type, fft_lengths=fft_lengths) 31 32 def fft_impl(x, fft_type, fft_lengths): ~/.pyenv/versions/graf/lib/python3.7/site-packages/jax/core.py in bind(self, *args, **kwargs) 145 146 tracers = map(top_trace.full_raise, args) --> 147 out_tracer = top_trace.process_primitive(self, tracers, kwargs) 148 return full_lower(out_tracer) 149 ~/.pyenv/versions/graf/lib/python3.7/site-packages/jax/interpreters/ad.py in process_primitive(self, primitive, tracers, params) 250 "Forward-mode differentiation rule for '{}' not implemented" 251 .format(primitive)) --> 252 primal_out, tangent_out = jvp(primals_in, tangents_in, **params) 253 return JVPTracer(self, primal_out, tangent_out) 254 ~/.pyenv/versions/graf/lib/python3.7/site-packages/jax/interpreters/ad.py in linear_jvp(primitive, primals, tangents, **params) 356 357 def linear_jvp(primitive, primals, tangents, **params): --> 358 val_out = primitive.bind(*primals, **params) 359 if all(tangent is zero for tangent in tangents): 360 return val_out, zero ~/.pyenv/versions/graf/lib/python3.7/site-packages/jax/core.py in bind(self, *args, **kwargs) 145 146 tracers = map(top_trace.full_raise, args) --> 147 out_tracer = top_trace.process_primitive(self, tracers, kwargs) 148 return full_lower(out_tracer) 149 ~/.pyenv/versions/graf/lib/python3.7/site-packages/jax/interpreters/ad.py in process_primitive(self, primitive, tracers, params) 250 "Forward-mode differentiation rule for '{}' not implemented" 251 .format(primitive)) --> 252 primal_out, tangent_out = jvp(primals_in, tangents_in, **params) 253 return JVPTracer(self, primal_out, tangent_out) 254 
~/.pyenv/versions/graf/lib/python3.7/site-packages/jax/interpreters/ad.py in linear_jvp(primitive, primals, tangents, **params) 361 else: 362 tangents = map(instantiate_zeros, primals, tangents) --> 363 return val_out, primitive.bind(*tangents, **params) 364 365 def linear_transpose(transpose_rule, cotangent, *args, **kwargs): ~/.pyenv/versions/graf/lib/python3.7/site-packages/jax/core.py in bind(self, *args, **kwargs) 145 146 tracers = map(top_trace.full_raise, args) --> 147 out_tracer = top_trace.process_primitive(self, tracers, kwargs) 148 return full_lower(out_tracer) 149 ~/.pyenv/versions/graf/lib/python3.7/site-packages/jax/interpreters/batching.py in process_primitive(self, primitive, tracers, params) 121 else: 122 # TODO(mattjj,phawkins): if no rule implemented, could vmap-via-map here --> 123 batched_primitive = get_primitive_batcher(primitive) 124 val_out, dim_out = batched_primitive(vals_in, dims_in, **params) 125 return BatchTracer(self, val_out, dim_out) ~/.pyenv/versions/graf/lib/python3.7/site-packages/jax/interpreters/batching.py in get_primitive_batcher(p) 233 except KeyError: 234 raise NotImplementedError( --> 235 "Batching rule for '{}' not implemented".format(p)) 236 237 def defvectorized(prim): NotImplementedError: Batching rule for 'fft' not implemented ```
Try adding this to `lax_fft.py` (lightly tested):
```
def fft_batching_rule(batched_args, batch_dims, fft_type, fft_lengths):
  x, = batched_args
  bd, = batch_dims
  x = batching.bdim_at_front(x, bd)
  return fft(x, fft_type, fft_lengths), 0

batching.primitive_batchers[fft_p] = fft_batching_rule
```
This seems to work--many thanks for the quick response! I'll comment if any issues arise. (feel free to close--unless you want to leave the issue open for tracking)

further edit: The JIT-ed code is ridiculously fast, put a huge smile on my face. Thanks for making a tool that makes users happy! :)

Great! One caution about microbenchmarking JAX: make sure you read this first so you are timing the right things: https://jax.readthedocs.io/en/latest/async_dispatch.html
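To confirm that this rule unblocks the original repro without editing the library, one can register it from user code. The sketch below assumes the 2019-era module layout in which `fft` and `fft_p` live in `jax.lax.lax_fft`; per the follow-up above, the Hessian repro then runs instead of raising.
```python
import jax
import jax.numpy as np
from jax.interpreters import batching
from jax.lax import lax_fft  # assumed location of fft/fft_p in this era

def fft_batching_rule(batched_args, batch_dims, fft_type, fft_lengths):
  # Move the mapped axis to the front; XLA's FFT only transforms the trailing
  # fft_lengths dimensions, so leading axes behave as batch dimensions.
  x, = batched_args
  bd, = batch_dims
  x = batching.bdim_at_front(x, bd)
  return lax_fft.fft(x, fft_type, fft_lengths), 0

batching.primitive_batchers[lax_fft.fft_p] = fft_batching_rule

# Per the follow-up above, the original repro now traces instead of raising.
print(jax.hessian(np.fft.fftn)(np.array([1.0, 2.0])))
```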
2019-07-18T19:11:11
google/jax
1057
google__jax-1057
[ "967" ]
97a5148a0d2b5dad6066a89410e75e39dbfc9856
diff --git a/jax/interpreters/batching.py b/jax/interpreters/batching.py --- a/jax/interpreters/batching.py +++ b/jax/interpreters/batching.py @@ -205,7 +205,8 @@ def add_batch_dim_to_aval(bdim, size, aval): return ShapedArray(aval.shape, aval.dtype) else: assert 0 <= bdim <= aval.ndim - batched_shape = tuple(onp.insert(aval.shape, bdim, size)) + batched_shape = tuple( + onp.insert(onp.asarray(aval.shape, onp.intp), bdim, size)) return ShapedArray(batched_shape, aval.dtype) else: raise TypeError(t) diff --git a/jax/interpreters/xla.py b/jax/interpreters/xla.py --- a/jax/interpreters/xla.py +++ b/jax/interpreters/xla.py @@ -619,6 +619,7 @@ def __array__(self, dtype=None, context=None): __complex__ = partialmethod(_forward_to_value, complex) __hex__ = partialmethod(_forward_to_value, hex) __oct__ = partialmethod(_forward_to_value, oct) + __index__ = partialmethod(_forward_to_value, op.index) # pickle saves and loads just like an ndarray __reduce__ = partialmethod(_forward_to_value, op.methodcaller("__reduce__")) diff --git a/jax/lax/lax.py b/jax/lax/lax.py --- a/jax/lax/lax.py +++ b/jax/lax/lax.py @@ -72,6 +72,22 @@ def broadcast_shapes(*shapes): .format(tuple(map(tuple, shapes)))) return tuple(result_shape) +def _canonicalize_shape(shape): + """Canonicalizes and checks for errors in a user-provided shape value. + + Args: + shape: a Python value that represents a shape. + + Returns: + A tuple of integers. + """ + try: + return tuple(map(operator.index, shape)) + except TypeError: + pass + msg = ("Shapes must be 1D sequences of concrete values of integer type, " + "got {}") + raise TypeError(msg.format(shape)) def _identity(x): return x @@ -576,7 +592,7 @@ def reshape(operand, new_sizes, dimensions=None): <https://www.tensorflow.org/xla/operation_semantics#reshape>`_ operator. """ - new_sizes = tuple(new_sizes) + new_sizes = _canonicalize_shape(new_sizes) same_shape = onp.shape(operand) == new_sizes same_dims = dimensions is None or tuple(dimensions) == tuple(range(onp.ndim(operand))) if onp.shape(operand) and same_shape and same_dims: @@ -666,7 +682,7 @@ def gather(operand, start_indices, dimension_numbers, slice_sizes): """ return gather_p.bind( operand, start_indices, dimension_numbers=dimension_numbers, - slice_sizes=tuple(slice_sizes), operand_shape=operand.shape) + slice_sizes=_canonicalize_shape(slice_sizes), operand_shape=operand.shape) def scatter_add(operand, scatter_indices, updates, dimension_numbers): """Scatter-add operator. @@ -980,7 +996,7 @@ def full(shape, fill_value, dtype=None): will be cast to `dtype`. """ try: - shape = tuple(map(int, shape)) + shape = _canonicalize_shape(shape) except TypeError: msg = ("`full` requires shapes to be concrete. If using `jit`, try using " "`static_argnums` or applying `jit` to smaller subfunctions instead.") @@ -1015,7 +1031,7 @@ def broadcasted_iota(dtype, shape, dimension): operator. 
""" dtype = xla_bridge.canonicalize_dtype(dtype) - shape = tuple(map(int, shape)) + shape = _canonicalize_shape(shape) dimension = int(dimension) return _IotaConstant(dtype, shape, dimension) @@ -1026,7 +1042,7 @@ def broadcasted_eye(dtype, shape, axes): if not isinstance(axes, (list, tuple)) or not len(axes) >= 2: raise TypeError("make_diagonal `axes` must be a tuple with len at least 2.") dtype = xla_bridge.canonicalize_dtype(dtype) - shape = tuple(map(int, shape)) + shape = _canonicalize_shape(shape) axes = tuple(map(int, axes)) return _EyeConstant(shape, axes, dtype) @@ -1212,7 +1228,7 @@ def full_like(x, fill_value, dtype=None, shape=None): An ndarray with the same shape as `x` with its entries set equal to `fill_value`, similar to the output of np.full. """ - shape = onp.shape(x) if shape is None else shape + shape = onp.shape(x) if shape is None else _canonicalize_shape(shape) out = full(shape, fill_value, dtype or _dtype(x)) return tie_in(x, out)
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -1758,6 +1758,8 @@ def testCov(self, shape, dtype, rowvar, ddof, bias, rng): self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True) self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True) + def testIssue967(self): + self.assertRaises(TypeError, lambda: lnp.zeros(1.5)) if __name__ == "__main__": absltest.main() diff --git a/tests/lax_test.py b/tests/lax_test.py --- a/tests/lax_test.py +++ b/tests/lax_test.py @@ -2240,6 +2240,17 @@ def f2(x, y): expected = onp.array(0.0) self.assertAllClose(ans, expected, check_dtypes=False) + def testReshapeWithUnusualShapes(self): + ans = lax.reshape(onp.ones((3,), onp.float32), (lax.add(1, 2), 1)) + self.assertAllClose(ans, onp.ones((3, 1), onp.float32), check_dtypes=True) + + jtu.check_raises_regexp( + lambda: lax.reshape(onp.ones(3,), (onp.array([3, 1]),)), TypeError, + "Shapes must be 1D sequences of concrete values of integer type.*") + + jtu.check_raises_regexp( + lambda: lax.reshape(onp.ones(3,), (1.5, 2.0)), TypeError, + "Shapes must be 1D sequences of concrete values of integer type.*") def all_bdims(*shapes): bdims = (itertools.chain([None], range(len(shape) + 1)) for shape in shapes)
Don't allow float indices and shapes
NumPy throws an error when given a float index or shape, e.g. `np.zeros(0.5)`. jax.numpy, on the other hand, silently converts it to an integer:
```
> jnp.zeros(1.5)
_FilledConstant([ 0.], dtype=float32)
```
jax.numpy should enforce the same type strictness here to prevent silent bugs.
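The accompanying patch routes user-supplied shapes through `operator.index`, which is what rejects floats the same way NumPy does. A standalone sketch of that check (the function name mirrors the patch and the error message is copied from it; it is shown here out of its original context):
```python
import operator

def canonicalize_shape(shape):
  # operator.index accepts ints and int-like objects (e.g. numpy integer
  # scalars) and raises TypeError for floats, arrays, and other values.
  try:
    return tuple(map(operator.index, shape))
  except TypeError:
    raise TypeError(
        "Shapes must be 1D sequences of concrete values of integer type, "
        "got {}".format(shape))

print(canonicalize_shape((3, 1)))  # (3, 1)
canonicalize_shape((1.5, 2.0))     # raises TypeError, matching NumPy
```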
2019-07-23T20:22:05
google/jax
1058
google__jax-1058
[ "1027" ]
4c4ab3acb042abb4cc9352acec8c0511bc479c24
diff --git a/jax/api.py b/jax/api.py --- a/jax/api.py +++ b/jax/api.py @@ -68,6 +68,10 @@ "Disable JIT compilation and just call original Python.") +def _check_callable(fun): + if not callable(fun): + raise TypeError("Expected a callable value, got {}".format(fun)) + def jit(fun, static_argnums=()): """Sets up `fun` for just-in-time compilation with XLA. @@ -106,6 +110,7 @@ def jit(fun, static_argnums=()): return _jit(fun, static_argnums) def _jit(fun, static_argnums, device_values=True): + _check_callable(fun) if isinstance(static_argnums, int): static_argnums = (static_argnums,) @@ -248,6 +253,7 @@ def xla_computation(fun, static_argnums=(), axis_env=None): ROOT tuple.18 = (f32[], f32[], f32[]) tuple(all-reduce.7, all-reduce.12, all-reduce.17) } """ + _check_callable(fun) def pv_like(x): aval = xla.abstractify(x) @@ -362,6 +368,8 @@ def value_and_grad(fun, argnums=0, has_aux=False, holomorphic=False): "of {fun} and the second element is the gradient, which has the " "same shape as the arguments at positions {argnums}.") + _check_callable(fun) + @wraps(fun, docstr=docstr, argnums=argnums) def value_and_grad_f(*args, **kwargs): f = lu.wrap_init(fun, kwargs) @@ -568,6 +576,8 @@ def vmap(fun, in_axes=0, out_axes=0): docstr = ("Vectorized version of {fun}. Takes similar arguments as {fun} " "but with additional array axes over which {fun} is mapped.") + _check_callable(fun) + if (not isinstance(in_axes, (list, tuple, type(None), int)) or not isinstance(out_axes, (list, tuple, type(None), int))): msg = ("vmap arguments in_axes and out_axes must each be an integer, None, " @@ -674,6 +684,7 @@ def pmap(fun, axis_name=None): >>> print(doubly_normed.sum((0, 1))) 1.0 """ + _check_callable(fun) axis_name = _TempAxisName() if axis_name is None else axis_name @wraps(fun) @@ -723,6 +734,7 @@ def _aval_axis_size(aval): def soft_pmap(fun, axis_name=None): + _check_callable(fun) axis_name = _TempAxisName() if axis_name is None else axis_name @wraps(fun) @@ -1066,6 +1078,8 @@ def make_jaxpr(fun): (l) = id k in l } """ + _check_callable(fun) + def pv_like(x): aval = xla.abstractify(x) return pe.PartialVal((aval, core.unit))
diff --git a/tests/api_test.py b/tests/api_test.py --- a/tests/api_test.py +++ b/tests/api_test.py @@ -895,6 +895,9 @@ def f(x): xla_comp = api.xla_computation(f) xla_comp(np.arange(8)).GetHloText() # doesn't crash + def test_jit_of_noncallable(self): + jtu.check_raises_regexp(lambda: api.jit(3), TypeError, + "Expected a callable value.*") if __name__ == '__main__': absltest.main()
`jax.jit` should report an error immediately for uncallable arguments
`jax.jit(3)` silently succeeds (although `jax.jit(3)(4)` raises the expected error). The `callable` builtin seems applicable here.
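The fix is a small guard applied at wrap time in each transformation entry point. A minimal standalone sketch of the same pattern (the generic `my_transform` decorator here is illustrative, not a JAX API):
```python
def check_callable(fun):
  if not callable(fun):
    raise TypeError("Expected a callable value, got {}".format(fun))

def my_transform(fun):
  check_callable(fun)  # fail at wrap time, i.e. my_transform(3) raises here
  def wrapped(*args, **kwargs):
    return fun(*args, **kwargs)
  return wrapped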
2019-07-23T21:04:14
google/jax
1063
google__jax-1063
[ "1062" ]
27b46e66154d155f1b4804629a4d747a5c538da7
diff --git a/jax/interpreters/pxla.py b/jax/interpreters/pxla.py --- a/jax/interpreters/pxla.py +++ b/jax/interpreters/pxla.py @@ -399,6 +399,8 @@ def tuple_element_handler(axis_size, aval): xla.canonicalize_dtype_handlers[ShardedDeviceTuple] = \ xla.canonicalize_dtype_handlers[xla.DeviceTuple] +xb.register_constant_handler(ShardedDeviceTuple, xla._device_tuple_constant_handler) + class ShardedDeviceArray(ShardedDeviceValue, xla.DeviceArray): """A ShardedDeviceArray is an ndarray sharded across devices.
diff --git a/tests/api_test.py b/tests/api_test.py --- a/tests/api_test.py +++ b/tests/api_test.py @@ -17,6 +17,7 @@ from __future__ import print_function import collections +from functools import partial from absl.testing import absltest import numpy as onp @@ -29,6 +30,7 @@ from jax.interpreters import ad from jax.interpreters.xla import DeviceArray, DeviceTuple from jax.abstract_arrays import concretization_err_msg +from jax.lib import xla_bridge as xb from jax import test_util as jtu from jax.config import config @@ -899,5 +901,26 @@ def test_jit_of_noncallable(self): jtu.check_raises_regexp(lambda: api.jit(3), TypeError, "Expected a callable value.*") + def test_issue_1062(self): + # code from https://github.com/google/jax/issues/1062 @shoyer + # this tests, among other things, whether ShardedDeviceTuple constants work + device_count = xb.device_count() + + @jit + def multi_step(state, count): + return lax.fori_loop(0, count, lambda i, s: s, state) + + @jit + def multi_step_pmap(state, count=2): + @partial(api.pmap, axis_name='x') + def pmapped_multi_step(state): + return multi_step(state, count) + + return pmapped_multi_step(state) + + u = np.ones((device_count, 100)) + u_final = multi_step_pmap(u) # doesn't crash + + if __name__ == '__main__': absltest.main()
jit inside pmap inside jit error Commenting out *either* of the `@jax.jit` decorators works, as does explicitly passing the `count` argument (e.g., `multi_step_pmap(u, 2)` ```python import jax.numpy as np import jax from jax import lax from functools import partial from jax.lib import xla_bridge device_count = xla_bridge.device_count() @jax.jit def multi_step(state, count): return lax.fori_loop(0, count, lambda i, s: s, state) @jax.jit def multi_step_pmap(state, count=2): @partial(jax.pmap, axis_name='x') def pmapped_multi_step(state): return multi_step(state, count) return pmapped_multi_step(state) u = np.ones((1, 100)) u_final = multi_step_pmap(u) ``` Here's the stacktrace: ``` --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-29-4f6d06752943> in <module>() 20 21 u = np.ones((1, 100)) ---> 22 u_final = multi_step_pmap(u) 9 frames /usr/local/lib/python3.6/dist-packages/jax/api.py in f_jitted(*args, **kwargs) 129 _check_args(args_flat) 130 flat_fun, out_tree = flatten_fun_leafout(f, in_tree) --> 131 out = xla.xla_call(flat_fun, *args_flat, device_values=device_values) 132 return out if out_tree() is leaf else tree_unflatten(out_tree(), out) 133 /usr/local/lib/python3.6/dist-packages/jax/core.py in call_bind(primitive, f, *args, **params) 661 if top_trace is None: 662 with new_sublevel(): --> 663 ans = primitive.impl(f, *args, **params) 664 else: 665 tracers = map(top_trace.full_raise, args) /usr/local/lib/python3.6/dist-packages/jax/interpreters/xla.py in _xla_call_impl(fun, *args, **params) 660 def _xla_call_impl(fun, *args, **params): 661 device_values = FLAGS.jax_device_values and params.pop('device_values') --> 662 compiled_fun = _xla_callable(fun, device_values, *map(abstractify, args)) 663 try: 664 return compiled_fun(*args) /usr/local/lib/python3.6/dist-packages/jax/linear_util.py in memoized_fun(f, *args) 203 204 def memoized_fun(f, *args): --> 205 ans, f_prev = memoized_fun_body(f, args) 206 if id(f_prev) != id(f): 207 f.populate_stores(f_prev) /usr/local/lib/python3.6/dist-packages/jax/linear_util.py in memoized_fun_body(f, args) 200 @fastcache.clru_cache(maxsize=max_size) 201 def memoized_fun_body(f, args): --> 202 return call(f, *args), f 203 204 def memoized_fun(f, *args): /usr/local/lib/python3.6/dist-packages/jax/interpreters/xla.py in _xla_callable(fun, device_values, *abstract_args) 675 assert not env # no subtraces here (though cond might eventually need them) 676 axis_env = AxisEnv(jaxpr_replicas(jaxpr), [], []) --> 677 compiled, result_shape = _compile_jaxpr(jaxpr, axis_env, consts, *abstract_args) 678 del master, consts, jaxpr, env 679 if device_values: /usr/local/lib/python3.6/dist-packages/jax/interpreters/xla.py in _compile_jaxpr(jaxpr, axis_env, const_vals, *abstract_args) 218 raise ValueErrr(msg.format(axis_env.nreps, xb.device_count())) 219 arg_shapes = list(map(xla_shape, abstract_args)) --> 220 built_c = _jaxpr_computation(jaxpr, axis_env, const_vals, (), *arg_shapes) 221 result_shape = xla_shape_to_result_shape(built_c.GetReturnValueShape()) 222 return built_c.Compile(arg_shapes, xb.get_compile_options(axis_env.nreps), /usr/local/lib/python3.6/dist-packages/jax/interpreters/xla.py in _jaxpr_computation(jaxpr, axis_env, const_vals, freevar_shapes, *arg_shapes) 265 if isinstance(val, DeviceArray): 266 val.copy_to_host_async() --> 267 _map(write, jaxpr.constvars, map(c.Constant, const_vals)) 268 _map(write, jaxpr.freevars, map(c.ParameterWithShape, freevar_shapes)) 269 else: 
/usr/local/lib/python3.6/dist-packages/jax/interpreters/xla.py in _map(f, *xs) 307 308 def _map(f, *xs): --> 309 return tuple(map(f, *xs)) 310 311 def xla_destructure(c, ans): /usr/local/lib/python3.6/dist-packages/jax/lib/xla_bridge.py in Constant(self, py_val, canonicalize_types) 273 return _constant_handlers[py_type](self, py_val, canonicalize_types) 274 else: --> 275 raise TypeError("No constant handler for type: {}".format(py_type)) 276 277 # TODO(mattjj): remove when CrossReplicaSum is added to XLA:CPU TypeError: No constant handler for type: <class 'jax.interpreters.pxla.ShardedDeviceTuple'> ```
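Before the fix above landed, one possible user-side stopgap (the same one applied by hand in the follow-up report later in this document) was to register the missing constant handler manually. This leans on internal names of that era (`ShardedDeviceTuple`, `_device_tuple_constant_handler`) which may not exist in later releases:
```python
from jax.interpreters import pxla, xla
from jax.lib import xla_bridge

# Lower ShardedDeviceTuple constants the same way as ordinary DeviceTuples,
# so a jitted function that closes over one can still be compiled.
xla_bridge.register_constant_handler(
    pxla.ShardedDeviceTuple, xla._device_tuple_constant_handler)
```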
2019-07-24T19:32:52
google/jax
1069
google__jax-1069
[ "1065" ]
8ac6a101bf72da7662a2128aefffd285aaa332b4
diff --git a/jax/interpreters/xla.py b/jax/interpreters/xla.py --- a/jax/interpreters/xla.py +++ b/jax/interpreters/xla.py @@ -348,12 +348,19 @@ def _axis_groups(nrep, mesh_spec, mesh_axes): return tuple(map(tuple, groups.T)) def jaxpr_replicas(jaxpr): - nums = (eqn_replicas(eqn) for eqn in jaxpr.eqns if eqn.bound_subjaxprs) - return max(it.chain([1], nums)) # max(itr, default=1) + return max(it.chain([1], (eqn_replicas(eqn) for eqn in jaxpr.eqns))) def eqn_replicas(eqn): - (subjaxpr, _, _), = eqn.bound_subjaxprs - return eqn.params.get('axis_size', 1) * jaxpr_replicas(subjaxpr) + if eqn.bound_subjaxprs: + (subjaxpr, _, _), = eqn.bound_subjaxprs + return eqn.params.get('axis_size', 1) * jaxpr_replicas(subjaxpr) + elif eqn.primitive in initial_style_translations: + nums = (jaxpr_replicas(param if type(param) is core.Jaxpr else param.jaxpr) + for param in eqn.params.values() + if type(param) in (core.Jaxpr, core.TypedJaxpr)) + return max(it.chain([1], nums)) + else: + return 1 def lower_fun(fun, instantiate=False, initial_style=False):
diff --git a/tests/pmap_test.py b/tests/pmap_test.py --- a/tests/pmap_test.py +++ b/tests/pmap_test.py @@ -659,6 +659,24 @@ def g(z): f(onp.arange(1.).reshape((1, 1))) # doesn't crash + def testIssue1065(self): + # from https://github.com/google/jax/issues/1065 + device_count = xla_bridge.device_count() + + def multi_step_pmap(state, count): + @partial(pmap, axis_name='x') + @jit + def exchange_and_multi_step(state): + return state + + @jit + def time_evolution(state): + return lax.fori_loop(0, count, lambda i, s: exchange_and_multi_step(s), state) + + return time_evolution(state) + + multi_step_pmap(np.zeros((device_count,)), count=1) + if __name__ == '__main__': absltest.main()
Nested jit/pmap bug that only appears with multiple devices To reproduce: ```python import jax import jax.numpy as np from jax import lax from functools import partial from jax.lib import xla_bridge device_count = xla_bridge.device_count() # patch https://github.com/google/jax/pull/1063 from jax.interpreters.pxla import ShardedDeviceTuple xla_bridge.register_constant_handler(ShardedDeviceTuple, jax.xla._device_tuple_constant_handler) def multi_step_pmap(state, count): @partial(jax.pmap, axis_name='x') @jax.jit def exchange_and_multi_step(state): return state @jax.jit def time_evolution(state): return lax.fori_loop(0, count, lambda i, s: exchange_and_multi_step(s), state) return time_evolution(state) multi_step_pmap(np.zeros((device_count,)), count=1) ``` Here's the traceback: ``` --------------------------------------------------------------------------- AssertionError Traceback (most recent call last) <ipython-input-4-9a3737aed707> in <module>() ----> 1 multi_step_pmap(np.zeros((8,)), count=1) 13 frames <ipython-input-3-8106aee94f86> in multi_step_pmap(state, count) 23 return lax.fori_loop(0, count, lambda i, s: exchange_and_multi_step(s), state) 24 ---> 25 return time_evolution(state) google3/third_party/py/jax/api.py in f_jitted(*args, **kwargs) 129 _check_args(args_flat) 130 flat_fun, out_tree = flatten_fun_leafout(f, in_tree) --> 131 out = xla.xla_call(flat_fun, *args_flat, device_values=device_values) 132 return out if out_tree() is leaf else tree_unflatten(out_tree(), out) 133 google3/third_party/py/jax/core.py in call_bind(primitive, f, *args, **params) 661 if top_trace is None: 662 with new_sublevel(): --> 663 ans = primitive.impl(f, *args, **params) 664 else: 665 tracers = map(top_trace.full_raise, args) google3/third_party/py/jax/interpreters/xla.py in _xla_call_impl(fun, *args, **params) 672 def _xla_call_impl(fun, *args, **params): 673 device_values = FLAGS.jax_device_values and params.pop('device_values') --> 674 compiled_fun = _xla_callable(fun, device_values, *map(abstractify, args)) 675 try: 676 return compiled_fun(*args) google3/third_party/py/jax/linear_util.py in memoized_fun(f, *args) 203 204 def memoized_fun(f, *args): --> 205 ans, f_prev = memoized_fun_body(f, args) 206 if id(f_prev) != id(f): 207 f.populate_stores(f_prev) google3/third_party/py/jax/linear_util.py in memoized_fun_body(f, args) 200 @fastcache.clru_cache(maxsize=max_size) 201 def memoized_fun_body(f, args): --> 202 return call(f, *args), f 203 204 def memoized_fun(f, *args): google3/third_party/py/jax/interpreters/xla.py in _xla_callable(fun, device_values, *abstract_args) 687 assert not env # no subtraces here (though cond might eventually need them) 688 axis_env = AxisEnv(jaxpr_replicas(jaxpr), [], []) --> 689 compiled, result_shape = _compile_jaxpr(jaxpr, axis_env, consts, *abstract_args) 690 del master, consts, jaxpr, env 691 if device_values: google3/third_party/py/jax/interpreters/xla.py in _compile_jaxpr(jaxpr, axis_env, const_vals, *abstract_args) 223 raise ValueErrr(msg.format(axis_env.nreps, xb.device_count())) 224 arg_shapes = list(map(xla_shape, abstract_args)) --> 225 built_c = _jaxpr_computation(jaxpr, axis_env, const_vals, (), *arg_shapes) 226 result_shape = xla_shape_to_result_shape(built_c.GetReturnValueShape()) 227 return built_c.Compile(arg_shapes, xb.get_compile_options(axis_env.nreps), google3/third_party/py/jax/interpreters/xla.py in _jaxpr_computation(jaxpr, axis_env, const_vals, freevar_shapes, *arg_shapes) 291 elif eqn.primitive in initial_style_translations: 292 rule = 
initial_style_translations[eqn.primitive] --> 293 ans = rule(c, axis_env, *in_nodes, **eqn.params) 294 elif eqn.primitive in parallel_translations: 295 replica_groups = axis_groups(axis_env, eqn.params['axis_name']) google3/third_party/py/jax/lax/lax_control_flow.py in _while_loop_translation_rule(c, axis_env, init_val, cond_consts, body_consts, aval_out, cond_jaxpr, body_jaxpr) 202 203 cond_c = xla._jaxpr_computation(cond_jaxpr_converted, axis_env, (), (), shape) --> 204 body_c = xla._jaxpr_computation(body_jaxpr_converted, axis_env, (), (), shape) 205 full_ans = c.While(cond_c, body_c, loop_carry) 206 return c.GetTupleElement(full_ans, 0) google3/third_party/py/jax/interpreters/xla.py in _jaxpr_computation(jaxpr, axis_env, const_vals, freevar_shapes, *arg_shapes) 301 env_nodes = list(map(read, const_bindings + freevar_bindings)) 302 rule = call_translations[eqn.primitive] --> 303 ans = rule(c, subjaxpr, axis_env, env_nodes, in_nodes, **eqn.params) 304 else: 305 msg = "XLA translation rule for primitive '{}' not found" google3/third_party/py/jax/interpreters/pxla.py in _xla_pmap_translation_rule(c, jaxpr, axis_env, env_nodes, in_nodes, axis_name, axis_size) 591 *map(c.GetShape, in_nodes_sharded)) 592 sharded_result = c.Call(subc, env_nodes + in_nodes_sharded) --> 593 return xla_unshard(c, xla.axis_groups(new_env, axis_name), sharded_result) 594 xla.call_translations[xla_pmap_p] = _xla_pmap_translation_rule 595 ad.primitive_transposes[xla_pmap_p] = partial(ad.map_transpose, xla_pmap_p) google3/third_party/py/jax/interpreters/xla.py in axis_groups(axis_env, name) 338 else: 339 mesh_axes = (axis_read(axis_env, name),) --> 340 return _axis_groups(axis_env.nreps, axis_env.sizes, mesh_axes) 341 342 def _axis_groups(nrep, mesh_spec, mesh_axes): google3/third_party/py/jax/interpreters/xla.py in _axis_groups(nrep, mesh_spec, mesh_axes) 342 def _axis_groups(nrep, mesh_spec, mesh_axes): 343 trailing_size, ragged = divmod(nrep, prod(mesh_spec)) --> 344 assert not ragged 345 full_spec = list(mesh_spec) + [trailing_size] 346 iota = onp.arange(prod(full_spec)).reshape(full_spec) AssertionError: ``` I did a little debugging -- the `axis_env` object here looks like `AxisEnv(nreps=1, names=['x'], sizes=[8])` If I remove the outer `jit`, the error goes away.
2019-07-26T01:22:30
google/jax
1096
google__jax-1096
[ "964" ]
24d4aaf3e20d58dd66a3df8ffe0d0ea9e976c307
diff --git a/build/build.py b/build/build.py --- a/build/build.py +++ b/build/build.py @@ -187,6 +187,9 @@ def check_bazel_version(bazel_path, min_version, max_version): build:cuda --crosstool_top=@local_config_cuda//crosstool:toolchain build:cuda --define=using_cuda=true --define=using_cuda_nvcc=true + +build --spawn_strategy=standalone +build --strategy=Genrule=standalone """
jaxlib build w/ cuda: File not found during compilation I'm compiling `jaxlib` with CUDA 10.0 on Ubuntu 18.04. The build fails with the following error: ``` $ python3 build/build.py --enable_cuda --cuda_path /usr/local/cuda-10.0/ --cudnn_path /usr/local/cuda-10.0/ --enable_march_native [...] ERROR: /home/clem/.cache/bazel/_bazel_clem/ffaac3f7c6ad1cb26f04f1933452eef6/external/nccl_archive/BUILD.bazel:53:1: error while parsing .d file: /h ome/clem/.cache/bazel/_bazel_clem/ffaac3f7c6ad1cb26f04f1933452eef6/execroot/__main__/bazel-out/k8-opt/bin/external/nccl_archive/_objs/device_lib/pr od_i32_reduce_scatter.cu.d (No such file or directory) nvcc fatal : Could not open input file /tmp/tmpxft_00000004_00000000-6_prod_i32_reduce_scatter.cu.compute_35.cpp1.ii Target //build:install_xla_in_source_tree failed to build INFO: Elapsed time: 278.116s, Critical Path: 69.60s INFO: 1281 processes: 1281 linux-sandbox. FAILED: Build did NOT complete successfully FAILED: Build did NOT complete successfully Traceback (most recent call last): File "build/build.py", line 331, in <module> main() File "build/build.py", line 326, in main [":install_xla_in_source_tree", os.getcwd()]) File "build/build.py", line 50, in shell output = subprocess.check_output(cmd) File "/usr/lib/python3.6/subprocess.py", line 356, in check_output **kwargs).stdout File "/usr/lib/python3.6/subprocess.py", line 438, in run output=stdout, stderr=stderr) subprocess.CalledProcessError: Command '['./bazel-0.24.1-linux-x86_64', 'run', '--verbose_failures=true', '--config=opt', '--config=mkl_open_source _only', '--config=cuda', ':install_xla_in_source_tree', '/home/clem/git/jax/build']' returned non-zero exit status 1. ``` Above this error message are only compiler warnings but no errors which could lead to some file not being created. Am I missing something? Or might there be a file name bug? Thanks a lot for your help! --- I'm on a fresh Ubuntu 18.04.2 install with CUDA 10.0, cudnn and driver version 410.48. [Full log](http://paste.ubuntu.com/p/tvXBHbr5gw/)
I saw this too. It seems to be nondeterministic and related to nvcc, but I didn't have time to track down the problem. Try running the build again, and it should make more progress.

Thanks for the advice. I had to restart the compilation ~10 times and finally it finished. However, after installing `jaxlib` and `jax` the XLA backend does not find my GPU and falls back to CPU. Could this be related?

No, I think the two are unrelated. One is a build problem, the other is a runtime problem. Are you sure it's using the right `jaxlib` (i.e., the one you just built)? You can install it locally with `pip install -e jax/build`. (You might also try a prebuilt `jaxlib` wheel; there are links to CUDA 10 wheels on the JAX GitHub README.md.)

Thank you again, I'll look into it. I have tried the pre-built `jaxlib` wheels without success¹. The readme states the GPU support of those is experimental; that's why I tried building myself.
¹ _without my GPU being detected, that is_

I've had this bug for a few months too, but never got around to reporting it. If I resume compilation, it usually progresses until another failure on a file in that directory. (But as reported above, it does eventually work.) My guess is that it's a race condition on a lot of requirements in `nccl`. My build machine has 20 threads (bazel uses all of them), and it happens most of the time. I don't have any issues detecting my GPU with CUDA 10.0 and cuDNN 7.5 on Ubuntu 18.04.

Thank you @kroq-gar78 for the information. I posted a separate issue on that, as it apparently does not correlate with the compilation error. Edit: It indeed does not, see #993.
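The patch in this record sidesteps the race by disabling sandboxing for these build actions: the generated `.bazelrc` gains the two lines below. Quoted directly from the patch, they are also what one could append by hand as a workaround when rebuilding an older checkout:
```
build --spawn_strategy=standalone
build --strategy=Genrule=standalone
```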
2019-08-02T01:45:09
google/jax
1099
google__jax-1099
[ "825" ]
7c060435bb0abc91b5ca9a1a69e08560ce8e1ae8
diff --git a/jax/lax_linalg.py b/jax/lax_linalg.py --- a/jax/lax_linalg.py +++ b/jax/lax_linalg.py @@ -16,6 +16,7 @@ from __future__ import division from __future__ import print_function +from functools import partial import numpy as onp from jax.numpy import lax_numpy as np @@ -34,6 +35,7 @@ from jax.lax import (standard_primitive, standard_unop, binop_dtype_rule, _float, _complex, _input_dtype, _broadcasting_select) from jax.lib import lapack +from jax.lib import cusolver # traceables @@ -81,6 +83,11 @@ def _T(x): return np.swapaxes(x, -1, -2) def _H(x): return np.conj(_T(x)) def symmetrize(x): return (x + _H(x)) / 2 +def _unpack_tuple(f, n): + def g(c, *args, **kwargs): + t = f(c, *args, **kwargs) + return (c.GetTupleElement(t, i) for i in range(n)) + return g # primitives @@ -123,13 +130,18 @@ def _nan_like(c, operand): nan = c.Constant(onp.array(onp.nan, dtype=dtype)) return c.Broadcast(nan, shape.dimensions()) +# TODO(phawkins): remove if-condition after increasing minimum Jaxlib version to +# 0.1.23. +if hasattr(lapack, "potrf"): + _cpu_potrf = lapack.potrf +else: + _cpu_potrf = _unpack_tuple(lapack.jax_potrf, 2) + def cholesky_cpu_translation_rule(c, operand): shape = c.GetShape(operand) dtype = shape.element_type().type if len(shape.dimensions()) == 2 and dtype in _cpu_lapack_types: - potrf_output = lapack.jax_potrf(c, operand, lower=True) - result = c.GetTupleElement(potrf_output, 0) - info = c.GetTupleElement(potrf_output, 1) + result, info = _cpu_potrf(c, operand, lower=True) return c.Select(c.Eq(info, c.ConstantS32Scalar(0)), result, _nan_like(c, result)) else: @@ -163,15 +175,18 @@ def eig_abstract_eval(operand): w = vl = vr = operand return core.AbstractTuple((w, vl, vr)) +# TODO(phawkins): remove if-condition after increasing minimum Jaxlib version to +# 0.1.23. 
+if hasattr(lapack, "geev"): + _cpu_geev = lapack.geev +else: + _cpu_geev = _unpack_tuple(lapack.jax_geev, 4) def eig_cpu_translation_rule(c, operand): shape = c.GetShape(operand) batch_dims = shape.dimensions()[:-2] - geev_out = lapack.jax_geev(c, operand) - w = c.GetTupleElement(geev_out, 0) - vl = c.GetTupleElement(geev_out, 1) - vr = c.GetTupleElement(geev_out, 2) - ok = c.Eq(c.GetTupleElement(geev_out, 3), c.ConstantS32Scalar(0)) + w, vl, vr, info = _cpu_geev(c, operand) + ok = c.Eq(info, c.ConstantS32Scalar(0)) w = _broadcasting_select(c, c.Reshape(ok, None, batch_dims + (1,)), w, _nan_like(c, w)) vl = _broadcasting_select(c, c.Reshape(ok, None, batch_dims + (1, 1)), vl, @@ -219,13 +234,11 @@ def eigh_abstract_eval(operand, lower): v, w = operand, operand return core.AbstractTuple((v, w)) -def eigh_cpu_translation_rule(c, operand, lower): +def _eigh_cpu_gpu_translation_rule(syevd_impl, c, operand, lower): shape = c.GetShape(operand) batch_dims = shape.dimensions()[:-2] - syevd_out = lapack.jax_syevd(c, operand, lower=lower) - v = c.GetTupleElement(syevd_out, 0) - w = c.GetTupleElement(syevd_out, 1) - ok = c.Eq(c.GetTupleElement(syevd_out, 2), c.ConstantS32Scalar(0)) + v, w, info = syevd_impl(c, operand, lower=lower) + ok = c.Eq(info, c.ConstantS32Scalar(0)) v = _broadcasting_select(c, c.Reshape(ok, None, batch_dims + (1, 1)), v, _nan_like(c, v)) w = _broadcasting_select(c, c.Reshape(ok, None, batch_dims + (1,)), w, @@ -267,7 +280,22 @@ def eigh_batching_rule(batched_args, batch_dims, lower): eigh_p.def_abstract_eval(eigh_abstract_eval) xla.translations[eigh_p] = eigh_translation_rule ad.primitive_jvps[eigh_p] = eigh_jvp_rule -xla.backend_specific_translations['cpu'][eigh_p] = eigh_cpu_translation_rule + +# TODO(phawkins): remove if-condition after increasing minimum Jaxlib version to +# 0.1.23. +if hasattr(lapack, "syevd"): + _cpu_syevd = lapack.syevd +else: + _cpu_syevd = _unpack_tuple(lapack.jax_syevd, 3) + +xla.backend_specific_translations['cpu'][eigh_p] = partial( + _eigh_cpu_gpu_translation_rule, _cpu_syevd) + +# TODO(phawkins): remove if-condition after increasing minimum Jaxlib version to +# 0.1.23. +if cusolver: + xla.backend_specific_translations['gpu'][eigh_p] = partial( + _eigh_cpu_gpu_translation_rule, cusolver.syevd) batching.primitive_batchers[eigh_p] = eigh_batching_rule @@ -522,14 +550,13 @@ def _lu_batching_rule(batched_args, batch_dims): x = batching.bdim_at_front(x, bd) return lu_p.bind(x), 0 -def _lu_cpu_translation_rule(c, operand): +def _lu_cpu_gpu_translation_rule(getrf_impl, c, operand): shape = c.GetShape(operand) batch_dims = shape.dimensions()[:-2] - getrf_out = lapack.jax_getrf(c, operand) - lu = c.GetTupleElement(getrf_out, 0) + lu, pivot, info = getrf_impl(c, operand) # Subtract 1 from the pivot to get 0-based indices. - pivot = c.Sub(c.GetTupleElement(getrf_out, 1), c.ConstantS32Scalar(1)) - ok = c.Eq(c.GetTupleElement(getrf_out, 2), c.ConstantS32Scalar(0)) + pivot = c.Sub(pivot, c.ConstantS32Scalar(1)) + ok = c.Eq(info, c.ConstantS32Scalar(0)) lu = _broadcasting_select(c, c.Reshape(ok, None, batch_dims + (1, 1)), lu, _nan_like(c, lu)) return c.Tuple(lu, pivot) @@ -541,7 +568,20 @@ def _lu_cpu_translation_rule(c, operand): xla.translations[lu_p] = xla.lower_fun(_lu_python, instantiate=True) ad.primitive_jvps[lu_p] = _lu_jvp_rule batching.primitive_batchers[lu_p] = _lu_batching_rule -xla.backend_specific_translations['cpu'][lu_p] = _lu_cpu_translation_rule + +# TODO(phawkins): remove if-condition after increasing minimum Jaxlib version to +# 0.1.23. 
+if hasattr(lapack, "getrf"): + _cpu_getrf = lapack.getrf +else: + _cpu_getrf = _unpack_tuple(lapack.jax_getrf, 3) + +xla.backend_specific_translations['cpu'][lu_p] = partial( + _lu_cpu_gpu_translation_rule, _cpu_getrf) + +if cusolver: + xla.backend_specific_translations['gpu'][lu_p] = partial( + _lu_cpu_gpu_translation_rule, cusolver.getrf) def lu_pivots_to_permutation(swaps, m): @@ -681,16 +721,13 @@ def svd_jvp_rule(primals, tangents, full_matrices, compute_uv): dV = dV + np.dot(np.eye(n) - np.dot(V, Vt), np.dot(np.conj(dA).T, U)) / s_dim return core.pack((s, U, Vt)), core.pack((ds, dU, dV.T)) -def svd_cpu_translation_rule(c, operand, full_matrices, compute_uv): +def _svd_cpu_gpu_translation_rule(gesvd_impl, c, operand, full_matrices, compute_uv): shape = c.GetShape(operand) dtype = shape.element_type().type if len(shape.dimensions()) == 2 and dtype in _cpu_lapack_types: - gesdd_out = lapack.jax_gesdd(c, operand, full_matrices=full_matrices, - compute_uv=compute_uv) - s = c.GetTupleElement(gesdd_out, 0) - u = c.GetTupleElement(gesdd_out, 1) - vt = c.GetTupleElement(gesdd_out, 2) - ok = c.Eq(c.GetTupleElement(gesdd_out, 3), c.ConstantS32Scalar(0)) + s, u, vt, info = gesvd_impl(c, operand, full_matrices=full_matrices, + compute_uv=compute_uv) + ok = c.Eq(info, c.ConstantS32Scalar(0)) s = _broadcasting_select(c, c.Reshape(ok, None, (1,)), s, _nan_like(c, s)) u = _broadcasting_select(c, c.Reshape(ok, None, (1, 1)), u, @@ -711,7 +748,22 @@ def svd_batching_rule(batched_args, batch_dims, full_matrices, compute_uv): svd_p = Primitive('svd') svd_p.def_impl(svd_impl) svd_p.def_abstract_eval(svd_abstract_eval) -xla.translations[svd_p] = svd_translation_rule -xla.backend_specific_translations['cpu'][svd_p] = svd_cpu_translation_rule ad.primitive_jvps[svd_p] = svd_jvp_rule batching.primitive_batchers[svd_p] = svd_batching_rule +xla.translations[svd_p] = svd_translation_rule + +# TODO(phawkins): remove if-condition after increasing minimum Jaxlib version to +# 0.1.23. +if hasattr(lapack, "gesdd"): + _cpu_gesdd = lapack.gesdd +else: + _cpu_gesdd = _unpack_tuple(lapack.jax_gesdd, 4) + +xla.backend_specific_translations['cpu'][svd_p] = partial( + _svd_cpu_gpu_translation_rule, _cpu_gesdd) + +# TODO(phawkins): remove if-condition after increasing minimum Jaxlib version to +# 0.1.23. +if cusolver: + xla.backend_specific_translations['gpu'][svd_p] = partial( + _svd_cpu_gpu_translation_rule, cusolver.gesvd) diff --git a/jax/lib/__init__.py b/jax/lib/__init__.py --- a/jax/lib/__init__.py +++ b/jax/lib/__init__.py @@ -47,3 +47,10 @@ def _check_jaxlib_version(): from jaxlib import pytree except ImportError: pytree = None + +# TODO(phawkins): make the import unconditional when the minimum Jaxlib version +# has been increased to 0.1.23. +try: + from jaxlib import cusolver +except ImportError: + cusolver = None diff --git a/jaxlib/cusolver.py b/jaxlib/cusolver.py new file mode 100644 --- /dev/null +++ b/jaxlib/cusolver.py @@ -0,0 +1,182 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np + +from jaxlib import xla_client + +try: + from jaxlib import cusolver_kernels + for _name, _value in cusolver_kernels.registrations().items(): + xla_client.register_custom_call_target(_name, _value, platform="gpu") +except ImportError: + pass + +_Shape = xla_client.Shape + + +def _real_type(dtype): + """Returns the real equivalent of 'dtype'.""" + if dtype == np.float32: + return np.float32 + elif dtype == np.float64: + return np.float64 + elif dtype == np.complex64: + return np.float32 + elif dtype == np.complex128: + return np.float64 + else: + raise NotImplementedError("Unsupported dtype {}".format(dtype)) + + +def getrf(c, a): + """LU decomposition.""" + a_shape = c.GetShape(a) + dtype = a_shape.element_type() + dims = a_shape.dimensions() + assert len(dims) >= 2 + m, n = dims[-2:] + batch_dims = tuple(dims[:-2]) + num_bd = len(batch_dims) + b = 1 + for d in batch_dims: + b *= d + + lwork, opaque = cusolver_kernels.build_getrf_descriptor( + np.dtype(dtype), b, m, n) + out = c.CustomCall( + b"cusolver_getrf", + operands=(a,), + shape_with_layout=_Shape.tuple_shape(( + _Shape.array_shape( + dtype, batch_dims + (m, n), + (num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1))), + _Shape.array_shape(dtype, (lwork,), (0,)), + _Shape.array_shape( + np.dtype(np.int32), batch_dims + (min(m, n),), + tuple(range(num_bd, -1, -1))), + _Shape.array_shape( + np.dtype(np.int32), batch_dims, tuple(range(num_bd - 1, -1, -1))), + )), + operand_shapes_with_layout=(_Shape.array_shape( + dtype, batch_dims + (m, n), + (num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1))),), + opaque=opaque) + return (c.GetTupleElement(out, 0), c.GetTupleElement(out, 2), + c.GetTupleElement(out, 3)) + + +def syevd(c, a, lower=False): + """Symmetric (Hermitian) eigendecomposition.""" + + a_shape = c.GetShape(a) + dtype = a_shape.element_type() + dims = a_shape.dimensions() + assert len(dims) >= 2 + m, n = dims[-2:] + assert m == n + batch_dims = tuple(dims[:-2]) + num_bd = len(batch_dims) + b = 1 + for d in batch_dims: + b *= d + layout = (num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1)) + + lwork, opaque = cusolver_kernels.build_syevd_descriptor( + np.dtype(dtype), lower, b, n) + eigvals_type = _real_type(dtype) + + out = c.CustomCall( + b"cusolver_syevd", + operands=(a,), + shape_with_layout=_Shape.tuple_shape(( + _Shape.array_shape(dtype, dims, layout), + _Shape.array_shape( + np.dtype(eigvals_type), batch_dims + (n,), + tuple(range(num_bd, -1, -1))), + _Shape.array_shape( + np.dtype(np.int32), batch_dims, + tuple(range(num_bd - 1, -1, -1))), + _Shape.array_shape(dtype, (lwork,), (0,)) + )), + operand_shapes_with_layout=( + _Shape.array_shape(dtype, dims, layout), + ), + opaque=opaque) + return (c.GetTupleElement(out, 0), c.GetTupleElement(out, 1), + c.GetTupleElement(out, 2)) + + +def gesvd(c, a, full_matrices=True, compute_uv=True): + """Singular value decomposition.""" + + a_shape = c.GetShape(a) + dtype = a_shape.element_type() + b = 1 + m, n = a_shape.dimensions() + singular_vals_dtype = _real_type(dtype) + + if m < n: + lwork, opaque = cusolver_kernels.build_gesvd_descriptor( + np.dtype(dtype), b, n, m, compute_uv, full_matrices) + out = c.CustomCall( + b"cusolver_gesvd", + operands=(a,), + shape_with_layout=_Shape.tuple_shape(( + _Shape.array_shape(dtype, (m, n), (1, 0)), + _Shape.array_shape(np.dtype(singular_vals_dtype), (min(m, n),), (0,)), + _Shape.array_shape(dtype, 
(n, n), (1, 0)), + _Shape.array_shape(dtype, (m, m), (1, 0)), + _Shape.array_shape(np.dtype(np.int32), (), ()), + _Shape.array_shape(dtype, (lwork,), (0,)), + )), + operand_shapes_with_layout=( + _Shape.array_shape(dtype, (m, n), (1, 0)), + ), + opaque=opaque) + s = c.GetTupleElement(out, 1) + vt = c.GetTupleElement(out, 2) + u = c.GetTupleElement(out, 3) + info = c.GetTupleElement(out, 4) + else: + lwork, opaque = cusolver_kernels.build_gesvd_descriptor( + np.dtype(dtype), b, m, n, compute_uv, full_matrices) + + out = c.CustomCall( + b"cusolver_gesvd", + operands=(a,), + shape_with_layout=_Shape.tuple_shape(( + _Shape.array_shape(dtype, (m, n), (0, 1)), + _Shape.array_shape(np.dtype(singular_vals_dtype), (min(m, n),), (0,)), + _Shape.array_shape(dtype, (m, m), (0, 1)), + _Shape.array_shape(dtype, (n, n), (0, 1)), + _Shape.array_shape(np.dtype(np.int32), (), ()), + _Shape.array_shape(dtype, (lwork,), (0,)), + )), + operand_shapes_with_layout=( + _Shape.array_shape(dtype, (m, n), (0, 1)), + ), + opaque=opaque) + s = c.GetTupleElement(out, 1) + u = c.GetTupleElement(out, 2) + vt = c.GetTupleElement(out, 3) + info = c.GetTupleElement(out, 4) + if not full_matrices: + u = c.Slice(u, (0, 0), (m, min(m, n))) + vt = c.Slice(vt, (0, 0), (min(m, n), n)) + return s, u, vt, info
diff --git a/tests/linalg_test.py b/tests/linalg_test.py --- a/tests/linalg_test.py +++ b/tests/linalg_test.py @@ -163,8 +163,8 @@ def testEigBatching(self, shape, dtype, rng): for lower in [False, True] for rng in [jtu.rand_default()])) # TODO(phawkins): enable when there is an eigendecomposition implementation - # for GPU/TPU. - @jtu.skip_on_devices("gpu", "tpu") + # for TPU. + @jtu.skip_on_devices("tpu") def testEigh(self, n, dtype, lower, rng): _skip_if_unsupported_type(dtype) args_maker = lambda: [rng((n, n), dtype)] @@ -196,8 +196,8 @@ def norm(x): for rng in [jtu.rand_default()] for lower in [True, False])) # TODO(phawkins): enable when there is an eigendecomposition implementation - # for GPU/TPU. - @jtu.skip_on_devices("gpu", "tpu") + # for TPU. + @jtu.skip_on_devices("tpu") def testEighGrad(self, shape, dtype, rng, lower): self.skipTest("Test fails with numeric errors.") uplo = "L" if lower else "U" @@ -224,8 +224,8 @@ def testEighGrad(self, shape, dtype, rng, lower): for lower in [True, False] for eps in [1e-4])) # TODO(phawkins): enable when there is an eigendecomposition implementation - # for GPU/TPU. - @jtu.skip_on_devices("gpu", "tpu") + # for TPU. + @jtu.skip_on_devices("tpu") def testEighGradVectorComplex(self, shape, dtype, rng, lower, eps): _skip_if_unsupported_type(dtype) # Special case to test for complex eigenvector grad correctness. @@ -263,7 +263,7 @@ def testEighGradVectorComplex(self, shape, dtype, rng, lower, eps): for shape in [(1, 1), (4, 4), (5, 5)] for dtype in float_types + complex_types for rng in [jtu.rand_default()])) - @jtu.skip_on_devices("gpu", "tpu") + @jtu.skip_on_devices("tpu") def testEighBatching(self, shape, dtype, rng): _skip_if_unsupported_type(dtype) shape = (10,) + shape @@ -318,7 +318,7 @@ def testNorm(self, shape, dtype, ord, axis, keepdims, rng): for full_matrices in [False, True] for compute_uv in [False, True] for rng in [jtu.rand_default()])) - @jtu.skip_on_devices("gpu", "tpu") + @jtu.skip_on_devices("tpu") def testSVD(self, m, n, dtype, full_matrices, compute_uv, rng): _skip_if_unsupported_type(dtype) args_maker = lambda: [rng((m, n), dtype)] @@ -414,7 +414,7 @@ def compare_orthogonal(q1, q2): if not full_matrices and m >= n: jtu.check_jvp(np.linalg.qr, partial(jvp, np.linalg.qr), (a,)) - @jtu.skip_on_devices("gpu", "tpu") + @jtu.skip_on_devices("tpu") def testQrBatching(self): shape = (10, 4, 5) dtype = np.float32 @@ -476,7 +476,7 @@ def args_maker(): self._CompileAndCheck(np.linalg.inv, args_maker, check_dtypes=True) # Regression test for incorrect type for eigenvalues of a complex matrix. - @jtu.skip_on_devices("gpu", "tpu") + @jtu.skip_on_devices("tpu") def testIssue669(self): def test(x): val, vec = np.linalg.eigh(x) @@ -499,9 +499,9 @@ class ScipyLinalgTest(jtu.JaxTestCase): def testLu(self, shape, dtype, rng): _skip_if_unsupported_type(dtype) args_maker = lambda: [rng(shape, dtype)] - - self._CheckAgainstNumpy(jsp.linalg.lu, osp.linalg.lu, args_maker, - check_dtypes=True, tol=1e-3) + x, = args_maker() + p, l, u = jsp.linalg.lu(x) + self.assertAllClose(x, onp.matmul(p, onp.matmul(l, u)), check_dtypes=True) self._CompileAndCheck(jsp.linalg.lu, args_maker, check_dtypes=True) # TODO(phawkins): figure out why this test fails on Travis and reenable. 
@@ -555,8 +555,13 @@ def testLuFactor(self, n, dtype, rng): _skip_if_unsupported_type(dtype) args_maker = lambda: [rng((n, n), dtype)] - self._CheckAgainstNumpy(jsp.linalg.lu_factor, osp.linalg.lu_factor, - args_maker, check_dtypes=True, tol=1e-3) + x, = args_maker() + lu, piv = jsp.linalg.lu_factor(x) + l = onp.tril(lu, -1) + onp.eye(n, dtype=dtype) + u = onp.triu(lu) + for i in range(n): + x[[i, piv[i]],] = x[[piv[i], i],] + self.assertAllClose(x, onp.matmul(l, u), check_dtypes=True, rtol=1e-3) self._CompileAndCheck(jsp.linalg.lu_factor, args_maker, check_dtypes=True) @parameterized.named_parameters(jtu.cases_from_list(
jax.numpy.linalg ops missing GPU implementation
I often hit messages like `"Singular value decomposition is only implemented on the CPU backend"` (https://github.com/google/jax/blob/27746b8c73f9ca9928da5da40b7382ae648a5f8d/jax/lax_linalg.py) for many `jax.numpy.linalg` ops. Examples I've hit so far are when calling `jax.numpy.linalg.svd` and `jax.numpy.linalg.eigh`. It would be nice to be able to run these on GPU. Thanks!
Thanks for raising this. Given your specific request here, what do you think about changing the issue title to be a request to get linalg support on GPU? That's probably easier for us to add. Done, thank you! I want to know if jax.numpy.linalg.inv ops can run on the GPU? As it happens, jax.numpy.linalg.inv should run on GPU right now! (It might not be terribly fast, since it's using a QR decomposition instead of an LU decomposition, and one that isn't necessarily that well tuned.) Another thing missing is `np.linalg.solve': ``` solve = np.linalg.solve(np.eye(3), np.ones(3)) Traceback (most recent call last): File "/opt/intellij-ue-2019.1/plugins/python/helpers/pydev/_pydevd_bundle/pydevd_exec2.py", line 3, in Exec exec(exp, global_vars, local_vars) File "<input>", line 1, in <module> File "/usr/local/google/_blaze_romann/513cef43ffae8d7478c0c7058e6a84e4/execroot/google3/blaze-out/k8-cuda9-py3-opt/bin/experimental/users/romann/ntk_tuner/train_and_eval.runfiles/google3/third_party/py/jax/numpy/linalg.py", line 237, in solve lu, pivots = lax_linalg.lu(a) File "/usr/local/google/_blaze_romann/513cef43ffae8d7478c0c7058e6a84e4/execroot/google3/blaze-out/k8-cuda9-py3-opt/bin/experimental/users/romann/ntk_tuner/train_and_eval.runfiles/google3/third_party/py/jax/lax_linalg.py", line 53, in lu lu, pivots = lu_p.bind(x) File "/usr/local/google/_blaze_romann/513cef43ffae8d7478c0c7058e6a84e4/execroot/google3/blaze-out/k8-cuda9-py3-opt/bin/experimental/users/romann/ntk_tuner/train_and_eval.runfiles/google3/third_party/py/jax/core.py", line 136, in bind return self.impl(*args, **kwargs) File "/usr/local/google/_blaze_romann/513cef43ffae8d7478c0c7058e6a84e4/execroot/google3/blaze-out/k8-cuda9-py3-opt/bin/experimental/users/romann/ntk_tuner/train_and_eval.runfiles/google3/third_party/py/jax/lax_linalg.py", line 358, in lu_impl lu, pivot = xla.apply_primitive(lu_p, operand) File "/usr/local/google/_blaze_romann/513cef43ffae8d7478c0c7058e6a84e4/execroot/google3/blaze-out/k8-cuda9-py3-opt/bin/experimental/users/romann/ntk_tuner/train_and_eval.runfiles/google3/third_party/py/jax/interpreters/xla.py", line 52, in apply_primitive compiled_fun = xla_primitive_callable(prim, *abstract_args, **params) File "/usr/local/google/_blaze_romann/513cef43ffae8d7478c0c7058e6a84e4/execroot/google3/blaze-out/k8-cuda9-py3-opt/bin/experimental/users/romann/ntk_tuner/train_and_eval.runfiles/google3/third_party/py/jax/util.py", line 174, in memoized_fun ans = cache[key] = fun(*args, **kwargs) File "/usr/local/google/_blaze_romann/513cef43ffae8d7478c0c7058e6a84e4/execroot/google3/blaze-out/k8-cuda9-py3-opt/bin/experimental/users/romann/ntk_tuner/train_and_eval.runfiles/google3/third_party/py/jax/interpreters/xla.py", line 58, in xla_primitive_callable built_c = primitive_computation(prim, *shapes, **params) File "/usr/local/google/_blaze_romann/513cef43ffae8d7478c0c7058e6a84e4/execroot/google3/blaze-out/k8-cuda9-py3-opt/bin/experimental/users/romann/ntk_tuner/train_and_eval.runfiles/google3/third_party/py/jax/util.py", line 174, in memoized_fun ans = cache[key] = fun(*args, **kwargs) File "/usr/local/google/_blaze_romann/513cef43ffae8d7478c0c7058e6a84e4/execroot/google3/blaze-out/k8-cuda9-py3-opt/bin/experimental/users/romann/ntk_tuner/train_and_eval.runfiles/google3/third_party/py/jax/interpreters/xla.py", line 69, in primitive_computation xla_result = translation_rule(prim)(c, *xla_args, **params) File 
"/usr/local/google/_blaze_romann/513cef43ffae8d7478c0c7058e6a84e4/execroot/google3/blaze-out/k8-cuda9-py3-opt/bin/experimental/users/romann/ntk_tuner/train_and_eval.runfiles/google3/third_party/py/jax/lax_linalg.py", line 363, in lu_translation_rule "LU decomposition is only implemented on the CPU backend") NotImplementedError: LU decomposition is only implemented on the CPU backend ``` There is now an LU decomposition implementation that works on GPU. However, it may not be the most performant (it's implemented in JAX itself). We still would do well to add a specialized GPU implementation that calls cuSolver or MAGMA. (FYI, slight overlap with https://github.com/google/jax/issues/723)
2019-08-02T15:20:26
google/jax
1106
google__jax-1106
[ "1101" ]
fd4b84bd955e8ed742f461c52903e26d1ad4a1b3
diff --git a/jax/lax/lax.py b/jax/lax/lax.py --- a/jax/lax/lax.py +++ b/jax/lax/lax.py @@ -3255,7 +3255,7 @@ def _reduction_computation(c, jaxpr, consts, init_value): reduce_p = standard_primitive(_reduce_shape_rule, _input_dtype, 'reduce', _reduce_translation_rule) -# batching.primitive_batchers[reduce_p] = _reduce_batch_rule # TODO(mattjj): test +batching.primitive_batchers[reduce_p] = _reduce_batch_rule def _reduce_sum_shape_rule(operand, axes, input_shape): diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -991,6 +991,8 @@ def _reduction_dims(a, axis): def _reduction_init_val(a, init_val): a_dtype = xla_bridge.canonicalize_dtype(_dtype(a)) + if a_dtype == 'bool': + return onp.array(init_val > 0, dtype=a_dtype) try: return onp.array(init_val, dtype=a_dtype) except OverflowError:
diff --git a/tests/lax_test.py b/tests/lax_test.py --- a/tests/lax_test.py +++ b/tests/lax_test.py @@ -1040,6 +1040,7 @@ def testTransposeAgainstNumpy(self, shape, dtype, perm, rng): for init_val, op, dtypes in [ (0, lax.add, default_dtypes), (1, lax.mul, default_dtypes), + (0, lax.max, all_dtypes), # non-monoidal (-onp.inf, lax.max, float_dtypes), (onp.iinfo(onp.int32).min, lax.max, [onp.int32]), # (onp.iinfo(onp.int64).min, lax.max, [onp.int64]), # TODO fails @@ -2591,14 +2592,15 @@ def testTranspose(self, shape, dtype, perm, bdims, rng): self._CheckBatching(op, 5, bdims, (shape,), dtype, rng) @parameterized.named_parameters(jtu.cases_from_list( - {"testcase_name": "_op={}_inshape={}_reducedims={}_bdims={}" + {"testcase_name": "_op={}_inshape={}_reducedims={}_initval={}_bdims={}" .format(op.__name__, jtu.format_shape_dtype_string(shape, dtype), dims, - bdims), + init_val, bdims), "op": op, "init_val": init_val, "shape": shape, "dtype": dtype, "dims": dims, "bdims": bdims, "rng": rng} for init_val, op, dtypes in [ (0, lax.add, default_dtypes), (1, lax.mul, default_dtypes), + (0, lax.max, all_dtypes), # non-monoidal (-onp.inf, lax.max, float_dtypes), (onp.iinfo(onp.int32).min, lax.max, [onp.int32]), (onp.iinfo(onp.int64).min, lax.max, [onp.int64]),
`vmap` fails for a reduction (e.g., `np.argmax`) over `bool`s On Google Cloud Colab running the latest release of JAX I can find (0.1.22, last modified on 2019-07-20): ```python !pip install --upgrade -q https://storage.googleapis.com/jax-wheels/cuda$(echo $CUDA_VERSION | sed -e 's/\.//' -e 's/\..*//')/jaxlib-0.1.22-cp36-none-linux_x86_64.whl !pip install --upgrade -q jax from __future__ import division from __future__ import print_function import jax import jax.numpy as np import numpy as onp jax.config.update('jax_platform_name', 'cpu') m = onp.random.random((5, 5)) m # array([[0.05789924, 0.78492625, 0.14877936, 0.86753036, 0.71233691], # [0.58521529, 0.6402027 , 0.61487041, 0.7442337 , 0.57715207], # [0.29544111, 0.91596755, 0.17300919, 0.10794559, 0.46030352], # [0.06554419, 0.13804503, 0.72298234, 0.79693837, 0.04571761], # [0.8495216 , 0.70473064, 0.68557481, 0.57669368, 0.516764 ]]) np.argmax(m > 0.5, -1) # DeviceArray([1, 0, 1, 2, 0], dtype=int32) jax.vmap(np.argmax)(1.0 * (m > 0.5)) # DeviceArray([1, 0, 1, 2, 0], dtype=int32) jax.vmap(np.argmax)(m > 0.5) ``` gives ``` --------------------------------------------------------------------------- KeyError Traceback (most recent call last) /usr/local/lib/python3.6/dist-packages/jax/interpreters/batching.py in get_primitive_batcher(p) 231 try: --> 232 return primitive_batchers[p] 233 except KeyError: KeyError: reduce During handling of the above exception, another exception occurred: NotImplementedError Traceback (most recent call last) 10 frames <ipython-input-32-f78d0b39613e> in <module>() ----> 1 jax.vmap(np.argmax)(m > 0.5) /usr/local/lib/python3.6/dist-packages/jax/api.py in batched_fun(*args, **kwargs) 491 in_flat, in_trees = unzip2(map(pytree_to_jaxtupletree, args)) 492 jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(f, in_trees) --> 493 out_flat = batching.batch(jaxtree_fun, in_flat, in_axes_, out_axes) 494 return build_tree(out_tree(), out_flat) 495 /usr/local/lib/python3.6/dist-packages/jax/interpreters/batching.py in batch(fun, in_vals, in_dims, out_dim_dst) 43 elif len(sizes) == 1: 44 sz = sizes.pop() ---> 45 return batch_transform(fun, sz, in_dims, out_dim_dst).call_wrapped(in_vals) 46 else: 47 raise TypeError("got inconsistent map dimension sizes: {}".format(sizes)) /usr/local/lib/python3.6/dist-packages/jax/linear_util.py in call_wrapped(self, *args, **kwargs) 145 146 del gen --> 147 ans = self.f(*args, **dict(self.params, **kwargs)) 148 del args 149 while stack: /usr/local/lib/python3.6/dist-packages/jax/numpy/lax_numpy.py in argmax(a, axis) 2011 a = ravel(a) 2012 axis = 0 -> 2013 return _argminmax(max, a, axis) 2014 2015 /usr/local/lib/python3.6/dist-packages/jax/numpy/lax_numpy.py in _argminmax(op, a, axis) 2028 idxs = onp.arange(a.shape[axis]).reshape(shape) 2029 maxval = onp.iinfo(xla_bridge.canonicalize_dtype(idxs.dtype)).max -> 2030 mask_idxs = where(lax._eq_meet(a, op(a, axis, keepdims=True)), idxs, maxval) 2031 return min(mask_idxs, axis) 2032 /usr/local/lib/python3.6/dist-packages/jax/numpy/lax_numpy.py in reduction(a, axis, dtype, out, keepdims) 956 if _dtype(a) != result_dtype: 957 a = lax.convert_element_type(a, result_dtype) --> 958 result = lax.reduce(a, _reduction_init_val(a, init_val), op, dims) 959 if keepdims: 960 shape_with_singletons = lax.subvals(shape(a), zip(dims, (1,) * len(dims))) /usr/local/lib/python3.6/dist-packages/jax/lax/lax.py in reduce(operand, init_value, computation, dimensions) 791 jaxpr, consts = _reduction_jaxpr(computation, init_value) 792 return reduce_p.bind(operand, 
init_value, computation=computation, --> 793 jaxpr=jaxpr, consts=consts, dimensions=tuple(dimensions)) 794 795 def _reduction_jaxpr(computation, init_value): /usr/local/lib/python3.6/dist-packages/jax/core.py in bind(self, *args, **kwargs) 145 146 tracers = map(top_trace.full_raise, args) --> 147 out_tracer = top_trace.process_primitive(self, tracers, kwargs) 148 return full_lower(out_tracer) 149 /usr/local/lib/python3.6/dist-packages/jax/interpreters/batching.py in process_primitive(self, primitive, tracers, params) 121 else: 122 # TODO(mattjj,phawkins): if no rule implemented, could vmap-via-map here --> 123 batched_primitive = get_primitive_batcher(primitive) 124 val_out, dim_out = batched_primitive(vals_in, dims_in, **params) 125 return BatchTracer(self, val_out, dim_out) /usr/local/lib/python3.6/dist-packages/jax/interpreters/batching.py in get_primitive_batcher(p) 233 except KeyError: 234 raise NotImplementedError( --> 235 "Batching rule for '{}' not implemented".format(p)) 236 237 def defvectorized(prim): NotImplementedError: Batching rule for 'reduce' not implemented ```
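A possible interim workaround (not from the report, and assuming the same imports and the `m` defined in the snippet above): cast the boolean array to an integer dtype inside the mapped function, in the same spirit as the `1.0 * (m > 0.5)` call that already works, so the batched reduction lowers to a max-reduction primitive that does have a batching rule.

```python
# Hypothetical workaround sketch: avoid the bool-dtype reduce under vmap by
# casting to int32 inside the mapped function.
jax.vmap(lambda row: np.argmax(row.astype(np.int32)))(m > 0.5)
# expected to agree with np.argmax(m > 0.5, -1)
```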
2019-08-04T04:36:40
google/jax
1,143
google__jax-1143
[ "1131" ]
a6ec5a916cae5b71e0232633b89e7490f77d81ef
diff --git a/jax/interpreters/xla.py b/jax/interpreters/xla.py --- a/jax/interpreters/xla.py +++ b/jax/interpreters/xla.py @@ -79,11 +79,17 @@ def primitive_computation(prim, *shapes, **params): c = xb.make_computation_builder("primitive_computation") platform = xb.get_backend().platform xla_args = map(c.ParameterWithShape, shapes) - try: - rule = backend_specific_translations[platform].get(prim) or translations[prim] - except KeyError: + if prim in backend_specific_translations[platform]: + rule = backend_specific_translations[platform][prim] + rule(c, *xla_args, **params) # return val set as a side-effect on c + elif prim in translations: + rule = translations[prim] + rule(c, *xla_args, **params) # return val set as a side-effect on c + elif prim in initial_style_translations: + rule = initial_style_translations[prim] + rule(c, AxisEnv(1, [], []), *xla_args, **params) # side-effect on c + else: raise NotImplementedError("XLA translation rule for {} not found".format(prim)) - rule(c, *xla_args, **params) # return val set as a side-effect on c try: return c.Build() except RuntimeError as e: diff --git a/jax/lax/lax_control_flow.py b/jax/lax/lax_control_flow.py --- a/jax/lax/lax_control_flow.py +++ b/jax/lax/lax_control_flow.py @@ -156,16 +156,6 @@ def while_loop(cond_fun, body_fun, init_val): return build_tree(out_tree(), out_flat) -def _while_loop_impl(init_val, cond_consts, body_consts, aval_out, cond_jaxpr, - body_jaxpr): - cond_fun = partial(core.eval_jaxpr, cond_jaxpr, cond_consts, ()) - body_fun = partial(core.eval_jaxpr, body_jaxpr, body_consts, ()) - - val = init_val - while cond_fun(val): - val = body_fun(val) - return val - def _while_loop_abstract_eval(init_val, cond_consts, body_consts, aval_out, cond_jaxpr, body_jaxpr): return _maybe_tracer_tuple_to_abstract_tuple(aval_out) @@ -261,7 +251,7 @@ def _jaxtupletree_select(pred, on_true, on_false): while_p = lax.Primitive('while') -while_p.def_impl(_while_loop_impl) +while_p.def_impl(partial(xla.apply_primitive, while_p)) while_p.def_abstract_eval(_while_loop_abstract_eval) xla.initial_style_translations[while_p] = _while_loop_translation_rule batching.primitive_batchers[while_p] = _while_loop_batching_rule
Step length dependent compilation time of lax.scan. This is related to #1130 and was originally brought to my attention by @cpgoodri. Here is an example repro run in google cloud colab GPU. ```python !pip install --upgrade -q https://storage.googleapis.com/jax-wheels/cuda$(echo $CUDA_VERSION | sed -e 's/\.//' -e 's/\..*//')/jaxlib-$(pip search jaxlib | grep -oP '[0-9\.]+' | head -n 1)-cp36-none-linux_x86_64.whl !pip install --upgrade -q jax import jax.numpy as np from jax import lax from jax import jit def f(x, s): for i in range(2000): x = x + 1.0 return x, np.array([0]) def g(x, steps): xs = lax.iota(np.int32, steps) return lax.scan(f, x, xs)[0] q = jit(g, static_argnums=(1,)) for p in range(1, 14): x = 2 ** p time1 = time.clock() temp = q(np.array(2.0), x).block_until_ready() time2 = time.clock() temp = q(np.array(2.0), x).block_until_ready() time3 = time.clock() first_run_time = (time2 - time1) second_run_time = (time3 - time2) compile_time = first_run_time - second_run_time print('x = {}, compile time = {} (run times = {}, {})'.format( x, compile_time, first_run_time, second_run_time)) ``` which gives the output, ``` x = 2, compile time = 0.9660040000001118 (run times = 0.967151000000058, 0.0011469999999462743) x = 4, compile time = 0.761009999999942 (run times = 0.7619959999999537, 0.0009860000000116997) x = 8, compile time = 1.0294560000000956 (run times = 1.0305190000000266, 0.0010629999999309803) x = 16, compile time = 1.0474190000001045 (run times = 1.0485160000000633, 0.0010969999999588254) x = 32, compile time = 0.8571110000001454 (run times = 0.8583040000000892, 0.0011929999999438223) x = 64, compile time = 1.1593259999999646 (run times = 1.1608429999999998, 0.0015170000000352957) x = 128, compile time = 1.331446999999912 (run times = 1.333646999999928, 0.002200000000016189) x = 256, compile time = 1.6889869999999974 (run times = 1.6926300000000083, 0.003643000000010943) x = 512, compile time = 2.2191810000000487 (run times = 2.225097000000005, 0.005915999999956512) x = 1024, compile time = 3.9208149999999478 (run times = 3.9315219999999726, 0.010707000000024891) x = 2048, compile time = 6.794198999999935 (run times = 6.814807999999971, 0.02060900000003585) x = 4096, compile time = 13.265449999999987 (run times = 13.306156999999985, 0.040706999999997606) x = 8192, compile time = 24.92249199999992 (run times = 25.00054499999999, 0.0780530000000681) ```
Thanks for raising this. Can you dump the HLO? That will tell us if we're staging out something different to XLA for these different cases (i.e. it's fixable on the JAX side) or if this is a phenomenon related to XLA compile times of a fixed-size HLO graph (i.e. it would be fixed on the XLA side). Two ways to dump the HLO: use `jax.xla_computation` or set the environment variable `XLA_FLAGS="--xla_dump_to=DIRECTORY_PATH"`. Googlers can read more at [go/jax-dump-xla-hlo](go/jax-dump-xla-hlo) thanks to @levskaya. Following up on this, after looking into this with the help of @levskaya and @mattjj, it seems like an XLA issue. I reported a bug to XLA:GPU. @hawkinsp found that this is a JAX bug after all: we're hitting the "impl rule" when we're not supposed to! This could explain #1130 as well. I'm on it! I think I see the issue, though this explanation isn't going to make much sense unless you're already deeply read into the JAX internals: when `jit` a `scan`, we actually do [partial evaluation which "unzips" the scan into two scans](https://github.com/google/jax/blob/a6ec5a916cae5b71e0232633b89e7490f77d81ef/jax/lax/lax_control_flow.py#L656), one that evaluates everything known at compile time (the "jaxpr_1 side") and one staged out to a jaxpr (and ultimately an XLA computation) to be evaluated later (the "jaxpr_2 side"). The computation done by that first scan is essentially compile-time constant folding. Under a `jit` like this one, we actually stage all the real work out to XLA: there's no real constant folding / partial evaluation to do. That is, the first "jaxpr_1" scan is trivial: it's just shuffling empty tuples around. The problem here is that even when the first scan is trivial, [we're still executing it](https://github.com/google/jax/blob/a6ec5a916cae5b71e0232633b89e7490f77d81ef/jax/lax/lax_control_flow.py#L687-L688). That means we're still hitting the while_loop impl, and those are currently slow (and in any case running a number of steps that scales with the length of the scan). That's why JAX is doing work "at compile time" that scales with the length of the scanned array. I'm not sure yet what the right fix is, but one option is just to notice when the jaxpr_1 computation is trivial and not run it in that case! Here's a look at how the jaxpr_1 side is trivial: ``` ipdb> l 684 ys_aval = _promote_aval_rank(length, y_aval) 685 out_aval = core.AbstractTuple((carry_aval, ys_aval)) 686 out_pv = _put_known_pvs(sc_out, out_aval) 687 688 out_carry, (ys, residuals) = scan_p.bind( --> 689 *in_consts, forward=forward, length=length, jaxpr=jaxpr_1) 690 out_const = core.pack((out_carry, ys)) 691 residuals_tracer = trace.new_instantiated_const(core.pack(residuals)) 692 d, c, a = lifted_tracers 693 new_tracers = (d, c, (a, residuals_tracer)) 694 eqn = core.JaxprEqn(new_tracers, None, scan_p, (), True, False, ipdb> p jaxpr_1 { lambda d f ; ; a b c. let (e) = id a g = pack e f h = pack d g in h } ipdb> in_consts[0] # argument for the `a` parameter JaxTuple(DeviceArray([0], dtype=int32)) ipdb> jaxpr_1.literals # arguments for the `d` and `f` parameters (*, JaxTuple()) ``` So that's what we're running for 8192 iterations in a slow op-by-op mode while loop at compile time...
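For reference, a minimal sketch (not from the thread) of the environment-variable route mentioned above, with a simplified scan body and a made-up dump directory; the flag has to be set before JAX initializes its XLA backend, so set it before the import (or in the shell before launching Python).

```python
# Sketch: ask XLA to dump the HLO it receives for two scan lengths, so we can
# compare whether what JAX stages out grows with the length.
import os
os.environ["XLA_FLAGS"] = "--xla_dump_to=/tmp/jax_hlo_dump"  # hypothetical path

import jax.numpy as np
from jax import lax, jit

def f(x, s):
    return x + 1.0, np.array([0])

def g(x, steps):
    xs = lax.iota(np.int32, steps)
    return lax.scan(f, x, xs)[0]

jit(g, static_argnums=(1,))(np.array(2.0), 8)      # writes one HLO dump
jit(g, static_argnums=(1,))(np.array(2.0), 8192)   # writes another; compare sizes
```

If the two dumped graphs are essentially the same size, the length-dependent cost is on the XLA side rather than in what JAX stages out.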
2019-08-08T16:05:04
google/jax
1,144
google__jax-1144
[ "1139" ]
5b5ad6e81e327fd9e4c2c20944e702667660de04
diff --git a/jax/lax_linalg.py b/jax/lax_linalg.py --- a/jax/lax_linalg.py +++ b/jax/lax_linalg.py @@ -29,7 +29,7 @@ from jax.interpreters import xla from jax.interpreters import ad from jax.interpreters import batching -from jax.util import partial +from jax.util import partial, prod from jax.abstract_arrays import ShapedArray from jax.core import Primitive from jax.lax import (standard_primitive, standard_unop, binop_dtype_rule, @@ -371,7 +371,7 @@ def triangular_solve_batching_rule(batched_args, batch_dims, left_side, batching.primitive_batchers[triangular_solve_p] = triangular_solve_batching_rule -def triangular_solve_cpu_translation_rule( +def _triangular_solve_cpu_translation_rule( c, a, b, left_side, lower, transpose_a, conjugate_a, unit_diagonal): shape = c.GetShape(a) dtype = shape.element_type().type @@ -389,8 +389,30 @@ def triangular_solve_cpu_translation_rule( return c.TriangularSolve(a, b, left_side, lower, transpose_a, conjugate_a, unit_diagonal) -xla.backend_specific_translations['cpu'][triangular_solve_p] = triangular_solve_cpu_translation_rule +xla.backend_specific_translations['cpu'][triangular_solve_p] = \ + _triangular_solve_cpu_translation_rule +def _triangular_solve_gpu_translation_rule( + c, a, b, left_side, lower, transpose_a, conjugate_a, unit_diagonal): + shape = c.GetShape(a) + dtype = shape.element_type().type + dims = shape.dimensions() + m, n = dims[-2:] + batch = prod(dims[:-2]) + if batch > 1 and m <= 32 and n <= 32: + if conjugate_a and not transpose_a: + a = c.Conj(a) + conjugate_a = False + return cusolver.trsm( + c, a, b, left_side, lower, transpose_a, conjugate_a, unit_diagonal) + else: + # Use the XLA implementation for unbatched triangular_solve. + return c.TriangularSolve(a, b, left_side, lower, transpose_a, conjugate_a, + unit_diagonal) + +if cusolver: + xla.backend_specific_translations['gpu'][triangular_solve_p] = \ + _triangular_solve_gpu_translation_rule # LU decomposition diff --git a/jax/numpy/linalg.py b/jax/numpy/linalg.py --- a/jax/numpy/linalg.py +++ b/jax/numpy/linalg.py @@ -228,6 +228,7 @@ def qr(a, mode="reduced"): @_wraps(onp.linalg.solve) +@jit def solve(a, b): a, b = _promote_arg_dtypes(np.asarray(a), np.asarray(b)) a_shape = np.shape(a) diff --git a/jaxlib/cusolver.py b/jaxlib/cusolver.py --- a/jaxlib/cusolver.py +++ b/jaxlib/cusolver.py @@ -16,10 +16,20 @@ from __future__ import division from __future__ import print_function +import operator + import numpy as np +from six.moves import reduce from jaxlib import xla_client +try: + from jaxlib import cublas_kernels + for _name, _value in cublas_kernels.registrations().items(): + xla_client.register_custom_call_target(_name, _value, platform="gpu") +except ImportError: + pass + try: from jaxlib import cusolver_kernels for _name, _value in cusolver_kernels.registrations().items(): @@ -27,6 +37,7 @@ except ImportError: pass + _Shape = xla_client.Shape @@ -43,6 +54,50 @@ def _real_type(dtype): else: raise NotImplementedError("Unsupported dtype {}".format(dtype)) +_prod = lambda xs: reduce(operator.mul, xs, 1) + +def trsm(c, a, b, left_side=False, lower=False, trans_a=False, conj_a=False, + diag=False): + """Batched triangular solve. 
+ + XLA implements unbatched triangular solve directly, so we need only implement + the batched case.""" + b_shape = c.GetShape(b) + dtype = b_shape.element_type() + dims = b_shape.dimensions() + assert len(dims) >= 2 + m, n = dims[-2:] + batch_dims = tuple(dims[:-2]) + num_bd = len(batch_dims) + batch = _prod(batch_dims) + k = m if left_side else n + + a_shape = c.GetShape(a) + if (batch_dims + (k, k) != a_shape.dimensions() or + a_shape.element_type() != dtype): + raise ValueError("Argument mismatch for trsm, got {} and {}".format( + a_shape, b_shape)) + + if conj_a and not trans_a: + raise NotImplementedError("Conjugation without transposition not supported") + + lwork, opaque = cublas_kernels.build_trsm_batched_descriptor( + np.dtype(dtype), batch, m, n, left_side, lower, trans_a, conj_a, diag) + layout = (num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1)) + out = c.CustomCall( + b"cublas_trsm_batched", + operands=(a, b), + shape_with_layout=_Shape.tuple_shape(( + _Shape.array_shape(dtype, b_shape.dimensions(), layout), + _Shape.array_shape(np.dtype(np.int8), (lwork,), (0,)), + _Shape.array_shape(np.dtype(np.int8), (lwork,), (0,)))), + operand_shapes_with_layout=( + _Shape.array_shape(dtype, a_shape.dimensions(), layout), + _Shape.array_shape(dtype, b_shape.dimensions(), layout), + ), + opaque=opaque) + return c.GetTupleElement(out, 0) + def getrf(c, a): """LU decomposition.""" @@ -53,32 +108,39 @@ def getrf(c, a): m, n = dims[-2:] batch_dims = tuple(dims[:-2]) num_bd = len(batch_dims) - b = 1 - for d in batch_dims: - b *= d + batch = _prod(batch_dims) + + if batch > 1 and m == n and m // batch <= 128: + lwork, opaque = cublas_kernels.build_getrf_batched_descriptor( + np.dtype(dtype), batch, m) + workspace = _Shape.array_shape(np.dtype(np.int8), (lwork,), (0,)) + kernel = b"cublas_getrf_batched" + else: + lwork, opaque = cusolver_kernels.build_getrf_descriptor( + np.dtype(dtype), batch, m, n) + workspace = _Shape.array_shape(dtype, (lwork,), (0,)) + kernel = b"cusolver_getrf" - lwork, opaque = cusolver_kernels.build_getrf_descriptor( - np.dtype(dtype), b, m, n) out = c.CustomCall( - b"cusolver_getrf", + kernel, operands=(a,), shape_with_layout=_Shape.tuple_shape(( _Shape.array_shape( dtype, batch_dims + (m, n), (num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1))), - _Shape.array_shape(dtype, (lwork,), (0,)), _Shape.array_shape( np.dtype(np.int32), batch_dims + (min(m, n),), tuple(range(num_bd, -1, -1))), _Shape.array_shape( np.dtype(np.int32), batch_dims, tuple(range(num_bd - 1, -1, -1))), + workspace, )), operand_shapes_with_layout=(_Shape.array_shape( dtype, batch_dims + (m, n), (num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1))),), opaque=opaque) - return (c.GetTupleElement(out, 0), c.GetTupleElement(out, 2), - c.GetTupleElement(out, 3)) + return (c.GetTupleElement(out, 0), c.GetTupleElement(out, 1), + c.GetTupleElement(out, 2)) def syevd(c, a, lower=False): @@ -92,19 +154,17 @@ def syevd(c, a, lower=False): assert m == n batch_dims = tuple(dims[:-2]) num_bd = len(batch_dims) - b = 1 - for d in batch_dims: - b *= d + batch = _prod(batch_dims) layout = (num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1)) if n <= 32: kernel = b"cusolver_syevj" lwork, opaque = cusolver_kernels.build_syevj_descriptor( - np.dtype(dtype), lower, b, n) + np.dtype(dtype), lower, batch, n) else: kernel = b"cusolver_syevd" lwork, opaque = cusolver_kernels.build_syevd_descriptor( - np.dtype(dtype), lower, b, n) + np.dtype(dtype), lower, batch, n) eigvals_type = _real_type(dtype) out = 
c.CustomCall(
slow linalg.solve Dear jax team, I'd like to perform a lot (1e5-1e7) `np.linalg.solve` operations on small (3x3) matrices. jax is a lot slower than the numpy version: 17s in jax vs. 382ms in numpy. Is 3x3 too small to be effective on GPU? Or is the `solve` method just not as efficiently implemented for such a task as the numpy version? ```python import jax import jax.numpy as np import numpy as onp def to_gpu(arr): return jax.device_put(arr.astype(np.float32)) num_matrices = int(1e6) sidelen = 3 A = to_gpu(onp.random.normal(size=(num_matrices, sidelen, sidelen))) b = to_gpu(onp.random.normal(size=(num_matrices, sidelen))) solve_jit = jax.jit(np.linalg.solve) solve_vmap = jax.vmap(np.linalg.solve) solve_vmap_jit = jax.jit(jax.vmap(np.linalg.solve)) # run jit'ed versions once _ = solve_jit(A, b) _ = solve_vmap_jit(A, b) %time np.linalg.solve(A, b).block_until_ready() # 16.5 s %time solve_jit(A, b).block_until_ready() # 17.0 s %time solve_vmap(A, b).block_until_ready() # 17.0 s %time solve_vmap_jit(A, b).block_until_ready() # 15.3 s %time onp.linalg.solve(A, b) # 383 ms ```
We didn't spend much time optimizing batched operations yet, especially on GPU. I suspect we should be calling `cublasSgetrfBatched` in this case rather than `cusolverDnSgetrf`, and similarly we should be calling a batched triangular solve.

I have a change that adds support for batched LU decompositions and triangular solves on GPU to JAX. I now see on a GTX 1080:
```
In [16]: %time solve_jit(A, b).block_until_ready()  # 17.0 s
CPU times: user 21.4 ms, sys: 3.13 ms, total: 24.5 ms
Wall time: 23.5 ms
```
which is presumably more like what you wanted! I'll clean it up and get it submitted.

Thank you very much for the explanation and the incredibly fast help and fix! Is there an under-the-hood difference between using the "builtin" batching of `solve` and an explicit `vmap`? Also, is `jit`'ing a `vmap` generally useful/advisable?
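On the builtin-batching versus explicit-`vmap` question, one low-effort check (not from the thread, and worth verifying on a real install) is to compare what each spelling stages out; the expectation is that both lower to the same batched `lu`/`triangular_solve` primitives, with `jit` mainly fusing the surrounding element-wise work.

```python
# Rough check, assuming the A and b defined in the report above.
import jax
print(jax.make_jaxpr(np.linalg.solve)(A, b))            # "builtin" batched call
print(jax.make_jaxpr(jax.vmap(np.linalg.solve))(A, b))  # explicit vmap
```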
2019-08-08T17:04:22
google/jax
1,152
google__jax-1152
[ "1151" ]
d9c938a48f7e4e0c4126abc074fb7fa53ac44a39
diff --git a/jax/lax_linalg.py b/jax/lax_linalg.py --- a/jax/lax_linalg.py +++ b/jax/lax_linalg.py @@ -100,6 +100,9 @@ def cholesky_jvp_rule(primals, tangents): sigma_dot, = tangents L = np.tril(cholesky_p.bind(x)) + if sigma_dot is ad_util.zero: + return L, ad_util.zero + # Forward-mode rule from https://arxiv.org/pdf/1602.07527.pdf def phi(X): l = np.tril(X) @@ -256,7 +259,12 @@ def eigh_jvp_rule(primals, tangents, lower): # https://people.orie.cornell.edu/aslewis/publications/99-clarke.pdf a, = primals a_dot, = tangents + v, w = eigh_p.bind(symmetrize(a), lower=lower) + + if a_dot is ad_util.zero: + return core.pack((v, w)), ad.TangentTuple(ad_util.zero, ad_util.zero) + # for complex numbers we need eigenvalues to be full dtype of v, a: w = w.astype(a.dtype) eye_n = np.eye(a.shape[-1], dtype=a.dtype) @@ -324,6 +332,8 @@ def triangular_solve_shape_rule(a, b, left_side=False, **unused_kwargs): def triangular_solve_jvp_rule_a( g_a, ans, a, b, left_side, lower, transpose_a, conjugate_a, unit_diagonal): + if g_a is ad_util.zero: + return ad_util.zero k = 1 if unit_diagonal else 0 g_a = np.tril(g_a, k=-k) if lower else np.triu(g_a, k=k) g_a = lax.neg(g_a) @@ -524,13 +534,19 @@ def _lu_jvp_rule(primals, tangents): a_dot, = tangents lu, pivots = lu_p.bind(a) + if a_dot is ad_util.zero: + return (core.pack((lu, pivots)), + ad.TangentTuple((ad_util.zero, ad_util.zero))) + a_shape = np.shape(a) m, n = a_shape[-2:] dtype = lax.dtype(a) k = min(m, n) permutation = lu_pivots_to_permutation(pivots, m) - x = a_dot[..., permutation, :] + batch_dims = a_shape[:-2] + iotas = np.ix_(*(lax.iota(np.int32, b) for b in batch_dims + (1,))) + x = a_dot[iotas[:-1] + (permutation, slice(None))] # Differentiation of Matrix Functionals Using Triangular Factorization # F. R. De Hoog, R. S. Anderssen, and M. A. Lukas @@ -554,7 +570,6 @@ def _lu_jvp_rule(primals, tangents): u_padding[-2] = (0, n - k, 0) u = lax.pad(np.triu(lu[..., :k, :]), zero, u_padding) + u_eye - la = triangular_solve(l, x, left_side=True, transpose_a=False, lower=True, unit_diagonal=True) lau = triangular_solve(u, la, left_side=False, transpose_a=False, @@ -663,10 +678,12 @@ def qr_abstract_eval(operand, full_matrices): def qr_jvp_rule(primals, tangents, full_matrices): # See j-towns.github.io/papers/qr-derivative.pdf for a terse derivation. 
x, = primals - if full_matrices or np.shape(x)[-2] < np.shape(x)[-1]: - raise NotImplementedError dx, = tangents q, r = qr_p.bind(x, full_matrices=False) + if dx is ad_util.zero: + return core.pack((q, r)), ad.TangentTuple(ad_util.zero, ad_util.zero) + if full_matrices or np.shape(x)[-2] < np.shape(x)[-1]: + raise NotImplementedError dx_rinv = triangular_solve(r, dx) # Right side solve by default qt_dx_rinv = np.matmul(_T(q), dx_rinv) qt_dx_rinv_lower = np.tril(qt_dx_rinv, -1) @@ -717,14 +734,19 @@ def svd_abstract_eval(operand, full_matrices, compute_uv): return core.AbstractTuple((s, u, vt)) def svd_jvp_rule(primals, tangents, full_matrices, compute_uv): - if full_matrices: - #TODO: implement full matrices case, documented here: https://people.maths.ox.ac.uk/gilesm/files/NA-08-01.pdf - raise NotImplementedError("Singular value decomposition JVP not implemented for full matrices") - A, = primals dA, = tangents s, U, Vt = svd_p.bind(A, full_matrices=False, compute_uv=True) + if dA is ad_util.zero: + return (core.pack((s, U, Vt)), + ad.TangentTuple(ad_util.zero, ad_util.zero, ad_util.zero)) + + if full_matrices: + # TODO: implement full matrices case, documented here: https://people.maths.ox.ac.uk/gilesm/files/NA-08-01.pdf + raise NotImplementedError( + "Singular value decomposition JVP not implemented for full matrices") + k = s.shape[-1] Ut, V = np.conj(U).T, np.conj(Vt).T s_dim = s[..., None, :]
diff --git a/tests/linalg_test.py b/tests/linalg_test.py --- a/tests/linalg_test.py +++ b/tests/linalg_test.py @@ -27,6 +27,7 @@ from absl.testing import absltest from absl.testing import parameterized +import jax from jax import jit, grad, jvp, vmap from jax import numpy as np from jax import scipy as jsp @@ -486,6 +487,17 @@ def test(x): xc = onp.eye(3, dtype=onp.complex) self.assertAllClose(xc, grad_test_jc(xc), check_dtypes=True) + def testIssue1151(self): + A = np.array(onp.random.randn(100, 3, 3), dtype=np.float32) + b = np.array(onp.random.randn(100, 3), dtype=np.float32) + x = np.linalg.solve(A, b) + self.assertAllClose(vmap(np.dot)(A, x), b, atol=1e-3, rtol=1e-3, + check_dtypes=True) + jac0 = jax.jacobian(np.linalg.solve, argnums=0)(A, b) + jac1 = jax.jacobian(np.linalg.solve, argnums=1)(A, b) + jac0 = jax.jacobian(np.linalg.solve, argnums=0)(A[0], b[0]) + jac1 = jax.jacobian(np.linalg.solve, argnums=1)(A[0], b[0]) + class ScipyLinalgTest(jtu.JaxTestCase): @@ -515,14 +527,15 @@ def testLuOfSingularMatrixReturnsNans(self): {"testcase_name": "_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)), "shape": shape, "dtype": dtype, "rng": rng} - for shape in [(1, 1), (4, 5), (10, 5), (10, 10)] + for shape in [(1, 1), (4, 5), (10, 5), (10, 10), (6, 7, 7)] for dtype in float_types + complex_types for rng in [jtu.rand_default()])) @jtu.skip_on_devices("tpu") # TODO(phawkins): precision problems on TPU. def testLuGrad(self, shape, dtype, rng): _skip_if_unsupported_type(dtype) a = rng(shape, dtype) - jtu.check_grads(jsp.linalg.lu, (a,), 2, atol=5e-2, rtol=1e-1) + lu = vmap(jsp.linalg.lu) if len(shape) > 2 else jsp.linalg.lu + jtu.check_grads(lu, (a,), 2, atol=5e-2, rtol=1e-1) @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name":
No autodiff on linalg.solve Dear jax team, after recent improvements to `linalg.solve` (thanks again!) I found that it has some issues with being autodiff'ed. I have not yet digged into the error messages, but they rather sound like a minute error in handling the shapes than some fundamental limitation. ```python import jax import jax.numpy as np import numpy as onp @jax.jit def solve(A, b): return np.linalg.solve(A, b) def to_gpu(arr): return jax.device_put(arr.astype(onp.float32)) A = to_gpu(onp.random.rand(100, 3, 3)) b = to_gpu(onp.random.rand(100, 3)) x = solve(A, b) assert onp.allclose(jax.vmap(np.dot)(A, x), b, atol=1e-4, rtol=1e-4) print("# BATCHED") try: jac0 = jax.jacobian(solve, argnums=0)(A, b) # error except Exception as e: print(e) # triangular_solve requires both arguments to have the same number of dimensions and equal batch dimensions, got (100, 3, 3) and (100, 100, 3, 3) try: jac1 = jax.jacobian(solve, argnums=1)(A, b) # error except Exception as e: print(e) # 'Zero' object is not subscriptable print("# SINGLE") try: jac0 = jax.jacobian(solve, argnums=0)(A[0], b[0]) # fine except Exception as e: print(e) try: jac1 = jax.jacobian(solve, argnums=1)(A[0], b[0]) # error except Exception as e: print(e) # 'Zero' object is not subscriptable ```
2019-08-09T14:59:04
google/jax
1,171
google__jax-1171
[ "1170" ]
39b01564d4c6776960a45898bdcd8b17f517469a
diff --git a/jax/api.py b/jax/api.py --- a/jax/api.py +++ b/jax/api.py @@ -711,7 +711,7 @@ def pmap(fun, axis_name=None): @wraps(fun) def f_pmapped(*args, **kwargs): - axis_size = _pmap_axis_size(args) + axis_size = _pmap_axis_size((args, kwargs)) f = lu.wrap_init(fun) args_flat, in_tree = tree_flatten((args, kwargs)) flat_fun, out_tree = flatten_fun_leafout(f, in_tree)
diff --git a/tests/batching_test.py b/tests/batching_test.py --- a/tests/batching_test.py +++ b/tests/batching_test.py @@ -955,6 +955,14 @@ def testIndexAddBatchedIndexesOnly(self): result = vmap(f, (None, 0, None))(onp.zeros((10,)), onp.arange(10,), 1.) self.assertAllClose(result, onp.eye(10), check_dtypes=False) + def testIssue1170(self): + def f(index1, index2): + return np.arange(36).reshape(6, 6)[index1, index2] + g = jax.jit(jax.pmap(f)) + ans = g(index1=onp.asarray([1]), index2=onp.asarray([2])) + expected = g(onp.asarray([1]), onp.asarray([2])) + self.assertAllClose(ans, expected, check_dtypes=True) + if __name__ == '__main__': absltest.main()
Keyword arguments break pmap
This code
```python
def f(index1, index2):
  return np.arange(36).reshape(6, 6)[index1, index2]

g = jax.jit(jax.pmap(f))
g(index1=onp.asarray([1]), index2=onp.asarray([2]))
```
causes this error:
```
google3/third_party/py/jax/api.py in _pmap_axis_size(args)
    724     return next(_axis_size(leaf) for arg in args for leaf in tree_leaves(arg))
    725   except StopIteration:
--> 726     raise ValueError("pmap requires a leading axis to map over.")
    727
    728 def _axis_size(x):

ValueError: pmap requires a leading axis to map over.
```
whereas the exact same code without the kwargs works correctly:
```python
def f(index1, index2):
  return np.arange(36).reshape(6, 6)[index1, index2]

g = jax.jit(jax.pmap(f))
g(onp.asarray([1]), onp.asarray([2]))
```
which nicely prints
```
DeviceArray([8], dtype=int32)
```
as expected.
I think the underlying issue is that [we only look at `args` to determine the axis size](https://github.com/google/jax/blob/39b01564d4c6776960a45898bdcd8b17f517469a/jax/api.py#L714), even though once the axis size is determined we handle kwargs correctly.
2019-08-13T01:04:18
google/jax
1,175
google__jax-1175
[ "1172" ]
744216f13fefc1de6295467e99aac32b180cf164
diff --git a/jax/random.py b/jax/random.py --- a/jax/random.py +++ b/jax/random.py @@ -32,7 +32,7 @@ from . import numpy as np from . import tree_util from .api import custom_transforms, defjvp, jit, vmap -from .numpy.lax_numpy import _constant_like, asarray +from .numpy.lax_numpy import _constant_like, asarray, stack from jax.lib import xla_bridge from jax import core from jax.scipy.special import logit @@ -123,38 +123,22 @@ def apply_round(v, rot): else: x = list(np.split(count.ravel(), 2)) - rotations = onp.uint32([13, 15, 26, 6, 17, 29, 16, 24]) - ks = [key1, key2, key1 ^ key2 ^ onp.uint32(0x1BD11BDA)] + rotations = onp.array([[13, 15, 26, 6], [17, 29, 16, 24]], dtype=onp.uint32) + ks = stack([key1, key2, key1 ^ key2 ^ onp.uint32(0x1BD11BDA)]) + idxs = onp.array([[1, 2], [2, 0], [0, 1], [1, 2], [2, 0]]) x[0] = x[0] + ks[0] x[1] = x[1] + ks[1] - for r in rotations[:4]: - x = apply_round(x, r) - x[0] = x[0] + ks[1] - x[1] = x[1] + ks[2] + onp.uint32(1) + get = partial(lax.dynamic_index_in_dim, keepdims=False) - for r in rotations[4:]: - x = apply_round(x, r) - x[0] = x[0] + ks[2] - x[1] = x[1] + ks[0] + onp.uint32(2) - - for r in rotations[:4]: - x = apply_round(x, r) - x[0] = x[0] + ks[0] - x[1] = x[1] + ks[1] + onp.uint32(3) - - for r in rotations[4:]: - x = apply_round(x, r) - x[0] = x[0] + ks[1] - x[1] = x[1] + ks[2] + onp.uint32(4) - - for r in rotations[:4]: - x = apply_round(x, r) - x[0] = x[0] + ks[2] - x[1] = x[1] + ks[0] + onp.uint32(5) - - out = np.concatenate(x) + def step(i, x): + for r in get(rotations, i % 2): + x = apply_round(x, r) + i0, i1 = get(idxs, i) + return [x[0] + ks[i0], + x[1] + ks[i1] + asarray(i + 1, dtype=onp.uint32)] + out = np.concatenate(lax.fori_loop(0, 5, step, x)) assert out.dtype == onp.uint32 return lax.reshape(out[:-1] if odd_size else out, count.shape)
diff --git a/tests/random_test.py b/tests/random_test.py --- a/tests/random_test.py +++ b/tests/random_test.py @@ -398,10 +398,12 @@ def testIssue756(self): self.assertEqual(onp.result_type(w), onp.float32) def testNoOpByOpUnderHash(self): - def fail(): assert False + def fail(*args, **kwargs): assert False apply_primitive, xla.apply_primitive = xla.apply_primitive, fail - out = random.threefry_2x32(onp.zeros(2, onp.uint32), onp.arange(10, dtype=onp.uint32)) - xla.apply_primitive = apply_primitive + try: + out = random.threefry_2x32(onp.zeros(2, onp.uint32), onp.arange(10, dtype=onp.uint32)) + finally: + xla.apply_primitive = apply_primitive if __name__ == "__main__":
compiling time grows non-linear w.r.t. the number of random.split ops A repro code ``` from jax import jit, random from jax.config import config; config.update("jax_platform_name", "cpu") import time def f(key, i): for _ in range(i): _, key = random.split(key) return key for i in range(17): t = time.time() key = jit(f, static_argnums=(1,))(random.PRNGKey(0), i).copy() print("split {} times takes {}".format(i, time.time() - t)) ``` which returns ``` split 0 times takes 0.008060216903686523 split 1 times takes 0.05638694763183594 split 2 times takes 0.13771629333496094 split 3 times takes 0.284271240234375 split 4 times takes 0.4720790386199951 split 5 times takes 0.8609561920166016 split 6 times takes 1.5761287212371826 split 7 times takes 2.615913152694702 split 8 times takes 4.20055079460144 split 9 times takes 6.581374168395996 split 10 times takes 9.83530855178833 split 11 times takes 15.45705771446228 split 12 times takes 20.33790898323059 split 13 times takes 27.418986320495605 split 14 times takes 35.04498267173767 split 15 times takes 44.33582162857056 split 16 times takes 53.186384439468384 ``` @mattjj I think that this causes the regression of gamma sampler in our last discussion. The above issue is causing a big problem in compiling models with many random latent variables in NumPyro.
Thanks for opening this. We'll look into it. Can you send me a master list of JAX bugs that are blocking or otherwise inconveniencing your work? I want to prioritize them, but I fear that I've lost track of the most important ones.

This isn't a great workaround, but just to make sure you're aware, if you make just one `split` call things are faster, i.e. `all_keys = random.split(key, 16)`.

I looked at a profile and found that all the time is being spent in the XLA compiler. Each call to `random.split` is inlining a call to the hash function underlying `random.split`, and that inlining means that we're building large XLA graphs (and XLA compile times can be super-linear in the input size). There may be things we can do on the JAX side, and also I pinged the XLA folks for ideas.

Thanks Matt, that's a great idea! We'll think more about whether we can go with that solution. cc @neerajprad

> Can you send me a master list of JAX bugs that are blocking or otherwise inconveniencing your work?

Currently, besides this issue, we only have some problems with fastmath mode, which makes it impossible (due to a `nan` issue) for us to do stochastic/MCMC inference on some models involving e.g. logsumexp. I'll put together some repro code and will make a separate issue for it soon. Other than that, JAX <3 works beautifully for us.
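A minimal sketch of the single-call workaround mentioned above: request all the keys up front, so the underlying hash is traced once rather than once per split.

```python
from jax import random

key = random.PRNGKey(0)
keys = random.split(key, 17)        # one batched split: an array of shape (17, 2)
key, subkeys = keys[0], keys[1:]    # use subkeys[i] wherever a fresh key is needed
```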
2019-08-13T14:20:33
google/jax
1,234
google__jax-1234
[ "1230" ]
80ac43bbae6549b79f7a40dd75970900b2e1c8d5
diff --git a/jax/lax/__init__.py b/jax/lax/__init__.py --- a/jax/lax/__init__.py +++ b/jax/lax/__init__.py @@ -18,7 +18,7 @@ _reduce_and, _reduce_window_sum, _reduce_window_max, _reduce_window_min, _reduce_window_prod, _float, _complex, _input_dtype, _const, _eq_meet, _safe_mul, - _broadcasting_select) + _broadcasting_select, _check_user_dtype_supported) from .lax_control_flow import * from .lax_fft import * from .lax_parallel import * diff --git a/jax/lax/lax.py b/jax/lax/lax.py --- a/jax/lax/lax.py +++ b/jax/lax/lax.py @@ -4408,3 +4408,15 @@ def subvals(lst, replace): def _abstractify(x): return raise_to_shaped(core.get_aval(x)) + + +def _check_user_dtype_supported(dtype, fun_name=None): + if dtype is not None and onp.dtype(dtype) != xla_bridge.canonicalize_dtype(dtype): + msg = ("Explicitly requested dtype {} {} is not available, " + "and will be truncated to dtype {}. To enable more dtypes, set the " + "jax_enable_x64 configuration option or the JAX_ENABLE_X64 shell " + "environment variable. " + "See https://github.com/google/jax#current-gotchas for more.") + fun_name = "requested in {}".format(fun_name) if fun_name else "" + truncated_dtype = xla_bridge.canonicalize_dtype(dtype).name + warnings.warn(msg.format(dtype, fun_name , truncated_dtype)) diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -1408,6 +1408,7 @@ def atleast_3d(*arys): def array(object, dtype=None, copy=True, order="K", ndmin=0): if order is not None and order != "K": raise NotImplementedError("Only implemented for order='K'") + lax._check_user_dtype_supported(dtype, "array") if isinstance(object, ndarray): if dtype and _dtype(object) != xla_bridge.canonicalize_dtype(dtype): @@ -1442,38 +1443,49 @@ def array(object, dtype=None, copy=True, order="K", ndmin=0): @_wraps(onp.asarray) def asarray(a, dtype=None, order=None): + lax._check_user_dtype_supported(dtype, "asarray") return array(a, dtype=dtype, copy=False, order=order) @_wraps(onp.zeros_like) def zeros_like(x, dtype=None): + lax._check_user_dtype_supported(dtype, "zeros_like") return lax.full_like(x, 0, dtype) @_wraps(onp.ones_like) def ones_like(x, dtype=None): + lax._check_user_dtype_supported(dtype, "ones_like") return lax.full_like(x, 1, dtype) @_wraps(onp.full) def full(shape, fill_value, dtype=None): + lax._check_user_dtype_supported(dtype, "full") return lax.full(shape, fill_value, dtype) @_wraps(onp.full_like) def full_like(a, fill_value, dtype=None): + lax._check_user_dtype_supported(dtype, "full_like") return lax.full_like(a, fill_value, dtype) @_wraps(onp.zeros) -def zeros(shape, dtype=onp.dtype("float64")): +def zeros(shape, dtype=None): if isinstance(shape, types.GeneratorType): raise TypeError("expected sequence object with len >= 0 or a single integer") + lax._check_user_dtype_supported(dtype, "zeros") + dtype = onp.dtype("float64") if dtype is None else dtype shape = (shape,) if onp.isscalar(shape) else shape return lax.full(shape, 0, dtype) @_wraps(onp.ones) -def ones(shape, dtype=onp.dtype("float64")): +def ones(shape, dtype=None): + if isinstance(shape, types.GeneratorType): + raise TypeError("expected sequence object with len >= 0 or a single integer") + lax._check_user_dtype_supported(dtype, "ones") + dtype = onp.dtype("float64") if dtype is None else dtype shape = (shape,) if onp.isscalar(shape) else shape return lax.full(shape, 1, dtype) @@ -1493,7 +1505,9 @@ def array_equal(a1, a2): @_wraps(onp.eye) -def eye(N, M=None, k=None, dtype=onp.dtype("float64")): +def eye(N, 
M=None, k=None, dtype=None): + lax._check_user_dtype_supported(dtype, "eye") + dtype = onp.dtype("float64") if dtype is None else dtype M = N if M is None else M if N < 0 or M < 0: msg = "negative dimensions are not allowed, got {} and {}" @@ -1512,11 +1526,13 @@ def eye(N, M=None, k=None, dtype=onp.dtype("float64")): @_wraps(onp.identity) def identity(n, dtype=None): + lax._check_user_dtype_supported(dtype, "identity") return eye(n, dtype=dtype) @_wraps(onp.arange) def arange(start, stop=None, step=None, dtype=None): + lax._check_user_dtype_supported(dtype, "arange") # If called like np.arange(N), we create a lazy lax._IotaConstant. if stop is None and step is None: dtype = dtype or _dtype(start) @@ -1538,6 +1554,7 @@ def wrapper(*args, **kwargs): def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0): + lax._check_user_dtype_supported(dtype, "linspace") try: out = onp.linspace(start, stop, num, endpoint, retstep, dtype, axis) if retstep: @@ -1646,6 +1663,7 @@ def repeat(a, repeats, axis=None): @_wraps(onp.tri) def tri(N, M=None, k=0, dtype=None): + lax._check_user_dtype_supported(dtype, "tri") M = M if M is not None else N dtype = dtype or float32 x = arange(N, dtype=int32) @@ -1679,6 +1697,7 @@ def triu(m, k=0): def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None): if out: raise NotImplementedError("The 'out' argument to trace is not supported.") + lax._check_user_dtype_supported(dtype, "trace") axis1 = _canonicalize_axis(axis1, ndim(a)) axis2 = _canonicalize_axis(axis2, ndim(a)) @@ -2839,6 +2858,10 @@ def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): return quantile(a, q, axis=axis, out=out, overwrite_input=overwrite_input, keepdims=keepdims) +def _astype(arr, dtype): + lax._check_user_dtype_supported(dtype, "astype") + return lax.convert_element_type(arr, dtype) + ### track unimplemented functions def _not_implemented(fun): @@ -2934,7 +2957,7 @@ def _unimplemented_setitem(self, i, x): setattr(ShapedArray, "T", core.aval_property(transpose)) setattr(ShapedArray, "real", core.aval_property(real)) setattr(ShapedArray, "imag", core.aval_property(imag)) -setattr(ShapedArray, "astype", core.aval_method(lax.convert_element_type)) +setattr(ShapedArray, "astype", core.aval_method(_astype)) # Forward operators, methods, and properties on DeviceArray to lax_numpy @@ -2948,7 +2971,7 @@ def _unimplemented_setitem(self, i, x): setattr(DeviceArray, "T", property(transpose)) setattr(DeviceArray, "real", property(real)) setattr(DeviceArray, "imag", property(imag)) -setattr(DeviceArray, "astype", lax.convert_element_type) +setattr(DeviceArray, "astype", _astype) # Extra methods that are handy
diff --git a/tests/api_test.py b/tests/api_test.py --- a/tests/api_test.py +++ b/tests/api_test.py @@ -19,6 +19,7 @@ import collections from functools import partial import unittest +import warnings from absl.testing import absltest import numpy as onp @@ -41,6 +42,7 @@ from jax.config import config config.parse_flags_with_absl() +FLAGS = config.FLAGS class APITest(jtu.JaxTestCase): @@ -975,6 +977,54 @@ def f(x): for x, y in zip(xs, ys): self.assertAllClose(x * 2 - 3., y, check_dtypes=True) + def test_dtype_warning(self): + # cf. issue #1230 + if FLAGS.jax_enable_x64: + return # test only applies when x64 is disabled + + def check_warning(warn, nowarn): + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + + nowarn() # get rid of extra startup warning + + prev_len = len(w) + nowarn() + assert len(w) == prev_len + + warn() + assert len(w) > 0 + msg = str(w[-1].message) + expected_prefix = "Explicitly requested dtype " + self.assertEqual(expected_prefix, msg[:len(expected_prefix)]) + + prev_len = len(w) + nowarn() + assert len(w) == prev_len + + check_warning(lambda: np.array([1, 2, 3], dtype="float64"), + lambda: np.array([1, 2, 3], dtype="float32"),) + check_warning(lambda: np.ones(3, dtype=onp.float64), + lambda: np.ones(3)) + check_warning(lambda: np.ones_like(3, dtype=onp.int64), + lambda: np.ones_like(3, dtype=onp.int32)) + check_warning(lambda: np.zeros(3, dtype="int64"), + lambda: np.zeros(3, dtype="int32")) + check_warning(lambda: np.zeros_like(3, dtype="float64"), + lambda: np.zeros_like(3, dtype="float32")) + check_warning(lambda: np.full((2, 3), 1, dtype="int64"), + lambda: np.full((2, 3), 1)) + check_warning(lambda: np.ones(3).astype("float64"), + lambda: np.ones(3).astype("float32")) + check_warning(lambda: np.eye(3, dtype=onp.float64), + lambda: np.eye(3)) + check_warning(lambda: np.arange(3, dtype=onp.float64), + lambda: np.arange(3, dtype=onp.float32)) + check_warning(lambda: np.linspace(0, 3, dtype=onp.float64), + lambda: np.linspace(0, 3, dtype=onp.float32)) + check_warning(lambda: np.tri(2, dtype="float64"), + lambda: np.tri(2, dtype="float32")) + if __name__ == '__main__': absltest.main()
JAX should throw an error (or warning?) when asked to cast to a dtype it does not support on a device
I believe JAX should in some way notify the user when a cast operation cannot be performed.

```python
import jax.numpy as np

a = np.ones((1, 2, 3))
print(a.dtype)  # reports float32
a = np.array(a, dtype='float64')  # JAX should throw an error on this line
print(a.dtype)  # reports float32
```
Great point, you're right! Let's do this. By the way, if you want to switch on support for 64-bit values (in which case this cast will work as you expect), see the first item in our [Gotchas list](https://github.com/google/jax#current-gotchas).
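For readers following the Gotchas pointer above, a short sketch of switching on 64-bit support; with it enabled, the float64 request in the report is honored instead of being silently truncated. The update (or the environment variable) needs to happen at startup, before any arrays are created.

```python
from jax.config import config
config.update("jax_enable_x64", True)  # or: export JAX_ENABLE_X64=1 before launching Python

import jax.numpy as np

a = np.ones((1, 2, 3))
a = np.array(a, dtype='float64')
print(a.dtype)  # float64 once x64 mode is enabled
```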
2019-08-22T16:25:12
google/jax
1,238
google__jax-1238
[ "1122", "490", "1120" ]
e025fac1ad22d94b6574d6caf5ddf464ceccf7df
diff --git a/jax/lax/lax_control_flow.py b/jax/lax/lax_control_flow.py --- a/jax/lax/lax_control_flow.py +++ b/jax/lax/lax_control_flow.py @@ -164,7 +164,7 @@ def while_loop(cond_fun, body_fun, init_val): raise TypeError(msg.format(cond_tree)) if cond_jaxpr.out_avals != [ShapedArray((), onp.bool_)]: msg = "cond_fun must return a boolean scalar, but got output type(s) {}." - raise TypeError(msg.format(coud_jaxpr.out_avals)) + raise TypeError(msg.format(cond_jaxpr.out_avals)) if not treedef_children(in_tree) == [body_tree]: msg = "body_fun output pytree structure must match init_val, got {} and {}." raise TypeError(msg.format(body_tree, treedef_children(in_tree)[0])) @@ -301,18 +301,6 @@ def cond(pred, true_operand, true_fun, false_operand, false_fun): true_nconsts=len(true_consts), false_nconsts=len(false_consts)) return tree_unflatten(out_tree, out) -def _cond_impl(pred, *args, **kwargs): - true_jaxpr, false_jaxpr, true_nconsts, false_nconsts = split_dict( - kwargs, ["true_jaxpr", "false_jaxpr", "true_nconsts", "false_nconsts"]) - true_nops = len(true_jaxpr.in_avals) - true_nconsts - true_consts, true_ops, false_consts, false_ops = split_list( - args, [true_nconsts, true_nops, false_nconsts]) - - if pred: - return core.jaxpr_as_fun(true_jaxpr)(*(true_consts + true_ops)) - else: - return core.jaxpr_as_fun(false_jaxpr)(*(false_consts + false_ops)) - def _cond_abstract_eval(*args, **kwargs): return kwargs["true_jaxpr"].out_avals @@ -339,10 +327,47 @@ def make_computation(name, jaxpr, op_shape): return c.Conditional(pred, true_op, true_c, false_op, false_c) +def _cond_batching_rule(args, dims, true_jaxpr, false_jaxpr, true_nconsts, + false_nconsts): + # TODO: maybe avoid moving arg axes to front if we're promoting to select? + args = [batching.moveaxis(x, d, 0) if d is not batching.not_mapped and d != 0 + else x for x, d in zip(args, dims)] + true_nops = len(true_jaxpr.in_avals) - true_nconsts + (pred,), true_consts, true_ops, false_consts, false_ops = split_list( + args, [1, true_nconsts, true_nops, false_nconsts]) + size, = {x.shape[d] for x, d in zip(args, dims) if d is not batching.not_mapped} + orig_bat = [d is not batching.not_mapped for d in dims] + (pred_bat,), t_bat, tconst_bat, f_bat, fconst_bat = split_list( + orig_bat, [1, true_nconsts, len(true_ops), false_nconsts]) + + _, true_out_bat = batching.batch_jaxpr(true_jaxpr, size, tconst_bat + t_bat, False) + _, false_out_bat = batching.batch_jaxpr(false_jaxpr, size, fconst_bat + f_bat, False) + out_bat = [a or b for a, b in zip(true_out_bat, false_out_bat)] + + true_jaxpr_batched, _ = batching.batch_jaxpr(true_jaxpr, size, tconst_bat + t_bat, out_bat) + false_jaxpr_batched, _ = batching.batch_jaxpr(false_jaxpr, size, fconst_bat + f_bat, out_bat) + + if pred_bat: + true_out = core.jaxpr_as_fun(true_jaxpr_batched)(*(true_consts + true_ops)) + false_out = core.jaxpr_as_fun(false_jaxpr_batched)(*(false_consts + false_ops)) + true_out = [batching.broadcast(x, size, 0) if not b else x + for x, b in zip(true_out, out_bat)] + false_out = [batching.broadcast(x, size, 0) if not b else x + for x, b in zip(false_out, out_bat)] + return [lax.select(pred, t, f) + for t, f in zip(true_out, false_out)], [0] * len(true_out) + else: + out_dims = [0 if b else batching.not_mapped for b in out_bat] + return cond_p.bind( + *itertools.chain([pred], true_consts, true_ops, false_consts, false_ops), + true_jaxpr=true_jaxpr_batched, false_jaxpr=false_jaxpr_batched, + true_nconsts=len(true_consts), false_nconsts=len(false_consts)), out_dims + cond_p = 
lax.Primitive('cond') cond_p.multiple_results = True -cond_p.def_impl(_cond_impl) +cond_p.def_impl(partial(xla.apply_primitive, cond_p)) cond_p.def_abstract_eval(_cond_abstract_eval) +batching.primitive_batchers[cond_p] = _cond_batching_rule xla.initial_style_translations[cond_p] = _cond_translation_rule
diff --git a/tests/lax_control_flow_test.py b/tests/lax_control_flow_test.py --- a/tests/lax_control_flow_test.py +++ b/tests/lax_control_flow_test.py @@ -475,6 +475,58 @@ def cfun(x): self.assertEqual(fun(4), cfun(4)) self.assertEqual(cfun(4), (4, 2., 4.)) + def testCondBatched(self): + def fun(x, y, z): + pred = lax.lt(x, 3) + true_fun = lambda y: y + false_fun = lambda z: lax.neg(z) + return lax.cond(pred, y, true_fun, z, false_fun) + + # these cases stay as cond + x = onp.array(2) + y = onp.array([1, 2]) + z = onp.array([3, 4]) + ans = api.vmap(fun, (None, 0, 0))(x, y, z) + jaxpr = api.make_jaxpr(api.vmap(fun, (None, 0, 0)))(x, y, z) + expected = onp.array([1, 2]) + self.assertAllClose(ans, expected, check_dtypes=False) + assert "select" not in str(jaxpr) + + x = onp.array(4) + ans = api.vmap(fun, (None, 0, 0))(x, y, z) + jaxpr = api.make_jaxpr(api.vmap(fun, (None, 0, 0)))(x, y, z) + expected = onp.array([-3, -4]) + self.assertAllClose(ans, expected, check_dtypes=False) + assert "select" not in str(jaxpr) + + fun = api.jit(fun) + ans = api.vmap(fun, (None, 0, 0))(x, y, z) + expected = onp.array([-3, -4]) + self.assertAllClose(ans, expected, check_dtypes=False) + + z = onp.array(5) + ans = api.vmap(fun, (None, 0, None))(x, y, z) + jaxpr = api.make_jaxpr(api.vmap(fun, (None, 0, None)))(x, y, z) + expected = onp.array([-5, -5]) + self.assertAllClose(ans, expected, check_dtypes=False) + assert "select" not in str(jaxpr) + + + # these cases become select + x = onp.array([2, 4]) + ans = api.vmap(fun, (0, 0, None))(x, y, z) + jaxpr = api.make_jaxpr(api.vmap(fun, (0, 0, None)))(x, y, z) + expected = onp.array([1, -5]) + self.assertAllClose(ans, expected, check_dtypes=False) + assert "select" in str(jaxpr) + + z = onp.array([3, 4]) + ans = api.vmap(fun)(x, y, z) + jaxpr = api.make_jaxpr(api.vmap(fun))(x, y, z) + expected = onp.array([1, -4]) + self.assertAllClose(ans, expected, check_dtypes=False) + assert "select" in str(jaxpr) + def testIssue514(self): # just check this doesn't crash lax.cond(True,
add cond batching rule Covers both the predication (batched condition) and unbatched condition cases. Fixes #490; may also fix #1120. vmap support / batching rule for lax.cond It would be great to have vmap support for `lax.cond` similar to the recently added vmap support for `lax.while_loop`. I know that I can use `np.where` as a workaround for some cases, but for computationally expensive if conditions a batched cond might be better. Feature request: `vmap`-able boolean function options I would like to write functions that take static options (associated with branching within the function) that are compatible with both `vmap` and `jit`. Some examples of things that currently work/don't work: ```python def python_control_flow(x, option=False): return x**2 if option else x**3 vmap(python_control_flow)(np.arange(3)) # works vmap(jit(python_control_flow))(np.arange(3)) # works (lambda x: jit(python_control_flow)(x, True))(np.arange(3)) # doesn't work vmap(jit(lambda x: python_control_flow(x, True)))(np.arange(3)) # works vmap(python_control_flow, in_axes=(0, None))(np.arange(3), True) # doesn't work def cond_control_flow(x, option=False): return lax.cond(option, x, lambda x: x**2, x, lambda x: x**3) cond_control_flow(np.arange(3)) # works jit(cond_control_flow)(np.arange(3)) # doesn't work, https://github.com/google/jax/issues/1077 vmap(cond_control_flow)(np.arange(3)) # doesn't work vmap(jit(cond_control_flow))(np.arange(3)) # doesn't work vmap(cond_control_flow, in_axes=(0, None))(np.arange(3), True) # doesn't work ``` It seems that for the best compatibility with `jit` the `lax.cond` control flow is preferable, perhaps with a wrapper, e.g., ```python @jit def _cond_control_flow(x, option): return lax.cond(option, x, lambda x: x**2, x, lambda x: x**3) def cond_control_flow(x, option=False): return _cond_control_flow(x, option) ``` to avoid https://github.com/google/jax/issues/1077, but no approach along these lines is currently compatible with a `vmap` down the line due to `NotImplementedError: Batching rule for 'cond' not implemented`.
What about batching via `lax.map`?

Apparently the right kind of lattice join is already being invoked in the right place, so the updated implementation both (a) works and (b) raises to the batch tracer only when necessary, even though the actual batch rule doesn't do any raising at all. (Earlier, I was cargo-culting off of the `while_loop` batching rule, which does need to do these things because there's no generic lattice join being called in `while_loop` itself.)

Whoa! Do you mean the lattice join in [this code](https://github.com/google/jax/blob/a6ec5a916cae5b71e0232633b89e7490f77d81ef/jax/interpreters/partial_eval.py#L317) or elsewhere? How did you figure out that it wasn't doing any extra raising?

As I usually only need the if condition (no else part), I am considering a single-iteration `while_loop` instead of `cond` as another workaround. What do you think?

I think this feature sounds super fun to implement and I can't wait to try it! But in the meantime a single-iteration `while_loop` (or `fori_loop` if it's more convenient) is probably a good workaround.
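To make that workaround concrete, here is one possible shape of the single-iteration `while_loop` idea. It is a sketch only, not tested against this exact JAX version, and it assumes the `while_loop` batching rule handles a batched loop predicate (as the discussion above suggests).

```python
import jax
import jax.numpy as np
from jax import lax

def if_then(pred, x, true_fun):
  # "if with no else": apply true_fun at most once, driven by the carry flag.
  cond_fun = lambda carry: carry[0]
  body_fun = lambda carry: (False, true_fun(carry[1]))
  return lax.while_loop(cond_fun, body_fun, (pred, x))[1]

scale_if = lambda pred, x: if_then(pred, x, lambda v: v * 10)
print(jax.vmap(scale_if)(np.array([True, False]), np.array([1., 2.])))  # roughly [10., 2.]
```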
2019-08-23T03:26:23
google/jax
1,245
google__jax-1245
[ "1236" ]
af8859b9de1e74c76de2a0355b6a804157659556
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -27,12 +27,14 @@ from __future__ import division from __future__ import print_function +from distutils.util import strtobool import collections import itertools +import os import re import string -import warnings import types +import warnings import numpy as onp import opt_einsum @@ -42,12 +44,21 @@ from jax import jit, device_put from .. import core from ..abstract_arrays import UnshapedArray, ShapedArray, ConcreteArray +from ..config import flags from ..interpreters.xla import DeviceArray from .. import lax from ..util import partial, get_module_functions, unzip2, prod as _prod from ..lib import xla_bridge from ..lib import xla_client +FLAGS = flags.FLAGS +flags.DEFINE_enum( + 'jax_numpy_rank_promotion', os.getenv('JAX_NUMPY_RANK_PROMOTION', 'allow'), + enum_values=['allow', 'warn', 'raise'], + help= + 'Control NumPy-style automatic rank promotion broadcasting ' + '("allow", "warn", or "raise").') + if six.PY3: def removechars(s, chars): return s.translate(str.maketrans(dict.fromkeys(chars))) @@ -158,9 +169,20 @@ def _promote_shapes(*args): return args else: shapes = [shape(arg) for arg in args] - nd = len(lax.broadcast_shapes(*shapes)) - return [lax.reshape(arg, (1,) * (nd - len(shp)) + shp) - if shp and len(shp) != nd else arg for arg, shp in zip(args, shapes)] + ranks = [len(shp) for shp in shapes] + if len(set(ranks)) == 1: + return args + elif FLAGS.jax_numpy_rank_promotion != "raise": + if FLAGS.jax_numpy_rank_promotion == "warn": + msg = "following NumPy automatic rank promotion behavior for {}." + warnings.warn(msg.format(' '.join(map(str, shapes)))) + nd = len(lax.broadcast_shapes(*shapes)) + return [lax.reshape(arg, (1,) * (nd - len(shp)) + shp) + if shp and len(shp) != nd else arg for arg, shp in zip(args, shapes)] + else: + msg = ("operands could not be broadcast together with shapes {} " + "and with the config option jax_numpy_rank_promotion='raise'.") + raise ValueError(msg.format(' '.join(map(str, shapes)))) def _promote_dtypes(*args): """Convenience function to apply Numpy argument dtype promotion."""
diff --git a/tests/api_test.py b/tests/api_test.py --- a/tests/api_test.py +++ b/tests/api_test.py @@ -186,9 +186,15 @@ def test_binop_mismatch(self): def f(x, y): return x + y - jtu.check_raises(lambda: grad(f)(onp.zeros(3), onp.zeros(4)), - ValueError, - "Incompatible shapes for broadcasting: ((3,), (4,))") + jtu.check_raises( + lambda: f(np.zeros(3), np.zeros(4)), + TypeError, + "add got incompatible shapes for broadcasting: (3,), (4,).") + + jtu.check_raises( + lambda: grad(f)(onp.zeros(3), onp.zeros(4)), + TypeError, + "add got incompatible shapes for broadcasting: (3,), (4,).") def test_dot_mismatch(self): def f(x, y): diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -23,6 +23,7 @@ import operator import unittest from unittest import SkipTest +import warnings from absl.testing import absltest from absl.testing import parameterized @@ -1829,5 +1830,35 @@ def testMeshGrid(self, shapes, dtype, indexing, sparse, rng): lnp_fun = partial(lnp.meshgrid, indexing=indexing, sparse=sparse) self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True) + def testDisableNumpyRankPromotionBroadcasting(self): + try: + prev_flag = FLAGS.jax_numpy_rank_promotion + FLAGS.jax_numpy_rank_promotion = "allow" + lnp.ones(2) + lnp.ones((1, 2)) # works just fine + finally: + FLAGS.jax_numpy_rank_promotion = prev_flag + + try: + prev_flag = FLAGS.jax_numpy_rank_promotion + FLAGS.jax_numpy_rank_promotion = "raise" + self.assertRaises(ValueError, lambda: lnp.ones(2) + lnp.ones((1, 2))) + finally: + FLAGS.jax_numpy_rank_promotion = prev_flag + + try: + prev_flag = FLAGS.jax_numpy_rank_promotion + FLAGS.jax_numpy_rank_promotion = "warn" + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + lnp.ones(2) + lnp.ones((1, 2)) + assert len(w) > 0 + msg = str(w[-1].message) + self.assertEqual( + msg, + "following NumPy automatic rank promotion behavior for (2,) (1, 2).") + finally: + FLAGS.jax_numpy_rank_promotion = prev_flag + + if __name__ == "__main__": absltest.main()
throw an error (or warning) when an array is silently broadcast to higher dimension
Feature request submitted based on an in-person discussion with @mattjj. Silent, overly generous broadcasting of arrays often masks bugs. (It might make sense for the behavior -- error, warn, or silent, where silent corresponds to the numpy behavior -- to be controlled by a global variable.)

E.g., I believe the following should result in an error or warning:
```
a = np.arange(5)     # shape (5,)
b = np.zeros((5,5))  # shape (5,5)
c = a + b            # should throw an error, since a and b have different numbers of dimensions
```
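For context, the patch above wires this up as a configuration option (`jax_numpy_rank_promotion`, also settable through the `JAX_NUMPY_RANK_PROMOTION` environment variable) with `allow`/`warn`/`raise` values. The sketch below shows how the requested behavior would look with it set to `raise`; the `config.update` spelling follows the pattern used for other JAX flags and is an assumption here rather than something stated in the patch.

```python
from jax.config import config
config.update("jax_numpy_rank_promotion", "raise")  # or "warn" to keep NumPy semantics but be told

import jax.numpy as np

a = np.arange(5)      # shape (5,)
b = np.zeros((5, 5))  # shape (5, 5)
c = a + b             # raises under "raise" instead of silently broadcasting a to (1, 5)
```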
2019-08-24T00:06:30
google/jax
1,246
google__jax-1246
[ "1241" ]
4838440ff46daf944390863dc03036078d0cad4b
diff --git a/jax/interpreters/xla.py b/jax/interpreters/xla.py --- a/jax/interpreters/xla.py +++ b/jax/interpreters/xla.py @@ -656,4 +656,5 @@ def _instantiate_device_constant(const, device_num=0, cutoff=1e6): compiled = c.Build(xla_const).Compile((), opts, backend=xb.get_backend()) return compiled.Execute(()) else: - return xc.Buffer.from_pyval(onp.asarray(const), device_num) + return xc.Buffer.from_pyval(onp.asarray(const), device_num, + backend=xb.get_backend())
seg fault happens when running CPU code using GPU-supported jaxlib The following repro script, which is taken from #1239, ``` import jax from jax.config import config; config.update('jax_platform_name', 'cpu') import jax.numpy as np from jax import random, lax, jit def welford_covariance(): def init_fn(size): return np.zeros(size), np.zeros(size), 0 def update_fn(sample, state): mean, m2, n = state n = n + 1 delta_pre = sample - mean mean = mean + delta_pre / n delta_post = sample - mean m2 = m2 + delta_pre * delta_post return mean, m2, n def final_fn(state): mean, m2, n = state cov = m2 / (n - 1) cov_inv_sqrt = np.sqrt(np.reciprocal(cov)) return cov, cov_inv_sqrt return init_fn, update_fn, final_fn def warmup_adapter(): mm_init, mm_update, mm_final = welford_covariance() def init_fn(z, rng, mass_matrix_size): inverse_mass_matrix = np.ones(mass_matrix_size) mass_matrix_sqrt = inverse_mass_matrix mm_state = mm_init(mass_matrix_size) return (inverse_mass_matrix, mass_matrix_sqrt, mm_state, rng) def _update_at_window_end(z, rng_ss, state): inverse_mass_matrix, mass_matrix_sqrt, mm_state, rng = state inverse_mass_matrix, mass_matrix_sqrt = mm_final(mm_state) mm_state = mm_init(inverse_mass_matrix.shape[-1]) return (inverse_mass_matrix, mass_matrix_sqrt, mm_state, rng) def update_fn(t, accept_prob, z, state): inverse_mass_matrix, mass_matrix_sqrt, mm_state, rng = state rng, rng_ss = random.split(rng) state = (inverse_mass_matrix, mass_matrix_sqrt, mm_state, rng) state = lax.cond(t < 10, (z, rng_ss, state), lambda args: _update_at_window_end(*args), state, lambda x: x) return state return init_fn, update_fn wa_init, wa_update = warmup_adapter() wa_update = jit(wa_update) # uncomment this will make it fast z = np.ones(3) wa_state = wa_init(z, random.PRNGKey(0), mass_matrix_size=3) import time for t in range(10): tic = time.time() wa_state = wa_update(t, 0.1 * t, z, wa_state) print(time.time() - tic) ``` causes ``` [mutex.cc : 419] RAW: Lock blocking 0x7fbae0006640 @ 0x7fbbc9577eaf 0x7fbbc95785a6 0x7fbbc6a81236 0x7fbbc9576913 0x7fbbc9463f12 0x7fbbf463c421 [mutex.cc : 419] RAW: Unlock 0x7fbae0006640 @ 0x7fbbc6a80ae2 0x7fbbc9578088 0x7fbbc95785a6 0x7fbbc6a81236 0x7fbbc9576913 0x7fbbc9463f12 0x7fbbf463c421 ``` using https://storage.googleapis.com/jax-releases/cuda100/jaxlib-0.1.23-cp36-none-linux_x86_64.whl. This issue just happens after the recent refactoring of JAX (related to no more tuple). The code runs fine if we change `config.update('jax_platform_name', 'cpu')` to `config.update('jax_platform_name', 'gpu')` or if we use jaxlib from pypi. cc @mattjj @skye
One of the amazing XLA:{C,G}PU team members @cheshire tried to repro this internally (meaning against the most up-to-date XLA code) and the bug didn’t appear. The optimistic explanation for that is that this bug is already fixed and we just need to push out an updated jaxlib. The pessimistic explanation is that it’s just hard to repro. We’ll update jaxlib wheels today and hope that fixes things. Thanks @mattjj, that's great news! @fehiepsi we just uploaded jaxlib 0.1.25 wheels. Does it still repro? @mattjj The issue still happens, though I don't see mutex.cc messages anymore. When I uncomment the line `wa_update = jit(wa_update)`, the iteration runs for the first step and gets stuck there (all GPU memory is allocated). I tried to restart my computer and create a new conda environment to test, but the issue still happens. Are you able to replicate the issue on your machine? It turns out that we accidentally updated to an XLA version that is still a few days out of date, so we're going to update it one more time just to rule out that category of explanations. (I haven't tried to repro externally on the new jaxlib wheels yet, but will with 0.1.26.) This is at the top of our priority list, but just so I understand how many fires we should try to light under people, is this currently blocking your work or your users'? Hi @mattjj, please don't worry about it. We currently pin the jax version to 0.1.41 so this issue isn't blocking our work. :) We just pushed jaxlib 0.1.26 wheels, but unfortunately the bug is still there: I was able to repro using your script on a fresh cloud VM. We'll follow up with the XLA team and see if we can track this down!
2019-08-25T20:04:15
google/jax
1,268
google__jax-1268
[ "1263" ]
c3e52cfc5af3b3c628e04c2dc74024d48f828854
diff --git a/jax/lax/lax_control_flow.py b/jax/lax/lax_control_flow.py --- a/jax/lax/lax_control_flow.py +++ b/jax/lax/lax_control_flow.py @@ -329,6 +329,10 @@ def make_computation(name, jaxpr, op_shape): return c.Conditional(pred, true_op, true_c, false_op, false_c) +def _cond_pred_bcast_select(pred, x, y): + bcast_pred = lax.broadcast_in_dim(pred, onp.shape(x), list(range(onp.ndim(pred)))) + return lax.select(bcast_pred, x, y) + def _cond_batching_rule(args, dims, true_jaxpr, false_jaxpr, true_nconsts, false_nconsts): # TODO: maybe avoid moving arg axes to front if we're promoting to select? @@ -339,8 +343,8 @@ def _cond_batching_rule(args, dims, true_jaxpr, false_jaxpr, true_nconsts, args, [1, true_nconsts, true_nops, false_nconsts]) size, = {x.shape[d] for x, d in zip(args, dims) if d is not batching.not_mapped} orig_bat = [d is not batching.not_mapped for d in dims] - (pred_bat,), t_bat, tconst_bat, f_bat, fconst_bat = split_list( - orig_bat, [1, true_nconsts, len(true_ops), false_nconsts]) + (pred_bat,), tconst_bat, t_bat, fconst_bat, f_bat = split_list( + orig_bat, [1, true_nconsts, true_nops, false_nconsts]) _, true_out_bat = batching.batch_jaxpr(true_jaxpr, size, tconst_bat + t_bat, False) _, false_out_bat = batching.batch_jaxpr(false_jaxpr, size, fconst_bat + f_bat, False) @@ -355,8 +359,8 @@ def _cond_batching_rule(args, dims, true_jaxpr, false_jaxpr, true_nconsts, true_out = [batching.broadcast(x, size, 0) if not b else x for x, b in zip(true_out, out_bat)] false_out = [batching.broadcast(x, size, 0) if not b else x - for x, b in zip(false_out, out_bat)] - return [lax.select(pred, t, f) + for x, b in zip(false_out, out_bat)] + return [_cond_pred_bcast_select(pred, t, f) for t, f in zip(true_out, false_out)], [0] * len(true_out) else: out_dims = [0 if b else batching.not_mapped for b in out_bat]
diff --git a/tests/lax_control_flow_test.py b/tests/lax_control_flow_test.py --- a/tests/lax_control_flow_test.py +++ b/tests/lax_control_flow_test.py @@ -30,6 +30,7 @@ from jax import api from jax import core from jax import lax +from jax import random from jax import test_util as jtu from jax.util import unzip2 from jax.lib import xla_bridge @@ -527,6 +528,21 @@ def fun(x, y, z): self.assertAllClose(ans, expected, check_dtypes=False) assert "select" in str(jaxpr) + def testIssue1263(self): + def f(rng, x): + cond = random.bernoulli(rng) + return lax.cond(cond, x, lambda x: x, np.abs(x) - 1., lambda x: x) + + def body_fn(i, state): + rng, x = state + key, subkey = random.split(rng) + return key, f(subkey, x) + + def g(rng, x): + return lax.fori_loop(0, 10, body_fn, (rng, x)) + + api.vmap(g)(random.split(random.PRNGKey(0), 3), np.ones((3, 4))) + def testIssue514(self): # just check this doesn't crash lax.cond(True,
cond batching rule shape mismatch A simple script to repro ``` from jax import lax, numpy as np, vmap, jit, random from jax.config import config; config.update('jax_platform_name', 'cpu') def f(rng, x): cond = random.bernoulli(rng) y = np.abs(x) - 1. return lax.cond(cond, x, lambda x: x, y, lambda x: x) def body_fn(i, state): rng, x = state key, subkey = random.split(rng) return key, f(subkey, x) def g(rng, x): return lax.fori_loop(0, 10, body_fn, (rng, x)) vmap(g)(random.split(random.PRNGKey(0), 3), np.ones((3, 4))) ``` which triggers ``` TypeError: select pred must be scalar or have the same shape as on_true and on_false, got pred shape (3,) for on_true and on_false of shape (3, 4). ``` More details <details> ```sh ~/jax/jax/lax/lax_control_flow.py in fori_loop(lower, upper, body_fun, init_val) 116 return lax.add(i, lax._const(i, 1)), body_fun(i, x) 117 --> 118 _, result = while_loop(while_cond_fun, while_body_fun, (lower, init_val)) 119 return result 120 ~/jax/jax/lax/lax_control_flow.py in while_loop(cond_fun, body_fun, init_val) 171 outs = while_p.bind(*itertools.chain(cond_consts, body_consts, init_vals), 172 cond_nconsts=len(cond_consts), cond_jaxpr=cond_jaxpr, --> 173 body_nconsts=len(body_consts), body_jaxpr=body_jaxpr) 174 return tree_unflatten(body_tree, outs) 175 ~/jax/jax/core.py in bind(self, *args, **kwargs) 131 132 tracers = map(top_trace.full_raise, args) --> 133 out_tracer = top_trace.process_primitive(self, tracers, kwargs) 134 if self.multiple_results: 135 return map(full_lower, out_tracer) ~/jax/jax/interpreters/batching.py in process_primitive(self, primitive, tracers, params) 113 # TODO(mattjj,phawkins): if no rule implemented, could vmap-via-map here 114 batched_primitive = get_primitive_batcher(primitive) --> 115 val_out, dim_out = batched_primitive(vals_in, dims_in, **params) 116 if primitive.multiple_results: 117 return map(partial(BatchTracer, self), val_out, dim_out) ~/jax/jax/lax/lax_control_flow.py in _while_loop_batching_rule(args, dims, cond_nconsts, cond_jaxpr, body_nconsts, body_jaxpr) 247 batched = bconst_bat + carry_bat 248 body_jaxpr_batched, carry_bat_out = batching.batch_jaxpr( --> 249 body_jaxpr, size, batched, instantiate=carry_bat) 250 cond_jaxpr_batched, (pred_bat,) = batching.batch_jaxpr( 251 cond_jaxpr, size, cconst_bat + carry_bat, instantiate=False) ~/jax/jax/interpreters/batching.py in batch_jaxpr(jaxpr, size, batched, instantiate) 293 for a, b in zip(jaxpr.in_avals, batched)] 294 in_pvals = [pe.PartialVal((aval, core.unit)) for aval in avals_in] --> 295 jaxpr_out, pvals_out, consts_out = pe.trace_to_jaxpr(f, in_pvals, instantiate=True) 296 avals_out, _ = unzip2(pvals_out) 297 jaxpr_out = core.TypedJaxpr(jaxpr_out, consts_out, avals_in, avals_out) ~/jax/jax/interpreters/partial_eval.py in trace_to_jaxpr(fun, pvals, **kwargs) 316 with new_master(JaxprTrace) as master: 317 fun = trace_to_subjaxpr(fun, master, instantiate) --> 318 jaxpr, (out_pvals, consts, env) = fun.call_wrapped(pvals) 319 assert not env 320 del master ~/jax/jax/linear_util.py in call_wrapped(self, *args, **kwargs) 163 164 del gen --> 165 ans = self.f(*args, **dict(self.params, **kwargs)) 166 del args 167 while stack: ~/jax/jax/core.py in jaxpr_as_fun(typed_jaxpr, *args) 77 @curry 78 def jaxpr_as_fun(typed_jaxpr, *args): ---> 79 return eval_jaxpr(typed_jaxpr.jaxpr, typed_jaxpr.literals, (), *args) 80 81 ~/jax/jax/core.py in eval_jaxpr(jaxpr, consts, freevar_vals, *args) 183 in eqn.bound_subjaxprs] 184 subfuns = map(lu.wrap_init, subfuns) --> 185 ans = 
eqn.primitive.bind(*(subfuns + in_vals), **eqn.params) 186 if eqn.primitive.multiple_results: 187 map(write, eqn.outvars, ans) ~/jax/jax/core.py in bind(self, *args, **kwargs) 131 132 tracers = map(top_trace.full_raise, args) --> 133 out_tracer = top_trace.process_primitive(self, tracers, kwargs) 134 if self.multiple_results: 135 return map(full_lower, out_tracer) ~/jax/jax/interpreters/batching.py in process_primitive(self, primitive, tracers, params) 113 # TODO(mattjj,phawkins): if no rule implemented, could vmap-via-map here 114 batched_primitive = get_primitive_batcher(primitive) --> 115 val_out, dim_out = batched_primitive(vals_in, dims_in, **params) 116 if primitive.multiple_results: 117 return map(partial(BatchTracer, self), val_out, dim_out) ~/jax/jax/lax/lax_control_flow.py in _cond_batching_rule(args, dims, true_jaxpr, false_jaxpr, true_nconsts, false_nconsts) 358 for x, b in zip(false_out, out_bat)] 359 return [lax.select(pred, t, f) --> 360 for t, f in zip(true_out, false_out)], [0] * len(true_out) 361 else: 362 out_dims = [0 if b else batching.not_mapped for b in out_bat] ~/jax/jax/lax/lax_control_flow.py in <listcomp>(.0) 358 for x, b in zip(false_out, out_bat)] 359 return [lax.select(pred, t, f) --> 360 for t, f in zip(true_out, false_out)], [0] * len(true_out) 361 else: 362 out_dims = [0 if b else batching.not_mapped for b in out_bat] ~/jax/jax/lax/lax.py in select(pred, on_true, on_false) 611 operator. 612 """ --> 613 return select_p.bind(pred, on_true, on_false) 614 615 def slice(operand, start_indices, limit_indices, strides=None): ~/jax/jax/core.py in bind(self, *args, **kwargs) 131 132 tracers = map(top_trace.full_raise, args) --> 133 out_tracer = top_trace.process_primitive(self, tracers, kwargs) 134 if self.multiple_results: 135 return map(full_lower, out_tracer) ~/jax/jax/interpreters/partial_eval.py in process_primitive(self, primitive, tracers, params) 93 tracers = map(self.instantiate_const, tracers) 94 avals = [t.aval for t in tracers] ---> 95 out_aval = primitive.abstract_eval(*avals, **params) 96 # TODO(dougalm): think about whether these ref cycles will leak memory 97 if primitive.multiple_results: ~/jax/jax/lax/lax.py in standard_abstract_eval(shape_rule, dtype_rule, *args, **kwargs) 1468 return ShapedArray(shape_rule(*args, **kwargs), dtype_rule(*args, **kwargs)) 1469 elif least_specialized is ShapedArray: -> 1470 return ShapedArray(shape_rule(*args, **kwargs), dtype_rule(*args, **kwargs)) 1471 elif least_specialized is UnshapedArray: 1472 return UnshapedArray(dtype_rule(*args, **kwargs)) ~/jax/jax/lax/lax.py in _select_shape_rule(pred, on_true, on_false) 2561 msg = ("select pred must be scalar or have the same shape as on_true and " 2562 "on_false, got pred shape {} for on_true and on_false of shape {}.") -> 2563 raise TypeError(msg.format(pred.shape, on_true.shape)) 2564 return on_true.shape 2565 ``` </details> I think that the logic of `cond_batching_rule` is correct but the `pred` shape is somehow not broadcasted in `_cond_batching_rule`. cc @jekbradbury @mattjj @neerajprad
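For reference, a small standalone sketch (not from the issue thread) of the `select` shape rule being violated here and of the explicit broadcast that resolves it, using only public `lax` APIs:

```python
import numpy as onp
from jax import lax

pred = onp.array([True, False, True])        # batched predicate, shape (3,)
x, y = onp.ones((3, 4)), onp.zeros((3, 4))   # batched branch outputs, shape (3, 4)

# lax.select(pred, x, y) fails: pred must be a scalar or match x/y exactly.
bpred = lax.broadcast_in_dim(pred, onp.shape(x), (0,))
out = lax.select(bpred, x, y)                # shape (3, 4)
```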
Thanks for catching this! I think it's just that the `Select` HLO / `lax.select` primitive has limited automatic broadcasting (which we sometimes forget), so we need to do the broadcast ourselves. We did this [for the While batching rule](https://github.com/google/jax/blob/fd9e0333a6430bb06bd7a0c9faa650c66fdf2a21/jax/lax/lax_control_flow.py#L230-L237) but neglected to in the cond batching rule and [only called the raw select function without the broadcasting we need](https://github.com/google/jax/blob/fd9e0333a6430bb06bd7a0c9faa650c66fdf2a21/jax/lax/lax_control_flow.py#L359). Thanks @mattjj! Following your suggestion, I am able to resolve it with ``` def _pred_bcast_select1(pred, x, y): import jax.numpy as np pred = np.broadcast_to(np.reshape(pred, np.shape(pred) + (1,) * (np.ndim(x) - np.ndim(pred))), np.shape(x)) return lax.select(pred, x, y) ``` There is still a small issue of batching `cond` which throws ``` TypeError: broadcast_in_dim broadcast_dimensions must have length equal to operand ndim, got broadcast_dimensions (1, 2) for operand ndim 3. ``` at ``` ~/jax/jax/lax/lax_control_flow.py in _cond_batching_rule(args, dims, true_jaxpr, false_jaxpr, true_nconsts, false_nconsts) 356 357 if pred_bat: --> 358 true_out = core.jaxpr_as_fun(true_jaxpr_batched)(*(true_consts + true_ops)) 359 false_out = core.jaxpr_as_fun(false_jaxpr_batched)(*(false_consts + false_ops)) 360 true_out = [batching.broadcast(x, size, 0) if not b else x ``` but it is harder to make a repro code. >"< I am working on it btw. Here is another repro script which triggers another error but related to cond batching rule ``` import os; os.environ['XLA_FLAGS'] = '--xla_force_host_platform_device_count=2' import jax from jax.config import config; config.update('jax_platform_name', 'cpu') import jax.numpy as np from jax import random, lax, jit, vmap, pmap def welford_covariance(): def init_fn(size): return np.zeros(size), np.zeros(size), 0 def update_fn(sample, state): mean, m2, n = state n = n + 1 delta_pre = sample - mean mean = mean + delta_pre / n delta_post = sample - mean m2 = m2 + delta_pre * delta_post return mean, m2, n def final_fn(state): mean, m2, n = state cov = m2 / (n - 1) cov_inv_sqrt = np.sqrt(np.reciprocal(cov)) return cov, cov_inv_sqrt return init_fn, update_fn, final_fn def warmup_adapter(): mm_init, mm_update, mm_final = welford_covariance() def init_fn(z, rng, mass_matrix_size): inverse_mass_matrix = np.ones(mass_matrix_size) mass_matrix_sqrt = inverse_mass_matrix mm_state = mm_init(mass_matrix_size) return (inverse_mass_matrix, mass_matrix_sqrt, mm_state, rng) def _update_at_window_end(z, rng_ss, state): inverse_mass_matrix, mass_matrix_sqrt, mm_state, rng = state inverse_mass_matrix, mass_matrix_sqrt = mm_final(mm_state) mm_state = mm_init(inverse_mass_matrix.shape[-1]) return (inverse_mass_matrix, mass_matrix_sqrt, mm_state, rng) def update_fn(t, accept_prob, z, state): inverse_mass_matrix, mass_matrix_sqrt, mm_state, rng = state rng, rng_ss = random.split(rng) state = (inverse_mass_matrix, mass_matrix_sqrt, mm_state, rng) state = lax.cond(t < 10, (z, rng_ss, state), lambda args: _update_at_window_end(*args), state, lambda x: x) return state return init_fn, update_fn def f(z, rng): wa_init, wa_update = warmup_adapter() wa_state = wa_init(z, rng, mass_matrix_size=3) wa_state = wa_update(1, 0.1, z, wa_state) return wa_state pmap(f)(np.ones((2, 3)), random.split(random.PRNGKey(0))) # pass vmap(f)(np.ones((2, 3)), random.split(random.PRNGKey(0))) # fail ``` Error details <details> ``` 
--------------------------------------------------------------------------- RuntimeError Traceback (most recent call last) <ipython-input-3-3ada91d9ee7b> in <module> ----> 1 vmap(f)(np.ones((2, 3)), random.split(random.PRNGKey(0))) ~/jax/jax/api.py in batched_fun(*args) 607 flat_fun, out_tree = flatten_fun_nokwargs(f, in_tree) 608 out_flat = batching.batch(flat_fun, args_flat, _flatten_axes(in_tree, in_axes), --> 609 lambda: _flatten_axes(out_tree(), out_axes)) 610 return tree_unflatten(out_tree(), out_flat) 611 ~/jax/jax/interpreters/batching.py in batch(fun, in_vals, in_dims, out_dim_dests) 39 40 def batch(fun, in_vals, in_dims, out_dim_dests): ---> 41 out_vals, out_dims = batch_fun(fun, in_vals, in_dims) 42 size, = {x.shape[d] for x, d in zip(in_vals, in_dims) if d is not not_mapped} 43 return map(partial(matchaxis, size), out_dims, out_dim_dests(), out_vals) ~/jax/jax/interpreters/batching.py in batch_fun(fun, in_vals, in_dims) 46 with new_master(BatchTrace) as master: 47 fun, out_dims = batch_subtrace(fun, master, in_dims) ---> 48 out_vals = fun.call_wrapped(*in_vals) 49 del master 50 return out_vals, out_dims() ~/jax/jax/linear_util.py in call_wrapped(self, *args, **kwargs) 163 164 del gen --> 165 ans = self.f(*args, **dict(self.params, **kwargs)) 166 del args 167 while stack: <ipython-input-1-f5b4fcd16d7b> in f(z, rng) 57 wa_init, wa_update = warmup_adapter() 58 wa_state = wa_init(z, rng, mass_matrix_size=3) ---> 59 wa_state = wa_update(1, 0.1, z, wa_state) 60 return wa_state <ipython-input-1-f5b4fcd16d7b> in update_fn(t, accept_prob, z, state) 49 state = lax.cond(t < 10, 50 (z, rng_ss, state), lambda args: _update_at_window_end(*args), ---> 51 state, lambda x: x) 52 return state 53 ~/jax/jax/lax/lax_control_flow.py in cond(pred, true_operand, true_fun, false_operand, false_fun) 300 *itertools.chain([pred], true_consts, true_ops, false_consts, false_ops), 301 true_jaxpr=true_jaxpr, false_jaxpr=false_jaxpr, --> 302 true_nconsts=len(true_consts), false_nconsts=len(false_consts)) 303 return tree_unflatten(out_tree, out) 304 ~/jax/jax/core.py in bind(self, *args, **kwargs) 131 132 tracers = map(top_trace.full_raise, args) --> 133 out_tracer = top_trace.process_primitive(self, tracers, kwargs) 134 if self.multiple_results: 135 return map(full_lower, out_tracer) ~/jax/jax/interpreters/batching.py in process_primitive(self, primitive, tracers, params) 113 # TODO(mattjj,phawkins): if no rule implemented, could vmap-via-map here 114 batched_primitive = get_primitive_batcher(primitive) --> 115 val_out, dim_out = batched_primitive(vals_in, dims_in, **params) 116 if primitive.multiple_results: 117 return map(partial(BatchTracer, self), val_out, dim_out) ~/jax/jax/lax/lax_control_flow.py in _cond_batching_rule(args, dims, true_jaxpr, false_jaxpr, true_nconsts, false_nconsts) 369 *itertools.chain([pred], true_consts, true_ops, false_consts, false_ops), 370 true_jaxpr=true_jaxpr_batched, false_jaxpr=false_jaxpr_batched, --> 371 true_nconsts=len(true_consts), false_nconsts=len(false_consts)), out_dims 372 373 cond_p = lax.Primitive('cond') ~/jax/jax/core.py in bind(self, *args, **kwargs) 128 top_trace = find_top_trace(args) 129 if top_trace is None: --> 130 return self.impl(*args, **kwargs) 131 132 tracers = map(top_trace.full_raise, args) ~/jax/jax/interpreters/xla.py in apply_primitive(prim, *args, **params) 121 """Impl rule that compiles and runs a single primitive 'prim' using XLA.""" 122 abstract_args = map(abstractify, args) --> 123 compiled_fun = xla_primitive_callable(prim, *abstract_args, 
**params) 124 return compiled_fun(*args) 125 ~/jax/jax/interpreters/xla.py in xla_primitive_callable(prim, *abstract_args, **params) 134 handle_result = aval_to_result_handler(aval_out) 135 xla_shapes = tuple(map(aval_to_xla_shape, abstract_args)) --> 136 built_c = primitive_computation(prim, *xla_shapes, **params) 137 compiled = built_c.Compile(xla_shapes, xb.get_compile_options(), 138 backend=xb.get_backend(backend)) ~/jax/jax/interpreters/xla.py in primitive_computation(prim, *xla_shapes, **params) 157 elif prim in initial_style_translations: 158 rule = initial_style_translations[prim] --> 159 rule(c, AxisEnv(), *xla_args, backend=backend, **new_params) # side-effect on c 160 else: 161 raise NotImplementedError("XLA translation rule for {} not found".format(prim)) ~/jax/jax/lax/lax_control_flow.py in _cond_translation_rule(c, axis_env, pred, *args, **kwargs) 323 324 true_op = c.Tuple(*(true_consts + true_ops)) --> 325 true_c = make_computation("true_comp", true_jaxpr, c.GetShape(true_op)) 326 327 false_op = c.Tuple(*(false_consts + false_ops)) ~/jax/jax/lax/lax_control_flow.py in make_computation(name, jaxpr, op_shape) 319 ops = [c.GetTupleElement(op, i) for i in range(len(jaxpr.in_avals))] 320 out = c.Call(xla.jaxpr_computation(jaxpr.jaxpr, backend, axis_env, jaxpr.literals, (), --> 321 *_map(c.GetShape, ops)), ops) 322 return c.Build(out) 323 ~/jax/jax/interpreters/xla.py in jaxpr_computation(jaxpr, backend, axis_env, const_vals, freevar_shapes, *arg_shapes) 233 def jaxpr_computation(jaxpr, backend, axis_env, const_vals, freevar_shapes, *arg_shapes): 234 c, out_nodes = _jaxpr_computation(jaxpr, backend, axis_env, const_vals, freevar_shapes, --> 235 *arg_shapes) 236 return c.Build(c.Tuple(*out_nodes)) 237 ~/jax/jax/interpreters/xla.py in _jaxpr_computation(jaxpr, backend, axis_env, const_vals, freevar_shapes, *arg_shapes) 303 raise NotImplementedError(msg.format(eqn.primitive.name)) 304 --> 305 c.GetShape(ans) # force xla to do shape error checking 306 out_nodes = xla_destructure(c, ans) if eqn.primitive.multiple_results else [ans] 307 _map(write, eqn.outvars, out_nodes) ~/miniconda3/envs/pydata/lib/python3.6/site-packages/jaxlib/xla_client.py in GetShape(self, operand) 711 712 def GetShape(self, operand): --> 713 return self._builder.GetShape(operand) 714 715 def SetOpMetadata(self, op_metadata): RuntimeError: Invalid argument: Size of broadcast_dimensions has to match operand's rank; operand rank: 2, size of broadcast_dimensions 1. ``` </details> I think it is related to https://github.com/google/jax/pull/1237 but here the issue happens in `batching` context. The above repro script triggers error at `pred_bat=False` branch, a similar issue happens at `pred_bat=True` branch too ``` def f(t, z, rng): wa_init, wa_update = warmup_adapter() wa_state = wa_init(z, rng, mass_matrix_size=3) wa_state = wa_update(t, 0.1, z, wa_state) return wa_state vmap(f)(np.array([1, 2]), np.ones((2, 3)), random.split(random.PRNGKey(0))) ```
2019-08-30T02:29:55
google/jax
1,298
google__jax-1298
[ "1297" ]
c5737d0f6c9875df5884add23be0789871870be2
diff --git a/jax/lax_linalg.py b/jax/lax_linalg.py --- a/jax/lax_linalg.py +++ b/jax/lax_linalg.py @@ -134,12 +134,7 @@ def _nan_like(c, operand): nan = c.Constant(onp.array(onp.nan, dtype=dtype)) return c.Broadcast(nan, shape.dimensions()) -# TODO(phawkins): remove if-condition after increasing minimum Jaxlib version to -# 0.1.23. -if hasattr(lapack, "potrf"): - _cpu_potrf = lapack.potrf -else: - _cpu_potrf = _unpack_tuple(lapack.jax_potrf, 2) +_cpu_potrf = lapack.potrf def cholesky_cpu_translation_rule(c, operand): shape = c.GetShape(operand) @@ -181,12 +176,7 @@ def eig_abstract_eval(operand): raise NotImplementedError return w, vl, vr -# TODO(phawkins): remove if-condition after increasing minimum Jaxlib version to -# 0.1.23. -if hasattr(lapack, "geev"): - _cpu_geev = lapack.geev -else: - _cpu_geev = _unpack_tuple(lapack.jax_geev, 4) +_cpu_geev = lapack.geev def eig_cpu_translation_rule(c, operand): shape = c.GetShape(operand) @@ -294,21 +284,13 @@ def eigh_batching_rule(batched_args, batch_dims, lower): xla.translations[eigh_p] = eigh_translation_rule ad.primitive_jvps[eigh_p] = eigh_jvp_rule -# TODO(phawkins): remove if-condition after increasing minimum Jaxlib version to -# 0.1.23. -if hasattr(lapack, "syevd"): - _cpu_syevd = lapack.syevd -else: - _cpu_syevd = _unpack_tuple(lapack.jax_syevd, 3) +_cpu_syevd = lapack.syevd xla.backend_specific_translations['cpu'][eigh_p] = partial( _eigh_cpu_gpu_translation_rule, _cpu_syevd) -# TODO(phawkins): remove if-condition after increasing minimum Jaxlib version to -# 0.1.23. -if cusolver: - xla.backend_specific_translations['gpu'][eigh_p] = partial( - _eigh_cpu_gpu_translation_rule, cusolver.syevd) +xla.backend_specific_translations['gpu'][eigh_p] = partial( + _eigh_cpu_gpu_translation_rule, cusolver.syevd) batching.primitive_batchers[eigh_p] = eigh_batching_rule @@ -612,12 +594,7 @@ def _lu_cpu_gpu_translation_rule(getrf_impl, c, operand): ad.primitive_jvps[lu_p] = _lu_jvp_rule batching.primitive_batchers[lu_p] = _lu_batching_rule -# TODO(phawkins): remove if-condition after increasing minimum Jaxlib version to -# 0.1.23. -if hasattr(lapack, "getrf"): - _cpu_getrf = lapack.getrf -else: - _cpu_getrf = _unpack_tuple(lapack.jax_getrf, 3) +_cpu_getrf = lapack.getrf xla.backend_specific_translations['cpu'][lu_p] = partial( _lu_cpu_gpu_translation_rule, _cpu_getrf) @@ -803,18 +780,10 @@ def svd_batching_rule(batched_args, batch_dims, full_matrices, compute_uv): batching.primitive_batchers[svd_p] = svd_batching_rule xla.translations[svd_p] = svd_translation_rule -# TODO(phawkins): remove if-condition after increasing minimum Jaxlib version to -# 0.1.23. -if hasattr(lapack, "gesdd"): - _cpu_gesdd = lapack.gesdd -else: - _cpu_gesdd = _unpack_tuple(lapack.jax_gesdd, 4) +_cpu_gesdd = lapack.gesdd xla.backend_specific_translations['cpu'][svd_p] = partial( _svd_cpu_gpu_translation_rule, _cpu_gesdd) -# TODO(phawkins): remove if-condition after increasing minimum Jaxlib version to -# 0.1.23. -if cusolver: - xla.backend_specific_translations['gpu'][svd_p] = partial( - _svd_cpu_gpu_translation_rule, cusolver.gesvd) +xla.backend_specific_translations['gpu'][svd_p] = partial( + _svd_cpu_gpu_translation_rule, cusolver.gesvd) diff --git a/jax/lib/__init__.py b/jax/lib/__init__.py --- a/jax/lib/__init__.py +++ b/jax/lib/__init__.py @@ -41,16 +41,5 @@ def _check_jaxlib_version(): from jaxlib import xrt from jaxlib import lapack -# TODO(phawkins): make the import unconditional when the minimum Jaxlib version -# has been increased to 0.1.23. 
-try: - from jaxlib import pytree -except ImportError: - pytree = None - -# TODO(phawkins): make the import unconditional when the minimum Jaxlib version -# has been increased to 0.1.23. -try: - from jaxlib import cusolver -except ImportError: - cusolver = None +from jaxlib import pytree +from jaxlib import cusolver diff --git a/jax/tree_util.py b/jax/tree_util.py --- a/jax/tree_util.py +++ b/jax/tree_util.py @@ -46,308 +46,85 @@ from .util import unzip2, partial, safe_map -# TODO(phawkins): use the first case unconditionally when the minimum Jaxlib -# version has been increased to 0.1.23. -if pytree: - - def tree_map(f, tree): - """Map a function over a pytree to produce a new pytree. - - Args: - f: function to be applied at each leaf. - tree: a pytree to be mapped over. - - Returns: - A new pytree with the same structure as `tree` but with the value at each - leaf given by `f(x)` where `x` is the value at the corresponding leaf in - `tree`. - """ - leaves, treedef = pytree.flatten(tree) - return treedef.unflatten(map(f, leaves)) - - def tree_multimap(f, tree, *rest): - """Map a multi-input function over pytree args to produce a new pytree. - - Args: - f: function that takes `1 + len(rest)` arguments, to be applied at the - corresponding leaves of the pytrees. - tree: a pytree to be mapped over, with each leaf providing the first - positional argument to `f`. - *rest: a tuple of pytrees, each of which has the same structure as tree or - or has tree as a prefix. - Returns: - A new pytree with the same structure as `tree` but with the value at each - leaf given by `f(x, *xs)` where `x` is the value at the corresponding leaf - in `tree` and `xs` is the tuple of values at corresponding nodes in - `rest`. - """ - leaves, treedef = pytree.flatten(tree) - all_leaves = [leaves] + [treedef.flatten_up_to(r) for r in rest] - return treedef.unflatten(f(*xs) for xs in zip(*all_leaves)) - - def tree_leaves(tree): - return pytree.flatten(tree)[0] - - def process_pytree(process_node, tree): - leaves, treedef = pytree.flatten(tree) - return treedef.walk(process_node, None, leaves), treedef - - tree_flatten = pytree.flatten - - def build_tree(treedef, xs): - return treedef.from_iterable_tree(xs) - - def treedef_is_leaf(treedef): - return treedef.num_nodes == 1 - - def tree_unflatten(treedef, xs): - return treedef.unflatten(xs) - - def tree_transpose(outer_treedef, inner_treedef, pytree_to_transpose): - flat, treedef = tree_flatten(pytree_to_transpose) - expected_treedef = outer_treedef.compose(inner_treedef) - if treedef != expected_treedef: - raise TypeError("Mismatch\n{}\n != \n{}".format(treedef, expected_treedef)) - - inner_size = inner_treedef.num_leaves - outer_size = outer_treedef.num_leaves - flat = iter(flat) - lol = [[next(flat) for _ in range(inner_size)] for __ in range(outer_size)] - transposed_lol = zip(*lol) - subtrees = map(partial(tree_unflatten, outer_treedef), transposed_lol) - return tree_unflatten(inner_treedef, subtrees) - - def tree_structure(tree): - _, treedef = pytree.flatten(tree) - return treedef - - def treedef_tuple(trees): - return pytree.tuple(list(trees)) - - def treedef_children(treedef): - return treedef.children() - - register_pytree_node = pytree.register_node - -else: - def tree_map(f, tree): - """Map a function over a pytree to produce a new pytree. - - Args: - f: function to be applied at each leaf. - tree: a pytree to be mapped over. 
- - Returns: - A new pytree with the same structure as `tree` but with the value at each - leaf given by `f(x)` where `x` is the value at the corresponding leaf in - `tree`. - """ - node_type = _get_node_type(tree) - if node_type: - children, node_spec = node_type.to_iterable(tree) - new_children = [tree_map(f, child) for child in children] - return node_type.from_iterable(node_spec, new_children) - else: - return f(tree) - - def tree_multimap(f, tree, *rest): - """Map a multi-input function over pytree args to produce a new pytree. - - Args: - f: function that takes `1 + len(rest)` arguments, to be applied at the - corresponding leaves of the pytrees. - tree: a pytree to be mapped over, with each leaf providing the first - positional argument to `f`. - *rest: a tuple of pytrees, each with the same structure as `tree`. - - Returns: - A new pytree with the same structure as `tree` but with the value at each - leaf given by `f(x, *xs)` where `x` is the value at the corresponding leaf - in `tree` and `xs` is the tuple of values at corresponding leaves in `rest`. - """ - node_type = _get_node_type(tree) - if node_type: - children, aux_data = node_type.to_iterable(tree) - all_children = [children] - for other_tree in rest: - other_node_type = _get_node_type(other_tree) - if node_type != other_node_type: - raise TypeError('Mismatch: {} != {}'.format(other_node_type, node_type)) - other_children, other_aux_data = node_type.to_iterable(other_tree) - if other_aux_data != aux_data: - raise TypeError('Mismatch: {} != {}'.format(other_aux_data, aux_data)) - all_children.append(other_children) - - new_children = [tree_multimap(f, *xs) for xs in zip(*all_children)] - return node_type.from_iterable(aux_data, new_children) - else: - return f(tree, *rest) - - def _walk_pytree(f_node, f_leaf, tree): - node_type = _get_node_type(tree) - if node_type: - children, node_spec = node_type.to_iterable(tree) - proc_children, child_specs = unzip2([_walk_pytree(f_node, f_leaf, child) - for child in children]) - tree_def = _PyTreeDef(node_type, node_spec, child_specs) - return f_node(proc_children), tree_def - else: - return f_leaf(tree), leaf - - def process_pytree(process_node, tree): - return _walk_pytree(process_node, lambda x: x, tree) - - def build_tree(treedef, xs): - if treedef is leaf: - return xs - else: - # We use 'iter' for clearer error messages - children = safe_map(build_tree, iter(treedef.children), iter(xs)) - return treedef.node_type.from_iterable(treedef.node_data, children) - - def tree_leaves(tree): - """Generator that iterates over all leaves of a pytree.""" - node_type = _get_node_type(tree) - if node_type: - children, _ = node_type.to_iterable(tree) - for child in children: - # TODO(mattjj,phawkins): use 'yield from' when PY2 is dropped - for leaf in tree_leaves(child): - yield leaf - else: - yield tree - - def tree_flatten(tree): - itr, treedef = _walk_pytree(it.chain.from_iterable, lambda x: (x,), tree) - return list(itr), treedef - - def _tree_unflatten(xs, treedef): - if treedef is leaf: - return next(xs) - else: - children = tuple(map(partial(_tree_unflatten, xs), treedef.children)) - return treedef.node_type.from_iterable(treedef.node_data, children) - - def tree_unflatten(treedef, xs): - return _tree_unflatten(iter(xs), treedef) - - def tree_transpose(outer_treedef, inner_treedef, pytree_to_transpose): - flat, treedef = tree_flatten(pytree_to_transpose) - expected_treedef = _nested_treedef(inner_treedef, outer_treedef) - if treedef != expected_treedef: - raise TypeError("Mismatch\n{}\n 
!= \n{}".format(treedef, expected_treedef)) - - inner_size = _num_leaves(inner_treedef) - outer_size = _num_leaves(outer_treedef) - flat = iter(flat) - lol = [[next(flat) for _ in range(inner_size)] for __ in range(outer_size)] - transposed_lol = zip(*lol) - subtrees = map(partial(tree_unflatten, outer_treedef), transposed_lol) - return tree_unflatten(inner_treedef, subtrees) - - def _num_leaves(treedef): - return 1 if treedef is leaf else sum(map(_num_leaves, treedef.children)) - - def _nested_treedef(inner, outer): - # just used in tree_transpose error checking - if outer is leaf: - return inner - else: - children = map(partial(_nested_treedef, inner), outer.children) - return _PyTreeDef(outer.node_type, outer.node_data, tuple(children)) - - def tree_structure(tree): - _, spec = process_pytree(lambda _: None, tree) - return spec - - - class _PyTreeDef(object): - __slots__ = ("node_type", "node_data", "children") - - def __init__(self, node_type, node_data, children): - self.node_type = node_type - self.node_data = node_data - self.children = children - - def __repr__(self): - if self.node_data is None: - data_repr = "" - else: - data_repr = "[{}]".format(self.node_data) - - return "PyTree({}{}, [{}])".format(self.node_type.name, data_repr, - ','.join(map(repr, self.children))) - - def __hash__(self): - return hash((self.node_type, self.node_data, tuple(self.children))) - - def __eq__(self, other): - if other is leaf: - return False - else: - return (self.node_type == other.node_type and - self.node_data == other.node_data and - self.children == other.children) - - def __ne__(self, other): - return not self == other - - - class _PyLeaf(object): - __slots__ = () - - def __repr__(self): - return '*' - - leaf = _PyLeaf() - - def treedef_is_leaf(treedef): - return treedef is leaf - - def treedef_tuple(treedefs): - return _PyTreeDef(node_types[tuple], None, tuple(treedefs)) - - def treedef_children(treedef): - return treedef.children - - def dict_to_iterable(xs): - keys = tuple(sorted(xs.keys())) - return tuple(map(xs.get, keys)), keys - - class NodeType(object): - def __init__(self, name, to_iterable, from_iterable): - self.name = name - self.to_iterable = to_iterable - self.from_iterable = from_iterable - - def __repr__(self): - return self.name - - node_types = {} - - def register_pytree_node(py_type, to_iterable, from_iterable): - assert py_type not in node_types - node_types[py_type] = NodeType(str(py_type), to_iterable, from_iterable) - - register_pytree_node(tuple, lambda xs: (xs, None), lambda _, xs: tuple(xs)) - register_pytree_node(list, lambda xs: (tuple(xs), None), lambda _, xs: list(xs)) - register_pytree_node(dict, dict_to_iterable, lambda keys, xs: dict(zip(keys, xs))) - - # To handle namedtuples, we can't just use the standard table of node_types - # because every namedtuple creates its own type and thus would require its own - # entry in the table. Instead we use a heuristic check on the type itself to - # decide whether it's a namedtuple type, and if so treat it as a pytree node. - def _get_node_type(maybe_tree): - t = type(maybe_tree) - return node_types.get(t) or _namedtuple_node(t) +def tree_map(f, tree): + """Map a function over a pytree to produce a new pytree. - def _namedtuple_node(t): - if issubclass(t, tuple) and hasattr(t, '_fields'): - return NamedtupleNode + Args: + f: function to be applied at each leaf. + tree: a pytree to be mapped over. 
+ + Returns: + A new pytree with the same structure as `tree` but with the value at each + leaf given by `f(x)` where `x` is the value at the corresponding leaf in + `tree`. + """ + leaves, treedef = pytree.flatten(tree) + return treedef.unflatten(map(f, leaves)) + +def tree_multimap(f, tree, *rest): + """Map a multi-input function over pytree args to produce a new pytree. + + Args: + f: function that takes `1 + len(rest)` arguments, to be applied at the + corresponding leaves of the pytrees. + tree: a pytree to be mapped over, with each leaf providing the first + positional argument to `f`. + *rest: a tuple of pytrees, each of which has the same structure as tree or + or has tree as a prefix. + Returns: + A new pytree with the same structure as `tree` but with the value at each + leaf given by `f(x, *xs)` where `x` is the value at the corresponding leaf + in `tree` and `xs` is the tuple of values at corresponding nodes in + `rest`. + """ + leaves, treedef = pytree.flatten(tree) + all_leaves = [leaves] + [treedef.flatten_up_to(r) for r in rest] + return treedef.unflatten(f(*xs) for xs in zip(*all_leaves)) + +def tree_leaves(tree): + return pytree.flatten(tree)[0] + +def process_pytree(process_node, tree): + leaves, treedef = pytree.flatten(tree) + return treedef.walk(process_node, None, leaves), treedef + +tree_flatten = pytree.flatten + +def build_tree(treedef, xs): + return treedef.from_iterable_tree(xs) + +def treedef_is_leaf(treedef): + return treedef.num_nodes == 1 + +def tree_unflatten(treedef, xs): + return treedef.unflatten(xs) + +def tree_transpose(outer_treedef, inner_treedef, pytree_to_transpose): + flat, treedef = tree_flatten(pytree_to_transpose) + expected_treedef = outer_treedef.compose(inner_treedef) + if treedef != expected_treedef: + raise TypeError("Mismatch\n{}\n != \n{}".format(treedef, expected_treedef)) + + inner_size = inner_treedef.num_leaves + outer_size = outer_treedef.num_leaves + flat = iter(flat) + lol = [[next(flat) for _ in range(inner_size)] for __ in range(outer_size)] + transposed_lol = zip(*lol) + subtrees = map(partial(tree_unflatten, outer_treedef), transposed_lol) + return tree_unflatten(inner_treedef, subtrees) + +def tree_structure(tree): + _, treedef = pytree.flatten(tree) + return treedef + +def treedef_tuple(trees): + return pytree.tuple(list(trees)) + +def treedef_children(treedef): + return treedef.children() + +register_pytree_node = pytree.register_node - NamedtupleNode = NodeType('namedtuple', - lambda xs: (tuple(xs), type(xs)), - lambda t, xs: t(*xs)) def tree_reduce(f, tree):
JAX with GPU support requires jaxlib version 0.1.26 I've got an issue. So, I followed the installation instructions here: ![image](https://user-images.githubusercontent.com/12001304/64115427-e8b98300-cd8f-11e9-879c-b4935195991c.png) Then I ran the following line in `JupyterLab`: ```python import jax.numpy as jnp ``` And that produces the following error: ```python ValueError: jaxlib is version 0.1.23, but this version of jax requires version 0.1.26. ``` I went back and changed the second-to-last line to: ```bash pip install --upgrade $BASE_URL/$CUDA_VERSION/jaxlib-0.1.26-$PYTHON_VERSION-none-$PLATFORM.whl ``` I tried running again, but it was denied. ![image](https://user-images.githubusercontent.com/12001304/64115630-6bdad900-cd90-11e9-92d6-ebc6dc1c15a0.png)
Thanks! I think our documentation is out-of-date, as we forgot to update it when we bumped the minimum jaxlib version.
2019-09-02T14:27:08
google/jax
1,299
google__jax-1299
[ "1271" ]
220a2ea5920f5a124145f1afb47bd60294639f92
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -1352,7 +1352,7 @@ def pad(array, pad_width, mode='constant', constant_values=0): @_wraps(onp.stack) def stack(arrays, axis=0): - if not arrays: + if not len(arrays): raise ValueError("Need at least one array to stack.") shape0 = shape(arrays[0]) axis = _canonicalize_axis(axis, len(shape0) + 1) @@ -1377,7 +1377,7 @@ def tile(a, reps): @_wraps(onp.concatenate) def concatenate(arrays, axis=0): - if not arrays: + if not len(arrays): raise ValueError("Need at least one array to concatenate.") if ndim(arrays[0]) == 0: raise ValueError("Zero-dimensional arrays cannot be concatenated.")
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -1864,6 +1864,18 @@ def testDisableNumpyRankPromotionBroadcasting(self): finally: FLAGS.jax_numpy_rank_promotion = prev_flag + def testStackArrayArgument(self): + # tests https://github.com/google/jax/issues/1271 + @api.jit + def foo(x): + return lnp.stack(x) + foo(onp.zeros(2)) # doesn't crash + + @api.jit + def foo(x): + return lnp.concatenate(x) + foo(onp.zeros((2, 2))) # doesn't crash + # Most grad tests are at the lax level (see lax_test.py), but we add some here # as needed for e.g. particular compound ops of interest.
np.stack raises incorrect exception when given a traced array ```python @jax.jit def foo(x): return np.stack(x) foo(np.zeros(2)) # this fails ``` This raises an exception because of an `if not arrays` check, which assumes that truth-testing `arrays` falls back to its length (as it would for a plain sequence); on a traced array, `__bool__` requires a concrete value and raises instead. Using `len(arrays) == 0` might be sufficient to fix this. This issue might be present in a few other places and might warrant a test. Error message: ```python TypeError: Abstract value passed to `bool`, which requires a concrete value. The function to be transformed can't be traced at the required level of abstraction. If using `jit`, try using `static_argnums` or applying `jit` to smaller subfunctions instead. ```
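A small sketch (mine, not part of the report) of why the suggested `len`-based check is trace-safe while truth-testing is not, assuming the fix in this PR is applied:

```python
import jax
import jax.numpy as np

@jax.jit
def f(x):
    # bool(x) -- e.g. `if not x:` -- needs a concrete value and fails under tracing;
    # len(x) only reads the static shape, so it is safe inside jit.
    assert len(x) > 0
    return np.stack(x)

f(np.zeros(2))
```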
2019-09-02T14:56:45
google/jax
1,308
google__jax-1308
[ "1228" ]
73d512bdd2fc2d817a46676af0c3f5900b56a108
diff --git a/jax/scipy/linalg.py b/jax/scipy/linalg.py --- a/jax/scipy/linalg.py +++ b/jax/scipy/linalg.py @@ -16,10 +16,11 @@ from __future__ import division from __future__ import print_function -import warnings +from functools import partial import scipy.linalg +from jax import jit from .. import lax from .. import lax_linalg from ..numpy.lax_numpy import _wraps @@ -117,6 +118,46 @@ def lu_factor(a, overwrite_a=False, check_finite=True): a = np_linalg._promote_arg_dtypes(np.asarray(a)) return lax_linalg.lu(a) +@partial(jit, static_argnums=(3,)) +def _lu_solve(lu, pivots, b, trans): + lu_shape = np.shape(lu) + b_shape = np.shape(b) + if len(lu_shape) != 2 or lu_shape[0] != lu_shape[1]: + raise ValueError("LU decomposition must be a square matrix, got shape {}" + .format(lu_shape)) + if len(b_shape) < 1: + raise ValueError("b matrix must have rank >= 1, got shape {}" + .format(b_shape)) + + if b_shape[0] != lu_shape[0]: + raise ValueError("Dimension of LU decomposition matrix (shape {}) must " + "match leading axis of b array (shape {})" + .format(lu_shape, b_shape)) + m = lu_shape[0] + permutation = lax_linalg.lu_pivots_to_permutation(np.array(pivots), m) + x = np.reshape(b, (m, -1)) + if trans == 0: + x = x[permutation, :] + x = lax_linalg.triangular_solve(lu, x, left_side=True, lower=True, + unit_diagonal=True) + x = lax_linalg.triangular_solve(lu, x, left_side=True, lower=False) + elif trans == 1 or trans == 2: + conj = trans == 2 + x = lax_linalg.triangular_solve(lu, x, left_side=True, lower=False, + transpose_a=True, conjugate_a=conj) + x = lax_linalg.triangular_solve(lu, x, left_side=True, lower=True, + unit_diagonal=True, transpose_a=True, + conjugate_a=conj) + x = x[np.argsort(permutation), :] + else: + raise ValueError("'trans' value must be 0, 1, or 2, got {}".format(trans)) + return lax.reshape(x, b_shape) + +@_wraps(scipy.linalg.lu_solve) +def lu_solve(lu_and_piv, b, trans=0, overwrite_b=False, check_finite=True): + del overwrite_b, check_finite + lu, pivots = lu_and_piv + return _lu_solve(lu, pivots, b, trans) @_wraps(scipy.linalg.lu) def lu(a, permute_l=False, overwrite_a=False, check_finite=True):
diff --git a/tests/linalg_test.py b/tests/linalg_test.py --- a/tests/linalg_test.py +++ b/tests/linalg_test.py @@ -585,6 +585,36 @@ def testLuFactor(self, n, dtype, rng): self.assertAllClose(x, onp.matmul(l, u), check_dtypes=True, rtol=1e-3) self._CompileAndCheck(jsp.linalg.lu_factor, args_maker, check_dtypes=True) + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": + "_lhs={}_rhs={}_trans={}".format( + jtu.format_shape_dtype_string(lhs_shape, dtype), + jtu.format_shape_dtype_string(rhs_shape, dtype), + trans), + "lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype, + "trans": trans, "rng": rng} + for lhs_shape, rhs_shape in [ + ((1, 1), (1, 1)), + ((4, 4), (4,)), + ((8, 8), (8, 4, 2)), + ] + for trans in [0, 1, 2] + for dtype in float_types + complex_types + for rng in [jtu.rand_default()])) + def testLuSolve(self, lhs_shape, rhs_shape, dtype, trans, rng): + _skip_if_unsupported_type(dtype) + osp_fun = lambda lu, piv, rhs: osp.linalg.lu_solve((lu, piv), rhs, trans=trans) + jsp_fun = lambda lu, piv, rhs: jsp.linalg.lu_solve((lu, piv), rhs, trans=trans) + + def args_maker(): + a = rng(lhs_shape, dtype) + lu, piv = osp.linalg.lu_factor(a) + return [lu, piv, rng(rhs_shape, dtype)] + + self._CheckAgainstNumpy(osp_fun, jsp_fun, args_maker, + check_dtypes=True, tol=1e-3) + self._CompileAndCheck(jsp_fun, args_maker, check_dtypes=True) + @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_lhs={}_rhs={}_sym_pos={}_lower={}".format(
Add impl of `scipy.linalg.lu_solve` This is used by the Glow generative model to parameterize an invertible 1x1 convolution. A TF implementation in terms of triangular solve is here: https://github.com/tensorflow/probability/blob/master/tensorflow_probability/python/math/linalg.py#L306
Pretty much exactly the same functionality is already available via `jax.numpy.linalg.solve`, but I guess there's no harm in adding the `scipy` variant too. (It supports an additional transpose or conjugate transpose and takes its input prefactored.)
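A usage sketch of the interface this PR adds, next to the `jax.numpy.linalg.solve` equivalent mentioned above; the matrices are arbitrary:

```python
import numpy as onp
import jax.numpy as np
from jax.scipy import linalg as jsl

a = onp.array([[4., 3.], [6., 3.]], onp.float32)
b = onp.array([1., 2.], onp.float32)

lu, piv = jsl.lu_factor(a)        # factor once ...
x = jsl.lu_solve((lu, piv), b)    # ... and reuse the factorization for many right-hand sides

x_direct = np.linalg.solve(a, b)  # one-shot equivalent noted in the comment above
```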
2019-09-05T14:01:35
google/jax
1,309
google__jax-1309
[ "1304" ]
73d512bdd2fc2d817a46676af0c3f5900b56a108
diff --git a/jax/interpreters/pxla.py b/jax/interpreters/pxla.py --- a/jax/interpreters/pxla.py +++ b/jax/interpreters/pxla.py @@ -297,6 +297,7 @@ def block_until_ready(self): self._check_if_deleted() for buf in self.device_buffers: buf.block_host_until_ready() + return self class ShardedDeviceArray(ShardedDeviceValue, xla.DeviceArray): diff --git a/jax/interpreters/xla.py b/jax/interpreters/xla.py --- a/jax/interpreters/xla.py +++ b/jax/interpreters/xla.py @@ -507,9 +507,12 @@ def block_until_ready(self): This method is mostly useful for timing microbenchmarks that wish to time how long a computation takes, without transferring the result back to the host. + + Returns the buffer object (`self`). """ self._check_if_deleted() self.device_buffer.block_host_until_ready() + return self def _forward_method(attrname, self, fun, *args): return fun(getattr(self, attrname), *args)
diff --git a/tests/api_test.py b/tests/api_test.py --- a/tests/api_test.py +++ b/tests/api_test.py @@ -699,8 +699,9 @@ def test_devicearray_delete(self): def test_devicearray_block_until_ready(self): x = device_put(1.) - x.block_until_ready() - # Tests only that block_until_ready() does not produce an error. + y = x.block_until_ready() + # Tests mostly that block_until_ready() does not produce an error. + self.assertTrue(y is x) def test_namedtuple_transparency(self): # See https://github.com/google/jax/issues/446
block_until_ready() should return the DeviceArray object It would be nice to have the `block_until_ready()` method return the `DeviceArray` object so that it can be more easily dropped into existing code. So something like ```python a = f() ``` could easily become ```python a = f().block_until_ready() ```
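A hedged sketch of the microbenchmark pattern this enables once the method returns the array; shapes and timings are illustrative only:

```python
import time
import jax.numpy as np
from jax import jit

f = jit(lambda x: np.dot(x, x))
x = np.ones((1000, 1000))

f(x).block_until_ready()          # warm-up / compile, chainable in a single expression
start = time.time()
f(x).block_until_ready()          # times the computation, not just the dispatch
print(time.time() - start)
```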
2019-09-05T14:17:21
google/jax
1,329
google__jax-1329
[ "1316" ]
22afc8cea1b9b5a0dca0cdfe8a0ce51df94cf9e0
diff --git a/jax/interpreters/ad.py b/jax/interpreters/ad.py --- a/jax/interpreters/ad.py +++ b/jax/interpreters/ad.py @@ -61,7 +61,9 @@ def jvp_subtrace(master, primals, tangents): for x in list(primals) + list(tangents): if isinstance(x, Tracer): assert x.trace.level < trace.level - ans = yield map(partial(JVPTracer, trace), primals, tangents), {} + in_tracers = [JVPTracer(trace, x, t) if t is not zero else x + for x, t in zip(primals, tangents)] + ans = yield in_tracers, {} out_tracers = map(trace.full_raise, ans) yield unzip2([(out_tracer.primal, out_tracer.tangent) for out_tracer in out_tracers])
diff --git a/tests/lax_control_flow_test.py b/tests/lax_control_flow_test.py --- a/tests/lax_control_flow_test.py +++ b/tests/lax_control_flow_test.py @@ -972,6 +972,15 @@ def testWhileCondConstant(self): out = lax.while_loop(lambda _: False, lambda _: (), ()) # doesn't crash self.assertEqual(out, ()) + def testIssue1316(self): + def f(carry, _): + c, key = carry + key, _ = random.split(key) + return (c, key), () + + key = random.PRNGKey(0) + api.grad(lambda c: lax.scan(f, (c, key), onp.ones(3))[0][0])(0.) # doesn't crash + if __name__ == '__main__': absltest.main()
forward-mode differentiation of lax.scan suddenly not working When running a piece of code that was working earlier today, I am suddenly getting the following error: ` NotImplementedError: Forward-mode differentiation rule for 'while' not implemented ` If necessary I can try to create a minimal repro, but I will say that if I replace a lax.scan call with a for loop then this error goes away.
Sounds like it could be a regression! Though we have [tests](https://github.com/google/jax/blob/e087915c55f2716ba8f5464cf9f9f55f4f8ce93b/tests/lax_control_flow_test.py#L602) [for](https://github.com/google/jax/blob/e087915c55f2716ba8f5464cf9f9f55f4f8ce93b/tests/lax_control_flow_test.py#L635) [this](https://github.com/google/jax/blob/e087915c55f2716ba8f5464cf9f9f55f4f8ce93b/tests/lax_control_flow_test.py#L730), they might leave some case uncovered. A minimal repro would be really helpful, since our current tests pass. Ok, I have (finally) reduced this to a minimal repro, see here: https://pastebin.com/WzYk6Xj1 Let me know if anything doesn't make sense. Thanks! Thanks again for raising this! I only just started looking at it. I think the issue (or at least one issue) was uncovered by #1175 or #1269: the loop being complained about is in the PRNG hash function. We're not dropping out of the autodiff trace properly somewhere on integer values (that is my guess at the root cause), and that's hitting the PRNG code which has while loops in it. Some evidence for this guess is I replaced the line `key, split = random.split(key)` with `key, split = key, key` and the error went away. Still looking... I think I introduced the real issue in #1224, because in [these lines](https://github.com/google/jax/blob/22afc8cea1b9b5a0dca0cdfe8a0ce51df94cf9e0/jax/interpreters/ad.py#L488-L489) I'm effectively creating JVPTracers (with zeros) for integer-valued arguments. I think Dougal had some clever way of avoiding that before, and I clobbered it!
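Distilling the discussion above (this mirrors the regression test added in this PR): the PRNG key carried through the scan is integer-valued, so it should ride along untouched rather than being wrapped in a `JVPTracer` with a zero tangent:

```python
import numpy as onp
from jax import lax, random, grad

def f(carry, _):
    c, key = carry
    key, _ = random.split(key)     # integer-valued key inside the differentiated scan
    return (c, key), ()

key = random.PRNGKey(0)
grad(lambda c: lax.scan(f, (c, key), onp.ones(3))[0][0])(0.)   # no longer hits the 'while' rule
```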
2019-09-10T00:47:48
google/jax
1,335
google__jax-1335
[ "1213" ]
705eb1cbcb683b644389e61d067b7df091bfc5a1
diff --git a/jax/numpy/linalg.py b/jax/numpy/linalg.py --- a/jax/numpy/linalg.py +++ b/jax/numpy/linalg.py @@ -60,6 +60,7 @@ def svd(a, full_matrices=True, compute_uv=True): @_wraps(onp.linalg.slogdet) +@jit def slogdet(a): a = _promote_arg_dtypes(np.asarray(a)) dtype = lax.dtype(a) @@ -72,10 +73,10 @@ def slogdet(a): is_zero = np.any(diag == np.array(0, dtype=dtype), axis=-1) parity = np.count_nonzero(pivot != np.arange(a_shape[-1]), axis=-1) if np.iscomplexobj(a): - sign = np.prod(diag / np.abs(diag)) + sign = np.prod(diag / np.abs(diag), axis=-1) else: sign = np.array(1, dtype=dtype) - parity = parity + np.count_nonzero(diag < 0) + parity = parity + np.count_nonzero(diag < 0, axis=-1) sign = np.where(is_zero, np.array(0, dtype=dtype), sign * np.array(-2 * (parity % 2) + 1, dtype=dtype))
diff --git a/tests/linalg_test.py b/tests/linalg_test.py --- a/tests/linalg_test.py +++ b/tests/linalg_test.py @@ -99,19 +99,27 @@ def testDet(self, n, dtype, rng): @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": - "_n={}".format(jtu.format_shape_dtype_string((n,n), dtype)), - "n": n, "dtype": dtype, "rng": rng} - for n in [0, 4, 10, 200] + "_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)), + "shape": shape, "dtype": dtype, "rng": rng} + for shape in [(0, 0), (1, 1), (3, 3), (4, 4), (10, 10), (200, 200), + (2, 2, 2), (2, 3, 3), (3, 2, 2)] for dtype in float_types + complex_types for rng in [jtu.rand_default()])) - def testSlogdet(self, n, dtype, rng): + def testSlogdet(self, shape, dtype, rng): _skip_if_unsupported_type(dtype) - args_maker = lambda: [rng((n, n), dtype)] + args_maker = lambda: [rng(shape, dtype)] self._CheckAgainstNumpy(onp.linalg.slogdet, np.linalg.slogdet, args_maker, check_dtypes=True, tol=1e-3) self._CompileAndCheck(np.linalg.slogdet, args_maker, check_dtypes=True) + def testIssue1213(self): + for n in range(5): + mat = np.array([onp.diag(onp.ones([5], dtype=onp.float32))*(-.01)] * 2) + args_maker = lambda: [mat] + self._CheckAgainstNumpy(onp.linalg.slogdet, np.linalg.slogdet, args_maker, + check_dtypes=True, tol=1e-3) + @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_shape={}".format( jtu.format_shape_dtype_string(shape, dtype)),
`slogdet` sign is incorrect In the following, numpy disagrees with JAX ```python mat = np.array([[[-0.01]], [[-0.01]]]) print(np.linalg.slogdet(mat)) print(jnp.linalg.slogdet(jnp.array(mat))) ``` => ``` (array([-1., -1.]), array([-4.60517019, -4.60517019])) (DeviceArray([1., 1.]), DeviceArray([-4.60517019, -4.60517019])) ```
Not sure if relevant, but I was running with `from jax import config; config.update('jax_enable_x64',True)` Seems to be something to do with the even/odd-ness of the matrix rank: ```python mat = np.array([np.diag(np.ones([5]))*(-.01)] * 2) for n in range(1, 6): print(n) print(onp.linalg.slogdet(mat[...,:n,:n])) print(np.linalg.slogdet(np.array(mat[...,:n,:n]))) ``` => ``` 1 (array([-1., -1.], dtype=float32), array([-4.6051702, -4.6051702], dtype=float32)) (DeviceArray([1., 1.], dtype=float32), DeviceArray([-4.60517025, -4.60517025], dtype=float32)) 2 (array([1., 1.], dtype=float32), array([-9.2103405, -9.2103405], dtype=float32)) (DeviceArray([1., 1.], dtype=float32), DeviceArray([-9.2103405, -9.2103405], dtype=float32)) 3 (array([-1., -1.], dtype=float32), array([-13.815511, -13.815511], dtype=float32)) (DeviceArray([1., 1.], dtype=float32), DeviceArray([-13.81551075, -13.81551075], dtype=float32)) 4 (array([1., 1.], dtype=float32), array([-18.420681, -18.420681], dtype=float32)) (DeviceArray([1., 1.], dtype=float32), DeviceArray([-18.420681, -18.420681], dtype=float32)) 5 (array([-1., -1.], dtype=float32), array([-23.02585, -23.02585], dtype=float32)) (DeviceArray([1., 1.], dtype=float32), DeviceArray([-23.0258522, -23.0258522], dtype=float32)) ```
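The fix in the patch above amounts to doing the sign bookkeeping per matrix instead of pooled across the batch; in plain NumPy terms (my illustration, not from the thread):

```python
import numpy as onp

diag = onp.array([[-0.01], [-0.01]])     # batch of two 1x1 LU diagonals
onp.count_nonzero(diag < 0)              # 2: parity pooled over the batch -> sign wrongly +1
onp.count_nonzero(diag < 0, axis=-1)     # [1, 1]: odd parity per matrix -> sign -1, matching NumPy
```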
2019-09-11T12:21:11
google/jax
1,349
google__jax-1349
[ "1233" ]
b7b5328526d5cf624f308eabbb897b280ea8ef7c
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -1682,11 +1682,11 @@ def ix_(*args): return tuple(output) -@_wraps(onp.repeat) -def repeat(a, repeats, axis=None): + +def _repeat_scalar(a, repeats, axis=None): if not isscalar(repeats): raise NotImplementedError( - "np.repeat implementation only supports scalar repeats") + "_repeat_scalar implementation only supports scalar repeats") if axis is None or isscalar(a): a = ravel(a) axis = 0 @@ -1710,6 +1710,55 @@ def repeat(a, repeats, axis=None): lax.broadcast_in_dim(a, broadcast_shape, broadcast_dims), a_shape) +@_wraps(onp.repeat) +def repeat(a, repeats, axis=None): + ''' + :param repeats: int or array of ints + ''' + # use `_repeat_scalar` when possible + if isscalar(repeats): + return _repeat_scalar(a, repeats, axis) + repeats_raveled = ravel(array(repeats)) # make sure it's jax's array type + if size(repeats_raveled) == 1: + return _repeat_scalar(a, list(repeats_raveled)[0], axis) + + if axis is None or isscalar(a): + a = ravel(a) + axis = 0 + + # repeats must match the dimension along the requested axis + a_shape = list(a.shape) + n = a_shape[axis] + if size(repeats_raveled) != n: + raise ValueError("repeats shape {} does not match the dimension on axis {}".format( + repeats_raveled.shape, n + )) + + # calculating the new shape + total = sum(repeats_raveled) + + new_shape = a_shape[:] + new_shape[axis] = total + + a_flattened = ravel(a) + + ''' + main algorithm: + first break down raveled input array into list of chunks; each chunk is the unit of repeat + then tile the repeats to have same length as the list of chunks + finally repeat each unit x number of times according to the tiled repeat list + ''' + chunks = product(a_shape[:axis+1]).item() + a_splitted = split(a_flattened, chunks) + repeats_tiled = tile(repeats_raveled, chunks // len(repeats_raveled)) + + ret = array([], dtype=a.dtype) + for i, repeat in enumerate(repeats_tiled): + if not isinstance(repeat, int): + repeat = repeat.item() + ret = concatenate((ret, tile(a_splitted[i], repeat))) + + return reshape(ret, new_shape) @_wraps(onp.tri) def tri(N, M=None, k=0, dtype=None):
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -729,6 +729,36 @@ def testRepeat(self, axis, shape, dtype, repeats, rng): self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True) self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True) + def testIssue1233(self): + ''' + Following numpy test suite from `test_repeat` at https://github.com/numpy/numpy/blob/master/numpy/core/tests/test_multiarray.py + ''' + tol = 1e-5 + + def test_single(m, args_maker, repeats, axis): + lax_ans = lnp.repeat(m, repeats, axis) + numpy_ans = onp.repeat(m, repeats, axis) + + self.assertAllClose(lax_ans, numpy_ans, check_dtypes=True, rtol=tol, atol=tol) + + lnp_fun = lambda arg: lnp.repeat(arg, repeats = repeats, axis=axis) + self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True) + + m = lnp.array([1,2,3,4,5,6]) + args_maker = lambda: [m] + + for repeats in [2, [1,3,2,1,1,2], [2], lnp.array([1,3,2,1,1,2]), lnp.array([2])]: + test_single(m, args_maker, repeats, None) + + m_rect = m.reshape((2,3)) + args_maker = lambda: [m_rect] + + for repeats in [2, [2,1], [2], lnp.array([2,1]), lnp.array([2])]: + test_single(m_rect, args_maker, repeats, axis=0) + + for repeats in [2, [1,3,2], [2], lnp.array([1,3,2]), lnp.array([2])]: + test_single(m_rect, args_maker, repeats, axis=1) + @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "op={}_shape=[{}]_axis={}_out_dtype={}".format( op, jtu.format_shape_dtype_string(shape, dtype), axis, out_dtype),
Support for non-scalar np.repeat arguments. Hello, I noticed that jax.numpy.repeat does not support non-scalar repeats. ``` np.repeat(np.array([[1, 2, 3]]), np.array([2, 3, 1])) NotImplementedError Traceback (most recent call last) <ipython-input-120-c0c19bd58e3a> in <module>() ----> 1 np.repeat(np.array([[1, 2, 3]]), np.array([2, 3, 1])) google3/third_party/py/jax/numpy/lax_numpy.py in repeat(a, repeats, axis) 1539 if not isscalar(repeats): 1540 raise NotImplementedError( -> 1541 "np.repeat implementation only supports scalar repeats") 1542 if axis is None or isscalar(a): 1543 a = ravel(a) ``` The expected results with numpy: ``` array([1, 1, 2, 2, 2, 3]) ``` Note that the documentation indicates that this is supported: https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.repeat.html Is it possible to add support for this in jax, and/or is there a workaround?
Certainly doable! Anyone who wants to get started on this (maybe you!) should start by looking at [the current `np.repeat` implementation in lax_numpy.py](https://github.com/google/jax/blob/d7894198a1ad0e54de42450c27ad5e715cb59aa1/jax/numpy/lax_numpy.py#L1596-L1622). If no one has picked this up, I will try to have a go at this.
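In the meantime, a user-level workaround is to fall back on the existing scalar-`repeats` support and concatenate per-element chunks. This is only a sketch: it assumes `repeats` is a concrete Python sequence of ints, since the output shape depends on its values and therefore cannot be traced.
```python
import jax.numpy as np

def repeat_with_counts(a, repeats, axis=0):
  # Repeat each slice along `axis` by its own count, then stitch them together.
  chunks = [np.repeat(np.take(a, [i], axis=axis), int(r), axis=axis)
            for i, r in enumerate(repeats)]
  return np.concatenate(chunks, axis=axis)

print(repeat_with_counts(np.array([1, 2, 3]), [2, 3, 1]))  # [1 1 2 2 2 3]
```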
2019-09-14T21:03:05
google/jax
1,352
google__jax-1352
[ "1350" ]
b7b5328526d5cf624f308eabbb897b280ea8ef7c
diff --git a/jax/lax/lax.py b/jax/lax/lax.py --- a/jax/lax/lax.py +++ b/jax/lax/lax.py @@ -1725,7 +1725,7 @@ def _div_transpose_rule(cotangent, x, y): rem_p = standard_binop([_num, _num], 'rem') ad.defjvp(rem_p, lambda g, x, y: _brcast(g, y), - lambda g, x, y: mul(neg(g), floor(div(x, y)))) + lambda g, x, y: mul(_brcast(neg(g), x), floor(div(x, y)))) def _broadcasting_select(c, which, x, y):
diff --git a/tests/lax_test.py b/tests/lax_test.py --- a/tests/lax_test.py +++ b/tests/lax_test.py @@ -1501,8 +1501,6 @@ def grad_test_spec(op, nargs, order, rng, dtypes, name=None, tol=None): dtypes=[onp.float64]), grad_test_spec(lax.round, nargs=1, order=2, rng=jtu.rand_default(), dtypes=[onp.float64]), - # grad_test_spec(lax.rem, nargs=2, order=2, rng=jtu.rand_default(), - # dtypes=[onp.float64]), # TODO(mattjj): enable grad_test_spec(lax.exp, nargs=1, order=2, rng=jtu.rand_small(), dtypes=[onp.float64, onp.complex64]), @@ -2287,6 +2285,22 @@ def f2(x, y): expected = onp.array(0.0) self.assertAllClose(ans, expected, check_dtypes=False) + # TODO(mattjj): make this a more systematic test + def testRemainder(self): + rng = onp.random.RandomState(0) + x = rng.uniform(-0.9, 9, size=(3, 4)) + y = rng.uniform(0.7, 1.9, size=(3, 1)) + assert not set(onp.unique(x)) & set(onp.unique(y)) + tol = 1e-1 if num_float_bits(onp.float64) == 32 else 1e-3 + check_grads(lax.rem, (x, y), 2, ["fwd", "rev"], tol, tol) + + rng = onp.random.RandomState(0) + x = rng.uniform(-0.9, 9, size=(1, 4)) + y = rng.uniform(0.7, 1.9, size=(3, 4)) + assert not set(onp.unique(x)) & set(onp.unique(y)) + tol = 1e-1 if num_float_bits(onp.float64) == 32 else 1e-3 + check_grads(lax.rem, (x, y), 2, ["fwd", "rev"], tol, tol) + def all_bdims(*shapes): bdims = (itertools.chain([None], range(len(shape) + 1)) for shape in shapes)
Gradient shape error. Consider the following code, ```python def pairwise_displacement(Ra, Rb): return Ra - Rb def periodic_displacement(side, dR): return np.mod(dR + side * f32(0.5), side) - f32(0.5) * side def square_distance(dR): return np.sum(dR ** 2, axis=-1) def distance(dR): return np.sqrt(square_distance(dR)) def periodic(side): def displacement_fn(Ra, Rb): return periodic_displacement(side, pairwise_displacement(Ra, Rb)) return displacement_fn def get_samples(sigma): particle_count = 10 key = random.PRNGKey(0) N_2 = particle_count // 2 particle_volume = N_2 * np.pi * (sigma ** 2 + 1.) / 4. box_size = np.sqrt(particle_volume / 0.96) displacement = periodic(box_size) R = box_size * random.uniform(key, (particle_count, 2), dtype=f64) displacement = vmap(vmap(displacement, (None, 0)), (0, None)) return np.sum(displacement(R, R)) print('Got shape {} but expected a scalar!!'.format( grad(get_samples)(1.0).shape)) ``` The function `grad(get_samples)(s)` should return a scalar. However, we get something of shape `[particle_count, particle_count, 2]`. It seems like the issue is most likely coming from tracing through `periodic(box_size)` since without that line the shapes work out as expected.
Some more minimization: ```python import jax.numpy as np from jax import vmap, grad from jax import random f64 = np.float64 f32 = np.float32 def pairwise_displacement(Ra, Rb): return Ra - Rb def periodic_displacement(side, dR): # return dR + side # NOTE this works return np.mod(dR, side) def periodic(side): def displacement_fn(Ra, Rb): return periodic_displacement(side, pairwise_displacement(Ra, Rb)) return displacement_fn def get_samples(sigma): particle_count = 10 R = np.zeros((particle_count, 2), dtype=f64) displacement = vmap(vmap(periodic(sigma), (None, 0)), (0, None)) return np.sum(displacement(R, R)) print('Got shape {} but expected a scalar!!'.format( grad(get_samples)(1.0).shape)) ``` Notice the `# NOTE this works` comment. That makes me think there's a broadcasting issue with `np.mod`...
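One way to pin this down (and what the regression test added with the fix above does) is to run numerical gradient checks on `lax.rem` itself with broadcastable operand shapes, in both differentiation modes:
```python
import numpy as onp
from jax import lax
from jax.test_util import check_grads

rng = onp.random.RandomState(0)
x = rng.uniform(-0.9, 9, size=(3, 4))
y = rng.uniform(0.7, 1.9, size=(3, 1))  # broadcasts against x, like the scalar `side` above

# Exercises the JVP rules of rem with respect to both arguments; this check
# should fail before the _brcast fix in the patch and pass after it.
check_grads(lax.rem, (x, y), 2, ["fwd", "rev"])
```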
2019-09-15T15:47:18
google/jax
1,358
google__jax-1358
[ "1330" ]
4d7f41ba421bca8232dd480af12b2a62d5c85d17
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -2232,8 +2232,9 @@ def argmin(a, axis=None): def _argminmax(op, a, axis): shape = [1] * a.ndim shape[axis] = a.shape[axis] - idxs = onp.arange(a.shape[axis]).reshape(shape) + idxs = lax.tie_in(a, arange(a.shape[axis])).reshape(shape) maxval = onp.iinfo(xla_bridge.canonicalize_dtype(idxs.dtype)).max + maxval = lax.tie_in(a, maxval) mask_idxs = where(lax._eq_meet(a, op(a, axis, keepdims=True)), idxs, maxval) return min(mask_idxs, axis)
argmax instantiates global constants The culprit is this line: https://github.com/google/jax/blob/22afc8cea1b9b5a0dca0cdfe8a0ce51df94cf9e0/jax/numpy/lax_numpy.py#L2234 I'm attempting to do argmax on large tensors and am running out of memory due to the size of the globally-allocated range object. A `tie_in` workaround might address the memory issue, but perhaps the better approach is to implement what the TODO says? https://github.com/google/jax/blob/22afc8cea1b9b5a0dca0cdfe8a0ce51df94cf9e0/jax/numpy/lax_numpy.py#L2230-L2237
My current workaround is: ``` def _argminmax(op, a, axis): shape = [1] * a.ndim shape[axis] = a.shape[axis] idxs = lax.tie_in(a, arange(a.shape[axis])).reshape(shape) maxval = onp.iinfo(xla_bridge.canonicalize_dtype(idxs.dtype)).max maxval = lax.tie_in(a, maxval) mask_idxs = where(lax._eq_meet(a, op(a, axis, keepdims=True)), idxs, maxval) return min(mask_idxs, axis) ``` I realized that maxval needed a tie-in too, because it would be broadcast as part of `where`, with the result constant-folded and stored in global memory.
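The same idea works without patching JAX internals, by defining a user-side argmax that ties its index constants to the input. A sketch (not the library implementation):
```python
import numpy as onp
import jax.numpy as np
from jax import jit, lax

def argmax_no_globals(a, axis=0):
  # tie_in keeps the iota and the sentinel inside the traced computation,
  # so no large index constant is materialized in host memory.
  shape = [1] * a.ndim
  shape[axis] = a.shape[axis]
  idxs = lax.tie_in(a, np.arange(a.shape[axis])).reshape(shape)
  sentinel = lax.tie_in(a, onp.iinfo(idxs.dtype).max)
  masked = np.where(a == a.max(axis=axis, keepdims=True), idxs, sentinel)
  return masked.min(axis=axis)

out = jit(argmax_no_globals)(np.ones((8, 4)))  # axis stays a concrete Python int
```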
2019-09-16T21:31:23
google/jax
1,378
google__jax-1378
[ "1369" ]
16484ccbed49d1afeb08b3f6311476c19b49b604
diff --git a/jax/interpreters/partial_eval.py b/jax/interpreters/partial_eval.py --- a/jax/interpreters/partial_eval.py +++ b/jax/interpreters/partial_eval.py @@ -143,6 +143,8 @@ def process_map(self, map_primitive, f, tracers, params): return out_tracers def post_process_call(self, call_primitive, out_tracers, params): + if call_primitive in map_primitives: + return self.post_process_map(call_primitive, out_tracers, params) jaxpr, consts, env = tracers_to_jaxpr([], out_tracers) out_pvs, out_pv_consts = unzip2(t.pval for t in out_tracers) out = out_pv_consts + consts @@ -163,6 +165,31 @@ def todo(x): return out_tracers return out, todo + def post_process_map(self, map_primitive, out_tracers, params): + jaxpr, consts, env = tracers_to_jaxpr([], out_tracers) + out_pvs_reduced, out_pv_consts = unzip2(t.pval for t in out_tracers) + out_pvs = [None if pv is None else _unmapped_aval(params['axis_size'], pv) + for pv in out_pvs_reduced] + out = out_pv_consts + consts + del consts, out_pv_consts + master = self.master + def todo(x): + n = len(jaxpr.outvars) + out_pv_consts, consts = x[:n], x[n:] + trace = JaxprTrace(master, core.cur_sublevel()) + const_tracers = map(trace.new_instantiated_const, consts) + env_tracers = map(trace.full_raise, env) + lifted_jaxpr = closure_convert_jaxpr(jaxpr) + bound_subjaxpr = (lifted_jaxpr, (), env_tracers) + out_tracers = [JaxprTracer(trace, PartialVal((out_pv, out_pv_const)), None) + for out_pv, out_pv_const in zip(out_pvs, out_pv_consts)] + eqn = new_jaxpr_eqn(const_tracers, out_tracers, map_primitive, + (bound_subjaxpr,), params) + for t in out_tracers: + t.recipe = eqn + return out_tracers + return out, todo + def _mapped_aval(aval): if aval is core.abstract_unit: return aval
diff --git a/tests/pmap_test.py b/tests/pmap_test.py --- a/tests/pmap_test.py +++ b/tests/pmap_test.py @@ -689,6 +689,27 @@ def testShardedDeviceArrayGetItem(self): z = y[0] # doesn't crash self.assertAllClose(z, 2 * x[0], check_dtypes=False) + def testPostProcessMap(self): + # test came from https://github.com/google/jax/issues/1369 + nrep = xla_bridge.device_count() + + def pmvm(a, b): + a = a.reshape((nrep, -1, a.shape[1])) + func = pmap(lambda z: np.dot(z, b)) + return func(a).reshape(b.shape) + + rng = onp.random.RandomState(0) + a = rng.randn(80, 80) + b = rng.randn(80) + + iters = np.arange(5) + def body(carry, i): + return pmvm(a, carry), i + ans, _ = lax.scan(body, b, iters) + + expected = onp.linalg.matrix_power(a, 5).dot(b) + self.assertAllClose(ans, expected, check_dtypes=False) + class PmapWithDevicesTest(jtu.JaxTestCase):
scan+pmap shape bug @JasperSnoek reported this one: ```python import jax.numpy as np from jax import lax, random from jax import pmap from jax.lib import xla_bridge nrep = xla_bridge.device_count() key = random.PRNGKey(0) n = 80 a_in = np.linspace(0, 1, n)[:, None] a = np.dot(a_in, a_in.T) + np.eye(n) * 0.1 b = random.normal(key, (n,)).flatten() def pmvm(a, b): a = a.reshape((nrep, -1, a.shape[1])) func = pmap(lambda z: np.dot(z, b)) return func(a).reshape(b.shape) for i in range(10): b = pmvm(a, b) # Ok iters = np.arange(10) def body(carry, i): return pmvm(a, carry), i res = lax.scan(body, b, iters) ``` It results in some kind of shape error (the exact one depends on how many replicas there are).
2019-09-20T14:11:42
google/jax
1,381
google__jax-1381
[ "1361" ]
f5fd2b3e9de5c30a6e230c29cb67307ababc92db
diff --git a/jax/interpreters/partial_eval.py b/jax/interpreters/partial_eval.py --- a/jax/interpreters/partial_eval.py +++ b/jax/interpreters/partial_eval.py @@ -36,19 +36,17 @@ def identity(x): return x # A partial value (pval) is modeled as a pair (pv, const), as per # type PVal = (PV, Const) -# data PV = NonePV | AbstractPV AbstractValue +# data PV = Known | Unknown AbstractValue # type Const = MaybeTraced JaxType -# where the NonePV arm indicates a known (constant) value, the AbstractPV arm -# indicates an unknown value. -# Additionally, when the pv is an AbstractValue, then the const must be unit. +# where the Known arm, represented by a None, indicates a known (constant) value +# and the Unknown arm, represented by an AbstractValue instance, indicates an +# unknown value. +# When the pv is an AbstractValue, then the const must be unit. class JaxprTrace(Trace): def pure(self, val): - if type(val) in core.literalable_types and onp.shape(val) == (): - return JaxprTracer(self, PartialVal((None, val)), Literal(val)) - else: - return self.new_const(val) + return self.new_const(val) def lift(self, val): return self.new_const(val) @@ -76,8 +74,8 @@ def instantiate_const(self, tracer): if isinstance(pv, AbstractValue): return tracer elif pv is None: - if type(tracer.recipe) is Literal: - return self.new_instantiated_literal(tracer.recipe.val) + if type(const) in core.literalable_types and onp.shape(const) == (): + return self.new_instantiated_literal(const) else: return self.new_instantiated_const(const) else: @@ -392,8 +390,7 @@ def tracers_to_jaxpr(in_tracers, out_tracers): env_vars, env_vals = unzip2(env.items()) const_vars, const_vals = unzip2(consts.items()) jaxpr = Jaxpr(const_vars, env_vars, invars, list(map(var, out_tracers)), eqns) - # core.skip_checks or core.check_jaxpr(jaxpr) - core.check_jaxpr(jaxpr) + core.skip_checks or core.check_jaxpr(jaxpr) return jaxpr, const_vals, env_vals
diff --git a/tests/lax_control_flow_test.py b/tests/lax_control_flow_test.py --- a/tests/lax_control_flow_test.py +++ b/tests/lax_control_flow_test.py @@ -981,6 +981,17 @@ def f(carry, _): key = random.PRNGKey(0) api.grad(lambda c: lax.scan(f, (c, key), onp.ones(3))[0][0])(0.) # doesn't crash + def testIssue1361(self): + @api.jit + def jit_run_scan(x): + def fun(carry, _): + x, _ = carry + return (2 * x, 0.), None + (x, _), _ = lax.scan(fun, (x, 0.), np.arange(3)) + return x + + api.grad(lambda x: jit_run_scan(x))(0.) # doesn't crash + if __name__ == '__main__': absltest.main()
Bug with shape inference in grad + jit + scan Encountered with new pip install (jax 0.1.45, jaxlib 0.1.28) ```python import jax import jax.numpy as np def loop_stuff(init_x, func, num_iter): def body(args): i, x_new, _ = args x = init_x x_old = x x = func(i, x_old) i_new = i + 1 return i_new, x_new, x_old init_vals = (0, init_x, np.zeros_like(init_x)) def scan_step(args, idx): del idx return body(args), None (iterations, sol, prev_sol), _ = jax.lax.scan( f=scan_step, init=init_vals, xs=np.arange(num_iter), ) return iterations, sol, prev_sol max_steps = 10 def step(i, x): return x - 1 init_x = np.zeros(()) def run_scan(x): return loop_stuff(init_x=x, func=step, num_iter=max_steps) @jax.jit def jit_run_scan(x): return loop_stuff(init_x=x, func=step, num_iter=max_steps) jax.grad(lambda x: run_scan(x)[1])(init_x) # runs fine jax.grad(lambda x: jit_run_scan(x)[1])(init_x) # fails mysteriously ``` The error: `RuntimeError: Invalid argument: The parameter of condition and body, the result of the body, and init must all have the same shape; got Condition: (parameter: (s32[], (), (), (), (), (), f32[], (), ())) -> pred[]; body: (parameter: (s32[], (), (), (), (), (), f32[], (), ())) -> (s32[], (), (), (), (), (), f32[], f32[], ()); init: (s32[], (), (), (), (), (), f32[], (), ())..` @mattjj
Thanks for raising this. Here's a smaller repro: ```python import jax import jax.numpy as np @jax.jit def jit_run_scan(x): def fun(carry, _): x, _ = carry return (2 * x, 0.), None (x, _), _ = jax.lax.scan(fun, (x, 0.), np.arange(3)) return x jax.grad(lambda x: jit_run_scan(x))(0.) ``` I think the issue is a literal `0.` in the carry output. I have a feeling we fixed this before, or some other version of it... It's a strange beast. I'm not sure it is an issue of literal `0.`, but I could be missing something subtle. If you drop third carry term in my example, everything seems to work fine, e.g., ```python def loop_stuff(init_x, func, num_iter): def body(args): i, x_new = args x = init_x x_old = x x = func(i, x_old) i_new = i + 1 return i_new, x_new init_vals = (0, init_x) def scan_step(args, idx): del idx return body(args), None (iterations, sol), _ = jax.lax.scan( f=scan_step, init=init_vals, xs=np.arange(num_iter), ) return iterations, sol ``` You can also rewrite my example without any zeros and still cause the error: ```python import jax import jax.numpy as np def loop_stuff(init_x, func, num_iter): def body(args): i, x_new, _ = args x = init_x x_old = x x = func(i, x_old) i_new = i + 1 return i_new, x_new, x_old init_vals = (1, init_x, init_x) def scan_step(args, idx): del idx return body(args), None (iterations, sol, prev_sol), _ = jax.lax.scan( f=scan_step, init=init_vals, xs=np.arange(num_iter), ) return iterations, sol, prev_sol max_steps = 10 def step(i, x): return x - 1 init_x = np.ones(()) + 1 def run_scan(x): return loop_stuff(init_x=x, func=step, num_iter=max_steps) @jax.jit def jit_run_scan(x): return loop_stuff(init_x=x, func=step, num_iter=max_steps) jax.grad(lambda x: run_scan(x)[1])(init_x) # runs fine jax.grad(lambda x: jit_run_scan(x)[1])(init_x) # fails mysteriously ``` From playing around with it and the error message, my initial guess was that this occurs when you have two copies of the same tensor as initial carry. The error message makes me think that when the type of one is determined, it affect the other at a time when some part of the code doesn't expect it to change. I haven't sifted through how the backend is implemented, but would something like that be possible in some form or another? Also, not sure if it helps but I've encountered another bug with scan in a similar setting. Replacing the loop with a unrolled loop works fine. 
I've been having trouble boiling it down to something self contained that I can post but here is the error message in case it is related in any way: ``` File "/usr/local/lib/python3.6/dist-packages/jax/api.py", line 397, in value_and_grad_f g = vjp_py(onp.ones((), dtype=dtype)) File "/usr/local/lib/python3.6/dist-packages/jax/api_util.py", line 62, in apply_jaxtree_fun ans = fun(*args) File "/usr/local/lib/python3.6/dist-packages/jax/api.py", line 1012, in out_vjp_packed return out_vjp(cotangent_in) File "/usr/local/lib/python3.6/dist-packages/jax/interpreters/ad.py", line 112, in vjp_ _, arg_cts = backward_pass(jaxpr, consts, (), dummy_args, dummy_primal_and_ct) File "/usr/local/lib/python3.6/dist-packages/jax/interpreters/ad.py", line 186, in backward_pass cts_out = get_primitive_transpose(eqn.primitive)(ct_in, *invals, **eqn.params) File "/usr/local/lib/python3.6/dist-packages/jax/lax/lax_control_flow.py", line 723, in _scan_transpose jaxpr_trans = _move_stuff_and_add_add(jaxpr_lifted_trans) File "/usr/local/lib/python3.6/dist-packages/jax/lax/lax_control_flow.py", line 761, in _move_stuff_and_add_add assert CTc_aval == CTc_aval2 AssertionError: In call to configurable 'run' (<function run at 0x7ff0991b20d0>) ``` I'm happy to share the code that causes this but I am not quite ready to broadcast it publicly. It isn't confidential or proprietary in any way so I'm happy to share it privately if that is of any interest to you and the JAX team. Yeah, all bugs are certainly of interest. Thanks for helping to find them! If you could open an issue for it, perhaps just with the error message, and then separately email me the repro, that would work. It's useful to open the issue just to help track/organize things. Re: this particular issue, I'll check out your other repros too to see if it looks like the same bug or a separate one. In your new repro, there is a literal `0.` in the loop carry output again, but it's complicated to explain why! Here's how to see it for yourself, after running the new repro script and starting a post-mortem debugger: ``` In [2]: %debug > /usr/local/google/home/mattjj/miniconda3/lib/python3.7/site-packages/jaxlib/xla_client.py(713)GetShape() 711 712 def GetShape(self, operand): --> 713 return self._builder.GetShape(operand) 714 715 def SetOpMetadata(self, op_metadata): ipdb> up > /usr/local/google/home/mattjj/packages/jax/jax/interpreters/xla.py(294)jaxpr_subcomp() 292 raise NotImplementedError(msg.format(eqn.primitive.name)) 293 --> 294 c.GetShape(ans) # force xla to do shape error checking 295 out_nodes = xla_destructure(c, ans) if eqn.primitive.multiple_results else [ans] 296 _map(write, eqn.outvars, out_nodes) ipdb> up > /usr/local/google/home/mattjj/packages/jax/jax/interpreters/xla.py(241)jaxpr_computation() 239 freevars = _map(c.ParameterWithShape, freevar_shapes) 240 args = _map(c.ParameterWithShape, arg_shapes) --> 241 out_nodes = jaxpr_subcomp(c, jaxpr, backend, axis_env, consts, freevars, *args) 242 return c.Build(c.Tuple(*out_nodes)) 243 ipdb> p jaxpr { lambda ; ; a b c d e f g. let h i j k l m n o p = while[ cond_nconsts=0 cond_jaxpr={ lambda ; ; a b c d e f g h i. let j = lt a 10 in [j] } body_nconsts=0 body_jaxpr={ lambda ; ; a b c d e f g h i. let j = add a 1 in [j, *, *, *, *, *, g, 0.0, *] } ] 0 a b c d e f g * in [i, j, k, l, m, n, o, p] } ``` In the line with `[j, *, *, *, *, *, g, 0.0, *]` there's a literal `0.0` which I believe is problematic. 
If this issue is like the one I'm recalling, it's just an implementation detail around the `core.unit` placeholders we use in partial evaluation. Thanks for the explanation! I had forgotten that jax transforms the scan into a while op. Re: other bug, for good measure I tried to recreate it once more and it seems like I can no longer reproduce it with the newest wheels. I was probably missing a fix when I last tried. I hope I didn't waste any of your time on this one!
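For anyone else hitting this, one way to look for the suspect literal without attaching a debugger is to stage the function out and inspect the printed jaxpr; whether the constant shows up as a literal or as a hoisted constvar depends on the JAX version, so treat this as a diagnostic sketch:
```python
import jax
import jax.numpy as np
from jax import lax

def run(x):
  def fun(carry, _):
    x, _ = carry
    return (2 * x, 0.), None  # the literal 0. in the carry is the suspected culprit
  (x, _), _ = lax.scan(fun, (x, 0.), np.arange(3))
  return x

print(jax.make_jaxpr(run)(0.))  # look at how the second carry output is represented
```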
2019-09-20T22:36:50
google/jax
1,388
google__jax-1388
[ "1383" ]
86a3cd5f625f1a4d1b0a46831cd4dfa1d6239e67
diff --git a/jax/lax/lax.py b/jax/lax/lax.py --- a/jax/lax/lax.py +++ b/jax/lax/lax.py @@ -1720,7 +1720,7 @@ def _div_transpose_rule(cotangent, x, y): div_p = standard_binop([_num, _num], 'div') ad.defjvp(div_p, lambda g, x, y: div(_brcast(g, y), y), - lambda g, x, y: div(mul(neg(_brcast(g, x)), x), pow(y, _two(y)))) + lambda g, x, y: div(mul(neg(_brcast(g, x)), x), square(y))) ad.primitive_transposes[div_p] = _div_transpose_rule rem_p = standard_binop([_num, _num], 'rem')
diff --git a/tests/linalg_test.py b/tests/linalg_test.py --- a/tests/linalg_test.py +++ b/tests/linalg_test.py @@ -538,6 +538,20 @@ def testIssue1151(self): jac0 = jax.jacobian(np.linalg.solve, argnums=0)(A[0], b[0]) jac1 = jax.jacobian(np.linalg.solve, argnums=1)(A[0], b[0]) + def testIssue1383(self): + seed = jax.random.PRNGKey(0) + tmp = jax.random.uniform(seed, (2,2)) + a = np.dot(tmp, tmp.T) + + def f(inp): + val, vec = np.linalg.eigh(inp) + return np.dot(np.dot(vec, inp), vec.T) + + grad_func = jax.jacfwd(f) + hess_func = jax.jacfwd(grad_func) + cube_func = jax.jacfwd(hess_func) + self.assertFalse(onp.any(onp.isnan(cube_func(a)))) + class ScipyLinalgTest(jtu.JaxTestCase):
forward-mode eigh derivatives return NaN's First off, thanks for developing JAX, it's awesome! I recently noticed that when one computes higher order derivatives of expressions involving eigenvectors obtained from `jax.numpy.eigh` or `jax.scipy.eigh`, NaNs are returned. However, it only appears to be a problem with forward-mode autodiff. This is unfortunate, since forward-mode tends to be more efficient for higher order derivatives. A minimal example: ```python import jax import jax.numpy as np # Create a random symmetric matrix seed = jax.random.PRNGKey(0) tmp = jax.random.uniform(seed, (2,2)) a = np.dot(tmp, tmp.T) def test(inp): val, vec = np.linalg.eigh(inp) return np.dot(np.dot(vec, inp), vec.T) def test_deriv(func, inp): grad_func = jax.jacfwd(func) hess_func = jax.jacfwd(grad_func) cube_func = jax.jacfwd(hess_func) # This derivative returns NaN, but jax.jacrev works! print(grad_func(inp)) print(hess_func(inp)) print(cube_func(inp)) test_deriv(test, a) ``` Additional notes: - Expressions involving eigenvalues do not appear to have this issue - Any array contractions involving eigenvectors and other arrays (`dot`, `einsum`, etc) also exhibit similar behavior: `jacfwd` fails, while `jacrev` works. - I'm using CPU - JAX version 0.1.46 Thanks for your time!
The first NaN in the computation appears to come from the forward-mode derivative of `pow` where the LHS is negative: `jax.jvp(lax.pow, (-1.5, 2.), (0., 0.))` This in turn comes from the `y ** 2` term in the RHS JVP for `div`.
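That observation is easy to check directly, and it also shows why rewriting the denominator term with `lax.square` (as the patch above does) avoids the problem:
```python
from jax import jvp, lax

# pow's JVP with respect to the exponent involves log(x), which is NaN for
# negative x, and 0 * nan is still nan even though the tangents are zero.
print(jvp(lax.pow, (-1.5, 2.), (0., 0.)))  # tangent output is nan

# square's derivative is just 2 * x, so the same perturbation stays finite.
print(jvp(lax.square, (-1.5,), (0.,)))     # tangent output is finite
```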
2019-09-23T16:47:02
google/jax
1,390
google__jax-1390
[ "1372" ]
e1c6848409e36524a2af580882f238e4963edcfa
diff --git a/jax/api.py b/jax/api.py --- a/jax/api.py +++ b/jax/api.py @@ -448,7 +448,7 @@ def jacfun(*args, **kwargs): f_partial, dyn_args = _argnums_partial(f, argnums, args) holomorphic or tree_map(_check_real_input_jacfwd, dyn_args) pushfwd = partial(jvp, f_partial, dyn_args) - y, jac = vmap(pushfwd, out_axes=(None, -1))(_std_basis(dyn_args)) + y, jac = vmap(pushfwd, out_axes=(None, batching.last))(_std_basis(dyn_args)) example_args = dyn_args[0] if isinstance(argnums, int) else dyn_args return tree_map(partial(_unravel_array_into_pytree, example_args, -1), jac) @@ -617,12 +617,10 @@ def _flatten_axes(treedef, axis_tree): dummy = tree_unflatten(treedef, [object()] * treedef.num_leaves) axes = [] add_leaves = lambda i, x: axes.extend([i] * len(tree_flatten(x)[0])) - # TODO(mattjj): remove _replace_nones / list comp after jaxlib 0.1.25 tree_multimap(add_leaves, _replace_nones(axis_tree), dummy) axes = [None if a is _none_proxy else a for a in axes] return axes -# TODO(mattjj): remove this when jaxlib is updated past 0.1.25 def _replace_nones(tuptree): if type(tuptree) in (list, tuple): return tuple(map(_replace_nones, tuptree)) diff --git a/jax/interpreters/batching.py b/jax/interpreters/batching.py --- a/jax/interpreters/batching.py +++ b/jax/interpreters/batching.py @@ -241,9 +241,14 @@ def zeros_like_batched(batched_args, batch_dims): # almost works, except for broadcast, for which raw numpy.ndarrays don't have a # method. To handle that case, the `broadcast` function uses a try/except. +class _Last(object): pass +last = _Last() + def broadcast(x, sz, axis): if core.get_aval(x) is core.abstract_unit: return core.unit + if axis is last: + axis = onp.ndim(x) shape = list(onp.shape(x)) shape.insert(axis, sz) if isinstance(x, onp.ndarray) or onp.isscalar(x): @@ -267,6 +272,8 @@ def matchaxis(sz, src, dst, x): return x elif type(src) == type(dst) == int: return moveaxis(x, src, dst) + elif type(src) == int and dst is last: + return moveaxis(x, src, -1) elif src is not_mapped and dst is not not_mapped: return broadcast(x, sz, dst) else:
diff --git a/tests/api_test.py b/tests/api_test.py --- a/tests/api_test.py +++ b/tests/api_test.py @@ -357,6 +357,41 @@ def test_hessian_on_pytrees(self): (onp.array([0., 0.]), onp.array([0., 2.]))) self.assertAllClose(ans, expected, check_dtypes=False) + @jtu.skip_on_devices("tpu") + def test_issue1372(self): + def quad(x): + return np.dot(x, x) + + def f(x, u): + return quad(x) + quad(u) + + x, u = np.ones(5), np.ones(2) + + rev = jacrev + fwd = jacfwd + + # Diagonal entries + self.assertEqual(rev(rev(f, 0), 0)(x, u).shape, (5, 5)) + self.assertEqual(rev(fwd(f, 0), 0)(x, u).shape, (5, 5)) + self.assertEqual(fwd(rev(f, 0), 0)(x, u).shape, (5, 5)) + self.assertEqual(fwd(fwd(f, 0), 0)(x, u).shape, (5, 5)) + self.assertEqual(rev(rev(f, 1), 1)(x, u).shape, (2, 2)) + self.assertEqual(rev(fwd(f, 1), 1)(x, u).shape, (2, 2)) + self.assertEqual(fwd(rev(f, 1), 1)(x, u).shape, (2, 2)) + self.assertEqual(fwd(fwd(f, 1), 1)(x, u).shape, (2, 2)) + + # Off-diagonal entries by reverse-mode on the outside + self.assertEqual(rev(rev(f, 1), 0)(x, u).shape, (2, 5)) + self.assertEqual(rev(fwd(f, 1), 0)(x, u).shape, (2, 5)) + self.assertEqual(rev(rev(f, 0), 1)(x, u).shape, (5, 2)) + self.assertEqual(rev(fwd(f, 0), 1)(x, u).shape, (5, 2)) + + # Off-diagonal entries by forward-mode on the outside + self.assertEqual(fwd(rev(f, 1), 0)(x, u).shape, (2, 5)) + self.assertEqual(fwd(fwd(f, 1), 0)(x, u).shape, (2, 5)) + self.assertEqual(fwd(rev(f, 0), 1)(x, u).shape, (5, 2)) + self.assertEqual(fwd(fwd(f, 0), 1)(x, u).shape, (5, 2)) + def test_disable_jit(self): effects = []
Incorrect reshaping after forward-of-reverse off-diagonal second-order autodiff The following code sample computes blocks of a Hessian by composition of autodiff with itself. Whenever forward-mode autodiff is composed atop (reverse- or forward-mode) autodiff to compute off-diagonal blocks, a shape-related error occurs, due to what appears to be incorrect output-reshaping logic. ```python from jax.api import * import jax.numpy as np def quad(x): return np.dot(np.dot(np.ones(x.shape * 2), x), x) def f(x, u): return quad(x) + quad(u) x, u = np.ones(5), np.ones(2) rev = jacrev # `rev = grad` yields the same outcomes below fwd = jacfwd # Diagonal entries - all OK rev(rev(f, 0), 0)(x, u) rev(fwd(f, 0), 0)(x, u) fwd(rev(f, 0), 0)(x, u) fwd(fwd(f, 0), 0)(x, u) rev(rev(f, 1), 1)(x, u) rev(fwd(f, 1), 1)(x, u) fwd(rev(f, 1), 1)(x, u) fwd(fwd(f, 1), 1)(x, u) # Off-diagonal entries by reverse-mode on the outside - all OK rev(rev(f, 1), 0)(x, u) rev(fwd(f, 1), 0)(x, u) rev(rev(f, 0), 1)(x, u) rev(fwd(f, 0), 1)(x, u) # Off-diagonal entries by forward-mode on the outside - all fail with: # # RuntimeError: Invalid argument: Input dimension should be either 1 or equal to # the output dimension it is broadcasting into; the 0th operand dimension is X, # the 0th output dimension is Y: This is a bug in JAX's shape-checking rules; # please report it! fwd(rev(f, 1), 0)(x, u) # X = 2, Y = 5 fwd(fwd(f, 1), 0)(x, u) # X = 2, Y = 5 fwd(rev(f, 0), 1)(x, u) # X = 5, Y = 2 fwd(fwd(f, 0), 1)(x, u) # X = 5, Y = 2 ``` Similar errors can be triggered in the analogous cases under `make_jaxpr`, rather than by standard evaluation: ```python make_jaxpr(rev(rev(f)))(x, u) # OK make_jaxpr(rev(fwd(f)))(x, u) # OK make_jaxpr(fwd(rev(f)))(x, u) # OK make_jaxpr(fwd(fwd(f)))(x, u) # OK # ... make_jaxpr(rev(rev(f, 1), 0))(x, u) # OK make_jaxpr(rev(fwd(f, 1), 0))(x, u) # OK # ValueError: cannot reshape array of size 10 into shape (5,5) make_jaxpr(fwd(rev(f, 1), 0))(x, u) make_jaxpr(fwd(fwd(f, 1), 0))(x, u) ```
2019-09-23T20:51:49
google/jax
1,395
google__jax-1395
[ "1379" ]
ad03bafb9b611f1b38baa7c34801c60700d99d90
diff --git a/jax/lax/lax_control_flow.py b/jax/lax/lax_control_flow.py --- a/jax/lax/lax_control_flow.py +++ b/jax/lax/lax_control_flow.py @@ -286,7 +286,21 @@ def cond(pred, true_operand, true_fun, false_operand, false_fun): return true_fun(true_operand) else: return false_fun(false_operand) + + Pred has to be a scalar type, collection types (list, tuple) are not supported + """ + + if len(onp.shape(pred)) != 0: + raise TypeError("Pred must be a scalar, got {} of shape {}".format(pred, onp.shape(pred))) + + pred_dtype = onp.result_type(pred) + if pred_dtype.kind != 'b': + if pred_dtype.kind in 'iuf': + pred = pred != 0 + else: + msg = ("Pred type must be either boolean or number, got {}") + raise TypeError(msg.format(pred_dtype)) true_ops, true_tree = tree_flatten((true_operand,)) true_avals = tuple(_map(_abstractify, true_ops)) true_jaxpr, true_consts, out_tree = _initial_style_jaxpr(true_fun, true_tree, true_avals)
diff --git a/tests/lax_control_flow_test.py b/tests/lax_control_flow_test.py --- a/tests/lax_control_flow_test.py +++ b/tests/lax_control_flow_test.py @@ -416,6 +416,25 @@ def false_fun(x): self.assertEqual(fun(4), cfun(4)) self.assertEqual(fun(4), (8, 16)) + def testIssue1379(self): + + def fun(pred): + return lax.cond(pred, pred, lambda x: (True, x), pred, lambda x: (False, x)) + + @api.jit + def cfun(pred): + return fun(pred) + + self.assertEqual(fun(0), cfun(0), (False,0)) + self.assertEqual(fun(0.), cfun(0.), (False,0.)) + self.assertEqual(fun(1), cfun(1), (True,1)) + self.assertEqual(fun(1.), cfun(1.), (True,1.)) + + # test that proper errors are raised for wrong types + for pred in ["abc", [], [1,2]]: + for f in [fun, cfun]: + self.assertRaises(TypeError, f, pred) + def testNestedCond(self): def fun(x): if x < 2:
lax.cond should not fail with RuntimeError when passed a integer It would be reasonable to convert integer predicates into booleans by checking if they are equal to zero, like how Python handles integers. Example behavior: ``` import jax from jax import lax def f(n): return lax.cond(n % 2, n, lambda n: n, n, lambda n: n) f(12) ``` Results in: ``` --------------------------------------------------------------------------- RuntimeError Traceback (most recent call last) <ipython-input-17-5bf9fbe6fc46> in <module>() 5 return lax.cond(n % 2, n, lambda n: n, n, lambda n: n) 6 ----> 7 f(12) 5 frames <ipython-input-17-5bf9fbe6fc46> in f(n) 3 4 def f(n): ----> 5 return lax.cond(n % 2, n, lambda n: n, n, lambda n: n) 6 7 f(12) google3/third_party/py/jax/lax/lax_control_flow.py in cond(pred, true_operand, true_fun, false_operand, false_fun) 295 *itertools.chain([pred], true_consts, true_ops, false_consts, false_ops), 296 true_jaxpr=true_jaxpr, false_jaxpr=false_jaxpr, --> 297 true_nconsts=len(true_consts), false_nconsts=len(false_consts)) 298 return tree_unflatten(out_tree, out) 299 google3/third_party/py/jax/core.py in bind(self, *args, **kwargs) 128 top_trace = find_top_trace(args) 129 if top_trace is None: --> 130 return self.impl(*args, **kwargs) 131 132 tracers = map(top_trace.full_raise, args) google3/third_party/py/jax/interpreters/xla.py in apply_primitive(prim, *args, **params) 121 """Impl rule that compiles and runs a single primitive 'prim' using XLA.""" 122 abstract_args = map(abstractify, args) --> 123 compiled_fun = xla_primitive_callable(prim, *abstract_args, **params) 124 return compiled_fun(*args) 125 google3/third_party/py/jax/interpreters/xla.py in xla_primitive_callable(prim, *abstract_args, **params) 134 handle_result = aval_to_result_handler(aval_out) 135 xla_shapes = tuple(map(aval_to_xla_shape, abstract_args)) --> 136 built_c = primitive_computation(prim, *xla_shapes, **params) 137 compiled = built_c.Compile(xla_shapes, xb.get_compile_options(), 138 backend=xb.get_backend(backend)) google3/third_party/py/jax/interpreters/xla.py in primitive_computation(prim, *xla_shapes, **params) 166 "This is a bug in JAX's shape-checking rules; please report it!\n" 167 "https://github.com/google/jax/issues\n") --> 168 raise RuntimeError(msg) 169 170 def _execute_compiled_primitive(prim, compiled, backend, result_handler, *args): RuntimeError: Invalid argument: Argument to predicated-Conditional is not a scalar of PRED type (s32[]).: @ 0x55e6131c5b9d xla::Conditional() @ 0x7f0e34c7e9a1 (unknown) @ 0x7f0e34c91e6b (unknown) @ 0x55e621de056d _PyCFunction_FastCallDict @ 0x55e6216c1a82 _PyEval_EvalFrameDefault @ 0x55e6216ba7d0 _PyEval_EvalFrameDefault @ 0x55e6210eb7ce _PyEval_EvalCodeWithName.llvm.293474707488941182 @ 0x55e62546cd7c function_call @ 0x55e6216c1564 _PyEval_EvalFrameDefault @ 0x55e6210eb7ce _PyEval_EvalCodeWithName.llvm.293474707488941182 @ 0x55e62546cd7c function_call @ 0x55e6210dfdaf PyObject_Call @ 0x7f0e349bdbda (unknown) @ 0x55e6216c1564 _PyEval_EvalFrameDefault @ 0x55e6210eb7ce _PyEval_EvalCodeWithName.llvm.293474707488941182 @ 0x55e62546cd7c function_call @ 0x55e6210dfdaf PyObject_Call @ 0x7f0e349bdbda (unknown) @ 0x55e6216c1564 _PyEval_EvalFrameDefault @ 0x55e6210eb7ce _PyEval_EvalCodeWithName.llvm.293474707488941182 @ 0x55e62546cd7c function_call @ 0x55e6210dfdaf PyObject_Call @ 0x55e6240cd6ed partial_call @ 0x55e6216c1564 _PyEval_EvalFrameDefault @ 0x55e6210eb7ce _PyEval_EvalCodeWithName.llvm.293474707488941182 @ 0x55e620927e47 _PyFunction_FastCallDict @ 0x55e6254651b8 
method_call @ 0x55e6216c1564 _PyEval_EvalFrameDefault @ 0x55e6216ba7d0 _PyEval_EvalFrameDefault @ 0x55e6216ba7d0 _PyEval_EvalFrameDefault @ 0x55e6210eb7ce _PyEval_EvalCodeWithName.llvm.293474707488941182 @ 0x55e61c77b679 builtin_exec This is a bug in JAX's shape-checking rules; please report it! https://github.com/google/jax/issues ``` The `RuntimeError` here is particularly problematic because it mean that if you `jit` such a function, you don't get a meaningful traceback: ``` >>> jax.jit(f)(12) --------------------------------------------------------------------------- RuntimeError Traceback (most recent call last) <ipython-input-18-05daf300cab6> in <module>() ----> 1 jax.jit(f)(12) 9 frames google3/third_party/py/jax/api.py in f_jitted(*args, **kwargs) 147 _check_args(args_flat) 148 flat_fun, out_tree = flatten_fun(f, in_tree) --> 149 out = xla.xla_call(flat_fun, *args_flat, device_assignment=device_assignment, backend=backend) 150 return tree_unflatten(out_tree(), out) 151 google3/third_party/py/jax/core.py in call_bind(primitive, f, *args, **params) 567 if top_trace is None: 568 with new_sublevel(): --> 569 outs = primitive.impl(f, *args, **params) 570 else: 571 tracers = map(top_trace.full_raise, args) google3/third_party/py/jax/interpreters/xla.py in _xla_call_impl(fun, *args, **params) 365 backend = params.get('backend', None) 366 compiled_fun = _xla_callable(fun, device_assignment, backend, --> 367 *map(abstractify, args)) 368 try: 369 return compiled_fun(*args) google3/third_party/py/jax/linear_util.py in cached_fun(f, *args) 215 216 def cached_fun(f, *args): --> 217 ans, f_prev = cached_fun_body(f, args) 218 if id(f_prev) != id(f): 219 f.populate_stores(f_prev) google3/third_party/py/jax/linear_util.py in cached_fun_body(f, args) 212 @fastcache.clru_cache(maxsize=max_size) 213 def cached_fun_body(f, args): --> 214 return call(f, *args), f 215 216 def cached_fun(f, *args): google3/third_party/py/jax/interpreters/xla.py in _xla_callable(fun, device_assignment, backend, *abstract_args) 383 axis_env = AxisEnv(jaxpr_replicas(jaxpr), [], []) 384 compiled = compile_jaxpr(jaxpr, device_assignment, backend, axis_env, consts, --> 385 *abstract_args) 386 del master, consts, jaxpr, env 387 result_handlers = tuple(map(_pval_to_result_handler, pvals)) google3/third_party/py/jax/interpreters/xla.py in compile_jaxpr(jaxpr, device_assignment, backend, axis_env, const_vals, *abstract_args) 200 raise ValueError(msg.format(axis_env.nreps, xb.device_count(backend))) 201 arg_shapes = tuple(map(aval_to_xla_shape, abstract_args)) --> 202 built_c = jaxpr_computation(jaxpr, backend, axis_env, const_vals, (), *arg_shapes) 203 compile_opts = xb.get_compile_options(num_replicas=axis_env.nreps, 204 device_assignment=device_assignment) google3/third_party/py/jax/interpreters/xla.py in jaxpr_computation(jaxpr, backend, axis_env, const_vals, freevar_shapes, *arg_shapes) 239 freevars = _map(c.ParameterWithShape, freevar_shapes) 240 args = _map(c.ParameterWithShape, arg_shapes) --> 241 out_nodes = jaxpr_subcomp(c, jaxpr, backend, axis_env, consts, freevars, *args) 242 return c.Build(c.Tuple(*out_nodes)) 243 google3/third_party/py/jax/interpreters/xla.py in jaxpr_subcomp(c, jaxpr, backend, axis_env, consts, freevars, *args) 292 raise NotImplementedError(msg.format(eqn.primitive.name)) 293 --> 294 c.GetShape(ans) # force xla to do shape error checking 295 out_nodes = xla_destructure(c, ans) if eqn.primitive.multiple_results else [ans] 296 _map(write, eqn.outvars, out_nodes) 
google3/third_party/tensorflow/compiler/xla/python/xla_client.py in GetShape(self, operand) 718 719 def GetShape(self, operand): --> 720 return self._builder.GetShape(operand) 721 722 def SetOpMetadata(self, op_metadata): RuntimeError: Invalid argument: Argument to predicated-Conditional is not a scalar of PRED type (s32[]). ```
Is this just adding a ```python if isinstance(pred, int): pred = pred != 0 ``` in the [cond](https://github.com/google/jax/blob/master/jax/lax/lax_control_flow.py#L279) function? If yes, I'm happy to submit a PR. I think `pred` probably should be converted into an array first and this conversion should be done on all integer dtypes. Otherwise this won't work inside `jit`. I get the handling-all-int-dtypes-for-`jit` part; converting to an array is because we are calling `itertools.chain`? I'll try to have a go and add some tests as well. Converting to an array is just to ensure `pred` is in a consistent form with `pred.dtype` defined. Otherwise this would fail when you actually pass a Python integer in as `pred`, as in my example above, because Python integers don't have a dtype.
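Until the conversion is built into `lax.cond` itself, a user-side workaround that also behaves under `jit` is to form the boolean predicate explicitly (a sketch):
```python
import jax
from jax import lax

def f(n):
  pred = (n % 2) != 0  # a scalar bool is a valid cond predicate
  return lax.cond(pred, n, lambda n: n, n, lambda n: -n)

print(f(12), jax.jit(f)(12))  # both take the false branch
```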
2019-09-25T13:43:53
google/jax
1,429
google__jax-1429
[ "1401" ]
e3597384636e39800adf8abcd24091b810db4c90
diff --git a/jax/config.py b/jax/config.py --- a/jax/config.py +++ b/jax/config.py @@ -23,10 +23,13 @@ def __init__(self): self.use_absl = False def update(self, name, val): - self.check_exists(name) - if name not in self.values: - raise Exception("Unrecognized config option: {}".format(name)) - self.values[name] = val + if self.use_absl: + setattr(self.absl_flags.FLAGS, name, val) + else: + self.check_exists(name) + if name not in self.values: + raise Exception("Unrecognized config option: {}".format(name)) + self.values[name] = val def read(self, name): if self.use_absl: diff --git a/jax/interpreters/xla.py b/jax/interpreters/xla.py --- a/jax/interpreters/xla.py +++ b/jax/interpreters/xla.py @@ -171,7 +171,8 @@ def _execute_compiled_primitive(prim, compiled, backend, result_handler, *args): device_num, = compiled.DeviceOrdinals() input_bufs = [device_put(x, device_num, backend=backend) for x in args] out_buf = compiled.Execute(input_bufs) - if FLAGS.jax_debug_nans: check_nans(prim, out_buf) + if FLAGS.jax_debug_nans: + check_nans(prim, out_buf.destructure() if prim.multiple_results else out_buf) return result_handler(out_buf) def check_nans(prim, bufs):
diff --git a/tests/debug_nans_test.py b/tests/debug_nans_test.py new file mode 100644 --- /dev/null +++ b/tests/debug_nans_test.py @@ -0,0 +1,58 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for --debug_nans.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl.testing import absltest +from absl.testing import parameterized + +import numpy as onp + +import jax +from jax import test_util as jtu +from jax.test_util import check_grads +from jax import numpy as np +from jax import random + +from jax.config import config +config.parse_flags_with_absl() + +class DebugNaNsTest(jtu.JaxTestCase): + + def setUp(self): + self.cfg = config.read("jax_debug_nans") + config.update("jax_debug_nans", True) + + def tearDown(self): + config.update("jax_debug_nans", self.cfg) + + def testSingleResultPrimitiveNoNaN(self): + A = np.array([[1., 2.], [2., 3.]]) + B = np.tanh(A) + + def testMultipleResultPrimitiveNoNaN(self): + A = np.array([[1., 2.], [2., 3.]]) + D, V = np.linalg.eig(A) + + def testJitComputationNoNaN(self): + A = np.array([[1., 2.], [2., 3.]]) + B = jax.jit(np.tanh)(A) + + def testSingleResultPrimitiveNaN(self): + A = np.array(0.) + with self.assertRaises(FloatingPointError): + B = 0. / A
jax.numpy.linalg.eig - 'jax_debug_nans' related bug Reproducing the error with a simple matrix: (same issue with `np.linalg.eigh`) ``` A = np.array([[1.,2.],[2.,3.]]) D, V = np.linalg.eig(A) ``` Output ``` TypeError Traceback (most recent call last) <ipython-input-171-c4c8dbbffe17> in <module> 1 A = np.array([[1.,2.],[2.,3.]]) ----> 2 V,D = np.linalg.eig(A) ~/RESEARCH/jax/jax/numpy/linalg.py in eig(a) 95 def eig(a): 96 a = _promote_arg_dtypes(np.asarray(a)) ---> 97 w, vl, vr = lax_linalg.eig(a) 98 return w, vr 99 ~/RESEARCH/jax/jax/lax_linalg.py in eig(x) 47 48 def eig(x): ---> 49 w, vl, vr = eig_p.bind(x) 50 return w, vl, vr 51 ~/RESEARCH/jax/jax/core.py in bind(self, *args, **kwargs) 128 top_trace = find_top_trace(args) 129 if top_trace is None: --> 130 return self.impl(*args, **kwargs) 131 132 tracers = map(top_trace.full_raise, args) ~/RESEARCH/jax/jax/lax_linalg.py in eig_impl(operand) 153 154 def eig_impl(operand): --> 155 return xla.apply_primitive(eig_p, operand) 156 157 def eig_translation_rule(c, operand): ~/RESEARCH/jax/jax/interpreters/xla.py in apply_primitive(prim, *args, **params) 122 abstract_args = map(abstractify, args) 123 compiled_fun = xla_primitive_callable(prim, *abstract_args, **params) --> 124 return compiled_fun(*args) 125 126 @cache() ~/RESEARCH/jax/jax/interpreters/xla.py in _execute_compiled_primitive(prim, compiled, backend, result_handler, *args) 172 input_bufs = [device_put(x, device_num, backend=backend) for x in args] 173 out_buf = compiled.Execute(input_bufs) --> 174 if FLAGS.jax_debug_nans: check_nans(prim, out_buf) 175 return result_handler(out_buf) 176 ~/RESEARCH/jax/jax/interpreters/xla.py in check_nans(prim, bufs) 177 def check_nans(prim, bufs): 178 if prim.multiple_results: --> 179 for buf in bufs: 180 _check_nans(prim.name, buf.shape(), buf) 181 else: TypeError: 'jaxlib.xla_extension.PyLocalBuffer' object is not iterable ```
UPDATE (bug still persists) --- The bug is related to the flag `jax_debug_nans` that I have set to `True` by default in my workflow. If it is set otherwise, the error is not thrown. However, I need this flag set for debugging reasons. Is it a quick fix? ``` from jax.config import config config.update("jax_debug_nans", False) A = np.array([[1.,2.],[2.,3.]]) D, V = np.linalg.eig(A) D, V ``` Output ``` (DeviceArray([-0.23606798+0.j, 4.23606798+0.j]), DeviceArray([[-0.85065081+0.j, -0.52573111+0.j], [ 0.52573111+0.j, -0.85065081+0.j]])) ```
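If the flag needs to stay on for the rest of the workflow, a stop-gap until the fix lands is to toggle it only around the eigendecomposition, using the same config API as above (a sketch):
```python
import jax.numpy as np
from jax.config import config

def eig_no_nan_check(a):
  prev = config.read("jax_debug_nans")
  config.update("jax_debug_nans", False)
  try:
    return np.linalg.eig(a)
  finally:
    config.update("jax_debug_nans", prev)

D, V = eig_no_nan_check(np.array([[1., 2.], [2., 3.]]))
```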
2019-10-02T21:17:51
google/jax
1,473
google__jax-1473
[ "1465" ]
c758aff88ba800f7da94060e13c24861274d585e
diff --git a/examples/advi.py b/examples/advi.py --- a/examples/advi.py +++ b/examples/advi.py @@ -54,7 +54,7 @@ def batch_elbo(logprob, rng, params, num_samples): # ========= Helper function for plotting. ========= @partial(jit, static_argnums=(0, 1, 2, 4)) -def mesh_eval(func, x_limits, y_limits, params, num_ticks=101): +def _mesh_eval(func, x_limits, y_limits, params, num_ticks): # Evaluate func on a 2D grid defined by x_limits and y_limits. x = np.linspace(*x_limits, num=num_ticks) y = np.linspace(*y_limits, num=num_ticks) @@ -63,6 +63,8 @@ def mesh_eval(func, x_limits, y_limits, params, num_ticks=101): zs = vmap(func, in_axes=(0, None))(xy_vec, params) return X, Y, zs.reshape(X.shape) +def mesh_eval(func, x_limits, y_limits, params, num_ticks=101): + return _mesh_eval(func, x_limits, y_limits, params, num_ticks) # ========= Define an intractable unnormalized density =========
advi.py example is broken with jit The example breaks at `@partial(jit, static_argnums=(0, 1, 2, 4))`. If jit is removed, then it works.
This is because the `4`th positional argument, `num_ticks`, has a default value and is not explicitly supplied at the invocation, so `jit` never sees an argument at index 4. Just setting the `static_argnums` to (0,1,2) would fix this.
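The underlying rule: every index in `static_argnums` must refer to an argument that is actually supplied at the call site, because defaults filled in by Python are never seen by `jit`. A small illustration of the `(0, 1, 2)`-style option on a hypothetical function (not the advi.py code itself):
```python
from functools import partial
from jax import jit
import jax.numpy as np

@partial(jit, static_argnums=(0,))
def eval_on_grid(func, params, num_ticks=101):
  # num_ticks is left at its default, so it stays a concrete Python int
  # and can safely determine the linspace length.
  xs = np.linspace(0., 1., num=num_ticks)
  return func(xs, params)

print(eval_on_grid(lambda xs, p: xs * p, 2.0).shape)  # (101,)
```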
2019-10-09T14:30:17
google/jax
1,512
google__jax-1512
[ "1409" ]
1603a7393a1219fc5cb82402ee51e4f9bfff578f
diff --git a/jax/lax/lax_parallel.py b/jax/lax/lax_parallel.py --- a/jax/lax/lax_parallel.py +++ b/jax/lax/lax_parallel.py @@ -193,11 +193,20 @@ def _allreduce_translation_rule(prim, c, val, replica_groups, backend=None): computation = xla.primitive_computation(prim, scalar, scalar, backend=backend) return c.AllReduce(val, computation, replica_groups=replica_groups) +# psum translation rule has special handling for complex dtypes +def _psum_translation_rule(c, val, replica_groups, backend=None): + psum = partial(_allreduce_translation_rule, lax.add_p, c, + replica_groups=replica_groups, backend=backend) + dtype = c.GetShape(val).numpy_dtype() + if onp.issubdtype(dtype, onp.complexfloating): + return c.Complex(psum(c.Real(val)), psum(c.Imag(val))) + else: + return psum(val) + psum_p = standard_pmap_primitive('psum') pxla.split_axis_rules[psum_p] = \ partial(_allreduce_split_axis_rule, psum_p, lax._reduce_sum) -xla.parallel_translations[psum_p] = \ - partial(_allreduce_translation_rule, lax.add_p) +xla.parallel_translations[psum_p] = _psum_translation_rule pxla.parallel_pure_rules[psum_p] = lambda x, shape: x * prod(shape) ad.deflinear(psum_p, lambda t, axis_name: [t])
diff --git a/tests/pmap_test.py b/tests/pmap_test.py --- a/tests/pmap_test.py +++ b/tests/pmap_test.py @@ -66,6 +66,17 @@ def testBasic(self): ans = f(x) self.assertAllClose(ans, expected, check_dtypes=False) + def testComplexPsum(self): + f = pmap(lambda x: x - lax.psum(x, 'i'), axis_name='i') + + shape = (xla_bridge.device_count(), 4 * 2) + x = onp.arange(prod(shape), dtype=onp.float32).reshape(shape).view(onp.complex64) + expected = x - onp.sum(x, 0) + + ans = f(x) + self.assertAllClose(ans, expected, check_dtypes=False) + + def testNestedBasic(self): f = lambda x: lax.psum(lax.psum(x, 'i'), 'j') f = pmap(pmap(f, 'i'), 'j')
psum of complex64 not supported for multi-GPU. Complex numbers are not supported for AllReduce. I found this bug when trying to do psum across 4 GPUs with complex64 as the dtype. ``` RuntimeError: Unimplemented: Requested AllReduce not implemented on GPU; replica_count: 4; operand_count: 1; IsCrossReplicaAllReduce: 1; NCCL support: 1; first operand array element-type: C64 ```
We can fix this at the JAX level by making [the `psum` lowering](https://github.com/google/jax/blob/01f81875623d7548402d0224703a70e9052492b5/jax/lax/lax_parallel.py#L190-L194) switch on the array element type, and just generate a pair of AllReduces for complex types.
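In the meantime, the same decomposition can be written in user code (a sketch mirroring the new test above):
```python
import jax.numpy as np
from jax import lax, pmap

def psum_complex(x, axis_name):
  # GPU AllReduce handles floats, so reduce the real and imaginary parts
  # separately and recombine them.
  return lax.psum(np.real(x), axis_name) + 1j * lax.psum(np.imag(x), axis_name)

f = pmap(lambda x: x - psum_complex(x, 'i'), axis_name='i')
```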
2019-10-15T22:55:54
google/jax
1,514
google__jax-1514
[ "1486" ]
74dc4bf72a59dca2fd4b9e81a7a1aa6ea52a4c47
diff --git a/jax/api.py b/jax/api.py --- a/jax/api.py +++ b/jax/api.py @@ -652,7 +652,7 @@ def pmap(fun, axis_name=None, devices=None, backend=None): devices available, as returned by ``jax.local_device_count()`` (unless ``devices`` is specified, see below). For nested ``pmap`` calls, the product of the mapped axis sizes must be less than or equal to the number of XLA - devices. TODO(skye): support < # local devices on multi-host platforms + devices. **Multi-host platforms:** On multi-host platforms such as TPU pods, ``pmap`` is designed to be used in SPMD Python programs, where every host is running @@ -667,7 +667,9 @@ def pmap(fun, axis_name=None, devices=None, backend=None): "sees" only its local shard of the input and output. Args: - fun: Function to be mapped over argument axes. + fun: Function to be mapped over argument axes. Its arguments and return + value should be arrays, scalars, or (nested) standard Python containers + (tuple/list/dict) thereof. axis_name: Optional, a hashable Python object used to identify the mapped axis so that parallel collectives can be applied. devices: This is an experimental feature and the API is likely to change.
Document pmap on pytrees The pmap documentation does not mention how it can be used on pytrees or what it is supposed to do when pytrees are involved. For example, code like the Trax snippet pasted below appears to rely on pmap handling pytrees of parameters and state. ``` @functools.partial(backend.pmap, axis_name='batch') def mapped_predict(x, params, state, rng): return model_predict(x, params=params, state=state, rng=rng) ```
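For reference while the docs are updated, the behavior being asked about is the one the docstring change above describes: `pmap` maps over the leading axis of every array leaf and preserves container structure, so dicts and tuples of parameters work directly. A sketch:
```python
import numpy as onp
import jax.numpy as np
from jax import pmap
from jax.lib import xla_bridge

n = xla_bridge.device_count()
params = {'w': onp.ones((n, 3, 3)), 'b': onp.zeros((n, 3))}  # leading axis = devices
x = onp.ones((n, 3))

y = pmap(lambda p, x: np.dot(x, p['w']) + p['b'])(params, x)
print(y.shape)  # (n, 3)
```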
2019-10-15T23:49:54
google/jax
1,515
google__jax-1515
[ "1384" ]
c485a3cc5059427f0d72178578310e9237f8bfd9
diff --git a/jax/random.py b/jax/random.py --- a/jax/random.py +++ b/jax/random.py @@ -240,12 +240,19 @@ def _random_bits(key, bit_width, shape): ### random samplers -def _check_shape(name, shape): +def _check_shape(name, shape, *param_shapes): try: shape = tuple(map(int, shape)) except TypeError: msg = "{} requires a concrete tuple of integers as shape argument, got {}." raise ValueError(msg.format(name, shape)) + if param_shapes: + shape_ = lax.broadcast_shapes(shape, *param_shapes) + if shape != shape_: + msg = ("{} parameter shapes must be broadcast-compatible with shape " + "argument, and the result of broadcasting the shapes must equal " + "the shape argument, but got result {} for shape argument {}.") + raise ValueError(msg.format(name, shape_, shape)) def uniform(key, shape=(), dtype=onp.float64, minval=0., maxval=1.): @@ -253,7 +260,8 @@ def uniform(key, shape=(), dtype=onp.float64, minval=0., maxval=1.): Args: key: a PRNGKey used as the random key. - shape: a tuple of nonnegative integers representing the shape. + shape: optional, a tuple of nonnegative integers representing the result + shape. Default (). dtype: optional, a float dtype for the returned values (default float64 if jax_enable_x64 is true, otherwise float32). minval: optional, a minimum (inclusive) value for the range (default 0). @@ -302,7 +310,7 @@ def randint(key, shape, minval, maxval, dtype=onp.int64): shape: a tuple of nonnegative integers representing the shape. minval: int or array of ints broadcast-compatible with ``shape``, a minimum (inclusive) value for the range. - maxval: int or array of ints broadcast-compatible with ``shape``, a maximum + maxval: int or array of ints broadcast-compatible with ``shape``, a maximum (exclusive) value for the range. dtype: optional, an int dtype for the returned values (default int64 if jax_enable_x64 is true, otherwise int32). @@ -315,7 +323,7 @@ def randint(key, shape, minval, maxval, dtype=onp.int64): @partial(jit, static_argnums=(1, 4)) def _randint(key, shape, minval, maxval, dtype): - _check_shape("randint", shape) + _check_shape("randint", shape, minval.shape, maxval.shape) if not onp.issubdtype(dtype, onp.integer): raise TypeError("randint only accepts integer dtypes.") @@ -400,7 +408,8 @@ def normal(key, shape=(), dtype=onp.float64): Args: key: a PRNGKey used as the random key. - shape: a tuple of nonnegative integers representing the shape. + shape: optional, a tuple of nonnegative integers representing the result + shape. Default (). dtype: optional, a float dtype for the returned values (default float64 if jax_enable_x64 is true, otherwise float32). @@ -419,63 +428,84 @@ def _normal(key, shape, dtype): return onp.array(onp.sqrt(2), dtype) * lax.erf_inv(u) -def multivariate_normal(key, mean=0.0, cov=1.0, shape=(), dtype=onp.float64): - dtype = xla_bridge.canonicalize_dtype(dtype) - return _multivariate_normal(key, mean, cov, shape, dtype) - -@partial(jit, static_argnums=(3, 4)) -def _multivariate_normal(key, mean, cov, shape, dtype): - """Sample multivariate normal random values with given shape, mean, and covariance. +def multivariate_normal(key, mean, cov, shape=None, dtype=onp.float64): + """Sample multivariate normal random values with given mean and covariance. Args: key: a PRNGKey used as the random key. - mean: optional, a scalar or array of mean values along each dimension - cov: optional, a scalar (isotropic), vector (diagonal covariance matrix), or full covariance matrix - shape: optional, a tuple of nonnegative integers representing the shape. 
+ mean: a mean vector of shape ``(..., n)``. + cov: a positive definite covariance matrix of shape ``(..., n, n)``. The + batch shape ``...`` must be broadcast-compatible with that of ``mean``. + shape: optional, a tuple of nonnegative integers specifying the result + batch shape; that is, the prefix of the result shape excluding the last + axis. Must be broadcast-compatible with ``mean.shape[:-1]`` and + ``cov.shape[:-2]``. The default (None) produces a result batch shape by + broadcasting together the batch shapes of ``mean`` and ``cov``. + dtype: optional, a float dtype for the returned values (default float64 if + jax_enable_x64 is true, otherwise float32). Returns: - A random array with latent dimension of (max(asarray(mean).ndim, asarray(cov).ndim)),) + A random array with the specified dtype and shape given by + ``shape + mean.shape[-1:]`` if ``shape`` is not None, or else + ``broadcast_shapes(mean.shape[:-1], cov.shape[:-2]) + mean.shape[-1:]``. """ - _check_shape("multivariate_normal", shape) - if hasattr(mean, "shape") and mean.ndim > 1: - raise ValueError("Mean cannot have more than 1 dimension.") - if hasattr(cov, "shape") and cov.ndim > 0: - if cov.ndim > 2: - raise ValueError("Covariance matrix cannot have more than 2 dimensions.") - shape = shape + cov.shape[:1] - normal_samples = normal(key, shape, dtype) - if cov.ndim == 2: - samples = np.tensordot(normal_samples, cholesky(cov), axes=1) - else: - samples = normal_samples * np.sqrt(cov) + dtype = xla_bridge.canonicalize_dtype(dtype) + return _multivariate_normal(key, mean, cov, shape, dtype) + +@partial(jit, static_argnums=(3, 4)) +def _multivariate_normal(key, mean, cov, shape, dtype): + if not onp.ndim(mean) >= 1: + msg = "multivariate_normal requires mean.ndim >= 1, got mean.ndim == {}" + raise ValueError(msg.format(onp.ndim(mean))) + if not onp.ndim(cov) >= 2: + msg = "multivariate_normal requires cov.ndim >= 2, got cov.ndim == {}" + raise ValueError(msg.format(onp.ndim(cov))) + n = mean.shape[-1] + if onp.shape(cov)[-2:] != (n, n): + msg = ("multivariate_normal requires cov.shape == (..., n, n) for n={n}, " + "but got cov.shape == {shape}.") + raise ValueError(msg.format(n=n, shape=onp.shape(cov))) + + if shape is None: + shape = lax.broadcast_shapes(mean.shape[:-1], cov.shape[:-2]) else: - if hasattr(mean, "shape") and mean.ndim > 0: - shape = shape + mean.shape[:1] - normal_samples = normal(key, shape, dtype) - samples = np.sqrt(cov) * normal_samples - return samples + mean + _check_shape("normal", shape, mean.shape[:-1], mean.shape[:-2]) + chol_factor = cholesky(cov) + normal_samples = normal(key, shape + mean.shape[-1:], dtype) + return mean + np.tensordot(normal_samples, chol_factor, [-1, 1]) -def truncated_normal(key, lower, upper, shape=(), dtype=onp.float64): + +def truncated_normal(key, lower, upper, shape=None, dtype=onp.float64): """Sample truncated standard normal random values with given shape and dtype. Args: key: a PRNGKey used as the random key. - lower: a floating-point lower bound for truncation. - upper: a floating-point upper bound for truncation. - shape: a tuple of nonnegative integers representing the shape. + lower: a float or array of floats representing the lower bound for + truncation. Must be broadcast-compatible with ``upper``. + upper: a float or array of floats representing the upper bound for + truncation. Must be broadcast-compatible with ``lower``. + shape: optional, a tuple of nonnegative integers specifying the result + shape. 
Must be broadcast-compatible with ``lower`` and ``upper``. The + default (None) produces a result shape by broadcasting ``lower`` and + ``upper``. dtype: optional, a float dtype for the returned values (default float64 if jax_enable_x64 is true, otherwise float32). Returns: - A random array with the specified shape and dtype. + A random array with the specified dtype and shape given by ``shape`` if + ``shape`` is not None, or else by broadcasting ``lower`` and ``upper``. """ dtype = xla_bridge.canonicalize_dtype(dtype) return _truncated_normal(key, lower, upper, shape, dtype) @partial(jit, static_argnums=(3, 4)) def _truncated_normal(key, lower, upper, shape, dtype): - _check_shape("truncated_normal", shape) + if shape is None: + shape = lax.broadcast_shapes(lower.shape, upper.shape) + else: + _check_shape("truncated_normal", shape, lower.shape, upper.shape) + sqrt2 = onp.array(onp.sqrt(2), dtype) a = lax.erf(lax.convert_element_type(lower, dtype) / sqrt2) b = lax.erf(lax.convert_element_type(upper, dtype) / sqrt2) @@ -485,18 +515,20 @@ def _truncated_normal(key, lower, upper, shape, dtype): return sqrt2 * lax.erf_inv(a + u * (b - a)) -def bernoulli(key, p=onp.float32(0.5), shape=()): +def bernoulli(key, p=onp.float32(0.5), shape=None): """Sample Bernoulli random values with given shape and mean. Args: key: a PRNGKey used as the random key. - p: optional, an array-like of floating dtype broadcastable to `shape` for - the mean of the random variables (default 0.5). - shape: optional, a tuple of nonnegative integers representing the shape - (default scalar). + p: optional, a float or array of floats for the mean of the random + variables. Must be broadcast-compatible with ``shape``. Default 0.5. + shape: optional, a tuple of nonnegative integers representing the result + shape. Must be broadcast-compatible with ``p.shape``. The default (None) + produces a result shape equal to ``p.shape``. Returns: - A random array with the specified shape and boolean dtype. + A random array with boolean dtype and shape given by ``shape`` if ``shape`` + is not None, or else ``p.shape``. """ dtype = xla_bridge.canonicalize_dtype(lax.dtype(p)) if not onp.issubdtype(dtype, onp.floating): @@ -507,39 +539,45 @@ def bernoulli(key, p=onp.float32(0.5), shape=()): @partial(jit, static_argnums=(2,)) def _bernoulli(key, p, shape): - _check_shape("bernoulli", shape) - shape = shape or onp.shape(p) - if onp.shape(p) != shape: - p = np.broadcast_to(p, shape) - return lax.lt(uniform(key, shape, lax.dtype(p)), p) + if shape is None: + shape = p.shape + else: + _check_shape("bernoulli", shape, p.shape) + return uniform(key, shape, lax.dtype(p)) < p -def beta(key, a, b, shape=(), dtype=onp.float64): + +def beta(key, a, b, shape=None, dtype=onp.float64): """Sample Bernoulli random values with given shape and mean. Args: key: a PRNGKey used as the random key. - a: an array-like broadcastable to `shape` and used as the shape parameter - alpha of the random variables. - b: an array-like broadcastable to `shape` and used as the shape parameter - beta of the random variables. - shape: optional, a tuple of nonnegative integers representing the shape - (default scalar). + a: a float or array of floats broadcast-compatible with ``shape`` + representing the first parameter "alpha". + b: a float or array of floats broadcast-compatible with ``shape`` + representing the second parameter "beta". + shape: optional, a tuple of nonnegative integers specifying the result + shape. Must be broadcast-compatible with ``a`` and ``b``. 
The default + (None) produces a result shape by broadcasting ``a`` and ``b``. dtype: optional, a float dtype for the returned values (default float64 if jax_enable_x64 is true, otherwise float32). Returns: - A random array with the specified shape and dtype. + A random array with the specified dtype and shape given by ``shape`` if + ``shape`` is not None, or else by broadcasting ``a`` and ``b``. """ dtype = xla_bridge.canonicalize_dtype(dtype) return _beta(key, a, b, shape, dtype) @partial(jit, static_argnums=(3, 4)) def _beta(key, a, b, shape, dtype): - _check_shape("beta", shape) + if shape is None: + shape = lax.broadcast_shapes(a.shape, b.shape) + else: + _check_shape("beta", shape, a.shape, b.shape) + a = lax.convert_element_type(a, dtype) b = lax.convert_element_type(b, dtype) - shape = shape or lax.broadcast_shapes(np.shape(a), np.shape(b)) key_a, key_b = split(key) gamma_a = gamma(key_a, a, shape, dtype) gamma_b = gamma(key_b, b, shape, dtype) @@ -551,8 +589,8 @@ def cauchy(key, shape=(), dtype=onp.float64): Args: key: a PRNGKey used as the random key. - shape: optional, a tuple of nonnegative integers representing the shape - (default scalar). + shape: optional, a tuple of nonnegative integers representing the result + shape. Default (). dtype: optional, a float dtype for the returned values (default float64 if jax_enable_x64 is true, otherwise float32). @@ -570,29 +608,41 @@ def _cauchy(key, shape, dtype): return lax.tan(lax.mul(pi, lax.sub(u, _constant_like(u, 0.5)))) -def dirichlet(key, alpha, shape=(), dtype=onp.float64): +def dirichlet(key, alpha, shape=None, dtype=onp.float64): """Sample Cauchy random values with given shape and float dtype. Args: key: a PRNGKey used as the random key. - alpha: an array-like with `alpha.shape[:-1]` broadcastable to `shape` and - used as the concentration parameter of the random variables. - shape: optional, a tuple of nonnegative integers representing the batch - shape (defaults to `alpha.shape[:-1]`). + alpha: an array of shape ``(..., n)`` used as the concentration + parameter of the random variables. + shape: optional, a tuple of nonnegative integers specifying the result + batch shape; that is, the prefix of the result shape excluding the last + element of value ``n``. Must be broadcast-compatible with + ``alpha.shape[:-1]``. The default (None) produces a result shape equal to + ``alpha.shape``. dtype: optional, a float dtype for the returned values (default float64 if jax_enable_x64 is true, otherwise float32). Returns: - A random array with the specified shape and dtype. + A random array with the specified dtype and shape given by + ``shape + (alpha.shape[-1],)`` if ``shape`` is not None, or else + ``alpha.shape``. """ dtype = xla_bridge.canonicalize_dtype(dtype) return _dirichlet(key, alpha, shape, dtype) @partial(jit, static_argnums=(2, 3)) def _dirichlet(key, alpha, shape, dtype): - _check_shape("dirichlet", shape) - alpha = asarray(alpha, dtype) - shape = shape or alpha.shape[:-1] + if not onp.ndim(alpha) >= 1: + msg = "dirichlet requires alpha.ndim >= 1, got alpha.ndim == {}" + raise ValueError(msg.format(onp.ndim(alpha))) + + if shape is None: + shape = alpha.shape[:-1] + else: + _check_shape("dirichlet", shape, alpha.shape[:-1]) + + alpha = lax.convert_element_type(alpha, dtype) gamma_samples = gamma(key, alpha, shape + alpha.shape[-1:], dtype) return gamma_samples / np.sum(gamma_samples, axis=-1, keepdims=True) @@ -602,8 +652,8 @@ def exponential(key, shape=(), dtype=onp.float64): Args: key: a PRNGKey used as the random key. 
- shape: optional, a tuple of nonnegative integers representing the shape - (default scalar). + shape: optional, a tuple of nonnegative integers representing the result + shape. Default (). dtype: optional, a float dtype for the returned values (default float64 if jax_enable_x64 is true, otherwise float32). @@ -676,7 +726,6 @@ def _next_kxv(kxv): z = lax.mul(lax.mul(d, V), boost) return lax.select(lax.eq(z, zero), onp.finfo(z.dtype).tiny, z) - _bivariate_coef = [[0.16009398, -0.094634816, 0.025146379, -0.0030648348, 1, 0.3266811, 0.10406087, 0.0014179033], [0.53487893, 0.12980707, 0.06573594, -0.0015649787, @@ -684,7 +733,6 @@ def _next_kxv(kxv): [0.040121005, -0.0065914079, -0.002628604, -0.0013441777, 0.017050642, -0.0021309345, 0.00085092385, -1.5248239e-07]] - def _gamma_grad_one(z, alpha): # Ref 1: Pathwise Derivatives Beyond the Reparameterization Trick, Martin & Fritz # Ref 2: Case 4 follows https://github.com/fritzo/notebooks/blob/master/gamma-reparameterized.ipynb @@ -780,14 +828,12 @@ def _case4(zagf): _, _, grad, flag = lax.while_loop(lambda zagf: ~zagf[3], _case4, (z, alpha, grad, flag)) return grad - def _gamma_grad(sample, a): samples = np.reshape(sample, -1) alphas = np.reshape(a, -1) grads = vmap(_gamma_grad_one)(samples, alphas) return grads.reshape(a.shape) - @custom_transforms def _gamma_impl(key, a): alphas = np.reshape(a, -1) @@ -795,34 +841,37 @@ def _gamma_impl(key, a): samples = vmap(_gamma_one)(keys, alphas) return np.reshape(samples, np.shape(a)) - defjvp(_gamma_impl, None, lambda tangent, ans, key, a, **kwargs: tangent * _gamma_grad(ans, a)) - -def gamma(key, a, shape=(), dtype=onp.float64): +def gamma(key, a, shape=None, dtype=onp.float64): """Sample Gamma random values with given shape and float dtype. Args: key: a PRNGKey used as the random key. - a: an array-like broadcastable to `shape` and used as the shape parameter - of the random variables. - shape: optional, a tuple of nonnegative integers representing the shape - (default scalar). + a: a float or array of floats broadcast-compatible with ``shape`` + representing the parameter of the distribution. + shape: optional, a tuple of nonnegative integers specifying the result + shape. Must be broadcast-compatible with ``a``. The default (None) + produces a result shape equal to ``a.shape``. dtype: optional, a float dtype for the returned values (default float64 if jax_enable_x64 is true, otherwise float32). Returns: - A random array with the specified shape and dtype. + A random array with the specified dtype and with shape given by ``shape`` if + ``shape`` is not None, or else by ``a.shape``. """ dtype = xla_bridge.canonicalize_dtype(dtype) return _gamma(key, a, shape, dtype) @partial(jit, static_argnums=(2, 3)) def _gamma(key, a, shape, dtype): - _check_shape("gamma", shape) + if shape is None: + shape = a.shape + else: + _check_shape("gamma", shape, a.shape) + a = lax.convert_element_type(a, dtype) - shape = shape or onp.shape(a) if onp.shape(a) != shape: a = np.broadcast_to(a, shape) return _gamma_impl(key, a) @@ -833,8 +882,8 @@ def gumbel(key, shape=(), dtype=onp.float64): Args: key: a PRNGKey used as the random key. - shape: optional, a tuple of nonnegative integers representing the shape - (default scalar). + shape: optional, a tuple of nonnegative integers representing the result + shape. Default (). dtype: optional, a float dtype for the returned values (default float64 if jax_enable_x64 is true, otherwise float32). 
@@ -856,8 +905,8 @@ def laplace(key, shape=(), dtype=onp.float64): Args: key: a PRNGKey used as the random key. - shape: optional, a tuple of nonnegative integers representing the shape - (default scalar). + shape: optional, a tuple of nonnegative integers representing the result + shape. Default (). dtype: optional, a float dtype for the returned values (default float64 if jax_enable_x64 is true, otherwise float32). @@ -880,8 +929,8 @@ def logistic(key, shape=(), dtype=onp.float64): Args: key: a PRNGKey used as the random key. - shape: optional, a tuple of nonnegative integers representing the shape - (default scalar). + shape: optional, a tuple of nonnegative integers representing the result + shape. Default (). dtype: optional, a float dtype for the returned values (default float64 if jax_enable_x64 is true, otherwise float32). @@ -897,33 +946,36 @@ def _logistic(key, shape, dtype): return logit(uniform(key, shape, dtype)) -def pareto(key, b, shape=(), dtype=onp.float64): +def pareto(key, b, shape=None, dtype=onp.float64): """Sample Pareto random values with given shape and float dtype. Args: key: a PRNGKey used as the random key. - b: an array-like broadcastable to `shape` and used as the shape parameter - of the random variables. - shape: optional, a tuple of nonnegative integers representing the shape - (default scalar). + a: a float or array of floats broadcast-compatible with ``shape`` + representing the parameter of the distribution. + shape: optional, a tuple of nonnegative integers specifying the result + shape. Must be broadcast-compatible with ``b``. The default (None) + produces a result shape equal to ``b.shape``. dtype: optional, a float dtype for the returned values (default float64 if jax_enable_x64 is true, otherwise float32). Returns: - A random array with the specified shape and dtype. + A random array with the specified dtype and with shape given by ``shape`` if + ``shape`` is not None, or else by ``b.shape``. """ dtype = xla_bridge.canonicalize_dtype(dtype) return _pareto(key, b, shape, dtype) @partial(jit, static_argnums=(2, 3)) def _pareto(key, b, shape, dtype): - _check_shape("pareto", shape) + if shape is None: + shape = b.shape + else: + _check_shape("pareto", shape) + b = lax.convert_element_type(b, dtype) - shape = shape or onp.shape(b) - if onp.shape(b) != shape: - b = np.broadcast_to(b, shape) e = exponential(key, shape, dtype) - return lax.exp(lax.div(e, b)) + return lax.exp(e / b) def t(key, df, shape=(), dtype=onp.float64): @@ -931,24 +983,29 @@ def t(key, df, shape=(), dtype=onp.float64): Args: key: a PRNGKey used as the random key. - df: an array-like broadcastable to `shape` and used as the shape parameter - of the random variables. - shape: optional, a tuple of nonnegative integers representing the shape - (default scalar). + df: a float or array of floats broadcast-compatible with ``shape`` + representing the parameter of the distribution. + shape: optional, a tuple of nonnegative integers specifying the result + shape. Must be broadcast-compatible with ``df``. The default (None) + produces a result shape equal to ``df.shape``. dtype: optional, a float dtype for the returned values (default float64 if jax_enable_x64 is true, otherwise float32). Returns: - A random array with the specified shape and dtype. + A random array with the specified dtype and with shape given by ``shape`` if + ``shape`` is not None, or else by ``df.shape``. 
""" dtype = xla_bridge.canonicalize_dtype(dtype) return _t(key, df, shape, dtype) @partial(jit, static_argnums=(2, 3)) def _t(key, df, shape, dtype): - _check_shape("t", shape) + if shape is None: + shape = df.shape + else: + _check_shape("t", shape, df.shape) + df = lax.convert_element_type(df, dtype) - shape = shape or onp.shape(df) key_n, key_g = split(key) n = normal(key_n, shape, dtype) two = _constant_like(n, 2)
diff --git a/tests/random_test.py b/tests/random_test.py --- a/tests/random_test.py +++ b/tests/random_test.py @@ -16,8 +16,8 @@ from __future__ import division from __future__ import print_function +from functools import partial from unittest import SkipTest -import re from absl.testing import absltest from absl.testing import parameterized @@ -218,7 +218,9 @@ def testCauchy(self, dtype): @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_alpha={}_{}".format(alpha, dtype), "alpha": alpha, "dtype": onp.dtype(dtype).name} - for alpha in [[0.2, 1., 5.]] + for alpha in [ + onp.array([0.2, 1., 5.]), + ] for dtype in [onp.float32, onp.float64])) def testDirichlet(self, alpha, dtype): key = random.PRNGKey(0) @@ -276,7 +278,7 @@ def testGammaGrad(self, alpha): rng = random.PRNGKey(0) alphas = onp.full((100,), alpha) z = random.gamma(rng, alphas) - actual_grad = api.grad(lambda x: (random.gamma(rng, x)).sum())(alphas) + actual_grad = api.grad(lambda x: random.gamma(rng, x).sum())(alphas) eps = 0.01 * alpha / (1.0 + onp.sqrt(alpha)) cdf_dot = (scipy.stats.gamma.cdf(z, alpha + eps) @@ -367,35 +369,33 @@ def testT(self, df, dtype): self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.t(df).cdf) @parameterized.named_parameters(jtu.cases_from_list( - {"testcase_name": "_mean={}_cov={}_{}".format(mean, cov, dtype), - "mean": mean, "cov": cov, "dtype": dtype} - for mean in [0, 5, onp.asarray([1, -2, 3]), onp.asarray([[1]])] - for cov in [.1, 5, onp.asarray([4, 5, 6]), - onp.asarray([[4.60, 2.86, 2.33], - [2.86, 3.04, 1.74], - [2.33, 1.74, 1.83]]), - onp.asarray([[[1]]])] + {"testcase_name": "_{}D_{}".format(dim, onp.dtype(dtype).name), + "dim": dim, "dtype": dtype} + for dim in [1, 3, 5] for dtype in [onp.float32, onp.float64])) - def testMultivariateNormal(self, mean, cov, dtype): + def testMultivariateNormal(self, dim, dtype): + r = onp.random.RandomState(dim) + mean = r.randn(dim) + cov_factor = r.randn(dim, dim) + cov = onp.dot(cov_factor, cov_factor.T) + dim * onp.eye(dim) + key = random.PRNGKey(0) - rand = lambda key, mean, cov: random.multivariate_normal(key, mean, cov, (1000,), dtype) + rand = partial(random.multivariate_normal, mean=mean, cov=cov, + shape=(10000,)) crand = api.jit(rand) - if hasattr(cov, "shape") and cov.ndim > 2 or hasattr(mean, "shape") and mean.ndim > 1: - self.assertRaises(ValueError, lambda: rand(key, mean, cov)) - self.assertRaises(ValueError, lambda: crand(key, mean, cov)) - return - - uncompiled_samples = rand(key, mean, cov) - compiled_samples = crand(key, mean, cov) - if hasattr(cov, "shape") and cov.ndim == 2: - inv_scale = scipy.linalg.lapack.dtrtri(onp.linalg.cholesky(cov), lower=True)[0] - rescale = lambda x: onp.tensordot(x, inv_scale, axes=(-1, 1)) - else: - rescale = lambda x: x / np.sqrt(cov) + + uncompiled_samples = onp.asarray(rand(key), onp.float64) + compiled_samples = onp.asarray(crand(key), onp.float64) + + inv_scale = scipy.linalg.lapack.dtrtri(onp.linalg.cholesky(cov), lower=True)[0] for samples in [uncompiled_samples, compiled_samples]: - self._CheckKolmogorovSmirnovCDF( - rescale(samples - mean).reshape(-1), - scipy.stats.norm().cdf) + centered = samples - mean + whitened = onp.einsum('nj,ij->ni', centered, inv_scale) + + # This is a quick-and-dirty multivariate normality check that tests that a + # uniform mixture of the marginals along the covariance matrix's + # eigenvectors follow a standard normal distribution. 
+ self._CheckKolmogorovSmirnovCDF(whitened.ravel(), scipy.stats.norm().cdf) def testIssue222(self): x = random.randint(random.PRNGKey(10003), (), 0, 0) @@ -417,7 +417,7 @@ def feature_map(n, d, sigma=1.0, seed=123): phi = lambda x, t: np.sqrt(2.0 / d) * np.cos(np.matmul(W, x) + w*t + b) return phi - self.assertRaisesRegex(ValueError, re.compile(r'.*requires a concrete.*'), + self.assertRaisesRegex(ValueError, '.*requires a concrete.*', lambda: feature_map(5, 3)) def testIssue756(self):
Multivariate Normal Hi all, I noticed that sampling from predefined distributions works a bit differently in jax than it does in numpy. Is it possible to use the jax.random module to sample from a multivariate normal distribution (with a dense covariance matrix)? If not, is there any other way? So far I have only managed to sample from one with unit variance, using jax.random.normal. Cheers,
Thanks for the question! The best way to sample from a multivariate Gaussian is to sample a vector of iid unit-variance zero-mean Gaussians (which you can do with `jax.random.normal`), and then perform an affine transformation to produce the mean and covariance structure you want. For example, let's take the zero-mean case (since it's easy to add a mean vector in later). If x ~ N(0, I) is a vector of unit-variance zero-mean Gaussians, then y = A x has covariance E[yy'] = E[(A x)(A x)'] = E[A x x' A'] = A E[xx'] A' = AA'. So if we had a target covariance Q and mean mu, the procedure would look something like 1. sample a standard Gaussian like v ~ N(0, I) 2. return a sample w = A v + mu, where A is a square root of Q such that AA' = Q. See also [this section on Wikipedia](https://en.wikipedia.org/wiki/Multivariate_normal_distribution#Drawing_values_from_the_distribution). There are many matrix square roots (all related by an orthogonal transformation on the right), and any one will work. The standard one to use here would be the Cholesky square root, since (IIRC) it's the cheapest to compute in terms of FLOPs. That's usually written as L = chol(Q) where LL' = Q. (Another option is the symmetric square root, which basically looks like diagonalizing Q, taking the square root of the eigenvalues, and then multiplying the result back out. Fun fact, if you take the QR of the symmetric square root, then R' is the Cholesky!) Since we have `vmap` we don't need to worry about handling batch dimensions by hand. Maybe a good API for a multivariate normal would be ```python # Q is an (n, n) matrix # mu is an (n,) vector w = random.multivariate_normal(key, Q, mu) ``` Thanks for the answer! Sorry for the confusion. I was mostly interested to know whether there is a multivariate normal function already implemented in jax, since I couldn't find one. Are there any plans to add more distributions (including a multivariate normal) to the jax.random? P.S. I ended up doing the Cholesky decomposition. In case you find it useful, we have a multivariate normal distribution in [numpyro](https://github.com/pyro-ppl/numpyro/blob/master/numpyro/distributions/continuous.py#L559) that's basically just doing what Matt proposed above (the remaining code is just to comply with our distributions API). cc. @fehiepsi. In JAX, there is a pending PR #269 which does the job. Thanks a lot! I think this answers my question.
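A minimal sketch of the Cholesky recipe described above, written against the public `jax.random.normal` and `jax.numpy.linalg.cholesky` APIs; the example mean and covariance are borrowed from the test file, and the sample count is an arbitrary choice:

```python
import jax.numpy as np
from jax import random

def sample_mvn(key, mu, Q, num_samples):
  # Step 1: v ~ N(0, I), one row per sample.
  v = random.normal(key, (num_samples, mu.shape[0]))
  # Step 2: affine transform w = L v + mu, where L is the Cholesky square
  # root of Q (L @ L.T == Q), so cov(w) = L L' = Q and mean(w) = mu.
  L = np.linalg.cholesky(Q)
  return mu + v @ L.T

key = random.PRNGKey(0)
mu = np.array([1., -2., 3.])
Q = np.array([[4.60, 2.86, 2.33],
              [2.86, 3.04, 1.74],
              [2.33, 1.74, 1.83]])
samples = sample_mvn(key, mu, Q, 1000)  # shape (1000, 3)
```

The `random.multivariate_normal(key, mean, cov, shape)` added in the patch wraps essentially this transformation (a Cholesky factor plus `np.tensordot`), with batch dimensions handled by broadcasting rather than by hand.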
2019-10-16T01:37:16
google/jax
1,524
google__jax-1524
[ "1521" ]
eae59d0b2c9cc13e0f2b835c4daa6c7be23ba3b2
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -2405,7 +2405,7 @@ def _take_along_axis(arr, indices, axis): if axis is None: if ndim(indices) != 1: msg = "take_along_axis indices must be 1D if axis=None, got shape {}" - raise ValueError(msg.format(shape(indices))) + raise ValueError(msg.format(indices.shape)) return take_along_axis(arr.ravel(), indices, 0) rank = ndim(arr) if rank != ndim(indices): @@ -2413,11 +2413,19 @@ def _take_along_axis(arr, indices, axis): raise ValueError(msg.format(ndim(indices), ndim(arr))) axis = _canonicalize_axis(axis, rank) - arr_shape = list(shape(arr)) - axis_size = arr_shape[axis] - arr_shape[axis] = 1 - idx_shape = shape(indices) - out_shape = lax.broadcast_shapes(idx_shape, tuple(arr_shape)) + def replace(tup, val): + lst = list(tup) + lst[axis] = val + return tuple(lst) + + bcast_shape = lax.broadcast_shapes(replace(arr.shape, 1), replace(indices.shape, 1)) + indices = broadcast_to(indices, replace(bcast_shape, indices.shape[axis])) + arr = broadcast_to(arr, replace(bcast_shape, arr.shape[axis])) + + axis_size = arr.shape[axis] + arr_shape = replace(arr.shape, 1) + idx_shape = indices.shape + out_shape = lax.broadcast_shapes(idx_shape, arr_shape) index_dims = [i for i, idx in enumerate(idx_shape) if i == axis or idx != 1] @@ -3144,4 +3152,4 @@ def _unstack(x): if x.ndim == 0: raise ValueError("Argument to _unstack must be non-scalar") return [lax.index_in_dim(x, i, keepdims=False) for i in range(x.shape[0])] -setattr(DeviceArray, "_unstack", _unstack) \ No newline at end of file +setattr(DeviceArray, "_unstack", _unstack)
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -1985,6 +1985,16 @@ def testOpGrad(self, op, rng, shapes, dtype, order, tol): def testOpGradSpecialValue(self, op, special_value): check_grads(op, (special_value,), 2, ["fwd", "rev"]) + def testTakeAlongAxisIssue1521(self): + # https://github.com/google/jax/issues/1521 + idx = lnp.repeat(lnp.arange(3), 10).reshape((30, 1)) + + def f(x): + y = x * lnp.arange(3.).reshape((1, 3)) + return lnp.take_along_axis(y, idx, -1).sum() + + check_grads(f, (1.,), order=1) + if __name__ == "__main__": absltest.main()
np.take_along_axis gives wrong gradient The following repro script shows the issue: ``` import jax.numpy as np from jax import value_and_grad idx = np.repeat(np.arange(3), 10).reshape((30, 1)) def f(x): y = x * np.arange(3.).reshape((1, 3)) return np.take_along_axis(y, idx, -1).sum() def g(x): y = x * np.arange(3.).reshape((1, 3)) y = np.broadcast_to(y, (30, 3)) return np.take_along_axis(y, idx, -1).sum() print(value_and_grad(f)(1.)) # get 30, 0 print(value_and_grad(g)(1.)) # get 30, 30 ``` I think `(30, 30)` is the correct answer.
Thanks for catching this. By the way, I recommend using `check_grads` from `jax.test_util` for checking derivatives (because you don't have to worry about scalar output, and you can check fwd/rev and arbitrary order all at once): ```python import jax.numpy as np from jax import value_and_grad from jax.test_util import check_grads idx = np.arange(3).reshape(3, 1) def h(x): y = x * np.arange(3.).reshape((1, 3)) return np.take_along_axis(y, idx, -1) check_grads(h, (1.,), order=1) ``` ``` AssertionError: 0.0 != 9.63398933411 ``` It seems `check_grads(h, (1.,), order=1, modes=["fwd"])` succeeds but `check_grads(h, (1.,), order=1, modes=["rev"])` fails. The issue must be with the gather transpose rule (?).
2019-10-17T22:38:55
google/jax
1,525
google__jax-1525
[ "1522" ]
eae59d0b2c9cc13e0f2b835c4daa6c7be23ba3b2
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -889,23 +889,20 @@ def broadcast_arrays(*args): def broadcast_to(arr, shape): """Like Numpy's broadcast_to but doesn't necessarily return views.""" arr = arr if isinstance(arr, ndarray) or isscalar(arr) else array(arr) - shape = tuple(map(int, shape)) - if _shape(arr) != shape: - # TODO(mattjj): revise this to call lax.broadcast_in_dim rather than - # lax.broadcast and lax.transpose - lax.broadcast_shapes(shape, _shape(arr)) # error checking - nlead = len(shape) - len(_shape(arr)) - diff, = onp.where(onp.not_equal(shape[nlead:], _shape(arr))) - + shape = tuple(map(int, shape)) # check that shape is concrete + arr_shape = _shape(arr) + if arr_shape == shape: + return arr + else: + nlead = len(shape) - len(arr_shape) + compatible = onp.equal(arr_shape, shape[nlead:]) | onp.equal(arr_shape, 1) + if nlead < 0 or not onp.all(compatible): + msg = "Incompatible shapes for broadcasting: {} and requested shape {}" + raise ValueError(msg.format(arr_shape, shape)) + diff, = onp.where(onp.not_equal(shape[nlead:], arr_shape)) new_dims = tuple(range(nlead)) + tuple(nlead + diff) kept_dims = tuple(onp.delete(onp.arange(len(shape)), new_dims)) - perm = onp.argsort(new_dims + kept_dims) - - broadcast_dims = onp.take(shape, new_dims) - squeezed_array = squeeze(arr, diff) - return lax.transpose(lax.broadcast(squeezed_array, broadcast_dims), perm) - else: - return arr + return lax.broadcast_in_dim(squeeze(arr, diff), shape, kept_dims) @_wraps(onp.split) @@ -3144,4 +3141,4 @@ def _unstack(x): if x.ndim == 0: raise ValueError("Argument to _unstack must be non-scalar") return [lax.index_in_dim(x, i, keepdims=False) for i in range(x.shape[0])] -setattr(DeviceArray, "_unstack", _unstack) \ No newline at end of file +setattr(DeviceArray, "_unstack", _unstack)
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -1230,7 +1230,6 @@ def f(x): f(arr) - def testNonArrayErrorMessage(self): x = [1., 2.] y = onp.array([3., 4.]) @@ -1931,6 +1930,27 @@ def body(i, xy): any(onp.array_equal(x, onp.full((3, 4), 2., dtype=onp.float32)) for x in consts)) + @parameterized.named_parameters( + {"testcase_name": "_from={}_to={}".format(from_shape, to_shape), + "rng": rng, "from_shape": from_shape, "to_shape": to_shape} + for from_shape, to_shape in [ + [(1, 3), (4, 3)], + [(3,), (2, 1, 3)], + [(3,), (3, 3)], + [(1,), (3,)], + ] + for rng in [jtu.rand_default()]) + def testBroadcastTo(self, from_shape, to_shape, rng): + args_maker = self._GetArgsMaker(rng, [from_shape], [onp.float32]) + onp_op = lambda x: onp.broadcast_to(x, to_shape) + lnp_op = lambda x: lnp.broadcast_to(x, to_shape) + self._CheckAgainstNumpy(onp_op, lnp_op, args_maker, check_dtypes=True) + self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True) + + def testBroadcastToIssue1522(self): + self.assertRaisesRegex( + ValueError, "Incompatible shapes for broadcasting: .*", + lambda: lnp.broadcast_to(onp.ones((2, 3)), (1, 3))) # Most grad tests are at the lax level (see lax_test.py), but we add some here # as needed for e.g. particular compound ops of interest.
unexpected behaviour of np.broadcast_to As detected during discussions at https://github.com/google/jax/pull/1515, `np.broadcast_to(np.ones((2, 3)), (1, 3)).shape` should raise an error instead of returning `(1, 2, 3)`.
2019-10-17T23:23:28
google/jax
1,541
google__jax-1541
[ "1220" ]
0289536bff1e5d530b0679fe5573009d04a0aa6c
diff --git a/jax/lax/lax.py b/jax/lax/lax.py --- a/jax/lax/lax.py +++ b/jax/lax/lax.py @@ -180,6 +180,18 @@ def digamma(x): r"""Elementwise digamma: :math:`\psi(x)`.""" return digamma_p.bind(x) +def bessel_i0e(x): + r"""Exponentially scaled modified Bessel function of order 0: + :math:`\mathrm{i0e}(x) = e^{-\mathrm{abs}(x)} \mathrm{i0}(x)` + """ + return bessel_i0e_p.bind(x) + +def bessel_i1e(x): + r"""Exponentially scaled modified Bessel function of order 1: + :math:`\mathrm{i1e}(x) = e^{-\mathrm{abs}(x)} \mathrm{i1}(x)` + """ + return bessel_i1e_p.bind(x) + def erf(x): r"""Elementwise error function: :math:`\mathrm{erf}(x)`.""" return erf_p.bind(x) @@ -1611,6 +1623,19 @@ def _brcast_to(x, shape): digamma_p = standard_unop(_float, 'digamma') +bessel_i0e_p = standard_unop(_float, 'bessel_i0e') +ad.defjvp2(bessel_i0e_p, lambda g, y, x: g * (bessel_i1e(x) - sign(x) * y)) + +bessel_i1e_p = standard_unop(_float, 'bessel_i1e') +def _bessel_i1e_jvp(g, y, x): + eps = onp.finfo(_dtype(x)).eps + x_is_not_tiny = abs(x) > eps + safe_x = select(x_is_not_tiny, x, full_like(x, eps)) + dy_dx = bessel_i0e(safe_x) - y * (sign(safe_x) + reciprocal(safe_x)) + dy_dx = select(x_is_not_tiny, dy_dx, full_like(x, 0.5)) + return g * dy_dx +ad.defjvp2(bessel_i1e_p, _bessel_i1e_jvp) + erf_p = standard_unop(_float, 'erf') ad.defjvp(erf_p, lambda g, x: mul(_const(x, 2. / onp.sqrt(onp.pi)), mul(g, exp(neg(square(x)))))) diff --git a/jax/scipy/special.py b/jax/scipy/special.py --- a/jax/scipy/special.py +++ b/jax/scipy/special.py @@ -521,3 +521,11 @@ def _norm_logpdf(x): defjvp(log_ndtr, lambda g, ans, x: lax.mul(g, lax.exp(lax.sub(_norm_logpdf(x), ans)))) + +@_wraps(osp_special.i0e) +def i0e(x): + return lax.bessel_i0e(x) + +@_wraps(osp_special.i1e) +def i1e(x): + return lax.bessel_i1e(x) \ No newline at end of file
diff --git a/tests/lax_scipy_test.py b/tests/lax_scipy_test.py --- a/tests/lax_scipy_test.py +++ b/tests/lax_scipy_test.py @@ -70,6 +70,8 @@ def op_record(name, nargs, dtypes, rng, test_grad, test_name=None): op_record("ndtr", 1, float_dtypes, jtu.rand_default(), True), # TODO(phawkins): gradient of entr yields NaNs. op_record("entr", 1, float_dtypes, jtu.rand_default(), False), + op_record("i0e", 1, float_dtypes, jtu.rand_default(), True), + op_record("i1e", 1, float_dtypes, jtu.rand_default(), True), ] CombosWithReplacement = itertools.combinations_with_replacement @@ -102,16 +104,17 @@ def lax_fun(array_to_reduce): self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=True) self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True) - @parameterized.named_parameters(jtu.cases_from_list( - {"testcase_name": jtu.format_test_name_suffix( - rec.test_name, shapes, dtypes), - "rng": rec.rng, "shapes": shapes, "dtypes": dtypes, - "test_autodiff": rec.test_autodiff, - "scipy_op": getattr(osp_special, rec.name), - "lax_op": getattr(lsp_special, rec.name)} - for rec in JAX_SPECIAL_FUNCTION_RECORDS - for shapes in CombosWithReplacement(all_shapes, rec.nargs) - for dtypes in CombosWithReplacement(rec.dtypes, rec.nargs))) + @parameterized.named_parameters(itertools.chain.from_iterable( + jtu.cases_from_list( + {"testcase_name": jtu.format_test_name_suffix( + rec.test_name, shapes, dtypes), + "rng": rec.rng, "shapes": shapes, "dtypes": dtypes, + "test_autodiff": rec.test_autodiff, + "scipy_op": getattr(osp_special, rec.name), + "lax_op": getattr(lsp_special, rec.name)} + for shapes in CombosWithReplacement(all_shapes, rec.nargs) + for dtypes in CombosWithReplacement(rec.dtypes, rec.nargs)) + for rec in JAX_SPECIAL_FUNCTION_RECORDS)) def testScipySpecialFun(self, scipy_op, lax_op, rng, shapes, dtypes, test_autodiff): args_maker = self._GetArgsMaker(rng, shapes, dtypes)
Implement i0e and i1e bessel functions These are useful for the von-Mises distribution on the unit circle.
I'm hoping you can help us with these :) I didn't see any mention of these in TF2XLA; is there an existing implementation we can draw on? Or should we treat this as a CustomCall on CPU and GPU (like we treat linalg)?
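To make the von Mises connection above concrete: the normalizer of the von Mises density is 2π·I0(κ), and I0 overflows quickly, which is why the log-density is usually written with the exponentially scaled function (log I0(κ) = κ + log i0e(κ) for κ ≥ 0). Below is a hedged sketch using the `jax.scipy.special.i0e` wrapper added in this PR; the parameter names (`loc`, `concentration`) are illustrative assumptions.

```python
import jax.numpy as np
from jax.scipy.special import i0e

def von_mises_log_prob(x, loc, concentration):
  # log p(x) = k*cos(x - loc) - log(2*pi) - log(I0(k))
  #          = k*(cos(x - loc) - 1) - log(2*pi) - log(i0e(k))   for k >= 0,
  # since i0e(k) = exp(-k) * I0(k); this avoids overflowing I0 for large k.
  k = concentration
  return k * (np.cos(x - loc) - 1) - np.log(2 * np.pi) - np.log(i0e(k))

von_mises_log_prob(0.3, loc=0.0, concentration=100.0)
```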
2019-10-21T14:01:10
google/jax
1,549
google__jax-1549
[ "1548" ]
0ffcd769ef492313aa201bf4a22524b2c557ea43
diff --git a/jax/lax/lax.py b/jax/lax/lax.py --- a/jax/lax/lax.py +++ b/jax/lax/lax.py @@ -599,7 +599,7 @@ def broadcast(operand, sizes): return broadcast_p.bind(operand, sizes=tuple(sizes)) def broadcast_in_dim(operand, shape, broadcast_dimensions): - if operand.ndim == len(shape) and not len(broadcast_dimensions): + if onp.ndim(operand) == len(shape) and not len(broadcast_dimensions): return operand if any(x < 0 or x >= len(shape) for x in broadcast_dimensions): msg = ("broadcast dimensions must be >= 0 and < ndim(shape), got {} for "
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -1952,6 +1952,10 @@ def testBroadcastToIssue1522(self): ValueError, "Incompatible shapes for broadcasting: .*", lambda: lnp.broadcast_to(onp.ones((2, 3)), (1, 3))) + def testBroadcastToIntIssue1548(self): + self.assertAllClose(lnp.broadcast_to(1, (3, 2)), onp.ones((3, 2)), + check_dtypes=False) + # Most grad tests are at the lax level (see lax_test.py), but we add some here # as needed for e.g. particular compound ops of interest.
np.broadcast_to regression in the latest JAX version The new version 0.1.47 introduces a regression where `np.broadcast_to(1, (3, 2))` throws the error `AttributeError: 'int' object has no attribute 'ndim'`. cc @mattjj @neerajprad
2019-10-21T22:12:46
google/jax
1,568
google__jax-1568
[ "1526" ]
251834367f3093c669612ddc119e8fefdcf53387
diff --git a/jax/random.py b/jax/random.py --- a/jax/random.py +++ b/jax/random.py @@ -433,6 +433,32 @@ def shuffle(key: np.ndarray, x: np.ndarray, axis: int = 0) -> np.ndarray: """ return _shuffle(key, x, axis) + +def permutation(key, x): + """ + Permute elements of an array along its first axis or return a permuted range. + + Args:n + key: a PRNGKey used as the random key. + x: the array or integer range to be shuffled. + + Returns: + A shuffled version of x or array range + """ + if not onp.ndim(x): + # scalar case, must be a concrete integer + if not onp.issubdtype(lax.dtype(x), onp.integer): + raise TypeError("x must be an integer or at least 1-dimensional") + x = int(x) + return _shuffle(key, np.arange(x), 0) + elif onp.ndim(x) == 1: + return _shuffle(key, x, 0) + else: + msg = ("permutation for >1d inputs x not yet implemented, see " + "https://github.com/google/jax/issues/2066 for updates.") + raise NotImplementedError(msg) + + @partial(jit, static_argnums=(2,)) def _shuffle(key, x, axis): # On parallel architectures, Fisher-Yates is more expensive than doing
diff --git a/tests/random_test.py b/tests/random_test.py --- a/tests/random_test.py +++ b/tests/random_test.py @@ -25,6 +25,7 @@ import scipy.stats from jax import api +from jax import core from jax import grad from jax import lax from jax import numpy as np @@ -164,10 +165,49 @@ def testShuffle(self, dtype): perm1 = rand(key) perm2 = crand(key) - self.assertTrue(onp.all(perm1 == perm2)) - self.assertTrue(onp.all(perm1.dtype == perm2.dtype)) + self.assertAllClose(perm1, perm2, check_dtypes=True) self.assertFalse(onp.all(perm1 == x)) # seems unlikely! - self.assertTrue(onp.all(onp.sort(perm1) == x)) + self.assertAllClose(onp.sort(perm1), x, check_dtypes=False) + + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": "_{}".format(dtype), "dtype": onp.dtype(dtype).name} + for dtype in [onp.float32, onp.float64, onp.int32, onp.int64])) + def testPermutationArray(self, dtype): + key = random.PRNGKey(0) + x = onp.arange(100).astype(dtype) + rand = lambda key: random.permutation(key, x) + crand = api.jit(rand) + + perm1 = rand(key) + perm2 = crand(key) + + self.assertAllClose(perm1, perm2, check_dtypes=True) + self.assertEqual(perm1.dtype, perm2.dtype) + self.assertFalse(onp.all(perm1 == x)) # seems unlikely! + self.assertAllClose(onp.sort(perm1), x, check_dtypes=False) + self.assertArraysAllClose(x, onp.arange(100).astype(dtype), + check_dtypes=True) + + def testPermutationInteger(self): + key = random.PRNGKey(0) + x = 100 + rand = lambda key: random.permutation(key, x) + crand = api.jit(rand) + + perm1 = rand(key) + perm2 = crand(key) + + self.assertAllClose(perm1, perm2, check_dtypes=True) + self.assertEqual(perm1.dtype, perm2.dtype) + self.assertFalse(onp.all(perm1 == onp.arange(100))) # seems unlikely! + self.assertAllClose(onp.sort(perm1), onp.arange(100), check_dtypes=False) + + def testPermutationErrors(self): + key = random.PRNGKey(0) + with self.assertRaises(TypeError): + random.permutation(key, 10.) + with self.assertRaises(core.ConcretizationTypeError): + api.jit(random.permutation)(key, 10) @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_p={}_{}".format(p, dtype),
[feature request] random permutation It would be nice if we could get `jax.random` functions analogous to `np.random.permutation` and `np.random.shuffle`.
There is a [`random.shuffle`](https://github.com/google/jax/blob/03cd6564e944cf60e58172abe0668762fa05d7e4/jax/random.py#L357). A `random.permutation` could be a simple wrapper around that IIUC. I'd be happy to take a look at this.
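A bare-bones sketch of the wrapper idea from the comment above (integer argument → permuted range, 1-D array → shuffled copy); the committed patch adds dtype and dimensionality checks on top of this, so treat it as an outline rather than the exact implementation:

```python
import numpy as onp
import jax.numpy as np
from jax import random

def permutation(key, x):
  if not onp.ndim(x):
    # Scalar case: permute an integer range, like onp.random.permutation(n).
    # The integer must be concrete (not traced) for np.arange to work here.
    return random.shuffle(key, np.arange(int(x)))
  # 1-D array case: return a shuffled copy along the first axis.
  return random.shuffle(key, x)

key = random.PRNGKey(0)
permutation(key, 10)             # a permutation of 0..9
permutation(key, np.arange(5.))  # a shuffled copy of the array
```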
2019-10-25T19:17:44
google/jax
1,594
google__jax-1594
[ "1010" ]
f7a44523be65098069ad5fadae128d09be1419db
diff --git a/jax/lax/lax.py b/jax/lax/lax.py --- a/jax/lax/lax.py +++ b/jax/lax/lax.py @@ -354,7 +354,7 @@ def convert_element_type(operand, new_dtype): if (onp.issubdtype(old_dtype, onp.complexfloating) and not onp.issubdtype(new_dtype, onp.complexfloating)): msg = "Casting complex values to real discards the imaginary part" - warnings.warn(msg, onp.ComplexWarning) + warnings.warn(msg, onp.ComplexWarning, stacklevel=2) operand = real(operand) old_dtype = _dtype(operand) return convert_element_type_p.bind( diff --git a/jax/numpy/fft.py b/jax/numpy/fft.py --- a/jax/numpy/fft.py +++ b/jax/numpy/fft.py @@ -33,13 +33,14 @@ def _promote_to_complex(arg): dtype = onp.complex64 return lax.convert_element_type(arg, dtype) -@_wraps(onp.fft.fftn) -def fftn(a, s=None, axes=None, norm=None): + +def _fft_core(func_name, fft_type, a, s, axes, norm): # TODO(skye): implement padding/cropping based on 's'. + full_name = "jax.np.fft." + func_name if s is not None: - raise NotImplementedError("jax.np.fftn only supports s=None, got %s" % s) + raise NotImplementedError("%s only supports s=None, got %s" % (full_name, s)) if norm is not None: - raise NotImplementedError("jax.np.fftn only supports norm=None, got %s" % norm) + raise NotImplementedError("%s only supports norm=None, got %s" % (full_name, norm)) if s is not None and axes is not None and len(s) != len(axes): # Same error as numpy. raise ValueError("Shape and axes have different lengths.") @@ -57,17 +58,28 @@ def fftn(a, s=None, axes=None, norm=None): if len(axes) != len(set(axes)): raise ValueError( - "jax.np.fftn does not support repeated axes. Got axes %s." % axes) + "%s does not support repeated axes. Got axes %s." % (full_name, axes)) if any(axis in range(a.ndim - 3) for axis in axes): raise ValueError( - "jax.np.fftn only supports 1D, 2D, and 3D FFTs over the innermost axes." - " Got axes %s with input rank %s." % (orig_axes, a.ndim)) + "%s only supports 1D, 2D, and 3D FFTs over the innermost axes." + " Got axes %s with input rank %s." % (full_name, orig_axes, a.ndim)) if s is None: s = [a.shape[axis] for axis in axes] a = _promote_to_complex(a) - return lax.fft(a, xla_client.FftType.FFT, s) + return lax.fft(a, fft_type, s) + + +@_wraps(onp.fft.fftn) +def fftn(a, s=None, axes=None, norm=None): + return _fft_core('fftn', xla_client.FftType.FFT, a, s, axes, norm) + + +@_wraps(onp.fft.ifftn) +def ifftn(a, s=None, axes=None, norm=None): + return _fft_core('ifftn', xla_client.FftType.IFFT, a, s, axes, norm) + for func in get_module_functions(onp.fft): if func.__name__ not in globals():
diff --git a/tests/fft_test.py b/tests/fft_test.py --- a/tests/fft_test.py +++ b/tests/fft_test.py @@ -51,17 +51,21 @@ def _get_fftn_test_axes(shape): class FftTest(jtu.JaxTestCase): @parameterized.named_parameters(jtu.cases_from_list( - {"testcase_name": "_shape={}_axes={}".format( - jtu.format_shape_dtype_string(shape, dtype), axes), - "axes": axes, "shape": shape, "dtype": dtype, "rng": rng} + {"testcase_name": "_inverse={}_shape={}_axes={}".format( + inverse, jtu.format_shape_dtype_string(shape, dtype), axes), + "axes": axes, "shape": shape, "dtype": dtype, "rng": rng, + "inverse": inverse} + for inverse in [False, True] for rng in [jtu.rand_default()] for dtype in all_dtypes for shape in [(10,), (10, 10), (2, 3, 4), (2, 3, 4, 5)] for axes in _get_fftn_test_axes(shape))) - def testFftn(self, shape, dtype, axes, rng): + def testFftn(self, inverse, shape, dtype, axes, rng): args_maker = lambda: (rng(shape, dtype),) - np_fn = lambda a: np.fft.fftn(a, axes=axes) - onp_fn = lambda a: onp.fft.fftn(a, axes=axes) + np_op = np.fft.ifftn if inverse else np.fft.fftn + onp_op = onp.fft.ifftn if inverse else onp.fft.fftn + np_fn = lambda a: np_op(a, axes=axes) + onp_fn = lambda a: onp_op(a, axes=axes) self._CheckAgainstNumpy(onp_fn, np_fn, args_maker, check_dtypes=True, tol=1e-4) self._CompileAndCheck(np_fn, args_maker, check_dtypes=True) @@ -72,24 +76,30 @@ def testFftn(self, shape, dtype, axes, rng): jtu.check_grads(np_fn, args_maker(), order=1, atol=tol, rtol=tol) jtu.check_grads(np_fn, args_maker(), order=2, atol=tol, rtol=tol) - def testFftnErrors(self): + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": "_inverse={}".format(inverse), + "inverse": inverse} + for inverse in [False, True])) + def testFftnErrors(self, inverse): rng = jtu.rand_default() - self.assertRaisesRegexp( + name = 'ifftn' if inverse else 'fftn' + func = np.fft.ifftn if inverse else np.fft.fftn + self.assertRaisesRegex( ValueError, - "jax.np.fftn only supports 1D, 2D, and 3D FFTs over the innermost axes. " - "Got axes None with input rank 4.", - lambda: np.fft.fftn(rng([2, 3, 4, 5], dtype=onp.float64), axes=None)) - self.assertRaisesRegexp( + "jax.np.fft.{} only supports 1D, 2D, and 3D FFTs over the innermost axes. " + "Got axes None with input rank 4.".format(name), + lambda: func(rng([2, 3, 4, 5], dtype=onp.float64), axes=None)) + self.assertRaisesRegex( ValueError, - "jax.np.fftn only supports 1D, 2D, and 3D FFTs over the innermost axes. " - "Got axes \[0\] with input rank 4.", - lambda: np.fft.fftn(rng([2, 3, 4, 5], dtype=onp.float64), axes=[0])) - self.assertRaisesRegexp( + "jax.np.fft.{} only supports 1D, 2D, and 3D FFTs over the innermost axes. " + "Got axes \\[0\\] with input rank 4.".format(name), + lambda: func(rng([2, 3, 4, 5], dtype=onp.float64), axes=[0])) + self.assertRaisesRegex( ValueError, - "jax.np.fftn does not support repeated axes. Got axes \[1, 1\].", - lambda: np.fft.fftn(rng([2, 3], dtype=onp.float64), axes=[1, 1])) + "jax.np.fft.{} does not support repeated axes. Got axes \\[1, 1\\].".format(name), + lambda: func(rng([2, 3], dtype=onp.float64), axes=[1, 1])) self.assertRaises( - IndexError, lambda: np.fft.fftn(rng([2, 3], dtype=onp.float64), axes=[2])) + IndexError, lambda: func(rng([2, 3], dtype=onp.float64), axes=[2])) if __name__ == "__main__":
Feature request: IFFT Hi there! I'm finding Jax to be very useful in my work, but a new project (spectral Gaussian process stuff) requires me to differentiate through both FFT and IFFT operations. It is possible to express IFFT in terms of FFT, but it would be cleaner to just have an IFFT primitive, I think. Thank you! edit: also, vmap support would be very helpful as well. Also for people from the future, here is an implementation using FFT only: ```python from typing import Optional, Sequence def ifftn( a: np.ndarray, s: Sequence[int] = None, axes: Sequence[int] = None, norm: Optional[str] = None, ) -> np.ndarray: """Compute the n-dimensional inverse Fourier transform. Args: a: Input array. s: Shape of the output. axes: Axes over which to compute the IFFT. norm: Normalization mode, one of ``None`` or ``"ortho"``. Returns: The n-dimensional IFFT of ``a``. """ if s is not None: raise NotImplementedError if norm is not None: raise NotImplementedError n = np.prod(np.array(a.shape)[axes]) return (1 / n) * np.conj(np.fft.fftn(np.conj(a), axes=axes)) ```
@skye fyi
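With the `ifftn` wrapper from this patch in place, a quick round-trip sanity check looks like the following (an illustrative snippet, not taken from the thread):

```python
import numpy as onp
import jax.numpy as np

a = onp.random.RandomState(0).randn(2, 3, 4).astype(onp.complex64)
roundtrip = np.fft.ifftn(np.fft.fftn(a))
assert onp.allclose(roundtrip, a, atol=1e-5)
```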
2019-10-30T01:04:17
google/jax
1,605
google__jax-1605
[ "1600" ]
2bab332e5a0bb46bf8369f015511caf314e49372
diff --git a/jax/api.py b/jax/api.py --- a/jax/api.py +++ b/jax/api.py @@ -846,7 +846,7 @@ def pmap(fun, axis_name=None, devices=None, backend=None): [ 13. 13.] """ _check_callable(fun) - axis_name = _TempAxisName() if axis_name is None else axis_name + axis_name = _TempAxisName(fun) if axis_name is None else axis_name @wraps(fun) def f_pmapped(*args, **kwargs): @@ -875,13 +875,19 @@ def _pmap_axis_size(xs): raise ValueError(msg.format([x for x in xs if not hasattr(x, 'shape')])) class _TempAxisName(object): + def __init__(self, obj): + self.obj = obj def __repr__(self): - return '<axis {}>'.format(hex(id(self))) + return '<axis {}>'.format(hex(id(self.obj))) + def __hash__(self): + return hash(self.obj) + def __eq__(self, other): + return self.obj is other.obj def soft_pmap(fun, axis_name=None, backend=None): _check_callable(fun) - axis_name = _TempAxisName() if axis_name is None else axis_name + axis_name = _TempAxisName(fun) if axis_name is None else axis_name @wraps(fun) def f_pmapped(*args, **kwargs): @@ -930,7 +936,7 @@ def _reshape_merge(x): def _papply(fun): # This function is for testing purposes. - axis_name = _TempAxisName() + axis_name = _TempAxisName(fun) def papply_fun(*args, **kwargs): f = lu.wrap_init(fun) @@ -944,7 +950,7 @@ def papply_fun(*args, **kwargs): def _parallelize(fun): - axis_name = _TempAxisName() + axis_name = _TempAxisName(fun) def pfun(*args): f = lu.wrap_init(fun) diff --git a/jax/interpreters/ad.py b/jax/interpreters/ad.py --- a/jax/interpreters/ad.py +++ b/jax/interpreters/ad.py @@ -26,7 +26,7 @@ from ..abstract_arrays import raise_to_shaped from ..util import unzip2, unzip3, safe_map, safe_zip, partial, split_list from ..tree_util import build_tree, register_pytree_node, tree_map -from ..linear_util import thunk, staged, transformation, transformation_with_aux, wrap_init +from ..linear_util import thunk, transformation, transformation_with_aux, wrap_init from ..api_util import flatten_fun, flatten_fun_nokwargs from ..tree_util import tree_flatten, tree_unflatten diff --git a/jax/linear_util.py b/jax/linear_util.py --- a/jax/linear_util.py +++ b/jax/linear_util.py @@ -69,7 +69,7 @@ def scale_transformer_aux(scale, x): from __future__ import division from __future__ import print_function -import fastcache +import weakref from .util import curry, partial @@ -112,18 +112,6 @@ def __nonzero__(self): __bool__ = __nonzero__ -@curry -def staged(f, *init_args): - store = Store() - def f_partial(*rest): - ans, aux = f(*(init_args + rest)) - store.store(aux) - return ans - - f_partial.__name__ = f.__name__ + "_staged" - return f_partial, thunk(lambda: store.val) - - class WrappedFun(object): """Represents a function `f` to which `transforms` are to be applied. 
@@ -149,8 +137,8 @@ def wrap(self, gen, gen_args, out_store): return WrappedFun(self.f, ((gen, gen_args),) + self.transforms, (out_store,) + self.stores, self.params) - def populate_stores(self, other): - for self_store, other_store in zip(self.stores, other.stores): + def populate_stores(self, stores): + for self_store, other_store in zip(self.stores, stores): if self_store is not None: self_store.store(other_store.val) @@ -208,15 +196,17 @@ def wrap_init(f, params={}): return WrappedFun(f, (), (), tuple(sorted(params.items()))) -def cache(call, max_size=4096): - @fastcache.clru_cache(maxsize=max_size) - def cached_fun_body(f, args): - return call(f, *args), f - - def cached_fun(f, *args): - ans, f_prev = cached_fun_body(f, args) - if id(f_prev) != id(f): - f.populate_stores(f_prev) +def cache(call): + fun_caches = weakref.WeakKeyDictionary() + def memoized_fun(fun, *args): + cache = fun_caches.setdefault(fun.f, {}) + key = (fun.transforms, fun.params, args) + result = cache.get(key, None) + if result is not None: + ans, stores = result + fun.populate_stores(stores) + else: + ans = call(fun, *args) + cache[key] = (ans, fun.stores) return ans - - return cached_fun + return memoized_fun
diff --git a/tests/api_test.py b/tests/api_test.py --- a/tests/api_test.py +++ b/tests/api_test.py @@ -20,6 +20,7 @@ from functools import partial import unittest import warnings +import weakref from absl.testing import absltest import numpy as onp @@ -1181,6 +1182,48 @@ def foo(tree_arg): foo, in_axes=((0, collections.OrderedDict([('a', 1), ('b', 2)])),)) self.assertEqual(vfoo(tree).shape, (6, 2, 5)) + def test_jit_reference_dropping(self): + x = onp.ones(10) + f = (lambda x: lambda: x)(x) # reference to x in f's closure + g = jit(f) + x = weakref.ref(x) # no more strong ref to x in this scope + assert x() is not None # x is still around + f() # f runs + g() # g runs + g() # g runs a second time + del f # delete the raw callable + assert x() is not None # x is still around + g() # g still runs + del g # no more references to x + assert x() is None # x is gone + + def test_jit_global_cache(self): + def f(x): + assert python_should_be_executing + return x + + python_should_be_executing = True + api.jit(f)(2) + python_should_be_executing = False + api.jit(f)(3) + + def test_pmap_global_cache(self): + def f(x): + assert python_should_be_executing + return x + + x = onp.ones(1) + + python_should_be_executing = True + api.pmap(f)(x) + python_should_be_executing = False + api.pmap(f)(x) + + python_should_be_executing = True + api.pmap(f, 'i')(x) + python_should_be_executing = False + api.pmap(f, 'i')(x) + if __name__ == '__main__': absltest.main()
make pmap compilation cache "act globally" with no axis name Both the `jit` and `pmap` compilation caches are global. The `jit` cache acts that way: ```python from jax import jit def f(x): print("miss") return x jit(f)(3) # prints jit(f)(3) # doesn't print, i.e. cache hit ``` But the `pmap` one seems not to when you don't provide a name for the axis: ```python from jax import pmap from jax.lib import xla_bridge as xb import jax.numpy as np def f(x): print("miss") return x x = np.ones(xb.device_count()) pmap(f)(x) # prints pmap(f)(x) # prints pmap(f, axis_name='i')(x) # prints pmap(f, axis_name='i')(x) # doesn't print ``` The issue is that when an axis name isn't provided, we create a dummy one, and we create a new one each time we evaluate `pmap(f)` to produce a new pmapped version of `f`. We should probably remedy that, since patterns like ```python for i in range(100): x = pmap(update)(x) ``` are handy, but they're currently a foot-gun with respect to compilation caching.
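The core of the fix in the patch above is to key the placeholder axis name on the wrapped function instead of on a fresh object per `pmap` call, so that repeated `pmap(f)` invocations produce equal cache keys. A stripped-down illustration of that idea, adapted from the patch:

```python
class _TempAxisName(object):
  # Two instances built from the same function hash and compare equal, so a
  # compilation cache whose key includes the axis name can hit on reuse.
  def __init__(self, obj):
    self.obj = obj
  def __repr__(self):
    return '<axis {}>'.format(hex(id(self.obj)))
  def __hash__(self):
    return hash(self.obj)
  def __eq__(self, other):
    return self.obj is other.obj

def f(x):
  return x

assert _TempAxisName(f) == _TempAxisName(f)
assert hash(_TempAxisName(f)) == hash(_TempAxisName(f))
```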
2019-10-30T22:04:40
google/jax
1,622
google__jax-1622
[ "1621" ]
e6ad9c29da1f3df4aa145da08b315d960aa24597
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -2548,9 +2548,11 @@ def _rewriting_take(arr, idx): def _gather(arr, treedef, static_idx, dynamic_idx): idx = _merge_static_and_dynamic_indices(treedef, static_idx, dynamic_idx) indexer = _index_to_gather(shape(arr), idx) # shared with _scatter_update + y = arr - y = lax.gather(arr, indexer.gather_indices, indexer.dnums, - indexer.gather_slice_shape) + if indexer.gather_indices.size: + y = lax.gather(y, indexer.gather_indices, indexer.dnums, + indexer.gather_slice_shape) # Reverses axes with negative strides. if indexer.reversed_y_dims:
diff --git a/tests/lax_numpy_indexing_test.py b/tests/lax_numpy_indexing_test.py --- a/tests/lax_numpy_indexing_test.py +++ b/tests/lax_numpy_indexing_test.py @@ -730,6 +730,11 @@ def testJVPOfGradOfIndexing(self): self.assertAllClose(expected, primals, check_dtypes=True) self.assertAllClose(onp.zeros_like(x), tangents, check_dtypes=True) + def testTrivialGatherIsntGenerated(self): + # https://github.com/google/jax/issues/1621 + jaxpr = api.make_jaxpr(lambda x: x[:, None])(onp.arange(4)) + self.assertEqual(len(jaxpr.eqns), 1) + self.assertNotIn('gather', str(jaxpr)) def _broadcastable_shapes(shape): @@ -891,6 +896,5 @@ def testSegmentSum(self): self.assertAllClose(ans, expected, check_dtypes=False) - if __name__ == "__main__": absltest.main()
don't generate trivial gathers from numpy indexing A NumPy expression like `x[:, None]` should only produce a `lax.reshape` call, but currently it produces a trivial `lax.gather` followed by the `lax.reshape`. We should avoid generating the trivial gather. We'll need to change lax_numpy.py's `_gather` or `_index_to_gather` to detect when a gather is trivial.
Maybe we just need this `if` check: ```python if indexer.gather_indices.size: y = lax.gather(y, indexer.gather_indices, indexer.dnums, indexer.gather_slice_shape) ```
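To see whether the trivial gather is gone, inspecting the jaxpr works well; this mirrors the regression test added in the PR:

```python
import numpy as onp
from jax import make_jaxpr

jaxpr = make_jaxpr(lambda x: x[:, None])(onp.arange(4))
print(jaxpr)  # after the fix: a single reshape equation, no gather
assert 'gather' not in str(jaxpr)
```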
2019-11-01T20:54:24
google/jax
1,626
google__jax-1626
[ "1624" ]
d946075feda3bafeb21a29af59436433e46f5123
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -516,7 +516,10 @@ def logaddexp(x1, x2): x1, x2 = _promote_shapes("logaddexp", *_promote_to_result_dtype(onp.logaddexp, x1, x2)) amax = lax.max(x1, x2) - return lax.add(amax, lax.log1p(lax.exp(-lax.abs(lax.sub(x1, x2))))) + delta = lax.sub(x1, x2) + return lax.select(isnan(delta), + lax.add(x1, x2), # NaNs or infinities of the same sign. + lax.add(amax, lax.log1p(lax.exp(-lax.abs(delta))))) @_wraps(onp.logaddexp2) @@ -524,8 +527,12 @@ def logaddexp2(x1, x2): x1, x2 = _promote_shapes("logaddexp2", *_promote_to_result_dtype(onp.logaddexp2, x1, x2)) amax = lax.max(x1, x2) - return lax.add(amax, lax.div(lax.log1p(exp2(-lax.abs(lax.sub(x1, x2)))), - _constant_like(x1, onp.log(2)))) + delta = lax.sub(x1, x2) + return lax.select(isnan(delta), + lax.add(x1, x2), # NaNs or infinities of the same sign. + lax.add(amax, lax.div(lax.log1p(exp2(-lax.abs(delta))), + _constant_like(x1, onp.log(2))))) + @_wraps(onp.log2) def log2(x): diff --git a/jax/scipy/special.py b/jax/scipy/special.py --- a/jax/scipy/special.py +++ b/jax/scipy/special.py @@ -81,6 +81,7 @@ def logsumexp(a, axis=None, b=None, keepdims=False, return_sign=False): shape = lax.subvals(onp.shape(a), zip(dims, (1,) * len(dims))) dimadd = lambda x: lax.reshape(x, shape) amax = lax.reduce(a, _constant_like(a, -onp.inf), lax.max, dims) + amax = lax.select(lax.is_finite(amax), amax, lax.full_like(amax, 0)) amax_singletons = dimadd(amax) out = lax.add(lax.log(lax.reduce(lax.exp(lax.sub(a, amax_singletons)), _constant_like(a, 0), lax.add, dims)), amax)
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -191,9 +191,11 @@ def op_record(name, nargs, dtypes, shapes, rng, diff_modes, test_name=None, test_name="log1p_large", tolerance={onp.float64: 1e-12}), op_record("log1p", 1, number_dtypes, all_shapes, jtu.rand_small_positive(), [], tolerance={onp.float64: 1e-12}), - op_record("logaddexp", 2, float_dtypes, all_shapes, jtu.rand_default(), ["rev"], + op_record("logaddexp", 2, float_dtypes, all_shapes, + jtu.rand_some_inf_and_nan(), ["rev"], tolerance={onp.float64: 1e-12}), - op_record("logaddexp2", 2, float_dtypes, all_shapes, jtu.rand_default(), ["rev"], + op_record("logaddexp2", 2, float_dtypes, all_shapes, + jtu.rand_some_inf_and_nan(), ["rev"], tolerance={onp.float16: 1e-2}), op_record("polyval", 2, number_dtypes, nonempty_nonscalar_array_shapes, jtu.rand_default(), [], check_dtypes=False, @@ -1851,7 +1853,7 @@ def testMathSpecialFloatValues(self, op, dtype): onp_op = getattr(onp, op) lnp_op = getattr(lnp, op) dtype = onp.dtype(xla_bridge.canonicalize_dtype(dtype)).type - for x in (onp.nan, -onp.inf, -100., -2. -1., 0., 1., 2., 100., onp.inf, + for x in (onp.nan, -onp.inf, -100., -2., -1., 0., 1., 2., 100., onp.inf, lnp.finfo(dtype).max, onp.sqrt(lnp.finfo(dtype).max), onp.sqrt(lnp.finfo(dtype).max) * 2.): if onp.isnan(x) and op in ("sinh", "cosh", "expm1", "exp"): diff --git a/tests/lax_scipy_test.py b/tests/lax_scipy_test.py --- a/tests/lax_scipy_test.py +++ b/tests/lax_scipy_test.py @@ -84,7 +84,10 @@ def _GetArgsMaker(self, rng, shapes, dtypes): @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_inshape={}_axis={}_keepdims={}".format( jtu.format_shape_dtype_string(shape, dtype), axis, keepdims), - "rng": jtu.rand_default(), "shape": shape, "dtype": dtype, + # TODO(b/133842870): re-enable when exp(nan) returns NaN on CPU. + "rng": jtu.rand_some_inf_and_nan() if jtu.device_under_test() != "cpu" + else jtu.rand_default(), + "shape": shape, "dtype": dtype, "axis": axis, "keepdims": keepdims} for shape in all_shapes for dtype in float_dtypes for axis in range(-len(shape), len(shape))
logaddexp and logsumexp do not handle -inf correctly and do not match numpy/scipy: logaddexp(-inf, -inf) should be -inf but is nan in the jax implementations. numpy.logaddexp(np.NINF, np.NINF) => -inf, while jax.numpy.logaddexp(np.NINF, np.NINF) => nan. The same issue affects special.logsumexp.
I'm seeing -inf for logaddexp but NaN for logsumexp. Can you share the exact code that gives you NaN with jax.numpy.logaddexp? The jax.numpy.logaddexp behavior is really weird, I only see it when running on an array with more than one item: import jax.numpy as np np.logaddexp(np.NINF, np.NINF) -> -inf np.logaddexp(np.array([np.NINF]), np.array([np.NINF])) -> [-inf] np.logaddexp(np.array([np.NINF, -1.0]), np.array([np.NINF, -1.0])) -> [nan, -0.30685282] I think the last behavior is because of XLA mishandling `exp` for NaN inputs on CPU: ``` In [7]: lax.exp(np.nan) Out[7]: DeviceArray(0., dtype=float32) In [8]: lax.exp(np.array([np.nan, 0.])) Out[8]: DeviceArray([inf, 1.], dtype=float32) ``` I filed a bug with the XLA folks.
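As an illustration of the approach taken in the patch above (not part of the original thread), here is a minimal standalone sketch of the same select-on-NaN trick written against the public jax.numpy API; `safe_logaddexp` is a hypothetical name used only for this example.

```python
import jax.numpy as jnp

def safe_logaddexp(x1, x2):
    # When x1 - x2 is NaN (e.g. both arguments are -inf, or both are +inf),
    # fall back to x1 + x2, which has the right limiting value; otherwise use
    # the usual max + log1p(exp(-|delta|)) formulation.
    amax = jnp.maximum(x1, x2)
    delta = x1 - x2
    return jnp.where(jnp.isnan(delta),
                     x1 + x2,
                     amax + jnp.log1p(jnp.exp(-jnp.abs(delta))))

print(safe_logaddexp(-jnp.inf, -jnp.inf))  # -inf rather than nan
```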
2019-11-04T22:29:44
google/jax
1,658
google__jax-1658
[ "1571" ]
d77cf175a9cbcbed54cb1e63d049dbb42348cf0c
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -1677,6 +1677,7 @@ def arange(start, stop=None, step=None, dtype=None): # Fall back to instantiating an ndarray in host memory return onp.arange(start, stop=stop, step=step, dtype=dtype) + def _wrap_numpy_nullary_function(f): """Adapts `f` to return a DeviceArray instead of an onp.ndarray. @@ -1687,24 +1688,58 @@ def wrapper(*args, **kwargs): return asarray(f(*args, **kwargs)) return wrapper + +@_wraps(onp.linspace) def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0): + """Implementation of linspace differentiable in start and stop args.""" lax._check_user_dtype_supported(dtype, "linspace") - try: - out = onp.linspace(start, stop, num, endpoint, retstep, dtype, axis) - if retstep: - return asarray(out[0]), out[1] - else: - return asarray(out) - except TypeError: # Old versions of onp may lack axis arg. - out = onp.linspace(start, stop, num, endpoint, retstep, dtype) - if retstep: - return moveaxis(asarray(out[0]), 0, axis), out[1] - else: - return moveaxis(asarray(out), 0, axis) + dtype = dtype or onp.result_type(start, stop, float(num)) + bounds_shape = list(lax.broadcast_shapes(shape(start), shape(stop))) + broadcast_start = broadcast_to(start, bounds_shape) + axis = len(bounds_shape) + axis + 1 if axis < 0 else axis + bounds_shape.insert(axis, 1) + iota_shape = [1,] * len(bounds_shape) + iota_shape[axis] = num + if endpoint: + delta = (stop - start) / (num - 1) + else: + delta = (stop - start) / num + out = (reshape(broadcast_start, bounds_shape) + + reshape(lax.iota(dtype, num), iota_shape) * + reshape(delta, bounds_shape)) + if retstep: + return lax.convert_element_type(out, dtype), delta + else: + return lax.convert_element_type(out, dtype) + + +@_wraps(onp.logspace) +def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, axis=0): + """Implementation of logspace differentiable in start and stop args.""" + lin = linspace(start, stop, num, + endpoint=endpoint, retstep=False, dtype=None, axis=axis) + if dtype is None: + return power(base, lin) + else: + return lax.convert_element_type(power(base, lin), dtype) + + +@_wraps(onp.geomspace) +def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0): + """Implementation of geomspace differentiable in start and stop args.""" + dtype = dtype or onp.result_type(start, stop, float(num), + zeros((), dtype)) + # follow the numpy geomspace convention for negative and complex endpoints + signflip = 1 - (1 - sign(real(start))) * (1 - sign(real(stop))) // 2 + res = signflip * logspace(log10(signflip * start), + log10(signflip * stop), num, + endpoint=endpoint, base=10.0, + dtype=dtype, axis=0) + if axis != 0: + res = moveaxis(res, 0, axis) + return lax.convert_element_type(res, dtype) -logspace = _wrap_numpy_nullary_function(onp.logspace) -geomspace = _wrap_numpy_nullary_function(onp.geomspace) @_wraps(onp.meshgrid) def meshgrid(*args, **kwargs):
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -2034,6 +2034,167 @@ def testMeshGrid(self, shapes, dtype, indexing, sparse, rng_factory): lnp_fun = partial(lnp.meshgrid, indexing=indexing, sparse=sparse) self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True) + def assertDowncastDtypeEqual(self, x, y): + """Heuristic for comparing numpy and jax downcast dtypes.""" + x_dt = jtu._dtype(x) + y_dt = jtu._dtype(y) + testing_tpu = jtu.device_under_test().startswith("tpu") + testing_x32 = not jax.config.read('jax_enable_x64') + to32dtype = {onp.int64: onp.int32, onp.uint64: onp.uint32, + onp.float64: onp.float32, onp.float128: onp.float32, + onp.complex128: onp.complex64, onp.complex256: onp.complex64} + to32dtype = {onp.dtype(k): onp.dtype(v) for k,v in to32dtype.items()} + if testing_tpu or testing_x32: + x_dt = to32dtype.get(x_dt, x_dt) + y_dt = to32dtype.get(y_dt, y_dt) + assert x_dt == y_dt, "truncated dtypes %s != %s" % (x_dt, y_dt) + + @parameterized.named_parameters( + jtu.cases_from_list( + {"testcase_name": ("_start_shape={}_stop_shape={}_num={}_endpoint={}" + "_retstep={}_dtype={}").format( + start_shape, stop_shape, num, endpoint, retstep, dtype), + "start_shape": start_shape, "stop_shape": stop_shape, + "num": num, "endpoint": endpoint, "retstep": retstep, + "dtype": dtype, "rng_factory": rng_factory} + for start_shape in [(), (2,), (2, 2)] + for stop_shape in [(), (2,), (2, 2)] + for num in [5, 20] + for endpoint in [True, False] + for retstep in [True, False] + for dtype in number_dtypes + [None,] + for rng_factory in [jtu.rand_default])) + def testLinspace(self, start_shape, stop_shape, num, endpoint, + retstep, dtype, rng_factory): + rng = rng_factory() + # relax default tolerances slightly + tol = tolerance(dtype if dtype else onp.float32) * 10 + args_maker = self._GetArgsMaker(rng, + [start_shape, stop_shape], + [dtype, dtype]) + start, stop = args_maker() + ndim = len(onp.shape(start + stop)) + for axis in range(-ndim, ndim): + lnp_op = lambda start, stop: lnp.linspace( + start, stop, num, + endpoint=endpoint, retstep=retstep, dtype=dtype, axis=axis) + onp_op = lambda start, stop: onp.linspace( + start, stop, num, + endpoint=endpoint, retstep=retstep, dtype=dtype, axis=axis) + self._CheckAgainstNumpy(onp_op, lnp_op, args_maker, + check_dtypes=False, tol=tol) + # Check dtype equivalence within expected 32bit downcasting. + a, b = lnp_op(start, stop), onp_op(start, stop) + if retstep: + self.assertDowncastDtypeEqual(a[0], b[0]) + self.assertDowncastDtypeEqual(a[1], b[1]) + else: + self.assertDowncastDtypeEqual(a, b) + # floating-point compute between jitted platforms and non-jit + rounding + # cause unavoidable variation in integer truncation for some inputs. 
+ if dtype in (inexact_dtypes + [None,]): + self._CompileAndCheck(lnp_op, args_maker, + check_dtypes=False, atol=tol, rtol=tol) + + @parameterized.named_parameters( + jtu.cases_from_list( + {"testcase_name": ("_start_shape={}_stop_shape={}_num={}_endpoint={}" + "_base={}_dtype={}").format( + start_shape, stop_shape, num, endpoint, base, dtype), + "start_shape": start_shape, + "stop_shape": stop_shape, + "num": num, "endpoint": endpoint, "base": base, + "dtype": dtype, "rng_factory": rng_factory} + for start_shape in [(), (2,), (2, 2)] + for stop_shape in [(), (2,), (2, 2)] + for num in [5, 20] + for endpoint in [True, False] + for base in [10.0, 2, onp.e] + for dtype in number_dtypes + [None,] + for rng_factory in [jtu.rand_default])) + def testLogspace(self, start_shape, stop_shape, num, + endpoint, base, dtype, rng_factory): + if (dtype in int_dtypes and + jtu.device_under_test() == "gpu" and + not FLAGS.jax_enable_x64): + raise unittest.SkipTest("GPUx32 truncated exponentiation" + " doesn't exactly match other platforms.") + rng = rng_factory() + # relax default tolerances slightly + tol = tolerance(dtype if dtype else onp.float32) * 10 + args_maker = self._GetArgsMaker(rng, + [start_shape, stop_shape], + [dtype, dtype]) + start, stop = args_maker() + ndim = len(onp.shape(start + stop)) + for axis in range(-ndim, ndim): + lnp_op = lambda start, stop: lnp.logspace( + start, stop, num, endpoint=endpoint, base=base, dtype=dtype, axis=axis) + onp_op = lambda start, stop: onp.logspace( + start, stop, num, endpoint=endpoint, base=base, dtype=dtype, axis=axis) + self._CheckAgainstNumpy(onp_op, lnp_op, args_maker, + check_dtypes=False, tol=tol) + # Check dtype equivalence within expected 32bit downcasting. + a, b = lnp_op(start, stop), onp_op(start, stop) + self.assertDowncastDtypeEqual(a, b) + if dtype in (inexact_dtypes + [None,]): + # Why do compiled and op-by-op float16 np.power numbers differ + # slightly more than expected? + atol = tol if dtype != onp.float16 else 10 * tol + self._CompileAndCheck(lnp_op, args_maker, + check_dtypes=False, atol=atol, rtol=tol) + + @parameterized.named_parameters( + jtu.cases_from_list( + {"testcase_name": ("_start_shape={}_stop_shape={}_num={}_endpoint={}" + "_dtype={}").format( + start_shape, stop_shape, num, endpoint, dtype), + "start_shape": start_shape, + "stop_shape": stop_shape, + "num": num, "endpoint": endpoint, + "dtype": dtype, "rng_factory": rng_factory} + for start_shape in [(), (2,), (2, 2)] + for stop_shape in [(), (2,), (2, 2)] + for num in [5, 20] + for endpoint in [True, False] + # NB: numpy's geomspace gives nonsense results on integer types + for dtype in inexact_dtypes + [None,] + for rng_factory in [jtu.rand_default])) + def testGeomspace(self, start_shape, stop_shape, num, + endpoint, dtype, rng_factory): + rng = rng_factory() + # relax default tolerances slightly + tol = tolerance(dtype if dtype else onp.float32) * 10 + def args_maker(): + """Test the set of inputs onp.geomspace is well-defined on.""" + start, stop = self._GetArgsMaker(rng, + [start_shape, stop_shape], + [dtype, dtype])() + # onp.geomspace can't handle differently ranked tensors + # w. negative numbers! 
+ start, stop = lnp.broadcast_arrays(start, stop) + if dtype in complex_dtypes: + return start, stop + # to avoid NaNs, non-complex start and stop cannot + # differ in sign, elementwise + start = start * lnp.sign(start) * lnp.sign(stop) + return start, stop + start, stop = args_maker() + ndim = len(onp.shape(start + stop)) + for axis in range(-ndim, ndim): + lnp_op = lambda start, stop: lnp.geomspace( + start, stop, num, endpoint=endpoint, dtype=dtype, axis=axis) + onp_op = lambda start, stop: onp.geomspace( + start, stop, num, endpoint=endpoint, dtype=dtype, axis=axis) + self._CheckAgainstNumpy(onp_op, lnp_op, args_maker, + check_dtypes=False, tol=tol) + # Check dtype equivalence within expected 32bit downcasting. + a, b = lnp_op(start, stop), onp_op(start, stop) + self.assertDowncastDtypeEqual(a, b) + if dtype in (inexact_dtypes + [None,]): + self._CompileAndCheck(lnp_op, args_maker, + check_dtypes=False, atol=tol, rtol=tol) + def testDisableNumpyRankPromotionBroadcasting(self): try: prev_flag = FLAGS.jax_numpy_rank_promotion
Cannot take gradient of function using linspace I've found I will get an exception "Tracer can't be used with raw numpy function" when I use linspace where one of the arguments to linspace is a parameter which needs to be differentiated with respect to. Here is a MWE that shows the problem ``` import jax.numpy as jnp import jax def testlinspace(x, *args): #does not work out = jnp.linspace(x[0],x[1],50) #linspace depends on parameters out = x[2]*jnp.square(out) out = jnp.sum(out) return out def testlinspace2(x, *args): #works out = jnp.linspace(1.,50.,50) #linspace is just some array of numbers out = x[2]*jnp.square(out)+x[0]*x[1] out = jnp.sum(out) return out x = [2.,3.,1.5] testobj = testlinspace(x) testgrad = jax.grad(testlinspace) testobj2 = testlinspace2(x) testgrad2 = jax.grad(testlinspace2) print(testgrad(x)) #throws exception, note gradient using finite differences is DeviceArray([174.48979293, 200.51020897, 316.83673001]) print(testgrad2(x)) #works, gives DeviceArray([ 149.99968698, 99.99930626, 42924.99943404]) ``` So the problem is you can't do something like jax.numpy.linspace(x[0],10,50) because the linspace is depending on the parameters 'x' being reverse-mode differentiated.
Yeah, we seem to just be wrapping the original numpy implementation. The below is a draft of a native differentiable form that mirrors the numpy api close to machine precision for an extensive set of all the linspace arguments that I could think to test. ```python import numpy as onp from jax import lax import jax.numpy as np def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0): """Implementation of linspace differentiable w.r.t. start and stop args.""" lax._check_user_dtype_supported(dtype, "linspace") dtype = onp.float32 if dtype is None else dtype bounds_shape = list(lax.broadcast_shapes(onp.shape(start), onp.shape(stop))) axis = len(bounds_shape) if axis == -1 else axis bounds_shape.insert(axis, 1) iota_shape = [1,] * len(bounds_shape) iota_shape[axis] = num delta = (stop - start) / num if endpoint: delta *= num / (num - 1) out = (np.reshape(start, bounds_shape) + np.reshape(lax.iota(dtype, num), iota_shape) * np.reshape(delta, bounds_shape)) if retstep: return np.array(out, dtype=dtype), delta else: return np.array(out, dtype=dtype) ``` you can see it working with your gradient test in the public colab notebook: https://colab.research.google.com/drive/1bdyUypR-S2stCe2H_LyrEBB4sSFZ9ssL If this looks good to others we could upstream it to replace the current wrapper implementation. I can't see the notebook My apologies, this should be a public link (edited above as well): https://colab.research.google.com/drive/1bdyUypR-S2stCe2H_LyrEBB4sSFZ9ssL Sorry it took me awhile to reply. I looked at your solution and verified it worked for my MWE. I tested this for a more complex example as well and it also worked for me there. So in my opinion this should replace the existing jax linspace. Please let me know if you want me to close this issue.
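As a quick sanity check of the differentiable linspace (a sketch, assuming a JAX build that includes the patch above): with `endpoint=True`, each of the `n` points is an affine function of the endpoints, so the gradient of the sum with respect to either endpoint is `n/2`.

```python
import jax
import jax.numpy as jnp

def total(start, stop):
    # sum of 50 points, each affine in (start, stop); gradient is 25 for each
    return jnp.sum(jnp.linspace(start, stop, 50))

print(jax.grad(total, argnums=(0, 1))(2.0, 3.0))  # expect (25.0, 25.0)
```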
2019-11-10T07:19:33
google/jax
1,664
google__jax-1664
[ "810" ]
b1f59228c3ef64b4a4a0de5526a996ec01968c33
diff --git a/jax/lax/lax_control_flow.py b/jax/lax/lax_control_flow.py --- a/jax/lax/lax_control_flow.py +++ b/jax/lax/lax_control_flow.py @@ -617,34 +617,81 @@ def _scan_partial_eval(trace, *tracers, **kwargs): carry_uk = _map(operator.or_, carry_uk, carry_uk_out) else: assert False, "Fixpoint not reached" + num_res = len(jaxpr_1.out_avals) - len(jaxpr_2.out_avals) + + # The residuals are treated as extensive outputs of jaxpr_1 (and extensive + # inputs to jaxpr_2), but residuals that are loop-invariant can be hoisted. + # TODO(mattjj): hoist other loop-invariant values here too (instantiate=False) + invariant_pvals = [pe.PartialVal((None, core.unit if uk else t.pval[1])) + for uk, t in zip(unknowns[:num_consts], tracers[:num_consts])] + other_pvals = [pe.PartialVal((a, core.unit)) for a in jaxpr_1.in_avals[num_consts:]] + in_pvals_1 = invariant_pvals + other_pvals + untyped_jaxpr_1, out_pvals_1, consts_1 = pe.trace_to_jaxpr( + lu.wrap_init(core.jaxpr_as_fun(jaxpr_1)), in_pvals_1, + instantiate=[True] * (num_carry + num_ys) + [False] * num_res) + const_avals_1 = [raise_to_shaped(core.get_aval(c)) for c in consts_1] + in_avals_1 = [core.abstract_unit] * num_consts + jaxpr_1.in_avals[num_consts:] + out_avals_1 = [core.abstract_unit if pv is None else pv for pv, c in out_pvals_1] + jaxpr_1_opt = pe.TypedJaxpr(pe.closure_convert_jaxpr(untyped_jaxpr_1), + (), const_avals_1 + in_avals_1, out_avals_1) + num_consts_1 = num_consts + len(consts_1) + # any now-known residuals are intensive, so we want to revise jaxpr_2 to take + # those inputs as constants rather than as extensive inputs + _, _, res_pvals = split_list(out_pvals_1, [num_carry, num_ys]) + intensive_residuals = [const for pv, const in res_pvals if pv is None] + move = [False] * len(jaxpr_1.in_avals) + [pv is None for pv, _ in res_pvals] + jaxpr_2_opt = _move_binders_to_front(jaxpr_2, move) + num_consts_2 = num_consts + len(intensive_residuals) + + in_consts = (list(consts_1) + [core.unit] * num_consts + + [core.unit if uk else t.pval[1] + for uk, t in zip(unknowns[num_consts:], tracers[num_consts:])]) + linear_1 = ([False] * len(consts_1) + [True] * num_consts + + [lin or uk for uk, lin in zip(unknowns[num_consts:], linear[num_consts:])]) + out_flat = scan_p.bind( + *in_consts, forward=forward, length=length, jaxpr=jaxpr_1_opt, + num_consts=num_consts_1, num_carry=num_carry, linear=linear_1) + out_carry, ys, res_and_units = split_list(out_flat, [num_carry, num_ys]) + extensive_residuals = [r for r, (pv, _) in zip(res_and_units, res_pvals) if pv is not None] - in_consts = [core.unit if uk else t.pval[1] for uk, t in zip(unknowns, tracers)] new_tracers = [trace.instantiate_const(t) if uk else trace.new_instantiated_literal(core.unit) for uk, t in zip(unknowns, tracers)] - carry_avals, y_avals = split_list(jaxpr.out_avals, [num_carry]) ys_avals = _map(partial(_promote_aval_rank, length), y_avals) out_avals = carry_avals + ys_avals out_pvs = [aval if uk else None for aval, uk in zip(out_avals, out_uk)] - linear_1 = [lin or uk for uk, lin in zip(unknowns, linear)] - out_flat = scan_p.bind( - *in_consts, forward=forward, length=length, jaxpr=jaxpr_1, - num_consts=num_consts, num_carry=num_carry, linear=linear_1) - out_carry, ys, residuals = split_list(out_flat, [num_carry, num_ys]) out_consts = out_carry + ys - residual_tracers = _map(trace.new_instantiated_const, residuals) + int_res_tracers = _map(trace.new_instantiated_const, intensive_residuals) + ext_res_tracers = _map(trace.new_instantiated_const, extensive_residuals) out_tracers = 
[pe.JaxprTracer(trace, pe.PartialVal((pv, const)), None) for pv, const in zip(out_pvs, out_consts)] - linear_2 = ([lin or not uk for uk, lin in zip(unknowns, linear)] - + [False] * len(residual_tracers)) - eqn = pe.new_jaxpr_eqn(new_tracers + residual_tracers, out_tracers, scan_p, - (), dict(forward=forward, length=length, jaxpr=jaxpr_2, - num_consts=num_consts, num_carry=num_carry, - linear=linear_2)) + linear_2 = ([False] * len(int_res_tracers) + + [lin or not uk for uk, lin in zip(unknowns, linear)] + + [False] * len(ext_res_tracers)) + eqn = pe.new_jaxpr_eqn(int_res_tracers + new_tracers + ext_res_tracers, + out_tracers, scan_p, (), + dict(forward=forward, length=length, jaxpr=jaxpr_2_opt, + num_consts=num_consts_2, + num_carry=num_carry, linear=linear_2)) for t in out_tracers: t.recipe = eqn return out_tracers +def _move_binders_to_front(typed_jaxpr, to_move): + assert not typed_jaxpr.jaxpr.constvars and not typed_jaxpr.jaxpr.freevars + assert len(typed_jaxpr.in_avals) == len(to_move) + new_invars = _move_to_front(typed_jaxpr.jaxpr.invars, to_move) + new_jaxpr = core.Jaxpr((), (), new_invars, typed_jaxpr.jaxpr.outvars, + typed_jaxpr.jaxpr.eqns) + new_in_avals = _move_to_front(typed_jaxpr.in_avals, to_move) + new_typed_jaxpr = core.TypedJaxpr(new_jaxpr, typed_jaxpr.literals, + new_in_avals, typed_jaxpr.out_avals) + return new_typed_jaxpr + +def _move_to_front(lst, to_move): + return ([elt for elt, move in zip(lst, to_move) if move] + + [elt for elt, move in zip(lst, to_move) if not move]) + def _promote_aval_rank(sz, aval): if aval is core.abstract_unit: return core.abstract_unit @@ -655,54 +702,66 @@ def _scan_transpose(cts, *args, **kwargs): forward, length, num_consts, num_carry, jaxpr, linear = split_dict( kwargs, ["forward", "length", "num_consts", "num_carry", "jaxpr", "linear"]) - # we can only transpose scans for which the nonlinear values appear in xs + # we've only implemented transposing scans with specific lin/nonlin patterns consts_lin, init_lin, xs_lin = split_list(linear, [num_consts, num_carry]) - num_lin = sum(xs_lin) - if not all(consts_lin) or not all(init_lin) or not all(xs_lin[:num_lin]): + num_ires = len(consts_lin) - sum(consts_lin) + num_eres = len(xs_lin) - sum(xs_lin) + if consts_lin != [False] * num_ires + [True] * (len(consts_lin) - num_ires): + raise NotImplementedError + if xs_lin != [True] * (len(xs_lin) - num_eres) + [False] * num_eres: + raise NotImplementedError + if not all(init_lin): raise NotImplementedError - consts, init, xs, res = split_list(args, [num_consts, num_carry, num_lin]) - assert not any(r is ad.undefined_primal for r in res) + consts, init, xs = split_list(args, [num_consts, num_carry]) + ires, consts = split_list(consts, [num_ires]) + xs, eres = split_list(xs, [sum(xs_lin)]) + assert not any(r is ad.undefined_primal for r in ires) + assert not any(r is ad.undefined_primal for r in eres) carry_avals, y_avals = split_list(jaxpr.out_avals, [num_carry]) ys_avals = _map(partial(_promote_aval_rank, length), y_avals) ct_carry, ct_ys = split_list(cts, [num_carry]) ct_carry = _map(ad.instantiate_zeros_aval, carry_avals, ct_carry) ct_ys = _map(ad.instantiate_zeros_aval, ys_avals, ct_ys) - ct_consts = _map(ad_util.zeros_like_aval, jaxpr.in_avals[:num_consts]) + ct_consts = _map(ad_util.zeros_like_aval, jaxpr.in_avals[num_ires:num_consts]) - # jaxpr :: [T d] -> [T c] -> [T a, res] -> ([T c], [T b]) - # jaxpr_trans :: [] -> [CT d, CT c] -> [CT b, res] -> ([CT d, CT c], [CT a]) - jaxpr_trans = _transpose_jaxpr(num_consts, len(res), jaxpr) - 
linear_trans = ([True] * (len(ct_consts) + len(ct_carry) + len(ct_ys)) - + [False] * len(res)) + # jaxpr :: [ires, T d] -> [T c] -> [T a, eres] -> ([T c], [T b]) + # jaxpr_trans :: [ires] -> [CT d, CT c] -> [CT b, eres] -> ([CT d, CT c], [CT a]) + jaxpr_trans = _transpose_jaxpr(num_ires, num_consts - num_ires, num_eres, jaxpr) + linear_trans = ([False] * num_ires + + [True] * (len(ct_consts) + len(ct_carry) + len(ct_ys)) + + [False] * num_eres) outs = scan_p.bind( - *(ct_consts + ct_carry + ct_ys + res), forward=not forward, length=length, - jaxpr=jaxpr_trans, num_consts=0, num_carry=num_consts+num_carry, - linear=linear_trans) - ct_consts, ct_init, ct_xs = split_list(outs, [num_consts, num_carry]) - return ct_consts + ct_init + ct_xs + [None] * len(res) - -# transpose_jaxpr :: ([c, a, res] -> b) -> ([CT c, CT b, res] -> [CT c, CT a] -def _transpose_jaxpr(num_c, num_res, jaxpr): - num_a = len(jaxpr.in_avals) - num_c - num_res - c_avals, a_avals, res_avals = split_list(jaxpr.in_avals, [num_c, num_a]) + *(ires + ct_consts + ct_carry + ct_ys + eres), forward=not forward, + length=length, jaxpr=jaxpr_trans, num_consts=num_ires, + num_carry=num_consts-num_ires+num_carry, linear=linear_trans) + ct_consts, ct_init, ct_xs = split_list(outs, [num_consts - num_ires, num_carry]) + return [None] * num_ires + ct_consts + ct_init + ct_xs + [None] * num_eres + +# transpose_jaxpr :: ([res1, c, a, res2] -> b) +# -> ([res1, CT c, CT b, res2] -> [CT c, CT a]) +def _transpose_jaxpr(num_res1, num_c, num_res2, jaxpr): + num_a = len(jaxpr.in_avals) - num_res1 - num_c - num_res2 + res1_avals, c_avals, a_avals, res2_avals = split_list( + jaxpr.in_avals, [num_res1, num_c, num_a]) num_b = len(jaxpr.out_avals) b_avals = list(jaxpr.out_avals) @lu.wrap_init - def transposed(*cbar_bbar_res): - c_bar, b_bar, res = split_list(cbar_bbar_res, [num_c, num_b]) - primals = [ad.undefined_primal] * (num_c + num_a) + res + def transposed(*res1_cbar_bbar_res2): + res1, c_bar, b_bar, res2 = split_list( + res1_cbar_bbar_res2, [num_res1, num_c, num_b]) + primals = res1 + [ad.undefined_primal] * (num_c + num_a) + res2 _, cbar_abar = ad.backward_pass(jaxpr.jaxpr, jaxpr.literals, (), primals, b_bar) - new_c_bar, a_bar, _ = split_list(cbar_abar, [num_c, num_a]) + _, new_c_bar, a_bar, _ = split_list(cbar_abar, [num_res1, num_c, num_a]) a_bar = _map(ad.instantiate_zeros_aval, a_avals, a_bar) c_bar = _map(ad.instantiate_zeros_aval, c_avals, _map(ad.add_tangents, c_bar, new_c_bar)) return c_bar + a_bar - return _make_typed_jaxpr(transposed, c_avals + b_avals + res_avals) + return _make_typed_jaxpr(transposed, res1_avals + c_avals + b_avals + res2_avals) def _make_typed_jaxpr(traceable, in_avals): pvals = [pe.PartialVal((aval, core.unit)) for aval in in_avals]
diff --git a/tests/lax_control_flow_test.py b/tests/lax_control_flow_test.py --- a/tests/lax_control_flow_test.py +++ b/tests/lax_control_flow_test.py @@ -1365,6 +1365,20 @@ def bad_matvec_usage(a): with self.assertRaisesRegex(ValueError, re.escape("matvec() output shapes")): api.jvp(bad_matvec_usage, (1.0,), (1.0,)) + def testIssue810(self): + def loss(A): + def step(x, i): + return np.matmul(A, x), None + init_x = np.zeros(A.shape[-1:]) + last_x, _ = lax.scan(step, init_x, np.arange(10)) + return np.sum(last_x) + + A = np.zeros((3, 3)) + # The second DUS was unnecessarily replicating A across time. + # We check XLA because _scan_impl is "underneath" the jaxpr language. + s = str(api.xla_computation(api.grad(loss))(A).GetHloText()) + assert s.count("dynamic-update-slice(") < 2 + if __name__ == '__main__': absltest.main()
`lax.scan` is ~6x slower to run than hand-written loops This is on a GPU backend, haven't tried on others. https://colab.research.google.com/drive/1N1jrvGNFRhTnYLOCL6vXUzJN3pRR3pdr Minimal repro below ``` from jax import numpy as np from jax import grad, jit, vmap, lax from jax import random as jax_random import numpy as onp @jit def rewards_to_go(rewards, mask, gamma=0.99): r"""Computes rewards to go. Args: rewards: np.ndarray of shape (B, T) of rewards. mask: np.ndarray of shape (B, T) of mask for the rewards. gamma: float, discount factor. Returns: rewards to go, np.ndarray of shape (B, T). """ B, T = rewards.shape # pylint: disable=invalid-name,unused-variable masked_rewards = rewards * mask # (B, T) # Compute r2g_{T-1} at the start and then compute backwards in time. r2gs = [masked_rewards[:, -1]] # Go from T-2 down to 0. for t in reversed(range(T - 1)): r2gs.append(masked_rewards[:, t] + (gamma * r2gs[-1])) # The list should have length T. assert T == len(r2gs) # First we stack them in the correct way to make it (B, T), but these are # still from newest (T-1) to oldest (0), so then we flip it on time axis. return np.flip(np.stack(r2gs, axis=1), axis=1) @jit def scan_rewards_to_go(rewards, mask, gamma=0.99): masked_rewards = rewards * mask # (B, T) reversed_rewards = np.flip(masked_rewards, axis=1) # (B, T) flipped on time. rrt = np.transpose(reversed_rewards) # (T, B) transpose to scan over time. def discounting_add(carry, reward): x = reward + (gamma * carry) return x, x _, ys = lax.scan(discounting_add, np.zeros_like(rrt[0], dtype=np.float32), rrt.astype(np.float32)) # ys is (T, B) and T is in reverse order. return np.flip(np.transpose(ys), axis=1) B, T = 16, 128 num_examples = 100 rewards = [] pvs = [] mask = [] for _ in range(num_examples): rewards.append(onp.random.randn(B, T)) pvs.append(onp.random.randn(B, T+1)) ones = onp.full((B, T), 1, dtype=onp.int32) for one in ones: l = onp.random.randint(0, T) one[range(l,T)] = 0 mask.append(ones) ``` Now time the invocations: ``` %timeit [rewards_to_go(rewards[i], mask[i]) for i in range(num_examples)] ``` and ``` %timeit [scan_rewards_to_go(rewards[i], mask[i]) for i in range(num_examples)] ```
I have a related scan file here: https://gist.github.com/cshesse/73f27d42c356e6e763275432422dacbe On CPU: ``` reward_to_go_jax 0.028926010942086577 reward_to_go_scipy 0.001616921043023467 scipy speedup 17.889563047554923 ``` In the CPU case, for large values of N, the performance gets much closer to scipy lfilter. On GPU: ``` reward_to_go_jax 5.656240948010236 reward_to_go_scipy 0.001609566854313016 scipy speedup 3514.1385602304745 ``` `scipy.signal.lfilter` may not be a useful benchmark to compare against, but the jax version seems to get significantly slower on GPU. This is using `git+https://github.com/google/jax.git@11c512a194add25ade20dbfacf635d6e0834eba3` and `https://storage.googleapis.com/jax-wheels/cuda92/jaxlib-0.1.15-cp37-none-linux_x86_64.whl` @jlebar Any gut reactions here? I've been meaning to dump this JAX code (both in the OP and in @cshesse's gist) into HLO text for easier repros, but I haven't gotten to it yet. Let me know if you can take a look without that step :) Wild guess: could these slowdowns be related to the TupleThunk issue you just fixed? It looks like this benchmark is including compilation time? > Wild guess: could these slowdowns be related to the TupleThunk issue you just fixed? Meaning, the TupleThunk change makes things slower? That would be surprising. Possible, I suppose, but I don't immediately see how. Other than the benchmarking methodology question I don't have an immediate guess here. I don't know enough about the difference in HLO implementation of the two functions to have any ideas, without actually looking at the HLO. I'm always trying to think bigger picture, though. We're getting a fair number of these performance bugs, which is genuinely great. And the bugs are also fairly high quality. But how do we streamline the process of receiving them, triaging them, and acting upon them, for you and for us? I'm wondering if e.g. we could have a script/document that people can follow when they're reporting a performance bug, which will capture the textual HLO and xla_hlo_profile? Ideally it could capture some sort of execution trace too, but I'm not sure how much of that is OSS at the moment. Thinking even bigger, if JAX had some sort of benchmark framework then it could take care of ensuring that we ignore the compilation time, in addition to doing all of the things above for bug reports. > Meaning, the TupleThunk change makes things slower? That would be surprising. Possible, I suppose, but I don't immediately see how. Oh, sorry, I meant the opposite but my wording was bad: these numbers are all from before the TupleThunk fix, so I was guessing that your TupleThunk fix could have improved things. Great thoughts on the big picture. We should at least have a doc, and I bet we can make a script, including maybe an internal one that collects a trace (which we can run ourselves on small repros). Maybe we (on the JAX side) can take this opportunity to start developing those things! Thanks for weighing in, even with very little info from our side here. In my script, I call the function once during setup, is that not enough to avoid capturing compilation time? 
For a benchmarking script, maybe a `jax.experimental.benchmark(fn)` vaguely like go bench: ```python import time import jaxlib first_run = True def benchmark(fn, duration_sec=10): global first_run if first_run: print("jax version", jax.__version__) print("jaxlib version", jaxlib.__version__) first_run = False fn(1) n = 1 while True: start = time.time() fn(n) elapsed = time.time() - start if elapsed > duration_sec: break n *= 10 print("%s %d calls %f us/call" % (fn.__name__, n, elapsed / n * 1e6)) def benchmark_reward_to_go_scipy(n): for _ in range(n): reward_to_go_scipy(rews, 0.99) def benchmark_reward_to_go_jax(n): for _ in range(n): reward_to_go_jax(jrews, 0.99).block_until_ready() benchmark(benchmark_reward_to_go_scipy) benchmark(benchmark_reward_to_go_jax) ``` > In my script, I call the function once during setup, is that not enough to avoid capturing compilation time? Should be, yes. On Thu, Jun 6, 2019 at 9:30 AM cshesse <[email protected]> wrote: > In my script, I call the function once during setup, is that not enough to > avoid capturing compilation time? > > For a benchmarking script, maybe a jax.experimental.benchmark(fn) vaguely > like go bench setup: > > import time > import jaxlib > > first_run = True > > > def benchmark(fn, duration_sec=10): > global first_run > if first_run: > print("jax version", jax.__version__) > print("jaxlib version", jaxlib.__version__) > first_run = False > fn(1) > n = 1 > while True: > start = time.time() > fn(n) > elapsed = time.time() - start > if elapsed > duration_sec: > break > n *= 10 > print("%s %d calls %f us/call" % (fn.__name__, n, elapsed / n * 1e6)) > > > def benchmark_reward_to_go_scipy(n): > for _ in range(n): > reward_to_go_scipy(rews, 0.99) > > > def benchmark_reward_to_go_jax(n): > for _ in range(n): > reward_to_go_jax(jrews, 0.99) > > > benchmark(benchmark_reward_to_go_scipy) > benchmark(benchmark_reward_to_go_jax) > > — > You are receiving this because you were mentioned. > Reply to this email directly, view it on GitHub > <https://github.com/google/jax/issues/810?email_source=notifications&email_token=AABEZB5LLNWV7NJYD55MXCDPZE3QVA5CNFSM4HSJ4MHKYY3PNVWWK3TUL52HS4DFVREXG43VMVBW63LNMVXHJKTDN5WW2ZLOORPWSZGODXDNLWY#issuecomment-499570139>, > or mute the thread > <https://github.com/notifications/unsubscribe-auth/AABEZBYK2I4BXXAUVG7CF7DPZE3QVANCNFSM4HSJ4MHA> > . > The main problem with the example in the first comment is that there is almost no useful work in the loop (a elementwise multiply and an add of `f32[16]` vector). You won't get good GPU utilization from this no matter what we do; CPU will be faster. The loop benchmark (using `scan`) is mostly measuring loop overheads; we could probably lower them but I am wondering if this code is actually representative of something you want to do. Put another way: even if the `scan` variant is 6x slower, does it actually matter? @hawkinsp I've ran into this with bigger workloads. Specifically, a scan implementation lead to about a ~4x slow down, ~6x larger GPU memory usage, ~4-6x higher transfer rates as reported by `nvidia-smi` than an unrolled implementation. I'm not so certain that this is just an issue of higher overhead. For a bit of context, the workload at each iterations was: 1. `extract_image_patch(Nx28x28x2)` 2. `tensordot(Nx28x28x9, Nx28x28x9x10 -> Nx28x28x10)` 3. `max(Nx28x28x10, axis=-1)` with batch size `N=10`. While the scan version compiles very quickly, the slow down makes it completely impractical. This was on an Nvidia 2080 Ti. 
Additional observations: for the workload I describe earlier, if I run 25 unrolled steps per scan iteration, I still see a drastic increase in GPU usage, memory usage and CPU-GPU communication. Everything I've observed seems to hint at some memory reuse issue or some arrays placed on the wrong device issue. This issue is a big blocker for me. If there is anything I can do to help fix it, please let me know. Note that while the reported GPU usage increases (from ~60% to 99%) when going from unrolled to scan, the actual performance is much much worse. @gehring Would it be possible to get us a self-contained example that demonstrates your slowdown? There are a few known drawbacks of XLA:GPU loops, but `scan` is typically supposed to be able to avoid the worst of it (raw `while_loop` loops have to bring their condition boolean back to the host at each step, while `scan` always has a fixed trip count). @jekbradbury I'll try to strap a standalone example by the end of the week. In the meantime, if it makes any difference, I realize I forgot to mention that the workload I described is part of a SGD (pure python) loop, doing a forward and backwards pass in each jitted call. @jekbradbury It ended up being much easier to do than I thought. The code where I first encountered this is somewhat complex but I was able to reproduce it with just a scan looping `amat @ x + bvec`. I saved it in a [notebook](https://colab.research.google.com/gist/gehring/bee2f5bd37c108c7620404e2c47652ed/jax-scan-issue.ipynb) for convenience. On Google's public colab (GPU runtime), I get the following results: ``` Time taken unrolled: 12.955157 sec Time taken scan: 197.164844 sec ``` I tried this example on a different workstation (with an old Quadro K2200) and observed pretty much the same symptoms as described earlier (using `nvidia-smi`). Thanks! This looks like a performance issue with JVP and possibly transpose of scan, which emit `dynamic-slice` and `dynamic-update-slice` operations (respectively) that appear to be the bottleneck here (and seemingly not necessary, since they don't show up in the unrolled version). Going to look into it, but I don't know this part of the code very well. @jekbradbury Any progress or updates on this? No pressure, I'm just trying to get an idea as to whether a fix is in the works or if I should commit to a workaround. I had a partial solution in the linked PR, but we ended up deciding against that approach and sketched out a more direct (and complete) solution two weeks ago. I haven’t implemented it yet, though it’s been on my todos; I expect I’ll have a new PR sometime in the next week or so.
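The regression test added in the PR above (testIssue810) checks this at the HLO level; a minimal standalone version of that check, using the same 2019-era `xla_computation(...).GetHloText()` API as the test, looks like:

```python
import jax
import jax.numpy as jnp
from jax import lax

def loss(A):
    def step(x, i):
        # A is loop-invariant, so its residual should not be stacked over time.
        return jnp.matmul(A, x), None
    init_x = jnp.zeros(A.shape[-1:])
    last_x, _ = lax.scan(step, init_x, jnp.arange(10))
    return jnp.sum(last_x)

A = jnp.zeros((3, 3))
hlo = jax.xla_computation(jax.grad(loss))(A).GetHloText()
# With the fix, A is treated as an intensive residual, so there should be at
# most one dynamic-update-slice in the gradient computation.
print(hlo.count("dynamic-update-slice("))
```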
2019-11-11T20:44:19
google/jax
1,694
google__jax-1694
[ "1688" ]
e95098de546ad53a1a0153e047bbb1c5899bf261
diff --git a/jax/interpreters/pxla.py b/jax/interpreters/pxla.py --- a/jax/interpreters/pxla.py +++ b/jax/interpreters/pxla.py @@ -16,7 +16,7 @@ from __future__ import division from __future__ import print_function -from collections import namedtuple +from collections import namedtuple, defaultdict from contextlib import contextmanager import itertools as it import operator as op @@ -49,16 +49,16 @@ def identity(x): return x def shard_args(backend, devices, assignments, axis_size, tuple_args, args): - """Shard an argument data array arg along its leading axis. + """Shard each argument data array along its leading axis. Args: - devices: list of Devices of length num_replicas mapping a logical replica - index to a physical device. - assignments: replica to shard assignment. - axis_size: int, size of the axis to be sharded. + backend: the platform to be used + devices: list of Devices mapping replica index to a physical device. + assignments: list of integers with the same length as `devices` mapping + replica index to an index along the leading axis (i.e. a shard). + axis_size: int, size of the leading axis to be sharded. args: a sequence of JaxTypes representing arguments to be sharded along - their leading axes (or the leading axess of their leaves in the tuple - case) and placed on `devices`. + their leading axes and placed on `devices`. Returns: A list of device buffers with the same length as `devices` indexed by @@ -72,12 +72,28 @@ def shard_args(backend, devices, assignments, axis_size, tuple_args, args): # inline handling for ShardedDeviceArray as a special case for performance if type(arg) is ShardedDeviceArray: if nrep == len(arg.device_buffers): + # The argument is already prepared for the right number of replicas, so + # we just ensure that buf[r] is on devices[r] for each replica index r + # TODO(mattjj): compared to the other case, this logic has less looping + # but could incur more device-to-device data movement for r, buf in enumerate(arg.device_buffers): - buffers[r][a] = (buf if buf.device() == devices[r] - else buf.copy_to_device(devices[r])) + buffers[r][a] = buf if buf.device() == devices[r] else buf.copy_to_device(devices[r]) else: + # The argument is prepared for a different number of replicas, so for + # each of our replica indices we check if there's already a buffer with + # the correct logical assignment on the correct device, and if not just + # copy one of them + prev_assignments = assign_shards_to_replicas(len(arg.device_buffers), axis_size) + candidates = defaultdict(list) for r, buf in enumerate(arg.device_buffers): - buffers[r][a] = xla.device_put(x[assignments[r]], devices[r], backend=backend) + candidates[prev_assignments[r]].append(buf) + for r in range(nrep): + for buf in candidates[assignments[r]]: + if buf.device() == devices[r]: + buffers[r][a] = buf + break + else: + buffers[r][a] = buf.copy_to_device(devices[r]) else: bufs = shard_arg_handlers[type(arg)](arg, devices, assignments, backend=backend) for r, buf in enumerate(bufs): @@ -89,6 +105,7 @@ def shard_args(backend, devices, assignments, axis_size, tuple_args, args): return buffers + shard_arg_handlers = {} shard_arg_handlers[core.Unit] = \ lambda x, devices, _, backend=None: [
Undefined variable x I'm hitting this line in `pxla.py` which doesn't seem to be tested right now: https://github.com/google/jax/blob/505b47c1fcef45534aa0cc90509ce146f9939c15/jax/interpreters/pxla.py#L80 The variable `x` isn't defined anywhere.
Gooooood catch! I think I know how to write a repro, but if you've got one please share it. My code is too complicated unfortunately :( I'm not sure how to make a minimal repro so if you can think of a way I would say go ahead. Here's a repro when 8 XLA devices are available (like in our favorite colab notebooks): ```python from jax import pmap import numpy as onp a = pmap(lambda x: x)(onp.ones((2, 2))) b = pmap(pmap(lambda x: x))(a) ``` By the way, as a workaround you can always bounce to the host (calling `onp.asarray`) or to a single device (calling `device_put`). I'm working on a more efficient solution though. Let me know if you need better workarounds. Meetings! Okay, here's something you can paste into a colab cell to fix things: ```python from jax.interpreters import pxla, xla from jax.interpreters.pxla import ShardedDeviceArray, shard_arg_handlers, assign_shards_to_replicas def shard_args(backend, devices, assignments, axis_size, tuple_args, args): nargs, nrep = len(args), len(devices) buffers = [[None] * nargs for _ in range(nrep)] for a, arg in enumerate(args): # The shard_arg_handlers allow an extensible set of types to be sharded, but # inline handling for ShardedDeviceArray as a special case for performance if type(arg) is ShardedDeviceArray: if nrep == len(arg.device_buffers): # The argument is already prepared for the right number of replicas, so # we just ensure that buf[r] is on devices[r] for each replica index r for r, buf in enumerate(arg.device_buffers): buffers[r][a] = buf if buf.device() == devices[r] else buf.copy_to_device(devices[r]) else: # The argument is prepared for a different number of replicas, so for # each of our replica indices we check if there's already a buffer with # the correct logical assignment on the correct device, and if not just # copy one of them prev_assignments = assign_shards_to_replicas(len(arg.device_buffers), axis_size) for r in range(nrep): candidates = (buf for i, buf in enumerate(arg.device_buffers) if prev_assignments[i] == assignments[r]) for buf in candidates: if buf.device() == devices[r]: buffers[r][a] = buf break else: buffers[r][a] = buf.copy_to_device(devices[r]) else: bufs = shard_arg_handlers[type(arg)](arg, devices, assignments, backend=backend) for r, buf in enumerate(bufs): buffers[r][a] = buf if tuple_args: buffers = [[xla.make_tuple(bufs, devices[r], backend)] for r, bufs in enumerate(buffers)] return buffers pxla.shard_args = shard_args ``` I'll get this merged into master and google asap.
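A small sketch of the host-bounce workaround mentioned above (assuming at least four XLA devices are available for the nested pmap):

```python
import numpy as onp
from jax import pmap

a = pmap(lambda x: x)(onp.ones((2, 2)))
# Re-sharding `a` for a different replica count hits the broken code path, so
# bounce it through host memory first, as suggested in the thread.
b = pmap(pmap(lambda x: x))(onp.asarray(a))
```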
2019-11-15T00:25:44
google/jax
1,697
google__jax-1697
[ "1696" ]
be28700b8babb7e4951f21f7f0356864fcda64ea
diff --git a/jax/api.py b/jax/api.py --- a/jax/api.py +++ b/jax/api.py @@ -388,6 +388,13 @@ def value_and_grad(fun, argnums=0, has_aux=False, holomorphic=False): @wraps(fun, docstr=docstr, argnums=argnums) def value_and_grad_f(*args, **kwargs): + max_argnum = argnums if type(argnums) is int else max(argnums) + if max_argnum >= len(args): + msg = ("differentiating with respect to argnums={} requires at least " + "{} positional arguments to be passed by the caller, but got only " + "{} positional arguments.") + raise TypeError(msg.format(argnums, max_argnum + 1, len(args))) + f = lu.wrap_init(fun, kwargs) f_partial, dyn_args = _argnums_partial(f, argnums, args) if not has_aux:
diff --git a/tests/api_test.py b/tests/api_test.py --- a/tests/api_test.py +++ b/tests/api_test.py @@ -1253,5 +1253,17 @@ def test_repr(self): rep = repr(np.ones(()) + 1.) self.assertStartsWith(rep, 'DeviceArray') + def test_grad_without_enough_args_error_message(self): + # https://github.com/google/jax/issues/1696 + def f(x, y): return x + y + df = api.grad(f, argnums=0) + self.assertRaisesRegexp( + TypeError, + "differentiating with respect to argnums=0 requires at least 1 " + "positional arguments to be passed by the caller, but got only 0 " + "positional arguments.", + lambda: partial(df, x=0.)(y=1.)) + + if __name__ == '__main__': absltest.main()
Possible issue with partial gradient application I'm trying to define the derivative of a function at a point, so that I can then compile it with jit, but either I'm not doing it in the right way (with functools.partial) or there is a problem somewhere. Define a simple function of x and y and its derivative with respect to x: ```python from jax import grad def f(x,y): return x+y df = grad(f, argnums=0) ``` This works: ```python from functools import partial partial(f, x = 0.)(y = 1) # 1.0 ``` This doesn't: ```python from functools import partial partial(df, x = 0.)(y = 1) # IndexError: tuple index out of range ``` why?
Thanks for the question! We should provide a better error message here, but essentially it's because we're trying to differentiate with respect to the first positional parameter (`argnums=0`) but we're not passing any positional arguments (both are passed as keywords). So this works: ```python partial(df, 0.)(y = 1) ``` You might think that we could identify keyword arguments with their positions, but unfortunately there's no robust way to do that in Python: it would require inspecting argument signatures, but signatures can be obscured by decorators and the like. The bottom line is that all `df` knows is that you're passing it two keyword arguments, yet its code expects there to be at least one positional argument when `argnums=0`, and `n+1` positional arguments when `argnums=n`. We should improve this error message to something that explains at least `n+1` positional arguments must be passed by the caller when `argnums=n`, like we do for `jit` with `static_argnums`.
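To restate the working patterns from the answer above as a small runnable sketch (the differentiated argument is passed positionally, keywords are fine for the rest):

```python
from functools import partial
from jax import grad

def f(x, y):
    return x + y

df = grad(f, argnums=0)  # differentiate w.r.t. the first positional argument
print(df(0., y=1.))           # 1.0: x passed positionally, y as a keyword
print(partial(df, 0.)(y=1.))  # 1.0: the partial-application form from above
```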
2019-11-15T05:18:57
google/jax
1,698
google__jax-1698
[ "1686" ]
dbf41348a03eefa0bd3f9e7db9bd128de9cbb93c
diff --git a/jax/lax/lax_control_flow.py b/jax/lax/lax_control_flow.py --- a/jax/lax/lax_control_flow.py +++ b/jax/lax/lax_control_flow.py @@ -112,6 +112,14 @@ def fori_loop(lower, upper, body_fun, init_val): Unlike that Python version, ``fori_loop`` is implemented in terms of a call to ``while_loop``. See the docstring for ``while_loop`` for more information. + Also unlike the Python analogue, the loop-carried value ``val`` must hold a + fixed shape and dtype across all iterations (and not just be consistent up to + NumPy rank/shape broadcasting and dtype promotion rules, for example). In + other words, the type ``a`` in the type signature above represents an array + with a fixed shape and dtype (or a nested tuple/list/dict container data + structure with a fixed structure and arrays with fixed shape and dtype at the + leaves). + Args: lower: an integer representing the loop index lower bound (inclusive) upper: an integer representing the loop index upper bound (exclusive) @@ -155,6 +163,14 @@ def while_loop(cond_fun, body_fun, init_val): for jit-compiled functions, since native Python loop constructs in an ``@jit`` function are unrolled, leading to large XLA computations. + Also unlike the Python analogue, the loop-carried value ``val`` must hold a + fixed shape and dtype across all iterations (and not just be consistent up to + NumPy rank/shape broadcasting and dtype promotion rules, for example). In + other words, the type ``a`` in the type signature above represents an array + with a fixed shape and dtype (or a nested tuple/list/dict container data + structure with a fixed structure and arrays with fixed shape and dtype at the + leaves). + Another difference from using Python-native loop constructs is that ``while_loop`` is not reverse-mode differentiable because XLA computations require static bounds on memory requirements. @@ -455,6 +471,13 @@ def scan(f, init, xs): for jit-compiled functions, since native Python loop constructs in an ``@jit`` function are unrolled, leading to large XLA computations. + Finally, the loop-carried value ``carry`` must hold a fixed shape and dtype + across all iterations (and not just be consistent up to NumPy rank/shape + broadcasting and dtype promotion rules, for example). In other words, the type + ``c`` in the type signature above represents an array with a fixed shape and + dtype (or a nested tuple/list/dict container data structure with a fixed + structure and arrays with fixed shape and dtype at the leaves). + Args: f: a Python function to be scanned of type ``c -> a -> (c, b)``, meaning that ``f`` accepts two arguments where the first is a value of the loop @@ -968,7 +991,7 @@ def _check_tree_and_avals(what, tree1, avals1, tree2, avals2): raise TypeError(msg.format(what, tree1, tree2)) if not all(safe_map(typematch, avals1, avals2)): msg = ("{} must have identical types, " - "got {} and {}.") + "got\n{}\nand\n{}.") raise TypeError(msg.format(what, tree_unflatten(tree1, avals1), tree_unflatten(tree2, avals2)))
diff --git a/tests/lax_control_flow_test.py b/tests/lax_control_flow_test.py --- a/tests/lax_control_flow_test.py +++ b/tests/lax_control_flow_test.py @@ -202,8 +202,12 @@ def testWhileTypeErrors(self): with self.assertRaisesRegex(TypeError, re.escape("body_fun output and input must have same type structure, got PyTreeDef(tuple, [*,*]) and *.")): lax.while_loop(lambda c: True, lambda c: (1., 1.), 0.) - with self.assertRaisesRegex(TypeError, - re.escape("body_fun output and input must have identical types, got ShapedArray(bool[]) and ShapedArray(float32[]).")): + with self.assertRaisesRegex( + TypeError, + "body_fun output and input must have identical types, got\\n" + "ShapedArray\(bool\[\]\)\\n" + "and\\n" + "ShapedArray\(float32\[\]\)."): lax.while_loop(lambda c: True, lambda c: True, np.float32(0.)) def testNestedWhileWithDynamicUpdateSlice(self): @@ -526,8 +530,12 @@ def testCondTypeErrors(self): re.escape("true_fun and false_fun output must have same type structure, got * and PyTreeDef(tuple, [*,*]).")): lax.cond(True, 1., lambda top: 1., 2., lambda fop: (2., 2.)) - with self.assertRaisesRegex(TypeError, - re.escape("true_fun and false_fun output must have identical types, got ShapedArray(float32[1]) and ShapedArray(float32[]).")): + with self.assertRaisesRegex( + TypeError, + "true_fun and false_fun output must have identical types, got\n" + "ShapedArray\(float32\[1\]\)\n" + "and\n" + "ShapedArray\(float32\[\]\)."): lax.cond(True, 1., lambda top: np.array([1.], np.float32), 2., lambda fop: np.float32(1.)) @@ -903,9 +911,12 @@ def testScanTypeErrors(self): with self.assertRaisesRegex(TypeError, re.escape("scan carry output and input must have same type structure, got * and PyTreeDef(None, []).")): lax.scan(lambda c, x: (0, x), None, a) - with self.assertRaisesRegex(TypeError, - re.escape("scan carry output and input must have identical types, " - "got ShapedArray(int32[]) and ShapedArray(float32[]).")): + with self.assertRaisesRegex( + TypeError, + "scan carry output and input must have identical types, got\n" + "ShapedArray\(int32\[\]\)\\n" + "and\\n" + "ShapedArray\(float32\[\]\)."): lax.scan(lambda c, x: (np.int32(0), x), np.float32(1.0), a) with self.assertRaisesRegex(TypeError, re.escape("scan carry output and input must have same type structure, got * and PyTreeDef(tuple, [*,*]).")):
bug in shape-checking rules for the case of fori_loop The following three loops should all give the same results but the lax.fori_loop throws an error. Could be related to #1681 import jax.numpy as np from jax.lax import fori_loop as fori_loop_jax def fori_loop_python(lower, upper, body_fun, init_val): val = init_val for i in range(lower, upper): val = body_fun(i, val) return val def body_fun(i,arr): a,b = arr return a-b[i]*b[i+1], b s = np.ones([10,10]) r = s.shape[1]-1 E = 0 for i in range(r): E -= s[:, i] * s[:, i + 1] print(E) E, _ = fori_loop_python(0, s.shape[0]-1, body_fun, (0,s)) print(E) E, _ = fori_loop_jax(0, r, body_fun, (0,s))[0] print(E)
Thanks for raising this! I tweaked the error message to print with some more newlines, to make things line up: ``` TypeError: body_fun output and input must have identical types, got (ShapedArray(int32[]), ShapedArray(int32[]), (ShapedArray(float32[10]), ShapedArray(float32[10,10]))) and (ShapedArray(int32[]), ShapedArray(int32[]), (ShapedArray(int32[]), ShapedArray(float32[10,10]))). ``` As you can see, there's an `int32[]` input where we get a `float32[10]` output. The issue is that in the Python case you're relying on `0` getting rank- and dtype-promoted to a `float32[10]`, and that's not allowed for `lax.fori_loop`. This works (notice I removed a `[0]` from the end): ```python E, _ = fori_loop_jax(0, r, body_fun, (np.zeros(10),s)) ``` We could probably be clearer in our documentation that, unlike the pure Python analogue, the input and output types have to be the same, not just consistent up to NumPy-style broadcasting/promotion.
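Putting the suggested fix together into a complete runnable snippet (a sketch based on the repro above, seeding the carry with a `float32[10]` array so the carry type stays fixed across iterations):

```python
import jax.numpy as jnp
from jax import lax

def body_fun(i, arr):
    a, b = arr
    return a - b[i] * b[i + 1], b

s = jnp.ones((10, 10))
r = s.shape[1] - 1
# The carry must keep the same shape/dtype on every iteration, so start from
# jnp.zeros(10) instead of the Python scalar 0.
E, _ = lax.fori_loop(0, r, body_fun, (jnp.zeros(10), s))
print(E)  # [-9. -9. ... -9.]
```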
2019-11-15T05:38:17
google/jax
1,704
google__jax-1704
[ "1703" ]
4fc765241f4317d4f61e7cb4acafbf2272a83fa0
diff --git a/jax/lax/lax_parallel.py b/jax/lax/lax_parallel.py --- a/jax/lax/lax_parallel.py +++ b/jax/lax/lax_parallel.py @@ -114,7 +114,7 @@ def ppermute(x, axis_name, perm): An array with the same shape as ``x`` with slices along the axis ``axis_name`` gathered from ``x`` according to the permutation ``perm``. """ - return ppermute_p.bind(x, axis_name=axis_name, perm=perm) + return ppermute_p.bind(x, axis_name=axis_name, perm=tuple(perm)) def pswapaxes(x, axis_name, axis): """Swap the pmapped axis ``axis_name`` with the unmapped axis ``axis``.
diff --git a/tests/pmap_test.py b/tests/pmap_test.py --- a/tests/pmap_test.py +++ b/tests/pmap_test.py @@ -331,6 +331,16 @@ def testCollectivePermuteCyclicGrad(self): expected = onp.roll(onp.pi + onp.arange(device_count), 1) self.assertAllClose(ans, expected, check_dtypes=False) + @jtu.skip_on_devices("cpu", "gpu") + def testIssue1703(self): + num_devices = xla_bridge.device_count() + perm = [num_devices - 1] + list(range(num_devices - 1)) + f = pmap( + lambda x: lax.ppermute(x, "i", zip(range(num_devices), perm)), "i") + result = f(np.arange(num_devices, dtype=np.float32)) + expected = np.asarray(perm, dtype=np.float32) + self.assertAllClose(result, expected) + @jtu.skip_on_devices("cpu", "gpu") def testRule30(self): # This is a test of collective_permute implementing a simple halo exchange
ppermute is returning 0s Tested with TPUs. ```python @partial(jax.pmap, axis_name="shard") def f(a): return jax.lax.ppermute( a, "shard", zip(range(8), [1, 0, 2, 3, 4, 5, 6, 7])) a = np.ones((8, 2, 2)) print(f(a)) ``` Prints out ``` [[[0. 0.] [0. 0.]] [[0. 0.] [0. 0.]] [[0. 0.] [0. 0.]] [[0. 0.] [0. 0.]] [[0. 0.] [0. 0.]] [[0. 0.] [0. 0.]] [[0. 0.] [0. 0.]] [[0. 0.] [0. 0.]]] ```
@hawkinsp pointed out that adding `tuple(...)` around the `zip(...)` fixes it. For a quick fix, we can always explicitly cast the `perm` argument to `ppermute` to a `tuple`. I'll make a PR for this.
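The quick fix reads like the following sketch (it needs 8 devices, e.g. a TPU host, just like the original repro):

```python
from functools import partial
import numpy as np
import jax

@partial(jax.pmap, axis_name="shard")
def f(a):
    # Materialize the permutation as a tuple; passing a zip iterator is what
    # triggered the silent all-zeros result above.
    return jax.lax.ppermute(
        a, "shard", tuple(zip(range(8), [1, 0, 2, 3, 4, 5, 6, 7])))

a = np.ones((8, 2, 2))
print(f(a))
```

After the patch above, `ppermute` performs this cast itself, so the explicit `tuple(...)` is only needed on older versions.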
2019-11-15T22:35:58
google/jax
1,718
google__jax-1718
[ "1712" ]
6cf2e4b8bf7de1f561e1fac166ca70d15816df6b
diff --git a/jax/core.py b/jax/core.py --- a/jax/core.py +++ b/jax/core.py @@ -300,6 +300,7 @@ def aval(self): assert False def __neg__(self): return self.aval._neg(self) + def __pos__(self): return self.aval._pos(self) def __eq__(self, other): return self.aval._eq(self, other) def __ne__(self, other): return self.aval._ne(self, other) def __lt__(self, other): return self.aval._lt(self, other) diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -3194,6 +3194,7 @@ def _unimplemented_setitem(self, i, x): "getitem": _rewriting_take, "setitem": _unimplemented_setitem, "neg": negative, + "pos": positive, "eq": equal, "ne": not_equal, "lt": less,
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -256,6 +256,7 @@ def op_record(name, nargs, dtypes, shapes, rng_factory, diff_modes, op_record("__lt__", 2, default_dtypes, all_shapes, jtu.rand_default, []), op_record("__gt__", 2, default_dtypes, all_shapes, jtu.rand_default, []), op_record("__ge__", 2, default_dtypes, all_shapes, jtu.rand_default, []), + op_record("__pos__", 1, number_dtypes, all_shapes, jtu.rand_default, []), op_record("__neg__", 1, number_dtypes, all_shapes, jtu.rand_default, []), op_record("__pow__", 2, inexact_dtypes, all_shapes, jtu.rand_positive, [], tolerance={onp.float32: 2e-4, onp.complex64: 2e-4, onp.complex128: 1e-14}),
unary + does not work in jax ``` >>> import jax >>> +jax.numpy.asarray([1,2,3]) /Users/lukasheinrich/Code/excursiondev/excvenv/lib/python3.6/site-packages/jax/lib/xla_bridge.py:119: UserWarning: No GPU/TPU found, falling back to CPU. warnings.warn('No GPU/TPU found, falling back to CPU.') Traceback (most recent call last): File "<stdin>", line 1, in <module> TypeError: bad operand type for unary +: 'DeviceArray' ``` while in numpy it works ``` >>> import numpy >>> +numpy.asarray([1,2,3]) array([1, 2, 3]) ``` a similar issue appears for the `-` operator
I see the issue for unary `+`; that's just a case we forgot to implement (the `+` operator isn't that useful!) Do you have an example of unary `-` failing? It works for me. sorry, I misremembered: unary minus works for me.
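With the patch above in place (which routes `__pos__` to `jax.numpy.positive`), the original snippet works; a quick check, assuming a jax version that includes the change:

```python
import jax.numpy as jnp

x = jnp.asarray([1, 2, 3])
print(+x)               # same values back, now as a DeviceArray
print(jnp.positive(x))  # the function the operator is mapped to
```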
2019-11-19T02:17:58
google/jax
1736
google__jax-1736
[ "1731" ]
2b0cde3648e3f405b87558e3a3eff352a6c377a8
diff --git a/jax/interpreters/batching.py b/jax/interpreters/batching.py --- a/jax/interpreters/batching.py +++ b/jax/interpreters/batching.py @@ -262,6 +262,8 @@ def broadcast(x, sz, axis): def moveaxis(x, src, dst): if core.get_aval(x) is core.abstract_unit: return core.unit + if src == dst: + return x src, dst = src % x.ndim, dst % x.ndim perm = [i for i in range(onp.ndim(x)) if i != src] perm.insert(dst, src) diff --git a/jax/lax/__init__.py b/jax/lax/__init__.py --- a/jax/lax/__init__.py +++ b/jax/lax/__init__.py @@ -16,7 +16,8 @@ from .lax import * from .lax import (_reduce_sum, _reduce_max, _reduce_min, _reduce_or, _reduce_and, _reduce_window_sum, _reduce_window_max, - _reduce_window_min, _reduce_window_prod, _float, _complex, + _reduce_window_min, _reduce_window_prod, + _select_and_gather_add, _float, _complex, _input_dtype, _const, _eq_meet, _safe_mul, _broadcasting_select, _check_user_dtype_supported, _one, _const, _upcast_fp16_for_computation) diff --git a/jax/lax/lax.py b/jax/lax/lax.py --- a/jax/lax/lax.py +++ b/jax/lax/lax.py @@ -3778,7 +3778,7 @@ def _select_and_gather_add_translation( canonicalize_types=False) if double_word_reduction: - # XLA doesn't yet implement ReduceWindow on tuples (Google bug b/73062247), so + # TODO(b/73062247): XLA doesn't yet implement ReduceWindow on tuples, so # we implement a pair-wise ReduceWindow by packing two k-bit values into # 2k-bit unsigned integer using bit tricks. word_dtype = _UINT_DTYPES[nbits] @@ -3852,7 +3852,7 @@ def reducer(): return c.Build() - assert select_prim is ge_p or select_prim is le_p + assert select_prim is ge_p or select_prim is le_p, select_prim init = -onp.inf if select_prim is ge_p else onp.inf out = c.ReduceWindow(pack(operand, tangents), pack(const(c, dtype, init), const(c, dtype, 0)), @@ -3885,12 +3885,30 @@ def _select_and_gather_add_transpose( window_strides, padding) return [result, None] +def _select_and_gather_add_batching_rule( + batched_args, batch_dims, select_prim, window_dimensions, window_strides, + padding): + t, x = batched_args + t_bdim, x_bdim = batch_dims + size = next(a.shape[bdim] for a, bdim in zip(batched_args, batch_dims) + if bdim is not None) + t = batching.bdim_at_front(t, t_bdim, size) + x = batching.bdim_at_front(x, x_bdim, size) + window_dimensions = (1,) + window_dimensions + window_strides = (1,) + window_strides + out = _select_and_gather_add(t, x, select_prim, window_dimensions, + window_strides, padding) + return (out, 0) + + select_and_gather_add_p = standard_primitive( _select_and_gather_add_shape_rule, _input_dtype, 'select_and_gather_add', _select_and_gather_add_translation) ad.primitive_jvps[select_and_gather_add_p] = _select_and_gather_add_jvp ad.primitive_transposes[select_and_gather_add_p] = \ - _select_and_gather_add_transpose + _select_and_gather_add_transpose +batching.primitive_batchers[select_and_gather_add_p] = \ + _select_and_gather_add_batching_rule xla.backend_specific_translations['tpu'][select_and_gather_add_p] = partial( _select_and_gather_add_translation, max_bits=32)
diff --git a/tests/lax_test.py b/tests/lax_test.py --- a/tests/lax_test.py +++ b/tests/lax_test.py @@ -2894,6 +2894,32 @@ def fun(operand): for bdims in all_bdims(shape): self._CheckBatching(fun, 3, bdims, (shape,), dtype, rng) + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": "_dtype={}_padding={}".format(onp.dtype(dtype).name, + padding), + "dtype": dtype, "padding": padding, "rng_factory": rng_factory} + for dtype in float_dtypes + for padding in ["VALID", "SAME"] + for rng_factory in [jtu.rand_small])) + def testSelectAndGatherAdd(self, dtype, padding, rng_factory): + rng = rng_factory() + all_configs = itertools.chain( + itertools.product( + [(4, 6)], + [(2, 1), (1, 2)], + [(1, 1), (2, 1), (1, 2)]), + itertools.product( + [(3, 2, 4, 6)], [(1, 1, 2, 1), (2, 1, 2, 1)], + [(1, 2, 2, 1), (1, 1, 1, 1)])) + + def fun(operand, tangents): + return lax._select_and_gather_add(operand, tangents, lax.ge_p, dims, + strides, padding) + + for shape, dims, strides in all_configs: + for bdims in all_bdims(shape, shape): + self._CheckBatching(fun, 3, bdims, (shape, shape), dtype, rng) + @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_shape={}_bdims={}_fft_ndims={}" .format(shape, bdims, fft_ndims),
NotImplementedError: Batching rule for 'select_and_gather_add' not implemented I'm trying to compute per-example hessian-vector products, where the model includes a MaxPool layer, and I'm getting this error: ``` File "cnn.py", line 62, in loss logits = predict_fun(params, inputs) File "/h/choidami/software/anaconda/lib/python3.7/site-packages/jax/experimental/stax.py", line 295, in apply_fun inputs = fun(param, inputs, rng=rng, **kwargs) File "/h/choidami/software/anaconda/lib/python3.7/site-packages/jax/experimental/stax.py", line 186, in apply_fun out = lax.reduce_window(inputs, init_val, reducer, dims, strides, padding) File "/h/choidami/software/anaconda/lib/python3.7/site-packages/jax/lax/lax.py", line 949, in reduce_window return monoid_reducer(operand, window_dimensions, window_strides, padding) File "/h/choidami/software/anaconda/lib/python3.7/site-packages/jax/lax/lax.py", line 984, in _reduce_window_max window_strides=tuple(window_strides), padding=padding) File "/h/choidami/software/anaconda/lib/python3.7/site-packages/jax/core.py", line 155, in bind out_tracer = top_trace.process_primitive(self, tracers, kwargs) File "/h/choidami/software/anaconda/lib/python3.7/site-packages/jax/interpreters/ad.py", line 222, in process_primitive primal_out, tangent_out = jvp(primals_in, tangents_in, **params) File "/h/choidami/software/anaconda/lib/python3.7/site-packages/jax/interpreters/ad.py", line 320, in standard_jvp val_out = primitive.bind(*primals, **params) File "/h/choidami/software/anaconda/lib/python3.7/site-packages/jax/core.py", line 155, in bind out_tracer = top_trace.process_primitive(self, tracers, kwargs) File "/h/choidami/software/anaconda/lib/python3.7/site-packages/jax/interpreters/ad.py", line 222, in process_primitive primal_out, tangent_out = jvp(primals_in, tangents_in, **params) File "/h/choidami/software/anaconda/lib/python3.7/site-packages/jax/interpreters/ad.py", line 321, in standard_jvp tangents_out = [rule(t, *primals, **params) for rule, t in zip(jvprules, tangents) File "/h/choidami/software/anaconda/lib/python3.7/site-packages/jax/interpreters/ad.py", line 322, in <listcomp> if rule is not None and t is not zero] File "/h/choidami/software/anaconda/lib/python3.7/site-packages/jax/lax/lax.py", line 3590, in _reduce_window_chooser_jvp_rule window_strides, padding) File "/h/choidami/software/anaconda/lib/python3.7/site-packages/jax/lax/lax.py", line 1013, in _select_and_gather_add window_strides=tuple(window_strides), padding=padding) File "/h/choidami/software/anaconda/lib/python3.7/site-packages/jax/core.py", line 155, in bind out_tracer = top_trace.process_primitive(self, tracers, kwargs) File "/h/choidami/software/anaconda/lib/python3.7/site-packages/jax/interpreters/batching.py", line 116, in process_primitive batched_primitive = get_primitive_batcher(primitive) File "/h/choidami/software/anaconda/lib/python3.7/site-packages/jax/interpreters/batching.py", line 168, in get_primitive_batcher raise NotImplementedError(msg.format(p)) NotImplementedError: Batching rule for 'select_and_gather_add' not implemented ``` The code I used is a simple modification of the resnet50.py example in the repo. 
``` from __future__ import absolute_import from __future__ import division from __future__ import print_function from functools import partial import numpy.random as npr from six.moves import xrange import jax.numpy as np from jax.config import config from jax import jit, grad, random, vmap, jvp from jax.experimental import optimizers from jax.experimental import stax from jax.experimental.stax import (Conv, Dense, Flatten, GeneralConv, MaxPool, Relu, LogSoftmax) from jax.flatten_util import ravel_pytree def CNN(num_classes): return stax.serial( stax.Conv(32, (5, 5), strides=(2, 2)), Relu, stax.MaxPool((2, 2), strides=(2, 2)), Flatten, Dense(num_classes), LogSoftmax) if __name__ == "__main__": rng_key = random.PRNGKey(0) batch_size = 8 num_classes = 10 input_shape = (batch_size, 32, 32, 3) step_size = 0.1 num_steps = 10 init_fun, predict_fun = CNN(num_classes) _, init_params = init_fun(rng_key, input_shape) def loss(params, batch): inputs, targets = batch logits = predict_fun(params, inputs) return -np.sum(logits * targets) def accuracy(params, batch): inputs, targets = batch target_class = np.argmax(targets, axis=-1) predicted_class = np.argmax(predict_fun(params, inputs), axis=-1) return np.mean(predicted_class == target_class) def synth_batches(): rng = npr.RandomState(0) while True: images = rng.rand(*input_shape).astype('float32') labels = rng.randint(num_classes, size=(batch_size, 1)) onehot_labels = labels == np.arange(num_classes) yield images, onehot_labels opt_init, opt_update, get_params = optimizers.momentum(step_size, mass=0.9) batches = synth_batches() def fgh_fn(loss, params, batch, v): _loss = partial(loss, batch=batch) grads, hvp = jvp(grad(_loss), [params], [v]) return grads, hvp def per_example_gh(loss, params, batch, v): fgh = partial(fgh_fn, loss) # Add extra batch dimension in case some functions assume it. def batch_fgh(params, batch, v): X, y = batch X = np.expand_dims(X, 0) y = np.expand_dims(y, 0) return fgh(params, (X, y), v) return vmap(batch_fgh, in_axes=(None, 0, None))(params, batch, v) @jit def update(i, opt_state, batch): params = get_params(opt_state) flat_params, unravel = ravel_pytree(params) def flat_loss(flat_params, batch): return loss(unravel(flat_params), batch) gs, hvps = per_example_gh(flat_loss, flat_params, batch, np.ones_like(flat_params)) return opt_update(i, grad(loss)(params, batch), opt_state) opt_state = opt_init(init_params) for i in xrange(num_steps): opt_state = update(i, opt_state, next(batches)) trained_params = get_params(opt_state) ```
2019-11-21T16:10:50
google/jax
1790
google__jax-1790
[ "1789" ]
c5a9eba3a8742c9d8f4e40fa9c2f43839f1d8481
diff --git a/jax/random.py b/jax/random.py --- a/jax/random.py +++ b/jax/random.py @@ -41,8 +41,11 @@ from jax import abstract_arrays from jax.numpy.linalg import cholesky from jax.scipy.special import logit +from jax.interpreters import ad from jax.interpreters import batching +from jax.interpreters import partial_eval as pe from jax.interpreters import xla +from jax.util import prod def PRNGKey(seed): @@ -616,7 +619,6 @@ def beta(key, a, b, shape=None, dtype=onp.float64): dtype = dtypes.canonicalize_dtype(dtype) return _beta(key, a, b, shape, dtype) -@partial(jit, static_argnums=(3, 4)) def _beta(key, a, b, shape, dtype): if shape is None: shape = lax.broadcast_shapes(onp.shape(a), onp.shape(b)) @@ -626,6 +628,8 @@ def _beta(key, a, b, shape, dtype): a = lax.convert_element_type(a, dtype) b = lax.convert_element_type(b, dtype) key_a, key_b = split(key) + a = np.broadcast_to(a, shape) + b = np.broadcast_to(b, shape) gamma_a = gamma(key_a, a, shape, dtype) gamma_b = gamma(key_b, b, shape, dtype) return gamma_a / (gamma_a + gamma_b) @@ -876,20 +880,43 @@ def _case4(zagf): return grad def _gamma_grad(sample, a): - samples = np.reshape(sample, -1) - alphas = np.reshape(a, -1) + samples = np.reshape(sample, -1) + alphas = np.reshape(a, -1) + if xla_bridge.get_backend().platform == 'cpu': + grads = lax.map(lambda args: _gamma_grad_one(*args), (samples, alphas)) + else: grads = vmap(_gamma_grad_one)(samples, alphas) - return grads.reshape(onp.shape(a)) + return grads.reshape(onp.shape(a)) -@custom_transforms def _gamma_impl(key, a): - alphas = np.reshape(a, -1) - keys = split(key, onp.size(alphas)) + a_shape = np.shape(a) + # split key to match the shape of a + key_ndim = np.ndim(key) - 1 + key = np.reshape(key, (-1, 2)) + key = vmap(split, in_axes=(0, None))(key, prod(a_shape[key_ndim:])) + keys = np.reshape(key, (-1, 2)) + alphas = np.reshape(a, -1) + if xla_bridge.get_backend().platform == 'cpu': + samples = lax.map(lambda args: _gamma_one(*args), (keys, alphas)) + else: samples = vmap(_gamma_one)(keys, alphas) - return np.reshape(samples, onp.shape(a)) - -defjvp(_gamma_impl, None, - lambda tangent, ans, key, a, **kwargs: tangent * _gamma_grad(ans, a)) + return np.reshape(samples, a_shape), + +def _gamma_batching_rule(batched_args, batch_dims): + k, a = batched_args + bk, ba = batch_dims + size = next(t.shape[i] for t, i in zip(batched_args, batch_dims) if i is not None) + k = batching.bdim_at_front(k, bk, size) + a = batching.bdim_at_front(a, ba, size) + return random_gamma_p.bind(k, a), (0,) + +random_gamma_p = core.Primitive('random_gamma') +random_gamma_p.multiple_results = True +random_gamma_p.def_impl(_gamma_impl) +random_gamma_p.def_abstract_eval(lambda key, a: (abstract_arrays.raise_to_shaped(a),)) +ad.defjvp2(random_gamma_p, None, lambda tangent, ans, key, a: (tangent * _gamma_grad(ans[0], a),)) +xla.translations[random_gamma_p] = xla.lower_fun(_gamma_impl, instantiate=True) +batching.primitive_batchers[random_gamma_p] = _gamma_batching_rule def gamma(key, a, shape=None, dtype=onp.float64): """Sample Gamma random values with given shape and float dtype. @@ -921,7 +948,7 @@ def _gamma(key, a, shape, dtype): a = lax.convert_element_type(a, dtype) if onp.shape(a) != shape: a = np.broadcast_to(a, shape) - return _gamma_impl(key, a) + return random_gamma_p.bind(key, a)[0] def gumbel(key, shape=(), dtype=onp.float64):
diff --git a/tests/random_test.py b/tests/random_test.py --- a/tests/random_test.py +++ b/tests/random_test.py @@ -27,10 +27,12 @@ import scipy.stats from jax import api +from jax import grad from jax import lax from jax import numpy as np from jax import random from jax import test_util as jtu +from jax import vmap from jax.interpreters import xla from jax.config import config @@ -467,6 +469,12 @@ def testIssue756(self): else: self.assertEqual(onp.result_type(w), onp.float32) + def testIssue1789(self): + def f(x): + return random.gamma(random.PRNGKey(0), x) + + grad(lambda x: np.sum(vmap(f)(x)))(np.ones(2)) + def testNoOpByOpUnderHash(self): def fail(*args, **kwargs): assert False apply_primitive, xla.apply_primitive = xla.apply_primitive, fail
grad and vmap do not compose with the gamma sampler Here is a repro, which triggers the error `NotImplementedError: Forward-mode differentiation rule for 'while' not implemented` when trying to take the gradient of `g`. ``` import jax import jax.numpy as np jax.config.update('jax_platform_name', 'cpu') def f(a): return jax.random.gamma(jax.random.PRNGKey(0), a) def g(x): return np.sum(jax.vmap(f)(x)) print(g(np.ones(3))) print(jax.grad(g)(np.ones(3))) ```
This is another manifestation of https://github.com/google/jax/issues/1249. Oh... Thanks, @shoyer! That seems to be the problem I'm hitting. I'll see whether I can go back to the approach from several months ago that didn't use custom_transforms for the gamma sampler.
2019-12-01T15:12:39
google/jax
1807
google__jax-1807
[ "1782" ]
1817f24c06256bdc4ad3e0890340ad8d3316fafb
diff --git a/build/build.py b/build/build.py --- a/build/build.py +++ b/build/build.py @@ -176,6 +176,7 @@ def check_bazel_version(bazel_path, min_version, max_version): # Sets the default Apple platform to macOS. build --apple_platform_type=macos +build --macos_minimum_os=10.9 # Make Bazel print out all options from rc files. build --announce_rc
ImportError: ..... ____chkstk_darwin Hi folks, Wondering if someone can please help with this import error on a fresh install. Background: Recently started a new conda environment with a couple of basics like jupyter, numpy, scipy. I'm using macOS High Sierra 10.13.4 and Python 3.7. The specific error is: ``` --------------------------------------------------------------------------- ImportError Traceback (most recent call last) <ipython-input-1-77da20ac745a> in <module> ----> 1 from jax import vmap # for auto-vectorizing functions 2 from functools import partial # for use with vmap 3 from jax import jit # for compiling functions for speedup 4 from jax import random # stax initialization uses jax.random 5 from jax.experimental import stax # neural network library ``` ... ``` ~/miniconda3/envs/lew_jax/lib/python3.7/site-packages/jaxlib/xla_client.py in <module> 36 # and TensorFlow may fail with duplicate protocol buffer message definitions. 37 ---> 38 from . import xla_extension as _xla 39 from .xla_extension import ops 40 ImportError: dlopen(/Users/lmar3213/miniconda3/envs/lew_jax/lib/python3.7/site-packages/jaxlib/xla_extension.so, 2): Symbol not found: ____chkstk_darwin Referenced from: /Users/lmar3213/miniconda3/envs/lew_jax/lib/python3.7/site-packages/jaxlib/xla_extension.so (which was built for Mac OS X 10.15) Expected in: /usr/lib/libSystem.B.dylib in /Users/lmar3213/miniconda3/envs/lew_jax/lib/python3.7/site-packages/jaxlib/xla_extension.so ``` To install, I ran as per the installation instructions, i.e. `pip install --upgrade pip`, which was already up to date, and then `pip install --upgrade jax jaxlib`, which installed happily. The only clue I have as to understanding the error is that it mentions `xla_extension.so` was built for macOS 10.15 but I'm on 10.13. Any help is appreciated! Thank you!
2019-12-03T16:59:24
google/jax
1836
google__jax-1836
[ "1830" ]
7df124a7b45dc9c9161ca28f9ed045a3ad34b380
diff --git a/jax/dtypes.py b/jax/dtypes.py --- a/jax/dtypes.py +++ b/jax/dtypes.py @@ -135,7 +135,14 @@ def finfo(dtype): def issubdtype(a, b): if a == bfloat16: - return b in [onp.floating, onp.inexact, onp.number] + return b in [bfloat16, _bfloat16_dtype, onp.floating, onp.inexact, + onp.number] + if not issubclass(b, onp.generic): + # Workaround for JAX scalar types. NumPy's issubdtype has a backward + # compatibility behavior for the second argument of issubdtype that + # interacts badly with JAX's custom scalar types. As a workaround, + # explicitly cast the second argument to a NumPy type object. + b = onp.dtype(b).type return onp.issubdtype(a, b) can_cast = onp.can_cast diff --git a/jax/lax_linalg.py b/jax/lax_linalg.py --- a/jax/lax_linalg.py +++ b/jax/lax_linalg.py @@ -94,7 +94,8 @@ def g(c, *args, **kwargs): # primitives -_cpu_lapack_types = {np.float32, np.float64, np.complex64, np.complex128} +_cpu_lapack_types = {onp.dtype(onp.float32), onp.dtype(onp.float64), + onp.dtype(onp.complex64), onp.dtype(onp.complex128)} # Cholesky decomposition @@ -137,7 +138,7 @@ def _nan_like(c, operand): def cholesky_cpu_translation_rule(c, operand): shape = c.GetShape(operand) dtype = shape.element_type().type - if len(shape.dimensions()) == 2 and dtype in _cpu_lapack_types: + if len(shape.dimensions()) == 2 and onp.dtype(dtype) in _cpu_lapack_types: result, info = lapack.potrf(c, operand, lower=True) return c.Select(c.Eq(info, c.ConstantS32Scalar(0)), result, _nan_like(c, result)) @@ -392,7 +393,7 @@ def _triangular_solve_cpu_translation_rule( c, a, b, left_side, lower, transpose_a, conjugate_a, unit_diagonal): shape = c.GetShape(a) dtype = shape.element_type().type - if len(shape.dimensions()) == 2 and dtype in _cpu_lapack_types: + if len(shape.dimensions()) == 2 and onp.dtype(dtype) in _cpu_lapack_types: if conjugate_a and not transpose_a: a = c.Conj(a) conjugate_a = False diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -126,29 +126,50 @@ def __init__(shape, dtype=None, buffer=None, offset=0, strides=None, size = onp.size _dtype = dtypes.result_type -bool_ = onp.bool_ -int_ = dtypes.int_ -float_ = dtypes.float_ -complex_ = dtypes.complex_ - -uint8 = onp.uint8 -uint16 = onp.uint16 -uint32 = onp.uint32 -uint64 = onp.uint64 -int8 = onp.int8 -int16 = onp.int16 -int32 = onp.int32 -int64 = onp.int64 -bfloat16 = dtypes.bfloat16 -float16 = onp.float16 -float32 = single = onp.float32 -float64 = double = onp.float64 -complex64 = csingle = onp.complex64 -complex128 = cdouble = onp.complex128 +# At present JAX doesn't have a reason to distinguish between scalars and arrays +# in its object system. Further, we want JAX scalars to have the same type +# promotion behaviors as JAX arrays. Rather than introducing a new type of JAX +# scalar object with JAX promotion behaviors, instead we make the JAX scalar +# types return JAX arrays when instantiated. 
+ +class _ScalarMeta(type): + def __hash__(self): + return hash(self.dtype.type) + + def __eq__(self, other): + return id(self) == id(other) or self.dtype == other + + def __ne__(self, other): + return not (self == other) + + def __call__(self, x): + return array(self.dtype.type(x), dtype=self.dtype) + +def _make_scalar_type(onp_scalar_type): + return type(onp_scalar_type.__name__, + (six.with_metaclass(_ScalarMeta, object),), + {"dtype": onp.dtype(onp_scalar_type)}) + +bool_ = _make_scalar_type(onp.bool_) +uint8 = _make_scalar_type(onp.uint8) +uint16 = _make_scalar_type(onp.uint16) +uint32 = _make_scalar_type(onp.uint32) +uint64 = _make_scalar_type(onp.uint64) +int8 = _make_scalar_type(onp.int8) +int16 = _make_scalar_type(onp.int16) +int32 = _make_scalar_type(onp.int32) +int64 = _make_scalar_type(onp.int64) +bfloat16 = _make_scalar_type(dtypes.bfloat16) +float16 = _make_scalar_type(onp.float16) +float32 = single = _make_scalar_type(onp.float32) +float64 = double = _make_scalar_type(onp.float64) +complex64 = csingle = _make_scalar_type(onp.complex64) +complex128 = cdouble = _make_scalar_type(onp.complex128) + +int_ = int32 if dtypes.int_ == onp.int32 else int64 +float_ = float32 if dtypes.float_ == onp.float32 else float64 +complex_ = complex64 if dtypes.complex_ == onp.complex64 else complex128 -flexible = onp.flexible -character = onp.character -object_ = onp.object_ number = onp.number inexact = onp.inexact complexfloating = onp.complexfloating @@ -157,6 +178,10 @@ def __init__(shape, dtype=None, buffer=None, offset=0, strides=None, signedinteger = onp.signedinteger unsignedinteger = onp.unsignedinteger +flexible = onp.flexible +character = onp.character +object_ = onp.object_ + iinfo = dtypes.iinfo dtype = onp.dtype @@ -363,8 +388,6 @@ def finfo(dtype): return dtypes.finfo(dtype) @_wraps(onp.issubdtype) def issubdtype(arg1, arg2): return dtypes.issubdtype(arg1, arg2) -issubdtype = dtypes.issubdtype - @_wraps(onp.isscalar) def isscalar(num): return dtypes.is_python_scalar(num) or onp.isscalar(num) @@ -459,7 +482,7 @@ def _logical_op(np_op, bitwise_op): @_wraps(np_op, update_doc=False) def op(*args): zero = lambda x: lax.full_like(x, shape=(), fill_value=0) - args = (x if issubdtype(_dtype(x), onp.bool_) else lax.ne(x, zero(x)) + args = (x if issubdtype(_dtype(x), bool_) else lax.ne(x, zero(x)) for x in args) return bitwise_op(*_promote_args(np_op.__name__, *args)) return op @@ -481,7 +504,7 @@ def divide(x1, x2): # decide whether to perform integer division based on Numpy result dtype, as a # way to check whether Python 3 style division is active in Numpy result_dtype = _result_dtype(onp.divide, x1, x2) - if issubdtype(result_dtype, onp.integer): + if issubdtype(result_dtype, integer): return floor_divide(x1, x2) else: return true_divide(x1, x2) @@ -514,7 +537,7 @@ def floor_divide(x1, x2): @_wraps(onp.divmod) def divmod(x1, x2): x1, x2 = _promote_args("divmod", x1, x2) - if issubdtype(_dtype(x1), onp.integer): + if issubdtype(_dtype(x1), integer): return floor_divide(x1, x2), remainder(x1, x2) else: return _float_divmod(x1, x2) @@ -704,7 +727,7 @@ def arcsinh(x): x, = _promote_dtypes_inexact(x) one = lax._const(x, 1) result = lax.log(x + lax.sqrt(x * x + one)) - if issubdtype(_dtype(result), onp.complexfloating): + if issubdtype(_dtype(result), complexfloating): return result a = abs(x) sqrt_max_value = onp.sqrt(finfo(_dtype(x)).max) @@ -723,7 +746,7 @@ def arccosh(x): x, = _promote_dtypes_inexact(x) one = lax._const(x, 1) result = lax.log(x + lax.sqrt((x + one) * (x - one))) - if 
issubdtype(_dtype(result), onp.complexfloating): + if issubdtype(_dtype(result), complexfloating): return result sqrt_max_value = onp.sqrt(finfo(_dtype(x)).max) log2 = lax._const(x, onp.log(2)) @@ -736,7 +759,7 @@ def arctanh(x): x, = _promote_dtypes_inexact(x) one = lax._const(x, 1) result = lax._const(x, 0.5) * lax.log((one + x) / (one - x)) - if issubdtype(_dtype(result), onp.complexfloating): + if issubdtype(_dtype(result), complexfloating): return result return lax.select(abs(x) <= 1, result, lax.full_like(x, onp.nan)) @@ -981,7 +1004,7 @@ def _maybe_numpy_1_13_isclose_behavior(a, out): def where(condition, x=None, y=None): if x is None or y is None: raise ValueError("Must use the three-argument form of where().") - if not issubdtype(_dtype(condition), onp.bool_): + if not issubdtype(_dtype(condition), bool_): condition = lax.ne(condition, zeros_like(condition)) x, y = _promote_dtypes(x, y) condition, x, y = broadcast_arrays(condition, x, y) @@ -1070,7 +1093,7 @@ def clip(a, a_min=None, a_max=None): def _dtype_info(dtype): """Helper function for to get dtype info needed for clipping.""" - if issubdtype(dtype, onp.integer): + if issubdtype(dtype, integer): return iinfo(dtype) return finfo(dtype) @@ -1199,7 +1222,7 @@ def reduction(a, axis=None, dtype=None, out=None, keepdims=False): computation_dtype = result_dtype a = lax.convert_element_type(a, computation_dtype) result = lax.reduce(a, _reduction_init_val(a, init_val), - op if computation_dtype != bool_ else bool_op, dims) + op if computation_dtype != onp.bool_ else bool_op, dims) if keepdims: shape_with_singletons = lax.subvals(shape(a), zip(dims, (1,) * len(dims))) result = lax.reshape(result, shape_with_singletons) @@ -1224,7 +1247,7 @@ def _reduction_init_val(a, init_val): try: return onp.array(init_val, dtype=a_dtype) except OverflowError: - assert issubdtype(a_dtype, onp.integer) + assert issubdtype(a_dtype, integer) sign, info = onp.sign(init_val), iinfo(a_dtype) return onp.array(info.min if sign < 0 else info.max, dtype=a_dtype) @@ -1250,8 +1273,7 @@ def mean(a, axis=None, dtype=None, out=None, keepdims=False): else: normalizer = onp.prod(onp.take(shape(a), axis)) if dtype is None: - if (issubdtype(_dtype(a), onp.bool_) or - issubdtype(_dtype(a), onp.integer)): + if issubdtype(_dtype(a), bool_) or issubdtype(_dtype(a), integer): dtype = float_ else: dtype = _dtype(a) @@ -1391,8 +1413,7 @@ def nan_reduction(a, axis=None, out=None, keepdims=False, **kwargs): def nanmean(a, axis=None, dtype=None, out=None, keepdims=False): if out is not None: raise ValueError("nanmean does not support the `out` argument.") - if (issubdtype(_dtype(a), onp.bool_) or - issubdtype(_dtype(a), onp.integer)): + if issubdtype(_dtype(a), bool_) or issubdtype(_dtype(a), integer): return mean(a, axis, dtype, out, keepdims) if dtype is None: dtype = _dtype(a) @@ -1759,7 +1780,7 @@ def eye(N, M=None, k=None, dtype=None): return lax.broadcasted_eye(dtype, (N, M), (0, 1)) else: k_dtype = _dtype(k) - if not issubdtype(k_dtype, onp.integer): + if not issubdtype(k_dtype, integer): msg = "eye argument `k` must be of integer dtype, got {}" raise TypeError(msg.format(k_dtype)) rows = k + lax.broadcasted_iota(k_dtype, (N, M), 0) @@ -1779,7 +1800,7 @@ def arange(start, stop=None, step=None, dtype=None): # If called like np.arange(N), we create a lazy lax._IotaConstant. 
if stop is None and step is None: dtype = dtype or _dtype(start) - if issubdtype(dtype, onp.integer): + if issubdtype(dtype, integer): return lax.iota(dtype, start) # avoids materializing # Fall back to instantiating an ndarray in host memory @@ -2189,7 +2210,7 @@ def matmul(a, b, precision=None): # pylint: disable=missing-docstring @_wraps(onp.vdot, lax_description=_PRECISION_DOC) def vdot(a, b, precision=None): - if issubdtype(_dtype(a), onp.complexfloating): + if issubdtype(_dtype(a), complexfloating): a = conj(a) return dot(a.ravel(), b.ravel(), precision=precision) @@ -2735,7 +2756,7 @@ def _merge_static_and_dynamic_indices(treedef, static_idx, dynamic_idx): return treedef.unflatten(idx) def _int(aval): - return not aval.shape and issubdtype(aval.dtype, onp.integer) + return not aval.shape and issubdtype(aval.dtype, integer) def _index_to_gather(x_shape, idx): # Remove ellipses and add trailing slice(None)s. @@ -2945,8 +2966,8 @@ def _expand_bool_indices(idx): abstract_i = core.get_aval(i) except TypeError: abstract_i = None - if (isinstance(abstract_i, ShapedArray) and issubdtype(abstract_i.dtype, onp.bool_) - or isinstance(i, list) and _all(not _shape(e) and issubdtype(_dtype(e), onp.bool_) + if (isinstance(abstract_i, ShapedArray) and issubdtype(abstract_i.dtype, bool_) + or isinstance(i, list) and _all(not _shape(e) and issubdtype(_dtype(e), bool_) for e in i)): if isinstance(i, list): i = array(i)
diff --git a/tests/dtypes_test.py b/tests/dtypes_test.py --- a/tests/dtypes_test.py +++ b/tests/dtypes_test.py @@ -34,6 +34,7 @@ from jax import dtypes from jax import numpy as np from jax import test_util as jtu +from jax.interpreters import xla from jax.config import config config.parse_flags_with_absl() @@ -58,6 +59,10 @@ all_dtypes = (bool_dtypes + signed_dtypes + unsigned_dtypes + float_dtypes + complex_dtypes) +scalar_types = [np.bool_, np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64, + np.bfloat16, np.float16, np.float32, np.float64, + np.complex64, np.complex128] class DtypesTest(jtu.JaxTestCase): @@ -138,6 +143,31 @@ def testPromoteDtypes(self): self.assertEqual(onp.promote_types(t1, t2), dtypes.promote_types(t1, t2)) + def testScalarInstantiation(self): + for t in [np.bool_, np.int32, np.bfloat16, np.float32, np.complex64]: + a = t(1) + self.assertEqual(a.dtype, np.dtype(t)) + self.assertIsInstance(a, xla.DeviceArray) + self.assertEqual(0, np.ndim(a)) + + def testIsSubdtype(self): + for t in scalar_types: + self.assertTrue(dtypes.issubdtype(t, t)) + self.assertTrue(dtypes.issubdtype(onp.dtype(t).type, t)) + self.assertTrue(dtypes.issubdtype(t, onp.dtype(t).type)) + if t != np.bfloat16: + for category in [onp.generic, np.inexact, np.integer, np.signedinteger, + np.unsignedinteger, np.floating, np.complexfloating]: + self.assertEqual(dtypes.issubdtype(t, category), + onp.issubdtype(onp.dtype(t).type, category)) + self.assertEqual(dtypes.issubdtype(t, category), + onp.issubdtype(onp.dtype(t).type, category)) + + def testArrayCasts(self): + for t in [np.bool_, np.int32, np.bfloat16, np.float32, np.complex64]: + a = onp.array([1, 2.5, -3.7]) + self.assertEqual(a.astype(t).dtype, np.dtype(t)) + self.assertEqual(np.array(a).astype(t).dtype, np.dtype(t)) @unittest.skipIf(six.PY2, "Test requires Python 3") def testEnumPromotion(self): diff --git a/tests/lax_control_flow_test.py b/tests/lax_control_flow_test.py --- a/tests/lax_control_flow_test.py +++ b/tests/lax_control_flow_test.py @@ -329,7 +329,7 @@ def testForiLoopErrors(self): """Test typing error messages for while.""" with self.assertRaisesRegex( TypeError, "arguments to fori_loop must have equal types"): - lax.fori_loop(np.int16(0), np.int32(10), (lambda i, c: c), np.float32(7)) + lax.fori_loop(onp.int16(0), np.int32(10), (lambda i, c: c), np.float32(7)) def testForiLoopBatched(self): def body_fun(i, loop_carry):
Potential type promotion bug. Since the latest jax type promotion changes I'm seeing the following strange type promotion behavior (x64, CPU at head): ```python import jax.numpy as np from jax.config import config config.update('jax_enable_x64', True) xs = np.array([1., 2.], np.float32) sigma = np.float32(1.0) print(np.exp(xs / sigma ** 2).dtype) # Float64 ```
This isn't really a bug, but I agree it's something we might like to improve. I believe this behavior hasn't been altered by the recent type promotion changes; it has always held. This behavior follows from two facts: * `jax.numpy.float32` is an alias for `numpy.float32`. This means it acts identically in every way. * Numpy has the following promotion behavior: ``` In [6]: type(onp.float32(1.0) ** 2) Out[6]: numpy.float64 ``` i.e., the RHS argument to the `/` operator in your example above is a `float64`, which JAX respects. So this is really a NumPy vs JAX difference. The best fixes I can think of are: a) use an array instead of a `NumPy` scalar: ``` print(np.exp(xs / np.array(sigma) ** 2).dtype) float32 ``` b) we change JAX to offer its own scalar objects whose promotion behavior matches the JAX array promotion behavior. c) we change `jax.numpy.float32(x)` to return a JAX array object. For what it's worth, I think the idiomatic way to write something like this, in both JAX and NumPy, is now to use Python floats: ```python xs = np.array([1., 2.], np.float32) sigma = 1.0 print(np.exp(xs / sigma ** 2).dtype) # float32 ``` These end up giving the same result in `jax.numpy` and `numpy`, but for two different reasons: - JAX special cases Python's builtin `float` in its casting rules as having lower priority than array dtypes. - NumPy has *value dependent casting* for scalars and 0d arrays. So NumPy has different behavior than JAX if you use Python scalars not representable in float32, e.g., `sigma = 1e40`. > c) we change `jax.numpy.float32(x)` to return a JAX array object. This seems like a reasonable choice, but keep in mind we will probably also have to special-case `jax.numpy.float32` (whatever it is) when it's passed into a `dtype` argument in JAX. One of the most common uses for these type scalars is to write something like `x.astype(np.float32)`. The downside is that there is no way we can make `x.astype(np.float32)` work if `x` is a NumPy array. Users will get `TypeError: data type not understood`. Given that it may be impossible to make something that matches all user expectations, this may be an argument for dropping aliases like `jax.numpy.float32` entirely, though I'm sure that would encounter resistance :) @shoyer I'm not so sure about that: ``` In [1]: import numpy as onp; from jax import numpy as np, lax; import jax c In [2]: class Foo(onp.int32): ...: pass ...: In [3]: Foo Out[3]: __main__.Foo In [4]: Foo(3) Out[4]: 3 In [5]: onp.array([2, 4], dtype=Foo) Out[5]: array([2, 4], dtype=int32) In [8]: x = onp.random.randn(3, 4) In [9]: x Out[9]: array([[ 0.61102705, -1.28747357, -0.87507959, -0.83302679], [-1.21343036, -1.44278927, 1.71148559, 1.00484012], [-0.09954842, -0.32945393, 0.75188577, 1.38867382]]) In [10]: x.astype(Foo) Out[10]: array([[ 0, -1, 0, 0], [-1, -1, 1, 1], [ 0, 0, 0, 1]], dtype=int32) ``` I don't know why it works, but it does! ``` import numpy as onp from jax import numpy as np class MyMeta(type): def __call__(cls, *args, **kwds): return np.array(*args, dtype=onp.int32) class MyInt32(onp.int32, metaclass=MyMeta): pass ``` seems pretty promising. ``` In [31]: MyInt32(77) Out[31]: DeviceArray(77, dtype=int32) ``` This looks like good luck rather than an intentional feature, so I would be hesitant to count on long term :) Either way, if we use it definitely use `__new__` rather than writing a meta class. 
This appears not to be an accident: https://github.com/numpy/numpy/blob/bf9614b3253e9ef4dbbcef1f48751955b8f2a598/numpy/core/src/multiarray/scalarapi.c#L466 To quote: ``` /* * Otherwise --- type is a sub-type of an array scalar * not corresponding to a registered data-type object. */ ``` The numpy tests also pointed me to what appears to be a slightly cleaner solution: https://github.com/numpy/numpy/blob/bf9614b3253e9ef4dbbcef1f48751955b8f2a598/numpy/core/tests/test_dtype.py#L1057 ``` import numpy as onp from jax import numpy as np class MyInt32(object): dtype = onp.dtype(onp.int32) def __new__(cls, x): return np.array(x, dtype=onp.int32) ``` i.e., it appears to be a tested property of `np.dtype` that it checks for a `dtype` attribute. > i.e., it appears to be a tested property of `np.dtype` that it checks for a `dtype` attribute. Yes, this looks like a clean solution. It's tested, so we can probably rely on it, though of course it appears to be undocumented!
2019-12-10T15:26:50
google/jax
1872
google__jax-1872
[ "1851" ]
a6da7e8bf90b4d1be5acce8bb9fe89e11035941c
diff --git a/jax/lax/lax.py b/jax/lax/lax.py --- a/jax/lax/lax.py +++ b/jax/lax/lax.py @@ -535,18 +535,6 @@ def dot(lhs, rhs, precision=None): Returns: An array containing the product. """ - # TODO(b/134526360): XLA doesn't support integer dots, so we emit a sum of - # products instead. - if dtypes.issubdtype(lhs.dtype, onp.integer): - lhs_shape = onp.shape(lhs) - lhs_ndim = len(lhs_shape) - rhs_ndim = onp.ndim(rhs) - if rhs_ndim > 1: - lhs = broadcast_in_dim(lhs, lhs_shape + (1,), tuple(range(len(lhs_shape)))) - if lhs_ndim > 1: - rhs = broadcast(rhs, (1,)) - return reduce(mul(lhs, rhs), _zero(lhs), add, (len(lhs_shape) - 1,)) - if 1 <= lhs.ndim <= 2 and 1 <= rhs.ndim <= 2 and lhs.shape[-1] == rhs.shape[0]: return dot_general(lhs, rhs, (((lhs.ndim - 1,), (0,)), ((), ())), precision=precision) @@ -576,9 +564,9 @@ def dot_general(lhs, rhs, dimension_numbers, precision=None): contract_dims, batch_dims = dimension_numbers contract_dims = tuple(map(tuple, contract_dims)) batch_dims = tuple(map(tuple, batch_dims)) - if dtypes.issubdtype(lhs.dtype, onp.integer): - # TODO(b/134526360): XLA doesn't support integer dots, so we emit a sum of - # products instead. + if not dtypes.issubdtype(lhs.dtype, onp.inexact): + # TODO(b/134526360): XLA doesn't support bool or integer dots, so we emit a + # sum of products instead. lhs_contract_dims, rhs_contract_dims = contract_dims lhs_batch_dims, rhs_batch_dims = batch_dims lhs_noncontract_dims = tuple(sorted( @@ -589,16 +577,19 @@ def dot_general(lhs, rhs, dimension_numbers, precision=None): lhs_batch_dims + lhs_noncontract_dims + lhs_contract_dims) rhs = transpose(rhs, rhs_batch_dims + rhs_noncontract_dims + rhs_contract_dims) - new_lhs_shape = onp.insert( - onp.shape(lhs), len(lhs_batch_dims) + len(lhs_noncontract_dims), - (1,) * len(rhs_noncontract_dims)) - new_rhs_shape = onp.insert(onp.shape(rhs), len(lhs_batch_dims), + new_lhs_shape = onp.insert(onp.array(onp.shape(lhs), dtype=onp.int64), + len(lhs_batch_dims) + len(lhs_noncontract_dims), + (1,) * len(rhs_noncontract_dims)) + new_rhs_shape = onp.insert(onp.array(onp.shape(rhs), dtype=onp.int64), + len(lhs_batch_dims), (1,) * len(lhs_noncontract_dims)) lhs = reshape(lhs, new_lhs_shape) rhs = reshape(rhs, new_rhs_shape) out_ndim = (len(lhs_batch_dims) + len(lhs_noncontract_dims) + len(rhs_noncontract_dims)) - return reduce(mul(lhs, rhs), _zero(lhs), add, + op_product = bitwise_and if lhs.dtype == onp.bool_ else mul + op_sum = bitwise_or if lhs.dtype == onp.bool_ else add + return reduce(op_product(lhs, rhs), _zero(lhs), op_sum, tuple(range(out_ndim, out_ndim + len(lhs_contract_dims)))) return dot_general_p.bind(lhs, rhs, @@ -1526,7 +1517,7 @@ def unop_dtype_rule(result_dtype, accepted_dtypes, name, aval, **kwargs): if not any(dtypes.issubdtype(aval.dtype, t) for t in accepted_dtypes): msg = '{} does not accept dtype {}. Accepted dtypes are subtypes of {}.' typename = str(onp.dtype(aval.dtype).name) - accepted_typenames = (str(onp.dtype(t).name) for t in accepted_dtypes) + accepted_typenames = (t.__name__ for t in accepted_dtypes) raise TypeError(msg.format(name, typename, ', '.join(accepted_typenames))) return result_dtype(aval.dtype) @@ -1548,7 +1539,7 @@ def binop_dtype_rule(result_dtype, accepted_dtypes, name, *avals, **kwargs): msg = ('{} does not accept dtype {} at position {}. 
' 'Accepted dtypes at position {} are subtypes of {}.') typename = str(onp.dtype(aval_dtype).name) - typenames = ', '.join(str(onp.dtype(t).name) for t in types) + typenames = ', '.join(t.__name__ for t in types) raise TypeError(msg.format(name, typename, i, i, typenames)) _check_same_dtypes(name, False, *aval_dtypes) return result_dtype(*avals) @@ -1611,7 +1602,7 @@ def _brcast_to(x, shape): return broadcast(x, shape) -_float = {onp.floating, dtypes.bfloat16} +_float = {onp.floating} _complex = {onp.complexfloating} _complex_elem_types = {onp.float32, onp.float64} _int = {onp.integer} @@ -3384,6 +3375,12 @@ def _reducer_masking_rule(prim, identity, padded_vals, logical_shapes, batching.primitive_batchers[reduce_p] = _reduce_batch_rule +def _reduce_number_dtype_rule(name, operand, *args, **kw): + if not dtypes.issubdtype(operand.dtype, onp.number): + raise TypeError("{} does not accept dtype {}. Accepted dtypes are subtypes " + "of number.".format(name, onp.dtype(operand.dtype).name)) + return dtypes.canonicalize_dtype(operand.dtype) + def _reduce_sum_shape_rule(operand, axes, input_shape): assert operand.shape == input_shape, ('{} != {}' .format(operand.shape, input_shape)) @@ -3402,8 +3399,9 @@ def _reduce_sum_transpose_rule(cotangent, input_shape, axes): assert result.shape == input_shape return [result] -reduce_sum_p = standard_primitive(_reduce_sum_shape_rule, _input_dtype, - 'reduce_sum', _reduce_sum_translation_rule) +reduce_sum_p = standard_primitive( + _reduce_sum_shape_rule, partial(_reduce_number_dtype_rule, 'reduce_sum'), + 'reduce_sum', _reduce_sum_translation_rule) ad.deflinear(reduce_sum_p, _reduce_sum_transpose_rule) batching.defreducer(reduce_sum_p) _masking_defreducer(reduce_sum_p, @@ -3450,8 +3448,9 @@ def _reduce_prod_jvp_rule(tangent, operand, axes): # Multiply partial products with the tangents and sum. 
return _reduce_sum(mul(tangent, mul(left_products, right_products)), (0,)) -reduce_prod_p = standard_primitive(_reduce_prod_shape_rule, _input_dtype, - 'reduce_prod', _reduce_prod_translation_rule) +reduce_prod_p = standard_primitive( + _reduce_prod_shape_rule, partial(_reduce_number_dtype_rule, 'reduce_prod'), + 'reduce_prod', _reduce_prod_translation_rule) ad.defjvp(reduce_prod_p, _reduce_prod_jvp_rule) batching.defreducer(reduce_prod_p) diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -388,6 +388,12 @@ def _one_to_one_binop(numpy_fn, lax_fn, promote_to_inexact=False): fn = lambda x1, x2: lax_fn(*_promote_args(numpy_fn.__name__, x1, x2)) return _wraps(numpy_fn)(fn) +def _maybe_bool_binop(numpy_fn, lax_fn, bool_lax_fn): + def fn(x1, x2): + x1, x2 = _promote_args(numpy_fn.__name__, x1, x2) + return lax_fn(x1, x2) if x1.dtype != bool_ else bool_lax_fn(x1, x2) + return _wraps(numpy_fn)(fn) + absolute = abs = _one_to_one_unop(onp.absolute, lax.abs) fabs = _one_to_one_unop(onp.fabs, lax.abs, True) bitwise_not = _one_to_one_unop(onp.bitwise_not, lax.bitwise_not) @@ -413,14 +419,14 @@ def _one_to_one_binop(numpy_fn, lax_fn, promote_to_inexact=False): sqrt = _one_to_one_unop(onp.sqrt, lax.sqrt, True) -add = _one_to_one_binop(onp.add, lax.add) +add = _maybe_bool_binop(onp.add, lax.add, lax.bitwise_or) bitwise_and = _one_to_one_binop(onp.bitwise_and, lax.bitwise_and) bitwise_or = _one_to_one_binop(onp.bitwise_or, lax.bitwise_or) bitwise_xor = _one_to_one_binop(onp.bitwise_xor, lax.bitwise_xor) right_shift = _one_to_one_binop(onp.right_shift, lax.shift_right_arithmetic) left_shift = _one_to_one_binop(onp.left_shift, lax.shift_left) equal = _one_to_one_binop(onp.equal, lax.eq) -multiply = _one_to_one_binop(onp.multiply, lax.mul) +multiply = _maybe_bool_binop(onp.multiply, lax.mul, lax.bitwise_and) not_equal = _one_to_one_binop(onp.not_equal, lax.ne) subtract = _one_to_one_binop(onp.subtract, lax.sub) arctan2 = _one_to_one_binop(onp.arctan2, lax.atan2, True) @@ -2244,7 +2250,9 @@ def einsum_path(subscripts, *operands, **kwargs): @partial(jit, static_argnums=(1, 2)) def _einsum(operands, contractions, precision): operands = list(_promote_dtypes(*operands)) - sum = lambda x, axes: lax.reduce(x, onp.array(0, x.dtype), lax.add, axes) + def sum(x, axes): + return lax.reduce(x, onp.array(0, x.dtype), + lax.add if x.dtype != bool_ else lax.bitwise_or, axes) def sum_uniques(operand, names, uniques): if uniques:
diff --git a/tests/lax_numpy_einsum_test.py b/tests/lax_numpy_einsum_test.py --- a/tests/lax_numpy_einsum_test.py +++ b/tests/lax_numpy_einsum_test.py @@ -225,7 +225,8 @@ def test_tf_unsupported_3(self): # these tests are based on https://github.com/dask/dask/pull/3412/files @parameterized.named_parameters( - {"testcase_name": "_{}".format(einstr), "einstr": einstr} + {"testcase_name": "_{}_dtype={}".format(einstr, dtype.__name__), + "einstr": einstr, "dtype": dtype} for einstr in [ 'abc,bad->abcd', 'abcdef,bcdfg->abcdeg', @@ -256,9 +257,10 @@ def test_tf_unsupported_3(self): 'aab,bcc->ac', 'fdf,cdd,ccd,afe->ae', 'fff,fae,bef,def->abd', - ]) - def test_from_dask(self, einstr): - r = rng() + ] + for dtype in [np.float32, np.int32, np.complex64, np.bool_]) + def test_from_dask(self, einstr, dtype): + r = jtu.rand_default() if '->' in einstr: input_str, result_names = einstr.split('->') else: @@ -269,7 +271,7 @@ def test_from_dask(self, einstr): shapes = defaultdict(lambda: next(dims)) input_shapes = [tuple(shapes[c] for c in names.replace('...', '01')) for names in input_names] - operands = [r.randn(*shape) for shape in input_shapes] + operands = [r(shape, dtype) for shape in input_shapes] self._check(einstr, *operands) diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -96,7 +96,7 @@ def op_record(name, nargs, dtypes, shapes, rng_factory, diff_modes, JAX_ONE_TO_ONE_OP_RECORDS = [ op_record("abs", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]), - op_record("add", 2, number_dtypes, all_shapes, jtu.rand_default, ["rev"]), + op_record("add", 2, all_dtypes, all_shapes, jtu.rand_default, ["rev"]), op_record("ceil", 1, float_dtypes, all_shapes, jtu.rand_default, []), op_record("conj", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]), op_record("equal", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []), @@ -109,23 +109,23 @@ def op_record(name, nargs, dtypes, shapes, rng_factory, diff_modes, onp.float64: 1e-12, onp.complex64: 2e-4, onp.complex128: 1e-12}, check_dtypes=False), op_record("floor", 1, float_dtypes, all_shapes, jtu.rand_default, []), - op_record("greater", 2, number_dtypes, all_shapes, jtu.rand_some_equal, []), - op_record("greater_equal", 2, number_dtypes, all_shapes, jtu.rand_some_equal, []), - op_record("less", 2, number_dtypes, all_shapes, jtu.rand_some_equal, []), - op_record("less_equal", 2, number_dtypes, all_shapes, jtu.rand_some_equal, []), + op_record("greater", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []), + op_record("greater_equal", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []), + op_record("less", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []), + op_record("less_equal", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []), op_record("log", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"], inexact=True), op_record("logical_and", 2, all_dtypes, all_shapes, jtu.rand_bool, []), op_record("logical_not", 1, all_dtypes, all_shapes, jtu.rand_bool, []), op_record("logical_or", 2, all_dtypes, all_shapes, jtu.rand_bool, []), op_record("logical_xor", 2, all_dtypes, all_shapes, jtu.rand_bool, []), - op_record("maximum", 2, number_dtypes, all_shapes, jtu.rand_some_inf, []), - op_record("minimum", 2, number_dtypes, all_shapes, jtu.rand_some_inf, []), - op_record("multiply", 2, number_dtypes, all_shapes, jtu.rand_default, ["rev"]), + op_record("maximum", 2, all_dtypes, all_shapes, jtu.rand_some_inf, []), + op_record("minimum", 2, all_dtypes, all_shapes, 
jtu.rand_some_inf, []), + op_record("multiply", 2, all_dtypes, all_shapes, jtu.rand_default, ["rev"]), op_record("negative", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]), op_record("nextafter", 2, [f for f in float_dtypes if f != lnp.bfloat16], all_shapes, jtu.rand_default, ["rev"], inexact=True, tolerance=0), - op_record("not_equal", 2, number_dtypes, all_shapes, jtu.rand_some_equal, ["rev"]), + op_record("not_equal", 2, all_dtypes, all_shapes, jtu.rand_some_equal, ["rev"]), op_record("array_equal", 2, number_dtypes, all_shapes, jtu.rand_some_equal, ["rev"]), op_record("reciprocal", 1, inexact_dtypes, all_shapes, jtu.rand_default, []), op_record("subtract", 2, number_dtypes, all_shapes, jtu.rand_default, ["rev"]), diff --git a/tests/lax_test.py b/tests/lax_test.py --- a/tests/lax_test.py +++ b/tests/lax_test.py @@ -634,7 +634,7 @@ def fun_via_grad(lhs, rhs): "lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype, "precision": precision, "rng_factory": rng_factory} for lhs_shape in [(3,), (4, 3)] for rhs_shape in [(3,), (3, 6)] - for dtype in default_dtypes + for dtype in all_dtypes for precision in [None, lax.Precision.DEFAULT, lax.Precision.HIGH, lax.Precision.HIGHEST] for rng_factory in [jtu.rand_default])) @@ -651,14 +651,16 @@ def testDot(self, lhs_shape, rhs_shape, dtype, precision, rng_factory): "lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype, "rng_factory": rng_factory} for lhs_shape in [(3,), (4, 3)] for rhs_shape in [(3,), (3, 6)] - for dtype in default_dtypes + for dtype in all_dtypes for rng_factory in [jtu.rand_default])) def testDotAgainstNumpy(self, lhs_shape, rhs_shape, dtype, rng_factory): rng = rng_factory() args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)] tol = { onp.float16: 1e-2, - onp.float64: max(jtu.default_tolerance()[onp.dtype(onp.float64)], 1e-14) + onp.float64: max(jtu.default_tolerance()[onp.dtype(onp.float64)], 1e-14), + onp.complex128: max(jtu.default_tolerance()[onp.dtype(onp.complex128)], + 1e-14) } lax_op = partial(lax.dot, precision=lax.Precision.HIGHEST) self._CheckAgainstNumpy(lax_op, lax_reference.dot, args_maker, tol=tol) @@ -680,7 +682,7 @@ def testDotAgainstNumpy(self, lhs_shape, rhs_shape, dtype, rng_factory): [(1, 2, 2, 3), (1, 2, 3, 1), [1], [1]], [(3, 2), (2, 4), [1], [0]], ] - for dtype in default_dtypes + for dtype in all_dtypes for rng_factory in [jtu.rand_small])) def testDotGeneralContractOnly(self, lhs_shape, rhs_shape, dtype, lhs_contracting, rhs_contracting, rng_factory): @@ -705,7 +707,7 @@ def fun(lhs, rhs): ((3, 3, 2), (3, 2, 4), (([2], [1]), ([0], [0]))), ((3, 4, 2, 4), (3, 4, 3, 2), (([2], [3]), ([0, 1], [0, 1]))), ] - for dtype in default_dtypes + for dtype in all_dtypes for rng_factory in [jtu.rand_small])) def testDotGeneralContractAndBatch(self, lhs_shape, rhs_shape, dtype, dimension_numbers, rng_factory): @@ -729,7 +731,7 @@ def fun(lhs, rhs): ((3, 3, 2), (3, 2, 4), (([2], [1]), ([0], [0]))), ((3, 4, 2, 4), (3, 4, 3, 2), (([2], [3]), ([0, 1], [0, 1]))), ] - for dtype in default_dtypes + for dtype in all_dtypes for rng_factory in [jtu.rand_small])) def testDotGeneralAgainstNumpy(self, lhs_shape, rhs_shape, dtype, dimension_numbers, rng_factory):
einsum crash on bool dtype In Colab, ```python import numpy as np import jax, jax.numpy as jp x = jp.array(np.ones((3, 3), dtype=bool)) print(jp.einsum("ij->i", x)) ``` results in > RuntimeError: Invalid argument: Expected element type in shape to be arithmetic type for operation add; got PRED.: > This is a bug in JAX's shape-checking rules; please report it! > https://github.com/google/jax/issues Note that in numpy, `einsum` on `bool` dtypes does a logical OR reduction ("einany") rather than summation.
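For reference, a small NumPy-only snippet illustrating the OR-reduction behavior described in the note above:
```python
import numpy as np

x = np.eye(3, dtype=bool)
print(np.einsum("ij->i", x))        # each row ORs to True; result stays boolean
print(np.einsum("ij->i", x).dtype)  # bool
```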
2019-12-16T21:48:13
google/jax
1880
google__jax-1880
[ "1869" ]
96677d9c6f549b643fd0cdf5664da2f22f45fbaf
diff --git a/jax/random.py b/jax/random.py --- a/jax/random.py +++ b/jax/random.py @@ -39,8 +39,8 @@ from jax.lib import cuda_prng from jax import core from jax import abstract_arrays +from jax.numpy.linalg import cholesky from jax.scipy.special import logit -from jax.scipy.linalg import cholesky from jax.interpreters import batching from jax.interpreters import xla
diff --git a/tests/random_test.py b/tests/random_test.py --- a/tests/random_test.py +++ b/tests/random_test.py @@ -409,6 +409,30 @@ def testMultivariateNormal(self, dim, dtype): # eigenvectors follow a standard normal distribution. self._CheckKolmogorovSmirnovCDF(whitened.ravel(), scipy.stats.norm().cdf) + def testMultivariateNormalCovariance(self): + # test code based on https://github.com/google/jax/issues/1869 + N = 100000 + cov = np.array([[ 0.19, 0.00, -0.13, 0.00], + [ 0.00, 0.29, 0.00, -0.23], + [ -0.13, 0.00, 0.39, 0.00], + [ 0.00, -0.23, 0.00, 0.49]]) + mean = np.zeros(4) + + out_onp = onp.random.RandomState(0).multivariate_normal(mean, cov, N) + + key = random.PRNGKey(0) + out_jnp = random.multivariate_normal(key, mean=mean, cov=cov, shape=(N,)) + + var_onp = out_onp.var(axis=0) + var_jnp = out_jnp.var(axis=0) + self.assertAllClose(var_onp, var_jnp, rtol=1e-2, atol=1e-2, + check_dtypes=False) + + var_onp = onp.cov(out_onp, rowvar=False) + var_jnp = onp.cov(out_jnp, rowvar=False) + self.assertAllClose(var_onp, var_jnp, rtol=1e-2, atol=1e-2, + check_dtypes=False) + def testIssue222(self): x = random.randint(random.PRNGKey(10003), (), 0, 0) assert x == 0
multivariate_normal unexpected variances Hello! multivariate_normal does not produce the variances that I would expect. I checked the output of the multivariate_normal function in Numpy, which does provide the behaviour I expected. In the code, I perform a sanity check that the covariance matrix is indeed positive definite (symmetric, positive eigenvalues), get 10.000 samples and calculate the variances. ``` import jax.numpy as np import numpy as onp from jax import random N = 10000 cov_list = [[ 0.19, 0., -0.13, 0.], [ 0., 0.19, 0., -0.13], [-0.13, 0., 0.19, 0.], [ 0., -0.13, 0., 0.19]] cov_np = np.array(cov_list) cov_onp = onp.array(cov_list) mean_np = np.zeros(4) mean_onp = onp.zeros(4) positive_eig = (onp.linalg.eigvals(cov_onp) > 0.).all() symmetric = ((cov_onp - cov_onp.T) == 0.).all() print('Covariance matrix is symmetric:', symmetric, '\nAll eigenvalues are positive:', positive_eig) out_onp = onp.random.multivariate_normal(mean_onp, cov_onp, N) var = out_onp.var(axis=0) print('\nNumpy: Variance of samples \n', var) key = random.PRNGKey(0) out_np = random.multivariate_normal(key, mean=mean_np, cov=cov_np, shape=(N,)) var = out_np.var(axis=0) print('\nJax: Variance of samples \n', var) ``` The resulting output is as follows: ``` Covariance matrix is symmetric: True All eigenvalues are positive: True Numpy: Variance of samples [0.19001905 0.18978024 0.18641562 0.18503367] Jax: Variance of samples [0.28731692 0.28114378 0.09892643 0.10218105] ``` I get this result on two different machines (Ubuntu 16 and 18). **jax version used** jax 0.1.53 jaxlib 0.1.36 Any idea why this happens? Cheers Christian
Thanks for catching this. [There's a missing transpose in `random.multivariate_normal`](https://github.com/google/jax/blob/96677d9c6f549b643fd0cdf5664da2f22f45fbaf/jax/random.py#L523) (I think because I assumed the result would be lower-triangular, like in `numpy.linalg`, but imported the `cholesky` from `scipy.linalg`.) More concerning is that we didn't have a good enough test on `multivariate_normal` to catch this.
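A small sketch of the convention mismatch behind that fix (illustrating the math, not the library code): `numpy.linalg.cholesky` returns a lower-triangular `L` with `cov == L @ L.T`, so `z @ L.T` applied to standard-normal rows reproduces `cov`, whereas `scipy.linalg.cholesky` returns the upper-triangular factor by default and needs a transpose to be used the same way.
```python
import numpy as np

cov = np.array([[ 0.19, -0.13],
                [-0.13,  0.39]])
L = np.linalg.cholesky(cov)           # lower triangular, cov == L @ L.T
z = np.random.RandomState(0).normal(size=(100000, 2))
samples = z @ L.T                     # each row is distributed as N(0, cov)
print(np.cov(samples, rowvar=False))  # close to cov
```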
2019-12-17T21:20:41
google/jax
1882
google__jax-1882
[ "1858" ]
96677d9c6f549b643fd0cdf5664da2f22f45fbaf
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -2656,9 +2656,13 @@ def _gather(arr, treedef, static_idx, dynamic_idx): indexer = _index_to_gather(shape(arr), idx) # shared with _scatter_update y = arr - # We avoid generating a gather when indexer.gather_indices.size is empty - # unless indexer.slice_shape also corresponds to an empty array. - if indexer.gather_indices.size or not _prod(indexer.slice_shape): + # Avoid calling gather if the slice shape is empty, both as a fast path and to + # handle cases like zeros(0)[array([], int32)]. + if _prod(indexer.slice_shape) == 0: + return zeros(indexer.slice_shape, dtype=y.dtype) + + # We avoid generating a gather when indexer.gather_indices.size is empty. + if indexer.gather_indices.size: y = lax.gather(y, indexer.gather_indices, indexer.dnums, indexer.gather_slice_shape)
diff --git a/tests/lax_numpy_indexing_test.py b/tests/lax_numpy_indexing_test.py --- a/tests/lax_numpy_indexing_test.py +++ b/tests/lax_numpy_indexing_test.py @@ -223,6 +223,7 @@ def check_grads(f, args, order, atol=None, rtol=None, eps=None): IndexSpec(shape=(3, 4, 5), indexer=onp.array([0, 2, 0, 1])), IndexSpec(shape=(3,), indexer=onp.array([-1, 1])), IndexSpec(shape=(3,), indexer=onp.array([-2, -1])), + IndexSpec(shape=(0,), indexer=onp.array([], dtype=onp.int32)), ]), ("One2DIntArrayIndex", [IndexSpec(shape=(3,), indexer=onp.array([[0, 0]])), @@ -274,6 +275,7 @@ def check_grads(f, args, order, atol=None, rtol=None, eps=None): IndexSpec(shape=(3, 4, 5), indexer=onp.array([0, 2, 1])), IndexSpec(shape=(3,), indexer=onp.array([-1, 1])), IndexSpec(shape=(3,), indexer=onp.array([-2, -1])), + IndexSpec(shape=(0,), indexer=onp.array([], dtype=onp.int32)), ]), ("One2DIntArrayIndex", [IndexSpec(shape=(3,), indexer=onp.array([[0, 1]])),
Indexing into an empty array fails Indexing a non-empty array with an empty array of indices works fine: ``` import jax.numpy as np values = np.zeros(5, dtype=np.float32) indices = np.array([], dtype=np.int32) values[indices] -> DeviceArray([], dtype=float32) ``` But trying to index an empty array with an empty array of indices causes a runtime error prompting me to file an issue: ``` import jax.numpy as np values = np.zeros(0, dtype=np.float32) indices = np.array([], dtype=np.int32) values[indices] ... RuntimeError: Invalid argument: Slice size at index 0 in gather op is out of range, must be within [0, 1), got 1.: This is a bug in JAX's shape-checking rules; please report it! https://github.com/google/jax/issues ``` The same code works fine in vanilla numpy: ``` import numpy as np values = np.zeros(0, dtype=np.float32) indices = np.array([], dtype=np.int32) values[indices] -> array([], dtype=float32) ``` This is on jax 0.1.49.
2019-12-17T22:12:01
google/jax
1903
google__jax-1903
[ "1901" ]
178c0d821eac27e78308ffc401e6aaf1a8e2856f
diff --git a/jax/lax/lax.py b/jax/lax/lax.py --- a/jax/lax/lax.py +++ b/jax/lax/lax.py @@ -3232,6 +3232,9 @@ def _scatter_jvp(primals, tangents, update_jaxpr, update_consts, tangent_out = ad_util.zero return val_out, tangent_out + g_operand = ad.instantiate_zeros(operand, g_operand) + g_updates = ad.instantiate_zeros(updates, g_updates) + # If there are overlapping indices in the scatter, it is unspecified which # update "wins". So we use the following perhaps surprising scheme: # a) attach a positive ID to each update in updates, forming (value, id) pairs @@ -3298,8 +3301,6 @@ def _scatter_jvp(primals, tangents, update_jaxpr, update_consts, slice_sizes=slice_sizes) # c) mask off input JVP elements that do not correspond to a primal output. - g_operand = ad.instantiate_zeros(operand, g_operand) - g_updates = ad.instantiate_zeros(updates, g_updates) masked_g_operand = select(eq(scattered_ids, _zeros(scattered_ids)), g_operand, _zeros(g_operand)) masked_g_updates = select(eq(update_ids, gathered_update_ids),
diff --git a/tests/lax_test.py b/tests/lax_test.py --- a/tests/lax_test.py +++ b/tests/lax_test.py @@ -2465,6 +2465,16 @@ def testScatterGrad(self, arg_shape, dtype, idxs, update_shape, dnums, y = rng(update_shape, dtype) check_grads(scatter, (x, y), 2, ["fwd", "rev"], 1e-2, 1e-2, 1.) + def testScatterGradSymbolicZeroUpdate(self): + # https://github.com/google/jax/issues/1901 + def f(x): + n = x.shape[0] + y = onp.arange(n, dtype=x.dtype) + return jax.ops.index_update(x, onp.diag_indices(n), y) + rng = jtu.rand_default() + check_grads(f, (rng((5, 5), onp.float32),), 2, ["fwd", "rev"], 1e-2, 1e-2, + 1.) + def testStopGradient(self): def f(x): return lax.sin(x) * lax.cos(lax.stop_gradient(x))
Error with jax.grad when using jax.ops.index_update. (Appears to be different than issue #1104) Taking the gradient through jax.ops.index_update fails in certain cases. Code to reproduce: ``` def fun(): def _fun(x): n = x.shape[0] y = np.array(list(range(n))) x = jax.ops.index_update(x, np.diag_indices(n), y) return np.sum(x) x = np.ones((5,5)) print(_fun(x)) # Succeeds return jax.grad(_fun)(x) fun() ``` Interestingly, the above function succeeds when using `jax.ops.index_update(x, np.diag_indices(n), y - 0 * np.diag(x))`, so my guess is the lack of dependency on `x` is somehow the problem. Stack trace ``` <ipython-input-2-176922dbb306> in fun() 8 x = np.ones((5,5)) 9 print(_fun(x)) # Succeeds ---> 10 return jax.grad(_fun)(x) /usr/local/lib/python3.6/dist-packages/jax/api.py in grad_f(*args, **kwargs) 345 @wraps(fun, docstr=docstr, argnums=argnums) 346 def grad_f(*args, **kwargs): --> 347 _, g = value_and_grad_f(*args, **kwargs) 348 return g 349 /usr/local/lib/python3.6/dist-packages/jax/api.py in value_and_grad_f(*args, **kwargs) 400 f_partial, dyn_args = _argnums_partial(f, argnums, args) 401 if not has_aux: --> 402 ans, vjp_py = vjp(f_partial, *dyn_args) 403 else: 404 ans, vjp_py, aux = vjp(f_partial, *dyn_args, has_aux=True) /usr/local/lib/python3.6/dist-packages/jax/api.py in vjp(fun, *primals, **kwargs) 1255 if not has_aux: 1256 flat_fun, out_tree = flatten_fun_nokwargs(fun, in_tree) -> 1257 out_primal, out_vjp = ad.vjp(flat_fun, primals_flat) 1258 out_tree = out_tree() 1259 else: /usr/local/lib/python3.6/dist-packages/jax/interpreters/ad.py in vjp(traceable, primals, has_aux) 105 def vjp(traceable, primals, has_aux=False): 106 if not has_aux: --> 107 out_primals, pvals, jaxpr, consts = linearize(traceable, *primals) 108 else: 109 out_primals, pvals, jaxpr, consts, aux = linearize(traceable, *primals, has_aux=True) /usr/local/lib/python3.6/dist-packages/jax/interpreters/ad.py in linearize(traceable, *primals, **kwargs) 94 _, in_tree = tree_flatten(((primals, primals), {})) 95 jvpfun_flat, out_tree = flatten_fun(jvpfun, in_tree) ---> 96 jaxpr, out_pvals, consts = pe.trace_to_jaxpr(jvpfun_flat, in_pvals) 97 pval_primals, pval_tangents = tree_unflatten(out_tree(), out_pvals) 98 aval_primals, const_primals = unzip2(pval_primals) /usr/local/lib/python3.6/dist-packages/jax/interpreters/partial_eval.py in trace_to_jaxpr(fun, pvals, **kwargs) 341 with new_master(JaxprTrace) as master: 342 fun = trace_to_subjaxpr(fun, master, instantiate) --> 343 jaxpr, (out_pvals, consts, env) = fun.call_wrapped(pvals) 344 assert not env 345 del master /usr/local/lib/python3.6/dist-packages/jax/linear_util.py in call_wrapped(self, *args, **kwargs) 151 152 del gen --> 153 ans = self.f(*args, **dict(self.params, **kwargs)) 154 del args 155 while stack: <ipython-input-2-176922dbb306> in _fun(x) 3 n = x.shape[0] 4 y = np.array(list(range(n))) ----> 5 x = jax.ops.index_update(x, np.diag_indices(n), y) 6 return np.sum(x) 7 /usr/local/lib/python3.6/dist-packages/jax/ops/scatter.py in index_update(x, idx, y) 247 [1., 1., 1., 6., 6., 6.]], dtype=float32) 248 """ --> 249 return _scatter_update(x, idx, y, lax.scatter) 250 251 def segment_sum(data, segment_ids, num_segments=None): /usr/local/lib/python3.6/dist-packages/jax/ops/scatter.py in _scatter_update(x, idx, y, scatter_op) 55 # is more or less a transpose of the gather equivalent. 
56 treedef, static_idx, dynamic_idx = np._split_index_for_jit(idx) ---> 57 return _scatter_impl(x, y, scatter_op, treedef, static_idx, dynamic_idx) 58 59 /usr/local/lib/python3.6/dist-packages/jax/ops/scatter.py in _scatter_impl(x, y, scatter_op, treedef, static_idx, dynamic_idx) 81 scatter_dims_to_operand_dims=indexer.dnums.start_index_map 82 ) ---> 83 return scatter_op(x, indexer.gather_indices, y, dnums) 84 85 /usr/local/lib/python3.6/dist-packages/jax/lax/lax.py in scatter(operand, scatter_indices, updates, dimension_numbers) 840 operand, scatter_indices, updates, update_jaxpr=jaxpr, 841 update_consts=consts, dimension_numbers=dimension_numbers, --> 842 updates_shape=updates.shape) 843 844 def index_take(src, idxs, axes): /usr/local/lib/python3.6/dist-packages/jax/core.py in bind(self, *args, **kwargs) 153 154 tracers = map(top_trace.full_raise, args) --> 155 out_tracer = top_trace.process_primitive(self, tracers, kwargs) 156 if self.multiple_results: 157 return map(full_lower, out_tracer) /usr/local/lib/python3.6/dist-packages/jax/interpreters/ad.py in process_primitive(self, primitive, tracers, params) 220 "Forward-mode differentiation rule for '{}' not implemented" 221 .format(primitive)) --> 222 primal_out, tangent_out = jvp(primals_in, tangents_in, **params) 223 if primitive.multiple_results: 224 return [JVPTracer(self, x, t) for x, t in zip(primal_out, tangent_out)] /usr/local/lib/python3.6/dist-packages/jax/lax/lax.py in _scatter_jvp(primals, tangents, update_jaxpr, update_consts, dimension_numbers, updates_shape) 3286 g_operand, _zeros(g_operand)) 3287 masked_g_updates = select(eq(update_ids, gathered_update_ids), -> 3288 g_updates, _zeros(g_updates)) 3289 3290 # d) perform a scatter-add to compute the tangent output. /usr/local/lib/python3.6/dist-packages/jax/lax/lax.py in select(pred, on_true, on_false) 648 operator. 
649 """ --> 650 return select_p.bind(pred, on_true, on_false) 651 652 def slice(operand, start_indices, limit_indices, strides=None): /usr/local/lib/python3.6/dist-packages/jax/core.py in bind(self, *args, **kwargs) 150 top_trace = find_top_trace(args) 151 if top_trace is None: --> 152 return self.impl(*args, **kwargs) 153 154 tracers = map(top_trace.full_raise, args) /usr/local/lib/python3.6/dist-packages/jax/interpreters/xla.py in apply_primitive(prim, *args, **params) 138 """Impl rule that compiles and runs a single primitive 'prim' using XLA.""" 139 abstract_args = map(abstractify, args) --> 140 compiled_fun = xla_primitive_callable(prim, *abstract_args, **params) 141 return compiled_fun(*args) 142 /usr/local/lib/python3.6/dist-packages/jax/interpreters/xla.py in xla_primitive_callable(prim, *abstract_args, **params) 144 def xla_primitive_callable(prim, *abstract_args, **params): 145 backend = params.get('backend', None) --> 146 aval_out = prim.abstract_eval(*abstract_args, **params) 147 if prim.multiple_results: 148 handlers = tuple(map(aval_to_result_handler, aval_out)) /usr/local/lib/python3.6/dist-packages/jax/lax/lax.py in standard_abstract_eval(shape_rule, dtype_rule, *args, **kwargs) 1486 return ShapedArray(shape_rule(*args, **kwargs), dtype_rule(*args, **kwargs)) 1487 elif least_specialized is ShapedArray: -> 1488 return ShapedArray(shape_rule(*args, **kwargs), dtype_rule(*args, **kwargs)) 1489 elif least_specialized is UnshapedArray: 1490 return UnshapedArray(dtype_rule(*args, **kwargs)) /usr/local/lib/python3.6/dist-packages/jax/lax/lax.py in _select_shape_rule(pred, on_true, on_false) 2573 msg = ("select pred must be scalar or have the same shape as on_true and " 2574 "on_false, got pred shape {} for on_true and on_false of shape {}.") -> 2575 raise TypeError(msg.format(pred.shape, on_true.shape)) 2576 return on_true.shape 2577 TypeError: select pred must be scalar or have the same shape as on_true and on_false, got pred shape (5,) for on_true and on_false of shape (1, 5). ```
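For readers hitting this on affected versions, here is a hedged sketch of the reporter's workaround, written with the `.at[...]` indexed-update API that has since replaced `jax.ops.index_update`; the `- 0 * jnp.diag(x)` term exists only to tie the update values to `x` so their tangent is not a symbolic zero.

```python
import jax
import jax.numpy as jnp

def f(x):
  n = x.shape[0]
  y = jnp.arange(n, dtype=x.dtype) - 0 * jnp.diag(x)  # depends on x, per the workaround
  return jnp.sum(x.at[jnp.diag_indices(n)].set(y))

print(jax.grad(f)(jnp.ones((5, 5))))
```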
2019-12-20T20:31:51
google/jax
1905
google__jax-1905
[ "1900" ]
d57f16f67d01d8c6ea13223337ba3d23ab10edf7
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -999,11 +999,12 @@ def _maybe_numpy_1_13_isclose_behavior(a, out): # The `jit` on `where` exists to avoid materializing constants in cases like # `np.where(np.zeros(1000), 7, 4)`. In op-by-op mode, we don't want to # materialize the broadcast forms of scalar arguments. -@_wraps(onp.where, update_doc=False) @jit -def where(condition, x=None, y=None): +def _where(condition, x=None, y=None): if x is None or y is None: - raise ValueError("Must use the three-argument form of where().") + raise ValueError("Either both or neither of the x and y arguments should " + "be provided to jax.numpy.where, got {} and {}." + .format(x, y)) if not issubdtype(_dtype(condition), bool_): condition = lax.ne(condition, zeros_like(condition)) x, y = _promote_dtypes(x, y) @@ -1011,6 +1012,21 @@ def where(condition, x=None, y=None): return lax.select(condition, x, y) if onp.size(x) else x +_WHERE_DOC = """\ +At present, JAX does not support JIT-compilation of the single-argument form +of :py:func:`jax.numpy.where` because its output shape is data-dependent. The +three-argument form does not have a data-dependent shape and can be JIT-compiled +successfully. +""" + +@_wraps(onp.where, update_doc=False, lax_description=_WHERE_DOC) +def where(condition, x=None, y=None): + if x is None and y is None: + return nonzero(asarray(condition)) + else: + return _where(condition, x, y) + + @_wraps(onp.select) def select(condlist, choicelist, default=0): if len(condlist) != len(choicelist): @@ -1391,6 +1407,24 @@ def count_nonzero(a, axis=None): dtype=dtypes.canonicalize_dtype(onp.int_)) +_NONZERO_DOC = """\ +At present, JAX does not support JIT-compilation of :py:func:`jax.numpy.nonzero` +because its output shape is data-dependent. +""" + +@_wraps(onp.nonzero, lax_description=_NONZERO_DOC) +def nonzero(a): + # Note: this function cannot be jitted because its output has a dynamic + # shape. + a = atleast_1d(a) + dims = shape(a) + ndims = len(dims) + ds = [lax.broadcasted_iota(int_, dims + (1,), i) for i in range(ndims)] + d = concatenate(ds, axis=-1) + indexes = d[a != 0] + return tuple(indexes[..., i] for i in range(ndims)) + + def _make_nan_reduction(onp_reduction, np_reduction, init_val, nan_if_all_nan): @_wraps(onp_reduction) def nan_reduction(a, axis=None, out=None, keepdims=False, **kwargs):
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -583,6 +583,18 @@ def testCountNonzero(self, shape, dtype, axis): self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=False) self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True) + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": "_shape={}".format( + jtu.format_shape_dtype_string(shape, dtype)), + "shape": shape, "dtype": dtype} + for shape in all_shapes for dtype in all_dtypes)) + def testNonzero(self, shape, dtype): + rng = jtu.rand_some_zero() + onp_fun = lambda x: onp.nonzero(x) + lnp_fun = lambda x: lnp.nonzero(x) + args_maker = lambda: [rng(shape, dtype)] + self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=False) + @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "{}_inshape={}_axis={}".format( rec.test_name.capitalize(), @@ -1929,6 +1941,18 @@ def onp_fun(*args): self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True, rtol=tol) + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": "_shape={}".format( + jtu.format_shape_dtype_string(shape, dtype)), + "shape": shape, "dtype": dtype} + for shape in all_shapes for dtype in all_dtypes)) + def testWhereOneArgument(self, shape, dtype): + rng = jtu.rand_some_zero() + onp_fun = lambda x: onp.where(x) + lnp_fun = lambda x: lnp.where(x) + args_maker = lambda: [rng(shape, dtype)] + self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=False) + @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_{}".format("_".join( jtu.format_shape_dtype_string(shape, dtype) @@ -1937,7 +1961,7 @@ def onp_fun(*args): for shapes in filter(_shapes_are_broadcast_compatible, CombosWithReplacement(all_shapes, 3)) for dtypes in CombosWithReplacement(all_dtypes, 3))) - def testWhere(self, rng_factory, shapes, dtypes): + def testWhereThreeArgument(self, rng_factory, shapes, dtypes): rng = rng_factory() args_maker = self._GetArgsMaker(rng_factory(), shapes, dtypes) def onp_fun(cond, x, y):
How to deal with the re-implementation of numpy functions depending on C functions? As per title, is there a procedure (and if so, what is it) to re-implement numpy functions that depend on C functions, e.g. [`numpy.nonzero`](https://github.com/numpy/numpy/blob/v1.17.0/numpy/core/fromnumeric.py#L1759-L1849), which in turn depends on [`PyObject_NonZero`](https://github.com/numpy/numpy/blob/4b1cd4e118e543260cbe9c7fba51a1de37e03618/numpy/core/src/multiarray/item_selection.c#L2215-L2475)?
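The patch above shows the general answer for this case: rewrite the function in terms of existing JAX primitives and accept that it cannot be `jit`-compiled when its output shape depends on the data. A stripped-down, hypothetical 1-D version of that pattern (`nonzero_1d` is an illustrative name, not a JAX API):

```python
import jax.numpy as jnp

def nonzero_1d(a):
  idx = jnp.arange(a.shape[0])
  return (idx[a != 0],)  # boolean mask gives a data-dependent output shape

print(nonzero_1d(jnp.array([0, 3, 0, 5])))  # indices 1 and 3
```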
2019-12-20T21:36:46
google/jax
1913
google__jax-1913
[ "1907" ]
a14a05d1f241f19482b75423d3d7eb2acea071df
diff --git a/jax/interpreters/partial_eval.py b/jax/interpreters/partial_eval.py --- a/jax/interpreters/partial_eval.py +++ b/jax/interpreters/partial_eval.py @@ -524,8 +524,11 @@ def _remat_partial_eval(trace, f, tracers, params): jaxpr_converted = convert_freevars_jaxpr(jaxpr) in_avals = ([raise_to_shaped(t.pval[0]) for t in env] + [raise_to_shaped(pv) for pv in in_pvs]) - out_avals = [raise_to_shaped(pv if pv is not None else core.get_aval(const)) - for pv, const in zip(out_pvs, out_pval_consts1)] + out_avals = [raise_to_shaped(pv if pv is not None + else abstract_unit if var is unitvar + else get_aval(var.val) if type(var) is Literal + else get_aval(const)) + for var, pv, const in zip(jaxpr.outvars, out_pvs, out_pval_consts1)] typed_jaxpr = core.TypedJaxpr(jaxpr_converted, consts, in_avals, out_avals) in_unknowns = [t.pval[0] is not None for t in it.chain(env, tracers)] jaxpr_1, jaxpr_2, out_unknowns = partial_eval_jaxpr(typed_jaxpr, in_unknowns, False) @@ -565,7 +568,7 @@ def _dce_jaxpr(typed_jaxpr, outputs): # TODO(mattjj): better DCE jaxpr = typed_jaxpr.jaxpr outvars, out_avals = jaxpr.outvars, typed_jaxpr.out_avals - out_pairs = [(var, aval) if output else (core.unitvar, core.abstract_unit) + out_pairs = [(var, aval) if output else (unitvar, core.abstract_unit) for var, aval, output in zip(outvars, out_avals, outputs)] new_outvars, new_out_avals = unzip2(out_pairs)
diff --git a/tests/api_test.py b/tests/api_test.py --- a/tests/api_test.py +++ b/tests/api_test.py @@ -1533,6 +1533,35 @@ def binom_checkpoint(funs): self.assertAllClose(f1(x), f2(x), check_dtypes=False) self.assertAllClose(api.grad(f1)(x), api.grad(f2)(x), check_dtypes=False) + def test_remat_symbolic_zeros(self): + # code from https://github.com/google/jax/issues/1907 + test_remat = True + test_scan = True + + key = jax.random.PRNGKey(0) + key, split = jax.random.split(key) + n = 5 + + def func(D0): + def shift(R, dR, **unused_kwargs): + return R + dR + + def apply_fn(R): + return D0 * R + + Rinit = jax.random.uniform(split, (n,3), minval=0.0, maxval=5.0, + dtype=np.float32) + + def move(R,i): + F = apply_fn(R) + return shift(R, 0.001 * F), np.array([0.]) + + move = api.remat(move) + R, temp = lax.scan(move, Rinit, np.arange(2)) + return R[0, 0] + + api.grad(func)(5.0) # doesn't crash + def test_trivial_computations(self): x = np.array([1, 2, 3]) y = api.jit(lambda x: x)(x)
remat with scan First, let me express my excitement with the new remat functionality for automatic checkpointing! Really awesome :) As I begin testing this out for my purposes, I am running into an issue that pops up when using lax.scan, though I'm not sure exactly where its coming from. I created a minimal example (see below), which I have tested on Colab. When both of the flags `test_remat` and `test_scan` are set to `True`, an error ultimately saying `TypeError: <class 'jax.ad_util.Zero'> is not a valid Jax type` appears. I should note that on a (much) more complicated version of this I was getting a NotImplemented error that I think is related, but I wasn't able to reproduce that in a minimal code so its possible its something else. I'd be happy to share if that would help. Any ideas on what is going on? Thanks in advance! Here's the minimal example: ``` !pip install --upgrade -q https://storage.googleapis.com/jax-releases/cuda$(echo $CUDA_VERSION | sed -e 's/\.//' -e 's/\..*//')/jaxlib-$(pip search jaxlib | grep -oP '[0-9\.]+' | head -n 1)-cp36-none-linux_x86_64.whl !pip install --upgrade -q jax import numpy as onp import jax.numpy as np from jax import random, grad, lax, remat ######################################## #Error only when both of these are True# ######################################## test_remat = True test_scan = True key = random.PRNGKey(0) key, split = random.split(key) n=5 def func(D0): def shift(R, dR, **unused_kwargs): return R + dR def apply_fn(R): return D0*R Rinit = random.uniform(split, (n,3), minval=0.0, maxval=5.0, dtype=np.float32) def move(R,i): F = apply_fn(R) return shift(R, 0.001*F), np.array([0.]) if(test_remat): move = remat(move) if(test_scan): R, temp = lax.scan(move, Rinit, np.arange(2)) else: R, temp = move(move(Rinit,0)[0],0) return R[0,0] grad(func)(5.0) ```
Thanks for the enthusiasm, and the clear repro! This error pops up when we aren't handling the AD system's symbolic zeros correctly. Usually the fix is trivial once we spot it...
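For readers unfamiliar with the term, here is a small, self-contained illustration (not the fix itself) of the "symbolic zero" situation: the second output of `move` in the repro is a constant, so its forward-mode tangent is identically zero; JAX represents such tangents symbolically inside its transformation rules, and a rule that forgets to handle that case can produce errors like the one above.

```python
import jax
import jax.numpy as jnp

def move(R):
  return R + 0.001 * R, jnp.array([0.])  # second output does not depend on R

_, tangents = jax.jvp(move, (jnp.ones(3),), (jnp.ones(3),))
print(tangents[1])  # the zero tangent of the constant output, materialized for the user
```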
2019-12-23T20:48:51
google/jax
1916
google__jax-1916
[ "1914", "1914" ]
3bf12f6ecde817f8e8f2c8ec6c6d7bc9e1a2a0b0
diff --git a/jax/interpreters/pxla.py b/jax/interpreters/pxla.py --- a/jax/interpreters/pxla.py +++ b/jax/interpreters/pxla.py @@ -360,7 +360,7 @@ def __getitem__(self, idx): ids = self._ids() device_buffer = self.device_buffers[ids[idx]] aval = ShapedArray(self.aval.shape[1:], self.aval.dtype) - handler = xla.aval_to_result_handler(aval) + handler = xla.aval_to_result_handler(None, aval) return handler(device_buffer) else: return super(ShardedDeviceArray, self).__getitem__(idx) diff --git a/jax/interpreters/xla.py b/jax/interpreters/xla.py --- a/jax/interpreters/xla.py +++ b/jax/interpreters/xla.py @@ -70,14 +70,15 @@ def aval_to_xla_shape(aval): xla_shape_handlers[ShapedArray] = lambda a: xc.Shape.array_shape(a.dtype, a.shape) xla_shape_handlers[ConcreteArray] = lambda a: xc.Shape.array_shape(a.dtype, a.shape) -def aval_to_result_handler(aval): +def aval_to_result_handler(device, aval): try: - return xla_result_handlers[type(aval)](aval) + return xla_result_handlers[type(aval)](device, aval) except KeyError: raise TypeError("No xla_result_handler for type: {}".format(type(aval))) xla_result_handlers = {} -xla_result_handlers[core.AbstractUnit] = lambda _: lambda _: core.unit -def array_result_handler(aval): return partial(DeviceArray, raise_to_shaped(aval)) +xla_result_handlers[core.AbstractUnit] = lambda _, __: lambda _: core.unit +def array_result_handler(device, aval): + return partial(DeviceArray, raise_to_shaped(aval), device) xla_result_handlers[ShapedArray] = array_result_handler xla_result_handlers[ConcreteArray] = array_result_handler @@ -147,11 +148,6 @@ def _make_abstract_python_scalar(typ, _): ### op-by-op execution -def apply_primitive(prim, *args, **params): - """Impl rule that compiles and runs a single primitive 'prim' using XLA.""" - compiled_fun = xla_primitive_callable(prim, *map(arg_spec, args), **params) - return compiled_fun(*args) - def arg_spec(x): aval = abstractify(x) try: @@ -159,33 +155,51 @@ def arg_spec(x): except: return aval, None +def apply_primitive(prim, *args, **params): + """Impl rule that compiles and runs a single primitive 'prim' using XLA.""" + compiled_fun = xla_primitive_callable(prim, *map(arg_spec, args), **params) + return compiled_fun(*args) + @cache() def xla_primitive_callable(prim, *arg_specs, **params): - avals, devices = unzip2(arg_specs) - # TODO(mattjj): make Device hashable instead of handling pairs here - try: - device, = set(d for d in devices if d is not None) or (None,) - except ValueError: - msg = "primitive arguments must be colocated on the same device, got {}" - names = ("{}({})".format(d[0].__name__, d[1]) for d in devices if d is not None) - raise ValueError(msg.format(", ".join(names))) - else: - all_devices = it.chain(xb.devices(), xb.devices('cpu')) - device = device and next(d for d in all_devices if (type(d), d.id) == device) + avals, arg_devices = unzip2(arg_specs) + device = _device_from_arg_devices(arg_devices) backend = xb.get_device_backend(device) aval_out = prim.abstract_eval(*avals, **params) - if prim.multiple_results: - handlers = tuple(map(aval_to_result_handler, aval_out)) - handle_result = lambda xs: tuple(h(x) for h, x in zip(handlers, xs.destructure())) + if not prim.multiple_results: + handle_result = aval_to_result_handler(device, aval_out) else: - handle_result = aval_to_result_handler(aval_out) + handlers = tuple(map(partial(aval_to_result_handler, device), aval_out)) + handle_result = lambda xs: tuple(h(x) for h, x in zip(handlers, xs.destructure())) tuple_args = len(avals) > 100 built_c = 
primitive_computation(prim, backend, tuple_args, *avals, **params) - options = xb.get_compile_options(device_assignment=(device.id,) if device else None) + options = xb.get_compile_options(device_assignment=device and (device.id,)) compiled = built_c.Compile(compile_options=options, backend=backend) return partial(_execute_compiled_primitive, prim, compiled, backend, tuple_args, handle_result) +# TODO(mattjj): make Device instances hashable instead of handling pairs here +def _device_from_arg_devices(devices): + """Given devices of inputs, determine where to perform a computation. + + Args: + devices: list where each element is a either a pair consisting of a device + class and an int id (representing a Device instance) or a None. + Returns: + A Device instance or None. + Raises: + ValueError if input devices are inconsistent. + """ + try: + device, = set(d for d in devices if d is not None) or (None,) + except ValueError: + msg = "primitive arguments must be colocated on the same device, got {}" + names = ("{}({})".format(d[0].__name__, d[1]) for d in devices if d is not None) + raise ValueError(msg.format(", ".join(names))) + else: + all_devices = it.chain(xb.devices(), xb.devices('cpu')) + return device and next(d for d in all_devices if (type(d), d.id) == device) + @cache() def primitive_computation(prim, backend, tuple_args, *avals, **params): c = xb.make_computation_builder("primitive_computation_{}".format(prim.name)) @@ -425,7 +439,7 @@ def eqn_collectives(eqn): def _xla_call_impl(fun, *args, **params): device = params['device'] backend = params['backend'] - compiled_fun = _xla_callable(fun, device, backend, *map(abstractify, args)) + compiled_fun = _xla_callable(fun, device, backend, *map(arg_spec, args)) try: return compiled_fun(*args) except FloatingPointError: @@ -434,33 +448,38 @@ def _xla_call_impl(fun, *args, **params): return fun.call_wrapped(*args) # probably won't return @lu.cache -def _xla_callable(fun, device, backend, *abstract_args): +def _xla_callable(fun, device, backend, *arg_specs): + if device is not None and backend is not None: + raise ValueError("can't specify both a device and a backend for jit, " + "got device={} and backend={}".format(device, backend)) + + abstract_args, arg_devices = unzip2(arg_specs) pvals = [pe.PartialVal((aval, core.unit)) for aval in abstract_args] with core.new_master(pe.StagingJaxprTrace, True) as master: jaxpr, (pvals, consts, env) = pe.trace_to_subjaxpr(fun, master, False).call_wrapped(pvals) assert not env # no subtraces here del master, env _map(prefetch, it.chain(consts, jaxpr_literals(jaxpr))) - result_handlers = tuple(map(_pval_to_result_handler, pvals)) + + nreps = jaxpr_replicas(jaxpr) + device = _xla_callable_device(nreps, backend, device, arg_devices) + result_handlers = tuple(map(partial(_pval_to_result_handler, device), pvals)) # Computations that only produce constants and/or only rearrange their inputs, # which are often produced from partial evaluation, don't need compilation, # and don't need to force their (potentially lazy) arguments. 
if not jaxpr.eqns: - device = _get_device(device, backend) + device = device or xb.get_backend(None).get_default_device_assignment(1)[0] return partial(_execute_trivial, jaxpr, device, consts, result_handlers) log_priority = logging.WARNING if FLAGS.jax_log_compiles else logging.DEBUG logging.log(log_priority, "Compiling {} for args {}.".format(fun.__name__, abstract_args)) - nreps = jaxpr_replicas(jaxpr) if nreps > xb.device_count(backend): msg = ("compiling computation that requires {} replicas, but only {} XLA " "devices are available") raise ValueError(msg.format(nreps, xb.device_count(backend))) - axis_env = AxisEnv(nreps, [], []) - if xb.host_count() > 1 and (nreps > 1 or jaxpr_has_pmap(jaxpr)): raise NotImplementedError( "jit of multi-host pmap not implemented (and jit-of-pmap can cause " @@ -471,11 +490,10 @@ def _xla_callable(fun, device, backend, *abstract_args): c = xb.make_computation_builder("jit_{}".format(fun.__name__)) xla_consts = _map(c.Constant, consts) xla_args = _xla_callable_args(c, abstract_args, tuple_args) - out_nodes = jaxpr_subcomp(c, jaxpr, backend, axis_env, xla_consts, (), *xla_args) + out_nodes = jaxpr_subcomp(c, jaxpr, backend, AxisEnv(nreps, [], []), + xla_consts, (), *xla_args) built = c.Build(c.Tuple(*out_nodes)) - if device is not None and nreps > 1: - raise ValueError("can't specify device assignment for jit-of-pmap") options = xb.get_compile_options( num_replicas=nreps, device_assignment=(device.id,) if device else None) compiled = built.Compile(compile_options=options, backend=xb.get_backend(backend)) @@ -485,6 +503,22 @@ def _xla_callable(fun, device, backend, *abstract_args): else: return partial(_execute_replicated, compiled, backend, result_handlers, tuple_args) +def _xla_callable_device(nreps, backend, device, arg_devices): + if nreps > 1: + if device is not None or backend is not None: + raise ValueError("can't specify device or backend for jit-of-pmap, " + "got device={} and backend={}".format(device, backend)) + return None + else: + if device is None and backend is None: + return _device_from_arg_devices(arg_devices) + elif device is not None and backend is None: + return device + elif device is None and backend is not None: + return xb.get_backend(backend).get_default_device_assignment(1)[0] + else: + assert False # Unreachable given the error check in _xla_callable + def _xla_callable_args(c, avals, tuple_args): if not tuple_args: xla_args = [c.ParameterWithShape(aval_to_xla_shape(a)) @@ -499,12 +533,12 @@ def _xla_callable_args(c, avals, tuple_args): assert next(xla_inputs, None) is None return xla_args -def _pval_to_result_handler(pval): +def _pval_to_result_handler(device, pval): pv, const = pval if pv is None: return lambda _: const else: - return aval_to_result_handler(pv) + return aval_to_result_handler(device, pv) def _execute_compiled(compiled, backend, handlers, tuple_args, *args): device, = compiled.local_devices() @@ -631,7 +665,7 @@ class Token(object): pass pytype_aval_mappings[Token] = lambda _: abstract_token core.pytype_aval_mappings[Token] = lambda _: abstract_token xla_shape_handlers[AbstractToken] = lambda _: xc.Shape.token_shape() -xla_result_handlers[AbstractToken] = lambda _: lambda _: token +xla_result_handlers[AbstractToken] = lambda _, __: lambda _: token canonicalize_dtype_handlers[Token] = identity @@ -671,11 +705,9 @@ class DeviceArray(DeviceValue): __slots__ = ["_npy_value", "_device"] __array_priority__ = 100 - def __init__(self, aval, device_buffer): + def __init__(self, aval, device, device_buffer): self.aval 
= aval self.device_buffer = device_buffer - # TODO(mattjj): make Device hashable - device = device_buffer.device() self._device = device and (type(device), device.id) self._npy_value = None @@ -835,7 +867,7 @@ def _device_put_impl(x, device=None): except TypeError: raise TypeError("Argument '{}' of type {} is not a valid JAX type" .format(x, type(x))) - handler = aval_to_result_handler(a) + handler = aval_to_result_handler(device, a) return handler(device_put(x, device)) device_put_p = core.Primitive('device_put') @@ -887,13 +919,13 @@ def _instantiate_device_constant(const, device=None, backend=None, cutoff=1e6): # dispatch an XLA Computation to build the constant on the device if it's # large, or alternatively build it on the host and transfer it if it's small assert isinstance(const, DeviceConstant) + backend = xb.get_backend(device.platform) if device else xb.get_backend(backend) if const.size > cutoff: c = xb.make_computation_builder("constant_instantiating_computation") xla_const = const.constant_handler(c, const) device_assignment = (device.id,) if device else None opts = xb.get_compile_options(device_assignment=device_assignment) - compiled = c.Build(xla_const).Compile((), opts, backend=xb.get_backend(backend)) + compiled = c.Build(xla_const).Compile((), opts, backend=backend) return compiled.Execute(()) else: - return xc.Buffer.from_pyval(onp.asarray(const), device, - backend=xb.get_backend(backend)) + return xc.Buffer.from_pyval(onp.asarray(const), device, backend=backend)
diff --git a/tests/fft_test.py b/tests/fft_test.py --- a/tests/fft_test.py +++ b/tests/fft_test.py @@ -79,8 +79,7 @@ def testFftn(self, inverse, shape, dtype, axes, rng_factory): self._CompileAndCheck(np_fn, args_maker, check_dtypes=True) # Test gradient for differentiable types. if dtype in inexact_dtypes: - # TODO(skye): can we be more precise? - tol = 1e-1 + tol = 0.15 # TODO(skye): can we be more precise? jtu.check_grads(np_fn, args_maker(), order=1, atol=tol, rtol=tol) jtu.check_grads(np_fn, args_maker(), order=2, atol=tol, rtol=tol) diff --git a/tests/multi_device_test.py b/tests/multi_device_test.py --- a/tests/multi_device_test.py +++ b/tests/multi_device_test.py @@ -62,30 +62,77 @@ def test_computation_follows_data(self): if len(jax.devices()) < 2: raise SkipTest("test requires multiple devices") + # computation follows data explicitly placed on device 1 + x = jax.device_put(1, jax.devices()[1]) + y = x.reshape((1, 1)) + self.assertEqual(y.device_buffer.device(), jax.devices()[1]) + z = y.reshape((1, 1)) + self.assertEqual(z.device_buffer.device(), jax.devices()[1]) + + # multiple arguments explicitly placed on device 0 are compatible x = jax.device_put(1, jax.devices()[0]) y = jax.device_put(2, jax.devices()[0]) z = x + y self.assertEqual(z, 3) self.assertEqual(z.device_buffer.device(), jax.devices()[0]) + w = z + x + self.assertEqual(w.device_buffer.device(), jax.devices()[0]) + f = jax.jit(lambda x: x + 1, device=jax.devices()[0]) + z = f(1) + f(2) + self.assertEqual(z, 5) + self.assertEqual(z.device_buffer.device(), jax.devices()[0]) + w = z + z + self.assertEqual(z.device_buffer.device(), jax.devices()[0]) + + # multiple arguments explicitly placed on device 1 are compatible x = jax.device_put(1, jax.devices()[1]) y = jax.device_put(2, jax.devices()[1]) z = x + y self.assertEqual(z, 3) self.assertEqual(z.device_buffer.device(), jax.devices()[1]) + w = z + x + self.assertEqual(z.device_buffer.device(), jax.devices()[1]) - x = jax.device_put(1, jax.devices()[1]) - y = 4 - z = x + y + f = jax.jit(lambda x: x + 1, device=jax.devices()[1]) + z = f(1) + f(2) self.assertEqual(z, 5) self.assertEqual(z.device_buffer.device(), jax.devices()[1]) + w = z + z + self.assertEqual(z.device_buffer.device(), jax.devices()[1]) - x = jax.device_put(1, jax.devices()[1]) - y = np.ones(3) - z = x + y + # an argument explicitly placed on one device still works with values that + # aren't device-committed (and computaiton follows device-committed values) + z = jax.device_put(1., jax.devices()[1]) + 4 + self.assertEqual(z, 5.) + self.assertEqual(z.device_buffer.device(), jax.devices()[1]) + w = z + 3 + self.assertEqual(w, 8.) 
+ self.assertEqual(w.device_buffer.device(), jax.devices()[1]) + + z = jax.device_put(1., jax.devices()[1]) + np.ones(3) self.assertAllClose(z, 1 + onp.ones(3), check_dtypes=False) self.assertEqual(z.device_buffer.device(), jax.devices()[1]) + w = z - 3 + self.assertAllClose(w, 1 + onp.ones(3) - 3, check_dtypes=False) + self.assertEqual(w.device_buffer.device(), jax.devices()[1]) + + z = jax.device_put(1., jax.devices()[1]) + np.array([1, 2]) + self.assertAllClose(z, 1 + onp.array([1, 2]), check_dtypes=False) + self.assertEqual(z.device_buffer.device(), jax.devices()[1]) + w = z * 2 + self.assertAllClose(w, (1 + onp.array([1, 2])) * 2, check_dtypes=False) + self.assertEqual(w.device_buffer.device(), jax.devices()[1]) + + z = jax.device_put(1., jax.devices()[1]) + jax.device_put(2) + self.assertAllClose(z, 3., check_dtypes=False) + self.assertEqual(z.device_buffer.device(), jax.devices()[1]) + + z = jax.device_put(1., jax.devices()[1]) + jax.jit(lambda x: x + 1)(3) + self.assertAllClose(z, 5., check_dtypes=False) + self.assertEqual(z.device_buffer.device(), jax.devices()[1]) + # multiple arguments explicitly placed on distinct devices cause errors x = jax.device_put(1, jax.devices()[0]) y = jax.device_put(2, jax.devices()[1]) self.assertRaisesRegex( @@ -93,9 +140,12 @@ def test_computation_follows_data(self): "primitive arguments must be colocated on the same device", lambda: x + y) - x = jax.device_put(1, jax.devices()[1]) - y = x.reshape((1, 1)) - self.assertEqual(y.device_buffer.device(), jax.devices()[1]) + f = jax.jit(lambda x: x + 1, device=jax.devices()[0]) + g = jax.jit(lambda x: x + 1, device=jax.devices()[1]) + self.assertRaisesRegex( + ValueError, + "primitive arguments must be colocated on the same device", + lambda: f(1) + g(2)) def test_primitive_compilation_cache(self): if len(jax.devices()) < 2: diff --git a/tests/multibackend_test.py b/tests/multibackend_test.py --- a/tests/multibackend_test.py +++ b/tests/multibackend_test.py @@ -123,6 +123,22 @@ def fun(x, y): self.assertEqual(z.device_buffer.platform(), backend) self.assertEqual(w.device_buffer.platform(), backend) + @jtu.skip_on_devices("cpu") # test can only fail with non-cpu backends + def testJitCpu(self): + @partial(api.jit, backend='cpu') + def get_arr(scale): + return scale + np.ones((2, 2)) + + x = get_arr(0.1) + + a = x / x.shape[0] + b = x + np.ones_like(x) + c = x + np.eye(2) + + self.assertEqual(a.device_buffer.device(), api.devices('cpu')[0]) + self.assertEqual(b.device_buffer.device(), api.devices('cpu')[0]) + self.assertEqual(c.device_buffer.device(), api.devices('cpu')[0]) + if __name__ == "__main__": absltest.main()
jit(..., backend='cpu') and the new device placement don't go together After #1884 this code snippet fails: ``` def get_arr(scale): return scale + np.ones((2, 2)) get_arr = jit(get_arr, backend='cpu') x = get_arr(0.1) x / x.shape[0] x + np.ones_like(x) x + np.eye(2) ``` with errors like ``` --------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-20-ca9775de72ea> in <module>() ----> 1 x / x.shape[0] 4 frames google3/third_party/py/jax/interpreters/xla.py in xla_primitive_callable(prim, *arg_specs, **params) 169 msg = "primitive arguments must be colocated on the same device, got {}" 170 names = ("{}({})".format(d[0].__name__, d[1]) for d in devices if d is not None) --> 171 raise ValueError(msg.format(", ".join(names))) 172 else: 173 all_devices = it.chain(xb.devices(), xb.devices('cpu')) ValueError: primitive arguments must be colocated on the same device, got CpuDevice(0), GpuDevice(0) ``` Note that it works file if instead of `jit(..., backend='cpu')` I use `jax.device_get`. Below is the modified `tests/multi_device_test.py` to test for this case. ``` # Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from unittest import SkipTest from absl.testing import absltest import numpy as onp from jax.api import jit import jax import jax.numpy as np from jax import lax from jax import test_util as jtu from jax.lib import xla_bridge from jax.interpreters import xla from jax.config import config config.parse_flags_with_absl() prev_xla_flags = None # Run all tests with 8 CPU devices. def setUpModule(): global prev_xla_flags prev_xla_flags = os.getenv("XLA_FLAGS") flags_str = prev_xla_flags or "" # Don't override user-specified device count, or other XLA flags. if "xla_force_host_platform_device_count" not in flags_str: os.environ["XLA_FLAGS"] = (flags_str + " --xla_force_host_platform_device_count=8") # Clear any cached backends so new CPU backend will pick up the env var. xla_bridge.get_backend.cache_clear() # Reset to previous configuration in case other test modules will be run. 
def tearDownModule(): if prev_xla_flags is None: del os.environ["XLA_FLAGS"] else: os.environ["XLA_FLAGS"] = prev_xla_flags xla_bridge.get_backend.cache_clear() class MultiDeviceTest(jtu.JaxTestCase): def test_computation_follows_data(self): if len(jax.devices()) < 2: raise SkipTest("test requires multiple devices") x = jax.device_put(1, jax.devices()[0]) y = jax.device_put(2, jax.devices()[0]) z = x + y self.assertEqual(z, 3) self.assertEqual(z.device_buffer.device(), jax.devices()[0]) x = jax.device_put(1, jax.devices()[1]) y = jax.device_put(2, jax.devices()[1]) z = x + y self.assertEqual(z, 3) self.assertEqual(z.device_buffer.device(), jax.devices()[1]) x = jax.device_put(1, jax.devices()[1]) y = 4 z = x + y self.assertEqual(z, 5) self.assertEqual(z.device_buffer.device(), jax.devices()[1]) x = jax.device_put(1, jax.devices()[1]) y = np.ones(3) z = x + y self.assertAllClose(z, 1 + onp.ones(3), check_dtypes=False) self.assertEqual(z.device_buffer.device(), jax.devices()[1]) x = jax.device_put(1, jax.devices()[0]) y = jax.device_put(2, jax.devices()[1]) self.assertRaisesRegex( ValueError, "primitive arguments must be colocated on the same device", lambda: x + y) x = jax.device_put(1, jax.devices()[1]) y = x.reshape((1, 1)) self.assertEqual(y.device_buffer.device(), jax.devices()[1]) def test_primitive_compilation_cache(self): if len(jax.devices()) < 2: raise SkipTest("test requires multiple devices") primitive_computation = xla.primitive_computation xla.xla_primitive_callable.cache_clear() # clear op-by-op cache count = [0] def primitive_computation_and_count(*args, **kwargs): count[0] += 1 return primitive_computation(*args, **kwargs) x = jax.device_put(1, jax.devices()[1]) try: xla.primitive_computation = primitive_computation_and_count y = lax.add(x, x) z = lax.add(y, y) finally: xla.primitive_computation = primitive_computation self.assertEqual(count[0], 1) self.assertEqual(y.device_buffer.device(), jax.devices()[1]) self.assertEqual(z.device_buffer.device(), jax.devices()[1]) def test_jit_cpu(self): def get_arr(scale): return scale + np.ones((2, 2)) get_arr = jit(get_arr, backend='cpu') x = get_arr(0.1) x / x.shape[0] x + np.ones_like(x) x + np.eye(2) if __name__ == '__main__': absltest.main() ``` jit(..., backend='cpu') and the new device placement don't go together After #1884 this code snippet fails: ``` def get_arr(scale): return scale + np.ones((2, 2)) get_arr = jit(get_arr, backend='cpu') x = get_arr(0.1) x / x.shape[0] x + np.ones_like(x) x + np.eye(2) ``` with errors like ``` --------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-20-ca9775de72ea> in <module>() ----> 1 x / x.shape[0] 4 frames google3/third_party/py/jax/interpreters/xla.py in xla_primitive_callable(prim, *arg_specs, **params) 169 msg = "primitive arguments must be colocated on the same device, got {}" 170 names = ("{}({})".format(d[0].__name__, d[1]) for d in devices if d is not None) --> 171 raise ValueError(msg.format(", ".join(names))) 172 else: 173 all_devices = it.chain(xb.devices(), xb.devices('cpu')) ValueError: primitive arguments must be colocated on the same device, got CpuDevice(0), GpuDevice(0) ``` Note that it works file if instead of `jit(..., backend='cpu')` I use `jax.device_get`. Below is the modified `tests/multi_device_test.py` to test for this case. 
``` # Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from unittest import SkipTest from absl.testing import absltest import numpy as onp from jax.api import jit import jax import jax.numpy as np from jax import lax from jax import test_util as jtu from jax.lib import xla_bridge from jax.interpreters import xla from jax.config import config config.parse_flags_with_absl() prev_xla_flags = None # Run all tests with 8 CPU devices. def setUpModule(): global prev_xla_flags prev_xla_flags = os.getenv("XLA_FLAGS") flags_str = prev_xla_flags or "" # Don't override user-specified device count, or other XLA flags. if "xla_force_host_platform_device_count" not in flags_str: os.environ["XLA_FLAGS"] = (flags_str + " --xla_force_host_platform_device_count=8") # Clear any cached backends so new CPU backend will pick up the env var. xla_bridge.get_backend.cache_clear() # Reset to previous configuration in case other test modules will be run. def tearDownModule(): if prev_xla_flags is None: del os.environ["XLA_FLAGS"] else: os.environ["XLA_FLAGS"] = prev_xla_flags xla_bridge.get_backend.cache_clear() class MultiDeviceTest(jtu.JaxTestCase): def test_computation_follows_data(self): if len(jax.devices()) < 2: raise SkipTest("test requires multiple devices") x = jax.device_put(1, jax.devices()[0]) y = jax.device_put(2, jax.devices()[0]) z = x + y self.assertEqual(z, 3) self.assertEqual(z.device_buffer.device(), jax.devices()[0]) x = jax.device_put(1, jax.devices()[1]) y = jax.device_put(2, jax.devices()[1]) z = x + y self.assertEqual(z, 3) self.assertEqual(z.device_buffer.device(), jax.devices()[1]) x = jax.device_put(1, jax.devices()[1]) y = 4 z = x + y self.assertEqual(z, 5) self.assertEqual(z.device_buffer.device(), jax.devices()[1]) x = jax.device_put(1, jax.devices()[1]) y = np.ones(3) z = x + y self.assertAllClose(z, 1 + onp.ones(3), check_dtypes=False) self.assertEqual(z.device_buffer.device(), jax.devices()[1]) x = jax.device_put(1, jax.devices()[0]) y = jax.device_put(2, jax.devices()[1]) self.assertRaisesRegex( ValueError, "primitive arguments must be colocated on the same device", lambda: x + y) x = jax.device_put(1, jax.devices()[1]) y = x.reshape((1, 1)) self.assertEqual(y.device_buffer.device(), jax.devices()[1]) def test_primitive_compilation_cache(self): if len(jax.devices()) < 2: raise SkipTest("test requires multiple devices") primitive_computation = xla.primitive_computation xla.xla_primitive_callable.cache_clear() # clear op-by-op cache count = [0] def primitive_computation_and_count(*args, **kwargs): count[0] += 1 return primitive_computation(*args, **kwargs) x = jax.device_put(1, jax.devices()[1]) try: xla.primitive_computation = primitive_computation_and_count y = lax.add(x, x) z = lax.add(y, y) finally: xla.primitive_computation = primitive_computation self.assertEqual(count[0], 1) self.assertEqual(y.device_buffer.device(), jax.devices()[1]) 
self.assertEqual(z.device_buffer.device(), jax.devices()[1]) def test_jit_cpu(self): def get_arr(scale): return scale + np.ones((2, 2)) get_arr = jit(get_arr, backend='cpu') x = get_arr(0.1) x / x.shape[0] x + np.ones_like(x) x + np.eye(2) if __name__ == '__main__': absltest.main() ```
IIUC the test only fails on a GPU machine, or at least one where there's a backend other than `'cpu'`. Does that sound right?
2019-12-25T03:16:55
google/jax
1930
google__jax-1930
[ "1640" ]
322ebe7c9b3738fae0ecd4f6eb338af4d5f2f8c1
diff --git a/jax/api.py b/jax/api.py --- a/jax/api.py +++ b/jax/api.py @@ -1337,8 +1337,8 @@ def jaxpr_maker(*args, **kwargs): jax_args, in_tree = tree_flatten((args, kwargs)) jaxtree_fun, out_tree = flatten_fun(wrapped, in_tree) in_pvals = map(pv_like, jax_args) - jaxpr, out_pvals, consts = pe.trace_to_jaxpr(jaxtree_fun, in_pvals, - instantiate=True) + jaxpr, out_pvals, consts = pe.trace_to_jaxpr( + jaxtree_fun, in_pvals, instantiate=True, stage_out_calls=True) out_avals = map(raise_to_shaped, unzip2(out_pvals)[0]) in_avals = tuple(raise_to_shaped(in_aval) for in_aval, _ in in_pvals) typed_jaxpr = core.TypedJaxpr(jaxpr, consts, in_avals, out_avals) diff --git a/jax/interpreters/partial_eval.py b/jax/interpreters/partial_eval.py --- a/jax/interpreters/partial_eval.py +++ b/jax/interpreters/partial_eval.py @@ -325,10 +325,10 @@ def partial_val_aval(pv, const): else: raise TypeError(pv) -def trace_to_jaxpr(fun, pvals, **kwargs): +def trace_to_jaxpr(fun, pvals, instantiate=False, stage_out_calls=False): """Traces a function, given abstract inputs, to a jaxpr.""" - instantiate = kwargs.pop('instantiate', False) - with new_master(JaxprTrace) as master: + trace_type = StagingJaxprTrace if stage_out_calls else JaxprTrace + with new_master(trace_type) as master: fun = trace_to_subjaxpr(fun, master, instantiate) jaxpr, (out_pvals, consts, env) = fun.call_wrapped(pvals) assert not env diff --git a/jax/lax/lax_control_flow.py b/jax/lax/lax_control_flow.py --- a/jax/lax/lax_control_flow.py +++ b/jax/lax/lax_control_flow.py @@ -57,7 +57,8 @@ def _initial_style_jaxpr(fun, in_tree, in_avals): in_pvals = [pe.PartialVal((aval, core.unit)) for aval in in_avals] fun, out_tree = flatten_fun_nokwargs(lu.wrap_init(fun), in_tree) - jaxpr, out_pvals, consts = pe.trace_to_jaxpr(fun, in_pvals, instantiate=True) + jaxpr, out_pvals, consts = pe.trace_to_jaxpr(fun, in_pvals, instantiate=True, + stage_out_calls=True) out_avals = _map(raise_to_shaped, unzip2(out_pvals)[0]) const_avals = tuple(raise_to_shaped(core.get_aval(c)) for c in consts) typed_jaxpr = core.TypedJaxpr(pe.closure_convert_jaxpr(jaxpr),
diff --git a/tests/api_test.py b/tests/api_test.py --- a/tests/api_test.py +++ b/tests/api_test.py @@ -953,6 +953,14 @@ def test_issue_871(self): def test_partial_eval_lower(self): # this is a simplified model of a bug that arose when we first used @jit in # a jvp rule. it's in this file because we want to use make_jaxpr. + + # NOTE(mattjj): I no longer understand what this was meant to test. My guess + # is it was related to staging out the broadcast into a jaxpr to be + # transposed, but after #1749 that's no longer a problem. After changing + # make_jaxpr (and jit) to stage out sub-calls fully, this test started to + # fail; I left it in as skipped because deleting tests feels wrong. + raise unittest.SkipTest("obsolete test") + @api.jit def f(a, b, c): a = lax.broadcast(a, (2,)) diff --git a/tests/nn_test.py b/tests/nn_test.py --- a/tests/nn_test.py +++ b/tests/nn_test.py @@ -30,6 +30,7 @@ from jax import nn from jax import random import jax +import jax.numpy as np from jax.config import config config.parse_flags_with_absl() @@ -51,6 +52,16 @@ def testEluValue(self): val = nn.elu(1e4) self.assertAllClose(val, 1e4, check_dtypes=False) + @jtu.skip_on_devices("gpu", "tpu") + def testEluMemory(self): + # see https://github.com/google/jax/pull/1640 + jax.make_jaxpr(nn.elu)(np.ones((10 ** 12,))) # don't oom + + @jtu.skip_on_devices("gpu", "tpu") + def testHardTanhMemory(self): + # see https://github.com/google/jax/pull/1640 + jax.make_jaxpr(nn.hard_tanh)(np.ones((10 ** 12,))) # don't oom + InitializerRecord = collections.namedtuple( "InitializerRecord", ["name", "initializer", "shapes"])
Fix large constants in activations This fixes an issue where using the elu, selu, or tanh activation results in a constant as big as the input being created. This PR also switches the use of np.where to lax.select for activation functions to avoid accidental broadcasting of scalars in the future. I added a test that verifies that making the jaxpr does not produce constants that scale in size with the input.
Thanks for this! Looks like there were some test failures in 64bit mode. Turns out lax.select fails because in X64 mode a python float * float32 tensor = float64 tensor. Is that not a violation of numpy dtype promotion rules? I'm hesitant to merge this just because it makes the code uglier and we hope #1668 (or a related PR) will fix the underlying issue. But at the same time this is fixing a real problem! @jekbradbury WDYT? Should we merge, maybe with TODO notes to revert it once we fix the underlying issues? I think I’d rather merge a version that uses `tie_in` to avoid materializing broadcast constants, if we can get that to work (I can look into it), or wait until #1668 if we expect that to land soon. The version in this PR depends on the specific current behavior where lax.full is lazy but broadcasts aren’t, which makes it a little opaque.
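For context on the kind of check being discussed, here is a simplified, hypothetical probe (not the PR's actual test): trace an activation with `make_jaxpr` and inspect whether any constant in the resulting jaxpr scales with the input's size.

```python
import jax
import jax.numpy as jnp

def leaky(x):
  return jnp.where(x >= 0, x, 0.01 * x)  # stand-in activation

jaxpr = jax.make_jaxpr(leaky)(jnp.ones((4,)))
print(jaxpr)  # look for broadcasted constants as large as x
```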
2019-12-31T19:03:22
google/jax
1931
google__jax-1931
[ "1912" ]
82dbf9131105a0b3a22c191930bc42b15d420794
diff --git a/jax/util.py b/jax/util.py --- a/jax/util.py +++ b/jax/util.py @@ -214,6 +214,10 @@ def get_module_functions(module): """ module_fns = set() for key in dir(module): + # Omitting module level __getattr__, __dir__ which was added in Python 3.7 + # https://www.python.org/dev/peps/pep-0562/ + if key in ('__getattr__', '__dir__'): + continue attr = getattr(module, key) if isinstance( attr, (types.BuiltinFunctionType, types.FunctionType, onp.ufunc)):
not compatible with numpy 1.18 and python 3.7 With numpy 1.18 and python 3.7, `import jax` will raise the error ```python >>> import jax Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/home/fehiepsi/miniconda3/envs/py37/lib/python3.7/site-packages/jax/__init__.py", line 20, in <module> from jax import nn File "/home/fehiepsi/miniconda3/envs/py37/lib/python3.7/site-packages/jax/nn/__init__.py", line 17, in <module> from . import initializers File "/home/fehiepsi/miniconda3/envs/py37/lib/python3.7/site-packages/jax/nn/initializers.py", line 28, in <module> from jax import random File "/home/fehiepsi/miniconda3/envs/py37/lib/python3.7/site-packages/jax/random.py", line 33, in <module> from . import numpy as np File "/home/fehiepsi/miniconda3/envs/py37/lib/python3.7/site-packages/jax/numpy/__init__.py", line 16, in <module> from .lax_numpy import * File "<frozen importlib._bootstrap>", line 1019, in _handle_fromlist File "/home/fehiepsi/miniconda3/envs/py37/lib/python3.7/site-packages/jax/numpy/lax_numpy.py", line 3211, in wrapped raise NotImplementedError(msg.format(fun)) NotImplementedError: Numpy function <function __getattr__ at 0x7f029e9dd560> not yet implemented ```
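A rough illustration of the mechanism (assuming a NumPy version that defines a PEP 562 module-level `__getattr__`, as the traceback above shows NumPy 1.18 on Python 3.7 does): scanning `dir(numpy)` for function-like attributes also picks up that dunder, which is what the patch above filters out before JAX wraps the collected NumPy functions.

```python
import types
import numpy as np

fns = [k for k in dir(np)
       if isinstance(getattr(np, k),
                     (types.BuiltinFunctionType, types.FunctionType, np.ufunc))]
print('__getattr__' in fns)  # True when the module defines a PEP 562 __getattr__
```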
2019-12-31T20:21:46
google/jax
1955
google__jax-1955
[ "1950" ]
bb9cd233683565c5127f88bab5a51a504752cae2
diff --git a/jax/interpreters/ad.py b/jax/interpreters/ad.py --- a/jax/interpreters/ad.py +++ b/jax/interpreters/ad.py @@ -79,6 +79,7 @@ def jvp_subtrace_aux(master, primals, tangents): aux_tracers = map(trace.full_raise, aux) out_primals, out_tangents = unzip2((t.primal, t.tangent) for t in ans_tracers) aux_primals, _ = unzip2((t.primal, t.tangent) for t in aux_tracers) + aux_primals = map(core.full_lower, aux_primals) yield (out_primals, out_tangents), aux_primals def linearize(traceable, *primals, **kwargs):
diff --git a/tests/api_test.py b/tests/api_test.py --- a/tests/api_test.py +++ b/tests/api_test.py @@ -34,7 +34,7 @@ import jax import jax.numpy as np from jax import jit, grad, device_put, jacfwd, jacrev, hessian -from jax import api, lax +from jax import api, core, lax from jax.core import Primitive from jax.interpreters import ad from jax.interpreters import xla @@ -495,6 +495,17 @@ def test_grad_and_aux_constant(self): self.assertEqual(g, grad(lambda x: x**3)(4.)) self.assertEqual(aux, [4.**2, 4.]) + def test_grad_and_aux_no_tracers(self): + # see https://github.com/google/jax/issues/1950 + def f(x): + aux = dict(identity=x, p1=x+1) + return x ** 2, aux + + _, aux = jax.grad(f, has_aux=True)(3.) + self.assertIsInstance(aux, dict) + for val in aux.values(): + self.assertNotIsInstance(val, core.Tracer) + def test_jvp_mismatched_arguments(self): self.assertRaisesRegex( TypeError,
jax.grad(has_aux=True) leaks identity tracers ``` def f(x): aux = dict(identity=x, p1=x+1) return x ** 2, aux jax.grad(f, has_aux=True)(3.) (DeviceArray(6., dtype=float32), {'identity': Traced<ConcreteArray(3.0, weak_type=True):JaxprTrace(level=0/0)>, 'p1': DeviceArray(4., dtype=float32)}) ```
It's surprising that one of them is being unboxed properly... A workaround for now is just to use `lax.stop_gradient(x)` when you build the dictionary.
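A quick sketch of that workaround applied to the repro (unnecessary once the fix above landed): wrapping the aux entry that merely aliases the input in `lax.stop_gradient` makes it come back as a concrete value rather than a leaked tracer.

```python
import jax
from jax import lax

def f(x):
  aux = dict(identity=lax.stop_gradient(x), p1=x + 1)
  return x ** 2, aux

g, aux = jax.grad(f, has_aux=True)(3.)
print(g, aux)  # 6.0 and plain arrays for both aux entries
```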
2020-01-07T02:09:27
google/jax
1956
google__jax-1956
[ "1952" ]
1ca9e9b251ebdea78df168d7a86f4719f49c553b
diff --git a/jax/lax_linalg.py b/jax/lax_linalg.py --- a/jax/lax_linalg.py +++ b/jax/lax_linalg.py @@ -135,20 +135,31 @@ def _nan_like(c, operand): nan = c.Constant(onp.array(onp.nan, dtype=dtype)) return c.Broadcast(nan, shape.dimensions()) -def cholesky_cpu_translation_rule(c, operand): +# TODO(phawkins): remove supports_batching argument after the minimum jaxlib +# version is 0.1.38. +def _cholesky_cpu_gpu_translation_rule(potrf_impl, potrf_supports_batching, c, + operand): shape = c.GetShape(operand) + batch_dims = shape.dimensions()[:-2] dtype = shape.element_type().type - if len(shape.dimensions()) == 2 and onp.dtype(dtype) in _cpu_lapack_types: - result, info = lapack.potrf(c, operand, lower=True) - return c.Select(c.Eq(info, c.ConstantS32Scalar(0)), result, - _nan_like(c, result)) + if len(batch_dims) == 0 or potrf_supports_batching: + result, info = potrf_impl(c, operand, lower=True) + ok = c.Eq(info, c.ConstantS32Scalar(0)) + return _broadcasting_select(c, + c.Reshape(ok, None, batch_dims + (1, 1)), result, + _nan_like(c, result)) else: - # Fall back to the HLO implementation for batched Cholesky decomposition or - # unsupported types. - # TODO(phawkins): support LAPACK primitives in batched mode. + # Fall back to the HLO implementation for batched Cholesky decomposition. return c.Cholesky(operand) -xla.backend_specific_translations['cpu'][cholesky_p] = cholesky_cpu_translation_rule +xla.backend_specific_translations['cpu'][cholesky_p] = partial( + _cholesky_cpu_gpu_translation_rule, lapack.potrf, + not hasattr(lapack, "jax_potrf")) + +# TODO(phawkins): remove after the minimum jaxlib version is 0.1.38. +if hasattr(cusolver, "potrf"): + xla.backend_specific_translations['gpu'][cholesky_p] = partial( + _cholesky_cpu_gpu_translation_rule, cusolver.potrf, True) # Asymmetric eigendecomposition diff --git a/jaxlib/cusolver.py b/jaxlib/cusolver.py --- a/jaxlib/cusolver.py +++ b/jaxlib/cusolver.py @@ -99,6 +99,39 @@ def trsm(c, a, b, left_side=False, lower=False, trans_a=False, conj_a=False, return c.GetTupleElement(out, 0) +def potrf(c, a, lower): + """Cholesky decomposition.""" + a_shape = c.GetShape(a) + dtype = a_shape.element_type() + dims = a_shape.dimensions() + m, n = dims[-2:] + assert m == n + batch_dims = tuple(dims[:-2]) + num_bd = len(batch_dims) + batch = _prod(batch_dims) + + lwork, opaque = cusolver_kernels.build_potrf_descriptor( + np.dtype(dtype), lower, batch, n) + kernel = b"cusolver_potrf" + + out = c.CustomCall( + kernel, + operands=(a,), + shape_with_layout=_Shape.tuple_shape(( + _Shape.array_shape( + dtype, batch_dims + (n, n), + (num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1))), + _Shape.array_shape( + np.dtype(np.int32), batch_dims, tuple(range(num_bd - 1, -1, -1))), + _Shape.array_shape(np.dtype(np.int8), (lwork,), (0,)), + )), + operand_shapes_with_layout=(_Shape.array_shape( + dtype, batch_dims + (n, n), + (num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1))),), + opaque=opaque) + return c.GetTupleElement(out, 0), c.GetTupleElement(out, 1) + + def getrf(c, a): """LU decomposition.""" a_shape = c.GetShape(a)
diff --git a/tests/linalg_test.py b/tests/linalg_test.py --- a/tests/linalg_test.py +++ b/tests/linalg_test.py @@ -72,8 +72,9 @@ def args_maker(): a = rng(factor_shape, dtype) return [onp.matmul(a, np.conj(T(a)))] - if np.issubdtype(dtype, np.complexfloating) and ( - len(shape) > 2 or jtu.device_under_test() != "cpu"): + if (np.issubdtype(dtype, np.complexfloating) and + (jtu.device_under_test() == "tpu" or + (jtu.device_under_test() == "cpu" and jax.lib.version < (0, 1, 38)))): self.skipTest("Unimplemented case for complex Cholesky decomposition.") self._CheckAgainstNumpy(onp.linalg.cholesky, np.linalg.cholesky, args_maker,
cholesky not working with vmap on complex arrays

I tried to use `cholesky` on complex arrays with `vmap` but I get an `Unimplemented` error.

```
A = np.eye(3).astype(np.complex64)
A_batched = np.stack([A, A])
jax.vmap(np.linalg.cholesky)(A_batched)

RuntimeError: Unimplemented: Complex types are not implemented in Cholesky; got shape c64[2,3,3]:
```

This isn't a problem if I don't use vmap.

```
np.linalg.cholesky(A)

DeviceArray([[1.+0.j, 0.+0.j, 0.+0.j],
             [0.+0.j, 1.+0.j, 0.+0.j],
             [0.+0.j, 0.+0.j, 1.+0.j]], dtype=complex64)
```

Is this a bug or am I doing something wrong? This looks really strange.
Thanks for the question! What backend are you using (CPU/GPU/TPU)? (I'm guessing CPU.) It looks like you're hitting [this unimplemented error in XLA](https://github.com/tensorflow/tensorflow/blob/adc3a1b1bec28849f62076e9b4be5c5963e5e5e7/tensorflow/compiler/xla/service/cholesky_expander.cc#L148). I think the reason it's only coming up when you use `vmap` is that [JAX falls back to an XLA-based implementation](https://github.com/google/jax/blob/29db4203fec6b9796637034d48a62ac84fbcf43f/jax/lax_linalg.py#L146-L149), rather than using a LAPACK-based one as in the un-batched case. @hawkinsp we could just write a loop in our [`lapack.pyx` bindings](https://github.com/google/jax/blob/29db4203fec6b9796637034d48a62ac84fbcf43f/jaxlib/lapack.pyx#L772) to handle batch dimensions, right? I think that's how we [handle batch dimensions in other kernels](https://github.com/google/jax/blob/29db4203fec6b9796637034d48a62ac84fbcf43f/jaxlib/lapack.pyx#L308). I'm going to mark this as a "good first issue" because there are plenty of examples in lapack.pyx and lax_linalg.py to pattern-match off of. (Also I un-assigned myself because I'm not working on this right now.) will attempt this!
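One possible stopgap until the batched kernels land is to stay on the unbatched, complex-capable path by looping over the leading axis in Python instead of using `vmap`. A rough sketch (not usable inside `jit`, since it builds a Python list):

```python
import jax.numpy as jnp

def cholesky_batched(mats):
    # each np.linalg.cholesky call below is unbatched, so it hits the
    # LAPACK kernel that does support complex inputs
    return jnp.stack([jnp.linalg.cholesky(m) for m in mats])

A = jnp.eye(3).astype(jnp.complex64)
A_batched = jnp.stack([A, A])
L = cholesky_batched(A_batched)   # works where vmap(np.linalg.cholesky) did not
```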
2020-01-07T03:35:21
google/jax
1,958
google__jax-1958
[ "1919", "1919" ]
0c9aacf1dac3804cad8044d517f009a15cf49654
diff --git a/jax/lax/lax_control_flow.py b/jax/lax/lax_control_flow.py --- a/jax/lax/lax_control_flow.py +++ b/jax/lax/lax_control_flow.py @@ -211,7 +211,7 @@ def while_loop(cond_fun, body_fun, init_val): return tree_unflatten(body_tree, outs) def _while_loop_abstract_eval(*args, **kwargs): - return kwargs["body_jaxpr"].out_avals + return _map(raise_to_shaped, kwargs["body_jaxpr"].out_avals) def _while_loop_translation_rule(c, axis_env, *args, **kwargs): backend = kwargs.pop('backend') @@ -363,7 +363,7 @@ def cond(pred, true_operand, true_fun, false_operand, false_fun): return tree_unflatten(true_out_tree, out) def _cond_abstract_eval(*args, **kwargs): - return kwargs["true_jaxpr"].out_avals + return _map(raise_to_shaped, kwargs["true_jaxpr"].out_avals) def _cond_translation_rule(c, axis_env, pred, *args, **kwargs): backend = kwargs.pop("backend", None)
AssertionError: If you see this error, please let us know by opening an issue at... Hi there! Just ran into ``` AssertionError: If you see this error, please let us know by opening an issue at https://github.com/google/jax/issues since we thought this was unreachable! ``` so I thought I better do as i was told! I am trying to implement an ML algorithm, and when jit tracing a function I get the error above. The issue was that i used a good old numpy randint inside the loss function, and when replacing that with a jax.random randint the error went away.
Thanks for raising this! However, it's not very actionable for us without a bit more information. Can you share a repro (smaller is better)? Yes! A branch where the error is left is here: https://github.com/jotsif/fm_jax/tree/jit_error, and run `warp_jit_error.py` to reproduce. After playing around a bit I think the error is related to that `neg_item` scalar being produced by a vanilla python function, `npr.randint`, so it catches some corner case for the 0-dimensional ConcreteArray in the jit-logic but I am on very thin ice here. And thanks for an awesome framework btw. Got the basics of the factorisation machine working yesterday 🎉 Thanks for the kind words, and the repro! I'll look into it. By the way, as a workaround if you want to dodge this error, you can put this at the top of your file: ```python from jax.interpreters import partial_eval as pe pe._thread_local_state.remat = True ``` [The error](https://github.com/google/jax/blob/82dbf9131105a0b3a22c191930bc42b15d420794/jax/interpreters/partial_eval.py#L598-L608) is a "voluntary" and [temporary](https://github.com/google/jax/blob/82dbf9131105a0b3a22c191930bc42b15d420794/jax/interpreters/partial_eval.py#L512) one that I inserted to check our understanding that a change we made in #1749 wouldn't cause any extra (but otherwise harmless) FLOPs to be performed. The above lines just disable the "voluntary" error-checking mechanism. I want to look into this example to understand if it's an edge case (which we can just patch) or if I should revert the extra-FLOP-potential part of #1749 that this error is meant to check.
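The fix the reporter describes, drawing the random index with `jax.random` instead of a stateful `numpy.random` call, looks roughly like this. The loss body, shapes and names are invented for illustration:

```python
import jax
import jax.numpy as jnp

def loss(params, key, n_items=10):
    # traced, key-driven randomness instead of a stateful numpy call
    neg_item = jax.random.randint(key, (), 0, n_items)
    return jnp.sum(params[neg_item] ** 2)

params = jnp.ones((10, 3))
key = jax.random.PRNGKey(0)
value, grads = jax.value_and_grad(loss)(params, key)
```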
2020-01-07T06:50:35
google/jax
1,970
google__jax-1970
[ "1933" ]
46014da21d49febb713781d033c0cbfcb79dfedb
diff --git a/jax/lax/lax.py b/jax/lax/lax.py --- a/jax/lax/lax.py +++ b/jax/lax/lax.py @@ -111,13 +111,24 @@ def neg(x): def sign(x): r"""Elementwise sign. + For floating-point inputs, returns :math:`\mathrm{sign}(x) = \begin{cases} -1 & x < 0\\ -0 & x = -0\\ \mathit{NaN} & x = \mathit{NaN}\\ +0 & x = +0\\ 1 & x > 0 - \end{cases}`. + \end{cases}` + + For signed integer inputs, returns + :math:`\mathrm{sign}(x) = \begin{cases} + -1 & x < 0\\ + 0 & x = 0\\ + 1 & x > 0 + \end{cases}` + + For complex inputs, returns the complex phase, i.e. + :math:`\mathrm{sign}(x) = \frac{x}{|x|}`. """ return sign_p.bind(x) @@ -1531,9 +1542,10 @@ def unop_dtype_rule(result_dtype, accepted_dtypes, name, aval, **kwargs): return result_dtype(aval.dtype) -def unop(result_dtype, accepted_dtypes, name): +def unop(result_dtype, accepted_dtypes, name, translation_rule=None): dtype_rule = partial(unop_dtype_rule, result_dtype, accepted_dtypes, name) - prim = standard_primitive(_attrgetter('shape'), dtype_rule, name) + prim = standard_primitive(_attrgetter('shape'), dtype_rule, name, + translation_rule=translation_rule) batching.defvectorized(prim) masking.defvectorized(prim) return prim @@ -1623,7 +1635,17 @@ def _brcast_to(x, shape): neg_p = standard_unop(_num, 'neg') ad.deflinear(neg_p, lambda t: [neg(t)]) -sign_p = standard_unop(_num, 'sign') +def _sign_translation_rule(c, x): + shape = c.GetShape(x) + dtype = shape.numpy_dtype() + if dtypes.issubdtype(dtype, onp.unsignedinteger): + zero = c.Constant(onp.array(0, dtype=dtype)) + dims = c.GetShape(x).dimensions() + return c.Select(c.Eq(x, zero), c.Broadcast(zero, dims), + c.Broadcast(c.Constant(onp.array(1, dtype=dtype)), dims)) + return c.Sign(x) + +sign_p = standard_unop(_num, 'sign', translation_rule=_sign_translation_rule) ad.defjvp_zero(sign_p) nextafter_p = standard_binop( diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -415,7 +415,6 @@ def fn(x1, x2): bitwise_not = _one_to_one_unop(onp.bitwise_not, lax.bitwise_not) negative = _one_to_one_unop(onp.negative, lax.neg) positive = _one_to_one_unop(onp.positive, lambda x: x) -sign = _one_to_one_unop(onp.sign, lax.sign) floor = _one_to_one_unop(onp.floor, lax.floor, True) ceil = _one_to_one_unop(onp.ceil, lax.ceil, True) @@ -486,6 +485,16 @@ def op(*args): logical_xor = _logical_op(onp.logical_xor, lax.bitwise_xor) +@_wraps(onp.sign) +def sign(x): + dtype = _dtype(x) + if issubdtype(dtype, complexfloating): + re = lax.real(x) + return lax.complex( + lax.sign(where(re != 0, re, lax.imag(x))), _constant_like(re, 0)) + return lax.sign(x) + + @_wraps(onp.true_divide) def true_divide(x1, x2): x1, x2 = _promote_args_inexact("true_divide", x1, x2)
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -60,6 +60,7 @@ {lnp.bfloat16, onp.float16, onp.float32, onp.float64})) complex_dtypes = [onp.complex64, onp.complex128] int_dtypes = [onp.int32, onp.int64] +uint_dtypes = [onp.uint32, onp.uint64] unsigned_dtypes = [onp.uint32, onp.uint64] bool_dtypes = [onp.bool_] default_dtypes = float_dtypes + int_dtypes @@ -188,6 +189,10 @@ def op_record(name, nargs, dtypes, shapes, rng_factory, diff_modes, op_record("fix", 1, float_dtypes, all_shapes, jtu.rand_default, []), op_record("floor_divide", 2, number_dtypes, all_shapes, jtu.rand_nonzero, ["rev"]), + # TODO(phawkins): merge this with the preceding entry after the minimum + # Jaxlib version is increased to 0.1.38. + op_record("floor_divide", 2, uint_dtypes, all_shapes, jtu.rand_nonzero, + ["rev"]), op_record("heaviside", 2, default_dtypes, all_shapes, jtu.rand_default, [], inexact=True), op_record("hypot", 2, default_dtypes, all_shapes, jtu.rand_default, [], @@ -230,6 +235,8 @@ def op_record(name, nargs, dtypes, shapes, rng_factory, diff_modes, op_record("remainder", 2, default_dtypes, all_shapes, jtu.rand_nonzero, [], tolerance={onp.float16: 1e-2}), op_record("mod", 2, default_dtypes, all_shapes, jtu.rand_nonzero, []), + op_record("sign", 1, number_dtypes + uint_dtypes, all_shapes, + jtu.rand_some_inf_and_nan, []), op_record("sinc", 1, [t for t in number_dtypes if t != lnp.bfloat16], all_shapes, jtu.rand_default, ["rev"], tolerance={onp.complex64: 1e-5}, inexact=True, @@ -297,7 +304,8 @@ def op_record(name, nargs, dtypes, shapes, rng_factory, diff_modes, tolerance={onp.float32: 2e-4, onp.complex64: 2e-4, onp.complex128: 1e-14}), op_record("__mod__", 2, default_dtypes, all_shapes, jtu.rand_nonzero, [], tolerance={onp.float16: 1e-1}), - op_record("__floordiv__", 2, default_dtypes, all_shapes, jtu.rand_nonzero, []), + op_record("__floordiv__", 2, default_dtypes, all_shapes, + jtu.rand_nonzero, []), op_record("__truediv__", 2, number_dtypes, all_shapes, jtu.rand_nonzero, [], inexact=True), op_record("__abs__", 1, number_dtypes, all_shapes, jtu.rand_default, []), @@ -319,7 +327,8 @@ def op_record(name, nargs, dtypes, shapes, rng_factory, diff_modes, tolerance={onp.float32: 2e-4, onp.complex64: 1e-3}), op_record("__rmod__", 2, default_dtypes, all_shapes, jtu.rand_nonzero, [], tolerance={onp.float16: 1e-1}), - op_record("__rfloordiv__", 2, default_dtypes, all_shapes, jtu.rand_nonzero, []), + op_record("__rfloordiv__", 2, default_dtypes, all_shapes, + jtu.rand_nonzero, []), op_record("__rtruediv__", 2, number_dtypes, all_shapes, jtu.rand_nonzero, [], inexact=True), # op_record("__ror__", 2, number_dtypes, all_shapes, jtu.rand_bool, []), diff --git a/tests/lax_test.py b/tests/lax_test.py --- a/tests/lax_test.py +++ b/tests/lax_test.py @@ -62,6 +62,7 @@ def num_float_bits(dtype): {onp.complex64, onp.complex128})) inexact_dtypes = float_dtypes + complex_dtypes int_dtypes = list(jtu.supported_dtypes().intersection({onp.int32, onp.int64})) +uint_dtypes = list(jtu.supported_dtypes().intersection({onp.uint32, onp.uint64})) bool_dtypes = [onp.bool_] default_dtypes = float_dtypes + int_dtypes all_dtypes = float_dtypes + complex_dtypes + int_dtypes + bool_dtypes @@ -77,7 +78,7 @@ def op_record(op, nargs, dtypes, rng_factory, tol=None): LAX_OPS = [ op_record("neg", 1, default_dtypes + complex_dtypes, jtu.rand_small), - op_record("sign", 1, default_dtypes, jtu.rand_small), + op_record("sign", 1, default_dtypes + uint_dtypes, 
jtu.rand_small), op_record("floor", 1, float_dtypes, jtu.rand_small), op_record("ceil", 1, float_dtypes, jtu.rand_small), op_record("round", 1, float_dtypes, jtu.rand_default),
Floor division does not work with unsigned (8/16/32/64 bit) int types ``` from jax import numpy as np np.ones((1,), np.uint32) // 2 ``` Produces: ``` --------------------------------------------------------------------------- RuntimeError Traceback (most recent call last) /usr/local/lib/python3.6/dist-packages/jax/interpreters/xla.py in primitive_computation(prim, *avals, **params) 179 try: --> 180 return c.Build() 181 except RuntimeError as e: 8 frames /usr/local/lib/python3.6/dist-packages/jax/lib/xla_bridge.py in Build(self, *args, **kwargs) 256 return super(_JaxComputationBuilder, self).Build( --> 257 *args, **kwargs) 258 /usr/local/lib/python3.6/dist-packages/jaxlib/xla_client.py in Build(self, root, backend) 729 else: --> 730 return Computation(self._builder.Build(), backend=backend) 731 RuntimeError: Invalid argument: Expected element type in shape to be signed or complex for sign operation; got U32.: During handling of the above exception, another exception occurred: RuntimeError Traceback (most recent call last) <ipython-input-12-3db71aa0f40b> in <module>() 1 from jax import numpy as np 2 ----> 3 np.ones((1,), np.uint32) // 2 /usr/local/lib/python3.6/dist-packages/jax/numpy/lax_numpy.py in floor_divide(x1, x2) 459 if issubdtype(dtype, integer): 460 quotient = lax.div(x1, x2) --> 461 select = logical_and(lax.sign(x1) != lax.sign(x2), lax.rem(x1, x2) != 0) 462 # TODO(mattjj): investigate why subtracting a scalar was causing promotion 463 return where(select, quotient - onp.array(1, _dtype(quotient)), quotient) /usr/local/lib/python3.6/dist-packages/jax/lax/lax.py in sign(x) 117 \end{cases}`. 118 """ --> 119 return sign_p.bind(x) 120 121 def floor(x): /usr/local/lib/python3.6/dist-packages/jax/core.py in bind(self, *args, **kwargs) 150 top_trace = find_top_trace(args) 151 if top_trace is None: --> 152 return self.impl(*args, **kwargs) 153 154 tracers = map(top_trace.full_raise, args) /usr/local/lib/python3.6/dist-packages/jax/interpreters/xla.py in apply_primitive(prim, *args, **params) 138 """Impl rule that compiles and runs a single primitive 'prim' using XLA.""" 139 abstract_args = map(abstractify, args) --> 140 compiled_fun = xla_primitive_callable(prim, *abstract_args, **params) 141 return compiled_fun(*args) 142 /usr/local/lib/python3.6/dist-packages/jax/interpreters/xla.py in xla_primitive_callable(prim, *abstract_args, **params) 150 else: 151 handle_result = aval_to_result_handler(aval_out) --> 152 built_c = primitive_computation(prim, *abstract_args, **params) 153 compiled = built_c.Compile(compile_options=xb.get_compile_options(), 154 backend=xb.get_backend(backend)) /usr/local/lib/python3.6/dist-packages/jax/interpreters/xla.py in primitive_computation(prim, *avals, **params) 183 "This is a bug in JAX's shape-checking rules; please report it!\n" 184 "https://github.com/google/jax/issues\n") --> 185 raise RuntimeError(msg) 186 187 def _execute_compiled_primitive(prim, compiled, backend, result_handler, *args): RuntimeError: Invalid argument: Expected element type in shape to be signed or complex for sign operation; got U32.: This is a bug in JAX's shape-checking rules; please report it! https://github.com/google/jax/issues ```
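Until a build with the fix above is available, a workaround is to exploit that floor division and truncating division coincide for unsigned integers, or to detour through a signed dtype. A small sketch:

```python
import jax.numpy as jnp
from jax import lax

x = jnp.ones((1,), jnp.uint32)

# unsigned floor division is plain truncating division
q1 = lax.div(x, jnp.full_like(x, 2))

# or detour through a signed dtype; needs jax_enable_x64 to cover the
# full uint32 range, otherwise values above 2**31 need care
q2 = (x.astype(jnp.int64) // 2).astype(jnp.uint32)
```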
2020-01-09T15:44:39
google/jax
1,972
google__jax-1972
[ "1920" ]
ab2582585e0000e6a148a180ff419253c02390db
diff --git a/jax/lax/lax.py b/jax/lax/lax.py --- a/jax/lax/lax.py +++ b/jax/lax/lax.py @@ -4320,7 +4320,8 @@ def conv_shape_tuple(lhs_shape, rhs_shape, strides, pads): msg = "Wrong number of explicit pads for convolution: expected {}, got {}." raise TypeError(msg.format(len(lhs_shape) - 2, len(pads))) - lhs_padded = onp.add(lhs_shape[2:], onp.add(*zip(*pads))) + lhs_padded = onp.add(lhs_shape[2:], onp.sum(onp.array(pads).reshape(-1, 2), + axis=1)) out_space = onp.floor_divide( onp.subtract(lhs_padded, rhs_shape[2:]), strides) + 1 out_space = onp.maximum(0, out_space)
diff --git a/tests/lax_test.py b/tests/lax_test.py --- a/tests/lax_test.py +++ b/tests/lax_test.py @@ -489,6 +489,16 @@ def fun(lhs, rhs): # TODO(mattjj): test conv_general_dilated against numpy + def testConv0DIsDot(self): + rng = jtu.rand_default() + def args_maker(): + return [rng((10, 5), onp.float32), rng((5, 7), onp.float32)] + jnp_fun = partial(lax.conv_general_dilated, window_strides=(), + padding='VALID', dimension_numbers=('NC', 'IO', 'NC')) + self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True) + self._CheckAgainstNumpy(jnp_fun, onp.dot, args_maker) + + @staticmethod def _conv_transpose_via_grad(data, kernel, strides, padding, rhs_dilation=None, dimension_numbers=None):
Nit: lax.conv_general_dilated does not support 0D-convolution AFAIK 0-dimensional convolution should reduce to a fully-connected layer, but fails with the following example and error. While this is not a practically useful scenario, degenerate cases like these could be useful for testing. ``` from jax import lax import jax.numpy as np lax.conv_general_dilated(lhs=np.ones((10, 5)), rhs=np.ones((5, 7)), strides=(), paddign='SAME', dimension_numbers=('NC', 'IO', 'NC')) ``` ``` --------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-17-573e7695a148> in <module>() ----> 1 lax.conv_general_dilated(np.ones((10, 5)), np.ones((5, 7)), (), 'SAME', dimension_numbers=('NC', 'IO', 'NC')) 6 frames google3/third_party/py/jax/lax/lax.py in conv_general_dilated(lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation, dimension_numbers, feature_group_count, precision) 516 feature_group_count=feature_group_count, 517 lhs_shape=lhs.shape, rhs_shape=rhs.shape, --> 518 precision=_canonicalize_precision(precision)) 519 520 def dot(lhs, rhs, precision=None): google3/third_party/py/jax/core.py in bind(self, *args, **kwargs) 148 top_trace = find_top_trace(args) 149 if top_trace is None: --> 150 return self.impl(*args, **kwargs) 151 152 tracers = map(top_trace.full_raise, args) google3/third_party/py/jax/interpreters/xla.py in apply_primitive(prim, *args, **params) 150 def apply_primitive(prim, *args, **params): 151 """Impl rule that compiles and runs a single primitive 'prim' using XLA.""" --> 152 compiled_fun = xla_primitive_callable(prim, *map(arg_spec, args), **params) 153 return compiled_fun(*args) 154 google3/third_party/py/jax/interpreters/xla.py in xla_primitive_callable(prim, *arg_specs, **params) 174 device = device and next(d for d in all_devices if (type(d), d.id) == device) 175 backend = xb.get_device_backend(device) --> 176 aval_out = prim.abstract_eval(*avals, **params) 177 if prim.multiple_results: 178 handlers = tuple(map(aval_to_result_handler, aval_out)) google3/third_party/py/jax/lax/lax.py in standard_abstract_eval(prim, shape_rule, dtype_rule, *args, **kwargs) 1500 return ConcreteArray(prim.impl(*[x.val for x in args], **kwargs)) 1501 elif least_specialized is ShapedArray: -> 1502 return ShapedArray(shape_rule(*args, **kwargs), dtype_rule(*args, **kwargs)) 1503 elif least_specialized is UnshapedArray: 1504 return UnshapedArray(dtype_rule(*args, **kwargs)) google3/third_party/py/jax/lax/lax.py in _conv_general_dilated_shape_rule(lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation, dimension_numbers, feature_group_count, **unused_kwargs) 1943 lhs_trans = _dilate_shape(onp.take(lhs.shape, lhs_perm), lhs_dilation) 1944 rhs_trans = _dilate_shape(onp.take(rhs.shape, rhs_perm), rhs_dilation) -> 1945 out_trans = conv_shape_tuple(lhs_trans, rhs_trans, window_strides, padding) 1946 return tuple(onp.take(out_trans, onp.argsort(out_perm))) 1947 google3/third_party/py/jax/lax/lax.py in conv_shape_tuple(lhs_shape, rhs_shape, strides, pads) 4334 raise TypeError(msg.format(len(lhs_shape) - 2, len(pads))) 4335 -> 4336 lhs_padded = onp.add(lhs_shape[2:], onp.add(*zip(*pads))) 4337 out_space = onp.floor_divide( 4338 onp.subtract(lhs_padded, rhs_shape[2:]), strides) + 1 ValueError: invalid number of arguments ```
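The regression test added above boils down to the following check: with no spatial dimensions, the convolution is just a matrix product. This runs once the shape-rule fix is in; before it, it fails with the ValueError shown:

```python
import jax.numpy as jnp
from jax import lax

lhs = jnp.ones((10, 5))   # 'NC'
rhs = jnp.ones((5, 7))    # 'IO'

out = lax.conv_general_dilated(lhs, rhs, window_strides=(), padding='VALID',
                               dimension_numbers=('NC', 'IO', 'NC'))
# a 0-D convolution reduces to a dot product over the feature dimension
assert jnp.allclose(out, jnp.dot(lhs, rhs))
```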
2020-01-09T18:16:17
google/jax
1,993
google__jax-1993
[ "1986" ]
a5b6e8abf3a6c4f172b7b8b339d6ef64dd8d73dc
diff --git a/jax/scipy/stats/__init__.py b/jax/scipy/stats/__init__.py --- a/jax/scipy/stats/__init__.py +++ b/jax/scipy/stats/__init__.py @@ -26,3 +26,4 @@ from . import pareto from . import t from . import uniform +from . import logistic diff --git a/jax/scipy/stats/logistic.py b/jax/scipy/stats/logistic.py new file mode 100644 --- /dev/null +++ b/jax/scipy/stats/logistic.py @@ -0,0 +1,48 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import scipy.stats as osp_stats +from jax.scipy.special import expit, logit + +from ... import lax +from ...numpy.lax_numpy import _promote_args_inexact, _wraps + + +@_wraps(osp_stats.logistic.logpdf, update_doc=False) +def logpdf(x): + return lax.neg(x) - 2. * lax.log1p(lax.exp(lax.neg(x))) + +@_wraps(osp_stats.logistic.pdf, update_doc=False) +def pdf(x): + return lax.exp(logpdf(x)) + +@_wraps(osp_stats.logistic.ppf, update_doc=False) +def ppf(x): + return logit(x) + +@_wraps(osp_stats.logistic.sf, update_doc=False) +def sf(x): + return expit(lax.neg(x)) + +@_wraps(osp_stats.logistic.isf, update_doc=False) +def isf(x): + return -logit(x) + +@_wraps(osp_stats.logistic.cdf, update_doc=False) +def cdf(x): + return expit(x)
diff --git a/tests/scipy_stats_test.py b/tests/scipy_stats_test.py --- a/tests/scipy_stats_test.py +++ b/tests/scipy_stats_test.py @@ -211,6 +211,58 @@ def args_maker(): tol=1e-6) self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True) + @genNamedParametersNArgs(1, jtu.rand_default) + def testLogisticCdf(self, rng_factory, shapes, dtypes): + rng = rng_factory() + scipy_fun = osp_stats.logistic.cdf + lax_fun = lsp_stats.logistic.cdf + + def args_maker(): + return list(map(rng, shapes, dtypes)) + + self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False, + tol=1e-6) + self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True) + + @genNamedParametersNArgs(1, jtu.rand_default) + def testLogisticLogpdf(self, rng_factory, shapes, dtypes): + rng = rng_factory() + scipy_fun = osp_stats.logistic.logpdf + lax_fun = lsp_stats.logistic.logpdf + + def args_maker(): + return list(map(rng, shapes, dtypes)) + + self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False, + tol=1e-6) + self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True) + + @genNamedParametersNArgs(1, jtu.rand_default) + def testLogisticPpf(self, rng_factory, shapes, dtypes): + rng = rng_factory() + scipy_fun = osp_stats.logistic.ppf + lax_fun = lsp_stats.logistic.ppf + + def args_maker(): + return list(map(rng, shapes, dtypes)) + + self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False, + tol=1e-6) + self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True) + + @genNamedParametersNArgs(1, jtu.rand_default) + def testLogisticSf(self, rng_factory, shapes, dtypes): + rng = rng_factory() + scipy_fun = osp_stats.logistic.sf + lax_fun = lsp_stats.logistic.sf + + def args_maker(): + return list(map(rng, shapes, dtypes)) + + self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False, + tol=1e-6) + self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True) + # TODO: currently it ignores the argument "shapes" and only tests dim=4 @genNamedParametersNArgs(3, jtu.rand_default) def testMultivariateNormalLogPdf(self, rng_factory, shapes, dtypes):
Could we have support for scipy.stats.logistic? In #324 we got support for `logit` and `expit` from `scipy.special`. This provides all the building blocks to also support `scipy.stats.logistic` (in particular, its members `cdf`, `pdf`, `ppf`, `logcdf` and `logpdf`). Having this would enable doing maximum-likelihood estimation of logistic regression using `scipy.optimize`.
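For reference, the standard logistic distribution is only a few lines on top of `expit`/`logit`; the sketch below mirrors the definitions added in the patch above, written with `jax.numpy` and without any special care for extreme arguments:

```python
import jax.numpy as jnp
from jax.scipy.special import expit, logit

def logistic_logpdf(x):
    # log f(x) = -x - 2*log(1 + exp(-x)) for the standard logistic
    return -x - 2. * jnp.log1p(jnp.exp(-x))

logistic_cdf = expit               # CDF of the standard logistic
logistic_ppf = logit               # quantile function (inverse CDF)
logistic_sf = lambda x: expit(-x)  # survival function

x = jnp.linspace(-3., 3., 7)
print(logistic_cdf(x), jnp.exp(logistic_logpdf(x)))
```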
2020-01-14T06:00:37
google/jax
2,034
google__jax-2034
[ "2003" ]
17b5fe11d03e35bcebe662fc14d2decaeca81b95
diff --git a/jax/nn/initializers.py b/jax/nn/initializers.py --- a/jax/nn/initializers.py +++ b/jax/nn/initializers.py @@ -32,13 +32,13 @@ def zeros(key, shape, dtype=np.float32): return np.zeros(shape, dtype) def ones(key, shape, dtype=np.float32): return np.ones(shape, dtype) -def uniform(scale=1e-2): - def init(key, shape, dtype=np.float32): +def uniform(scale=1e-2, dtype=np.float32): + def init(key, shape, dtype=dtype): return random.uniform(key, shape, dtype) * scale return init -def normal(stddev=1e-2): - def init(key, shape, dtype=np.float32): +def normal(stddev=1e-2, dtype=np.float32): + def init(key, shape, dtype=dtype): return random.normal(key, shape, dtype) * stddev return init @@ -48,8 +48,8 @@ def _compute_fans(shape, in_axis=-2, out_axis=-1): fan_out = shape[out_axis] * receptive_field_size return fan_in, fan_out -def variance_scaling(scale, mode, distribution, in_axis=-2, out_axis=-1): - def init(key, shape, dtype=np.float32): +def variance_scaling(scale, mode, distribution, in_axis=-2, out_axis=-1, dtype=np.float32): + def init(key, shape, dtype=dtype): fan_in, fan_out = _compute_fans(shape, in_axis, out_axis) if mode == "fan_in": denominator = fan_in elif mode == "fan_out": denominator = fan_out @@ -77,14 +77,14 @@ def init(key, shape, dtype=np.float32): kaiming_uniform = he_uniform = partial(variance_scaling, 2.0, "fan_in", "uniform") kaiming_normal = he_normal = partial(variance_scaling, 2.0, "fan_in", "truncated_normal") -def orthogonal(scale=1.0, column_axis=-1): +def orthogonal(scale=1.0, column_axis=-1, dtype=np.float32): """ Construct an initializer for uniformly distributed orthogonal matrices. If the shape is not square, the matrices will have orthonormal rows or columns depending on which side is smaller. """ - def init(key, shape, dtype=np.float32): + def init(key, shape, dtype=dtype): if len(shape) < 2: raise ValueError("orthogonal initializer requires at least a 2D shape") n_rows, n_cols = onp.prod(shape) // shape[column_axis], shape[column_axis] @@ -99,21 +99,21 @@ def init(key, shape, dtype=np.float32): return init -def delta_orthogonal(scale=1.0, column_axis=-1): +def delta_orthogonal(scale=1.0, column_axis=-1, dtype=np.float32): """ Construct an initializer for delta orthogonal kernels; see arXiv:1806.05393. The shape must be 3D, 4D or 5D. """ - def init(key, shape, dtype=np.float32): + def init(key, shape, dtype=dtype): if len(shape) not in [3, 4, 5]: raise ValueError("Delta orthogonal initializer requires a 3D, 4D or 5D " "shape.") if shape[-1] < shape[-2]: raise ValueError("`fan_in` must be less or equal than `fan_out`. ") - ortho_init = orthogonal(scale=scale, column_axis=column_axis) + ortho_init = orthogonal(scale=scale, column_axis=column_axis, dtype=dtype) ortho_matrix = ortho_init(key, shape[-2:]) - W = np.zeros(shape) + W = np.zeros(shape, dtype=dtype) if len(shape) == 3: k = shape[0] return ops.index_update(W, ops.index[(k-1)//2, ...], ortho_matrix)
diff --git a/tests/nn_test.py b/tests/nn_test.py --- a/tests/nn_test.py +++ b/tests/nn_test.py @@ -74,16 +74,16 @@ def initializer_record(name, initializer, min_dims=2, max_dims=4): return InitializerRecord(name, initializer, shapes) INITIALIZER_RECS = [ - initializer_record("uniform", nn.initializers.uniform(), 1), - initializer_record("normal", nn.initializers.normal(), 1), - initializer_record("he_normal", nn.initializers.he_normal()), - initializer_record("he_uniform", nn.initializers.he_uniform()), - initializer_record("glorot_normal", nn.initializers.glorot_normal()), - initializer_record("glorot_uniform", nn.initializers.glorot_uniform()), - initializer_record("lecun_normal", nn.initializers.lecun_normal()), - initializer_record("lecun_uniform", nn.initializers.lecun_uniform()), - initializer_record("orthogonal", nn.initializers.orthogonal(), 2, 2), - initializer_record("orthogonal", nn.initializers.delta_orthogonal(), 4, 4) + initializer_record("uniform", nn.initializers.uniform, 1), + initializer_record("normal", nn.initializers.normal, 1), + initializer_record("he_normal", nn.initializers.he_normal), + initializer_record("he_uniform", nn.initializers.he_uniform), + initializer_record("glorot_normal", nn.initializers.glorot_normal), + initializer_record("glorot_uniform", nn.initializers.glorot_uniform), + initializer_record("lecun_normal", nn.initializers.lecun_normal), + initializer_record("lecun_uniform", nn.initializers.lecun_uniform), + initializer_record("orthogonal", nn.initializers.orthogonal, 2, 2), + initializer_record("delta_orthogonal", nn.initializers.delta_orthogonal, 4, 4) ] class NNInitializersTest(jtu.JaxTestCase): @@ -93,7 +93,7 @@ class NNInitializersTest(jtu.JaxTestCase): "_{}_{}".format( rec.name, jtu.format_shape_dtype_string(shape, dtype)), - "initializer": rec.initializer, + "initializer": rec.initializer(), "shape": shape, "dtype": dtype} for rec in INITIALIZER_RECS for shape in rec.shapes @@ -101,6 +101,27 @@ class NNInitializersTest(jtu.JaxTestCase): def testInitializer(self, initializer, shape, dtype): rng = random.PRNGKey(0) val = initializer(rng, shape, dtype) + self.assertEqual(shape, np.shape(val)) + self.assertEqual(jax.dtypes.canonicalize_dtype(dtype), np.dtype(val)) + + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": + "_{}_{}".format( + rec.name, + jtu.format_shape_dtype_string(shape, dtype)), + "initializer_provider": rec.initializer, + "shape": shape, "dtype": dtype} + for rec in INITIALIZER_RECS + for shape in rec.shapes + for dtype in [onp.float32, onp.float64])) + def testInitializerProvider(self, initializer_provider, shape, dtype): + rng = random.PRNGKey(0) + initializer = initializer_provider(dtype=dtype) + val = initializer(rng, shape) + + self.assertEqual(shape, np.shape(val)) + self.assertEqual(jax.dtypes.canonicalize_dtype(dtype), np.dtype(val)) + if __name__ == "__main__": absltest.main()
stax neural network initializers default to 32bit floats even in 64 bit mode

I need/want to try out my code with 64 bit wide floats. My code uses neural networks set up with stax, whose initializers default to 32 bit floats (and there is no usable API to change this in user-facing code). Using the thus-initialized (`float32`) parameters (and `float64` data batches) in an update `fori_loop` (taking the gradients with `value_and_grad` and updating parameters with any optimizer) results in one of two faulty behaviors (depending on the exact nature of the computation):

1. the gradients come out as `float32`, thus thwarting my intention of computing with `float64` values
2. the gradients come out as `float64`, which results in a type-mismatch exception thrown by `fori_loop`, since the initial value argument passed to it was of type `float32`
`stax` is intentionally a somewhat minimal example of a neural network library that is perhaps more intended for expository purposes than anything else. Its goal is to be simple more than it is to be feature complete. There are a number of other neural network libraries built on top of JAX that may be more useful and more feature complete (examples include [flax](https://github.com/google-research/flax/tree/prerelease), [trax](https://github.com/google/trax), [Jaxnet](https://github.com/JuliusKunze/jaxnet), ... and there are more! A thousand flowers seem to be blooming right now, which is great!) I don't know if any of them in particular support 64-bit weights, but it seems like a reasonable thing to want. That said, `stax` is very minimal, only a few hundred lines of code. So if you are mostly happy with stax, I'd actually just suggest forking it into your repository and making whatever changes you like. Or feel free to send a PR allowing more dtype flexibility. Up to you. It might also be possible to get the type behavior you want by passing in a custom initializer. Would you consider a PR that just changes all default `dtypes` in `jax/nn/initializers.py` to `np.float64`? With default jax behavior (i.e., 32 bit mode) those will be cast to `np.float32` anyways so nothing changes there but in 64 bit mode (i.e., when `jax_enable_x64` was explicitly set to `True`) they would then always be `float64`. I don't think that would be a good idea, because our plan is to make x64 mode the default sometime soon. The proposed change would mean that the default is simply 64-bit weights, which seems like a strange default for neural network training. I think the best plan is to make the dtype user-selectable somehow.
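With the `dtype` keyword added to the initializers in the patch above, 64-bit weights are one argument away (assuming `jax_enable_x64` is switched on). The `stax.Dense` line shows how such an initializer could be passed in as the custom initializer suggested here; it assumes the usual `W_init`/`b_init` keywords:

```python
from jax.config import config
config.update("jax_enable_x64", True)

import jax.numpy as jnp
from jax import random
from jax.nn import initializers
from jax.experimental import stax

w_init = initializers.he_normal(dtype=jnp.float64)
W = w_init(random.PRNGKey(0), (128, 256))   # float64 weights

init_fn, apply_fn = stax.Dense(512, W_init=w_init,
                               b_init=initializers.normal(dtype=jnp.float64))
```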
2020-01-21T13:47:56
google/jax
2,054
google__jax-2054
[ "2053" ]
bb176d414b97fe9afa84a5924aea728442634303
diff --git a/jax/lax/lax.py b/jax/lax/lax.py --- a/jax/lax/lax.py +++ b/jax/lax/lax.py @@ -1649,6 +1649,7 @@ def _brcast_to(x, shape): _num = _int | _float | _complex _any = _int | _float | _complex | _bool +_bool_or_int = _int | _bool neg_p = standard_unop(_num, 'neg') ad.deflinear(neg_p, lambda t: [neg(t)]) @@ -1814,15 +1815,15 @@ def _pow_jvp_rhs(g, ans, x, y): ad.defjvp2(pow_p, _pow_jvp_lhs, _pow_jvp_rhs) _replace_zero = lambda x: select(eq(x, _const(x, 0)), _ones(x), x) -not_p = standard_unop(_int | _bool, 'not') +not_p = standard_unop(_bool_or_int, 'not') -and_p = standard_naryop([_any, _any], 'and') +and_p = standard_naryop([_bool_or_int, _bool_or_int], 'and') ad.defjvp_zero(and_p) -or_p = standard_naryop([_any, _any], 'or') +or_p = standard_naryop([_bool_or_int, _bool_or_int], 'or') ad.defjvp_zero(or_p) -xor_p = standard_naryop([_any, _any], 'xor') +xor_p = standard_naryop([_bool_or_int, _bool_or_int], 'xor') ad.defjvp_zero(xor_p) def _add_transpose(t, x, y):
`&` and `|` operators don't work for `float32` arguments

Example:

```python
a = jnp.array([0.0, 1.0])
b = a | a
```

results in `RuntimeError: Invalid argument: Expected pred or integral type in argument to and/or operation; got F32.`
This should be an error (it's an error in classic NumPy too), but it should be caught earlier. Thanks for the report!
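In the meantime, the user-level fix is the same as in classic NumPy: build boolean masks first, or spell out the logical ops:

```python
import jax.numpy as jnp

a = jnp.array([0.0, 1.0])

mask = (a != 0) | (a > 0.5)              # bitwise ops on bools are fine
mask2 = jnp.logical_or(a != 0, a > 0.5)  # or use the explicit logical op
```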
2020-01-23T15:59:55
google/jax
2,060
google__jax-2060
[ "2044" ]
393938f38720e1c49553bd971f7ed4bd12239110
diff --git a/build/build.py b/build/build.py --- a/build/build.py +++ b/build/build.py @@ -307,7 +307,7 @@ def main(): # Find a working Bazel. bazel_path = get_bazel_path(args.bazel_path) - check_bazel_version(bazel_path, min_version="0.24.0", max_version=None) + check_bazel_version(bazel_path, min_version="0.26.0", max_version=None) print("Bazel binary path: {}".format(bazel_path)) python_bin_path = get_python_bin_path(args.python_bin_path)
build.py specifies bazel 0.24 but uses repo_env which is new in bazel 0.26 https://github.com/google/jax/blob/master/build/build.py#L310 specifies that the minimum allowed bazel version is 0.24, but https://github.com/google/jax/blob/master/build/build.py#L167-L169 uses `--repo_env` (as opposed to `--action_env`), which was added in bazel 0.26 (in https://github.com/bazelbuild/bazel/commit/d7702b1 ).
@hawkinsp as this appears to stem from https://github.com/google/jax/commit/3e9ce2f . Oops! Sorry about that. The right fix is probably for us to bump the minimum version of bazel (0.24 is old at this point). I'll do that. Unfortunately I think there are other issues preventing us from upgrading bazel, at least when using the full build_jaxlib_wheels.sh stack. We may wanna revert to using --action_env for now, although ideally we fix the issues with new bazels. I may have been mistaken -- I was just able to build at least one jaxlib wheel with bazel 1.1.0. I'll try bumping the minimum version as suggested and see what happens when I try a fresh jaxlib build (I've been debugging a lot of build issues so it's possible I messed something else up along the way).
2020-01-24T00:46:14
google/jax
2,111
google__jax-2111
[ "2110" ]
102ce6f0acbfbc151accb62186c48b9c473271f0
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -2308,7 +2308,7 @@ def tensordot(a, b, axes=2, precision=None): if type(axes) is int: if axes > _min(a_ndim, b_ndim): msg = "Number of tensordot axes (axes {}) exceeds input ranks ({} and {})" - raise msg.format(axes, a.shape, b.shape) + raise TypeError(msg.format(axes, a.shape, b.shape)) contracting_dims = tuple(range(a_ndim - axes, a_ndim)), tuple(range(axes)) elif type(axes) in (list, tuple) and len(axes) == 2: ax1, ax2 = axes
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -814,6 +814,13 @@ def onp_fun(a, b): tol=tol) self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True) + def testTensordotErrors(self): + a = onp.random.random((3, 2, 2)) + b = onp.random.random((2,)) + self.assertRaisesRegex( + TypeError, "Number of tensordot axes.*exceeds input ranks.*", + lambda: lnp.tensordot(a, b, axes=2)) + @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_{}_{}".format( jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
tensordot raises TypeError: exceptions must derive from BaseException Setup: ```python import jax.numpy as jnp import numpy as np shp_a = (3, 2, 2) shp_c = (2,) a = np.random.random(shp_a) c = np.random.random(shp_c) ``` Obviously, `jax.numpy.tensordot(a, c, axis=2)` should not work, but it raises a wrong error (logically, this should be something like dimension mismatch or index error): ```console $ jnp.tensordot(a, c) --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-48-e1d9970c866f> in <module> ----> 1 jnp.tensordot(a, c) /usr/lib/python3.8/site-packages/jax/numpy/lax_numpy.py in tensordot(a, b, axes, precision) 2309 if axes > _min(a_ndim, b_ndim): 2310 msg = "Number of tensordot axes (axes {}) exceeds input ranks ({} and {})" -> 2311 raise msg.format(axes, a.shape, b.shape) 2312 contracting_dims = tuple(range(a_ndim - axes, a_ndim)), tuple(range(axes)) 2313 elif type(axes) in (list, tuple) and len(axes) == 2: TypeError: exceptions must derive from BaseException ``` while in NumPy ```console $ np.tensordot(a, c) --------------------------------------------------------------------------- IndexError Traceback (most recent call last) <ipython-input-46-3f592bfd2cd1> in <module> ----> 1 np.tensordot(a, c) <__array_function__ internals> in tensordot(*args, **kwargs) /usr/lib/python3.8/site-packages/numpy/core/numeric.py in tensordot(a, b, axes) 1070 else: 1071 for k in range(na): -> 1072 if as_[axes_a[k]] != bs[axes_b[k]]: 1073 equal = False 1074 break IndexError: tuple index out of range ```
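The underlying Python mistake is raising the format string itself rather than an exception instance; a tiny sketch of the behaviour (values made up), with the corrected pattern from the patch in a comment:

```python
msg = "Number of tensordot axes (axes {}) exceeds input ranks ({} and {})"

try:
    raise msg.format(2, (3, 2, 2), (2,))   # raising a str is itself an error
except TypeError as e:
    print(e)   # "exceptions must derive from BaseException"

# the fix wraps the message in a real exception type:
# raise TypeError(msg.format(axes, a.shape, b.shape))
```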
2020-01-29T14:34:40
google/jax
2,112
google__jax-2112
[ "2105" ]
102ce6f0acbfbc151accb62186c48b9c473271f0
diff --git a/jax/lax/lax.py b/jax/lax/lax.py --- a/jax/lax/lax.py +++ b/jax/lax/lax.py @@ -3627,6 +3627,9 @@ def reduce_window(x, window_dimensions, window_strides, padding): def _reduce_window_sum_shape_rule(operand, window_dimensions, window_strides, padding, input_shape): + if not dtypes.issubdtype(operand.dtype, onp.number): + msg = "operand to reduce_window_sum must have a number dtype, got {}" + raise TypeError(msg.format(onp.dtype(operand.dtype).name)) return _common_reduce_window_shape_rule(operand, window_dimensions, window_strides, padding) diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -1534,6 +1534,8 @@ def _cumulative_reduction(a, axis, dtype): if squash_nan: a = where(isnan(a), _constant_like(a, init_val), a) + if not dtype and _dtype(a) == bool_: + dtype = int_ if dtype: a = lax.convert_element_type(a, dtype) @@ -1553,7 +1555,6 @@ def _cumulative_reduction(a, axis, dtype): def cumulative_reduction(a, axis=None, dtype=None): # jit doesn't support kwargs as static_args. return _cumulative_reduction(a, axis, dtype) - return cumulative_reduction
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -1075,7 +1075,7 @@ def test_single(m, args_maker, repeats, axis): "rng_factory": jtu.rand_default, "lnp_op": getattr(lnp, op), "onp_op": getattr(onp, op)} for op in ["cumsum", "cumprod"] - for dtype in default_dtypes + for dtype in all_dtypes for out_dtype in default_dtypes for shape in all_shapes for axis in [None] + list(range(-len(shape), len(shape)))))
np.cumsum(np.array([True])) raises a runtime error ``` import jax a = jax.np.array([True]) b = jax.np.cumsum(a) ``` ``` --------------------------------------------------------------------------- RuntimeError Traceback (most recent call last) /usr/local/lib/python3.6/dist-packages/jax/interpreters/xla.py in primitive_computation(prim, *avals, **params) 179 try: --> 180 return c.Build() 181 except RuntimeError as e: 11 frames /usr/local/lib/python3.6/dist-packages/jax/lib/xla_bridge.py in Build(self, *args, **kwargs) 256 return super(_JaxComputationBuilder, self).Build( --> 257 *args, **kwargs) 258 /usr/local/lib/python3.6/dist-packages/jaxlib/xla_client.py in Build(self, root, backend) 729 else: --> 730 return Computation(self._builder.Build(), backend=backend) 731 RuntimeError: Invalid argument: Expected element type in shape to be arithmetic type for operation add; got PRED.: During handling of the above exception, another exception occurred: RuntimeError Traceback (most recent call last) <ipython-input-3-94787de425b8> in <module>() 2 3 a = jax.np.array([True]) ----> 4 b = jax.np.cumsum(a) /usr/local/lib/python3.6/dist-packages/jax/numpy/lax_numpy.py in cumulative_reduction(a, axis, dtype) 1365 def cumulative_reduction(a, axis=None, dtype=None): 1366 # jit doesn't support kwargs as static_args. -> 1367 return _cumulative_reduction(a, axis, dtype) 1368 1369 return cumulative_reduction /usr/local/lib/python3.6/dist-packages/jax/api.py in f_jitted(*args, **kwargs) 148 _check_args(args_flat) 149 flat_fun, out_tree = flatten_fun(f, in_tree) --> 150 out = xla.xla_call(flat_fun, *args_flat, device=device, backend=backend) 151 return tree_unflatten(out_tree(), out) 152 /usr/local/lib/python3.6/dist-packages/jax/core.py in call_bind(primitive, f, *args, **params) 592 if top_trace is None: 593 with new_sublevel(): --> 594 outs = primitive.impl(f, *args, **params) 595 else: 596 tracers = map(top_trace.full_raise, args) /usr/local/lib/python3.6/dist-packages/jax/interpreters/xla.py in _xla_call_impl(fun, *args, **params) 359 device = params['device'] 360 backend = params.get('backend', None) --> 361 compiled_fun = _xla_callable(fun, device, backend, *map(abstractify, args)) 362 try: 363 return compiled_fun(*args) /usr/local/lib/python3.6/dist-packages/jax/linear_util.py in memoized_fun(fun, *args) 207 fun.populate_stores(stores) 208 else: --> 209 ans = call(fun, *args) 210 cache[key] = (ans, fun.stores) 211 return ans /usr/local/lib/python3.6/dist-packages/jax/interpreters/xla.py in _xla_callable(fun, device, backend, *abstract_args) 390 xla_consts = _map(c.Constant, consts) 391 xla_args = _xla_callable_args(c, abstract_args, tuple_args) --> 392 out_nodes = jaxpr_subcomp(c, jaxpr, backend, axis_env, xla_consts, (), *xla_args) 393 built = c.Build(c.Tuple(*out_nodes)) 394 /usr/local/lib/python3.6/dist-packages/jax/interpreters/xla.py in jaxpr_subcomp(c, jaxpr, backend, axis_env, consts, freevars, *args) 260 ans = rule(c, *in_nodes, **eqn.params) 261 elif eqn.primitive in translations: --> 262 ans = translations[eqn.primitive](c, *in_nodes, **eqn.params) 263 elif eqn.primitive in reduction_translations: 264 new_params = check_backend_params(eqn.params, backend) /usr/local/lib/python3.6/dist-packages/jax/lax/lax.py in _reduce_window_sum_translation_rule(c, operand, window_dimensions, window_strides, padding, input_shape) 3533 scalar = ShapedArray((), dtype) 3534 return c.ReduceWindow(operand, c.Constant(onp.array(0, dtype)), -> 3535 xla.primitive_computation(add_p, scalar, scalar), 3536 
window_dimensions, window_strides, padding) 3537 /usr/local/lib/python3.6/dist-packages/jax/interpreters/xla.py in primitive_computation(prim, *avals, **params) 183 "This is a bug in JAX's shape-checking rules; please report it!\n" 184 "https://github.com/google/jax/issues\n") --> 185 raise RuntimeError(msg) 186 187 def _execute_compiled_primitive(prim, compiled, backend, result_handler, *args): RuntimeError: Invalid argument: Expected element type in shape to be arithmetic type for operation add; got PRED.: This is a bug in JAX's shape-checking rules; please report it! https://github.com/google/jax/issues ```
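Until boolean inputs are promoted automatically (which is what the `lax_numpy` change above does), casting before the reduction sidesteps the error and matches NumPy's result:

```python
import jax.numpy as jnp

a = jnp.array([True, False, True])
b = jnp.cumsum(a.astype(jnp.int32))   # [1, 1, 2], as in NumPy
```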
2020-01-29T14:52:46
google/jax
2,113
google__jax-2113
[ "2104" ]
04befac4f64e712772dfe5e3c248ce6cee7b618d
diff --git a/jax/scipy/linalg.py b/jax/scipy/linalg.py --- a/jax/scipy/linalg.py +++ b/jax/scipy/linalg.py @@ -278,7 +278,7 @@ def _expm(A, upper_triangular=False): R = _solve_P_Q(P, Q, upper_triangular) R = _squaring(R, n_squarings) return R - + @jit def _calc_P_Q(A): A = np.asarray(A) @@ -379,3 +379,25 @@ def _pade13(A): U = np.dot(A,np.dot(A6, b[13]*A6 + b[11]*A4 + b[9]*A2) + b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident) V = np.dot(A6, b[12]*A6 + b[10]*A4 + b[8]*A2) + b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*ident return U,V + + +@_wraps(scipy.linalg.block_diag) +@jit +def block_diag(*arrs): + if len(arrs) == 0: + arrs = [np.zeros((1, 0))] + arrs = np._promote_dtypes(*arrs) + bad_shapes = [i for i, a in enumerate(arrs) if np.ndim(a) > 2] + if bad_shapes: + raise ValueError("Arguments to jax.scipy.linalg.block_diag must have at " + "most 2 dimensions, got {} at argument {}." + .format(arrs[bad_shapes[0]], bad_shapes[0])) + arrs = [np.atleast_2d(a) for a in arrs] + acc = arrs[0] + dtype = lax.dtype(acc) + for a in arrs[1:]: + _, c = a.shape + a = lax.pad(a, dtype.type(0), ((0, 0, 0), (acc.shape[-1], 0, 0))) + acc = lax.pad(acc, dtype.type(0), ((0, 0, 0), (0, c, 0))) + acc = lax.concatenate([acc, a], dimension=0) + return acc
diff --git a/tests/linalg_test.py b/tests/linalg_test.py --- a/tests/linalg_test.py +++ b/tests/linalg_test.py @@ -682,6 +682,23 @@ def f(inp): class ScipyLinalgTest(jtu.JaxTestCase): + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": "_i={}".format(i), "args": args} + for i, args in enumerate([ + (), + (1,), + (7, -2), + (3, 4, 5), + (onp.ones((3, 4), dtype=np.float_), 5, + onp.random.randn(5, 2).astype(np.float_)), + ]))) + def testBlockDiag(self, args): + args_maker = lambda: args + self._CheckAgainstNumpy(osp.linalg.block_diag, jsp.linalg.block_diag, + args_maker, check_dtypes=True) + self._CompileAndCheck(jsp.linalg.block_diag, args_maker, check_dtypes=True) + + @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)),
Better support for handling block diagonal matrices Dear JAX team, Thanks for the amazing work with JAX! I'm currently doing some work which would really benefit from support for handling block diagonal matrices. In particular, I would like to assemble matrices using something like `scipy.linalg.block_diag` (https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.linalg.block_diag.html). It would also be nice to be able to split them again, but that's a bit awkward in numpy too and I think could be done using slicing. I've been able to work around this for now using kronecker products, but it's not very neat. Would this be hard to add? Could give it a go too if it's not too hard and there's a rough plan (maybe scatter would be one way to go?).
https://github.com/google/jax/pull/2106 adds an implementation of `np.block`, which might help with assembling block matrices. How large are your blocks? To me, this looks like a usecase that might be better solved by a `LinearOperator` object that supports (at least) matrix-vector multiplication, e.g., https://www.tensorflow.org/api_docs/python/tf/linalg/LinearOperatorBlockDiag Thanks both of you! @hawkinsp I think np.block will do the trick! It doesn't look quite as nice as block_diag because you have to explicitly create the zero parts, but that's not a big deal. Happy to have the issue closed! @shoyer The blocks are currently really small, definitely under 10x10. For larger ones I agree, the linear operator approach should be much more efficient. Are there any plans to implement them in JAX? > Are there any plans to implement them in JAX? Not that I know of, currently
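With the patch above this becomes a call to `jax.scipy.linalg.block_diag`; on earlier versions the same layout can be assembled by hand from `concatenate` (or with `jnp.block` once the PR mentioned above lands). A sketch:

```python
import jax.numpy as jnp
from jax.scipy.linalg import block_diag   # added by this PR

A = jnp.ones((2, 2))
B = 3. * jnp.ones((3, 3))
C = block_diag(A, B)                      # (5, 5), with A and B on the diagonal

# hand-rolled equivalent using only jnp primitives
top = jnp.concatenate([A, jnp.zeros((2, 3))], axis=1)
bottom = jnp.concatenate([jnp.zeros((3, 2)), B], axis=1)
C2 = jnp.concatenate([top, bottom], axis=0)
```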
2020-01-29T15:50:53
google/jax
2,136
google__jax-2136
[ "2030" ]
efbdaf66bfa584cc635092919a23b684c7fb2247
diff --git a/jax/api.py b/jax/api.py --- a/jax/api.py +++ b/jax/api.py @@ -2027,7 +2027,8 @@ def checkpoint(fun, concrete=False): def fun_remat(*args, **kwargs): args_flat, in_tree = tree_flatten((args, kwargs)) flat_fun, out_tree = flatten_fun(lu.wrap_init(fun), in_tree) - out_flat = pe.remat_call(flat_fun, *args_flat, concrete=concrete) + out_flat = pe.remat_call(flat_fun, *args_flat, name=flat_fun.__name__, + concrete=concrete) return tree_unflatten(out_tree(), out_flat) return fun_remat remat = checkpoint diff --git a/jax/interpreters/partial_eval.py b/jax/interpreters/partial_eval.py --- a/jax/interpreters/partial_eval.py +++ b/jax/interpreters/partial_eval.py @@ -526,7 +526,7 @@ def _remat_partial_eval(trace, f, tracers, params): # Since we traced with everything marked as unknown, but we need to know which # outputs are known/unknown, we use partial_eval_jaxpr to get out_unknowns. jaxpr_converted = convert_freevars_jaxpr(jaxpr) - in_avals = ([raise_to_shaped(t.pval[0]) for t in env] + in_avals = ([raise_to_shaped(partial_val_aval(*t.pval)) for t in env] + [raise_to_shaped(pv) for pv in in_pvs]) out_avals = [raise_to_shaped(pv if pv is not None else abstract_unit if var is unitvar
diff --git a/tests/api_test.py b/tests/api_test.py --- a/tests/api_test.py +++ b/tests/api_test.py @@ -1600,6 +1600,46 @@ def move(R,i): api.grad(func)(5.0) # doesn't crash + def test_remat_jit2(self): + @api.jit + def f(x): + y = 2 * x + + @api.remat + def g(): + return y + + return g() + + self.assertAllClose(f(3), 6, check_dtypes=False) + + def test_remat_nontrivial_env(self): + # simplified from https://github.com/google/jax/issues/2030 + + @api.remat + def foo(state, dt=0.5, c=1): + u, u_t = state + u_tt = c**2 * u + u_t = u_t + u_tt * dt + return (u, u_t) + + @partial(api.jit, static_argnums=(1,)) + def _multi_step(state, count, dt, c): + f = lambda s, _: (foo(s, dt, c), _) + return lax.scan(f, state, None, count) + + def multi_step(state, count, dt=1/np.sqrt(2), c=1): + return _multi_step(state, count, dt, c) + + def loss(u0, target, steps, dt=1/np.sqrt(2), c=1): + init = (u0, np.zeros_like(u0)) + (uf, _), _ = multi_step(init, steps, dt, c) + return ((uf - target) ** 2).mean() + + target = np.zeros((128, 128)) + u0 = np.ones_like(target) + loss(u0, target, 10) # doesn't crash + def test_trivial_computations(self): x = np.array([1, 2, 3]) y = api.jit(lambda x: x)(x)
remat inside scan only works when computing gradient I tried using `remat` inside a variation of my TPU wave equation example (for optimizing an initial condition). It works when computing a gradient, but not when doing the forwards calculation of the loss: ```python from functools import partial import jax from jax import lax import jax.numpy as jnp import numpy as np from scipy import ndimage def axis_slice(ndim, index, axis): slices = [slice(None)] * ndim slices[axis] = index return tuple(slices) def slice_along_axis(array, index, axis): return array[axis_slice(array.ndim, index, axis)] def shift(array, offset, axis): index = slice(offset, None) if offset >= 0 else slice(None, offset) sliced = slice_along_axis(array, index, axis) padding = [(0, 0)] * array.ndim padding[axis] = (-min(offset, 0), max(offset, 0)) return jnp.pad(sliced, padding, mode='constant', constant_values=0) def laplacian(array, step=1): left = shift(array, +1, axis=0) right = shift(array, -1, axis=0) up = shift(array, +1, axis=1) down = shift(array, -1, axis=1) convolved = (left + right + up + down - 4 * array) if step != 1: convolved *= (1 / step ** 2) return convolved def scalar_wave_equation(u, c=1, dx=1): return c ** 2 * laplacian(u, dx) @jax.remat def leapfrog_step(state, dt=0.5, c=1): # https://en.wikipedia.org/wiki/Leapfrog_integration u, u_t = state u_tt = scalar_wave_equation(u, c) u_t = u_t + u_tt * dt u = u + u_t * dt return (u, u_t) @partial(jax.jit, static_argnums=(1,)) def _multi_step(state, count, dt, c): f = lambda s, _: (leapfrog_step(s, dt, c), _) return lax.scan(f, state, None, count) def multi_step(state, count, dt=1/np.sqrt(2), c=1): return _multi_step(state, count, dt, c) def loss(u0, target, steps, dt=1/np.sqrt(2), c=1): init = (u0, jnp.zeros_like(u0)) (uf, _), _ = multi_step(init, steps, dt, c) return ((uf - target) ** 2).mean() grad_loss = jax.jit(jax.grad(loss), static_argnums=(2,)) x = np.linspace(0, 1, num=128, endpoint=False) y = np.linspace(0, 1, num=128, endpoint=False) x_mesh, y_mesh = np.meshgrid(x, y, indexing='ij') target = ndimage.gaussian_filter( 1.0 * (((x_mesh - 1/3) ** 2 + (y_mesh - 1/4) ** 2) < 0.1 ** 2), sigma=1) u0 = np.ones_like(target) grad_loss(u0, target, 10) # works jax.value_and_grad(loss)(u0, target, 10) # also works loss(u0, target, 10) # doesn't work ``` Here's the traceback: ``` --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-9-81418533a6e4> in <module> ----> 1 loss(u0, target, 10) # doesn't work <ipython-input-8-b038831f9764> in loss(u0, target, steps, dt, c) 54 def loss(u0, target, steps, dt=1/np.sqrt(2), c=1): 55 init = (u0, jnp.zeros_like(u0)) ---> 56 (uf, _), _ = multi_step(init, steps, dt, c) 57 return ((uf - target) ** 2).mean() 58 <ipython-input-8-b038831f9764> in multi_step(state, count, dt, c) 50 51 def multi_step(state, count, dt=1/np.sqrt(2), c=1): ---> 52 return _multi_step(state, count, dt, c) 53 54 def loss(u0, target, steps, dt=1/np.sqrt(2), c=1): ~/dev/jax/jax/api.py in f_jitted(*args, **kwargs) 147 _check_args(args_flat) 148 flat_fun, out_tree = flatten_fun(f, in_tree) --> 149 out = xla.xla_call(flat_fun, *args_flat, device=device, backend=backend) 150 return tree_unflatten(out_tree(), out) 151 ~/dev/jax/jax/core.py in call_bind(primitive, f, *args, **params) 603 if top_trace is None: 604 with new_sublevel(): --> 605 outs = primitive.impl(f, *args, **params) 606 else: 607 tracers = map(top_trace.full_raise, args) ~/dev/jax/jax/interpreters/xla.py in 
_xla_call_impl(fun, *args, **params) 441 device = params['device'] 442 backend = params['backend'] --> 443 compiled_fun = _xla_callable(fun, device, backend, *map(arg_spec, args)) 444 try: 445 return compiled_fun(*args) ~/dev/jax/jax/linear_util.py in memoized_fun(fun, *args) 221 fun.populate_stores(stores) 222 else: --> 223 ans = call(fun, *args) 224 cache[key] = (ans, fun.stores) 225 return ans ~/dev/jax/jax/interpreters/xla.py in _xla_callable(fun, device, backend, *arg_specs) 458 pvals = [pe.PartialVal((aval, core.unit)) for aval in abstract_args] 459 with core.new_master(pe.StagingJaxprTrace, True) as master: --> 460 jaxpr, (pvals, consts, env) = pe.trace_to_subjaxpr(fun, master, False).call_wrapped(pvals) 461 assert not env # no subtraces here 462 del master, env ~/dev/jax/jax/linear_util.py in call_wrapped(self, *args, **kwargs) 150 gen = None 151 --> 152 ans = self.f(*args, **dict(self.params, **kwargs)) 153 del args 154 while stack: <ipython-input-8-b038831f9764> in _multi_step(state, count, dt, c) 47 def _multi_step(state, count, dt, c): 48 f = lambda s, _: (leapfrog_step(s, dt, c), _) ---> 49 return lax.scan(f, state, None, count) 50 51 def multi_step(state, count, dt=1/np.sqrt(2), c=1): ~/dev/jax/jax/lax/lax_control_flow.py in scan(f, init, xs, length) 612 forward=True, length=length, jaxpr=jaxpr, 613 num_consts=len(consts), num_carry=len(init_flat), --> 614 linear=(False,) * (len(consts) + len(in_flat))) 615 return tree_unflatten(out_tree, out) 616 ~/dev/jax/jax/lax/lax_control_flow.py in scan_bind(*args, **kwargs) 979 return core.Primitive.bind(scan_p, *args, forward=forward, length=length, 980 jaxpr=jaxpr, num_consts=num_consts, --> 981 num_carry=num_carry, linear=linear) 982 983 scan_p = core.Primitive("scan") ~/dev/jax/jax/core.py in bind(self, *args, **kwargs) 160 161 tracers = map(top_trace.full_raise, args) --> 162 out_tracer = top_trace.process_primitive(self, tracers, kwargs) 163 if self.multiple_results: 164 return map(full_lower, out_tracer) ~/dev/jax/jax/interpreters/partial_eval.py in process_primitive(self, primitive, tracers, params) 96 def process_primitive(self, primitive, tracers, params): 97 if primitive in custom_partial_eval_rules: ---> 98 return custom_partial_eval_rules[primitive](self, *tracers, **params) 99 else: 100 pvs, consts = unzip2(t.pval for t in tracers) ~/dev/jax/jax/lax/lax_control_flow.py in _scan_partial_eval(trace, *tracers, **kwargs) 729 unknowns = const_uk + carry_uk + xs_uk 730 jaxpr_1, jaxpr_2, out_uk = pe.partial_eval_jaxpr( --> 731 jaxpr, unknowns, instantiate=carry_uk + [False] * num_ys) 732 carry_uk_out, ys_uk = out_uk[:num_carry], out_uk[num_carry:] 733 if carry_uk_out == carry_uk: ~/dev/jax/jax/interpreters/partial_eval.py in partial_eval_jaxpr(jaxpr, unknowns, instantiate) 453 pvals = [PartialVal((abstract_unit, unit)) if uk else PartialVal((aval, unit)) 454 for aval, uk in zip(jaxpr.in_avals, unknowns)] --> 455 jaxpr_1, out_pvals, consts_1 = trace_to_jaxpr(lu.wrap_init(fun), pvals, instantiate=True) 456 (out_pvs_2, jaxpr_2, num_res), = cell 457 assert len(jaxpr_2.constvars) == num_res ~/dev/jax/jax/interpreters/partial_eval.py in trace_to_jaxpr(fun, pvals, instantiate, stage_out_calls) 330 with new_master(trace_type) as master: 331 fun = trace_to_subjaxpr(fun, master, instantiate) --> 332 jaxpr, (out_pvals, consts, env) = fun.call_wrapped(pvals) 333 assert not env 334 del master ~/dev/jax/jax/linear_util.py in call_wrapped(self, *args, **kwargs) 150 gen = None 151 --> 152 ans = self.f(*args, **dict(self.params, **kwargs)) 153 
del args 154 while stack: ~/dev/jax/jax/interpreters/partial_eval.py in fun(*vals) 446 pvals = [PartialVal((aval, unit)) if uk else PartialVal((None, val)) 447 for aval, val, uk in zip(jaxpr.in_avals, vals, unknowns)] --> 448 jaxpr_2, out_pvals_2, consts_2 = trace_to_jaxpr(f, pvals, instantiate=instantiate) 449 out_pvs_2, out_consts_2 = unzip2(out_pvals_2) 450 cell.append((out_pvs_2, jaxpr_2, len(consts_2))) ~/dev/jax/jax/interpreters/partial_eval.py in trace_to_jaxpr(fun, pvals, instantiate, stage_out_calls) 330 with new_master(trace_type) as master: 331 fun = trace_to_subjaxpr(fun, master, instantiate) --> 332 jaxpr, (out_pvals, consts, env) = fun.call_wrapped(pvals) 333 assert not env 334 del master ~/dev/jax/jax/linear_util.py in call_wrapped(self, *args, **kwargs) 150 gen = None 151 --> 152 ans = self.f(*args, **dict(self.params, **kwargs)) 153 del args 154 while stack: ~/dev/jax/jax/core.py in jaxpr_as_fun(typed_jaxpr, *args) 77 @curry 78 def jaxpr_as_fun(typed_jaxpr, *args): ---> 79 return eval_jaxpr(typed_jaxpr.jaxpr, typed_jaxpr.literals, (), *args) 80 81 ~/dev/jax/jax/core.py in eval_jaxpr(jaxpr, consts, freevar_vals, *args) 212 in eqn.bound_subjaxprs] 213 subfuns = map(lu.wrap_init, subfuns) --> 214 ans = eqn.primitive.bind(*(subfuns + in_vals), **eqn.params) 215 if eqn.primitive.multiple_results: 216 map(write, eqn.outvars, ans) ~/dev/jax/jax/core.py in call_bind(primitive, f, *args, **params) 606 else: 607 tracers = map(top_trace.full_raise, args) --> 608 outs = map(full_lower, top_trace.process_call(primitive, f, tracers, params)) 609 return apply_todos(env_trace_todo(), outs) 610 ~/dev/jax/jax/interpreters/partial_eval.py in process_call(self, call_primitive, f, tracers, params) 119 tracers = map(self.instantiate_const_abstracted, tracers) 120 if call_primitive in call_partial_eval_rules: --> 121 return call_partial_eval_rules[call_primitive](self, f, tracers, params) 122 if call_primitive in map_primitives: 123 return self.process_map(call_primitive, f, tracers, params) ~/dev/jax/jax/interpreters/partial_eval.py in _remat_partial_eval(trace, f, tracers, params) 522 # outputs are known/unknown, we use partial_eval_jaxpr to get out_unknowns. 523 jaxpr_converted = convert_freevars_jaxpr(jaxpr) --> 524 in_avals = ([raise_to_shaped(t.pval[0]) for t in env] 525 + [raise_to_shaped(pv) for pv in in_pvs]) 526 out_avals = [raise_to_shaped(pv if pv is not None ~/dev/jax/jax/interpreters/partial_eval.py in <listcomp>(.0) 522 # outputs are known/unknown, we use partial_eval_jaxpr to get out_unknowns. 523 jaxpr_converted = convert_freevars_jaxpr(jaxpr) --> 524 in_avals = ([raise_to_shaped(t.pval[0]) for t in env] 525 + [raise_to_shaped(pv) for pv in in_pvs]) 526 out_avals = [raise_to_shaped(pv if pv is not None ~/dev/jax/jax/abstract_arrays.py in raise_to_shaped(aval, weak_type) 232 return abstract_token 233 else: --> 234 raise TypeError(type(aval)) 235 236 core.literalable_types.update(array_types) TypeError: <class 'NoneType'> ```
2020-02-01T06:54:42
google/jax
2,150
google__jax-2150
[ "2130" ]
1022573b26a1996db524229de10fb84dbe6e08b3
diff --git a/jax/random.py b/jax/random.py --- a/jax/random.py +++ b/jax/random.py @@ -782,99 +782,99 @@ def _next_kxv(kxv): 0.017050642, -0.0021309345, 0.00085092385, -1.5248239e-07]] def _gamma_grad_one(z, alpha): - # Ref 1: Pathwise Derivatives Beyond the Reparameterization Trick, Martin & Fritz - # Ref 2: Case 4 follows https://github.com/fritzo/notebooks/blob/master/gamma-reparameterized.ipynb - - # TODO: use lax.cond instead of lax.while_loop when its batching rule is available - # See https://github.com/google/jax/issues/490 - def _case1(zagf): - z, alpha, _, flag = zagf - - # dz = - dCDF(z; a) / pdf(z; a) - # pdf = z^(a-1) * e^(-z) / Gamma(a) - # CDF(z; a) = IncompleteGamma(a, z) / Gamma(a) - # dCDF(z; a) = (dIncompleteGamma - IncompleteGamma * Digamma(a)) / Gamma(a) - # =: unnormalized_dCDF / Gamma(a) - # IncompleteGamma ~ z^a [ 1/a - z/(a+1) + z^2/2!(a+2) - z^3/3!(a+3) + z^4/4!(a+4) - z^5/5!(a+5) ] - # =: z^a * term1 - # dIncompleteGamma ~ z^a * log(z) * term1 - z^a [1/a^2 - z/(a+1)^2 + z^2/2!(a+2)^2 - # - z^3/3!(a+3)^2 + z^4/4!(a+4)^2 - z^5/5!(a+5)^2 ] - # =: z^a * log(z) * term1 - z^a * term2 - # unnormalized_dCDF = z^a { [log(z) - Digamma(a)] * term1 - term2 } - zi = 1.0 - update = zi / alpha - term1 = update - term2 = update / alpha - for i in range(1, 6): - zi = -zi * z / i - update = zi / (alpha + i) - term1 = term1 + update - term2 = term2 + update / (alpha + i) - - unnormalized_cdf_dot = np.power(z, alpha) * ((np.log(z) - lax.digamma(alpha)) * term1 - term2) - unnormalized_pdf = np.power(z, alpha - 1) * np.exp(-z) - grad = -unnormalized_cdf_dot / unnormalized_pdf - - return z, alpha, grad, ~flag - - def _cond2(zagf): - z, alpha, _, flag = zagf - return (~flag) & (alpha > 8.0) & ((z < 0.9 * alpha) | (z > 1.1 * alpha)) - - def _case2(zagf): - z, alpha, _, flag = zagf - - # Formula 58 of [1] - sqrt_8a = np.sqrt(8 * alpha) - z_minus_a = z - alpha - log_z_div_a = np.log(z / alpha) - sign = np.where(z < alpha, 1.0, -1.0) - term1 = 4 * (z + alpha) / (sqrt_8a * z_minus_a * z_minus_a) - term2 = log_z_div_a * (sqrt_8a / z_minus_a + sign * np.power(z_minus_a - alpha * log_z_div_a, -1.5)) - term3 = z * (1.0 + 1.0 / (12 * alpha) + 1.0 / (288 * alpha * alpha)) / sqrt_8a - grad = (term1 + term2) * term3 - - return z, alpha, grad, ~flag - - def _cond3(zagf): - z, alpha, _, flag = zagf - return (~flag) & (alpha > 8.0) & (z >= 0.9 * alpha) & (z <= 1.1 * alpha) - - def _case3(zagf): - z, alpha, _, flag = zagf - - # Formula 59 of [1] - z_div_a = np.divide(z, alpha) - aa = alpha * alpha - term1 = 1440 * alpha + 6 * z_div_a * (53 - 120 * z) - 65 * z_div_a * z_div_a + 3600 * z + 107 - term2 = 1244160 * alpha * aa - term3 = 1 + 24 * alpha + 288 * aa - grad = term1 * term3 / term2 - - return z, alpha, grad, ~flag - - def _case4(zagf): - z, alpha, _, flag = zagf - - # Ref [2] - u = np.log(z / alpha) - v = np.log(alpha) - c = [] - for i in range(8): - c.append(_bivariate_coef[0][i] + u * (_bivariate_coef[1][i] + u * _bivariate_coef[2][i])) - p = c[0] + v * (c[1] + v * (c[2] + v * c[3])) - q = c[4] + v * (c[5] + v * (c[6] + v * c[7])) - grad = np.exp(p / np.maximum(q, 0.01)) - - return z, alpha, grad, ~flag - - _, _, grad, flag = lax.while_loop(lambda zagf: (~zagf[3]) & (zagf[0] < 0.8), - _case1, - (z, alpha, lax._const(alpha, 0.0), False)) - _, _, grad, flag = lax.while_loop(_cond2, _case2, (z, alpha, grad, flag)) - _, _, grad, flag = lax.while_loop(_cond3, _case3, (z, alpha, grad, flag)) - _, _, grad, flag = lax.while_loop(lambda zagf: ~zagf[3], _case4, (z, alpha, grad, flag)) - return grad + # 
Ref 1: Pathwise Derivatives Beyond the Reparameterization Trick, Martin & Fritz + # Ref 2: Case 4 follows https://github.com/fritzo/notebooks/blob/master/gamma-reparameterized.ipynb + + # TODO: use lax.cond instead of lax.while_loop when its batching rule is available + # See https://github.com/google/jax/issues/490 + def _case1(zagf): + z, alpha, _, flag = zagf + + # dz = - dCDF(z; a) / pdf(z; a) + # pdf = z^(a-1) * e^(-z) / Gamma(a) + # CDF(z; a) = IncompleteGamma(a, z) / Gamma(a) + # dCDF(z; a) = (dIncompleteGamma - IncompleteGamma * Digamma(a)) / Gamma(a) + # =: unnormalized_dCDF / Gamma(a) + # IncompleteGamma ~ z^a [ 1/a - z/(a+1) + z^2/2!(a+2) - z^3/3!(a+3) + z^4/4!(a+4) - z^5/5!(a+5) ] + # =: z^a * term1 + # dIncompleteGamma ~ z^a * log(z) * term1 - z^a [1/a^2 - z/(a+1)^2 + z^2/2!(a+2)^2 + # - z^3/3!(a+3)^2 + z^4/4!(a+4)^2 - z^5/5!(a+5)^2 ] + # =: z^a * log(z) * term1 - z^a * term2 + # unnormalized_dCDF = z^a { [log(z) - Digamma(a)] * term1 - term2 } + zi = 1.0 + update = zi / alpha + term1 = update + term2 = update / alpha + for i in range(1, 6): + zi = -zi * z / i + update = zi / (alpha + i) + term1 = term1 + update + term2 = term2 + update / (alpha + i) + + unnormalized_cdf_dot = np.power(z, alpha) * ((np.log(z) - lax.digamma(alpha)) * term1 - term2) + unnormalized_pdf = np.power(z, alpha - 1) * np.exp(-z) + grad = -unnormalized_cdf_dot / unnormalized_pdf + + return z, alpha, grad, ~flag + + def _cond2(zagf): + z, alpha, _, flag = zagf + return (~flag) & (alpha > 8.0) & ((z < 0.9 * alpha) | (z > 1.1 * alpha)) + + def _case2(zagf): + z, alpha, _, flag = zagf + + # Formula 58 of [1] + sqrt_8a = np.sqrt(8 * alpha) + z_minus_a = z - alpha + log_z_div_a = np.log(z / alpha) + sign = np.where(z < alpha, lax._const(z, 1.0), lax._const(z, -1.0)) + term1 = 4 * (z + alpha) / (sqrt_8a * z_minus_a * z_minus_a) + term2 = log_z_div_a * (sqrt_8a / z_minus_a + sign * np.power(z_minus_a - alpha * log_z_div_a, -1.5)) + term3 = z * (1.0 + 1.0 / (12 * alpha) + 1.0 / (288 * alpha * alpha)) / sqrt_8a + grad = (term1 + term2) * term3 + + return z, alpha, grad, ~flag + + def _cond3(zagf): + z, alpha, _, flag = zagf + return (~flag) & (alpha > 8.0) & (z >= 0.9 * alpha) & (z <= 1.1 * alpha) + + def _case3(zagf): + z, alpha, _, flag = zagf + + # Formula 59 of [1] + z_div_a = np.divide(z, alpha) + aa = alpha * alpha + term1 = 1440 * alpha + 6 * z_div_a * (53 - 120 * z) - 65 * z_div_a * z_div_a + 3600 * z + 107 + term2 = 1244160 * alpha * aa + term3 = 1 + 24 * alpha + 288 * aa + grad = term1 * term3 / term2 + + return z, alpha, grad, ~flag + + def _case4(zagf): + z, alpha, _, flag = zagf + + # Ref [2] + u = np.log(z / alpha) + v = np.log(alpha) + c = [] + for i in range(8): + c.append(_bivariate_coef[0][i] + u * (_bivariate_coef[1][i] + u * _bivariate_coef[2][i])) + p = c[0] + v * (c[1] + v * (c[2] + v * c[3])) + q = c[4] + v * (c[5] + v * (c[6] + v * c[7])) + grad = np.exp(p / np.maximum(q, 0.01)) + + return z, alpha, grad, ~flag + + _, _, grad, flag = lax.while_loop(lambda zagf: (~zagf[3]) & (zagf[0] < 0.8), + _case1, + (z, alpha, lax._const(alpha, 0.0), False)) + _, _, grad, flag = lax.while_loop(_cond2, _case2, (z, alpha, grad, flag)) + _, _, grad, flag = lax.while_loop(_cond3, _case3, (z, alpha, grad, flag)) + _, _, grad, flag = lax.while_loop(lambda zagf: ~zagf[3], _case4, (z, alpha, grad, flag)) + return grad def _gamma_grad(sample, a): samples = np.reshape(sample, -1)
diff --git a/tests/random_test.py b/tests/random_test.py --- a/tests/random_test.py +++ b/tests/random_test.py @@ -329,6 +329,15 @@ def testGammaGrad(self, alpha): self.assertAllClose(actual_grad, expected_grad, check_dtypes=True, rtol=2e-2 if jtu.device_under_test() == "tpu" else 5e-4) + def testGammaGradType(self): + # Regression test for https://github.com/google/jax/issues/2130 + key = random.PRNGKey(0) + a = np.array(1., dtype=np.float32) + b = np.array(3., dtype=np.float32) + f = lambda x, y: random.gamma(key=key, a=x, dtype=np.float32) / y + # Should not crash with a type error. + api.vjp(f, a, b) + @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_{}".format(dtype), "dtype": onp.dtype(dtype).name} for dtype in [onp.float32, onp.float64]))
Gamma sample gradients have incorrect dtype in x64 mode
The following snippet of code fails in JAX:
```python
import jax
import jax.numpy as jnp
import jax.random as jaxrand

jax.config.update('jax_enable_x64', True)

key = jaxrand.PRNGKey(0.)
a = jnp.array(1., dtype=jnp.float32)
b = jnp.array(3., dtype=jnp.float32)
f = lambda x, y: jaxrand.gamma(key=key, a=x, dtype=jnp.float32) / y
y, f_vjp = jax.vjp(f, a, b)
```
with
```
TypeError: body_fun output and input must have identical types, got (ShapedArray(float32[]), ShapedArray(float32[]), ShapedArray(float64[]), ShapedArray(bool[])) and (ShapedArray(float32[]), ShapedArray(float32[]), ShapedArray(float32[]), ShapedArray(bool[]))
```
Some dtype information is being lost along the way.
Unless the gamma sampler is implementing implicit reparameterization, it should probably raise for the vjp. The sampler is reparameterized the same way as in TF. I was referring to gradients w.r.t. the shape parameter, implemented here: https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/cwise_op_random_grad.cc I guess the issue here is simpler than that, just an unstable while body. @hawkinsp this is a similar error to what I was trying to give a minimal case for in https://github.com/google/jax/issues/2004
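The patch pins the branch values of the offending `np.where` to the dtype of `z` via `lax._const` (a private JAX helper). A minimal sketch, not from the original report, of the promotion it guards against: under `jax_enable_x64`, bare Python float literals inside `np.where` default to float64, which is how a float64 value ends up in the `while_loop` carry above.
```python
import jax
import jax.numpy as jnp
from jax import lax

jax.config.update('jax_enable_x64', True)

z = jnp.ones((), dtype=jnp.float32)
promoted = jnp.where(z > 0, 1.0, -1.0)                               # float64: bare literals take the x64 default
matched = jnp.where(z > 0, lax._const(z, 1.0), lax._const(z, -1.0))  # float32: constants match z's dtype
print(promoted.dtype, matched.dtype)
```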
2020-02-03T16:22:28
google/jax
2,182
google__jax-2182
[ "2178" ]
8407a65e1b940673ab8f83e41e810f72c9a5cee7
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -2902,7 +2902,8 @@ def _index_to_gather(x_shape, idx): collapsed_slice_dims = [] start_index_map = [] - gather_indices = onp.zeros((0,), dtype=int32) # use onp to save a compilation + index_dtype = int64 if max(x_shape) >= (1 << 31) else int32 + gather_indices = onp.zeros((0,), dtype=index_dtype) # use onp to save a compilation # We perform three transformations to y before the scatter op, in order: # First, y is broadcast to slice_shape. In general `y` only need broadcast to @@ -2929,7 +2930,7 @@ def _index_to_gather(x_shape, idx): shape = advanced_indexes[0].shape ndim = len(shape) advanced_indexes = [ - lax.convert_element_type(lax.reshape(a, shape + (1,)), int32) + lax.convert_element_type(lax.reshape(a, shape + (1,)), index_dtype) for a in advanced_indexes] # Broadcast gather_indices from [..., k] to [..., 1, 1, ..., 1, k]. @@ -2957,7 +2958,7 @@ def _index_to_gather(x_shape, idx): if (isinstance(abstract_i, ConcreteArray) or isinstance(abstract_i, ShapedArray)) and _int(abstract_i): i = _normalize_index(i, x_shape[x_axis]) - i = lax.convert_element_type(i, int32) + i = lax.convert_element_type(i, index_dtype) i = broadcast_to(i, tuple(gather_indices.shape[:-1]) + (1,)) gather_indices = concatenate((gather_indices, i), -1) collapsed_slice_dims.append(x_axis) @@ -2989,7 +2990,7 @@ def _index_to_gather(x_shape, idx): if needs_rev: reversed_y_dims.append(collapsed_y_axis) if stride == 1: - i = lax.convert_element_type(start, int32) + i = lax.convert_element_type(start, index_dtype) i = broadcast_to(i, tuple(gather_indices.shape[:-1]) + (1,)) gather_indices = concatenate((gather_indices, i), -1) slice_shape.append(limit - start) @@ -2997,7 +2998,7 @@ def _index_to_gather(x_shape, idx): offset_dims.append(collapsed_y_axis) start_index_map.append(x_axis) else: - i = arange(start, limit, stride, dtype=int32) + i = arange(start, limit, stride, dtype=index_dtype) size = i.shape[0] slice_shape.append(size) gather_slice_shape.append(1)
Bug concatenating large arrays on CPU On CPU, I get the following result when doing a concatenation of a large array with a smaller one ```python In [1]: import jax.numpy as np In [2]: a = np.concatenate((np.ones(1 << 32), np.array([2., 3., 4.]))) In [3]: a[-4:] Out[3]: DeviceArray([1., 1., 1., 1.], dtype=float32) ``` The result of the final line should be `[1., 2., 3., 4]`, not `[1., 1., 1., 1.]`. I've had a look at the implementation of jax.numpy.concatenate and it seems to delegate pretty directly to lax.concatenate, which is itself just a direct wrapper around the XLA op, so I wonder if this might be a bug in XLA. The result seems to be incorrect in the same way for other dtypes: ```python In [4]: a = np.concatenate((np.ones(1 << 32, 'float16'), np.array([2., 3., 4.], 'float16'))) In [5]: a[-4:] Out[5]: DeviceArray([1., 1., 1., 1.], dtype=float16) In [6]: a = np.concatenate((np.ones(1 << 32, 'uint16'), np.array([2., 3., 4.], 'uint16'))) In [7]: a[-4:] Out[7]: DeviceArray([1, 1, 1, 1], dtype=uint16) ``` I haven't had time to test whether this same issue exists on GPU or TPU.
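The accompanying patch points at JAX's indexing path rather than XLA's concatenate: gather start indices were built as int32, and once a dimension reaches 2**31 the index for the tail of the array no longer fits, so `a[-4:]` ends up gathering from the wrong offset. A small illustration of the silent wraparound (plain NumPy, illustrative only):
```python
import numpy as onp

positions = onp.array([1 << 32, (1 << 32) + 1, (1 << 32) + 2], dtype=onp.int64)
print(positions.astype(onp.int32))  # [0 1 2] -- the high bits are dropped
```
This is why the patch switches the gather index dtype to int64 whenever `max(x_shape) >= (1 << 31)`.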
2020-02-06T17:07:34
google/jax
2,206
google__jax-2206
[ "2179" ]
e9d06ecf53a5227bb324b682fab74b628430ff9d
diff --git a/jax/api.py b/jax/api.py --- a/jax/api.py +++ b/jax/api.py @@ -1955,6 +1955,30 @@ def __init__(self, shape, dtype): self.shape = shape self.dtype = dtype + size = property(lambda self: onp.prod(self.shape)) + ndim = property(lambda self: len(self.shape)) + + def __len__(self): + try: + return self.shape[0] + except IndexError: + raise TypeError("len() of unsized object") # same as numpy error + + def __repr__(self): + return "{}(shape={}, dtype={})".format( + type(self).__name__, self.shape, self.dtype.dtype.name) + + __str__ = __repr__ + + def __eq__(self, other): + if not isinstance(other, ShapeDtypeStruct): + return False + else: + return (other.shape, other.dtype) == (self.shape, self.dtype) + + def __hash__(self): + return hash((self.shape, self.dtype)) + def eval_shape(fun, *args, **kwargs): """Compute the shape/dtype of ``fun(*args, **kwargs)`` without any FLOPs.
diff --git a/tests/api_test.py b/tests/api_test.py --- a/tests/api_test.py +++ b/tests/api_test.py @@ -891,6 +891,26 @@ def f(pt): g = api.grad(f)(pt) self.assertIsInstance(pt, ZeroPoint) + @parameterized.parameters(1, 2, 3) + def test_shape_dtype_struct(self, i): + s = api.ShapeDtypeStruct(shape=(i, 2, 3), dtype=np.float32) + self.assertEqual(s.shape, (i, 2, 3)) + self.assertEqual(s.dtype, np.float32) + self.assertEqual(s.ndim, 3) + self.assertEqual(s.size, i * 2 * 3) + self.assertLen(s, i) + for f in (str, repr): + self.assertEqual( + f(s), "ShapeDtypeStruct(shape=({}, 2, 3), dtype=float32)".format(i)) + + def test_shape_dtype_struct_scalar(self): + s = api.ShapeDtypeStruct(shape=(), dtype=np.float32) + self.assertEmpty(s.shape) + self.assertEqual(s.size, 1) + self.assertEqual(s.ndim, 0) + with self.assertRaisesRegex(TypeError, "len[(][)] of unsized object"): + _ = len(s) + def test_eval_shape(self): def fun(x, y): return np.tanh(np.dot(x, y) + 3.)
Should ShapeDtypeStruct.size exist? `ShapeDtypeStruct` currently has `dtype` and `shape`. We could add numpy's `size` property via ``` class ShapeDtypeStruct(object): __slots__ = ["shape", "dtype"] def __init__(self, shape, dtype): self.shape = shape self.dtype = dtype @property def size(self): return np.prod(self.shape, dtype=int) ``` Do you want this patch?
+1, maybe ndim too? Also it would be nice to have `__repr__` :smile: (maybe we can make this a `NamedTuple` ?) `NamedTuples` are bad: ``` shape, dtype = struct # WAT ``` But yes to `__repr__`; I hit the lack of that just a moment ago. :) Yeah, I recently learned how bad namedtuples are! What confused me was this kind of behavior: ```python Point = namedtuple('Point', ['x', 'y']) hash(Point((2, 3), onp.float32)) == hash(ShapeDtypeStruct((2, 3), onp.float32)) ``` They're really meant to be just sugar on tuples, without type tags. That's not super relevant, but I thought I'd complain about it at every opportunity. ShapeDtypeStruct exists because I didn't want to expose much API surface here. ShapedArray does everything we want (and more), but I didn't want to surface that as part of the API. So we could: 1. keep ShapeDtypeStruct super minimal, no convenience methods/properties 2. add these select convenience methods/properties 3. just use ShapedArray here I don't have strong feelings between 1 and 2, and I suspect that you both as users of this API might be best able to decide between them. I am very cautious about 3 because I don't like exposing internals, even if it means some redundant code. WDYT? Understood re namedtuple, while we wait for dataclasses is it worth taking a dep on [attrs](https://www.attrs.org/en/stable/) to make these pod classes more consistent and easier to read (e.g. generated slots, eq, hash, immutability etc)? I vote for (2) and agree with not exposing the internal `ShapedArray`. I think a complete implementation would have: `shape`, `dtype`, `ndim`, `size`, `__len__` and of course `__eq__` and `__hash__`. Rather than using attrs, I would rather require Python 3.6+ with the dataclasses backport.
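For reference, a short usage sketch of the API the patch above settles on; the values in the comments follow directly from the shapes and dtypes involved:
```python
import jax
import jax.numpy as jnp

def f(x, y):
  return jnp.tanh(jnp.dot(x, y) + 3.)

x = jax.ShapeDtypeStruct((3, 4), jnp.float32)
y = jax.ShapeDtypeStruct((4, 5), jnp.float32)
out = jax.eval_shape(f, x, y)          # no FLOPs, just shape/dtype propagation
print(out.shape, out.dtype)            # (3, 5) float32
print(out.ndim, out.size, len(out))    # 2 15 3
```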
2020-02-10T15:44:23
google/jax
2,214
google__jax-2214
[ "2180", "1963" ]
5e77789afe6118b80341d341f09b8b390edbf703
diff --git a/jax/interpreters/ad.py b/jax/interpreters/ad.py --- a/jax/interpreters/ad.py +++ b/jax/interpreters/ad.py @@ -183,21 +183,12 @@ def is_linear(var): else: write_primal(eqn.outvars[0], ans) else: - call_jaxpr = eqn.params["call_jaxpr"] + call_jaxpr, params = core.extract_call_jaxpr(eqn.primitive, eqn.params) if any(is_linear(v) for v in eqn.invars): linear_eqns.append(eqn) - elif eqn.primitive is not pe.remat_call_p: - ans = _eval_subjaxpr_primals( - eqn.primitive, call_jaxpr, - map(read_primal, eqn.invars), eqn.params) - map(write_primal, eqn.outvars, ans) - - # we special-case remat_call here because it can be mixed linear / - # nonlinear, so we always evaluate it even if it has a linear part - if eqn.primitive is pe.remat_call_p: - ans = _eval_subjaxpr_primals( - eqn.primitive, call_jaxpr, - map(read_primal, eqn.invars), eqn.params) + if any(not is_linear(v) for v in eqn.invars): + ans = _eval_subjaxpr_primals(eqn.primitive, call_jaxpr, + map(read_primal, eqn.invars), params) map(write_primal, eqn.outvars, ans) ct_env = {} @@ -260,12 +251,10 @@ def is_linear(var): else: write_primal(eqn.outvars[0], ans) else: - call_jaxpr = eqn.params["call_jaxpr"] - if (eqn.primitive is pe.remat_call_p or - not any(is_linear(v) for v in eqn.invars)): - ans = _eval_subjaxpr_primals( - eqn.primitive, call_jaxpr, - map(read_primal, eqn.invars), eqn.params) + call_jaxpr, params = core.extract_call_jaxpr(eqn.primitive, eqn.params) + if any(not is_linear(v) for v in eqn.invars): + ans = _eval_subjaxpr_primals(eqn.primitive, call_jaxpr, + map(read_primal, eqn.invars), params) map(write_primal, eqn.outvars, ans) return map(read_primal, jaxpr.outvars) diff --git a/jax/lax/lax_control_flow.py b/jax/lax/lax_control_flow.py --- a/jax/lax/lax_control_flow.py +++ b/jax/lax/lax_control_flow.py @@ -1006,11 +1006,11 @@ def _scan_transpose(cts, *args, forward, length, num_consts, num_carry, jaxpr, l if xs_lin != [True] * (len(xs_lin) - num_eres) + [False] * num_eres: raise NotImplementedError if not all(init_lin): - raise NotImplementedError + pass # TODO(mattjj): error check https://github.com/google/jax/issues/1963 - consts, init, xs = split_list(args, [num_consts, num_carry]) - ires, consts = split_list(consts, [num_ires]) - xs, eres = split_list(xs, [sum(xs_lin)]) + consts, _, xs = split_list(args, [num_consts, num_carry]) + ires, _ = split_list(consts, [num_ires]) + _, eres = split_list(xs, [sum(xs_lin)]) assert not any(r is ad.undefined_primal for r in ires) assert not any(r is ad.undefined_primal for r in eres)
diff --git a/tests/api_test.py b/tests/api_test.py --- a/tests/api_test.py +++ b/tests/api_test.py @@ -1660,6 +1660,45 @@ def loss(u0, target, steps, dt=1/np.sqrt(2), c=1): u0 = np.ones_like(target) loss(u0, target, 10) # doesn't crash + def test_remat_jit3(self): + # https://github.com/google/jax/issues/2180 + def f(w, x): + a = np.dot(x, w) + b = np.einsum("btd,bTd->btT", a, a) + c = np.einsum("btT,btd->btd", b, a) + return np.sum(c) + + w = np.ones([1, 1]) + x = np.ones([1, 1, 1]) + f = api.remat(f) + api.grad(f)(w, x) # doesn't crash + + @api.jit + def mul(a, b): + return a * b + + def f(w, x): + a = mul(w, x) + b = mul(a, a) + return b + + w = 1. + x = 1. + f = api.remat(f) + api.grad(f)(w, x) # doesn't crash + + def test_remat_scan2(self): + # https://github.com/google/jax/issues/1963 + + def scan_bug(x0): + f = lambda x, _: (x + 1, None) + def scanned_f(x, _): + return lax.scan(f, x, xs=None, length=1)[0], None + x, _ = jax.remat(scanned_f)(x0, None) + return x + + jax.grad(scan_bug)(1.0) # doesn't crash + def test_trivial_computations(self): x = np.array([1, 2, 3]) y = api.jit(lambda x: x)(x)
Adding `remat` causes an error to otherwise working code When running the code below under `remat` we get the following error: ``` --------------------------------------------------------------------------- AssertionError Traceback (most recent call last) <ipython-input-16-3143f2d8c2b3> in <module>() 11 x = jnp.ones([1, 1, 1]) 12 f = jax.remat(f) ---> 13 jax.grad(f)(w, x) 17 frames google3/third_party/py/jax/interpreters/ad.py in bilinear_transpose(lhs_rule, rhs_rule, cotangent, x, y, **kwargs) 503 504 def bilinear_transpose(lhs_rule, rhs_rule, cotangent, x, y, **kwargs): --> 505 assert (x is undefined_primal) ^ (y is undefined_primal) 506 if x is undefined_primal: 507 out = zero if cotangent is zero else lhs_rule(cotangent, y, **kwargs) AssertionError: ``` Without `jax.remat` the code does not error out and produces: ``` DeviceArray([[3.]], dtype=float32) ``` **Reproducer:** ```python import jax import jax.numpy as jnp def f(w, x): a = jnp.dot(x, w) b = jnp.einsum("btd,bTd->btT", a, a) c = jnp.einsum("btT,btd->btd", b, a) return jnp.sum(c) w = jnp.ones([1, 1]) x = jnp.ones([1, 1, 1]) f = jax.remat(f) # comment this and things work jax.grad(f)(w, x) ``` remat with scan part 2 This is a followup to #1907. The code from #1907 does work now with the fix, but I am still getting the NotImplemented error I referred. The minimal example (below) is more complicated, but I hope it's not too bad. Please let me know if I can be helpful with debugging. Thanks in advance! ``` python !pip install --upgrade -q https://storage.googleapis.com/jax-releases/cuda$(echo $CUDA_VERSION | sed -e 's/\.//' -e 's/\..*//')/jaxlib-$(pip search jaxlib | grep -oP '[0-9\.]+' | head -n 1)-cp36-none-linux_x86_64.whl !pip install --upgrade -q jax import numpy as onp import jax.numpy as np from jax import random, jit, grad, vmap, lax, remat from functools import partial ####################################### #Error only when all of these are True# ####################################### function_test = True test_remat = True test_scan = True def pot(dr, D0=5.0, **kwargs): U = dr**2 if(function_test): U = D0 * U else: U = U - D0 return np.array(U, dtype=dr.dtype) def sys_energy(metric, D0=5.0): def fn_mapped(R, **dynamic_kwargs): _metric = vmap(vmap(partial(metric, **dynamic_kwargs), (0, None), 0), (None, 0), 0) dr = _metric(R, R) return np.sum( pot(dr, D0=D0), axis=None, keepdims=False) return fn_mapped def create_simulation(params): D0 = params def displacement(Ra, Rb, **unused_kwargs): return Ra - Rb def shift(R, dR, **unused_kwargs): return R + 0.001*dR energy_fn = sys_energy(displacement, D0=D0) def get_dynamics_functions(energy_fn, shift): force_fn = grad(lambda R, *args, **kwargs: -energy_fn(R, *args, **kwargs)) def init_fn(R): return R def apply_fn(state, **kwargs): R = state F = force_fn(R, **kwargs) R = shift(R, F) return R return init_fn, apply_fn return get_dynamics_functions(energy_fn, shift) def run(params): key2, split = random.split(key,2) init_fn, apply_fn = create_simulation(params) R = random.uniform( split, (N, dimension), minval=0.0, maxval=8.0, dtype=np.float32) state = init_fn(R) n_steps_inner = 3 n_steps_outer = 2 def run_partial(state, i): def loop_fn(s, i): return apply_fn(s), np.array([0.]) if(test_scan): steps = np.arange(0,n_steps_inner) return lax.scan(loop_fn, state, steps) else: return loop_fn(loop_fn(loop_fn(state,0)[0],0)[0],0) if(test_remat): run_partial = remat(run_partial) run_partial = jit(run_partial) #Surprisingly, this scan doesn't seem to affect things one way or another... 
state, temp = lax.scan(run_partial, state, np.arange(n_steps_outer)) #state, temp = run_partial(run_partial(state,0)[0],0) return np.mean(state) key = random.PRNGKey(0) N = 10 dimension = 3 grad(run)(20.0) ```
@mattjj Does this reproduce for you? Yes, thanks for the easy repro! Here's the traceback, which ends somewhere inside `_scan_transpose`: ``` google3/third_party/py/jax/api.py in grad_f(*args, **kwargs) 352 @wraps(fun, docstr=docstr, argnums=argnums) 353 def grad_f(*args, **kwargs): --> 354 _, g = value_and_grad_f(*args, **kwargs) 355 return g 356 google3/third_party/py/jax/api.py in value_and_grad_f(*args, **kwargs) 417 "differentiation, pass holomorphic=True.") 418 raise TypeError(msg.format(dtype)) --> 419 g = vjp_py(onp.ones((), dtype=dtype)) 420 g = g[0] if isinstance(argnums, int) else g 421 if not has_aux: google3/third_party/py/jax/api.py in _vjp_pullback_wrapper(fun, cotangent_dtypes, io_tree, py_args) 1237 "match type of corresponding primal output ({})") 1238 raise TypeError(msg.format(_dtype(a), dtype)) -> 1239 ans = fun(*args) 1240 return tree_unflatten(out_tree, ans) 1241 google3/third_party/py/jax/interpreters/ad.py in vjp_(*cts) 112 dummy_primals_and_cts = (core.unit,) * len(cts) + cts 113 dummy_args = (undefined_primal,) * len(jaxpr.invars) --> 114 _, arg_cts = backward_pass(jaxpr, consts, (), dummy_args, dummy_primals_and_cts) 115 arg_cts = arg_cts[len(primals):] 116 return map(instantiate_zeros, primals, arg_cts) google3/third_party/py/jax/interpreters/ad.py in backward_pass(jaxpr, consts, freevar_vals, args, cotangents_in) 220 map(write_cotangent, bound_vars, ct_free_vars_out) 221 else: --> 222 cts_out = get_primitive_transpose(eqn.primitive)(cts_in, *invals, **eqn.params) 223 cts_out = [zero] * len(eqn.invars) if cts_out is zero else cts_out 224 map(write_cotangent, eqn.invars, cts_out) google3/third_party/py/jax/lax/lax_control_flow.py in _scan_transpose(cts, *args, **kwargs) 831 # jaxpr :: [ires, T d] -> [T c] -> [T a, eres] -> ([T c], [T b]) 832 # jaxpr_trans :: [ires] -> [CT d, CT c] -> [CT b, eres] -> ([CT d, CT c], [CT a]) --> 833 jaxpr_trans = _transpose_jaxpr(num_ires, num_consts - num_ires, num_eres, jaxpr) 834 linear_trans = ([False] * num_ires + 835 [True] * (len(ct_consts) + len(ct_carry) + len(ct_ys)) + google3/third_party/py/jax/lax/lax_control_flow.py in _transpose_jaxpr(num_res1, num_c, num_res2, jaxpr) 864 _map(ad.add_tangents, c_bar, new_c_bar)) 865 return c_bar + a_bar --> 866 return _make_typed_jaxpr(transposed, res1_avals + c_avals + b_avals + res2_avals) 867 868 def _make_typed_jaxpr(traceable, in_avals): google3/third_party/py/jax/lax/lax_control_flow.py in _make_typed_jaxpr(traceable, in_avals) 868 def _make_typed_jaxpr(traceable, in_avals): 869 pvals = [pe.PartialVal((aval, core.unit)) for aval in in_avals] --> 870 jaxpr, pvals_out, consts = pe.trace_to_jaxpr(traceable, pvals, instantiate=True) 871 out_avals, _ = unzip2(pvals_out) 872 return core.TypedJaxpr(jaxpr, consts, in_avals, out_avals) google3/third_party/py/jax/interpreters/partial_eval.py in trace_to_jaxpr(fun, pvals, instantiate, stage_out_calls) 330 with new_master(trace_type) as master: 331 fun = trace_to_subjaxpr(fun, master, instantiate) --> 332 jaxpr, (out_pvals, consts, env) = fun.call_wrapped(pvals) 333 assert not env 334 del master google3/third_party/py/jax/linear_util.py in call_wrapped(self, *args, **kwargs) 150 gen = None 151 --> 152 ans = self.f(*args, **dict(self.params, **kwargs)) 153 del args 154 while stack: google3/third_party/py/jax/lax/lax_control_flow.py in transposed(*res1_cbar_bbar_res2) 858 primals = res1 + [ad.undefined_primal] * (num_c + num_a) + res2 859 _, cbar_abar = ad.backward_pass(jaxpr.jaxpr, jaxpr.literals, (), primals, --> 860 b_bar) 861 _, 
new_c_bar, a_bar, _ = split_list(cbar_abar, [num_res1, num_c, num_a]) 862 a_bar = _map(ad.instantiate_zeros_aval, a_avals, a_bar) google3/third_party/py/jax/interpreters/ad.py in backward_pass(jaxpr, consts, freevar_vals, args, cotangents_in) 217 sub_freevar_vals = map(read_primal, bound_vars) 218 ct_free_vars_out, cts_out = get_primitive_transpose(eqn.primitive)( --> 219 eqn.params, subjaxpr, sub_consts, sub_freevar_vals, invals, cts_in) 220 map(write_cotangent, bound_vars, ct_free_vars_out) 221 else: google3/third_party/py/jax/interpreters/ad.py in call_transpose(primitive, params, jaxpr, consts, freevar_vals, args, ct) 548 fun = lu.hashable_partial(lu.wrap_init(backward_pass), jaxpr) 549 fun, out_tree = flatten_fun_nokwargs(fun, in_tree_def) --> 550 out_flat = primitive.bind(fun, *all_args, **params) 551 return tree_unflatten(out_tree(), out_flat) 552 primitive_transposes[core.call_p] = partial(call_transpose, call_p) google3/third_party/py/jax/core.py in call_bind(primitive, f, *args, **params) 606 else: 607 tracers = map(top_trace.full_raise, args) --> 608 outs = map(full_lower, top_trace.process_call(primitive, f, tracers, params)) 609 return apply_todos(env_trace_todo(), outs) 610 google3/third_party/py/jax/interpreters/partial_eval.py in process_call(self, call_primitive, f, tracers, params) 124 in_pvs, in_consts = unzip2([t.pval for t in tracers]) 125 fun, aux = partial_eval(f, self, in_pvs) --> 126 out_flat = call_primitive.bind(fun, *in_consts, **params) 127 out_pvs, jaxpr, env = aux() 128 out_pv_consts, consts = split_list(out_flat, [len(out_flat)-len(jaxpr.constvars)]) google3/third_party/py/jax/core.py in call_bind(primitive, f, *args, **params) 603 if top_trace is None: 604 with new_sublevel(): --> 605 outs = primitive.impl(f, *args, **params) 606 else: 607 tracers = map(top_trace.full_raise, args) google3/third_party/py/jax/interpreters/xla.py in _xla_call_impl(fun, *args, **params) 441 device = params['device'] 442 backend = params['backend'] --> 443 compiled_fun = _xla_callable(fun, device, backend, *map(arg_spec, args)) 444 try: 445 return compiled_fun(*args) google3/third_party/py/jax/linear_util.py in memoized_fun(fun, *args) 221 fun.populate_stores(stores) 222 else: --> 223 ans = call(fun, *args) 224 cache[key] = (ans, fun.stores) 225 return ans google3/third_party/py/jax/interpreters/xla.py in _xla_callable(fun, device, backend, *arg_specs) 458 pvals = [pe.PartialVal((aval, core.unit)) for aval in abstract_args] 459 with core.new_master(pe.StagingJaxprTrace, True) as master: --> 460 jaxpr, (pvals, consts, env) = pe.trace_to_subjaxpr(fun, master, False).call_wrapped(pvals) 461 assert not env # no subtraces here 462 del master, env google3/third_party/py/jax/linear_util.py in call_wrapped(self, *args, **kwargs) 150 gen = None 151 --> 152 ans = self.f(*args, **dict(self.params, **kwargs)) 153 del args 154 while stack: google3/third_party/py/jax/interpreters/ad.py in backward_pass(jaxpr, consts, freevar_vals, args, cotangents_in) 217 sub_freevar_vals = map(read_primal, bound_vars) 218 ct_free_vars_out, cts_out = get_primitive_transpose(eqn.primitive)( --> 219 eqn.params, subjaxpr, sub_consts, sub_freevar_vals, invals, cts_in) 220 map(write_cotangent, bound_vars, ct_free_vars_out) 221 else: google3/third_party/py/jax/interpreters/ad.py in call_transpose(primitive, params, jaxpr, consts, freevar_vals, args, ct) 548 fun = lu.hashable_partial(lu.wrap_init(backward_pass), jaxpr) 549 fun, out_tree = flatten_fun_nokwargs(fun, in_tree_def) --> 550 out_flat = primitive.bind(fun, 
*all_args, **params) 551 return tree_unflatten(out_tree(), out_flat) 552 primitive_transposes[core.call_p] = partial(call_transpose, call_p) google3/third_party/py/jax/core.py in call_bind(primitive, f, *args, **params) 606 else: 607 tracers = map(top_trace.full_raise, args) --> 608 outs = map(full_lower, top_trace.process_call(primitive, f, tracers, params)) 609 return apply_todos(env_trace_todo(), outs) 610 google3/third_party/py/jax/interpreters/partial_eval.py in process_call(self, call_primitive, f, tracers, params) 119 tracers = map(self.instantiate_const_abstracted, tracers) 120 if call_primitive in call_partial_eval_rules: --> 121 return call_partial_eval_rules[call_primitive](self, f, tracers, params) 122 if call_primitive in map_primitives: 123 return self.process_map(call_primitive, f, tracers, params) google3/third_party/py/jax/interpreters/partial_eval.py in _remat_partial_eval(trace, f, tracers, params) 513 out_flat = remat_call_p.bind(fun, *in_consts, **params) 514 else: --> 515 out_flat = remat_call_p.bind(fun, *in_consts, **params) 516 out_pvs, jaxpr, env = aux() 517 env = map(trace.full_raise, env) google3/third_party/py/jax/core.py in call_bind(primitive, f, *args, **params) 606 else: 607 tracers = map(top_trace.full_raise, args) --> 608 outs = map(full_lower, top_trace.process_call(primitive, f, tracers, params)) 609 return apply_todos(env_trace_todo(), outs) 610 google3/third_party/py/jax/interpreters/partial_eval.py in process_call(self, call_primitive, f, tracers, params) 119 tracers = map(self.instantiate_const_abstracted, tracers) 120 if call_primitive in call_partial_eval_rules: --> 121 return call_partial_eval_rules[call_primitive](self, f, tracers, params) 122 if call_primitive in map_primitives: 123 return self.process_map(call_primitive, f, tracers, params) google3/third_party/py/jax/interpreters/partial_eval.py in _remat_partial_eval(trace, f, tracers, params) 513 out_flat = remat_call_p.bind(fun, *in_consts, **params) 514 else: --> 515 out_flat = remat_call_p.bind(fun, *in_consts, **params) 516 out_pvs, jaxpr, env = aux() 517 env = map(trace.full_raise, env) google3/third_party/py/jax/core.py in call_bind(primitive, f, *args, **params) 603 if top_trace is None: 604 with new_sublevel(): --> 605 outs = primitive.impl(f, *args, **params) 606 else: 607 tracers = map(top_trace.full_raise, args) google3/third_party/py/jax/core.py in call_impl(***failed resolving arguments***) 612 def call_impl(f, *args, **params): 613 del params # params parameterize the call primitive, not the function --> 614 return f.call_wrapped(*args) 615 616 google3/third_party/py/jax/linear_util.py in call_wrapped(self, *args, **kwargs) 150 gen = None 151 --> 152 ans = self.f(*args, **dict(self.params, **kwargs)) 153 del args 154 while stack: google3/third_party/py/jax/interpreters/ad.py in backward_pass(jaxpr, consts, freevar_vals, args, cotangents_in) 220 map(write_cotangent, bound_vars, ct_free_vars_out) 221 else: --> 222 cts_out = get_primitive_transpose(eqn.primitive)(cts_in, *invals, **eqn.params) 223 cts_out = [zero] * len(eqn.invars) if cts_out is zero else cts_out 224 map(write_cotangent, eqn.invars, cts_out) google3/third_party/py/jax/lax/lax_control_flow.py in _scan_transpose(cts, *args, **kwargs) 814 raise NotImplementedError 815 if not all(init_lin): --> 816 raise NotImplementedError 817 818 consts, init, xs = split_list(args, [num_consts, num_carry]) NotImplementedError: ``` Sorry for being slow on this. 
Here's a crazy-sounding fix: just delete the lines that raise the exception, i.e. [these lines in lax_control_flow.py](https://github.com/google/jax/blob/efbdaf66bfa584cc635092919a23b684c7fb2247/jax/lax/lax_control_flow.py#L1007-L1008). In JAX we model reverse-mode autodiff, like `grad`, as three steps: linearization (forward-mode autodiff), partial evaluation, and transposition (which turns forward into reverse). The exception being raised here is saying that the transpose rule can't be sure, based on the information we handed it, that the linearized-and-partial-evaluated scan we gave it is truly linear / transposable. But anything produced by the first two steps will be transposable! We just haven't yet set it up in a way that the transpose rule can check that fact. If you delete the exception, the transpose rule just proceeds assuming that the transposition is going to work. And it does! (If it didn't, we'd get an error. And you can always use `check_grads` from `jax.test_util` to check things numerically.) It'll always succeed for jaxprs produced by linearize-and-partial-eval. The only time it'd fail is if, hypothetically, a user tried to directly transpose their own jaxpr (not produced automatically by linearize-and-partial-eval) involving a scan which wasn't actually linear. I think our intent with the NotImplementedError is that we wanted to add more explicit information to certify that the jaxpr to be transposed is really transposable. That still seems like a noble goal... I want to check this thinking tomorrow, if I get a chance to work on this. This bug appears to come up in even the simplest examples of calling `scan` inside `remat`:
```python
import jax
from jax import lax

def scan_bug(x0):
  f = lambda x, _: (x + 1, None)
  def scanned_f(x, _):
    return lax.scan(f, x, xs=None, length=1)[0], None
  # The first three versions work. The last one crashes.
  # x, _ = f(x0, None)
  # x, _ = jax.remat(f)(x0, None)
  # x, _ = scanned_f(x0, None)
  x, _ = jax.remat(scanned_f)(x0, None)
  return x

jax.grad(scan_bug)(1.0)  # NotImplementedError
```
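Picking up the `check_grads` suggestion above, a sketch of the numerical check one could run once the exception is removed (it reuses the reproducer verbatim and compares the reverse-mode gradient against finite differences):
```python
import jax
from jax import lax
from jax.test_util import check_grads

def scan_bug(x0):
  f = lambda x, _: (x + 1, None)
  def scanned_f(x, _):
    return lax.scan(f, x, xs=None, length=1)[0], None
  x, _ = jax.remat(scanned_f)(x0, None)
  return x

check_grads(scan_bug, (1.0,), order=1, modes=['rev'])  # raises if the gradient and finite differences disagree
```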
2020-02-11T22:44:29
google/jax
2,220
google__jax-2220
[ "1747" ]
aca7bccefdd822dfa35f6ea1bdf41f790785d1f7
diff --git a/jax/numpy/linalg.py b/jax/numpy/linalg.py --- a/jax/numpy/linalg.py +++ b/jax/numpy/linalg.py @@ -27,6 +27,7 @@ from .. import dtypes from .lax_numpy import _not_implemented from .lax_numpy import _wraps +from .vectorize import vectorize from . import lax_numpy as np from ..api import custom_transforms, defjvp from ..util import get_module_functions @@ -328,16 +329,38 @@ def qr(a, mode="reduced"): return q, r -@_wraps(onp.linalg.solve) -@jit -def solve(a, b): - a, b = _promote_arg_dtypes(np.asarray(a), np.asarray(b)) +def _check_solve_shapes(a, b): if not (a.ndim >= 2 and a.shape[-1] == a.shape[-2] and b.ndim >= 1): msg = ("The arguments to solve must have shapes a=[..., m, m] and " "b=[..., m, k] or b=[..., m]; got a={} and b={}") raise ValueError(msg.format(a.shape, b.shape)) - lu, pivots = lax_linalg.lu(a) - return lax_linalg.lu_solve(lu, pivots, b, trans=0) + + +@partial(vectorize, signature='(n,m),(m)->(n)') +def _matvec_multiply(a, b): + return np.dot(a, b, precision=lax.Precision.HIGHEST) + + +@_wraps(onp.linalg.solve) +@jit +def solve(a, b): + a, b = _promote_arg_dtypes(np.asarray(a), np.asarray(b)) + _check_solve_shapes(a, b) + + # With custom_linear_solve, we can reuse the same factorization when + # computing sensitivities. This is considerably faster. + lu, pivots = lax.stop_gradient(lax_linalg.lu)(a) + custom_solve = partial( + lax.custom_linear_solve, + lambda x: _matvec_multiply(a, x), + solve=lambda _, x: lax_linalg.lu_solve(lu, pivots, x, trans=0), + transpose_solve=lambda _, x: lax_linalg.lu_solve(lu, pivots, x, trans=1)) + if a.ndim == b.ndim + 1: + # b.shape == [..., m] + return custom_solve(b) + else: + # b.shape == [..., m, k] + return vmap(custom_solve, b.ndim - 1, max(a.ndim, b.ndim) - 1)(b) for func in get_module_functions(onp.linalg): diff --git a/jax/scipy/linalg.py b/jax/scipy/linalg.py --- a/jax/scipy/linalg.py +++ b/jax/scipy/linalg.py @@ -18,7 +18,7 @@ import scipy.linalg import textwrap -from jax import jit +from jax import jit, vmap from .. import lax from .. import lax_linalg from ..numpy.lax_numpy import _wraps @@ -45,24 +45,16 @@ def cho_factor(a, lower=False, overwrite_a=False, check_finite=True): @partial(jit, static_argnums=(2,)) def _cho_solve(c, b, lower): c, b = np_linalg._promote_arg_dtypes(np.asarray(c), np.asarray(b)) - c_shape = np.shape(c) - b_shape = np.shape(b) - c_ndims = len(c_shape) - b_ndims = len(b_shape) - if not (c_ndims >= 2 and c_shape[-1] == c_shape[-2] and - (c_ndims == b_ndims or c_ndims == b_ndims + 1)): - msg = ("The arguments to solve must have shapes a=[..., m, m] and " - "b=[..., m, k] or b=[..., m]; got a={} and b={}") - raise ValueError(msg.format(c_shape, b_shape)) - + np_linalg._check_solve_shapes(c, b) # TODO(phawkins): triangular_solve only supports matrices on the RHS, so we # add a dummy dimension. Extend it to support vectors and simplify this. 
- b = b if c_ndims == b_ndims else b[..., None] + rhs_vector = c.ndim == b.ndim + 1 + b = b[..., np.newaxis] if rhs_vector else b b = lax_linalg.triangular_solve(c, b, left_side=True, lower=lower, transpose_a=not lower, conjugate_a=not lower) b = lax_linalg.triangular_solve(c, b, left_side=True, lower=lower, transpose_a=lower, conjugate_a=lower) - return b[..., 0] if c_ndims != b_ndims else b + return b[..., 0] if rhs_vector else b @_wraps(scipy.linalg.cho_solve, update_doc=False) def cho_solve(c_and_lower, b, overwrite_b=False, check_finite=True): @@ -170,13 +162,30 @@ def qr(a, overwrite_a=False, lwork=None, mode="full", pivoting=False, del overwrite_a, lwork, check_finite return _qr(a, mode, pivoting) + @partial(jit, static_argnums=(2, 3)) def _solve(a, b, sym_pos, lower): if not sym_pos: return np_linalg.solve(a, b) a, b = np_linalg._promote_arg_dtypes(np.asarray(a), np.asarray(b)) - return cho_solve(cho_factor(a, lower=lower), b) + np_linalg._check_solve_shapes(a, b) + + # With custom_linear_solve, we can reuse the same factorization when + # computing sensitivities. This is considerably faster. + factors = lax.stop_gradient(cho_factor)(a, lower=lower) + custom_solve = partial( + lax.custom_linear_solve, + lambda x: np_linalg._matvec_multiply(a, x), + solve=lambda _, x: cho_solve(factors, x), + symmetric=True) + if a.ndim == b.ndim + 1: + # b.shape == [..., m] + return custom_solve(b) + else: + # b.shape == [..., m, k] + return vmap(custom_solve, b.ndim - 1, max(a.ndim, b.ndim) - 1)(b) + @_wraps(scipy.linalg.solve) def solve(a, b, sym_pos=False, lower=False, overwrite_a=False, overwrite_b=False,
Direct gradient rules to speed-up numpy.linalg.solve It can be [significantly more efficient](https://www.semanticscholar.org/paper/Structured-higher-order-algorithmic-differentiation-Walter/e52d834dac4fdfdc4328105e29ae657c3fa51467), both in runtime and memory, to directly define derivative rules for higher level linear algebra operations like `solve` rather than the constituent operations (factorization and triangular solve). For large matrices (e.g., 500x500 on CPUs), my microbenchmark shows that we can get a 3-4x speed-up for general purpose and symmetric solves: ```python from functools import partial import jax.scipy as jsp from jax import lax import jax.numpy as np import numpy as onp import jax def positive_definite_solve(a, b): factors = jsp.linalg.cho_factor(a) def solve(matvec, x): return jsp.linalg.cho_solve(factors, x) matvec = partial(np.dot, a) return lax.custom_linear_solve(matvec, b, solve, symmetric=True) def linear_solve(a, b): a_factors = jsp.linalg.lu_factor(a) def solve(matvec, x): return jsp.linalg.lu_solve(a_factors, x) def transpose_solve(vecmat, x): return jsp.linalg.lu_solve(a_factors, x, trans=1) matvec = partial(np.dot, a) return lax.custom_linear_solve(matvec, b, solve, transpose_solve) def loss(solve): def f(a, b): return solve(a, b).sum() return f rs = onp.random.RandomState(0) a = rs.randn(500, 500) a = jax.device_put(a.T @ a + 0.1 * np.eye(500)) b = jax.device_put(rs.randn(500)) # general purpose solve # current grad = jax.jit(jax.grad(loss(np.linalg.solve))) %timeit jax.device_get(grad(a, b)) # 33.8 ms per loop # new grad = jax.jit(jax.grad(loss(linear_solve))) %timeit jax.device_get(grad(a, b)) # 10.1 ms per loop # positive definite solve # current grad = jax.jit(jax.grad(loss(partial(jsp.linalg.solve, sym_pos=True)))) %timeit jax.device_get(grad(a, b)) # 23.7 ms per loop # new grad = jax.jit(jax.grad(loss(positive_definite_solve))) %timeit jax.device_get(grad(a, b)) # 4.8 ms per loop ``` Unfortunately, we can't just use these prototype implementations internally in JAX, for two reasons: 1. ~~`custom_linear_solve` (like custom transforms in general) doesn't work with batching yet (https://github.com/google/jax/issues/1249).~~ This was solved by #2099. 2. ~~We do an extra optimization in `triangular_solve_jvp_rule_a` for the case of solving many right-hand-sides at the same time with the same left-hand side (https://github.com/google/jax/pull/1466). This new gradient rule here doesn't handle this yet.~~ Update: in practice, I don't think this optimization actually matters -- it's the difference between `n*m*m+m*m*m` time vs `2*m*m*m` time. 3. We need to support multiple right-hand-side arguments. After https://github.com/google/jax/pull/2138, we'll be able to do this by batching `custom_linear_solve`.
Thanks to @mattjj for pointing out that LU solve has the `trans` argument, which means we can use a single factorization for both the forward and reverse calculations to speed up solves on all types of matrices.
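A small sketch of that point (illustrative only, following the prototype above): one `lu_factor` call answers both `a @ x = b` and `a.T @ x = b`, which is exactly what the forward solve and its transpose need.
```python
import numpy as onp
import jax.numpy as np
import jax.scipy as jsp

rs = onp.random.RandomState(0)
a = np.asarray(rs.randn(4, 4))
b = np.asarray(rs.randn(4))

factors = jsp.linalg.lu_factor(a)                 # factor once
x_fwd = jsp.linalg.lu_solve(factors, b)           # solves a @ x = b
x_rev = jsp.linalg.lu_solve(factors, b, trans=1)  # solves a.T @ x = b
print(np.allclose(np.dot(a, x_fwd), b), np.allclose(np.dot(a.T, x_rev), b))
```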
2020-02-13T02:14:01
google/jax
2,257
google__jax-2257
[ "2248" ]
b6e834117616977c85dae1e166124e3254304ca4
diff --git a/jax/api.py b/jax/api.py --- a/jax/api.py +++ b/jax/api.py @@ -1383,11 +1383,10 @@ def device_put(x, device=None): return tree_map(lambda y: xla.device_put_p.bind(y, device=device), x) -# TODO(mattjj): consider revising def _device_get(x): if isinstance(x, core.Tracer): return x - return x.copy() + return onp.asarray(x) def device_get(x): for y in tree_leaves(x): diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -1816,6 +1816,10 @@ def asarray(a, dtype=None, order=None): lax._check_user_dtype_supported(dtype, "asarray") return array(a, dtype=dtype, copy=False, order=order) +@_wraps(onp.copy) +def copy(a, order='K'): + return array(a, copy=True, order=order) + @_wraps(onp.zeros_like) def zeros_like(x, dtype=None): @@ -3436,7 +3440,8 @@ def _operator_round(number, ndigits=None): _diff_methods = ["clip", "compress", "conj", "conjugate", "cumprod", "cumsum", "diagonal", "dot", "max", "mean", "min", "prod", "ptp", "ravel", "repeat", "sort", "squeeze", "std", "sum", - "swapaxes", "take", "tile", "trace", "transpose", "var"] + "swapaxes", "take", "tile", "trace", "transpose", "var", + "copy"] # Set up operator, method, and property forwarding on Tracer instances containing
diff --git a/tests/api_test.py b/tests/api_test.py --- a/tests/api_test.py +++ b/tests/api_test.py @@ -1826,6 +1826,9 @@ def func1(x): re.DOTALL)): api.jit(func1)(2.) + def test_array_tracer_copy(self): + api.value_and_grad(lambda x: x.copy().sum())(np.ones(2)) # doesn't crash + class JaxprTest(jtu.JaxTestCase):
ConcreteArray has no attribute copy I wanted to use `.copy()` on arrays to avoid working on views and corrupting values elsewhere. During tracing jax called my function with a `ConcreteArray` and threw an error since these don't support `.copy()`. Is there a different way i should have done this or should `ConcreteArray`s support a dummy `.copy()` that just returns `self`?
Can you share a full code example? How do you end up with a ConcreteArray? @shoyer A minimal reproducer: ```python from jax import numpy as np, tree_map, tree_flatten, value_and_grad from jax.numpy import ndarray as array from typing import List class PEPS: def __init__(self, tensors: List[array], *args): self._tensors = tree_map(lambda arr: arr.copy(), tensors) self.args = args def calculate_sth(self): return np.sum(tree_flatten(tree_map(np.linalg.norm, self._tensors))[0]) def cost_function(new_tensors): peps = PEPS(new_tensors) return peps.calculate_sth() test_tensors = [np.zeros([i, i, i]) for i in range(5)] val, grad = value_and_grad(cost_function)(test_tensors) ``` raises: ``` Traceback (most recent call last): File "/Users/jakobunfried/*CENSORED*/tmp/minimal_reproducer.py", line 21, in <module> val, grad = value_and_grad(cost_function)(test_tensors) File "/Users/jakobunfried/anaconda3/lib/python3.7/site-packages/jax/api.py", line 411, in value_and_grad_f ans, vjp_py = vjp(f_partial, *dyn_args) File "/Users/jakobunfried/anaconda3/lib/python3.7/site-packages/jax/api.py", line 1289, in vjp out_primal, out_vjp = ad.vjp(flat_fun, primals_flat) File "/Users/jakobunfried/anaconda3/lib/python3.7/site-packages/jax/interpreters/ad.py", line 109, in vjp out_primals, pvals, jaxpr, consts = linearize(traceable, *primals) File "/Users/jakobunfried/anaconda3/lib/python3.7/site-packages/jax/interpreters/ad.py", line 98, in linearize jaxpr, out_pvals, consts = pe.trace_to_jaxpr(jvpfun_flat, in_pvals) File "/Users/jakobunfried/anaconda3/lib/python3.7/site-packages/jax/interpreters/partial_eval.py", line 337, in trace_to_jaxpr jaxpr, (out_pvals, consts, env) = fun.call_wrapped(pvals) File "/Users/jakobunfried/anaconda3/lib/python3.7/site-packages/jax/linear_util.py", line 152, in call_wrapped ans = self.f(*args, **dict(self.params, **kwargs)) File "/Users/jakobunfried/*CENSORED*/tmp/minimal_reproducer.py", line 16, in cost_function peps = PEPS(new_tensors) File "/Users/jakobunfried/*CENSORED*/tmp/minimal_reproducer.py", line 8, in __init__ self._tensors = tree_map(lambda arr: arr.copy(), tensors) File "/Users/jakobunfried/anaconda3/lib/python3.7/site-packages/jax/tree_util.py", line 124, in tree_map return treedef.unflatten(map(f, leaves)) File "/Users/jakobunfried/*CENSORED*/tmp/minimal_reproducer.py", line 8, in <lambda> self._tensors = tree_map(lambda arr: arr.copy(), tensors) File "/Users/jakobunfried/anaconda3/lib/python3.7/site-packages/jax/core.py", line 365, in __getattr__ attr = getattr(self.aval, name) AttributeError: 'ConcreteArray' object has no attribute 'copy' Process finished with exit code 1 ``` @shoyer not sure if you get notified if I edit, so here goes another ping Here's a simpler example: ```python import jax import numpy as np jax.value_and_grad(lambda x: x.copy())(np.ones((2,))) ``` I guess one reason why `copy()` may not exist on `ConcreteArray` (or its base class `ShapedArray`) is that it the semantics of copying a tracer are not entirely clear: 1. should a copy return the same object on a tracer? 2. should a copy return a "disconnected copy"? The later is not really possible for tracers, but it may be what users expect. For context, it may be worth noting that JAX doesn't support in-place operations on arrays, so copies are generally not useful. Nonetheless, it is maybe worth adding a dummy `copy()` method to facilitate duck typing. I think dask does this. You are completely correct! 
I was still in the `onp` mindset and trying to protect my class attributes from being a view of an array that could be in-place mutated elsewhere and thus change my class attribute arrays uncontrollably. But since jax doesn't do that, I don't even need to copy. So for my use-case, just copying the list, i.e. `self._tensors = tensors.copy()`, should be completely fine. Thanks! In the end, it was useful for me to run into this error, since it brought my mistake to light. For my part, I don't see that any changes are called for. Do you want to close this, or is it still an open question if `ShapedArray` and its subclasses should support `.copy()`? > Do you want to close this, or is it still an open question if `ShapedArray` and its subclasses should support `.copy()`? I think this is still a valid open question.
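A usage sketch mirroring the test added in this change; the printed values follow from summing a copy of an array of ones:
```python
import jax
import jax.numpy as np

val, grad = jax.value_and_grad(lambda x: x.copy().sum())(np.ones(2))
print(val, grad)  # 2.0 [1. 1.]
```
The patch also adds a module-level `np.copy(a)` alongside the `.copy()` method.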
2020-02-18T15:57:51
google/jax
2268
google__jax-2268
[ "2264" ]
cfb5666ac1ac9889625ddc10d02ed64914a883e7
diff --git a/jax/lax/lax_control_flow.py b/jax/lax/lax_control_flow.py --- a/jax/lax/lax_control_flow.py +++ b/jax/lax/lax_control_flow.py @@ -449,8 +449,11 @@ def make_computation(name, jaxpr, op_shape): return c.Conditional(pred, true_op, true_c, false_op, false_c) def _cond_pred_bcast_select(pred, x, y): - bcast_pred = lax.broadcast_in_dim(pred, onp.shape(x), list(range(onp.ndim(pred)))) - return lax.select(bcast_pred, x, y) + if core.get_aval(x) is core.get_aval(y) is core.abstract_unit: + return x + else: + bcast_pred = lax.broadcast_in_dim(pred, onp.shape(x), list(range(onp.ndim(pred)))) + return lax.select(bcast_pred, x, y) def _cond_batching_rule(args, dims, true_jaxpr, false_jaxpr, linear): # TODO: maybe avoid moving arg axes to front if we're promoting to select?
diff --git a/tests/lax_control_flow_test.py b/tests/lax_control_flow_test.py --- a/tests/lax_control_flow_test.py +++ b/tests/lax_control_flow_test.py @@ -820,6 +820,19 @@ def f(x): expected = f(4.) self.assertAllClose(y, expected, check_dtypes=False) + def testCondVmapGrad(self): + # https://github.com/google/jax/issues/2264 + def f_1(x): return x ** 2 + def f_2(x): return x ** 3 + + def f(x): return lax.cond(x > 0, x, f_1, x, f_2) + def g(x): return np.where(x > 0, f_1(x), f_2(x)) + + x = np.linspace(-1, 1, 20) + ans = api.vmap(api.grad(f))(x) + expected = api.vmap(api.grad(g))(x) + self.assertAllClose(ans, expected, check_dtypes=False) + def testIssue1263(self): def f(rng, x): cond = random.bernoulli(rng)
Using `vmap` on `grad` on function with `lax.cond` throws errors Hi, I'm using `vmap` on `grad` on functions containing `lax.cond`, this trows errors. Is it supposed to work yet? If I switch to the equivalent `np.where` there's no problem: ```python def f_1(x): return x ** 2 def f_2(x): return x ** 3 def f(x): y = lax.cond(x > 0, x, f_1, x, f_2) return y def g(x): y = np.where(x > 0, f_1(x), f_2(x)) return y x = np.linspace(-1, 1, 20) y = vmap(grad(f))(x) print(y) ``` <details> --------------------------------------------------------------------------- IndexError Traceback (most recent call last) <ipython-input-55-e5aab592a69e> in <module> 15 16 x = np.linspace(-1, 1, 20) ---> 17 y = vmap(grad(f))(x) 18 print(y) ~/one-leg/.venv/lib/python3.5/site-packages/jax/api.py in batched_fun(*args) 690 _check_axis_sizes(in_tree, args_flat, in_axes_flat) 691 out_flat = batching.batch(flat_fun, args_flat, in_axes_flat, --> 692 lambda: _flatten_axes(out_tree(), out_axes)) 693 return tree_unflatten(out_tree(), out_flat) 694 ~/one-leg/.venv/lib/python3.5/site-packages/jax/interpreters/batching.py in batch(fun, in_vals, in_dims, out_dim_dests) 35 def batch(fun, in_vals, in_dims, out_dim_dests): 36 size, = {x.shape[d] for x, d in zip(in_vals, in_dims) if d is not not_mapped} ---> 37 out_vals, out_dims = batch_fun(fun, in_vals, in_dims) 38 return map(partial(matchaxis, size), out_dims, out_dim_dests(), out_vals) 39 ~/one-leg/.venv/lib/python3.5/site-packages/jax/interpreters/batching.py in batch_fun(fun, in_vals, in_dims) 41 with new_master(BatchTrace) as master: 42 fun, out_dims = batch_subtrace(fun, master, in_dims) ---> 43 out_vals = fun.call_wrapped(*in_vals) 44 del master 45 return out_vals, out_dims() ~/one-leg/.venv/lib/python3.5/site-packages/jax/linear_util.py in call_wrapped(self, *args, **kwargs) 147 gen = None 148 --> 149 ans = self.f(*args, **dict(self.params, **kwargs)) 150 del args 151 while stack: ~/one-leg/.venv/lib/python3.5/site-packages/jax/api.py in grad_f(*args, **kwargs) 350 @wraps(fun, docstr=docstr, argnums=argnums) 351 def grad_f(*args, **kwargs): --> 352 _, g = value_and_grad_f(*args, **kwargs) 353 return g 354 ~/one-leg/.venv/lib/python3.5/site-packages/jax/api.py in value_and_grad_f(*args, **kwargs) 405 f_partial, dyn_args = _argnums_partial(f, argnums, args) 406 if not has_aux: --> 407 ans, vjp_py = vjp(f_partial, *dyn_args) 408 else: 409 ans, vjp_py, aux = vjp(f_partial, *dyn_args, has_aux=True) ~/one-leg/.venv/lib/python3.5/site-packages/jax/api.py in vjp(fun, *primals, **kwargs) 1283 if not has_aux: 1284 flat_fun, out_tree = flatten_fun_nokwargs(fun, in_tree) -> 1285 out_primal, out_vjp = ad.vjp(flat_fun, primals_flat) 1286 out_tree = out_tree() 1287 else: ~/one-leg/.venv/lib/python3.5/site-packages/jax/interpreters/ad.py in vjp(traceable, primals, has_aux) 104 def vjp(traceable, primals, has_aux=False): 105 if not has_aux: --> 106 out_primals, pvals, jaxpr, consts = linearize(traceable, *primals) 107 else: 108 out_primals, pvals, jaxpr, consts, aux = linearize(traceable, *primals, has_aux=True) ~/one-leg/.venv/lib/python3.5/site-packages/jax/interpreters/ad.py in linearize(traceable, *primals, **kwargs) 93 _, in_tree = tree_flatten(((primals, primals), {})) 94 jvpfun_flat, out_tree = flatten_fun(jvpfun, in_tree) ---> 95 jaxpr, out_pvals, consts = pe.trace_to_jaxpr(jvpfun_flat, in_pvals) 96 pval_primals, pval_tangents = tree_unflatten(out_tree(), out_pvals) 97 aval_primals, const_primals = unzip2(pval_primals) 
~/one-leg/.venv/lib/python3.5/site-packages/jax/interpreters/partial_eval.py in trace_to_jaxpr(fun, pvals, instantiate, stage_out_calls) 352 with new_master(trace_type) as master: 353 fun = trace_to_subjaxpr(fun, master, instantiate) --> 354 jaxpr, (out_pvals, consts, env) = fun.call_wrapped(pvals) 355 assert not env 356 del master ~/one-leg/.venv/lib/python3.5/site-packages/jax/linear_util.py in call_wrapped(self, *args, **kwargs) 147 gen = None 148 --> 149 ans = self.f(*args, **dict(self.params, **kwargs)) 150 del args 151 while stack: <ipython-input-55-e5aab592a69e> in f(x) 7 8 def f(x): ----> 9 y = lax.cond(x > 0, x, f_1, x, f_2) 10 return y 11 ~/one-leg/.venv/lib/python3.5/site-packages/jax/lax/lax_control_flow.py in cond(pred, true_operand, true_fun, false_operand, false_fun) 421 out = cond_p.bind( 422 *itertools.chain([pred], true_consts, true_ops, false_consts, false_ops), --> 423 true_jaxpr=true_jaxpr, false_jaxpr=false_jaxpr, linear=linear) 424 return tree_unflatten(true_out_tree, out) 425 ~/one-leg/.venv/lib/python3.5/site-packages/jax/lax/lax_control_flow.py in cond_bind(true_jaxpr, false_jaxpr, linear, *args) 675 core.check_jaxpr(false_jaxpr.jaxpr) 676 return core.Primitive.bind(cond_p, *args, true_jaxpr=true_jaxpr, --> 677 false_jaxpr=false_jaxpr, linear=linear) 678 679 cond_p = lax.Primitive('cond') ~/one-leg/.venv/lib/python3.5/site-packages/jax/core.py in bind(self, *args, **kwargs) 180 181 tracers = map(top_trace.full_raise, args) --> 182 out_tracer = top_trace.process_primitive(self, tracers, kwargs) 183 if self.multiple_results: 184 return map(full_lower, out_tracer) ~/one-leg/.venv/lib/python3.5/site-packages/jax/interpreters/ad.py in process_primitive(self, primitive, tracers, params) 303 "Forward-mode differentiation rule for '{}' not implemented" 304 .format(primitive)) --> 305 primal_out, tangent_out = jvp(primals_in, tangents_in, **params) 306 if primitive.multiple_results: 307 return [JVPTracer(self, x, t) for x, t in zip(primal_out, tangent_out)] ~/one-leg/.venv/lib/python3.5/site-packages/jax/lax/lax_control_flow.py in _cond_jvp(primals, tangents, true_jaxpr, false_jaxpr, linear) 510 out = cond_p.bind( 511 *itertools.chain([pred], tops, tops_dot, fops, fops_dot), --> 512 true_jaxpr=true_jvp, false_jaxpr=false_jvp, linear=linear_jvp) 513 out_primals, out_tangents = split_list(out, [len(out_nz)]) 514 out_tangents_iter = iter(out_tangents) ~/one-leg/.venv/lib/python3.5/site-packages/jax/lax/lax_control_flow.py in cond_bind(true_jaxpr, false_jaxpr, linear, *args) 675 core.check_jaxpr(false_jaxpr.jaxpr) 676 return core.Primitive.bind(cond_p, *args, true_jaxpr=true_jaxpr, --> 677 false_jaxpr=false_jaxpr, linear=linear) 678 679 cond_p = lax.Primitive('cond') ~/one-leg/.venv/lib/python3.5/site-packages/jax/core.py in bind(self, *args, **kwargs) 180 181 tracers = map(top_trace.full_raise, args) --> 182 out_tracer = top_trace.process_primitive(self, tracers, kwargs) 183 if self.multiple_results: 184 return map(full_lower, out_tracer) ~/one-leg/.venv/lib/python3.5/site-packages/jax/interpreters/partial_eval.py in process_primitive(self, primitive, tracers, params) 94 def process_primitive(self, primitive, tracers, params): 95 if primitive in custom_partial_eval_rules: ---> 96 return custom_partial_eval_rules[primitive](self, *tracers, **params) 97 else: 98 return self.default_process_primitive(primitive, tracers, params) ~/one-leg/.venv/lib/python3.5/site-packages/jax/lax/lax_control_flow.py in _cond_partial_eval(trace, true_jaxpr, false_jaxpr, linear, *tracers) 564 
out_consts_res = cond_p.bind( 565 *in_consts, true_jaxpr=true_jaxpr_1, false_jaxpr=false_jaxpr_1, --> 566 linear=linear) 567 out_consts, res = split_list(out_consts_res, [len(out_consts_res) - num_res]) 568 ~/one-leg/.venv/lib/python3.5/site-packages/jax/lax/lax_control_flow.py in cond_bind(true_jaxpr, false_jaxpr, linear, *args) 675 core.check_jaxpr(false_jaxpr.jaxpr) 676 return core.Primitive.bind(cond_p, *args, true_jaxpr=true_jaxpr, --> 677 false_jaxpr=false_jaxpr, linear=linear) 678 679 cond_p = lax.Primitive('cond') ~/one-leg/.venv/lib/python3.5/site-packages/jax/core.py in bind(self, *args, **kwargs) 180 181 tracers = map(top_trace.full_raise, args) --> 182 out_tracer = top_trace.process_primitive(self, tracers, kwargs) 183 if self.multiple_results: 184 return map(full_lower, out_tracer) ~/one-leg/.venv/lib/python3.5/site-packages/jax/interpreters/batching.py in process_primitive(self, primitive, tracers, params) 109 # TODO(mattjj,phawkins): if no rule implemented, could vmap-via-map here 110 batched_primitive = get_primitive_batcher(primitive) --> 111 val_out, dim_out = batched_primitive(vals_in, dims_in, **params) 112 if primitive.multiple_results: 113 return map(partial(BatchTracer, self), val_out, dim_out) ~/one-leg/.venv/lib/python3.5/site-packages/jax/lax/lax_control_flow.py in _cond_batching_rule(***failed resolving arguments***) 478 for x, b in zip(false_out, out_bat)] 479 return [_cond_pred_bcast_select(pred, t, f) --> 480 for t, f in zip(true_out, false_out)], [0] * len(true_out) 481 else: 482 out_dims = [0 if b else batching.not_mapped for b in out_bat] ~/one-leg/.venv/lib/python3.5/site-packages/jax/lax/lax_control_flow.py in <listcomp>(.0) 478 for x, b in zip(false_out, out_bat)] 479 return [_cond_pred_bcast_select(pred, t, f) --> 480 for t, f in zip(true_out, false_out)], [0] * len(true_out) 481 else: 482 out_dims = [0 if b else batching.not_mapped for b in out_bat] ~/one-leg/.venv/lib/python3.5/site-packages/jax/lax/lax_control_flow.py in _cond_pred_bcast_select(pred, x, y) 450 451 def _cond_pred_bcast_select(pred, x, y): --> 452 bcast_pred = lax.broadcast_in_dim(pred, onp.shape(x), list(range(onp.ndim(pred)))) 453 return lax.select(bcast_pred, x, y) 454 ~/one-leg/.venv/lib/python3.5/site-packages/jax/lax/lax.py in broadcast_in_dim(operand, shape, broadcast_dimensions) 621 return broadcast_in_dim_p.bind( 622 operand, shape=tuple(shape), --> 623 broadcast_dimensions=tuple(broadcast_dimensions)) 624 625 def reshape(operand, new_sizes, dimensions=None): ~/one-leg/.venv/lib/python3.5/site-packages/jax/core.py in bind(self, *args, **kwargs) 177 top_trace = find_top_trace(args) 178 if top_trace is None: --> 179 return self.impl(*args, **kwargs) 180 181 tracers = map(top_trace.full_raise, args) ~/one-leg/.venv/lib/python3.5/site-packages/jax/lax/lax.py in _broadcast_in_dim_impl(operand, shape, broadcast_dimensions) 2342 if type(operand) is xla.DeviceArray: 2343 aval = ShapedArray(shape, _dtype(operand)) -> 2344 lazy_expr = lazy.broadcast(operand._lazy_expr, shape, broadcast_dimensions) 2345 return xla.DeviceArray(aval, None, lazy_expr, operand.device_buffer) 2346 else: ~/one-leg/.venv/lib/python3.5/site-packages/jax/lazy.py in broadcast(lexpr, shape, broadcast_dimensions) 127 new_dims = [None] * len(shape) 128 for i, d in enumerate(broadcast_dimensions): --> 129 new_dims[d] = lexpr.dims[i] 130 return LazyExpr(lexpr.input, shape, tuple(new_dims)) 131 IndexError: list assignment index out of range </details> In a more complex application I have this error: ```ValueError: 
broadcast_in_dim broadcast dimensions must be less than ndim(shape), got (0,) for shape ().``` If you want, I can share that code as well, but it's a bit more complicated than my toy example above. Thanks for your time, Rembert
It's supposed to work, but grad-of-cond is only pretty lightly tested at this point. Looks like you've found a bug for us! Toy examples are perfect, btw. Thanks!
2020-02-20T05:19:03
google/jax
2288
google__jax-2288
[ "2283" ]
c953ca2d83099c20121e09168d1c51b01c931557
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -1399,6 +1399,14 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): a_dtype = _dtype(a) if dtype: + if (not issubdtype(dtype, complexfloating) and + issubdtype(a_dtype, complexfloating)): + msg = ("jax.numpy.var does not yet support real dtype parameters when " + "computing the variance of an array of complex values. The " + "semantics of numpy.var seem unclear in this case. Please comment " + "on https://github.com/google/jax/issues/2283 if this behavior is " + "important to you.") + raise ValueError(msg) a_dtype = promote_types(a_dtype, dtype) else: if not issubdtype(a_dtype, inexact):
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -2307,10 +2307,14 @@ def onp_fun(x): jnp_fun = partial(jnp.var, dtype=out_dtype, axis=axis, ddof=ddof, keepdims=keepdims) tol = jtu.tolerance(out_dtype, {onp.float16: 1e-1, onp.float32: 1e-3, onp.float64: 1e-3, onp.complex128: 1e-6}) - self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True, - tol=tol) - self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True, rtol=tol, - atol=tol) + if (jnp.issubdtype(dtype, jnp.complexfloating) and + not jnp.issubdtype(out_dtype, jnp.complexfloating)): + self.assertRaises(ValueError, lambda: jnp_fun(*args_maker())) + else: + self._CheckAgainstNumpy(onp_fun, jnp_fun, args_maker, check_dtypes=True, + tol=tol) + self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True, rtol=tol, + atol=tol) @parameterized.named_parameters( jtu.cases_from_list(
np.var does not match onp.var in some cases Running this snippet ```python import jax.numpy as np import numpy as onp inp_array = np.array([[[5.29215704-0.30965656j, 1.20047163+1.23179551j, 0+0.43213071j, 6.7226796 +4.36282052j]], [[5.60267397+2.28311318j, 0+0.36502505j, 0 +1j, -0.45407162+1.00102298j]]]) print(np.var(inp_array, 2, dtype=np.float32)) print(onp.var(onp.array(inp_array), 2, dtype=onp.float32)) ``` Prints different outputs for numpy and jax.numpy: ``` [[10.911064] [ 6.728332]] [[12.953884] [ 8.079251]] ``` Not sure if there is an actual use case for np.var on complex arrays with dtype=float, but I guess the outputs are supposed to be the same, since jax.numpy is supposed to match numpy where possible. The mismatch happens because the NumPy implementation uses dtype for all of the calculations, whereas jax.numpy does all of the calculations in promote_types(complex, float), which evaluates to complex, and only converts the computed value to dtype at the very end. If this sounds like something that should be fixed, then I can make a PR.
I think we should do one of two things: (a) match NumPy, or (b) issue an error, if the behavior isn't sensible. I can't particularly decide if the behavior of NumPy is sensible here; it might make more sense to issue an error.
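For reference, a small sketch of what option (b) looks like with the check added in the patch above (the array values here are arbitrary): a real `dtype` on complex input now raises instead of silently disagreeing with NumPy, while the default call is unchanged.

```python
import jax.numpy as jnp

x = jnp.array([1 + 1j, 2 - 1j, 3 + 0.5j])

print(jnp.var(x))  # still fine: the variance of a complex array is real-valued

try:
    jnp.var(x, dtype=jnp.float32)  # real dtype requested for complex input
except ValueError as e:
    print(e)  # points back to this issue instead of returning a surprising value
```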
2020-02-22T13:33:38
google/jax
2364
google__jax-2364
[ "2358" ]
64b1da9d4877b504ee353d03417368c351aeadc1
diff --git a/jax/lax/lax.py b/jax/lax/lax.py --- a/jax/lax/lax.py +++ b/jax/lax/lax.py @@ -1043,6 +1043,17 @@ def top_k(operand, k): return top_k_p.bind(operand, k=k) def tie_in(x, y): + """Gives ``y`` a fake data dependence on ``x``. + + When staging to XLA (e.g. running under jit or pmap), values that don't depend + on computation inputs are computed op-by-op, and folded into the XLA + computation as constants. + + ``tie_in`` provides a way to explicitly stage values into the computation. + When staging to XLA and ``x`` is already staged, then the result of ``tie_in`` + is ``y``, but staged to XLA. Downstream use of the result will also be staged + to XLA. + """ return tie_in_p.bind(x, y)
Docstring for jax.lax.tie_in `jax.lax.tie_in` seems to have subtle but important properties, but [has no docstring](https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.tie_in.html#jax.lax.tie_in). Could we add one?
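The docstring added in the patch above is easiest to read alongside a usage sketch (this example is illustrative and not taken from the issue): by itself the constant below does not depend on the jitted function's input and would be constant-folded; tying it to `x` stages it into the XLA computation.

```python
import jax
import jax.numpy as jnp
from jax import lax

@jax.jit
def f(x):
    # jnp.ones(3) has no data dependence on x, so on its own it would be
    # computed op-by-op and folded in as a constant; tie_in gives it a fake
    # dependence on x so it is staged into the compiled computation instead.
    ones = lax.tie_in(x, jnp.ones(3))
    return x * ones

print(f(2.0))  # [2. 2. 2.]
```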
2020-03-05T19:56:54
google/jax
2395
google__jax-2395
[ "2367" ]
9fd69a04ea6072479513ce45391d4fbe37998715
diff --git a/jax/api.py b/jax/api.py --- a/jax/api.py +++ b/jax/api.py @@ -657,6 +657,14 @@ def vmap(fun: Callable, in_axes=0, out_axes=0): docstr = ("Vectorized version of {fun}. Takes similar arguments as {fun} " "but with additional array axes over which {fun} is mapped.") + if isinstance(in_axes, list): + # To be a tree prefix of the positional args tuple, in_axes can never be a + # list: if in_axes is not a leaf, it must be a tuple of trees. However, + # in cases like these users expect tuples and lists to be treated + # essentially interchangeably, so we canonicalize lists to tuples here + # rather than raising an error. https://github.com/google/jax/issues/2367 + in_axes = tuple(in_axes) + _check_callable(fun) if (not isinstance(in_axes, (list, tuple, type(None), int)) or not isinstance(out_axes, (list, tuple, type(None), int))):
diff --git a/tests/api_test.py b/tests/api_test.py --- a/tests/api_test.py +++ b/tests/api_test.py @@ -1229,6 +1229,20 @@ def vjp(x_tangent): b = np.dot(a + np.eye(a.shape[0]), real_x) print(gf(a, b)) # doesn't crash + def test_vmap_in_axes_list(self): + # https://github.com/google/jax/issues/2367 + dictionary = {'a': 5., 'b': np.ones(2)} + x = np.zeros(3) + y = np.arange(3.) + + + def f(dct, x, y): + return dct['a'] + dct['b'] + x + y + + out1 = api.vmap(f, (None, 0, 0))(dictionary, x, y) + out2 = api.vmap(f, [None, 0, 0])(dictionary, x, y) + self.assertAllClose(out1, out2, check_dtypes=True) + def test_vmap_in_axes_tree_prefix_error(self): # https://github.com/google/jax/issues/795 self.assertRaisesRegex(
Documentation for passing in trees I would like to pass a dictionary through a vmap. I made an attempt to understand https://jax.readthedocs.io/en/latest/notebooks/JAX_pytrees.html but couldn't figure out if this is something I need to understand or not? `vmap(loss, [None, 0, 0])(dictionary, X[i:i+batch], y[i:i+batch])` `vmap(loss, [None, 0, 0])(tree_flatten(dictionary), X[i:i+batch], y[i:i+batch])` `vmap(loss, [tree_flatten(dict_none), 0, 0])(tree_flatten(dictionary), X[i:i+batch], y[i:i+batch])` Either way I get an issue like this: ``` ValueError: Expected list, got (([<object object at 0x7fcb21ef58b0>, <object object at 0x7fcb21ef58b0>, <object object at 0x7fcb21ef58b0>, <object object at 0x7fcb21ef58b0>, <object object at 0x7fcb21ef58b0>, <object object at 0x7fcb21ef58b0>], <object object at 0x7fcb21ef58b0>), <object object at 0x7fcb21ef58b0>, <object object at 0x7fcb21ef58b0>). During handling of the above exception, another exception occurred: ValueError Traceback (most recent call last) /usr/local/lib/python3.6/dist-packages/jax/api.py in _flatten_axes(treedef, axis_tree) 714 msg = ("axes specification must be a tree prefix of the corresponding " 715 "value, got specification {} for value {}.") --> 716 raise ValueError(msg.format(axis_tree, treedef)) 717 axes = [None if a is proxy else a for a in axes] 718 assert len(axes) == treedef.num_leaves ValueError: axes specification must be a tree prefix of the corresponding value, got specification [([], PyTreeDef(dict[['dense1', 'dense2', 'dense3']], [PyTreeDef(dict[[]], []),PyTreeDef(dict[[]], []),PyTreeDef(dict[[]], [])])), 0, 0] for value PyTreeDef(tuple, [PyTreeDef(tuple, [PyTreeDef(list, [*,*,*,*,*,*]),*]),*,*]). ```
I suspect you could make a dictionary with the right keys/values by using the `fromkeys` constructor but it's a little hard to say for sure without a working example to test: `vmap(loss, [dict.fromkeys(dictionary), 0, 0])(dictionary, X[i:i+batch], y[i:i+batch])` That said, if you don't want to include an argument in a JAX transformation, the easiest way is often just to define a new function with that argument already applied, e.g., ```python from functools import partial vmap(partial(loss, dictionary))(X[i:i+batch], y[i:i+batch]) ``` Thanks that's a helpful tip! Thanks for raising this, Sasha! We don't want to make you understand the details of pytrees; we want `vmap` and all the JAX transformations to "just work" on (nested) standard Python containers. In particular, you should _not_ need to read [the pytrees docs](https://jax.readthedocs.io/en/latest/notebooks/JAX_pytrees.html) (and it does say in bold at the top, "This is primarily JAX internal documentation, end-users are not supposed to need to understand this to use JAX, except when registering new user-defined container types with JAX.") To more precisely answer your question, we might need a runnable repro. But this works: ```python import jax.numpy as np from jax import vmap dictionary = {'a': 5., 'b': np.ones(2)} x = np.zeros(3) y = np.arange(3.) def f(dct, x, y): return dct['a'] + dct['b'] + x + y result = vmap(f, (None, 0, 0))(dictionary, x, y) ``` Yet, understandably surprisingly, this doesn't! ```python result = vmap(f, [None, 0, 0])(dictionary, x, y) # ValueError: axes specification must be a tree prefix # of the corresponding value, got specification [None, 0, 0] # for value PyTreeDef(tuple, [PyTreeDef(dict[['a', 'b']], [*,*]),*,*]). ``` The issue is that the axis specification has to be a tree prefix of the `args` tuple, meaning an int (i.e. a kind of pytree, a leaf), a tuple (because `args` is a tuple), or a tuple of pytrees (including a tuple of ints, or a tuple of other kinds of pytrees). It can't be a list! I think this behavior is surprising because we're so used to treating lists and tuples interchangeably in Python APIs, like we treat `'foo'` and `"foo"` string quoting interchangeably. I want to fix this!
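With the canonicalization from the patch above in place, both spellings of the example in this thread are accepted; a self-contained sketch mirroring the regression test added in the PR:

```python
import jax
import jax.numpy as jnp

dictionary = {'a': 5., 'b': jnp.ones(2)}
x = jnp.zeros(3)
y = jnp.arange(3.)

def f(dct, x, y):
    return dct['a'] + dct['b'] + x + y

out_tuple = jax.vmap(f, (None, 0, 0))(dictionary, x, y)
out_list = jax.vmap(f, [None, 0, 0])(dictionary, x, y)  # the list is treated as a tuple
assert (out_tuple == out_list).all()
```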
2020-03-10T15:29:32
google/jax
2400
google__jax-2400
[ "2396" ]
ebbcbad547e56357e600cd8c19232d4b91cf4f00
diff --git a/jax/tree_util.py b/jax/tree_util.py --- a/jax/tree_util.py +++ b/jax/tree_util.py @@ -38,6 +38,7 @@ import functools import collections +import operator as op from .lib import pytree @@ -105,6 +106,26 @@ def register_pytree_node(nodetype, flatten_func, unflatten_func): pytree.register_node(nodetype, flatten_func, unflatten_func) _registry[nodetype] = _RegistryEntry(flatten_func, unflatten_func) +def register_pytree_node_class(cls): + """Extends the set of types that are considered internal nodes in pytrees. + + This function is a thin wrapper around ``register_pytree_node``, and provides + a class-oriented interface: + + @register_pytree_node_class + class Special: + def __init__(self, x, y): + self.x = x + self.y = y + def tree_flatten(self): + return ((self.x, self.y), None) + @classmethod + def tree_unflatten(cls, aux_data, children): + return cls(*children) + """ + register_pytree_node(cls, op.methodcaller('tree_flatten'), cls.tree_unflatten) + return cls + def tree_map(f, tree): """Maps a function over a pytree to produce a new pytree.
diff --git a/tests/tree_util_tests.py b/tests/tree_util_tests.py --- a/tests/tree_util_tests.py +++ b/tests/tree_util_tests.py @@ -47,10 +47,28 @@ def __hash__(self): def __repr__(self): return "AnObject({},{},{})".format(self.x, self.y, self.z) - tree_util.register_pytree_node(AnObject, lambda o: ((o.x, o.y), o.z), lambda z, xy: AnObject(xy[0], xy[1], z)) +@tree_util.register_pytree_node_class +class Special: + def __init__(self, x, y): + self.x = x + self.y = y + + def __repr__(self): + return "Special(x={}, y={})".format(self.x, self.y) + + def tree_flatten(self): + return ((self.x, self.y), None) + + @classmethod + def tree_unflatten(cls, aux_data, children): + return cls(*children) + + def __eq__(self, other): + return type(self) is type(other) and (self.x, self.y) == (other.x, other.y) + PYTREES = [ ("foo",), ((),), @@ -60,6 +78,7 @@ def __repr__(self): ([3],), ([3, ATuple(foo=(3, ATuple(foo=3, bar=None)), bar={"baz": 34})],), ([AnObject(3, None, [4, "foo"])],), + (Special(2, 3.),), ({"a": 1, "b": 2},), (collections.OrderedDict([("foo", 34), ("baz", 101), ("something", -42)]),), (collections.defaultdict(dict,
Optional base class or class decorator for PyTree objects Instead of requiring users to explicitly call `register_pytree_node()`, we could supply a base class that lets them instead define a pair of special methods for flattening/unflattening. This looks slightly cleaner than calling functions on classes, but still avoids the evils of implementation inheritance. Here's a working example, adapted from the [pytree docs](https://jax.readthedocs.io/en/latest/notebooks/JAX_pytrees.html): ```python import jax from jax import tree_util # baseclass that should go in tree_util.py def _generic_flatten(tree): aux, children = tree.tree_flatten() return children, (type(tree), aux) def _generic_unflatten(type_and_aux_data, children): cls, aux_data = type_and_aux_data return cls.tree_unflatten(aux_data, children) # https://stackoverflow.com/questions/18126552/how-to-run-code-when-a-class-is-subclassed class _AutoRegister(type): def __init__(cls, *args, **kwargs): tree_util.register_pytree_node(cls, _generic_flatten, _generic_unflatten) super().__init__(*args, **kwargs) class PyTree(metaclass=_AutoRegister): def tree_flatten(self): """Returns aux_data and children.""" raise NotImplemented @classmethod def tree_unflatten(cls, aux_data, children): """Returns a new PyTree.""" raise NotImplemented # example user code class Special(PyTree): def __init__(self, x, y): self.x = x self.y = y def __repr__(self): return "Special(x={}, y={})".format(self.x, self.y) def tree_flatten(self): return (None, (self.x, self.y)) @classmethod def tree_unflatten(cls, aux_data, children): return cls(*children) # example usage def show_example(structured): flat, tree = jax.tree_flatten(structured) unflattened = jax.tree_unflatten(tree, flat) print("structured={}\n flat={}\n tree={}\n unflattened={}".format( structured, flat, tree, unflattened)) show_example(Special(1, 2)) # outputs: # structured=Special(x=1, y=2) # flat=[1, 2] # tree=PyTreeDef(<class '__main__.Special'>[(<class '__main__.Special'>, None)], [*,*]) # unflattened=Special(x=1, y=2) ```
Interesting experiment! I disagree that calling functions on classes is bad, especially since the action being undertaken is to register a dispatch handler for a type. I don't think we want to grow the pytree API. Here's a simple convenience wrapper one-liner that avoids the metaclass stuff or adding the `PyTree` class to the pytree API: ```python import operator from jax import tree_util def register_pytree_node_class(cls): flatten = operator.methodcaller('tree_flatten') return tree_util.register_pytree_node(cls, flatten, cls.tree_unflatten) ``` ```python # example user code class Special: def __init__(self, x, y): self.x = x self.y = y def __repr__(self): return "Special(x={}, y={})".format(self.x, self.y) def tree_flatten(self): return ((self.x, self.y), None) @classmethod def tree_unflatten(cls, aux_data, children): return cls(*children) register_pytree_node_class(Special) ``` The example usage behavior is the same. Any user who doesn't like to call registration functions on types can of course write their own subclassing magic wrapper on top! I'm going to close this issue because it's not something we want to add to the pytree API right now. I like `register_pytree_node_class`. If it returns `cls`, we could use it as a decorator, too, e.g., ``` @register_pytree_node_class class Special: ... ``` It also avoids the API expansion of adding a base class that can be type checked. Whoa, decorator is so clever. Want to make it a PR? Sorry if I seem like a jerk for closing the issue right after you brought it up. I'm trying to get our issues under control, since I've neglected them for too long. Is that decorator version of `register_pytree_node_class` useful? If so, either you or I could make a PR. No worries on the fast close, I appriecate your diligence here! Lets reopen to consider the decorator proposal, which is definitely a better design. I should have thought of this earlier, since it's what [`flax.struct` uses](https://flax.readthedocs.io/en/latest/flax.struct.html). I think some users would find this to be a nicer model, since the class provides a nice logical grouping for these methods. One concern is whether we should adopt the existing pytree interface, or try to fix the inconsistent order of `aux_data`/`children` between flatten/unflatten first. I think we should keep `register_pytree_node_class` consistent with `register_pytree_node`, since the former is just a tiny wrapper around the latter. If someone changes the order of `register_pytree_node` then both will be fixed automatically. I don't want to keep this issue open, so I'll make the PR right now. I won't update the docs, though.
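For completeness, the decorator spelling under discussion, essentially the example that ends up in the new helper's docstring, together with a short usage check (the usage lines are a sketch, not part of the original comments):

```python
from jax import tree_util

@tree_util.register_pytree_node_class
class Special:
    def __init__(self, x, y):
        self.x = x
        self.y = y

    def tree_flatten(self):
        return ((self.x, self.y), None)

    @classmethod
    def tree_unflatten(cls, aux_data, children):
        return cls(*children)

# Instances now participate in the pytree machinery like any registered node:
leaves, treedef = tree_util.tree_flatten(Special(1, 2))
print(leaves)                                      # [1, 2]
print(tree_util.tree_unflatten(treedef, leaves))   # a reconstructed Special
```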
2020-03-10T21:55:47
google/jax
2414
google__jax-2414
[ "2412" ]
271041b499029d639bade9e1c77d3dc64a89f9cd
diff --git a/jax/lax/lax_control_flow.py b/jax/lax/lax_control_flow.py --- a/jax/lax/lax_control_flow.py +++ b/jax/lax/lax_control_flow.py @@ -94,6 +94,13 @@ def while_body_fun(loop_carry): return lax.add(i, lax._const(i, 1)), upper, body_fun(i, x) return while_body_fun +@cache() +def _fori_scan_body_fun(body_fun): + def scanned_fun(loop_carry, _): + i, upper, x = loop_carry + return (lax.add(i, lax._const(i, 1)), upper, body_fun(i, x)), None + return scanned_fun + def fori_loop(lower, upper, body_fun, init_val): """Loop from ``lower`` to ``upper`` by reduction to ``while_loop``. @@ -131,15 +138,31 @@ def fori_loop(lower, upper, body_fun, init_val): Returns: Loop value from the final iteration, of type ``a``. """ - # TODO: perhaps do more type checking here, for better error messages. + # TODO(phawkins): perhaps do more type checking here, better error messages. lower_dtype = dtypes.canonicalize_dtype(lax.dtype(lower)) upper_dtype = dtypes.canonicalize_dtype(lax.dtype(upper)) if lower_dtype != upper_dtype: msg = ("lower and upper arguments to fori_loop must have equal types, " "got {} and {}") raise TypeError(msg.format(lower_dtype.name, upper_dtype.name)) - _, _, result = while_loop(_fori_cond_fun, _fori_body_fun(body_fun), - (lower, upper, init_val)) + + # If we can specialize on the trip count, call scan instead of a while_loop + # to enable efficient reverse-mode differentiation. + try: + lower_ = int(lower) + upper_ = int(upper) + except TypeError: + use_scan = False + else: + use_scan = True + + if use_scan: + (_, _, result), _ = scan(_fori_scan_body_fun(body_fun), + (lower, upper, init_val), None, + length=upper_ - lower_) + else: + _, _, result = while_loop(_fori_cond_fun, _fori_body_fun(body_fun), + (lower, upper, init_val)) return result @@ -815,17 +838,26 @@ def _scan_impl(*args, forward, length, num_consts, num_carry, jaxpr, linear): _, _, x_avals = split_list(jaxpr.in_avals, [num_consts, num_carry]) _, y_avals = split_list(jaxpr.out_avals, [num_carry]) - def body_fun(i, vals): - i = i if forward else length - i - 1 - carry, ys = split_list(vals, [num_carry]) - x = _map(partial(_index_array, i), x_avals, xs) + def cond_fun(vals): + i, *_ = vals + return i < length + + def body_fun(vals): + [i], carry, ys = split_list(vals, [1, num_carry]) + i_ = i if forward else length - i - 1 + x = _map(partial(_index_array, i_), x_avals, xs) out_flat = core.jaxpr_as_fun(jaxpr)(*(consts + carry + x)) carry_out, y_updates = split_list(out_flat, [num_carry]) - ys_out = _map(partial(_update_array, i), y_avals, ys, y_updates) - return carry_out + ys_out + ys_out = _map(partial(_update_array, i_), y_avals, ys, y_updates) + return [i + 1] + carry_out + ys_out ys_init = _map(partial(_empty_array, length), y_avals) - return fori_loop(lax._const(length, 0), length, body_fun, init + ys_init) + if length == 0: + return init + ys_init + else: + init_val = [lax._const(length, 0)] + init + ys_init + _, *outs = while_loop(cond_fun, body_fun, init_val) + return outs def _index_array(i, aval, x): if aval is core.abstract_unit: @@ -845,12 +877,11 @@ def _update_array(i, aval, xs, x): else: return lax.dynamic_update_index_in_dim(xs, x, i, 0) -# TODO(mattjj): make scan a primitive -# def _scan_abstract_eval(*args, forward, length, num_consts, num_carry, jaxpr, linear): -# carry_avals, y_avals = split_list(jaxpr.out_avals, [num_carry]) -# ys_avals = [ShapedArray((length,) + aval.shape, aval.dtype) -# if aval is not core.abstract_unit else aval for aval in y_avals] -# return carry_avals + y_avals +def 
_scan_abstract_eval(*args, forward, length, num_consts, num_carry, jaxpr, linear): + carry_avals, y_avals = split_list(jaxpr.out_avals, [num_carry]) + ys_avals = [ShapedArray((length,) + aval.shape, aval.dtype) + if aval is not core.abstract_unit else aval for aval in y_avals] + return carry_avals + ys_avals def _scan_jvp(primals, tangents, forward, length, jaxpr, num_consts, num_carry, linear): @@ -1141,7 +1172,7 @@ def _scan_masking_rule(shape_envs, padded_vals, shape_exprs, forward, length, *itertools.chain([dynamic_length] + consts, [0], init, xs), forward=forward, length=max_length, jaxpr=masked_jaxpr, num_consts=1 + num_consts, num_carry=1 + num_carry, - linear=[False] + const_linear + [False] + init_linear + xs_linear) + linear=tuple([False] + const_linear + [False] + init_linear + xs_linear)) return out_vals[1:], out_shape def _masked_scan_jaxpr(jaxpr, num_consts, num_carry): @@ -1180,7 +1211,8 @@ def scan_bind(*args, forward, length, num_consts, num_carry, jaxpr, linear): scan_p = core.Primitive("scan") scan_p.multiple_results = True scan_p.def_custom_bind(scan_bind) -scan_p.def_impl(_scan_impl) +scan_p.def_impl(partial(xla.apply_primitive, scan_p)) +scan_p.def_abstract_eval(_scan_abstract_eval) ad.primitive_jvps[scan_p] = _scan_jvp ad.primitive_transposes[scan_p] = _scan_transpose pe.custom_partial_eval_rules[scan_p] = _scan_partial_eval
diff --git a/tests/lax_control_flow_test.py b/tests/lax_control_flow_test.py --- a/tests/lax_control_flow_test.py +++ b/tests/lax_control_flow_test.py @@ -349,7 +349,9 @@ def fun(x): self.assertAllClose(ans, expected, check_dtypes=False) def testForiLoopBatchedIssue1190(self): - f = lambda x: lax.fori_loop(0, 4, lambda _, x: x + 1, x) + cond_fun = lambda carry: carry[0] < 4 + body_fun = lambda carry: (carry[0] + 1, carry[1] + 1) + f = lambda x: lax.while_loop(cond_fun, body_fun, (0, x)) jaxpr = api.make_jaxpr(api.vmap(f))(np.arange(3)) eqn = jaxpr.jaxpr.eqns[0] self.assertIs(eqn.primitive, lax.while_p) @@ -1269,6 +1271,12 @@ def testMap(self): actual = lax.map(f, xs) self.assertAllClose(actual, expected, check_dtypes=True) + def testMapEmpty(self): + # https://github.com/google/jax/issues/2412 + ans = lax.map(lambda x: x * x, np.array([])) + expected = np.array([]) + self.assertAllClose(ans, expected, check_dtypes=True) + def testCaching(self): def cond(x): assert python_should_be_executing
jax.lax.{scan, map} break when supplied with an empty array Hey all, with 0.1.59 I'm seeing an error with both `map` and `scan`, where they can't handle an empty list: ```python jax.lax.map(lambda x: x * x, np.array([])) jax.lax.scan(lambda i, j: (i, j), np.array([]), np.array([])) ``` Both result in this error: ``` TypeError: slice slice_sizes must be less than or equal to operand shape, got slice_sizes (1,) for operand shape (0,). ``` This has been my workaround, though there is surely something more elegant and more correct: ```python def safe_map(f, xs): if xs.shape[0] == 0: return xs else: return jax.lax.map(f, xs) ``` Thank you!
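With the fix above, the `safe_map` guard is no longer needed for the `map` case; a quick check that simply mirrors the regression test added in the patch:

```python
import jax.numpy as jnp
from jax import lax

print(lax.map(lambda x: x * x, jnp.array([])))  # an empty array, shape (0,)
```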
2020-03-13T21:11:41
google/jax
2481
google__jax-2481
[ "2314" ]
e456edf5583a3f06727b2aded916c5a78a8a5941
diff --git a/jax/scipy/stats/multivariate_normal.py b/jax/scipy/stats/multivariate_normal.py --- a/jax/scipy/stats/multivariate_normal.py +++ b/jax/scipy/stats/multivariate_normal.py @@ -17,27 +17,29 @@ import scipy.stats as osp_stats from ... import lax +from ...lax_linalg import cholesky, triangular_solve +from ... import numpy as jnp from ...numpy.lax_numpy import _promote_dtypes_inexact, _constant_like, _wraps -from ...numpy.lax_numpy import dot, subtract, einsum -from ...numpy.linalg import det, inv @_wraps(osp_stats.multivariate_normal.logpdf, update_doc=False) def logpdf(x, mean, cov): x, mean, cov = _promote_dtypes_inexact(x, mean, cov) - two = _constant_like(x, 2) - dim = _constant_like(x, mean.shape[0]) - det_sig = det(cov).astype(cov.dtype) - log_normalizer = lax.log(lax.mul(lax.pow(_constant_like(x, 2 * np.pi), dim), - det_sig)) - x_shape = x.shape[:-1] - if x_shape: - x_2d = x.reshape((-1, mean.shape[0])) - quadratic = einsum("ij,jk,ik->i", subtract(x_2d, mean), inv(cov), - subtract(x_2d, mean)).reshape(x_shape).astype(cov.dtype) + if not mean.shape: + return -1/2 * (x - mean) ** 2 / cov - 1/2 * (np.log(2*np.pi) + jnp.log(cov)) else: - quadratic = dot(dot(subtract(x, mean), inv(cov)), subtract(x, mean).T).astype(cov.dtype) - return lax.div(lax.neg(lax.add(log_normalizer, quadratic)), two) + n = mean.shape[-1] + if not np.shape(cov): + y = x - mean + return (-1/2 * jnp.einsum('...i,...i->...', y, y) / cov + - n/2 * (np.log(2*np.pi) + jnp.log(cov))) + else: + if cov.ndim < 2 or cov.shape[-2:] != (n, n): + raise ValueError("multivariate_normal.logpdf got incompatible shapes") + L = cholesky(cov) + y = triangular_solve(L, x - mean, lower=True, transpose_a=True) + return (-1/2 * jnp.einsum('...i,...i->...', y, y) - n/2*np.log(2*np.pi) + - jnp.log(L.diagonal()).sum()) @_wraps(osp_stats.multivariate_normal.pdf, update_doc=False) def pdf(x, mean, cov):
diff --git a/jax/test_util.py b/jax/test_util.py --- a/jax/test_util.py +++ b/jax/test_util.py @@ -757,7 +757,7 @@ def wrapped_fun(*args): def _CheckAgainstNumpy(self, numpy_reference_op, lax_op, args_maker, check_dtypes=False, tol=None): args = args_maker() - numpy_ans = numpy_reference_op(*args) lax_ans = lax_op(*args) + numpy_ans = numpy_reference_op(*args) self.assertAllClose(numpy_ans, lax_ans, check_dtypes=check_dtypes, atol=tol, rtol=tol) diff --git a/tests/scipy_stats_test.py b/tests/scipy_stats_test.py --- a/tests/scipy_stats_test.py +++ b/tests/scipy_stats_test.py @@ -260,24 +260,6 @@ def args_maker(): tol=1e-6) self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True) - # TODO: currently it ignores the argument "shapes" and only tests dim=4 - @genNamedParametersNArgs(3, jtu.rand_default) - def testMultivariateNormalLogPdf(self, rng_factory, shapes, dtypes): - rng = rng_factory() - scipy_fun = osp_stats.multivariate_normal.logpdf - lax_fun = lsp_stats.multivariate_normal.logpdf - dim = 4 - shapex = (dim,) - - def args_maker(): - x, mean, cov = map(rng, (shapex, shapex, (dim, dim)), dtypes) - cov = random_correlation.rvs(onp.arange(1, 1+dim) * 2 / (dim + 1)) - return [x, mean, cov] - - self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False, - tol=1e-4) - self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True) - @genNamedParametersNArgs(3, jtu.rand_default) def testNormLogPdf(self, rng_factory, shapes, dtypes): rng = rng_factory() @@ -400,6 +382,67 @@ def testIssue972(self): lsp_stats.norm.cdf(onp.full((4,), onp.inf, onp.float32)), check_dtypes=False) + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": "_x={}_mean={}_cov={}".format( + jtu.format_shape_dtype_string(x_shape, x_dtype), + jtu.format_shape_dtype_string(mean_shape, mean_dtype) + if mean_shape is not None else None, + jtu.format_shape_dtype_string(cov_shape, cov_dtype) + if cov_shape is not None else None), + "x_shape": x_shape, "x_dtype": x_dtype, + "mean_shape": mean_shape, "mean_dtype": mean_dtype, + "cov_shape": cov_shape, "cov_dtype": cov_dtype, + "rng_factory": rng_factory} + for x_shape, mean_shape, cov_shape in [ + # # These test cases cover default values for mean/cov, but we don't + # # support those yet (and they seem not very valuable). 
+ # [(), None, None], + # [(), (), None], + # [(2,), None, None], + # [(2,), (), None], + # [(2,), (2,), None], + # [(3, 2), (3, 2,), None], + # [(5, 3, 2), (5, 3, 2,), None], + + [(), (), ()], + [(3,), (), ()], + [(3,), (3,), ()], + [(3,), (3,), (3, 3)], + [(3, 4), (4,), (4, 4)], + + # # These test cases are where scipy flattens things, which has + # # different batch semantics than some might expect + # [(5, 3, 2), (5, 3, 2,), ()], + # [(5, 3, 2), (5, 3, 2,), (5, 3, 2, 2)], + # [(5, 3, 2), (3, 2,), (5, 3, 2, 2)], + # [(5, 3, 2), (3, 2,), (2, 2)], + ] + for x_dtype, mean_dtype, cov_dtype in CombosWithReplacement(float_dtypes, 3) + if (mean_shape is not None or mean_dtype == onp.float32) + and (cov_shape is not None or cov_dtype == onp.float32) + for rng_factory in [jtu.rand_default])) + def testMultivariateNormalLogpdf(self, x_shape, x_dtype, mean_shape, + mean_dtype, cov_shape, cov_dtype, rng_factory): + rng = rng_factory() + def args_maker(): + args = [rng(x_shape, x_dtype)] + if mean_shape is not None: + args.append(5 * rng(mean_shape, mean_dtype)) + if cov_shape is not None: + if cov_shape == (): + args.append(0.1 + rng(cov_shape, cov_dtype) ** 2) + else: + factor_shape = (*cov_shape[:-1], 2 * cov_shape[-1]) + factor = rng(factor_shape, cov_dtype) + args.append(onp.matmul(factor, onp.swapaxes(factor, -1, -2))) + return args + + self._CheckAgainstNumpy(osp_stats.multivariate_normal.logpdf, + lsp_stats.multivariate_normal.logpdf, + args_maker, check_dtypes=True, tol=1e-3) + self._CompileAndCheck(lsp_stats.multivariate_normal.logpdf, args_maker, + check_dtypes=True) + if __name__ == "__main__": absltest.main()
Add multivariate normal pdf evaluation to jax.scipy It would be great to have a multivariate Gaussian pdf/logpdf implementation, similar to the univariate version in [jax.scipy.stats.norm](https://jax.readthedocs.io/en/latest/_modules/jax/scipy/stats/norm.html#logpdf). I am currently working with this hacky function: ``` @jit def multi_gauss_logpdf(x, mean, cov): """ Calculate the log probability density of a sample from the multivariate normal. """ D = mean.shape[0] (sign, logdet) = np.linalg.slogdet(cov) p1 = D*np.log(2*np.pi) + logdet p2 = (x-mean).T @ np.linalg.inv(cov) @ (x-mean) return -1./2 * (p1 + p2) batch_logpdf = vmap(multi_gauss_logpdf, in_axes=(0, None, None)) ``` My `lax`/primitive knowledge is still fairly limited, but I will try to put together a PR. Any recommendations on how to speed things up?
@RobertTLange are you actively working on a PR? If not, I can probably help with this one, since I've written similar code too many times in the past! It's usually better (faster and more numerically stable) to do a Cholesky and a triangular solve, and you can compute the log det in terms of the diagonal elements of the Cholesky too. Here's [an example](https://github.com/mattjj/pybasicbayes/blob/61f65ad6c781288605ec5f7347efcc5dbd73c4fc/pybasicbayes/distributions/gaussian.py#L67-L75). @mattjj thanks for coming back and sorry for the late response. Yes, it is on my todo-list. My focus is suffering a little. Should be able to get this done in the next few days. Okaydokey, then I will have learned some new computational linear algebra tricks ;) P.S.: I wrote a little JAX intro tutorial (https://roberttlange.github.io/posts/2020/03/blog-post-10/). Thanks for the great project and all the effort! Awesome tutorial, and artwork! Actually, I just noticed: a [multivariate normal logpdf function](https://github.com/google/jax/blob/6545cf3421e45aa06beb8ad2bb3c73d34ce7716a/jax/scipy/stats/multivariate_normal.py#L26-L40) seems to be checked in already from #268, but it's inefficient because of how it computes `inv` and `det` (it even computes `inv` twice!). Looks like I LGTM'd it though, probably thinking we could improve the efficiency and numerics in follow-up work. Does that function work for you? If so, we can change this issue title to be about improving it.
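A sketch of the approach described above, Cholesky plus a triangular solve, with the log-determinant read off the Cholesky diagonal (the function name and exact structure here are illustrative rather than the implementation that was eventually merged):

```python
import jax.numpy as np
from jax import jit
from jax.scipy.linalg import solve_triangular

@jit
def mvn_logpdf(x, mean, cov):
    d = mean.shape[0]
    L = np.linalg.cholesky(cov)
    # Solve L y = (x - mean) instead of forming inv(cov) explicitly.
    y = solve_triangular(L, x - mean, lower=True)
    maha = np.sum(y ** 2)                        # (x - mean)^T cov^{-1} (x - mean)
    log_det = 2. * np.sum(np.log(np.diag(L)))    # log|cov| from the Cholesky factor
    return -0.5 * (d * np.log(2. * np.pi) + log_det + maha)
```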
2020-03-21T20:14:59
google/jax
2503
google__jax-2503
[ "2502" ]
0cf84f925b049264ca2f18d95d87b7305ab36bac
diff --git a/jax/core.py b/jax/core.py --- a/jax/core.py +++ b/jax/core.py @@ -708,6 +708,13 @@ def strip_weak_type(self): """Returns a copy of the aval with weak_type=False.""" return UnshapedArray(self.dtype) if self.weak_type else self + @property + def shape(self): + msg = ("UnshapedArray has no shape. Please open an issue at " + "https://github.com/google/jax/issues because it's unexpected for " + "UnshapedArray instances to ever be produced.") + raise TypeError(msg) + class ShapedArray(UnshapedArray): __slots__ = ['shape'] array_abstraction_level = 1 diff --git a/jax/custom_derivatives.py b/jax/custom_derivatives.py --- a/jax/custom_derivatives.py +++ b/jax/custom_derivatives.py @@ -186,8 +186,23 @@ def _flatten_jvp(in_tree, *args): tangents_out, out_tree2 = tree_flatten(py_tangents_out) if out_tree != out_tree2: msg = ("Custom JVP rule must produce primal and tangent outputs with equal " - "container (pytree) structures, but got {} and {}.") + "container (pytree) structures, but got {} and {} respectively.") raise TypeError(msg.format(out_tree, out_tree2)) from None + primal_avals_out = [raise_to_shaped(core.get_aval(x)) for x in primals_out] + tangent_avals_out = [raise_to_shaped(core.get_aval(t)) for t in tangents_out] + if primal_avals_out != tangent_avals_out: + if len(primal_avals_out) == 1: + (av1,), (av2,) = primal_avals_out, tangent_avals_out + msg = ("Custom JVP rule must produce primal and tangent outputs with " + "equal shapes and dtypes, but got {} and {} respectively.") + raise TypeError(msg.format(av1.str_short(), av2.str_short())) + else: + msg = ("Custom JVP rule must produce primal and tangent outputs with " + "equal shapes and dtypes, but got:\n{}") + disagreements = ( + " primal {} for tangent {}".format(av1.str_short(), av2.str_short()) + for av1, av2 in zip(primal_avals_out, tangent_avals_out) if av1 != av2) + raise TypeError(msg.format('\n'.join(disagreements))) yield primals_out + tangents_out, out_tree def _custom_deriv_call_bind(primitive, f, *args, **params): diff --git a/jax/lax/lax_control_flow.py b/jax/lax/lax_control_flow.py --- a/jax/lax/lax_control_flow.py +++ b/jax/lax/lax_control_flow.py @@ -1309,9 +1309,9 @@ def _check_tree(func_name, expected_name, actual_tree, expected_tree): def _check_tree_and_avals(what, tree1, avals1, tree2, avals2): """Raises TypeError if (tree1, avals1) does not match (tree2, avals2). - Corresponding `tree` and `avals` must match in the sense that the number of leaves in - `tree` must be equal to the length of `avals`. - `what` will be prepended to details of the mismatch in TypeError. + Corresponding `tree` and `avals` must match in the sense that the number of + leaves in `tree` must be equal to the length of `avals`. `what` will be + prepended to details of the mismatch in TypeError. """ if tree1 != tree2: msg = ("{} must have same type structure, got {} and {}.")
diff --git a/tests/api_test.py b/tests/api_test.py --- a/tests/api_test.py +++ b/tests/api_test.py @@ -2228,7 +2228,7 @@ def test_vmap_axes(self): def test_pmap(self): raise unittest.SkipTest("TODO") # TODO(mattjj): write test - def test_missing_jvp_rule_error(self): + def test_missing_jvp_rule_error_message(self): @api.custom_jvp def foo(x): return x ** 2 @@ -2246,7 +2246,7 @@ def foo(x): r"No JVP defined for custom_jvp function foo using defjvp.", lambda: api.grad(foo)(2.)) - def test_jvp_rule_inconsistent_pytree_structures_error(self): + def test_jvp_rule_inconsistent_pytree_structures_error_message(self): @api.custom_jvp def f(x): return (x**2,) @@ -2263,12 +2263,32 @@ def foo_jvp(primals, tangents): re.escape( "Custom JVP rule must produce primal and tangent outputs " "with equal container (pytree) structures, but got " - "{} and {}.".format( + "{} and {} respectively.".format( tree_util.tree_structure((1,)), tree_util.tree_structure([1, 2])) ), lambda: api.jvp(f, (2.,), (1.,))) + def test_primal_tangent_aval_disagreement_error_message(self): + @api.custom_jvp + def f(x): + return x ** 2 + + @f.defjvp + def foo_jvp(primals, tangents): + x, = primals + t, = tangents + return f(x), np.reshape(t, (1,)) + + f(2.) # doesn't crash + self.assertRaisesRegex( + TypeError, + re.escape( + "Custom JVP rule must produce primal and tangent outputs " + "with equal shapes and dtypes, but got float32[] and float32[1] " + "respectively."), + lambda: api.jvp(f, (np.float32(2.),), (np.float32(1.),))) + class CustomVJPTest(jtu.JaxTestCase):
Spurious Exception: Tracer can't be used with raw numpy functions? I cannot seem to make this minimum working example any smaller. It seems like some combination of factors is causing this error, but I have no idea where to start. ``` import jax import jax.scipy.special as jss from jax import numpy as jnp @jax.custom_jvp def log_normalizer(q): return jnp.logaddexp(q[0], 0.0) def nat_to_exp(q): return jss.expit(q) @log_normalizer.defjvp def ln_jvp(primals, tangents): x, = primals x_dot, = tangents y = log_normalizer(x) y_dot = nat_to_exp(x) * x_dot return y, y_dot def cross_entropy_loss(p, q): # Bernoulli cross_entropy(p, q) p_dot_q = jnp.sum(p * q) ln = log_normalizer(q) return -p_dot_q + ln - jnp.zeros(()) # p are expectation parameters of a Bernoulli distribution corresponding to # probability 0.4. p = jnp.array([0.4]) # q are natural parameters of a Bernoulli distribution corresponding to # log-odds 0, which is probability 0.5. q = jnp.array([0.0]) print(jax.grad(cross_entropy_loss, 1)(p, q)) ```
Wow, this is a terrible error message we're raising. Like, so bad. It's actually a shape error: ``` > /usr/local/google/home/mattjj/packages/jax/issue2502.py(22)ln_jvp() 21 import ipdb; ipdb.set_trace() ---> 22 return y, y_dot 23 ipdb> p y DeviceArray(0.6931472, dtype=float32) ipdb> p y_dot Traced<ShapedArray(float32[1]):JaxprTrace(level=0/0)> ``` That is, `y` and `y_dot` are a scalar and a 1D array with length 1, respectively. This isn't checked in the custom_jvp machinery from #2026, and instead it falls through a strange cascade of steps ultimately leading to us calling `onp.shape` on a Tracer with an UnshapedArray for its abstract value. Ugh! We should probably make `onp.shape(unshaped)` raise a better error message, but the more important action item here is to raise an error message when the output of the custom jvp rule has inconsistent shapes for the primal and tangent values. That's how JAX can raise a better error. But also we need to fix this code here so that both primal and tangent outputs are scalars (or both are 1D arrays with the same shape). WDYT? (I meant to add these kinds of error checks to #2026 a few weeks ago, but then had to stop working on it for a few weeks and forgot my context...)
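Following the advice above, one way to repair the reproducer is to make the tangent output a scalar so that it matches the scalar primal; this is only a sketch of the shape fix (the `jnp.dot` collapses the length-1 product to a scalar, which is the right derivative for this one-parameter case):

```python
import jax
import jax.numpy as jnp
import jax.scipy.special as jss

@jax.custom_jvp
def log_normalizer(q):
    return jnp.logaddexp(q[0], 0.0)        # scalar primal output

def nat_to_exp(q):
    return jss.expit(q)

@log_normalizer.defjvp
def ln_jvp(primals, tangents):
    x, = primals
    x_dot, = tangents
    y = log_normalizer(x)
    y_dot = jnp.dot(nat_to_exp(x), x_dot)  # scalar tangent, same shape and dtype as y
    return y, y_dot

def cross_entropy_loss(p, q):
    return -jnp.sum(p * q) + log_normalizer(q)

p = jnp.array([0.4])
q = jnp.array([0.0])
print(jax.grad(cross_entropy_loss, 1)(p, q))  # no shape mismatch anymore
```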
2020-03-25T03:44:13
google/jax
2512
google__jax-2512
[ "2189" ]
1d09b6be2660fcb2f887c9fd6ee7c72eb27f6a45
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -2317,25 +2317,20 @@ def dot(a, b, precision=None): # pylint: disable=missing-docstring def matmul(a, b, precision=None): # pylint: disable=missing-docstring _check_arraylike("matmul", a, b) a_is_vec, b_is_vec = (ndim(a) == 1), (ndim(b) == 1) - a = lax.reshape(a, (1,) + shape(a)) if a_is_vec else a - b = lax.reshape(b, shape(b) + (1,)) if b_is_vec else b - - a, b = _promote_dtypes(a, b) - batch_shape = lax.broadcast_shapes(shape(a)[:-2], shape(b)[:-2]) - a = broadcast_to(a, batch_shape + shape(a)[-2:]) - b = broadcast_to(b, batch_shape + shape(b)[-2:]) - batch_dims = tuple(range(len(batch_shape))) - dim_numbers = (((ndim(a) - 1,), (ndim(b) - 2,)), (batch_dims, batch_dims)) - result = lax.dot_general(a, b, dim_numbers, precision) - - if a_is_vec or b_is_vec: - m, n = shape(result)[-2:] - new_m = () if a_is_vec else (m,) - new_n = () if b_is_vec else (n,) - return lax.reshape(result, batch_shape + new_m + new_n) + # We lower to einsum here because it handles batch dimensions for us. + # np.matmul is stricter than np.einsum with respect to size 1 contracting + # dimensions, so we need an additional check. + if shape(a)[0 if a_is_vec else -1] != shape(b)[0 if b_is_vec else -2]: + msg = "matmul requires contracting dimension to match, got {} and {}" + raise ValueError(msg.format(shape(a), shape(b))) + if a_is_vec and b_is_vec: + return lax.dot(a, b, precision=precision) + elif a_is_vec: + return einsum('i,...ij->...j', a, b, precision=precision) + elif b_is_vec: + return einsum('...ij,j->...i', a, b, precision=precision) else: - return result - + return einsum('...ij,...jk->...ik', a, b, precision=precision) @_wraps(onp.vdot, lax_description=_PRECISION_DOC) def vdot(a, b, precision=None): @@ -2425,6 +2420,17 @@ def sum_repeats(operand, names, counts, keep_names): names = names.replace(name, '', count - 1) return operand, names + def filter_singleton_dims(operand, names, other_shape, other_names): + s = shape(operand) + new_shape = [] + new_names = [] + for i, d in enumerate(names): + other_i = other_names.find(d) + if s[i] != 1 or other_i == -1 or other_shape[other_i] == 1: + new_shape.append(s[i]) + new_names.append(d) + return reshape(operand, tuple(new_shape)), "".join(new_names) + for operand_indices, contracted_names, einstr in contractions: input_str, result_names = einstr.split('->') input_names = input_str.split(',') @@ -2445,9 +2451,18 @@ def sum_repeats(operand, names, counts, keep_names): elif len(operand_indices) == 2: lhs, rhs = map(operands.pop, operand_indices) - lhs_counts, rhs_counts = map(collections.Counter, input_names) lhs_names, rhs_names = input_names + # handle cases where one side of a contracting or batch dimension is 1 + # but its counterpart is not. 
+ lhs, lhs_names = filter_singleton_dims(lhs, lhs_names, shape(rhs), + rhs_names) + rhs, rhs_names = filter_singleton_dims(rhs, rhs_names, shape(lhs), + lhs_names) + + lhs_counts = collections.Counter(lhs_names) + rhs_counts = collections.Counter(rhs_names) + # sum out unique contracted indices in lhs and rhs lhs_uniques = [name for name in contracted_names if lhs_counts[name] == 1 and rhs_counts[name] == 0] @@ -2465,14 +2480,16 @@ def sum_repeats(operand, names, counts, keep_names): contracted_names = contracted_names & (set(lhs_names) | set(rhs_names)) batch_names = (set(lhs_names) & set(rhs_names)) - contracted_names + lhs_batch, rhs_batch = unzip2((lhs_names.find(n), rhs_names.find(n)) for n in batch_names) # NOTE(mattjj): this can fail non-deterministically in python3, maybe # due to opt_einsum - assert _all(name in lhs_names and name in rhs_names and - lhs.shape[lhs_names.index(name)] == rhs.shape[rhs_names.index(name)] - for name in contracted_names) + assert _all( + name in lhs_names and name in rhs_names and + lhs.shape[lhs_names.index(name)] == rhs.shape[rhs_names.index(name)] + for name in contracted_names) # move batch dims to the front (required by lax.dot_general, and easier) batch_dims = tuple(range(len(batch_names)))
diff --git a/tests/lax_numpy_einsum_test.py b/tests/lax_numpy_einsum_test.py --- a/tests/lax_numpy_einsum_test.py +++ b/tests/lax_numpy_einsum_test.py @@ -315,6 +315,34 @@ def test_einsum_kpmurphy_example(self): self.assertAllClose(L, np.einsum('ntk,kd,dc->nc', S, W, V, optimize=path), check_dtypes=False, rtol=rtol) + def test_contraction_broadcasting(self): + r = rng() + x = r.randn(3, 4, 5) + y = r.randn(3, 1, 6) + s = 'cij,cjk->cik' + self._check(s, x, y) + + def test_batch_broadcasting(self): + r = rng() + x = r.randn(1, 4, 5) + y = r.randn(3, 5, 6) + s = 'cij,cjk->cik' + self._check(s, x, y) + + def test_batch_and_contraction_broadcasting(self): + r = rng() + x = r.randn(1, 4, 5) + y = r.randn(3, 1, 6) + s = 'cij,cjk->cik' + self._check(s, x, y) + + def test_broadcasting_issue_2189(self): + r = rng() + x = r.randn(2, 1, 3, 3) + y = r.randn(2, 4, 3) + s = '...ij,...j' + self._check(s, x, y) + if __name__ == '__main__': absltest.main()
einsum broadcasting failure ```python import jax.numpy as jnp import numpy as np x = np.zeros((2, 1, 3, 3)) y = np.zeros((2, 4, 3)) assert np.einsum('...ij,...j', x, y).shape == (2, 4, 3) # passes assert jnp.einsum('...ij,...j', x, y).shape == (2, 4, 3) # raises an error ``` Errors with: `TypeError: dot_general requires lhs batch dimensions and rhs batch dimensions to have the same shape, got [2 4] and [2 1].` Full traceback: <details> ``` --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-5-ba41a135a1c7> in <module>() 5 y = np.zeros((2, 4, 3)) 6 assert np.einsum('...ij,...j', x, y).shape == (2, 4, 3) ----> 7 assert jnp.einsum('...ij,...j', x, y).shape == (2, 4, 3) 8 # jnp.einsum('...ij,...j', x, y) /usr/local/lib/python3.6/dist-packages/jax/numpy/lax_numpy.py in einsum(*operands, **kwargs) 2342 *operands, einsum_call=True, use_blas=True, optimize=optimize) 2343 contractions = tuple(data[:3] for data in contractions) -> 2344 return _einsum(operands, contractions, precision) 2345 2346 @_wraps(onp.einsum_path) /usr/local/lib/python3.6/dist-packages/jax/api.py in f_jitted(*args, **kwargs) 148 flat_fun, out_tree = flatten_fun(f, in_tree) 149 out = xla.xla_call(flat_fun, *args_flat, device=device, backend=backend, --> 150 name=flat_fun.__name__) 151 return tree_unflatten(out_tree(), out) 152 /usr/local/lib/python3.6/dist-packages/jax/core.py in call_bind(primitive, f, *args, **params) 603 if top_trace is None: 604 with new_sublevel(): --> 605 outs = primitive.impl(f, *args, **params) 606 else: 607 tracers = map(top_trace.full_raise, args) /usr/local/lib/python3.6/dist-packages/jax/interpreters/xla.py in _xla_call_impl(fun, device, backend, name, *args) 447 448 def _xla_call_impl(fun, *args, device, backend, name): --> 449 compiled_fun = _xla_callable(fun, device, backend, name, *map(arg_spec, args)) 450 try: 451 return compiled_fun(*args) /usr/local/lib/python3.6/dist-packages/jax/linear_util.py in memoized_fun(fun, *args) 221 fun.populate_stores(stores) 222 else: --> 223 ans = call(fun, *args) 224 cache[key] = (ans, fun.stores) 225 return ans /usr/local/lib/python3.6/dist-packages/jax/interpreters/xla.py in _xla_callable(fun, device, backend, name, *arg_specs) 464 pvals = [pe.PartialVal((aval, core.unit)) for aval in abstract_args] 465 with core.new_master(pe.StagingJaxprTrace, True) as master: --> 466 jaxpr, (pvals, consts, env) = pe.trace_to_subjaxpr(fun, master, False).call_wrapped(pvals) 467 assert not env # no subtraces here 468 del master, env /usr/local/lib/python3.6/dist-packages/jax/linear_util.py in call_wrapped(self, *args, **kwargs) 150 gen = None 151 --> 152 ans = self.f(*args, **dict(self.params, **kwargs)) 153 del args 154 while stack: /usr/local/lib/python3.6/dist-packages/jax/numpy/lax_numpy.py in _einsum(operands, contractions, precision) 2444 bdims = tuple(range(len(batch_dims))) 2445 dimension_numbers = [(lhs_cont, rhs_cont), (bdims, bdims)] -> 2446 operand = lax.dot_general(lhs, rhs, dimension_numbers, precision) 2447 deleted_names = batch_names + ''.join(contracted_names) 2448 names = (batch_names + removechars(lhs_names, deleted_names) /usr/local/lib/python3.6/dist-packages/jax/lax/lax.py in dot_general(lhs, rhs, dimension_numbers, precision) 615 return dot_general_p.bind(lhs, rhs, 616 dimension_numbers=(contract_dims, batch_dims), --> 617 precision=_canonicalize_precision(precision)) 618 619 def broadcast(operand, sizes): /usr/local/lib/python3.6/dist-packages/jax/core.py in bind(self, 
*args, **kwargs) 160 161 tracers = map(top_trace.full_raise, args) --> 162 out_tracer = top_trace.process_primitive(self, tracers, kwargs) 163 if self.multiple_results: 164 return map(full_lower, out_tracer) /usr/local/lib/python3.6/dist-packages/jax/interpreters/partial_eval.py in process_primitive(self, primitive, tracers, params) 104 tracers = map(self.instantiate_const, tracers) 105 avals = [t.aval for t in tracers] --> 106 out_aval = primitive.abstract_eval(*avals, **params) 107 if primitive.multiple_results: 108 out_tracers = [JaxprTracer(self, PartialVal((aval, unit)), None) /usr/local/lib/python3.6/dist-packages/jax/lax/lax.py in standard_abstract_eval(prim, shape_rule, dtype_rule, *args, **kwargs) 1540 return ConcreteArray(prim.impl(*[x.val for x in args], **kwargs)) 1541 elif least_specialized is ShapedArray: -> 1542 return ShapedArray(shape_rule(*args, **kwargs), dtype_rule(*args, **kwargs)) 1543 elif least_specialized is UnshapedArray: 1544 return UnshapedArray(dtype_rule(*args, **kwargs)) /usr/local/lib/python3.6/dist-packages/jax/lax/lax.py in _dot_general_shape_rule(lhs, rhs, dimension_numbers, precision) 2186 msg = ("dot_general requires lhs batch dimensions and rhs batch dimensions " 2187 "to have the same shape, got {} and {}.") -> 2188 raise TypeError(msg.format(lhs_batch_shape, rhs_batch_shape)) 2189 if tuple(sorted(lhs_batch)) != tuple(range(len(lhs_batch))): 2190 msg = ("dot_general requires lhs batch dimensions to precede contracting " TypeError: dot_general requires lhs batch dimensions and rhs batch dimensions to have the same shape, got [2 4] and [2 1]. ``` </details>
2020-03-25T19:59:57