Dataset schema (column: type and value range):

repo               string (categorical, 856 distinct values)
pull_number        int64 (3 to 127k)
instance_id        string (length 12 to 58)
issue_numbers      sequence (length 1 to 5)
base_commit        string (length 40)
patch              string (length 67 to 1.54M)
test_patch         string (length 0 to 107M)
problem_statement  string (length 3 to 307k)
hints_text         string (length 0 to 908k)
created_at         timestamp[s]
repo: google/jax
pull_number: 233
instance_id: google__jax-233
issue_numbers: ["222"]
base_commit: 04835b9ca5d032cef785c0f4741d32efbb6e649c
diff --git a/jax/random.py b/jax/random.py --- a/jax/random.py +++ b/jax/random.py @@ -245,6 +245,10 @@ def randint(key, shape, minval, maxval, dtype=onp.int32): if nbits not in (32, 64): raise TypeError("randint only accepts 32- or 64-bit dtypes.") + # if we don't have minval < maxval, just always return minval + # https://github.com/google/jax/issues/222 + maxval = lax.max(lax.add(minval, onp.array(1, dtype)), maxval) + # This algorithm is biased whenever (maxval - minval) is not a power of 2. # We generate double the number of random bits required by the dtype so as to # reduce that bias.
diff --git a/tests/random_test.py b/tests/random_test.py --- a/tests/random_test.py +++ b/tests/random_test.py @@ -149,6 +149,10 @@ def testShuffle(self, dtype): self.assertFalse(onp.all(perm1 == x)) # seems unlikely! self.assertTrue(onp.all(onp.sort(perm1) == x)) + def testIssue222(self): + x = random.randint(random.PRNGKey(10003), (), 0, 0) + assert x == 0 + if __name__ == "__main__": absltest.main()
jax.random.randint range must be valid

`random.randint` doesn't check that `minval` < `maxval`. If it's not, you get surprising results. For example, `print(random.randint(random.PRNGKey(10003), (), 0, 0))` yields -1234647498. The best thing would be to throw an error message, but if that's not practical then it'd at least be nice to have the behavior be defined.
Great point. Hmm... if the arguments are static constants then it's easy to check in Python, but if `minval` and/or `maxval` are computed in XLA then we don't have a runtime error system to hook into. Maybe in that case we could just have a less surprising behavior, like deterministically returning minval (by setting `maxval = max(minval, maxval)` or something). Any other ideas?
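For illustration, a minimal sketch of the behavior after the clamping fix in the patch above (assuming a JAX version that includes it): `maxval` is forced up to at least `minval + 1`, so a degenerate range deterministically returns `minval` instead of an arbitrary value.

```python
from jax import random

key = random.PRNGKey(10003)
# With the clamp in place, the empty range (0, 0) behaves like (0, 1),
# so the sample is always 0 rather than e.g. -1234647498.
x = random.randint(key, (), 0, 0)
print(x)  # 0
```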
created_at: 2019-01-12T21:13:16

repo: google/jax
pull_number: 234
instance_id: google__jax-234
issue_numbers: ["221", "221"]
base_commit: dfa2cb821f226df3b3f988d380426f86fdc9ff92
diff --git a/jax/api.py b/jax/api.py --- a/jax/api.py +++ b/jax/api.py @@ -240,7 +240,7 @@ def vmap(fun, in_axes=0, out_axes=0): @wraps(fun, docstr=docstr) def batched_fun(*args, **kwargs): if not isinstance(fun, lu.WrappedFun): - f = lu.wrap_init(fun) + f = lu.wrap_init(fun, kwargs) in_axes_ = (in_axes,) * len(args) if type(in_axes) is int else in_axes in_flat, in_trees = unzip2(map(pytree_to_jaxtupletree, args)) jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(f, in_trees) diff --git a/jax/lax.py b/jax/lax.py --- a/jax/lax.py +++ b/jax/lax.py @@ -391,7 +391,7 @@ def sort(operand, dimension=-1): return sort_p.bind(operand, dimension=dimension) def sort_key_val(keys, values, dimension=-1): - # TODO new sort_key_val is variadic + # TODO(mattjj): new sort_key_val is variadic result = sort_key_val_p.bind(keys, values, dimension=dimension) sorted_keys, sorted_values = result return sorted_keys, sorted_values @@ -2275,19 +2275,48 @@ def sort_key_val_jvp(primals, tangents, dimension): def sort_key_val_transpose_rule(t, keys, values, dimension): t_keys, t_values = t assert t_keys is ad_util.zero - broadcasted_iota = broadcast_in_dim( - onp.arange(keys.shape[dimension]), keys.shape, [dimension % keys.ndim]) - _, perm = sort_key_val(keys, broadcasted_iota) + iota = broadcasted_iota(onp.int32, keys.shape, dimension % keys.ndim) + _, perm = sort_key_val(keys, iota) keys_result = ad_util.zero if keys is None else None values_result = sort_key_val(perm, t_values)[1] if values is None else None return [keys_result, values_result] +def sort_key_val_batch_rule(batched_args, batch_dims, dimension): + keys, values = batched_args + keys_bdim, values_bdim = batch_dims + assert keys_bdim is not None or values_bdim is not None + if keys_bdim == values_bdim: + new_dimension = dimension + (keys_bdim <= dimension) + out = sort_key_val(keys, values, new_dimension) + return core.pack(out), keys_bdim + elif keys_bdim is not None and values_bdim is not None: + keys_trans = batching.moveaxis(keys.shape[keys_bdim], values_bdim, + keys_bdim, keys) + new_dimension = dimension + (values_bdim <= dimension) + out = sort_key_val(keys_trans, values, new_dimension) + return core.pack(out), values_bdim + elif keys_bdim is None: + broadcast_dimensions = onp.delete(onp.arange(values.ndim), values_bdim) + new_keys = broadcast_in_dim(keys, values.shape, broadcast_dimensions) + new_dimension = dimension + (values_bdim <= dimension) + out = sort_key_val(new_keys, values, new_dimension) + return core.pack(out), values_bdim + elif values_bdim is None: + broadcast_dimensions = onp.delete(onp.arange(keys.ndim), keys_bdim) + new_values = broadcast_in_dim(values, keys.shape, broadcast_dimensions) + new_dimension = dimension + (keys_bdim <= dimension) + out = sort_key_val(keys, new_values, new_dimension) + return core.pack(out), keys_bdim + else: + raise Exception # unreachable + sort_key_val_p = Primitive('sort_key_val') sort_key_val_p.def_impl(sort_key_val_impl) sort_key_val_p.def_abstract_eval(sort_key_val_abstract_eval) xla.translations[sort_key_val_p] = partial(standard_translate, 'sort_key_val') ad.primitive_jvps[sort_key_val_p] = sort_key_val_jvp ad.primitive_transposes[sort_key_val_p] = sort_key_val_transpose_rule +batching.primitive_batchers[sort_key_val_p] = sort_key_val_batch_rule def while_loop_abstract_eval(init_val, opaque_params): diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -19,6 +19,7 @@ import collections import itertools import string 
+import warnings import numpy as onp import opt_einsum @@ -235,7 +236,6 @@ def _one_to_one_binop(numpy_fn, lax_fn, promote_like=False): fabs = _one_to_one_unop(onp.fabs, lax.abs, True) bitwise_not = _one_to_one_unop(onp.bitwise_not, lax.bitwise_not) negative = _one_to_one_unop(onp.negative, lax.neg) -sort = _one_to_one_unop(onp.sort, lax.sort) sign = _one_to_one_unop(onp.sign, lax.sign) floor = _one_to_one_unop(onp.floor, lax.floor, True) @@ -1523,6 +1523,36 @@ def _argminmax(op, a, axis): mask_idxs = where(lax._eq_meet(a, op(a, axis, keepdims=True)), idxs, maxval) return min(mask_idxs, axis) + +@_wraps(onp.sort) +def sort(a, axis=-1, kind='quicksort', order=None): + if kind != 'quicksort': + warnings.warn("'kind' argument to sort is ignored.") + if order is not None: + raise ValueError("'order' argument to sort is not supported.") + + if axis is None: + return lax.sort(a.ravel(), 0) + else: + return lax.sort(a, axis % ndim(a)) + + +@_wraps(onp.argsort) +def argsort(a, axis=-1, kind='quicksort', order=None): + if kind != 'quicksort': + warnings.warn("'kind' argument to argsort is ignored.") + if order is not None: + raise ValueError("'order' argument to argsort is not supported.") + + if axis is None: + return argsort(a.ravel(), 0) + else: + axis = axis % ndim(a) + iota = lax.broadcasted_iota(onp.int64, shape(a), axis) + _, perm = lax.sort_key_val(a, iota, dimension=axis) + return perm + + ### Indexing @@ -1722,7 +1752,8 @@ def _static_idx(idx, size): def _not_implemented(fun): @_wraps(fun) def wrapped(*args, **kwargs): - raise Exception("Numpy function {} not yet implemented".format(fun)) + msg = "Numpy function {} not yet implemented" + raise NotImplementedError(msg.format(fun)) return wrapped # Build a set of all unimplemented NumPy functions.
diff --git a/tests/batching_test.py b/tests/batching_test.py --- a/tests/batching_test.py +++ b/tests/batching_test.py @@ -335,6 +335,36 @@ def testRandom(self): self.assertAllClose(ans, expected, check_dtypes=False) assert len(onp.unique(ans)) == 10 * 3 * 2 + def testSortKeyVal(self): + k = onp.arange(12)[::-1].reshape(3, 4) + v = onp.random.RandomState(0).permutation(12).reshape(3, 4) + + sk, sv = vmap(partial(lax.sort_key_val, dimension=0), (0, 0))(k, v) + self.assertAllClose(sk, k[:, ::-1], check_dtypes=True) + self.assertAllClose(sv, v[:, ::-1], check_dtypes=True) + + sk, sv = vmap(partial(lax.sort_key_val, dimension=0), (1, 1), 1)(k, v) + self.assertAllClose(sk, k[::-1, :], check_dtypes=True) + self.assertAllClose(sv, v[::-1, :], check_dtypes=True) + + sk, sv = vmap(partial(lax.sort_key_val, dimension=0), (0, 1))(k, v.T) + self.assertAllClose(sk, k[:, ::-1], check_dtypes=True) + self.assertAllClose(sv, v[:, ::-1], check_dtypes=True) + + sk, sv = vmap(partial(lax.sort_key_val, dimension=0), (1, 0))(k.T, v) + self.assertAllClose(sk, k[:, ::-1], check_dtypes=True) + self.assertAllClose(sv, v[:, ::-1], check_dtypes=True) + + sk, sv = vmap(partial(lax.sort_key_val, dimension=0), (None, 0))(k[0], v) + self.assertAllClose(sk, onp.broadcast_to(k[0, ::-1], (3, 4)), + check_dtypes=True) + self.assertAllClose(sv, v[:, ::-1], check_dtypes=True) + + sk, sv = vmap(partial(lax.sort_key_val, dimension=0), (1, None))(k.T, v[0]) + self.assertAllClose(sk, k[:, ::-1], check_dtypes=True) + self.assertAllClose(sv, onp.broadcast_to(v[0, ::-1], (3, 4)), + check_dtypes=True) + if __name__ == '__main__': absltest.main() diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -932,7 +932,6 @@ def testRot90(self, shape, dtype, k, axes, rng): # TODO(mattjj): test infix operator overrides def testRavel(self): - # TODO(mattjj): support this method-based syntax? rng = onp.random.RandomState(0) args_maker = lambda: [rng.randn(3, 4).astype("float32")] self._CompileAndCheck(lambda x: x.ravel(), args_maker, check_dtypes=True) @@ -959,6 +958,54 @@ def testArangeOnFloats(self): ans = lnp.arange(0.0, 1.0, 0.1) self.assertAllClose(expected, ans, check_dtypes=True) + def testSortManually(self): + # manual tests for sort are nice because we don't have to worry about ties. + # lax.sort is tested combinatorially. 
+ ans = lnp.sort(onp.array([16, 15, 23, 42, 8, 4])) + expected = onp.array([4, 8, 15, 16, 23, 42]) + self.assertAllClose(expected, ans, check_dtypes=True) + + a = onp.array([[1, 4], [3, 1]]) + ans = lnp.sort(a, axis=None) + expected = onp.array([[1, 1, 3, 4]]) + self.assertAllClose(expected, ans, check_dtypes=True) + + a = onp.array([[1, 4], [3, 1]]) + ans = lnp.sort(a) # last axis + expected = onp.array([[1, 4], [1, 3]]) + self.assertAllClose(expected, ans, check_dtypes=True) + + a = onp.array([[1, 4], [3, 1]]) + ans = lnp.sort(a, axis=0) + expected = onp.array([[1, 1], [3, 4]]) + self.assertAllClose(expected, ans, check_dtypes=True) + + def testArgsortManually(self): + x = onp.array([16, 15, 23, 42, 8, 4]) + ans = lnp.argsort(x) + expected = onp.argsort(x) + self.assertAllClose(expected, ans, check_dtypes=False) + + x = onp.array([[16, 15, 23], [42, 8, 4]]) + ans = lnp.argsort(x, axis=0) + expected = onp.argsort(x, axis=0) + self.assertAllClose(expected, ans, check_dtypes=False) + + x = onp.array([[16, 15, 23], [42, 8, 4]]) + ans = lnp.argsort(x, axis=1) + expected = onp.argsort(x, axis=1) + self.assertAllClose(expected, ans, check_dtypes=False) + + x = onp.array([[16, 15, 23], [42, 8, 4]]) + ans = lnp.argsort(x, axis=None) + expected = onp.argsort(x, axis=None) + self.assertAllClose(expected, ans, check_dtypes=False) + + x = onp.array([[16, 15, 23], [42, 8, 4]]) + ans = lnp.argsort(x) + expected = onp.argsort(x) + self.assertAllClose(expected, ans, check_dtypes=False) + if __name__ == "__main__": absltest.main()
Batching rule for 'sort_key_val' not implemented

I have implemented argsort like this:

```
@jax.numpy.lax_numpy._wraps(onp.argsort)
def argsort(a, axis=-1, kind='quicksort', order=None):
    assert(a.ndim == 1)  # starting simple
    vals = np.arange(len(a), dtype=int)
    sorted_keys, sorted_vals = jax.lax.sort_key_val(keys=a, values=vals)
    return sorted_vals
```

This works: `argsort(np.ones(3))`

And I can also write an argsort that works with 2d arrays, but instead I would like to use vmap so I don't need to rewrite my code with a batch dimension. But using vmap gives me an error:

`jax.vmap(argsort)(np.ones((3, 3)))`
`NotImplementedError: Batching rule for 'sort_key_val' not implemented`

So I am trying to implement the batch rule for `jax.lax.sort_key_val`, but it is more complicated than e.g. https://github.com/google/jax/pull/82 because `jax.lax.sort_key_val` returns multiple values, and the batching code assumes only a single return value. I've tried a few things but it's tricky. Here's an attempt:

```
def sort_key_val_batched(batched_args, batch_dims, dimension=-1):
    keys, values = batched_args
    sorted_keys, sorted_values = jax.lax.sort_key_val_p.bind(
        keys=keys, values=values, dimension=dimension
    )
    return sorted_keys, 0

jax.batching.primitive_batchers[jax.lax.sort_key_val_p] = sort_key_val_batched
```

But:

`jax.vmap(argsort)(np.ones((3, 3)))`
`RuntimeError: Invalid argument: Sort keys and values dimensions must match. Keys shape is: f32[10,3], Values shape (operand index 1) is: s32[3]:`

In other words the batching rule receives the full 2d keys array but only one row at a time of the values array. Not sure how to handle this.

One potential workaround would be to use `np.sort(order=...)` to sort rows in place where one column is the values to sort by, and the other column is indices:

```
@jax.numpy.lax_numpy._wraps(onp.argsort)
def jax_argsort(a, axis=-1, kind='quicksort', order=None):
    assert(a.ndim == 1)  # starting simple
    inds = np.arange(len(a), dtype=int)
    inds = np.expand_dims(inds, 1)
    a = np.expand_dims(a, 1)
    a = a.view('i8,i8')
    tmp = np.concatenate((a, inds), axis=1)
    tmp_sorted = np.sort(a, order=['f0'])
    sort_inds = tmp_sorted[:, 1]
    return sort_inds
```

But jax does not support the order keyword to np.sort. Any ideas for a solution or workaround?
Thanks for digging into this! You're on the right track, but you're right that `sort_key_val` is especially tricky. I don't have any specific suggestions yet, but I'll try to poke at it this week when I can.
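For illustration, a minimal sketch of the approach the merged patch takes for `argsort` (sort an index array alongside the values), assuming a JAX build that includes the `sort_key_val` batching rule added above; the function and variable names here are illustrative, not from the patch:

```python
import jax.numpy as np  # the issue's convention for jax.numpy
from jax import lax, vmap

def argsort_1d(a):
    # Pair each element with its position and sort by value;
    # the permuted positions are the argsort result.
    iota = lax.iota(np.int32, a.shape[0])
    _, perm = lax.sort_key_val(a, iota, dimension=0)
    return perm

x = np.array([[3., 1., 2.],
              [0., 5., 4.]])
print(vmap(argsort_1d)(x))
# [[1 2 0]
#  [0 2 1]]
```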
created_at: 2019-01-13T19:17:30

repo: google/jax
pull_number: 236
instance_id: google__jax-236
issue_numbers: ["220"]
base_commit: 1ea77af36466ab6137ea6b10b6b486ed67e468ba
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -1553,6 +1553,21 @@ def argsort(a, axis=-1, kind='quicksort', order=None): return perm +@_wraps(onp.take_along_axis) +def take_along_axis(arr, indices, axis): + if axis is None and ndim(arr) != 1: + return take_along_axis(arr.ravel(), indices.ravel(), 0) + elif ndim(arr) == 1: + return lax.index_take(arr, (indices,), (0,)) + else: + all_indices = [lax.broadcasted_iota(_dtype(indices), shape(indices), i) + for i in range(ndim(arr))] + all_indices[axis] = indices + all_indices = tuple(map(ravel, all_indices)) + out_flat = lax.index_take(arr, all_indices, tuple(range(ndim(arr)))) + return reshape(out_flat, shape(indices)) + + ### Indexing
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -1006,6 +1006,25 @@ def testArgsortManually(self): expected = onp.argsort(x) self.assertAllClose(expected, ans, check_dtypes=False) + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": "_{}_axis={}".format( + jtu.format_shape_dtype_string(shape, dtype), axis), + "rng": rng, "shape": shape, "dtype": dtype, "axis": axis} + for shape in [(3,), (3, 4), (3, 4, 5)] + for axis in itertools.chain(range(len(shape)), [-1], [None]) + for dtype in default_dtypes + for rng in [jtu.rand_default()])) + def testTakeAlongAxis(self, shape, dtype, axis, rng): + def args_maker(): + x = rng(shape, dtype) + i = onp.argsort(x, axis=axis) + return x, i + + lnp_op = lambda x, i: lnp.take_along_axis(x, i, axis=axis) + onp_op = lambda x, i: onp.take_along_axis(x, i, axis=axis) + self._CheckAgainstNumpy(lnp_op, onp_op, args_maker, check_dtypes=True) + self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True) + if __name__ == "__main__": absltest.main()
Support for numpy.take_along_axis

Requesting support for [this Numpy primitive](https://docs.scipy.org/doc/numpy-1.15.0/reference/generated/numpy.take_along_axis.html#numpy.take_along_axis).
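For illustration, a minimal usage sketch of the requested function, mirroring the pattern the added test exercises (argsort indices fed back through `take_along_axis`); it assumes a JAX version that includes this patch:

```python
import jax.numpy as np  # the issue's convention for jax.numpy

x = np.array([[3., 1., 2.],
              [6., 5., 4.]])
idx = np.argsort(x, axis=1)
# Gather each row of x according to that row's own index order.
print(np.take_along_axis(x, idx, axis=1))
# [[1. 2. 3.]
#  [4. 5. 6.]]
```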
created_at: 2019-01-13T20:27:25

repo: google/jax
pull_number: 257
instance_id: google__jax-257
issue_numbers: ["255"]
base_commit: a223757110ad5ffab90c7d5cb440bc8a82974a6a
diff --git a/jax/lax.py b/jax/lax.py --- a/jax/lax.py +++ b/jax/lax.py @@ -831,7 +831,7 @@ def _brcast_to(x, shape): _f32 = {onp.float32} _float = {onp.floating} -_complex = {onp.complex} +_complex = {onp.complexfloating} _complex_elem_types = {onp.float32, onp.float64} _int = {onp.integer} _bool = {onp.bool_}
NumPy 1.16.0 causes warnings

```
jax/lax.py:769: FutureWarning: Conversion of the second argument of issubdtype from complex to np.complexfloating is deprecated. In future, it will be treated as np.complex128 == np.dtype(complex).type.
```

(I don't have code to reproduce it right now, because any attempt to create a minimal example makes the warning disappear even though it doesn't make any sense.)
Thanks for catching this! That should be enough information for us to track it down.

For me even this simple example gives that error:

```
import jax.numpy as np
from jax import grad, jit

f = lambda xx: np.sin(xx)
gf = jit(grad(f))
print(gf(1.))
```

```
0.5403023
/global/u2/h/hejia/jax/jax/lax.py:749: FutureWarning: Conversion of the second argument of issubdtype from `complex` to `np.complexfloating` is deprecated. In future, it will be treated as `np.complex128 == np.dtype(complex).type`.
  if not any(onp.issubdtype(aval.dtype, t) for t in accepted_dtypes):
/global/u2/h/hejia/jax/jax/lib/xla_bridge.py:146: UserWarning: No GPU found, falling back to CPU.
  warnings.warn('No GPU found, falling back to CPU.')
```

I'm using a jupyter notebook. The "No GPU" warning disappears if I run the cell again, but the "complexfloating" warning is always there.
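For illustration, a small sketch of the dtype check the patch changes: comparing against the Python builtin `complex` is what triggers the NumPy 1.16 warning, while the abstract scalar type `np.complexfloating` is the supported spelling. This is plain NumPy, not JAX code:

```python
import numpy as np

# Deprecated pattern the warning points at (left commented out; `complex`
# here is the Python builtin):
# np.issubdtype(np.complex64, complex)   # FutureWarning on NumPy 1.16

# The replacement used in the patch: the abstract complex scalar type.
assert np.issubdtype(np.complex64, np.complexfloating)
assert not np.issubdtype(np.float32, np.complexfloating)
```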
created_at: 2019-01-17T18:43:53

repo: google/jax
pull_number: 285
instance_id: google__jax-285
issue_numbers: ["273"]
base_commit: 57fc3a4cca22a855d983277e78e47b7bac1d0b2a
diff --git a/jax/experimental/stax.py b/jax/experimental/stax.py --- a/jax/experimental/stax.py +++ b/jax/experimental/stax.py @@ -178,9 +178,9 @@ def apply_fun(params, inputs, rng=None): def _normalize_by_window_size(dims, strides, padding): def rescale(outputs, inputs): - one = np.ones(inputs.shape[1:3], dtype=inputs.dtype) + one = np.ones(inputs.shape[1:-1], dtype=inputs.dtype) window_sizes = lax.reduce_window(one, 0., lax.add, dims, strides, padding) - return outputs / window_sizes + return outputs / window_sizes[..., np.newaxis] return rescale AvgPool = _pooling_layer(lax.add, 0., _normalize_by_window_size)
diff --git a/tests/stax_test.py b/tests/stax_test.py --- a/tests/stax_test.py +++ b/tests/stax_test.py @@ -100,16 +100,20 @@ def testReluShape(self, input_shape): @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_window_shape={}_padding={}_strides={}_input_shape={}" - .format(window_shape, padding, strides, input_shape), + "_maxpool={}" + .format(window_shape, padding, strides, input_shape, + max_pool), "window_shape": window_shape, "padding": padding, "strides": strides, - "input_shape": input_shape} + "input_shape": input_shape, "max_pool": max_pool} for window_shape in [(1, 1), (2, 3)] for padding in ["VALID"] for strides in [None, (2, 1)] - for input_shape in [(2, 5, 6, 1)])) - def testPoolingShape(self, window_shape, padding, strides, input_shape): - init_fun, apply_fun = stax.MaxPool(window_shape, padding=padding, - strides=strides) + for input_shape in [(2, 5, 6, 1)] + for max_pool in [False, True])) + def testPoolingShape(self, window_shape, padding, strides, input_shape, + max_pool): + layer = stax.MaxPool if max_pool else stax.AvgPool + init_fun, apply_fun = layer(window_shape, padding=padding, strides=strides) _CheckShapeAgreement(self, init_fun, apply_fun, input_shape) @parameterized.named_parameters(jtu.cases_from_list(
AvgPool only works as global average pooling

```python3
from jax.experimental import stax
import jax.numpy as np

init_fn, apply_fun = stax.AvgPool((2, 2), strides=(2, 2))
output_shape, params = init_fn((-1, 32, 32, 3))
print(output_shape)
print(params)
apply_fun(params, np.zeros((100, 32, 32, 3)))
```

This should work but instead it fails with this error:

```
(-1, 16, 16, 3)
()
Traceback (most recent call last):
  File "minimal_example.py", line 9, in <module>
    apply_fun(params, np.zeros((100, 32, 32, 3)))
  File "/gpfs01/bethge/home/jrauber/PhD/063_jax/jax/jax/experimental/stax.py", line 172, in apply_fun
    return rescale(out, inputs) if rescale else out
  File "/gpfs01/bethge/home/jrauber/PhD/063_jax/jax/jax/experimental/stax.py", line 183, in rescale
    return outputs / window_sizes
  File "/gpfs01/bethge/home/jrauber/PhD/063_jax/jax/jax/numpy/lax_numpy.py", line 350, in true_divide
    x1, x2 = _promote_shapes(x1, x2)
  File "/gpfs01/bethge/home/jrauber/PhD/063_jax/jax/jax/numpy/lax_numpy.py", line 134, in _promote_shapes
    nd = len(_broadcast_shapes(*shapes))
  File "/gpfs01/bethge/home/jrauber/PhD/063_jax/jax/jax/util.py", line 161, in memoized_fun
    ans = cache[key] = fun(*args, **kwargs)
  File "/gpfs01/bethge/home/jrauber/PhD/063_jax/jax/jax/numpy/lax_numpy.py", line 151, in _broadcast_shapes
    .format(tuple(map(tuple, shapes))))
ValueError: Incompatible shapes for broadcasting: ((100, 16, 16, 3), (1, 1, 16, 16))
```

Note that it also doesn't work if stride is 1. The only case in which `AvgPool` works seems to be if stride is 1 and the pooling size is identical to the input size (i.e. global average pooling). `MaxPool` seems to work fine.
Thanks for catching this!
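For illustration, a plain-NumPy sketch of the shape problem and of the fix the patch applies: the per-window element counts are computed over the spatial dimensions only, so they need a trailing channel axis before they can divide an NHWC pooled output. The shapes below are taken from the report; everything else is illustrative:

```python
import numpy as onp

outputs = onp.zeros((100, 16, 16, 3))       # pooled NHWC output
window_sizes = onp.full((16, 16), 4.0)      # one count per spatial window

# Broken: trailing axes (16, 16) vs (16, 3) cannot broadcast.
# outputs / window_sizes

# The patch's fix: add a trailing axis so the counts broadcast across
# batch and channel dimensions.
averaged = outputs / window_sizes[..., onp.newaxis]
print(averaged.shape)  # (100, 16, 16, 3)
```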
created_at: 2019-01-28T14:24:37

repo: google/jax
pull_number: 298
instance_id: google__jax-298
issue_numbers: ["296"]
base_commit: 7eb8579c8e8a2dbfae86b1b5af9f31e1460d6937
diff --git a/jax/lax.py b/jax/lax.py --- a/jax/lax.py +++ b/jax/lax.py @@ -370,6 +370,14 @@ def _reduce_window_sum(operand, window_dimensions, window_strides, padding): window_strides=tuple(window_strides), padding=padding, input_shape=operand.shape) +def _reduce_window_prod(operand, window_dimensions, window_strides, padding): + init_value = _const(operand, 1) + jaxpr, consts = _reduction_jaxpr(mul, init_value) + return reduce_window_p.bind( + operand, init_value, jaxpr=jaxpr, consts=consts, + window_dimensions=tuple(window_dimensions), + window_strides=tuple(window_strides), padding=padding) + def _reduce_window_max(operand, window_dimensions, window_strides, padding): return reduce_window_max_p.bind( operand, window_dimensions=tuple(window_dimensions), diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -882,6 +882,56 @@ def nan_reduction(a, axis=None, out=None, keepdims=False, **kwargs): nansum = _make_nan_reduction(onp.nansum, sum, 0, nan_if_all_nan=False) nanprod = _make_nan_reduction(onp.nanprod, prod, 1, nan_if_all_nan=False) + +def _make_cumulative_reduction(onp_reduction, window_reduce, init_val, + squash_nan=False): + @_wraps(onp_reduction) + def cumulative_reduction(a, axis=None, dtype=None): + if axis is None or isscalar(a): + a = ravel(a) + axis = 0 + + a_shape = list(shape(a)) + num_dims = len(a_shape) + + if axis < 0: + axis = axis + num_dims + if axis < 0 or axis >= num_dims: + raise ValueError( + "axis {} is out of bounds for array of dimension {}".format( + axis, num_dims)) + + if squash_nan: + a = where(isnan(a), _constant_like(a, init_val), a) + + if dtype: + a = lax.convert_element_type(a, dtype) + + if a_shape[axis] == 0: + return a + + padding = [(0, 0, 0)] * num_dims + padding[axis] = (a_shape[axis] - 1, 0, 0) + a = lax.pad(a, _constant_like(a, init_val), padding) + strides = [1] * num_dims + window_dims = [1] * num_dims + window_dims[axis] = a_shape[axis] + return window_reduce( + a, window_dims, strides, xla_bridge.get_xla_client().PaddingType.VALID) + + return cumulative_reduction + + +cumsum = _make_cumulative_reduction( + onp.cumsum, lax._reduce_window_sum, 0, squash_nan=False) +cumprod = _make_cumulative_reduction( + onp.cumprod, lax._reduce_window_prod, 1, squash_nan=False) +nancumsum = _make_cumulative_reduction( + onp.nancumsum, lax._reduce_window_sum, 0, squash_nan=True) +nancumprod = _make_cumulative_reduction( + onp.nancumprod, lax._reduce_window_prod, 1, squash_nan=True) + + ### Array-creation functions @_wraps(onp.pad)
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -529,6 +529,29 @@ def testRepeat(self, axis, shape, dtype, repeats, rng): self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True) self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True) + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": "op={}_shape=[{}]_axis={}_out_dtype={}".format( + op, jtu.format_shape_dtype_string(shape, dtype), axis, out_dtype), + "axis": axis, "shape": shape, "dtype": dtype, "out_dtype": out_dtype, + "rng": jtu.rand_default(), "lnp_op": getattr(lnp, op), + "onp_op": getattr(onp, op)} + for op in ["cumsum", "cumprod"] + # TODO(phawkins): replace both type lists with default_dtypes after a + # Jaxlib update includes + # https://github.com/google/jax/commit/86f5d189cf563b027c3cd00eea38072c003905c8 + for dtype in [onp.float32, onp.int32] + for out_dtype in [onp.float32, onp.int32] + for shape in all_shapes + for axis in [None] + list(range(-len(shape), len(shape))))) + def testCumSumProd(self, axis, shape, dtype, out_dtype, onp_op, lnp_op, rng): + onp_fun = lambda arg: onp_op(arg, axis=axis, dtype=out_dtype) + lnp_fun = lambda arg: lnp_op(arg, axis=axis, dtype=out_dtype) + + args_maker = lambda: [rng(shape, dtype)] + + self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True) + self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True) + @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_dtype={}_m={}_n={}_k={}".format( onp.dtype(dtype).name, m, n, k),
np.cumsum not implemented

If the JAX version of np.cumsum were prioritized as a function to implement, I'd appreciate it. I've implemented my own for now. Thank you, -David

Exception: Numpy function <function cumsum at 0x7f600be74a60> not yet implemented
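For illustration, a plain-NumPy sketch of the idea the patch builds on: a cumulative sum along an axis is a sliding-window sum over the input padded on the low side with the identity element (the patch expresses the window sum with `lax._reduce_window_sum`). The helper name here is illustrative:

```python
import numpy as onp

def cumsum_via_windows(a):
    # Pad with (n - 1) zeros in front, then take the length-n window sum
    # that ends at each position.
    n = a.shape[0]
    padded = onp.concatenate([onp.zeros(n - 1, a.dtype), a])
    return onp.array([padded[i:i + n].sum() for i in range(n)])

x = onp.array([1, 2, 3, 4])
print(cumsum_via_windows(x))  # [ 1  3  6 10]
print(onp.cumsum(x))          # [ 1  3  6 10]
```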
created_at: 2019-02-01T00:00:21

repo: google/jax
pull_number: 303
instance_id: google__jax-303
issue_numbers: ["301"]
base_commit: 670f14a2eecd7000b72da9c716faf6062cb417d8
diff --git a/jax/lax.py b/jax/lax.py --- a/jax/lax.py +++ b/jax/lax.py @@ -42,7 +42,7 @@ from .interpreters import ad from .interpreters import batching from .interpreters import parallel -from .util import curry, safe_zip, unzip2, prod +from .util import curry, memoize, safe_zip, unzip2, prod from .tree_util import build_tree from .lib import xla_bridge @@ -52,6 +52,23 @@ _min = builtins.max _reduce = six.moves.reduce + +@memoize +def broadcast_shapes(*shapes): + """Returns the shape that results from NumPy broadcasting of `shapes`.""" + if len(shapes) == 1: + return shapes[0] + ndim = _max(len(shape) for shape in shapes) + shapes = onp.array([(1,) * (ndim - len(shape)) + shape for shape in shapes]) + min_shape = onp.min(shapes, axis=0) + max_shape = onp.max(shapes, axis=0) + result_shape = onp.where(min_shape == 0, 0, max_shape) + if not onp.all((shapes == result_shape) | (shapes == 1)): + raise ValueError("Incompatible shapes for broadcasting: {}" + .format(tuple(map(tuple, shapes)))) + return tuple(result_shape) + + def identity(x): return x ### traceables @@ -97,8 +114,19 @@ def mul(x, y): return mul_p.bind(x, y) def div(x, y): return div_p.bind(x, y) def rem(x, y): return rem_p.bind(x, y) -def max(x, y): return max_p.bind(x, y) -def min(x, y): return min_p.bind(x, y) +def max(x, y): + """Elementwise maximum. + + For complex numbers, uses a lexicographic comparison on the + `(real, imaginary)` pairs.""" + return max_p.bind(x, y) + +def min(x, y): + """Elementwise minimum. + + For complex numbers, uses a lexicographic comparison on the + `(real, imaginary)` pairs.""" + return min_p.bind(x, y) def shift_left(x, y): return shift_left_p.bind(x, y) def shift_right_arithmetic(x, y): return shift_right_arithmetic_p.bind(x, y) @@ -312,7 +340,7 @@ def _get_monoid_reducer(monoid_op, x): return aval.val == _get_min_identity(aval.dtype) and _reduce_and def _get_max_identity(dtype): - if onp.issubdtype(dtype, onp.floating): + if onp.issubdtype(dtype, onp.inexact): return onp.array(-onp.inf, dtype) elif onp.issubdtype(dtype, onp.integer): return onp.array(onp.iinfo(dtype).min, dtype) @@ -320,7 +348,7 @@ def _get_max_identity(dtype): return onp.array(False, onp.bool_) def _get_min_identity(dtype): - if onp.issubdtype(dtype, onp.floating): + if onp.issubdtype(dtype, onp.inexact): return onp.array(onp.inf, dtype) elif onp.issubdtype(dtype, onp.integer): return onp.array(onp.iinfo(dtype).max, dtype) @@ -809,10 +837,11 @@ def broadcasting_shape_rule(name, *avals): return tuple(result_shape) -def binop(result_dtype, accepted_dtypes, name): +def binop(result_dtype, accepted_dtypes, name, translation_rule=None): dtype_rule = partial(binop_dtype_rule, result_dtype, accepted_dtypes, name) shape_rule = partial(broadcasting_shape_rule, name) - prim = standard_primitive(shape_rule, dtype_rule, name) + prim = standard_primitive(shape_rule, dtype_rule, name, + translation_rule=translation_rule) batching.defbroadcasting(prim) parallel.defbroadcasting(prim) return prim @@ -999,12 +1028,39 @@ def div_transpose_rule(cotangent, x, y): lambda g, x, y: mul(neg(g), floor(div(x, y)))) -max_p = standard_binop([_any, _any], 'max') +def _broadcasting_select(c, which, x, y): + """Wrapper around XLA `Select` that broadcasts its arguments.""" + which_shape, x_shape, y_shape = ( + c.GetShape(t).dimensions() for t in (which, x, y)) + out_shape = broadcast_shapes(which_shape, x_shape, y_shape) + bcast_dims = lambda shape: tuple(range(len(out_shape) - len(shape), + len(out_shape))) + which = c.BroadcastInDim(which, out_shape, 
bcast_dims(which_shape)) + x = c.BroadcastInDim(x, out_shape, bcast_dims(x_shape)) + y = c.BroadcastInDim(y, out_shape, bcast_dims(y_shape)) + return c.Select(which, x, y) + + +def _minmax_translation_rule(c, x, y, minmax=None, cmp=None): + dtype = c.GetShape(x).numpy_dtype() + if onp.issubdtype(dtype, onp.complexfloating): + comparator = cmp(c) + rx = c.Real(x) + ry = c.Real(y) + return _broadcasting_select( + c, c.Select(c.Eq(rx, ry), comparator(c.Imag(x), c.Imag(y)), + comparator(rx, ry)), + x, y) + return minmax(c)(x, y) + +max_p = standard_binop([_any, _any], 'max', translation_rule=partial( + _minmax_translation_rule, minmax=lambda c: c.Max, cmp=lambda c: c.Gt)) ad.defjvp2(max_p, lambda g, ans, x, y: mul(_brcast(g, y), _balanced_eq(x, ans, y)), lambda g, ans, x, y: mul(_brcast(g, x), _balanced_eq(y, ans, x))) -min_p = standard_binop([_any, _any], 'min') +min_p = standard_binop([_any, _any], 'min', translation_rule=partial( + _minmax_translation_rule, minmax=lambda c: c.Min, cmp=lambda c: c.Lt)) ad.defjvp2(min_p, lambda g, ans, x, y: mul(_brcast(g, y), _balanced_eq(x, ans, y)), lambda g, ans, x, y: mul(_brcast(g, x), _balanced_eq(y, ans, x))) @@ -1159,7 +1215,7 @@ def conv_general_dilated_batch_rule( # convolution isn't the first dimension. if lhs_dim[0] != 0 or out_dim[0] != 0: raise NotImplementedError - + lhs = batching.move_dim_to_front(lhs, lhs_bdim) batched_size = lhs.shape[0] n_size = lhs.shape[1] diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -132,27 +132,10 @@ def _promote_shapes(*args): return args else: shapes = [shape(arg) for arg in args] - nd = len(_broadcast_shapes(*shapes)) + nd = len(lax.broadcast_shapes(*shapes)) return [lax.reshape(arg, (1,) * (nd - len(shp)) + shp) if len(shp) != nd else arg for arg, shp in zip(args, shapes)] - -@memoize -def _broadcast_shapes(*shapes): - """Apply Numpy broadcasting rules to the given shapes.""" - if len(shapes) == 1: - return shapes[0] - ndim = _max(len(shape) for shape in shapes) - shapes = onp.array([(1,) * (ndim - len(shape)) + shape for shape in shapes]) - min_shape = onp.min(shapes, axis=0) - max_shape = onp.max(shapes, axis=0) - result_shape = onp.where(min_shape == 0, 0, max_shape) - if not onp.all((shapes == result_shape) | (shapes == 1)): - raise ValueError("Incompatible shapes for broadcasting: {}" - .format(tuple(map(tuple, shapes)))) - return tuple(result_shape) - - def _promote_dtypes(*args): """Convenience function to apply Numpy argument dtype promotion.""" # TODO(dougalm,mattjj): This is a performance bottleneck. Consider memoizing. @@ -292,6 +275,8 @@ def _one_to_one_binop(numpy_fn, lax_fn, promote_like=False): subtract = _one_to_one_binop(onp.subtract, lax.sub) power = _one_to_one_binop(onp.power, lax.pow, True) arctan2 = _one_to_one_binop(onp.arctan2, lax.atan2, True) +minimum = _one_to_one_binop(onp.minimum, lax.min) +maximum = _one_to_one_binop(onp.maximum, lax.max) def _comparison_op(numpy_fn, lax_fn): @@ -312,24 +297,6 @@ def fn(x, y): less_equal = _comparison_op(onp.less_equal, lax.le) less = _comparison_op(onp.less, lax.lt) -def _minmax_op(numpy_fn, lax_fn, lax_cmp_fn): - def fn(x, y): - x, y = _promote_args(numpy_fn.__name__, x, y) - # Comparison on complex types are defined as a lexicographic ordering on - # the (real, imag) pair. 
- if issubdtype(_dtype(x), complexfloating): - rx = lax.real(x) - ry = lax.real(y) - return where( - lax.select(lax.eq(rx, ry), lax_cmp_fn(lax.imag(x), lax.imag(y)), - lax_cmp_fn(rx, ry)), - x, y) - return lax_fn(x, y) - return _wraps(numpy_fn)(fn) - -maximum = _minmax_op(onp.maximum, lax.max, lax.gt) -minimum = _minmax_op(onp.minimum, lax.min, lax.lt) - def _logical_op(np_op, bitwise_op): @_wraps(np_op) @@ -632,7 +599,7 @@ def broadcast_arrays(*args): if len(set(shapes)) == 1: return [arg if isinstance(arg, ndarray) or isscalar(arg) else array(arg) for arg in args] - result_shape = _broadcast_shapes(*shapes) + result_shape = lax.broadcast_shapes(*shapes) return [broadcast_to(arg, result_shape) for arg in args] @@ -642,7 +609,7 @@ def broadcast_to(arr, shape): if _shape(arr) != shape: # TODO(mattjj): revise this to call lax.broadcast_in_dim rather than # lax.broadcast and lax.transpose - _broadcast_shapes(shape, _shape(arr)) # error checking + lax.broadcast_shapes(shape, _shape(arr)) # error checking nlead = len(shape) - len(_shape(arr)) diff, = onp.where(onp.not_equal(shape[nlead:], _shape(arr))) @@ -675,11 +642,11 @@ def clip(a, a_min=None, a_max=None): if a_min is not None: if _dtype(a_min) != _dtype(a): a_min = lax.convert_element_type(a_min, _dtype(a)) - a = maximum(a_min, a) + a = lax.max(a_min, a) if a_max is not None: if _dtype(a_max) != _dtype(a): a_max = lax.convert_element_type(a_max, _dtype(a)) - a = minimum(a_max, a) + a = lax.min(a_max, a) return a @@ -817,8 +784,8 @@ def _reduction_init_val(a, init_val): sum = _make_reduction(onp.sum, lax.add, 0) prod = _make_reduction(onp.prod, lax.mul, 1) -amax = max = _make_reduction(onp.max, maximum, -onp.inf) -amin = min = _make_reduction(onp.min, minimum, onp.inf) +amax = max = _make_reduction(onp.max, lax.max, -onp.inf) +amin = min = _make_reduction(onp.min, lax.min, onp.inf) all = alltrue = _make_reduction(onp.all, lax.bitwise_and, True, _cast_to_bool) any = sometrue = _make_reduction(onp.any, lax.bitwise_or, False, _cast_to_bool) @@ -1303,7 +1270,7 @@ def matmul(a, b): # pylint: disable=missing-docstring b = lax.reshape(b, shape(b) + (1,)) if b_is_vec else b a, b = _promote_dtypes(a, b) - batch_shape = _broadcast_shapes(shape(a)[:-2], shape(b)[:-2]) + batch_shape = lax.broadcast_shapes(shape(a)[:-2], shape(b)[:-2]) a = broadcast_to(a, batch_shape + shape(a)[-2:]) b = broadcast_to(b, batch_shape + shape(b)[-2:]) batch_dims = tuple(range(len(batch_shape)))
diff --git a/jax/test_util.py b/jax/test_util.py --- a/jax/test_util.py +++ b/jax/test_util.py @@ -108,7 +108,7 @@ def rand_like(rng, x): dtype = _dtype(x) randn = lambda: onp.asarray(rng.randn(*shape), dtype=dtype) if onp.issubdtype(dtype, onp.complexfloating): - return randn() + 1.0j * randn() + return randn() + dtype.type(1.0j) * randn() else: return randn() diff --git a/tests/lax_test.py b/tests/lax_test.py --- a/tests/lax_test.py +++ b/tests/lax_test.py @@ -53,6 +53,7 @@ def num_float_bits(dtype): float_dtypes = [onp.float32, onp.float64] complex_dtypes = [onp.complex64, onp.complex128] +inexact_dtypes = float_dtypes + complex_dtypes int_dtypes = [onp.int32, onp.int64] bool_dtypes = [onp.bool_] default_dtypes = float_dtypes + int_dtypes @@ -122,8 +123,8 @@ def op_record(op, nargs, dtypes, rng, tol=1e-5): op_record(lax.div, 2, default_dtypes + complex_dtypes, jtu.rand_nonzero()), op_record(lax.rem, 2, default_dtypes, jtu.rand_nonzero()), - op_record(lax.max, 2, default_dtypes, jtu.rand_small()), - op_record(lax.min, 2, default_dtypes, jtu.rand_small()), + op_record(lax.max, 2, all_dtypes, jtu.rand_small()), + op_record(lax.min, 2, all_dtypes, jtu.rand_small()), op_record(lax.eq, 2, all_dtypes, jtu.rand_some_equal()), op_record(lax.ne, 2, all_dtypes, jtu.rand_small()), @@ -1976,9 +1977,9 @@ def testTransposeGrad(self, shape, dtype, perm, rng): "op": op, "init_val": init_val, "shape": shape, "dtype": dtype, "dims": dims, "rng": rng} for init_val, op, dtypes in [ - (0, lax.add, float_dtypes), - (-onp.inf, lax.max, float_dtypes), - (onp.inf, lax.min, float_dtypes), + (0, lax.add, inexact_dtypes), + (-onp.inf, lax.max, inexact_dtypes), + (onp.inf, lax.min, inexact_dtypes), ] for dtype in dtypes for shape, dims in [
grad bug: Forward-mode differentiation rule for 'reduce' not implemented I implemented the transformer in jax and got the following mysterious exception ``` File "jax_transformer.py", line 92, in _attn W_bhtt = stax.softmax(W_bhtt, axis=-1) File "/Users/joschu/Src/jax/jax/experimental/stax.py", line 52, in softmax unnormalized = np.exp(x - x.max(axis, keepdims=True)) File "/Users/joschu/Src/jax/jax/numpy/lax_numpy.py", line 771, in reduction result = lax.reduce(a, _reduction_init_val(a, init_val), op, dims) File "/Users/joschu/Src/jax/jax/lax.py", line 293, in reduce jaxpr=jaxpr, consts=consts, dimensions=tuple(dimensions)) File "/Users/joschu/Src/jax/jax/core.py", line 74, in bind out_tracer = top_trace.process_primitive(self, tracers, kwargs) File "/Users/joschu/Src/jax/jax/interpreters/ad.py", line 178, in process_primitive .format(primitive)) NotImplementedError: Forward-mode differentiation rule for 'reduce' not implemented ``` However, after replacing `stax.softmax` with my own softmax implementation that doesn't subtract out the max, the code runs just fine. (See `unstable_softmax` in the code linked below). To reproduce the bug, run bug=1 python jax_transformer.py alice.txt using https://github.com/joschu/jax-exp The code runs without error when `bug=1` is removed.
I think the problem is that `max()` (in contrast to `sum()`) is defined as `_make_reduction(onp.max, maximum, -onp.inf)` instead of `_make_reduction(onp.max, lax.max, -onp.inf)` as expected by `_get_monoid_reducer()` in `lax.py`. Fixing the definition of `np.amax` as shown below does work and it should be correct, except when the arguments are complex numbers (which seems to be the main reason why `lax_numpy` used its own `maximum` instead of `lax.max`). ```python3 import jax.numpy as np import numpy as onp import jax from jax import lax amax = np.lax_numpy._make_reduction(onp.max, lax.max, -onp.inf) x = np.array([2, 0, 1.], dtype=np.float32) def f(x): return amax(x) print(f(x)) g = jax.grad(f) print(g(x)) ``` Oops! I guess I broke the gradient rule when I added complex number support to `np.amin` and `np.amax`. Thanks for the bug report. This is probably most easily fixed by defining the semantics of `lax.max()` to be the same as `np.maximum` for complex numbers (lexicographic comparison on the `(real, imaginary)` pair.). We can then change the definition of `np.amax` to use `lax.max`, as in @jonasrauber 's code snippet. Currently `lax.max` would raise an error if asked to compare two complex numbers, and this seems like as good a definition as any.
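For illustration, a plain-NumPy sketch of the lexicographic `(real, imag)` comparison that the patch adopts for `lax.max`/`lax.min` on complex inputs: compare real parts first, and break ties on the imaginary parts. The helper name is illustrative:

```python
import numpy as onp

def complex_max(x, y):
    # Take x where it is lexicographically larger on (real, imag).
    take_x = onp.where(x.real == y.real, x.imag > y.imag, x.real > y.real)
    return onp.where(take_x, x, y)

a = onp.array([1 + 2j, 3 + 0j, 1 - 1j])
b = onp.array([1 + 5j, 2 + 9j, 2 - 1j])
print(complex_max(a, b))  # [1.+5.j 3.+0.j 2.-1.j]
```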
created_at: 2019-02-01T16:55:11

repo: google/jax
pull_number: 312
instance_id: google__jax-312
issue_numbers: ["311"]
base_commit: 1ab4a2ea541f83b03249e78f2c15e458d409f6e2
diff --git a/jax/interpreters/batching.py b/jax/interpreters/batching.py --- a/jax/interpreters/batching.py +++ b/jax/interpreters/batching.py @@ -265,9 +265,9 @@ def zeros_like_batched(batched_args, batch_dims): # method. To handle that case, the `broadcast` function uses a try/except. -def bdim_at_front(x, bdim, broadcast_size=1): +def bdim_at_front(x, bdim, broadcast_size=1, force_broadcast=False): if bdim is None: - return broadcast(x, broadcast_size) + return broadcast(x, broadcast_size, force_broadcast=force_broadcast) else: return move_dim_to_front(x, bdim) diff --git a/jax/lax.py b/jax/lax.py --- a/jax/lax.py +++ b/jax/lax.py @@ -1755,17 +1755,30 @@ def _select_transpose_rule(t, pred, on_true, on_false): select(pred, zeros, t) if on_false is None else None] def _select_batch_rule(batched_args, batch_dims, **unused_kwargs): - oprand, on_true, on_false, = batched_args + pred, on_true, on_false, = batched_args pred_bdim, ot_bdim, of_bdim = batch_dims + size = next(x.shape[i] for x, i in zip(batched_args, batch_dims) + if i is not None) - if (ot_bdim not in {None, pred_bdim}) or (of_bdim not in {None, pred_bdim}): - raise NotImplementedError # TODO(schsam, mattjj): Handle more cases. - - # TODO(schsam, mattjj): Switch to using broadcast_in_dim. - ot = _ones(oprand) * on_true - of = _ones(oprand) * on_false - - return select(oprand, ot, of), pred_bdim + # avoid transposes and some broadcasts in special cases + if pred_bdim == ot_bdim == of_bdim: + if onp.shape(pred) == onp.shape(on_true): + return select(pred, on_true, on_false), pred_bdim + else: + # vmapped function had a scalar pred with nonscalar args + assert onp.ndim(pred) == 1 + pred = broadcast_in_dim(pred, on_true.shape, [pred_bdim]) + return select(pred, on_true, on_false), pred_bdim + + pred = batching.bdim_at_front(pred, pred_bdim, size, force_broadcast=True) + on_true = batching.bdim_at_front(on_true, ot_bdim, size, force_broadcast=True) + on_false = batching.bdim_at_front(on_false, of_bdim, size, force_broadcast=True) + assert onp.shape(on_true) == onp.shape(on_false) + if 0 < onp.ndim(pred) < onp.ndim(on_true): + # vmapped function had a scalar pred with nonscalar args + assert onp.ndim(pred) == 1 + pred = broadcast_in_dim(pred, on_true.shape, [0]) + return select(pred, on_true, on_false), 0 select_p = standard_primitive(_select_shape_rule, _select_dtype_rule, 'select') ad.defjvp(select_p,
diff --git a/tests/batching_test.py b/tests/batching_test.py --- a/tests/batching_test.py +++ b/tests/batching_test.py @@ -485,5 +485,43 @@ def f(params, x): per_example_direct = np.concatenate(per_example_direct, axis=0) self.assertAllClose(per_example, per_example_direct, check_dtypes=True) + def testSelect(self): + pred = onp.array([True, False]) + on_true = onp.array([0, 1]) + on_false = onp.array([2, 3]) + ans = vmap(lax.select)(pred, on_true, on_false) + expected = onp.array([0, 3]) + self.assertAllClose(ans, expected, check_dtypes=True) + + pred = onp.array([False, True]) + on_true = onp.array([0, 1]) + on_false = onp.array([2, 3]) + ans = vmap(lax.select, (0, None, None))(pred, on_true, on_false) + expected = onp.array([[2, 3], + [0, 1]]) + self.assertAllClose(ans, expected, check_dtypes=True) + + pred = True + on_true = onp.array([0, 1], onp.float32) + on_false = onp.array(3, onp.float32) + ans = vmap(lax.select, (None, 0, None))(pred, on_true, on_false) + expected = onp.array([0, 1], onp.float32) + self.assertAllClose(ans, expected, check_dtypes=True) + + pred = onp.array([False, True]) + on_true = onp.array([0, 1], onp.float32) + on_false = onp.array(3, onp.float32) + ans = vmap(lax.select, (0, 0, None))(pred, on_true, on_false) + expected = onp.array([3, 1], onp.float32) + self.assertAllClose(ans, expected, check_dtypes=True) + + pred = onp.array([False, True]) + on_true = onp.array([2], onp.float32) + on_false = onp.array([[3, 4]], onp.float32) + ans = vmap(lax.select, (0, None, 1), 1)(pred, on_true, on_false) + expected = onp.array([[3, 2]], onp.float32) + self.assertAllClose(ans, expected, check_dtypes=True) + + if __name__ == '__main__': absltest.main()
dev timeline for more batched ops Hi JAX Team, I'm interested in using your library, e.g. to use gradients to optimize functions whose terms include functions g() of jacobians of functions f(). There's no good way in PyTorch to do this for f() with output dim > 1, and certainly no good way to do this in a batched setting. In JAX, i'm able to very easily get e.g. the trace of the jacobian of a plain relu neural net. So cool! Things break down when I try to use vmap to get a batched version of the computation. I'm wondering, do you have a rough guess at the timeline for when I might be able to vmap in this kind of setting? Note: I've successfully used JAX to do vmap's of functions g() of jacobians of functions f() for simple choices of f() ```python import jax.numpy as np from jax import jit, jacrev, vmap W = np.eye(3) b = np.zeros(3) def relu(x): return np.maximum(0,x) def NN(x): return relu(np.dot(W,x) + b) Jx_NN = jit(jacrev(NN)) # By the way, is there a recommended way to compose already JIT'ed functions? def trace_Jx_NN(x): J = Jx_NN(x) return np.trace(J) x1 = np.array([2.0,2.0,2.0]) x2 = np.array([3.0,3.0,3.0]) X = np.vstack((x1,x2)) # this works print(trace_Jx_NN(x1)) # this line executes batched_trace_Jx_NN = jit(vmap(trace_Jx_NN,in_axes=(0))) # raises "NotImplementedError # TODO(schsam, mattjj): Handle more cases." print(batched_trace_Jx_NN(X)) ``` Thanks! mark
Dev timeline! What kind of professional and organized operation do you think we're running here? :) More seriously, thanks for your interest, and the kind words about JAX! To answer your question, our work is driven primarily in two ways: first, we're pushing forward the core system as a research project (e.g. with new function transformations for parallel programming and automatic masking), and second, we're extending the system's coverage, adding miscellaneous features and docs, and squashing bugs. There's an approximately infinite amount of work we could do in the latter category, and so we prioritize that work based on what users ask for. In other words, _you_ drive our timeline, along with all the other users kind enough to open GitHub issues and provide feedback. Even better, you can join us and help accomplish these goals by contributing code. One of the strengths of JAX is it's a pretty small pure-Python system, so once you get over the hump of learning about the codebase, it should be relatively easy for any Python programmer to contribute. (But one of the weaknesses of JAX is that we haven't yet written down enough documentation to help new developers get on board, so that initial hump is much higher than it needs to be. We're working on that!) On your specific question about this batching rule (as we call the primitive-by-primitive transformation rules for `vmap`), let's consider this issue a feature request for it. @sschoenholz do you have a sense for how tricky it would be to cover the case needed by this issue? I believe [this is the error being raised](https://github.com/google/jax/blob/1ab4a2ea541f83b03249e78f2c15e458d409f6e2/jax/lax.py#L1762). If you wanted to dig in, that line is where you'd want to start adding code. Here's a quick attempt at explaining what's going on. That function, `_select_batch_rule`, is basically responsible for dealing with a `lax.select` in the context of a `vmap` transformation. The game is this: the user code called `lax.select` (maybe indirectly through a numpy function, in this case `np.maximum`), blissfully ignorant that, because of a `vmap`, there was an extra batch dimension lurking behind the scenes. But someone has to deal with that extra dimension when it comes time to evaluate the `lax.select` call, and that someone is `_select_batch_rule`. To do that, `_select_batch_rule` is given information about what it has to deal with: [the full arguments, with any batch dimensions exposed](https://github.com/google/jax/blob/1ab4a2ea541f83b03249e78f2c15e458d409f6e2/jax/lax.py#L1758), and [their corresponding batch dimensions](https://github.com/google/jax/blob/1ab4a2ea541f83b03249e78f2c15e458d409f6e2/jax/lax.py#L1759), represented as an integer (indicating which dimension/axis is the one hidden from the `lax.select` being called) or a None if that argument doesn't have a hidden dimension after all. It looks like we've only implemented the logic for the cases where the batch dimensions corresponding to `on_true` and `on_false` are either `None` (i.e. no hidden dimension to deal with) or equal to `pred_dim`. One easy way to handle the other cases might be just to transpose those dimensions to the front; I suspect we have to do something like that anyway. I edited the original post to add these lines to the top of the code, making it a full repro: ```python import jax.numpy as np from jax import jit, jacrev, vmap W = np.eye(3) b = np.zeros(3) ``` Please revise further if that edit was misguided.
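For illustration, one of the cases the new batching rule has to handle, taken from the test the patch adds: only the predicate carries a batch axis, so the rule must broadcast the unbatched branches before calling `lax.select` once. This assumes a JAX build with the rule in place:

```python
import numpy as onp
from jax import vmap, lax

pred = onp.array([False, True])
on_true = onp.array([0, 1])
on_false = onp.array([2, 3])

# Each mapped call sees a scalar pred and the full 1-D branches.
ans = vmap(lax.select, in_axes=(0, None, None))(pred, on_true, on_false)
print(ans)
# [[2 3]
#  [0 1]]
```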
created_at: 2019-02-03T17:27:31

repo: google/jax
pull_number: 326
instance_id: google__jax-326
issue_numbers: ["324"]
base_commit: fce159b59614cf1956b48f1f6955627893602554
diff --git a/jax/scipy/special.py b/jax/scipy/special.py --- a/jax/scipy/special.py +++ b/jax/scipy/special.py @@ -19,7 +19,7 @@ import scipy.special as osp_special from .. import lax -from ..numpy.lax_numpy import _wraps +from ..numpy.lax_numpy import _wraps, asarray # need to create new functions because _wraps sets the __name__ attribute @@ -28,3 +28,16 @@ erf = _wraps(osp_special.erf)(lambda x: lax.erf(x)) erfc = _wraps(osp_special.erfc)(lambda x: lax.erfc(x)) erfinv = _wraps(osp_special.erfinv)(lambda x: lax.erf_inv(x)) + + +@_wraps(osp_special.logit) +def logit(x): + x = asarray(x) + return lax.log(lax.div(x, lax.sub(lax._const(x, 1), x))) + + +@_wraps(osp_special.expit) +def expit(x): + x = asarray(x) + one = lax._const(x, 1) + return lax.div(one, lax.add(one, lax.exp(lax.neg(x))))
diff --git a/tests/lax_scipy_test.py b/tests/lax_scipy_test.py --- a/tests/lax_scipy_test.py +++ b/tests/lax_scipy_test.py @@ -62,6 +62,8 @@ def op_record(name, nargs, dtypes, rng, diff_modes, test_name=None): op_record("erf", 1, float_dtypes, jtu.rand_small_positive(), ["rev"]), op_record("erfc", 1, float_dtypes, jtu.rand_small_positive(), ["rev"]), op_record("erfinv", 1, float_dtypes, jtu.rand_small_positive(), ["rev"]), + op_record("logit", 1, float_dtypes, jtu.rand_small_positive(), ["rev"]), + op_record("expit", 1, float_dtypes, jtu.rand_small_positive(), ["rev"]), ] CombosWithReplacement = itertools.combinations_with_replacement
jax missing scipy.special.expit

Would it be possible to add gradients for `expit` and `logit`?
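For illustration, a minimal sketch of the two functions using the same closed forms as the patch (`logit(x) = log(x / (1 - x))`, `expit(x) = 1 / (1 + exp(-x))`), written directly in `jax.numpy`; because both are built from differentiable primitives, `grad` works on them out of the box:

```python
import jax.numpy as np  # the issue's convention for jax.numpy
from jax import grad

def logit(x):
    return np.log(x / (1 - x))

def expit(x):
    return 1 / (1 + np.exp(-x))

print(expit(0.0))         # 0.5
print(grad(expit)(0.0))   # 0.25 = expit(0) * (1 - expit(0))
print(grad(logit)(0.25))  # ~5.3333 = 1 / (x * (1 - x))
```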
created_at: 2019-02-05T18:28:52

repo: google/jax
pull_number: 332
instance_id: google__jax-332
issue_numbers: ["329"]
base_commit: a75d1c6e08cce3fd7a7e049b02a2aa1b41c3ac48
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -995,6 +995,18 @@ def cumulative_reduction(a, axis=None, dtype=None): ### Array-creation functions +# TODO(phawkins): use this helper everywhere. +def _canonicalize_axis(axis, num_dims): + """Canonicalize an axis in (-num_dims, num_dims) to [0, num_dims).""" + if axis < 0: + axis = axis + num_dims + if axis < 0 or axis >= num_dims: + raise ValueError( + "axis {} is out of bounds for array of dimension {}".format( + axis, num_dims)) + return axis + + @_wraps(onp.pad) def pad(array, pad_width, mode, constant_values=0): if mode != "constant": @@ -1014,11 +1026,19 @@ def pad(array, pad_width, mode, constant_values=0): @_wraps(onp.stack) -def stack(arrays): +def stack(arrays, axis=0): if not arrays: raise ValueError("Need at least one array to stack.") - new_arrays = [reshape(x, (-1,) + onp.shape(x)) for x in arrays] - return reshape(concatenate(new_arrays), (len(arrays),) + arrays[0].shape) + shape0 = shape(arrays[0]) + axis = _canonicalize_axis(axis, len(shape0) + 1) + new_shape = list(shape0) + new_shape.insert(axis, 1) + new_arrays = [] + for a in arrays: + if shape(a) != shape0: + raise ValueError("All input arrays must have the same shape.") + new_arrays.append(reshape(a, new_shape)) + return concatenate(new_arrays, axis=axis) @_wraps(onp.concatenate) @@ -1044,6 +1064,11 @@ def hstack(tup): return concatenate(arrs, 1) +@_wraps(onp.dstack) +def dstack(tup): + return concatenate([atleast_3d(m) for m in tup], axis=2) + + @_wraps(onp.column_stack) def column_stack(tup): arrays = [] @@ -1073,6 +1098,19 @@ def atleast_2d(*arys): return [atleast_2d(arr) for arr in arys] +@_wraps(onp.atleast_3d) +def atleast_3d(*arys): + if len(arys) == 1: + arr = array(arys[0]) + if ndim(arr) <= 1: + arr = arr.reshape((1, -1, 1)) + elif ndim(arr) == 2: + arr = arr.reshape(shape(arr) + (1,)) + return arr + else: + return [atleast_3d(arr) for arr in arys] + + # TODO(mattjj): can this be simplified? @_wraps(onp.array) def array(object, dtype=None, copy=True, order="K", ndmin=0): @@ -1711,18 +1749,6 @@ def argsort(a, axis=-1, kind='quicksort', order=None): return perm -# TODO(phawkins): use this helper everywhere. -def _canonicalize_axis(axis, num_dims): - """Canonicalize an axis in (-num_dims, num_dims) to [0, num_dims).""" - if axis < 0: - axis = axis + num_dims - if axis < 0 or axis >= num_dims: - raise ValueError( - "axis {} is out of bounds for array of dimension {}".format( - axis, num_dims)) - return axis - - @_wraps(onp.take) def take(a, indices, axis=None, out=None, mode=None): if out:
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -18,6 +18,7 @@ import collections import functools +from functools import partial import itertools from unittest import skip @@ -108,6 +109,9 @@ def op_record(name, nargs, dtypes, shapes, rng, diff_modes, test_name=None): JAX_COMPOUND_OP_RECORDS = [ op_record("angle", 1, number_dtypes, all_shapes, jtu.rand_default(), []), + op_record("atleast_1d", 1, default_dtypes, all_shapes, jtu.rand_default(), []), + op_record("atleast_2d", 1, default_dtypes, all_shapes, jtu.rand_default(), []), + op_record("atleast_3d", 1, default_dtypes, all_shapes, jtu.rand_default(), []), op_record("conjugate", 1, number_dtypes, all_shapes, jtu.rand_default(), ["rev"]), op_record("deg2rad", 1, float_dtypes, all_shapes, jtu.rand_default(), []), op_record("divide", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), ["rev"]), @@ -664,9 +668,9 @@ def testTrace(self, shape, dtype, out_dtype, offset, axis1, axis2, rng): self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True) @parameterized.named_parameters(jtu.cases_from_list( - {"testcase_name": "_{}".format( - jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes)), - "shape": shape, "dtypes": dtypes, "rng": rng} + {"testcase_name": "_{}_axis={}".format( + jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes), axis), + "shape": shape, "axis": axis, "dtypes": dtypes, "rng": rng} for dtypes in [ [onp.float32], [onp.float32, onp.float32], @@ -675,10 +679,35 @@ def testTrace(self, shape, dtype, out_dtype, offset, axis1, axis2, rng): [onp.float32, onp.int32, onp.float64], ] for shape in [(), (2,), (3, 4), (1, 100)] + for axis in range(-len(shape), len(shape) + 1) for rng in [jtu.rand_default()])) - def testStack(self, shape, dtypes, rng): + def testStack(self, shape, axis, dtypes, rng): args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]] - self._CheckAgainstNumpy(lnp.stack, onp.stack, args_maker, check_dtypes=True) + onp_fun = partial(onp.stack, axis=axis) + lnp_fun = partial(lnp.stack, axis=axis) + self._CheckAgainstNumpy(lnp_fun, onp_fun, args_maker, check_dtypes=True) + + + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": "_op={}_{}".format( + op, jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes)), + "shape": shape, "op": op, "dtypes": dtypes, "rng": rng} + for op in ["hstack", "vstack", "dstack"] + for dtypes in [ + [onp.float32], + [onp.float32, onp.float32], + [onp.float32, onp.int32, onp.float32], + [onp.float32, onp.int64, onp.float32], + [onp.float32, onp.int32, onp.float64], + ] + for shape in [(), (2,), (3, 4), (1, 100), (2, 3, 4)] + for rng in [jtu.rand_default()])) + def testHVDStack(self, shape, op, dtypes, rng): + args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]] + onp_fun = getattr(onp, op) + lnp_fun = getattr(lnp, op) + self._CheckAgainstNumpy(lnp_fun, onp_fun, args_maker, check_dtypes=True) + @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_inshape={}_outdtype={}".format(
np.stack should support axis argument
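A minimal sketch of the approach the patch above takes (not the library code itself): stacking along an arbitrary axis reduces to inserting a unit dimension with a reshape and concatenating along it. The helper name is made up, and it assumes a non-negative axis and equally shaped inputs:
```python
import jax.numpy as np

def stack_along_axis(arrays, axis=0):
    # insert a length-1 dimension at `axis`, then concatenate along it
    new_shape = list(arrays[0].shape)
    new_shape.insert(axis, 1)
    expanded = [np.reshape(a, new_shape) for a in arrays]
    return np.concatenate(expanded, axis=axis)

a, b = np.ones((3, 4)), np.zeros((3, 4))
assert stack_along_axis([a, b], axis=1).shape == (3, 2, 4)
```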
2019-02-06T13:46:59
google/jax
335
google__jax-335
[ "330" ]
bf2abc886af9ad6ded39df44b681bb7f5e6d1bf0
diff --git a/jax/lax.py b/jax/lax.py --- a/jax/lax.py +++ b/jax/lax.py @@ -462,7 +462,8 @@ def full(shape, fill_value, dtype): if onp.isscalar(fill_value) or type(fill_value) is onp.ndarray: return FilledConstant(onp.asarray(fill_value, dtype), shape) elif isinstance(fill_value, xla.DeviceValue): - return FilledConstant(convert_element_type(fill_value, dtype), shape) + val = onp.asarray(fill_value, dtype) + return FilledConstant(val, shape) else: return broadcast(convert_element_type(fill_value, dtype), shape)
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -1187,7 +1187,6 @@ def args_maker(): self._CheckAgainstNumpy(lnp_op, onp_op, args_maker, check_dtypes=True) self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True) - @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_shape={}_n={}_increasing={}".format( jtu.format_shape_dtype_string([shape], dtype), @@ -1207,6 +1206,10 @@ def testVander(self, shape, dtype, n, increasing, rng): self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=False) self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=False) + def testIssue330(self): + x = lnp.full((1, 1), lnp.array([1])[0]) # doesn't crash + self.assertEqual(x[0, 0], 1) + if __name__ == "__main__": absltest.main()
bug in np.full / lax.full for DeviceArray wrapped scalars
If you pass in a jax-wrapped scalar as the fill-constant value to np.full, it breaks, e.g.:
```python
np.full((1, 1), np.array([1])[0])
```
This hits the condition in lax.full:
```python
elif isinstance(fill_value, xla.DeviceValue):
  return FilledConstant(convert_element_type(fill_value, dtype), shape)
```
No matter what dtype is, the convert_element_type result is still a DeviceArray, and then this line in lax.FilledConstant:
```python
class FilledConstant(xla.DeviceConstant):
  __slots__ = ["fill_value"]

  def __init__(self, fill_value, shape):
    assert type(fill_value) is onp.ndarray
```
wants the fill value to be an onp ndarray instead. I'm not too certain about the type logic here to make a PR confidently.
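Until a fix like the patch above is in place, one workaround (a guess based on the code path quoted here, not an official recommendation) is to hand np.full a host-side NumPy value instead of the device-wrapped scalar, so the ndarray branch of lax.full is taken:
```python
import numpy as onp        # vanilla NumPy
import jax.numpy as np

fill = np.array([1])[0]                   # a DeviceArray scalar
x = np.full((1, 1), onp.asarray(fill))    # convert to onp first; hits the ndarray branch
```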
2019-02-06T17:25:41
google/jax
337
google__jax-337
[ "252" ]
ce74bc55ce057c8d91b446ecfe7b199b46cd1a49
diff --git a/jax/api.py b/jax/api.py --- a/jax/api.py +++ b/jax/api.py @@ -26,8 +26,11 @@ import itertools import operator as op +import os import numpy as onp +from contextlib import contextmanager +from distutils.util import strtobool from . import core from . import linear_util as lu @@ -47,10 +50,16 @@ from .interpreters import ad from .interpreters import batching from .interpreters import parallel +from .config import flags, config map = safe_map zip = safe_zip +FLAGS = flags.FLAGS +flags.DEFINE_bool("jax_disable_jit", + strtobool(os.getenv("JAX_DISABLE_JIT", "False")), + "Disable JIT compilation and just call original Python.") + def jit(fun, static_argnums=()): """Sets up `fun` for just-in-time compilation with XLA. @@ -71,6 +80,8 @@ def jit(fun, static_argnums=()): """ @wraps(fun) def f_jitted(*args, **kwargs): + if _jit_is_disabled or config.read('jax_disable_jit'): + return fun(*args, **kwargs) f = lu.wrap_init(fun, kwargs) dyn_argnums = [i for i in range(len(args)) if i not in static_argnums] f, dyn_args = argnums_partial(f, dyn_argnums, args) @@ -84,6 +95,15 @@ def f_jitted(*args, **kwargs): return f_jitted +@contextmanager +def disable_jit(): + global _jit_is_disabled + _jit_is_disabled, prev_val = True, _jit_is_disabled + yield + _jit_is_disabled = prev_val +_jit_is_disabled = False + + def grad(fun, argnums=0): """Creates a function which evaluates the gradient of `fun`.
diff --git a/tests/api_test.py b/tests/api_test.py --- a/tests/api_test.py +++ b/tests/api_test.py @@ -321,6 +321,23 @@ def test_hessian_on_pytrees(self): (onp.array([0., 0.]), onp.array([0., 2.]))) self.assertAllClose(ans, expected, check_dtypes=False) + def test_disable_jit(self): + effects = [] + + @api.jit + def f(x): + effects.append(1) + return x + + with api.disable_jit(): + f(2) + f(2) + assert len(effects) == 2 + + f(2) + f(2) + assert len(effects) == 3 + if __name__ == '__main__': absltest.main()
add option to globally disable jit for better debugging (Relates to #196) It's harder to debug jitted functions than non-jitted functions, since it's essentially impossible to introspect their intermediate state. One option is to comment out the jit decorator, but then you have to remember to put it back when you're done debugging. It'd be nice to have a mechanism (maybe a context manager?) to globally disable jitting for debugging purposes; once bugs are fixed, you don't have to reinsert all of your @jit statements.
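A stripped-down sketch of the mechanism the patch above introduces, with a fake `compile_and_run` standing in for the real compilation path (that name is not a JAX API):
```python
from contextlib import contextmanager

_jit_disabled = False          # module-level switch consulted by jit wrappers

@contextmanager
def disable_jit():
    global _jit_disabled
    prev, _jit_disabled = _jit_disabled, True
    try:
        yield
    finally:
        _jit_disabled = prev

def jit(fun):
    def wrapped(*args, **kwargs):
        if _jit_disabled:
            return fun(*args, **kwargs)               # plain Python: printable, steppable
        return compile_and_run(fun, *args, **kwargs)  # placeholder for the real XLA path
    return wrapped
```
Usage then mirrors the test added above: wrap the debugging session in `with disable_jit(): ...` and the decorated function runs eagerly on every call.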
2019-02-07T03:30:39
google/jax
351
google__jax-351
[ "246" ]
1b0069ff3f29545e9de3cdbdd5a31dbe9973761b
diff --git a/jax/lax.py b/jax/lax.py --- a/jax/lax.py +++ b/jax/lax.py @@ -2079,12 +2079,77 @@ def _gather_transpose_rule(t, operand, start_indices, dimension_numbers, index_vector_dim=dimension_numbers.index_vector_dim) return [scatter_add(zeros, start_indices, t, scatter_dnums), ad_util.zero] +def _gather_batching_rule(batched_args, batch_dims, dimension_numbers, + slice_sizes, operand_shape): + operand, start_indices = batched_args + operand_bdim, start_indices_bdim = batch_dims + + if operand_bdim is not None and start_indices_bdim is None: + operand = batching.move_dim_to_front(operand, operand_bdim) + slice_sizes = (operand.shape[0],) + slice_sizes + offset_dims = (0,) + tuple(onp.add(1, dimension_numbers.offset_dims)) + collapsed_slice_dims = tuple(onp.add(1, dimension_numbers.collapsed_slice_dims)) + start_index_map = tuple(onp.add(1, dimension_numbers.start_index_map)) + dnums = GatherDimensionNumbers( + offset_dims=offset_dims, + collapsed_slice_dims=collapsed_slice_dims, + start_index_map=start_index_map, + index_vector_dim=dimension_numbers.index_vector_dim) + return gather(operand, start_indices, dimension_numbers=dnums, + slice_sizes=slice_sizes), 0 + + elif operand_bdim is None and start_indices_bdim is not None: + start_indices = batching.move_dim_to_front(start_indices, start_indices_bdim) + offset_dims = tuple(onp.add(1, dimension_numbers.offset_dims)) + index_vector_dim = dimension_numbers.index_vector_dim + 1 + dnums = GatherDimensionNumbers( + offset_dims=offset_dims, + collapsed_slice_dims=dimension_numbers.collapsed_slice_dims, + start_index_map=dimension_numbers.start_index_map, + index_vector_dim=index_vector_dim) + return gather(operand, start_indices, dimension_numbers=dnums, + slice_sizes=slice_sizes), 0 + + else: + # get rid of scalar index case (noticing our start_indices.ndim is + # incremented by one compared to the original user code) + if dimension_numbers.index_vector_dim == start_indices.ndim - 1: + start_indices = reshape(start_indices, start_indices.shape + (1,)) + + # move our batch dimensions to the front to preserve sanity + operand = batching.move_dim_to_front(operand, operand_bdim) + start_indices = batching.move_dim_to_front(start_indices, start_indices_bdim) + + # Example: user code had start_indices shape (3, 4, 5) and index_vector_dim + # of 2, and we have to deal with start_indices shape (7, 3, 4, 5). We + # transform that to an index_vector_dim of 3, and a start_indices of shape + # (7, 3, 4, 6) where we concatenated an iota that counts along our batch + # dimension to the front of the ndindex. 
+ index_vector_dim = dimension_numbers.index_vector_dim + 1 + count_shape = list(start_indices.shape) + count_shape[index_vector_dim] = 1 + counts = broadcasted_iota(start_indices.dtype, tuple(count_shape), 0) + start_indices = concatenate([counts, start_indices], index_vector_dim) + + slice_sizes = (1,) + slice_sizes + collapsed_slice_dims = (0,) + tuple(onp.add(1, dimension_numbers.collapsed_slice_dims)) + offset_dims = tuple(onp.add(1, dimension_numbers.offset_dims)) + start_index_map = (0,) + tuple(onp.add(1, dimension_numbers.start_index_map)) + + dnums = GatherDimensionNumbers( + offset_dims=offset_dims, + collapsed_slice_dims=collapsed_slice_dims, + start_index_map=start_index_map, + index_vector_dim=index_vector_dim) + return gather(operand, start_indices, dimension_numbers=dnums, + slice_sizes=slice_sizes), 0 gather_p = standard_primitive( _gather_shape_rule, _gather_dtype_rule, 'gather', _gather_translation_rule) ad.defjvp(gather_p, _gather_jvp_rule, None) ad.primitive_transposes[gather_p] = _gather_transpose_rule +batching.primitive_batchers[gather_p] = _gather_batching_rule ScatterDimensionNumbers = collections.namedtuple(
diff --git a/tests/batching_test.py b/tests/batching_test.py --- a/tests/batching_test.py +++ b/tests/batching_test.py @@ -539,6 +539,130 @@ def testLaxLinalgCholesky(self): expected = onp.linalg.cholesky(b) self.assertAllClose(ans, expected, check_dtypes=False) + @parameterized.named_parameters( + {"testcase_name": "_shape={}_axis={}_idxs={}_dnums={}_slice_sizes={}".format( + jtu.format_shape_dtype_string(shape, dtype), axis, idxs, dnums, + slice_sizes), + "axis": axis, "shape": shape, "dtype": dtype, "idxs": idxs, "dnums": dnums, + "slice_sizes": slice_sizes, "rng": rng, "rng_idx": rng_idx} + for dtype in [onp.float32, onp.int32] + for axis, shape, idxs, dnums, slice_sizes in [ + (0, (3, 5), onp.array([0, 2]), lax.GatherDimensionNumbers( + offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,), + index_vector_dim=1), (1,)), + (1, (10, 3), onp.array([0, 0, 0]), lax.GatherDimensionNumbers( + offset_dims=(1,), collapsed_slice_dims=(), start_index_map=(0,), + index_vector_dim=1), (2,)), + (1, (10, 3, 5), onp.array([0, 2, 1]), lax.GatherDimensionNumbers( + offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,), + index_vector_dim=1), (1, 3)), + (2, (10, 5, 3), onp.array([[0, 2], [1, 0]]), lax.GatherDimensionNumbers( + offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0, 1), + index_vector_dim=1), (1, 3)), + ] + for rng_idx in [jtu.rand_int(max(shape))] + for rng in [jtu.rand_default()]) + def testGatherBatchedOperand(self, axis, shape, dtype, idxs, dnums, + slice_sizes, rng, rng_idx): + fun = partial(lax.gather, dimension_numbers=dnums, slice_sizes=slice_sizes) + operand = rng(shape, dtype) + ans = vmap(fun, (axis, None))(operand, idxs) + expected = onp.stack([fun(operand[(slice(None),) * axis + (i,)], idxs) + for i in range(operand.shape[axis])]) + self.assertAllClose(ans, expected, check_dtypes=False) + + @parameterized.named_parameters( + {"testcase_name": "_shape={}_axis={}_idxs={}_dnums={}_slice_sizes={}".format( + jtu.format_shape_dtype_string(shape, dtype), axis, idxs, dnums, + slice_sizes), + "axis": axis, "shape": shape, "dtype": dtype, "idxs": idxs, "dnums": dnums, + "slice_sizes": slice_sizes, "rng": rng, "rng_idx": rng_idx} + for dtype in [onp.float32, onp.int32] + for axis, shape, idxs, dnums, slice_sizes in [ + (0, (5,), onp.array([[0, 2], [1, 3]]), lax.GatherDimensionNumbers( + offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,), + index_vector_dim=1), (1,)), + (1, (10,), onp.array([[0, 0, 0], [0, 2, 1]]).T, + lax.GatherDimensionNumbers( + offset_dims=(1,), collapsed_slice_dims=(), start_index_map=(0,), + index_vector_dim=1), (2,)), + (1, (10, 5), onp.array([[0, 2, 1], [0, 3, 3]]).T, + lax.GatherDimensionNumbers( + offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,), + index_vector_dim=1), (1, 3)), + (0, (10, 5), onp.array([[[0, 2], [1, 0]], + [[1, 2], [0, 3]]]), lax.GatherDimensionNumbers( + offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0, 1), + index_vector_dim=1), (1, 3)), + ] + for rng_idx in [jtu.rand_int(max(shape))] + for rng in [jtu.rand_default()]) + def testGatherBatchedIndices(self, axis, shape, dtype, idxs, dnums, + slice_sizes, rng, rng_idx): + fun = partial(lax.gather, dimension_numbers=dnums, slice_sizes=slice_sizes) + operand = rng(shape, dtype) + ans = vmap(fun, (None, axis))(operand, idxs) + expected = onp.stack([fun(operand, idxs[(slice(None),) * axis + (i,)]) + for i in range(idxs.shape[axis])]) + self.assertAllClose(ans, expected, check_dtypes=False) + + 
@parameterized.named_parameters( + {"testcase_name": "_shape={}_op_axis={}_idxs_axis={}_idxs={}_dnums={}_slice_sizes={}".format( + jtu.format_shape_dtype_string(shape, dtype), op_axis, idxs_axis, idxs, + dnums, slice_sizes), + "op_axis": op_axis, "idxs_axis": idxs_axis, "shape": shape, "dtype": + dtype, "idxs": idxs, "dnums": dnums, "slice_sizes": slice_sizes, + "rng": rng, "rng_idx": rng_idx} + for dtype in [onp.float32, onp.int32] + for op_axis, idxs_axis, shape, idxs, dnums, slice_sizes in [ + (0, 0, (2, 5), onp.array([[0, 2], [1, 3]]), lax.GatherDimensionNumbers( + offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,), + index_vector_dim=1), (1,)), + (1, 1, (10, 2), onp.array([[0, 0, 0], [0, 2, 1]]).T, + lax.GatherDimensionNumbers( + offset_dims=(1,), collapsed_slice_dims=(), start_index_map=(0,), + index_vector_dim=1), (2,)), + (0, 1, (2, 10, 5,), onp.array([[0, 2, 1], [0, 3, 3]]).T, + lax.GatherDimensionNumbers( + offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,), + index_vector_dim=1), (1, 3)), + (2, 0, (10, 5, 2), onp.array([[[0, 2], [1, 0]], + [[1, 0], [2, 0]]]), lax.GatherDimensionNumbers( + offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0, 1), + index_vector_dim=1), (1, 3)), + ] + for rng_idx in [jtu.rand_int(max(shape))] + for rng in [jtu.rand_default()]) + def testGatherBatchedBoth(self, op_axis, idxs_axis, shape, dtype, idxs, dnums, + slice_sizes, rng, rng_idx): + fun = partial(lax.gather, dimension_numbers=dnums, slice_sizes=slice_sizes) + operand = rng(shape, dtype) + assert operand.shape[op_axis] == idxs.shape[idxs_axis] + ans = vmap(fun, (op_axis, idxs_axis))(operand, idxs) + expected = onp.stack([fun(operand[(slice(None),) * op_axis + (i,)], + idxs[(slice(None),) * idxs_axis + (i,)]) + for i in range(idxs.shape[idxs_axis])]) + self.assertAllClose(ans, expected, check_dtypes=False) + + def testNumpyIndexing1(self): + a = np.arange(2 * 3 * 4).reshape((2, 3, 4)) + ind = onp.array([[0, 1], + [2, 0]]) + def f(a, ind): + return a[:, ind] + expected = onp.stack([f(a, ind[i, :]) for i in range(ind.shape[0])]) + ans = vmap(f, (None, 0))(a, ind) + assert onp.all(ans == expected) + + def testNumpyIndexing2(self): + a = np.arange(2 * 3 * 4).reshape((2, 3, 4)) + def f(a): + inds = np.array([0, 2]) + return a[:, inds] + ans = vmap(f)(a) + expected = onp.stack([f(a[:, i, :]) for i in range(a.shape[1])], axis=1) + assert onp.all(ans == expected) + if __name__ == '__main__': absltest.main() diff --git a/tests/lax_test.py b/tests/lax_test.py --- a/tests/lax_test.py +++ b/tests/lax_test.py @@ -1381,6 +1381,9 @@ def testIndexTake(self, shape, dtype, idxs, axes, rng): ((10, 5,), onp.array([0, 2, 1]), lax.GatherDimensionNumbers( offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,), index_vector_dim=1), (1, 3)), + ((10, 5), onp.array([[0, 2], [1, 0]]), lax.GatherDimensionNumbers( + offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0, 1), + index_vector_dim=1), (1, 3)), ] for rng_idx in [jtu.rand_int(max(shape))] for rng in [jtu.rand_default()]))
Batching rule for 'gather' not implemented Here is a minimal example to reproduce the error. ``` import jax import jax.numpy as np a = np.ones((3, 3)) def f(a): inds = np.arange(a.shape[0]) return a[inds] jax.vmap(f)(a) ``` `NotImplementedError: Batching rule for 'index_take' not implemented` So if you want to use `vmap`, the only form of indexing you can do is scalar indexing. Am I interpreting this correctly? For me that's a huge limitation. I would try to implement the batch rule but I think it will be complicated and I have a workaround: `np.stack([a[ind] for ind in inds])` instead of `a[inds]`. With an ideal JIT compiler those two would be equivalent, but my guess is that the performance of this workaround is not optimal.
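The workaround mentioned in the report, written out as a runnable sketch (using a Python `range` over the static length so every index stays a scalar):
```python
import jax
import jax.numpy as np

a = np.ones((3, 3))

def f(a):
    # scalar indexing only, then stack the rows back together
    return np.stack([a[i] for i in range(a.shape[0])])

jax.vmap(f)(a)   # runs even without a batching rule for gather/index_take
```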
Thanks for the request! Fortunately batching rules are pretty easy to write :) We just wait for people to ask for them because we love closing issues. I started a batching rule in 87673c7 and pushed it to a branch. It should cover examples like the one you wrote. I still need to add support for (1) batching along the index arrays themselves and (2) batching for the transpose operation `index_untake`, which might be needed for vmapping gradients of functions that involve `index_take`. I think both should be pretty straightforward, so I'll get back to them soon. Oh and (3) tests! Thank you for being so responsive! Unfortunately when I run my original example I am hitting the NotImplementedError that you added. ``` import jax import jax.numpy as np def index_take_batch_rule(batched_args, batch_dims, **kwargs): src = batched_args[0] src_bdim = batch_dims[0] idxs = batched_args[1:] idxs_bdims = batch_dims[1:] if any(bdim is not None for bdim in idxs_bdims): raise NotImplementedError # TODO(mattjj) axes = tuple(i+1 if i >= src_bdim else i for i in kwargs['axes']) return jax.lax.index_take(src, idxs, axes), src_bdim jax.lax.batching.primitive_batchers[jax.lax.index_take_p] = index_take_batch_rule a = np.ones((3, 3)) def f(a): inds = np.arange(a.shape[0]) return a[inds] jax.vmap(f)(a) ``` `NotImplementedError` ``` $ pip freeze | grep jax jax==0.1.16 jaxlib==0.1.4 ``` This seems to work for that case: ``` def index_take_batch_rule(batched_args, batch_dims, **kwargs): src = batched_args[0] src_bdim = batch_dims[0] idxs = batched_args[1:] idxs_bdims = batch_dims[1:] axes = tuple(i for i in kwargs['axes']) return jax.lax.index_take(src, idxs, axes), src_bdim ``` Hm, the example works for me on the branch, and in this code the index array shouldn't have a batch dim on it. Weird! Maybe there's a whitespace issue? In any case, I'll dig in more when back at a keyboard, either later tonight or tomorrow. Maybe it doesn't work for me because I'm missing some other recent commit. I'm using a jax wheel from [here](https://storage.googleapis.com/jax-wheels) and just pasted in your changes from 87673c7 (I would build from source if I could but I can't get bazel to trust the self-signed cert in my network). Edit: fixed my bazel issue ([link](https://groups.google.com/forum/#!topic/bazel-discuss/13uPDObyfQg) for posterity). Will see if a source build fixes this for me. This stuff doesn't really involve any compiled code, so I don't think rebuilding jaxlib should be necessary. I pushed another commit that fixed an issue, but it's still a WIP. (I've only got a few minutes here and there to work on it.) Since #307 removed index_take as a primitive, running the OP's code now results in ``` NotImplementedError: Batching rule for 'gather' not implemented ``` I changed the title of the issue to reflect the new topic: we want a batching rule for `lax.gather`!
2019-02-11T17:36:03
google/jax
353
google__jax-353
[ "350" ]
1e44dd419fe6fd1bc5fecdd9d6fd82d6f9e4141c
diff --git a/jax/lax.py b/jax/lax.py --- a/jax/lax.py +++ b/jax/lax.py @@ -2206,7 +2206,6 @@ def _scatter_jvp(primals, tangents, update_jaxpr, update_consts, updates_shape=updates_shape) return val_out, tangent_out - def _scatter_transpose_rule(t, operand, scatter_indices, updates, update_jaxpr, update_consts, dimension_numbers, updates_shape): @@ -2233,12 +2232,65 @@ def _scatter_transpose_rule(t, operand, scatter_indices, updates, slice_sizes=slice_sizes) return [operand_t, None, update_t] +def _scatter_batching_rule(batched_args, batch_dims, update_jaxpr, + update_consts, dimension_numbers, updates_shape): + operand, scatter_indices, updates = batched_args + operand_bdim, scatter_indices_bdim, updates_bdim = batch_dims + del update_jaxpr, update_consts, updates_shape # Unused. + + # move the operand batch dim to the front if it is not None, otherwise create + # it at the front (so that we can scatter into it) + size = next(x.shape[ax] for x, ax in zip(batched_args, batch_dims) + if ax is not None) + operand = batching.bdim_at_front(operand, operand_bdim, broadcast_size=size, + force_broadcast=True) + operand_bdim = 0 + + if scatter_indices_bdim is not None and updates_bdim is None: + raise NotImplementedError # TODO(mattjj,phawkins) + elif scatter_indices_bdim is None and updates_bdim is not None: + updates = batching.move_dim_to_front(updates, updates_bdim) + inserted_window_dims = tuple(onp.add(1, dimension_numbers.inserted_window_dims)) + update_window_dims = (0,) + tuple(onp.add(1, dimension_numbers.update_window_dims)) + scatter_dims_to_operand_dims = tuple(onp.add(1, dimension_numbers.scatter_dims_to_operand_dims)) + dnums = ScatterDimensionNumbers( + update_window_dims=update_window_dims, + inserted_window_dims=inserted_window_dims, + scatter_dims_to_operand_dims=scatter_dims_to_operand_dims, + index_vector_dim=dimension_numbers.index_vector_dim) + return scatter_add(operand, scatter_indices, updates, dnums), 0 + else: + # see the third case in _gather_batching_rule for comparison and comments + if dimension_numbers.index_vector_dim == scatter_indices.ndim - 1: + scatter_indices = reshape(scatter_indices, scatter_indices.shape + (1,)) + + scatter_indices = batching.move_dim_to_front(scatter_indices, + scatter_indices_bdim) + updates = batching.move_dim_to_front(updates, updates_bdim) + + index_vector_dim = dimension_numbers.index_vector_dim + 1 + count_shape = list(scatter_indices.shape) + count_shape[index_vector_dim] = 1 + counts = broadcasted_iota(scatter_indices.dtype, tuple(count_shape), 0) + scatter_indices = concatenate([counts, scatter_indices], index_vector_dim) + + update_window_dims = tuple(onp.add(1, dimension_numbers.update_window_dims)) + inserted_window_dims = (0,) + tuple(onp.add(1, dimension_numbers.inserted_window_dims)) + scatter_dims_to_operand_dims = (0,) + tuple(onp.add(1, dimension_numbers.scatter_dims_to_operand_dims)) + + dnums = ScatterDimensionNumbers( + update_window_dims=update_window_dims, + inserted_window_dims=inserted_window_dims, + scatter_dims_to_operand_dims=scatter_dims_to_operand_dims, + index_vector_dim=index_vector_dim) + return scatter_add(operand, scatter_indices, updates, dnums), 0 scatter_p = standard_primitive( _scatter_shape_rule, _scatter_dtype_rule, 'scatter-add', _scatter_translation_rule) ad.primitive_jvps[scatter_p] = _scatter_jvp ad.primitive_transposes[scatter_p] = _scatter_transpose_rule +batching.primitive_batchers[scatter_p] = _scatter_batching_rule def _reduce_shape_rule(operand, init_value, computation, jaxpr, consts, 
dimensions):
diff --git a/tests/batching_test.py b/tests/batching_test.py --- a/tests/batching_test.py +++ b/tests/batching_test.py @@ -571,6 +571,39 @@ def testGatherBatchedOperand(self, axis, shape, dtype, idxs, dnums, for i in range(operand.shape[axis])]) self.assertAllClose(ans, expected, check_dtypes=False) + @parameterized.named_parameters( + {"testcase_name": "_shape={}_axis={}_idxs={}_dnums={}_slice_sizes={}".format( + jtu.format_shape_dtype_string(shape, dtype), axis, idxs, dnums, + slice_sizes), + "axis": axis, "shape": shape, "dtype": dtype, "idxs": idxs, "dnums": dnums, + "slice_sizes": slice_sizes, "rng": rng, "rng_idx": rng_idx} + for dtype in [onp.float32, onp.float64] + for axis, shape, idxs, dnums, slice_sizes in [ + (0, (3, 5), onp.array([0, 2]), lax.GatherDimensionNumbers( + offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,), + index_vector_dim=1), (1,)), + (1, (10, 3), onp.array([0, 0, 0]), lax.GatherDimensionNumbers( + offset_dims=(1,), collapsed_slice_dims=(), start_index_map=(0,), + index_vector_dim=1), (2,)), + (1, (10, 3, 5), onp.array([0, 2, 1]), lax.GatherDimensionNumbers( + offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,), + index_vector_dim=1), (1, 3)), + (2, (10, 5, 3), onp.array([[0, 2], [1, 0]]), lax.GatherDimensionNumbers( + offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0, 1), + index_vector_dim=1), (1, 3)), + ] + for rng_idx in [jtu.rand_int(max(shape))] + for rng in [jtu.rand_default()]) + def testGatherGradBatchedOperand(self, axis, shape, dtype, idxs, dnums, + slice_sizes, rng, rng_idx): + fun = partial(lax.gather, dimension_numbers=dnums, slice_sizes=slice_sizes) + gfun = grad(lambda x, idx: np.sum(np.sin(fun(x, idx)))) + operand = rng(shape, dtype) + ans = vmap(gfun, (axis, None))(operand, idxs) + expected = onp.stack([gfun(operand[(slice(None),) * axis + (i,)], idxs) + for i in range(operand.shape[axis])]) + self.assertAllClose(ans, expected, check_dtypes=False) + @parameterized.named_parameters( {"testcase_name": "_shape={}_axis={}_idxs={}_dnums={}_slice_sizes={}".format( jtu.format_shape_dtype_string(shape, dtype), axis, idxs, dnums, @@ -606,6 +639,42 @@ def testGatherBatchedIndices(self, axis, shape, dtype, idxs, dnums, for i in range(idxs.shape[axis])]) self.assertAllClose(ans, expected, check_dtypes=False) + @parameterized.named_parameters( + {"testcase_name": "_shape={}_axis={}_idxs={}_dnums={}_slice_sizes={}".format( + jtu.format_shape_dtype_string(shape, dtype), axis, idxs, dnums, + slice_sizes), + "axis": axis, "shape": shape, "dtype": dtype, "idxs": idxs, "dnums": dnums, + "slice_sizes": slice_sizes, "rng": rng, "rng_idx": rng_idx} + for dtype in [onp.float32, onp.float64] + for axis, shape, idxs, dnums, slice_sizes in [ + (0, (5,), onp.array([[0, 2], [1, 3]]), lax.GatherDimensionNumbers( + offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,), + index_vector_dim=1), (1,)), + (1, (10,), onp.array([[0, 0, 0], [0, 2, 1]]).T, + lax.GatherDimensionNumbers( + offset_dims=(1,), collapsed_slice_dims=(), start_index_map=(0,), + index_vector_dim=1), (2,)), + (1, (10, 5), onp.array([[0, 2, 1], [0, 3, 3]]).T, + lax.GatherDimensionNumbers( + offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,), + index_vector_dim=1), (1, 3)), + (0, (10, 5), onp.array([[[0, 2], [1, 0]], + [[1, 2], [0, 3]]]), lax.GatherDimensionNumbers( + offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0, 1), + index_vector_dim=1), (1, 3)), + ] + for rng_idx in [jtu.rand_int(max(shape))] + for rng in 
[jtu.rand_default()]) + def testGatherGradBatchedIndices(self, axis, shape, dtype, idxs, dnums, + slice_sizes, rng, rng_idx): + fun = partial(lax.gather, dimension_numbers=dnums, slice_sizes=slice_sizes) + gfun = grad(lambda x, idx: np.sum(np.sin(fun(x, idx)))) + operand = rng(shape, dtype) + ans = vmap(gfun, (None, axis))(operand, idxs) + expected = onp.stack([gfun(operand, idxs[(slice(None),) * axis + (i,)]) + for i in range(idxs.shape[axis])]) + self.assertAllClose(ans, expected, check_dtypes=False) + @parameterized.named_parameters( {"testcase_name": "_shape={}_op_axis={}_idxs_axis={}_idxs={}_dnums={}_slice_sizes={}".format( jtu.format_shape_dtype_string(shape, dtype), op_axis, idxs_axis, idxs, @@ -644,6 +713,45 @@ def testGatherBatchedBoth(self, op_axis, idxs_axis, shape, dtype, idxs, dnums, for i in range(idxs.shape[idxs_axis])]) self.assertAllClose(ans, expected, check_dtypes=False) + @parameterized.named_parameters( + {"testcase_name": "_shape={}_op_axis={}_idxs_axis={}_idxs={}_dnums={}_slice_sizes={}".format( + jtu.format_shape_dtype_string(shape, dtype), op_axis, idxs_axis, idxs, + dnums, slice_sizes), + "op_axis": op_axis, "idxs_axis": idxs_axis, "shape": shape, "dtype": + dtype, "idxs": idxs, "dnums": dnums, "slice_sizes": slice_sizes, + "rng": rng, "rng_idx": rng_idx} + for dtype in [onp.float32, onp.int32] + for op_axis, idxs_axis, shape, idxs, dnums, slice_sizes in [ + (0, 0, (2, 5), onp.array([[0, 2], [1, 3]]), lax.GatherDimensionNumbers( + offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,), + index_vector_dim=1), (1,)), + (1, 1, (10, 2), onp.array([[0, 0, 0], [0, 2, 1]]).T, + lax.GatherDimensionNumbers( + offset_dims=(1,), collapsed_slice_dims=(), start_index_map=(0,), + index_vector_dim=1), (2,)), + (0, 1, (2, 10, 5,), onp.array([[0, 2, 1], [0, 3, 3]]).T, + lax.GatherDimensionNumbers( + offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,), + index_vector_dim=1), (1, 3)), + (2, 0, (10, 5, 2), onp.array([[[0, 2], [1, 0]], + [[1, 0], [2, 0]]]), lax.GatherDimensionNumbers( + offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0, 1), + index_vector_dim=1), (1, 3)), + ] + for rng_idx in [jtu.rand_int(max(shape))] + for rng in [jtu.rand_default()]) + def testGatherGradBatchedBoth(self, op_axis, idxs_axis, shape, dtype, idxs, dnums, + slice_sizes, rng, rng_idx): + fun = partial(lax.gather, dimension_numbers=dnums, slice_sizes=slice_sizes) + gfun = grad(lambda x, idx: np.sum(np.sin(fun(x, idx)))) + operand = rng(shape, dtype) + assert operand.shape[op_axis] == idxs.shape[idxs_axis] + ans = vmap(gfun, (op_axis, idxs_axis))(operand, idxs) + expected = onp.stack([gfun(operand[(slice(None),) * op_axis + (i,)], + idxs[(slice(None),) * idxs_axis + (i,)]) + for i in range(idxs.shape[idxs_axis])]) + self.assertAllClose(ans, expected, check_dtypes=False) + def testNumpyIndexing1(self): a = np.arange(2 * 3 * 4).reshape((2, 3, 4)) ind = onp.array([[0, 1],
lax.scatter batching (vmap) rule. Follow-up from #246.
2019-02-11T18:52:17
google/jax
360
google__jax-360
[ "357" ]
3e58e74280d1f171a77806204f4754b2b026f5be
diff --git a/jax/interpreters/xla.py b/jax/interpreters/xla.py --- a/jax/interpreters/xla.py +++ b/jax/interpreters/xla.py @@ -135,20 +135,23 @@ def write(v, node): env = {} consts_env = dict(zip(jaxpr.constvars, const_vals)) write(core.unitvar, c.Tuple()) - map(write, jaxpr.constvars, map(c.Constant, const_vals)) - map(write, jaxpr.freevars, map(c.ParameterWithShape, freevar_shapes)) + if const_vals: + map(write, jaxpr.constvars, map(c.Constant, const_vals)) + map(write, jaxpr.freevars, map(c.ParameterWithShape, freevar_shapes)) + else: + all_freevars = it.chain(jaxpr.constvars, jaxpr.freevars) + map(write, all_freevars, map(c.ParameterWithShape, freevar_shapes)) map(write, jaxpr.invars, map(c.ParameterWithShape, arg_shapes)) for eqn in jaxpr.eqns: in_nodes = map(read, eqn.invars) in_shapes = map(c.GetShape, in_nodes) - subcs = [jaxpr_computation(subjaxpr, - [consts_env[b] for b in const_bindings], - map(c.GetShape, map(read, freevar_bindings)), + subcs = [jaxpr_computation(subjaxpr, (), + map(c.GetShape, map(read, const_bindings + freevar_bindings)), *in_shapes) for subjaxpr, const_bindings, freevar_bindings in eqn.bound_subjaxprs] - subfuns = [(subc, tuple(map(read, freevar_bindings))) - for subc, (_, _, freevar_bindings) + subfuns = [(subc, tuple(map(read, const_bindings + freevar_bindings))) + for subc, (_, const_bindings, freevar_bindings) in zip(subcs, eqn.bound_subjaxprs)] ans = translation_rule(eqn.primitive)(c, *(subfuns + in_nodes), **eqn.params) out_nodes = xla_destructure(c, ans) if eqn.destructure else [ans]
diff --git a/tests/lax_test.py b/tests/lax_test.py --- a/tests/lax_test.py +++ b/tests/lax_test.py @@ -1142,6 +1142,38 @@ def loop_body(state): self.assertEqual(cloop(3, limit, 1), limit - 3) assert not effect[0] + def testWhileWithClosureJit(self): + + def loop(init, local_limit, inc): + + def loop_cond(state): + pos, _ = state + return lax.lt(pos, local_limit) + + def loop_body(state): + effect[0] = True + pos, count = state + f = lambda pos, inc: (lax.add(pos, 1), lax.add(count, inc)) + return api.jit(f)(pos, inc) + + result = lax._while_loop(loop_cond, loop_body, (init, 0)) + _, count = result + return count + + cloop = api.jit(loop) + + limit = 10 + effect = [False] + self.assertEqual(loop(2, limit, 1), limit - 2) + assert effect[0] + effect[0] = False + self.assertEqual(cloop(2, limit, 1), limit - 2) + assert effect[0] + effect[0] = False + self.assertEqual(cloop(2, limit, 1), limit - 2) + self.assertEqual(cloop(3, limit, 1), limit - 3) + assert not effect[0] + def testNestedWhileWithDynamicUpdateSlice(self): num = 5
jit inside while_loop causes an error. I'll help generate a repro tomorrow.
2019-02-12T19:41:00
google/jax
383
google__jax-383
[ "347" ]
8791f91b0d245fe3d64e8c514c32e7c0aa1dc034
diff --git a/jax/lax.py b/jax/lax.py --- a/jax/lax.py +++ b/jax/lax.py @@ -493,6 +493,9 @@ def stop_gradient(x): return stop_gradient_p.bind(x) +def _safe_mul(x, y): return safe_mul_p.bind(x, y) + + def psum(x, axis_name): return psum_p.bind(x, axis_name=axis_name) @@ -957,11 +960,18 @@ def _conj_transpose_rule(t, x, input_dtype): # TODO handle broadcasting pow_p = standard_binop([_float | _complex, _float | _complex], 'pow') -ad.defjvp(pow_p, - lambda g, x, y: mul(_brcast(g, y), mul(y, pow(x, select( - eq(y, _zeros(y)), _ones(y), sub(y, _ones(y)))))), - lambda g, x, y: mul(_brcast(g, x), - mul(log(_replace_zero(x)), pow(x, y)))) + +def pow_jvp_lhs(g, x, y): + # we call _safe_mul here so that we get the behavior 0*inf = 0, since when a + # coefficient in `g` is zero we want to keep it at zero, not produce a nan. + # see https://github.com/google/jax/pull/383 + jac = mul(y, pow(x, select(eq(y, _zeros(y)), _ones(y), sub(y, _ones(y))))) + return _safe_mul(_brcast(g, y), jac) + +def pow_jvp_rhs(g, x, y): + return mul(_brcast(g, x), mul(log(_replace_zero(x)), pow(x, y))) + +ad.defjvp(pow_p, pow_jvp_lhs, pow_jvp_rhs) _replace_zero = lambda x: select(eq(x, _const(x, 0)), _ones(x), x) not_p = standard_unop(_int | _bool, 'not') @@ -992,6 +1002,20 @@ def _add_transpose(t, x, y): ad.defbilinear_broadcasting(_brcast, mul_p, mul, mul) # TODO +def _safe_mul_translation_rule(c, x, y): + dtype = c.GetShape(x).numpy_dtype() + zero = c.Constant(onp.array(0, dtype=dtype)) + out_shape = tuple(onp.maximum(c.GetShape(x).dimensions(), + c.GetShape(y).dimensions())) + return c.Select(c.Or(c.Eq(x, zero), c.Eq(y, zero)), + c.Broadcast(zero, out_shape), + c.Mul(x, y)) + +safe_mul_p = standard_binop([_num, _num], 'safe_mul', + translation_rule=_safe_mul_translation_rule) +ad.defbilinear_broadcasting(_brcast, safe_mul_p, _safe_mul, _safe_mul) + + def _div_transpose_rule(cotangent, x, y): assert x is None and y is not None res = ad_util.zero if cotangent is ad_util.zero else div(cotangent, y) diff --git a/jax/lib/xla_bridge.py b/jax/lib/xla_bridge.py --- a/jax/lib/xla_bridge.py +++ b/jax/lib/xla_bridge.py @@ -348,7 +348,7 @@ def NumpyArrayConstant(self, value, canonicalize_types=True): def ConstantLike(self, example_value, value, canonicalize_types=True): example_value = onp.asarray(example_value) - return self.Constant(onp.array(value).astype(example_value.dtype)) + return self.Constant(onp.array(value, dtype=example_value.dtype)) def Constant(self, py_val, canonicalize_types=True): """Translate constant `py_val` to a constant for this ComputationBuilder.
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -1223,6 +1223,18 @@ def testSymmetrizeDtypePromotion(self): jax_numpy_result = ((x + x.T) / 2).dtype self.assertEqual(orig_numpy_result, jax_numpy_result) + def testIssue347(self): + # https://github.com/google/jax/issues/347 + def test_fail(x): + x = lnp.sqrt(lnp.sum(x ** 2, axis=1)) + ones = lnp.ones_like(x) + x = lnp.where(x > 0.5, x, ones) + return lnp.sum(x) + + x = lnp.array([[1, 2], [3, 4], [0, 0]], dtype=lnp.float64) + result = api.grad(test_fail)(x) + assert not onp.any(onp.isnan(result)) + if __name__ == "__main__": absltest.main()
Issue taking gradients through np.where when one of the branches is nan. If you take gradients through np.where and one of the branches is nan, the gradient will be nan even if the branch is not taken. Here's a repro:
```
def test_fail(x):
  x = np.sqrt(np.sum(x ** 2, axis=1))
  ones = np.ones_like(x)
  x = np.where(x > 0.5, x, ones)
  return np.sum(x)

def test_succeed(x):
  x = np.sum(x ** 2, axis=1)
  ones = np.ones_like(x)
  x = np.sqrt(np.where(x > 0.5, x, ones))
  return np.sum(x)

x = np.array([[1, 2], [3, 4], [0, 0]], dtype=np.float64)

print('Test np.sqrt then np.where = {}'.format(test_fail(x)))
print('Gradient:')
print(grad(test_fail)(x))

print('Test np.where then np.sqrt = {}'.format(test_succeed(x)))
print('Gradient:')
print(grad(test_succeed)(x))
```
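For reference, the usual way to make the failing version differentiable without reordering the whole computation is the "safe input" double-where pattern: feed the untaken branch a benign value before the operation whose derivative blows up, then select. A sketch adapted from the repro above (the `> 0.25` test is just `sqrt(s) > 0.5` rewritten on the squared norm):
```python
def test_safe(x):
    s = np.sum(x ** 2, axis=1)
    ones = np.ones_like(s)
    s_safe = np.where(s > 0.25, s, ones)          # untaken branch sees 1.0, not 0.0
    out = np.where(s > 0.25, np.sqrt(s_safe), ones)
    return np.sum(out)

print(grad(test_safe)(x))    # finite everywhere, including the all-zero row
```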
2019-02-15T15:06:30
google/jax
384
google__jax-384
[ "380" ]
708c764497a6ed8a3a1feaee708bc8a24e699b2c
diff --git a/jax/lax.py b/jax/lax.py --- a/jax/lax.py +++ b/jax/lax.py @@ -1136,7 +1136,7 @@ def _conv_general_dilated_transpose_lhs( lhs_sdims, rhs_sdims, out_sdims = map(_conv_sdims, dimension_numbers) lhs_spec, rhs_spec, out_spec = dimension_numbers t_rhs_spec = _conv_transpose(rhs_spec) - trans_dimension_numbers = ConvDimensionNumbers(lhs_spec, t_rhs_spec, out_spec) + trans_dimension_numbers = ConvDimensionNumbers(out_spec, t_rhs_spec, lhs_spec) padding = _conv_general_vjp_lhs_padding( onp.take(lhs_shape, lhs_sdims), onp.take(rhs_shape, rhs_sdims), window_strides, onp.take(g.shape, out_sdims), padding, lhs_dilation, @@ -3194,7 +3194,10 @@ def remaining(original, *removed_lists): blacklist = set(itertools.chain(*removed_lists)) return [i for i in original if i not in blacklist] - +# lhs_spec and out_spec are lists containing +# [batch dim, feature dim, spatial dims ...] +# rhs_spec is a list containing: +# [out feature dim, in feature dim, spatial dims ...] ConvDimensionNumbers = collections.namedtuple( "ConvDimensionNumbers", ["lhs_spec", "rhs_spec", "out_spec"])
diff --git a/tests/lax_test.py b/tests/lax_test.py --- a/tests/lax_test.py +++ b/tests/lax_test.py @@ -422,10 +422,11 @@ def numpy_fun(lhs, rhs): for lhs_dilation, rhs_dilation in itertools.product( [(1, 1), (1, 2)], repeat=2) for rng in [jtu.rand_small()] - for dim_nums, perms in [(("NCHW", "OIHW", "NCHW"), - ([0, 1, 2, 3], [0, 1, 2, 3])), - (("NHWC", "HWIO", "NHWC"), - ([0, 2, 3, 1], [2, 3, 1, 0]))])) + for dim_nums, perms in [ + (("NCHW", "OIHW", "NCHW"), ([0, 1, 2, 3], [0, 1, 2, 3])), + (("NHWC", "HWIO", "NHWC"), ([0, 2, 3, 1], [2, 3, 1, 0])), + (("NCHW", "HWIO", "NHWC"), ([0, 1, 2, 3], [2, 3, 1, 0])), + ])) def testConvGeneralDilated(self, lhs_shape, rhs_shape, dtype, strides, padding, lhs_dilation, rhs_dilation, dimension_numbers, perms, rng): @@ -1756,7 +1757,8 @@ def testConvWithGeneralPaddingGrad(self, lhs_shape, rhs_shape, dtype, strides, for rng in [jtu.rand_default()] for dim_nums, perms in [ (("NCHW", "OIHW", "NCHW"), ([0, 1, 2, 3], [0, 1, 2, 3])), - (("NHWC", "HWIO", "NHWC"), ([0, 2, 3, 1], [2, 3, 1, 0])) + (("NHWC", "HWIO", "NHWC"), ([0, 2, 3, 1], [2, 3, 1, 0])), + (("NHWC", "OIHW", "NCHW"), ([0, 2, 3, 1], [0, 1, 2, 3])) ])) @jtu.skip_on_devices("tpu") def testConvGeneralDilatedGrad(self, lhs_shape, rhs_shape, dtype, strides,
MAML Gradients + Convnets don't work
The attached python code snippet ([maml-cnn.txt](https://github.com/google/jax/files/2867892/maml-cnn.txt)) implements a MAML gradient update on 1) an MLP and 2) a CNN. When MODEL_TYPE = 'mlp', everything works. When MODEL_TYPE = 'conv', I get the following error:

RuntimeError: Invalid argument: Expected LHS feature dimension (value 1) to be a multiple of feature_group_count (value 1), and LHS feature dimension / feature_group_count = RHS feature dimension (value 40); got <conv>(f32[1,1,40,40], f32[10,1,1,40]) Dimension numbers: {kernel_input_feature_dimension: 3 kernel_spatial_dimensions: 1 kernel_spatial_dimensions: 2 input_batch_dimension: 3 output_batch_dimension: 2 output_feature_dimension: 3 input_spatial_dimensions: 1 input_spatial_dimensions: 2 output_spatial_dimensions: 0 output_spatial_dimensions: 1 }.: @ 0x7ff2befc9214 xla::XlaBuilder::ReportErrorOrReturn() @ 0x7ff2befd6eaa xla::ConvGeneralDilated() @ 0x7ff2b82e4cb8 xla::swig::LocalComputationBuilder::ConvGeneralDilated() @ 0x7ff029565901 _wrap_LocalComputationBuilder_ConvGeneralDilated() @ 0x4bc454 PyEval_EvalFrameEx

Note that for both model architectures, the MAML objective loss can be computed. It's only when attempting to take a gradient of the CNN-based loss that things break.
[jax_maml_images.txt](https://github.com/google/jax/files/2867918/jax_maml_images.txt) Also, here is another MAML implementation with a CNN which produces a different error: `TypeError: reshape total size must be unchanged, got new_sizes (1, 40, 25, 25, 32) for shape (25, 54, 32, 40).` When there is only one conv layer there is no error (though I haven't verified that the gradient is correct). The error only appears when there are 2+ conv layers.
2019-02-15T17:55:11
google/jax
391
google__jax-391
[ "387" ]
8791f91b0d245fe3d64e8c514c32e7c0aa1dc034
diff --git a/jax/interpreters/batching.py b/jax/interpreters/batching.py --- a/jax/interpreters/batching.py +++ b/jax/interpreters/batching.py @@ -241,11 +241,13 @@ def reducer_batcher(prim, batched_args, batch_dims, axes, **params): def add_batched(batched_args, batch_dims): bdx, bdy = batch_dims + xs, ys = batched_args if bdx == bdy: - xs, ys = batched_args return add_jaxvals_p.bind(xs, ys), bdx else: - xs, ys = map(bdim_at_front, batched_args, batch_dims) + sz = (dimsize(bdx, xs) | dimsize(bdy, ys)).pop() + move_bdim = partial(bdim_at_front, broadcast_size=sz, force_broadcast=True) + xs, ys = map(move_bdim, batched_args, batch_dims) return add_jaxvals_p.bind(xs, ys), 0 primitive_batchers[add_jaxvals_p] = add_batched
diff --git a/tests/batching_test.py b/tests/batching_test.py --- a/tests/batching_test.py +++ b/tests/batching_test.py @@ -810,6 +810,23 @@ def f(scale): expected = onp.stack([grad(f)(scale) for scale in scales]) self.assertAllClose(ans, expected, check_dtypes=False) + def testIssue387(self): + # https://github.com/google/jax/issues/387 + R = onp.random.RandomState(0).rand(100, 2) + + def dist_sq(R): + dR = R[:, np.newaxis, :] - R[np.newaxis, :, :] + zero = np.zeros_like(dR) + dR = dR - np.where(np.abs(dR) < 0.5, zero, 0.5 * np.sign(dR)) + return np.sum(dR ** 2, axis=2) + + @jit + def f(R): + dr = dist_sq(R) + return np.sum(R ** 2) + + H = hessian(f)(R) # don't crash on UnshapedArray + if __name__ == '__main__': absltest.main()
Hessian calculation finds an UnshapedArray when jitted. The code below throws an error that it finds an unshaped array only when f is jitted and the line `dR = dR - np.where(np.abs(dR) < 0.5, zero, 0.5 * np.sign(dR))` is present. ``` R = npr.rand(100, 2) def dist_sq(R): dR = R[:, np.newaxis, :] - R[np.newaxis, :, :] zero = np.zeros_like(dR) dR = dR - np.where(np.abs(dR) < 0.5, zero, 0.5 * np.sign(dR)) return np.sum(dR ** 2, axis=2) @jit def f(R): dr = dist_sq(R) return np.sum(R ** 2) H = hessian(f)(R) ``` Here is the full trace. ``` google3/third_party/py/jax/api.py in jacfun(*args, **kwargs) 183 f_partial, dyn_args = argnums_partial(f, argnums, args) 184 pushfwd = partial(jvp, f_partial, dyn_args) --> 185 y, jac = vmap(pushfwd, out_axes=(None, -1))(_std_basis(dyn_args)) 186 example_args = dyn_args[0] if isinstance(argnums, int) else dyn_args 187 return tree_map(partial(_unravel_array_into_pytree, example_args, -1), jac) google3/third_party/py/jax/api.py in batched_fun(*args, **kwargs) 273 in_flat, in_trees = unzip2(map(pytree_to_jaxtupletree, args)) 274 jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(f, in_trees) --> 275 out_flat = batching.batch(jaxtree_fun, in_flat, in_axes_, out_axes) 276 return build_tree(out_tree(), out_flat) 277 google3/third_party/py/jax/interpreters/batching.py in batch(fun, in_vals, in_dims, out_dim_target) 41 return fun.call_wrapped(*in_vals), None # no mapped dimensions 42 elif len(sizes) == 1: ---> 43 out_val, out_dim = batch_transform(fun).call_wrapped(in_vals, in_dims) 44 return moveaxis(sizes.pop(), out_dim_target, out_dim, out_val) 45 else: google3/third_party/py/jax/linear_util.py in call_wrapped(self, *args) 84 85 del gen ---> 86 ans = self.f(*args, **self.kwargs) 87 del args 88 while stack: google3/third_party/py/jax/api.py in jvp(fun, primals, tangents) 350 ps_flat, ts_flat, in_trees = unzip3(map(trim_arg, primals, tangents)) 351 jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(fun, in_trees) --> 352 out_primal, out_tangent = ad.jvp(jaxtree_fun).call_wrapped(ps_flat, ts_flat) 353 return (build_tree(out_tree(), out_primal), build_tree(out_tree(), out_tangent)) 354 google3/third_party/py/jax/linear_util.py in call_wrapped(self, *args) 84 85 del gen ---> 86 ans = self.f(*args, **self.kwargs) 87 del args 88 while stack: google3/third_party/py/jax/api.py in jacfun(*args, **kwargs) 195 f = lu.wrap_init(fun, kwargs) 196 f_partial, dyn_args = argnums_partial(f, argnums, args) --> 197 y, pullback = vjp(f_partial, *dyn_args) 198 jac = vmap(pullback)(_std_basis(y)) 199 jac = jac[0] if isinstance(argnums, int) else jac google3/third_party/py/jax/api.py in vjp(fun, *primals) 378 check_args(primals_flat) 379 jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(fun, in_trees) --> 380 out_primal, out_vjp = ad.vjp(jaxtree_fun, primals_flat) 381 out_tree = out_tree() 382 out_primal_py = build_tree(out_tree, out_primal) google3/third_party/py/jax/interpreters/ad.py in vjp(traceable, primals) 72 73 def vjp(traceable, primals): ---> 74 out_primal, pval, jaxpr, consts = linearize(traceable, *primals) 75 def vjp_(ct): 76 ct = ignore_consts(ct, pval) google3/third_party/py/jax/interpreters/ad.py in linearize(traceable, *primals) 65 in_pvals = (pe.PartialVal((None, pack(primals))), 66 pe.PartialVal((core.AbstractTuple(tangent_avals), core.unit))) ---> 67 jaxpr, out_pval, consts = pe.trace_to_jaxpr(jvpfun, in_pvals) 68 pval_primal, pval_tangent = unpair_pval(out_pval) 69 aval_primal, const_primal = pval_primal 
google3/third_party/py/jax/interpreters/partial_eval.py in trace_to_jaxpr(fun, pvals, **kwargs) 254 with new_master(JaxprTrace) as master: 255 fun = trace_to_subjaxpr(fun, master) --> 256 jaxpr, (out_pval, consts, env) = fun.call_wrapped(pvals, **kwargs) 257 assert not env 258 del master google3/third_party/py/jax/linear_util.py in call_wrapped(self, *args) 84 85 del gen ---> 86 ans = self.f(*args, **self.kwargs) 87 del args 88 while stack: google3/third_party/py/jax/api.py in f_jitted(*args, **kwargs) 89 check_args(jaxtupletree_args) 90 jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(f, in_trees) ---> 91 jaxtupletree_out = xla.xla_call(jaxtree_fun, *jaxtupletree_args) 92 return build_tree(out_tree(), jaxtupletree_out) 93 google3/third_party/py/jax/core.py in call_bind(primitive, f, *args, **kwargs) 534 else: 535 tracers = map(top_trace.full_raise, args) --> 536 ans = full_lower(top_trace.process_call(primitive, f, tracers, kwargs)) 537 return apply_todos(env_trace_todo(), ans) 538 google3/third_party/py/jax/interpreters/ad.py in process_call(self, call_primitive, f, tracers, params) 191 result = call_primitive.bind(f, pack(primals), nonzero_tangents, **new_params) 192 else: --> 193 result = call_primitive.bind(f, pack(primals), nonzero_tangents, **params) 194 primal_out, tangent_out = build_tree(out_tree_def(), result) 195 return JVPTracer(self, primal_out, tangent_out) google3/third_party/py/jax/core.py in call_bind(primitive, f, *args, **kwargs) 534 else: 535 tracers = map(top_trace.full_raise, args) --> 536 ans = full_lower(top_trace.process_call(primitive, f, tracers, kwargs)) 537 return apply_todos(env_trace_todo(), ans) 538 google3/third_party/py/jax/interpreters/partial_eval.py in process_call(self, call_primitive, f, tracers, params) 79 in_pvs, in_consts = unzip2([t.pval for t in tracers]) 80 fun, aux = partial_eval(f, self, in_pvs) ---> 81 out_pv_const, consts = call_primitive.bind(fun, *in_consts, **params) 82 out_pv, jaxpr, env = aux() 83 const_tracers = map(self.new_instantiated_const, consts) google3/third_party/py/jax/core.py in call_bind(primitive, f, *args, **kwargs) 534 else: 535 tracers = map(top_trace.full_raise, args) --> 536 ans = full_lower(top_trace.process_call(primitive, f, tracers, kwargs)) 537 return apply_todos(env_trace_todo(), ans) 538 google3/third_party/py/jax/interpreters/ad.py in process_call(self, call_primitive, f, tracers, params) 191 result = call_primitive.bind(f, pack(primals), nonzero_tangents, **new_params) 192 else: --> 193 result = call_primitive.bind(f, pack(primals), nonzero_tangents, **params) 194 primal_out, tangent_out = build_tree(out_tree_def(), result) 195 return JVPTracer(self, primal_out, tangent_out) google3/third_party/py/jax/core.py in call_bind(primitive, f, *args, **kwargs) 534 else: 535 tracers = map(top_trace.full_raise, args) --> 536 ans = full_lower(top_trace.process_call(primitive, f, tracers, kwargs)) 537 return apply_todos(env_trace_todo(), ans) 538 google3/third_party/py/jax/interpreters/batching.py in process_call(self, call_primitive, f, tracers, params) 129 else: 130 f, dim_out = batch_subtrace(f, self.master, dims) --> 131 val_out = call_primitive.bind(f, *vals, **params) 132 return BatchTracer(self, val_out, dim_out()) 133 google3/third_party/py/jax/core.py in call_bind(primitive, f, *args, **kwargs) 531 if top_trace is None: 532 with new_sublevel(): --> 533 ans = primitive.impl(f, *args, **kwargs) 534 else: 535 tracers = map(top_trace.full_raise, args) google3/third_party/py/jax/interpreters/xla.py in 
xla_call_impl(fun, *args) 420 fun, out_tree = flatten_fun(fun, in_trees) 421 --> 422 compiled_fun = xla_callable(fun, *map(abstractify, flat_args)) 423 flat_ans = compiled_fun(*flat_args) 424 google3/third_party/py/jax/linear_util.py in memoized_fun(f, *args) 144 if len(cache) > max_size: 145 cache.popitem(last=False) --> 146 ans = call(f, *args) 147 cache[key] = (ans, f) 148 return ans google3/third_party/py/jax/interpreters/xla.py in xla_callable(fun, *abstract_args) 432 pvals = [PartialVal((aval, core.unit)) for aval in abstract_args] 433 with core.new_master(JaxprTrace, True) as master: --> 434 jaxpr, (pval, consts, env) = trace_to_subjaxpr(fun, master).call_wrapped(pvals) 435 assert not env # no subtraces here (though cond might eventually need them) 436 compiled, result_shape = compile_jaxpr(jaxpr, consts, *abstract_args) google3/third_party/py/jax/linear_util.py in call_wrapped(self, *args) 84 85 del gen ---> 86 ans = self.f(*args, **self.kwargs) 87 del args 88 while stack: <ipython-input-72-ce1d436081cc> in f(R) 8 @jit 9 def f(R): ---> 10 dr = dist_sq(R) 11 return np.sum(R ** 2) 12 <ipython-input-72-ce1d436081cc> in dist_sq(R) 4 zero = np.zeros_like(dR) 5 dR = dR - np.where(np.abs(dR) < 0.5, zero, 0.5 * np.sign(dR)) ----> 6 return np.sum(dR ** 2, axis=2) 7 8 @jit google3/third_party/py/jax/core.py in __pow__(self, other) 231 def __mod__(self, other): return self.aval._mod(self, other) 232 def __rmod__(self, other): return self.aval._rmod(self, other) --> 233 def __pow__(self, other): return self.aval._pow(self, other) 234 def __rpow__(self, other): return self.aval._rpow(self, other) 235 def __matmul__(self, other): return self.aval._matmul(self, other) google3/third_party/py/jax/numpy/lax_numpy.py in <lambda>(x, y) 243 def _one_to_one_binop(numpy_fn, lax_fn, promote_like=False): 244 if promote_like: --> 245 fn = lambda x, y: lax_fn(*_promote_args_like(numpy_fn, x, y)) 246 else: 247 fn = lambda x, y: lax_fn(*_promote_args(numpy_fn.__name__, x, y)) google3/third_party/py/jax/lax.py in pow(x, y) 103 def conj(x): return conj_p.bind(x, input_dtype=_dtype(x)) 104 def abs(x): return abs_p.bind(x) --> 105 def pow(x, y): return pow_p.bind(x, y) 106 107 def bitwise_not(x): return not_p.bind(x) google3/third_party/py/jax/core.py in bind(self, *args, **kwargs) 76 77 tracers = map(top_trace.full_raise, args) ---> 78 out_tracer = top_trace.process_primitive(self, tracers, kwargs) 79 return full_lower(out_tracer) 80 google3/third_party/py/jax/interpreters/ad.py in process_primitive(self, primitive, tracers, params) 178 "Forward-mode differentiation rule for '{}' not implemented" 179 .format(primitive)) --> 180 primal_out, tangent_out = jvp(primals_in, tangents_in, **params) 181 return JVPTracer(self, primal_out, tangent_out) 182 google3/third_party/py/jax/interpreters/ad.py in standard_jvp(jvprules, primitive, primals, tangents, **params) 299 300 def standard_jvp(jvprules, primitive, primals, tangents, **params): --> 301 val_out = primitive.bind(*primals, **params) 302 tangents_out = (rule(t, *primals, **params) for rule, t in zip(jvprules, tangents) 303 if rule is not None and t is not zero) google3/third_party/py/jax/core.py in bind(self, *args, **kwargs) 76 77 tracers = map(top_trace.full_raise, args) ---> 78 out_tracer = top_trace.process_primitive(self, tracers, kwargs) 79 return full_lower(out_tracer) 80 google3/third_party/py/jax/interpreters/ad.py in process_primitive(self, primitive, tracers, params) 178 "Forward-mode differentiation rule for '{}' not implemented" 179 
.format(primitive)) --> 180 primal_out, tangent_out = jvp(primals_in, tangents_in, **params) 181 return JVPTracer(self, primal_out, tangent_out) 182 google3/third_party/py/jax/interpreters/ad.py in standard_jvp(jvprules, primitive, primals, tangents, **params) 302 tangents_out = (rule(t, *primals, **params) for rule, t in zip(jvprules, tangents) 303 if rule is not None and t is not zero) --> 304 return val_out, reduce(add_tangents, tangents_out, zero) 305 306 google3/third_party/py/jax/interpreters/ad.py in <genexpr>((rule, t)) 301 val_out = primitive.bind(*primals, **params) 302 tangents_out = (rule(t, *primals, **params) for rule, t in zip(jvprules, tangents) --> 303 if rule is not None and t is not zero) 304 return val_out, reduce(add_tangents, tangents_out, zero) 305 google3/third_party/py/jax/lax.py in <lambda>(g, x, y) 959 pow_p = standard_binop([_float | _complex, _float | _complex], 'pow') 960 ad.defjvp(pow_p, --> 961 lambda g, x, y: mul(_brcast(g, y), mul(y, pow(x, select( 962 eq(y, _zeros(y)), _ones(y), sub(y, _ones(y)))))), 963 lambda g, x, y: mul(_brcast(g, x), google3/third_party/py/jax/lax.py in _brcast(x, *others) 836 # We don't need full numpy broadcasting, but otherwise the logic is the same 837 # so we reuse the broadcast_shapes function after filtering out scalars. --> 838 shapes = tuple(filter(None, map(onp.shape, (x,) + others))) 839 shape = shapes and broadcast_shapes(*shapes) 840 if onp.shape(x) != shape: google3/third_party/py/numpy/core/fromnumeric.py in shape(a) 1602 """ 1603 try: -> 1604 result = a.shape 1605 except AttributeError: 1606 result = asarray(a).shape google3/third_party/py/jax/core.py in __getattr__(self, name) 262 263 try: --> 264 attr = getattr(self.aval, name) 265 except KeyError: 266 raise AttributeError( google3/third_party/py/jax/interpreters/batching.py in aval(self) 82 @property 83 def aval(self): ---> 84 batched_aval = get_aval(self.val) 85 return remove_batch_dim_from_aval(self.batch_dim, batched_aval) 86 google3/third_party/py/jax/interpreters/batching.py in get_aval(x) 152 def get_aval(x): 153 if isinstance(x, Tracer): --> 154 return raise_to_shaped(x.aval) 155 else: 156 return shaped_aval(x) google3/third_party/py/jax/interpreters/batching.py in raise_to_shaped(aval) 168 return ShapedArray(aval.shape, aval.dtype) 169 else: --> 170 raise TypeError(type(aval)) 171 172 def remove_batch_dim_from_aval(bdim, aval): TypeError: <class 'jax.abstract_arrays.UnshapedArray'> ```
2019-02-16T04:56:58
google/jax
399
google__jax-399
[ "75" ]
901a5e5291fa90ee488065366bc885f5edf84dc8
diff --git a/jax/abstract_arrays.py b/jax/abstract_arrays.py --- a/jax/abstract_arrays.py +++ b/jax/abstract_arrays.py @@ -161,9 +161,11 @@ def zeros_like_array(x): dtype = xla_bridge.canonicalize_dtype(onp.result_type(x)) return onp.broadcast_to(onp.array(0, dtype), onp.shape(x)) -array_types = [onp.ndarray, onp.float64, onp.float32, onp.complex64, - onp.complex128, onp.int64, onp.int32, onp.bool_, onp.uint64, - onp.uint32, complex, float, int, bool] +array_types = [onp.ndarray, onp.float64, onp.float32, onp.float16, + onp.complex64, onp.complex128, + onp.int64, onp.int32, onp.int16, onp.int8, + onp.bool_, onp.uint64, onp.uint32, onp.uint16, onp.uint8, + complex, float, int, bool] for t in array_types: core.pytype_aval_mappings[t] = ConcreteArray
float16 support. Add support for `np.float16`.
2019-02-17T22:20:29
google/jax
410
google__jax-410
[ "407" ]
febadd7354b6f132a175c07a1db62d60425e314e
diff --git a/jax/lax.py b/jax/lax.py --- a/jax/lax.py +++ b/jax/lax.py @@ -1016,10 +1016,16 @@ def _add_transpose(t, x, y): ad.defjvp(add_p, lambda g, x, y: _brcast(g, y), lambda g, x, y: _brcast(g, x)) ad.primitive_transposes[add_p] = _add_transpose + +def _sub_transpose(t, x, y): + assert x is None and y is None # computation must be linear, not affine + return [t, neg(t)] + sub_p = standard_binop([_num, _num], 'sub') ad.defjvp(sub_p, lambda g, x, y: _brcast(g, y), lambda g, x, y: _brcast(neg(g), x)) +ad.primitive_transposes[sub_p] = _sub_transpose mul_p = standard_binop([_num, _num], 'mul') ad.defbilinear_broadcasting(_brcast, mul_p, mul, mul) # TODO
Reverse-mode differentiation rule for 'sub' not implemented I get an error with the gradients of multivariate_normal.logpdf wrt the covariance (input and mean are fine). For example, this ```python import jax.numpy as np from jax import grad from jax.scipy.stats import multivariate_normal f = lambda x: multivariate_normal.logpdf(np.zeros(1), np.zeros(1), np.ones((1, 1))*x) grad(f)(1.) # fails ``` gives the error: ``` Traceback (most recent call last): File "[...]/jax/jax/interpreters/ad.py", line 149, in get_primitive_transpose return primitive_transposes[p] KeyError: sub During handling of the above exception, another exception occurred: Traceback (most recent call last): File "[...]", line 43, in <module> print(grad(f)(1.)) # fails File "[...]/jax/jax/api.py", line 149, in grad_f ans, g = value_and_grad_f(*args, **kwargs) File "[...]/jax/jax/api.py", line 186, in value_and_grad_f g = vjp_py(onp.ones((), onp.result_type(ans))) File "[...]/jax/jax/api_util.py", line 56, in apply_jaxtree_fun ans = fun(*args) File "[...]/jax/jax/api.py", line 457, in out_vjp_packed return out_vjp(cotangent_in) File "[...]/jax/jax/interpreters/ad.py", line 78, in vjp_ _, arg_cts = backward_pass(jaxpr, consts, (), dummy_primal_and_ct) File "[...]/jax/jax/interpreters/ad.py", line 135, in backward_pass cts_out = get_primitive_transpose(eqn.primitive)(ct_in, *invals, **eqn.params) File "[...]/jax/jax/interpreters/ad.py", line 152, in get_primitive_transpose "Reverse-mode differentiation rule for '{}' not implemented".format(p)) NotImplementedError: Reverse-mode differentiation rule for 'sub' not implemented ``` Also, I notice that the multivariate_normal density is implemented with matrix inverses and determinant rather than the [usual](https://github.com/scipy/scipy/blob/d681fc87f42bbc237d5b7e98fbfbfb5e3a51ae13/scipy/stats/_multivariate.py#L495) more efficient formulation in terms of the square root cov. Is there a reason for this?
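In plain terms, the missing piece the patch above supplies: subtraction is linear, and the transpose (reverse-mode) rule for z = x - y just routes the cotangent t through unchanged to x and negated to y. A restatement of that rule, stripped of the surrounding lax machinery:
```python
from jax import lax

def sub_transpose(t, x, y):
    # cotangent of x - y: +t flows to x, -t flows to y
    return [t, lax.neg(t)]
```
Once that rule is registered, the logpdf gradient in the repro above no longer hits the missing-'sub' error.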
2019-02-19T16:47:18
google/jax
422
google__jax-422
[ "48" ]
4ba7d517df80bdc4f31f30f96475d302f7c54f54
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -115,6 +115,8 @@ class ndarray(six.with_metaclass(_ArrayMeta, onp.ndarray)): complexfloating = onp.complexfloating floating = onp.floating integer = onp.integer +signedinteger = onp.signedinteger +unsignedinteger = onp.unsignedinteger iinfo = onp.iinfo finfo = onp.finfo @@ -284,7 +286,6 @@ def _one_to_one_binop(numpy_fn, lax_fn, promote_like=False): multiply = _one_to_one_binop(onp.multiply, lax.mul) not_equal = _one_to_one_binop(onp.not_equal, lax.ne) subtract = _one_to_one_binop(onp.subtract, lax.sub) -power = _one_to_one_binop(onp.power, lax.pow, True) arctan2 = _one_to_one_binop(onp.arctan2, lax.atan2, True) minimum = _one_to_one_binop(onp.minimum, lax.min) maximum = _one_to_one_binop(onp.maximum, lax.max) @@ -388,6 +389,28 @@ def _float_divmod(x1, x2): return lax.round(div), mod +@_wraps(onp.power) +def power(x1, x2): + x1 = asarray(x1) + x2 = asarray(x2) + x1, x2 = _promote_args_like(onp.power, x1, x2) + dtype = lax._dtype(x1) + if not issubdtype(dtype, integer): + return lax.pow(x1, x2) + + # Integer power => use binary exponentiation. + + # TODO(phawkins): add integer pow support to XLA. + bits = 6 # Anything more would overflow for any x1 > 1 + acc = ones(shape(x1), dtype=dtype) + for _ in xrange(bits): + acc = where(lax.bitwise_and(x2, _constant_like(x2, 1)), + lax.mul(acc, x1), acc) + x1 = lax.mul(x1, x1) + x2 = lax.shift_right_logical(x2, _constant_like(x2, 1)) + return acc + + @_wraps(onp.logaddexp) def logaddexp(x1, x2): x1, x2 = _promote_shapes(*_promote_to_result_dtype(onp.logaddexp, x1, x2))
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -92,7 +92,7 @@ def op_record(name, nargs, dtypes, shapes, rng, diff_modes, test_name=None, op_record("multiply", 2, number_dtypes, all_shapes, jtu.rand_default(), ["rev"]), op_record("negative", 1, number_dtypes, all_shapes, jtu.rand_default(), ["rev"]), op_record("not_equal", 2, number_dtypes, all_shapes, jtu.rand_some_equal(), ["rev"]), - op_record("power", 2, inexact_dtypes, all_shapes, jtu.rand_positive(), ["rev"]), + op_record("power", 2, number_dtypes, all_shapes, jtu.rand_positive(), ["rev"]), op_record("reciprocal", 1, inexact_dtypes, all_shapes, jtu.rand_default(), []), op_record("subtract", 2, number_dtypes, all_shapes, jtu.rand_default(), ["rev"]), op_record("sin", 1, number_dtypes, all_shapes, jtu.rand_default(), ["rev"]),
Unimplemented: binary integer op 'power'

```python
from jax import grad

def square(x):
    return x**2

val = 3
dfn = grad(square)
print(dfn(val))
```

I was surprised this threw an error. Changing it to `val = 3.0` works as expected.
This seems like an important operation to offer :) I think the binary integer pow function isn't implemented in XLA, since we seem to be getting [this error](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/compiler/xla/service/elemental_ir_emitter.cc#L1280) and the `kPow` opcode doesn't seem to appear in that list. Maybe just an oversight. I'll follow up with XLA folks. A second issue here is that `grad` should raise an error on non-floating argument types. @hawkinsp guessed that XLA's Pow HLO is meant to model `std::pow`, which apparently doesn't work on integer values either. We should solve this in JAX at the `jax.numpy` level.
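The patch above resolves this at the `jax.numpy` level by lowering integer powers to binary exponentiation. A plain-Python sketch of that technique, assuming a non-negative integer exponent:

```python
def int_pow(x, n):
    # binary exponentiation: O(log n) multiplies via repeated squaring
    acc = 1
    while n > 0:
        if n & 1:      # fold in the current power of x when this bit is set
            acc *= x
        x *= x         # square the base
        n >>= 1        # move to the next bit of the exponent
    return acc

assert int_pow(3, 2) == 9
```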
2019-02-21T13:22:57
google/jax
429
google__jax-429
[ "428" ]
6a843aaddcefa439ab828585b16036e7b99b2053
diff --git a/jax/lax.py b/jax/lax.py --- a/jax/lax.py +++ b/jax/lax.py @@ -503,12 +503,52 @@ def dynamic_update_slice(operand, update, start_indices): return dynamic_update_slice_p.bind(operand, update, start_indices, update_shape=update.shape) -def gather(operand, start_indices, dimension_numbers=None, slice_sizes=None): +def gather(operand, start_indices, dimension_numbers, slice_sizes): + """Gather operator. + + Wraps `XLA's Gather operator + <https://www.tensorflow.org/xla/operation_semantics#gather>`_. + + The semantics of gather are complicated, and its API might change in the + future. For most use cases, you should prefer `Numpy-style indexing + <https://docs.scipy.org/doc/numpy-1.16.0/reference/arrays.indexing.html>`_ + (e.g., `x[:, (1,4,7), ...]`), rather than using `gather` directly. + + Args: + operand: an array from which slices should be taken + start_indices: the indices at which slices should be taken + dimension_numbers: a `lax.GatherDimensionNumbers` object that describes + how dimensions of `operand`, `start_indices` and the output relate. + slice_sizes: the size of each slice. Must be a sequence of non-negative + integers with size equal to `ndim(operand)`. + + Returns: + An array containing the gather output. + """ return gather_p.bind( operand, start_indices, dimension_numbers=dimension_numbers, slice_sizes=tuple(slice_sizes), operand_shape=operand.shape) -def scatter_add(operand, scatter_indices, updates, dimension_numbers=None): +def scatter_add(operand, scatter_indices, updates, dimension_numbers): + """Scatter operator. + + Wraps `XLA's Scatter operator + <https://www.tensorflow.org/xla/operation_semantics#scatter>`_. + + The semantics of scatter are complicated and its API is subject to change. + + Args: + operand: an array to which the scatter should be applied + scatter_indices: an array that gives the indices in `operand` to which each + update in `updates` should be applied. + updates: the updates that should be scattered onto `operand`. + dimension_numbers: a `lax.ScatterDimensionNumbers` object that describes + how dimensions of `operand`, `start_indices`, `updates` and the output + relate. + + Returns: + An array containing the sum of `operand` and the scattered updates. + """ jaxpr, consts = _reduction_jaxpr(add, _const(operand, 0)) return scatter_p.bind( operand, scatter_indices, updates, update_jaxpr=jaxpr, @@ -2364,10 +2404,30 @@ def _dynamic_update_slice_translation_rule(c, operand, update, start_indices, -GatherDimensionNumbers = collections.namedtuple( +class GatherDimensionNumbers(collections.namedtuple( "GatherDimensionNumbers", ["offset_dims", "collapsed_slice_dims", "start_index_map", - "index_vector_dim"]) + "index_vector_dim"])): + """ + Describes the dimension number arguments to an `XLA's Gather operator + <https://www.tensorflow.org/xla/operation_semantics#gather>`_. See the XLA + documentation for more details of what the dimension numbers mean. + + Args: + offset_dims: the set of dimensions in the `gather` output that offset into + an array sliced from `operand`. Must be a tuple of integers in ascending + order, each representing a dimension number of the output. + collapsed_slice_dims: the set of dimensions `i` in `operand` that have + `slice_sizes[i] == 1` and that should not have a corresponding dimension + in the output of the gather. Must be a tuple of integers in ascending + order. + start_index_map: for each dimension in `start_indices`, gives the + corresponding dimension in `operand` that is to be sliced. 
Must be a + tuple of integers with size equal to `ndim(start_indices)`. + index_vector_dim: describes which dimension of `start_indices` "contains" + the start indices. If equal to `len(start_indices)` the indices are + taken to be scalars. + """ def _gather_dimensions_proto(dimension_numbers): assert type(dimension_numbers) is GatherDimensionNumbers @@ -2506,10 +2566,29 @@ def _gather_batching_rule(batched_args, batch_dims, dimension_numbers, batching.primitive_batchers[gather_p] = _gather_batching_rule -ScatterDimensionNumbers = collections.namedtuple( +class ScatterDimensionNumbers(collections.namedtuple( "ScatterDimensionNumbers", ["update_window_dims", "inserted_window_dims", - "scatter_dims_to_operand_dims", "index_vector_dim"]) + "scatter_dims_to_operand_dims", "index_vector_dim"])): + """ + Describes the dimension number arguments to an `XLA's Scatter operator + <https://www.tensorflow.org/xla/operation_semantics#scatter>`_. See the XLA + documentation for more details of what the dimension numbers mean. + + Args: + update_window_dims: the set of dimensions in the `updates` that are window + dimensions. Must be a tuple of integers in ascending + order, each representing a dimension number. + inserted_window_dims: the set of size 1 window dimensions that must be inserted + into the shape of `updates`. Must be a tuple of integers in ascending + order, each representing a dimension number of the output. These are the + mirror image of `collapsed_slice_dims` in the case of `gather`. + scatter_dims_to_operand_dims: for each dimension in `scatter_indices`, gives + the corresponding dimension in `operand`. + index_vector_dim: describes which dimension of `scatter_indices` "contains" + the start indices. If equal to `len(scatter_indices)` the indices are + taken to be scalars. + """ def _scatter_dimensions_proto(dimension_numbers): assert type(dimension_numbers) is ScatterDimensionNumbers @@ -2523,7 +2602,7 @@ def _scatter_dimensions_proto(dimension_numbers): def _scatter_dtype_rule(operand, scatter_indices, updates, **kwargs): if not onp.issubdtype(scatter_indices.dtype, onp.integer): - raise ValueError("start_indices must have an integer type") + raise ValueError("scatter_indices must have an integer type") _check_same_dtypes("scatter", False, operand.dtype, updates.dtype) return xla_bridge.canonicalize_dtype(operand.dtype)
scatter_add requires dimension numbers, default value invalid

```python3
jax.lax.scatter_add(np.zeros(10), np.asarray([3, 1]), np.asarray([5., 7.]))
```

fails with `AssertionError` because of `assert type(dimension_numbers) is ScatterDimensionNumbers`.

The following works, but there is no documentation on these parameters, so I inferred them from the tests.

```python3
dnums = jax.lax.ScatterDimensionNumbers(update_window_dims=(),
                                        inserted_window_dims=(0,),
                                        scatter_dims_to_operand_dims=(0,),
                                        index_vector_dim=1)
jax.lax.scatter_add(np.zeros(10), np.asarray([3, 1]), np.asarray([5., 7.]), dnums)
```

Also, it might be worth adding `jax.lax.scatter` with the first argument set to zeros.
Thanks for the issue report! I'll add doc strings for these functions (we haven't gotten around to documenting all of `lax` yet), but essentially gather and scatter_add are direct wrappers around XLA's [gather](https://www.tensorflow.org/xla/operation_semantics#gather) and [scatter](https://www.tensorflow.org/xla/operation_semantics#scatter) primitives, so you can refer to the XLA documentation for details.

That said, I personally find their APIs too complicated. Expect us to simplify these APIs in the future. For most `gather` use cases, it's probably better to use NumPy indexing syntax, which will lower to gather already. For scatter, unfortunately we don't yet have an equivalent to the gather syntax, but we definitely plan to add some kind of update syntax that looks a lot more like NumPy indexed updates!

The closest I can offer you right now is some gradient trickery that exploits the fact that scatter-add is the transpose of gather, and we do have nice indexing support for gather:

```
In [49]: x = np.ones((10,), onp.float32)

In [50]: y = onp.array([2, 4, 5])

In [51]: def scatter_add(f, base, updates):
    ...:     _, grad = jax.vjp(f, base)
    ...:     (out,) = grad(updates)
    ...:     return base + out
    ...:

In [52]: scatter_add(lambda a: a[y], x, np.array([7, 4, 32], np.float32)).copy()
Out[52]: array([ 1., 1., 8., 1., 5., 33., 1., 1., 1., 1.], dtype=float32)
```

Expect us to offer something along those lines in the future, but it won't be that exact implementation.
2019-02-22T13:41:08
google/jax
437
google__jax-437
[ "433" ]
f08c3b746f82c8a75b1c23146c68deaa8c6e3d43
diff --git a/jax/lax.py b/jax/lax.py --- a/jax/lax.py +++ b/jax/lax.py @@ -1363,7 +1363,7 @@ def _add_transpose(t, x, y): def _sub_transpose(t, x, y): assert x is None and y is None # computation must be linear, not affine - return [t, neg(t)] + return [t, neg(t) if t is not ad_util.zero else ad_util.zero] sub_p = standard_binop([_num, _num], 'sub') ad.defjvp(sub_p,
Error differentiating QR decomposition

This appears to be a bug but unfortunately I don't have time to delve into it. Below is a simple script to do gradient descent on a function involving a QR decomposition. It fails when I execute `training_gradient_fun` with `TypeError: No abstraction handler for type: <class 'jax.ad_util.Zero'>`.

Ubuntu 16.04
python 3.6.6
jax 0.1.20
jaxlib 0.1.9

```
import jax
import jax.numpy as np
import numpy as onp

def loss(weights, inputs):
    q, r = np.linalg.qr(weights)
    a = np.matmul(q, weights)
    return np.mean(a ** 2)

training_gradient_fun = jax.jit(jax.grad(loss))

inputs = onp.random.rand(5, 5)
weights = onp.random.rand(5, 5)
for i in range(100):
    weights -= 0.1 * training_gradient_fun(weights, inputs)
```
2019-02-23T04:43:04
google/jax
443
google__jax-443
[ "442" ]
dd5b2a68600eb82003675d063a00cbf571e136be
diff --git a/jax/experimental/stax.py b/jax/experimental/stax.py --- a/jax/experimental/stax.py +++ b/jax/experimental/stax.py @@ -137,7 +137,7 @@ def init_fun(input_shape): def apply_fun(params, x, rng=None): beta, gamma = params mean, var = np.mean(x, axis, keepdims=True), fastvar(x, axis, keepdims=True) - z = (x - mean) / (var + epsilon)**2 + z = (x - mean) / np.sqrt(var + epsilon) if center and scale: return gamma * z + beta if center: return z + beta if scale: return gamma * z
`stax.BatchNorm` incorrect normalization

jax.experimental.stax on current master, line 140 (`BatchNorm` function):

```python
z = (x - mean) / (var + epsilon)**2
```

The square in the denominator should probably be a square root.
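The patch above replaces the square with a square root, i.e. it normalizes by the standard deviation. A minimal standalone sketch of the corrected normalization, with the variance written out by hand instead of stax's `fastvar` helper and without the center/scale options:

```python
import jax.numpy as np

def batch_norm(x, gamma, beta, axis=(0,), epsilon=1e-5):
    mean = np.mean(x, axis, keepdims=True)
    var = np.mean((x - mean) ** 2, axis, keepdims=True)
    z = (x - mean) / np.sqrt(var + epsilon)   # square root, not square
    return gamma * z + beta
```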
Thanks so much for catching this!
2019-02-24T23:27:29
google/jax
456
google__jax-456
[ "453", "453" ]
2dae120d54846aa69e851aa584f724c9f7ba1903
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -597,19 +597,19 @@ def angle(x): @_wraps(onp.reshape) def reshape(a, newshape, order="C"): # pylint: disable=missing-docstring - if order == "C" or order is None: - dims = None + dummy_val = onp.broadcast_to(0, shape(a)) # zero strides + computed_newshape = onp.reshape(dummy_val, newshape).shape + + if order == "C": + return lax.reshape(a, computed_newshape, None) elif order == "F": dims = onp.arange(ndim(a))[::-1] + return lax.reshape(a, computed_newshape[::-1], dims).T elif order == "A": raise NotImplementedError("np.reshape order=A is not implemented.") else: raise ValueError("Unexpected value for 'order' argument: {}.".format(order)) - dummy_val = onp.broadcast_to(0, shape(a)) # zero strides - computed_newshape = onp.reshape(dummy_val, newshape).shape - return lax.reshape(a, computed_newshape, dims) - @_wraps(onp.ravel) def ravel(a, order="C"):
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -859,12 +859,14 @@ def fn(module, axis): self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True) @parameterized.named_parameters(jtu.cases_from_list( - {"testcase_name": "_inshape={}_outshape={}".format( + {"testcase_name": "_inshape={}_outshape={}_order={}".format( jtu.format_shape_dtype_string(arg_shape, dtype), - jtu.format_shape_dtype_string(out_shape, dtype)), + jtu.format_shape_dtype_string(out_shape, dtype), + order), "arg_shape": arg_shape, "out_shape": out_shape, "dtype": dtype, - "rng": jtu.rand_default()} + "order": order, "rng": jtu.rand_default()} for dtype in default_dtypes + for order in ["C", "F"] for arg_shape, out_shape in [ (jtu.NUMPY_SCALAR_SHAPE, (1, 1, 1)), ((), (1, 1, 1)), @@ -875,9 +877,9 @@ def fn(module, axis): ((2, 1, 4), (-1,)), ((2, 2, 4), (2, 8)) ])) - def testReshape(self, arg_shape, out_shape, dtype, rng): - onp_fun = lambda x: onp.reshape(x, out_shape) - lnp_fun = lambda x: lnp.reshape(x, out_shape) + def testReshape(self, arg_shape, out_shape, dtype, order, rng): + onp_fun = lambda x: onp.reshape(x, out_shape, order=order) + lnp_fun = lambda x: lnp.reshape(x, out_shape, order=order) args_maker = lambda: [rng(arg_shape, dtype)] self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True) self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True) @@ -1327,6 +1329,13 @@ def test_fail(x): result = api.grad(test_fail)(x) assert not onp.any(onp.isnan(result)) + def testIssue453(self): + # https://github.com/google/jax/issues/453 + a = onp.arange(6) + 1 + ans = lnp.reshape(a, (3, 2), order='F') + expected = onp.reshape(a, (3, 2), order='F') + self.assertAllClose(ans, expected, check_dtypes=True) + if __name__ == "__main__": absltest.main()
Fortran reshape order runs but gives wrong behaviour

Hi there,

Big fan of Jax, thanks for making this! I believe there is a problem with specifying the reshaping order:

```python
a = np.arange(6) + 1
np.reshape(a, (3, 2), order='F')
```

Should give:

```
array([[1, 4],
       [2, 5],
       [3, 6]])
```

But instead gives:

```
array([[1, 2],
       [3, 4],
       [5, 6]], dtype=int32)
```

Basically, the `order` argument is ignored and the reshape always uses row-major (C) ordering. If unsupported, it would be nice for it to just fail rather than silently carry on.
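A Fortran-order reshape can be expressed with C-order reshapes and transposes, which is essentially what the fix above does. A small NumPy sketch of that identity, using a hypothetical helper name:

```python
import numpy as onp

def reshape_f(a, newshape):
    # order='F' reshape == transpose, C-order reshape to the reversed
    # shape, then transpose back
    return onp.reshape(onp.transpose(a), newshape[::-1]).T

a = onp.arange(6) + 1
assert (reshape_f(a, (3, 2)) == onp.reshape(a, (3, 2), order='F')).all()
```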
2019-02-27T15:43:03
google/jax
460
google__jax-460
[ "454" ]
359c4c1346278e9e80a2ca2750b73654cb6f26c0
diff --git a/jax/scipy/special.py b/jax/scipy/special.py --- a/jax/scipy/special.py +++ b/jax/scipy/special.py @@ -20,6 +20,7 @@ import scipy.special as osp_special from .. import lax +from ..numpy import lax_numpy as np from ..numpy.lax_numpy import _wraps, asarray, _reduction_dims, _constant_like @@ -55,4 +56,385 @@ def logsumexp(a, axis=None, b=None, keepdims=False, return_sign=False): amax_singletons = dimadd(amax) out = lax.add(lax.log(lax.reduce(lax.exp(lax.sub(a, amax_singletons)), _constant_like(a, 0), lax.add, dims)), amax) - return dimadd(out) if keepdims else out \ No newline at end of file + return dimadd(out) if keepdims else out + + +# Normal distributions + +# Functions "ndtr" and "ndtri" are derived from calculations made in: +# https://root.cern.ch/doc/v608/SpecFuncCephesInv_8cxx_source.html +# In the following email exchange, the author gives his consent to redistribute +# derived works under an Apache 2.0 license. +# +# From: Stephen Moshier <[email protected]> +# Date: Sat, Jun 9, 2018 at 2:36 PM +# Subject: Re: Licensing cephes under Apache (BSD-like) license. +# To: rif <[email protected]> +# +# +# +# Hello Rif, +# +# Yes, Google may distribute Cephes files under the Apache 2 license. +# +# If clarification is needed, I do not favor BSD over other free licenses. +# I would agree that Apache 2 seems to cover the concern you mentioned +# about sublicensees. +# +# Best wishes for good luck with your projects! +# Steve Moshier +# +# +# +# On Thu, 31 May 2018, rif wrote: +# +# > Hello Steve. +# > My name is Rif. I work on machine learning software at Google. +# > +# > Your cephes software continues to be incredibly useful and widely used. I +# > was wondering whether it would be permissible for us to use the Cephes code +# > under the Apache 2.0 license, which is extremely similar in permissions to +# > the BSD license (Wikipedia comparisons). This would be quite helpful to us +# > in terms of avoiding multiple licenses on software. +# > +# > I'm sorry to bother you with this (I can imagine you're sick of hearing +# > about this by now), but I want to be absolutely clear we're on the level and +# > not misusing your important software. In former conversation with Eugene +# > Brevdo ([email protected]), you wrote "If your licensing is similar to BSD, +# > the formal way that has been handled is simply to add a statement to the +# > effect that you are incorporating the Cephes software by permission of the +# > author." I wanted to confirm that (a) we could use the Apache license, (b) +# > that we don't need to (and probably you don't want to) keep getting +# > contacted about individual uses, because your intent is generally to allow +# > this software to be reused under "BSD-like" license, and (c) you're OK +# > letting incorporators decide whether a license is sufficiently BSD-like? +# > +# > Best, +# > +# > rif +# > +# > +# > + +# log_ndtr uses different functions over the ranges +# (-infty, lower](lower, upper](upper, infty) +# Lower bound values were chosen by examining where the support of ndtr +# appears to be zero, relative to scipy's (which is always 64bit). They were +# then made more conservative just to be safe. (Conservative means use the +# expansion more than we probably need to.) 
+_LOGNDTR_FLOAT64_LOWER = onp.array(-20, onp.float64) +_LOGNDTR_FLOAT32_LOWER = onp.array(-10, onp.float32) + +# Upper bound values were chosen by examining for which values of 'x' +# Log[cdf(x)] is 0, after which point we need to use the approximation +# Log[cdf(x)] = Log[1 - cdf(-x)] approx -cdf(-x). We chose a value slightly +# conservative, meaning we use the approximation earlier than needed. +_LOGNDTR_FLOAT64_UPPER = onp.array(8, onp.float64) +_LOGNDTR_FLOAT32_UPPER = onp.array(5, onp.float32) + + +def ndtr(x): + """Normal distribution function. + + Returns the area under the Gaussian probability density function, integrated + from minus infinity to x: + + ``` + 1 / x + ndtr(x) = ---------- | exp(-0.5 t**2) dt + sqrt(2 pi) /-inf + + = 0.5 (1 + erf(x / sqrt(2))) + = 0.5 erfc(x / sqrt(2)) + ``` + + Args: + x: An array of type `float32`, `float64`. + + Returns: + ndtr: An array with `dtype=x.dtype`. + + Raises: + TypeError: if `x` is not floating-type. + """ + x = np.asarray(x) + dtype = lax._dtype(x) + if dtype not in (np.float32, np.float64): + raise TypeError( + "x.dtype={} is not supported, see docstring for supported types." + .format(dtype)) + return _ndtr(x) + + +def _ndtr(x): + """Implements ndtr core logic.""" + dtype = lax._dtype(x).type + half_sqrt_2 = dtype(0.5) * onp.sqrt(2., dtype=dtype) + w = x * half_sqrt_2 + z = lax.abs(w) + y = lax.select(lax.lt(z, half_sqrt_2), + dtype(1.) + lax.erf(w), + lax.select(lax.gt(w, dtype(0.)), + dtype(2.) - lax.erfc(z), + lax.erfc(z))) + return 0.5 * y + + +def ndtri(p): + """The inverse of the CDF of the Normal distribution function. + + Returns x such that the area under the pdf from minus infinity to x is equal + to p. + + A piece-wise rational approximation is done for the function. + This is a port of the implementation in netlib. + + Args: + p: an array of type `float32`, `float64`. + + Returns: + x: an array with `dtype=p.dtype`. + + Raises: + TypeError: if `p` is not floating-type. + """ + x = np.asarray(p) + dtype = lax._dtype(p) + if dtype not in (np.float32, np.float64): + raise TypeError( + "x.dtype={} is not supported, see docstring for supported types." + .format(dtype)) + return _ndtri(p) + + +def _ndtri(p): + """Implements ndtri core logic.""" + + # Constants used in piece-wise rational approximations. 
Taken from the cephes + # library: + # https://root.cern.ch/doc/v608/SpecFuncCephesInv_8cxx_source.html + p0 = list(reversed([-5.99633501014107895267E1, + 9.80010754185999661536E1, + -5.66762857469070293439E1, + 1.39312609387279679503E1, + -1.23916583867381258016E0])) + q0 = list(reversed([1.0, + 1.95448858338141759834E0, + 4.67627912898881538453E0, + 8.63602421390890590575E1, + -2.25462687854119370527E2, + 2.00260212380060660359E2, + -8.20372256168333339912E1, + 1.59056225126211695515E1, + -1.18331621121330003142E0])) + p1 = list(reversed([4.05544892305962419923E0, + 3.15251094599893866154E1, + 5.71628192246421288162E1, + 4.40805073893200834700E1, + 1.46849561928858024014E1, + 2.18663306850790267539E0, + -1.40256079171354495875E-1, + -3.50424626827848203418E-2, + -8.57456785154685413611E-4])) + q1 = list(reversed([1.0, + 1.57799883256466749731E1, + 4.53907635128879210584E1, + 4.13172038254672030440E1, + 1.50425385692907503408E1, + 2.50464946208309415979E0, + -1.42182922854787788574E-1, + -3.80806407691578277194E-2, + -9.33259480895457427372E-4])) + p2 = list(reversed([3.23774891776946035970E0, + 6.91522889068984211695E0, + 3.93881025292474443415E0, + 1.33303460815807542389E0, + 2.01485389549179081538E-1, + 1.23716634817820021358E-2, + 3.01581553508235416007E-4, + 2.65806974686737550832E-6, + 6.23974539184983293730E-9])) + q2 = list(reversed([1.0, + 6.02427039364742014255E0, + 3.67983563856160859403E0, + 1.37702099489081330271E0, + 2.16236993594496635890E-1, + 1.34204006088543189037E-2, + 3.28014464682127739104E-4, + 2.89247864745380683936E-6, + 6.79019408009981274425E-9])) + + dtype = lax._dtype(p).type + shape = np.shape(p) + + def _create_polynomial(var, coeffs): + """Compute n_th order polynomial via Horner's method.""" + coeffs = onp.array(coeffs, dtype) + if not coeffs.size: + return np.zeros_like(var) + return coeffs[0] + _create_polynomial(var, coeffs[1:]) * var + + + maybe_complement_p = np.where(p > dtype(-onp.expm1(-2.)), dtype(1.) - p, p) + # Write in an arbitrary value in place of 0 for p since 0 will cause NaNs + # later on. The result from the computation when p == 0 is not used so any + # number that doesn't result in NaNs is fine. + sanitized_mcp = np.where( + maybe_complement_p <= dtype(0.), + np.full(shape, dtype(0.5)), + maybe_complement_p) + + # Compute x for p > exp(-2): x/sqrt(2pi) = w + w**3 P0(w**2)/Q0(w**2). + w = sanitized_mcp - dtype(0.5) + ww = lax.square(w) + x_for_big_p = w + w * ww * (_create_polynomial(ww, p0) + / _create_polynomial(ww, q0)) + x_for_big_p *= -dtype(onp.sqrt(2. * onp.pi)) + + # Compute x for p <= exp(-2): x = z - log(z)/z - (1/z) P(1/z) / Q(1/z), + # where z = sqrt(-2. * log(p)), and P/Q are chosen between two different + # arrays based on whether p < exp(-32). + z = lax.sqrt(dtype(-2.) * lax.log(sanitized_mcp)) + first_term = z - lax.log(z) / z + second_term_small_p = ( + _create_polynomial(dtype(1.) / z, p2) / + _create_polynomial(dtype(1.) / z, q2) / z) + second_term_otherwise = ( + _create_polynomial(dtype(1.) / z, p1) / + _create_polynomial(dtype(1.) / z, q1) / z) + x_for_small_p = first_term - second_term_small_p + x_otherwise = first_term - second_term_otherwise + + x = np.where(sanitized_mcp > dtype(onp.exp(-2.)), + x_for_big_p, + np.where(z >= dtype(8.0), x_for_small_p, x_otherwise)) + + x = np.where(p > dtype(1. 
- onp.exp(-2.)), x, -x) + infinity = np.full(shape, dtype(onp.inf)) + x_nan_replaced = np.where( + p <= dtype(0.0), -infinity, np.where(p >= dtype(1.0), infinity, x)) + return x_nan_replaced + + +def log_ndtr(x, series_order=3): + """Log Normal distribution function. + + For details of the Normal distribution function see `ndtr`. + + This function calculates `(log o ndtr)(x)` by either calling `log(ndtr(x))` or + using an asymptotic series. Specifically: + - For `x > upper_segment`, use the approximation `-ndtr(-x)` based on + `log(1-x) ~= -x, x << 1`. + - For `lower_segment < x <= upper_segment`, use the existing `ndtr` technique + and take a log. + - For `x <= lower_segment`, we use the series approximation of erf to compute + the log CDF directly. + + The `lower_segment` is set based on the precision of the input: + + ``` + lower_segment = { -20, x.dtype=float64 + { -10, x.dtype=float32 + upper_segment = { 8, x.dtype=float64 + { 5, x.dtype=float32 + ``` + + When `x < lower_segment`, the `ndtr` asymptotic series approximation is: + + ``` + ndtr(x) = scale * (1 + sum) + R_N + scale = exp(-0.5 x**2) / (-x sqrt(2 pi)) + sum = Sum{(-1)^n (2n-1)!! / (x**2)^n, n=1:N} + R_N = O(exp(-0.5 x**2) (2N+1)!! / |x|^{2N+3}) + ``` + + where `(2n-1)!! = (2n-1) (2n-3) (2n-5) ... (3) (1)` is a + [double-factorial](https://en.wikipedia.org/wiki/Double_factorial). + + + Args: + x: an array of type `float32`, `float64`. + series_order: Positive Python `integer`. Maximum depth to + evaluate the asymptotic expansion. This is the `N` above. + + Returns: + log_ndtr: an array with `dtype=x.dtype`. + + Raises: + TypeError: if `x.dtype` is not handled. + TypeError: if `series_order` is a not Python `integer.` + ValueError: if `series_order` is not in `[0, 30]`. + """ + if not isinstance(series_order, int): + raise TypeError("series_order must be a Python integer.") + if series_order < 0: + raise ValueError("series_order must be non-negative.") + if series_order > 30: + raise ValueError("series_order must be <= 30.") + + x = np.asarray(x) + dtype = lax._dtype(x) + + if dtype == np.float64: + lower_segment = _LOGNDTR_FLOAT64_LOWER + upper_segment = _LOGNDTR_FLOAT64_UPPER + elif dtype == np.float32: + lower_segment = _LOGNDTR_FLOAT32_LOWER + upper_segment = _LOGNDTR_FLOAT32_UPPER + else: + raise TypeError("x.dtype={} is not supported.".format(onp.dtype(dtype))) + + # The basic idea here was ported from: + # https://root.cern.ch/doc/v608/SpecFuncCephesInv_8cxx_source.html + # We copy the main idea, with a few changes + # * For x >> 1, and X ~ Normal(0, 1), + # Log[P[X < x]] = Log[1 - P[X < -x]] approx -P[X < -x], + # which extends the range of validity of this function. + # * We use one fixed series_order for all of 'x', rather than adaptive. + # * Our docstring properly reflects that this is an asymptotic series, not a + # Taylor series. We also provided a correct bound on the remainder. + # * We need to use the max/min in the _log_ndtr_lower arg to avoid nan when + # x=0. This happens even though the branch is unchosen because when x=0 + # the gradient of a select involves the calculation 1*dy+0*(-inf)=nan + # regardless of whether dy is finite. Note that the minimum is a NOP if + # the branch is chosen. 
+ return np.where( + lax.gt(x, upper_segment), + -_ndtr(-x), # log(1-x) ~= -x, x << 1 + np.where(lax.gt(x, lower_segment), + lax.log(_ndtr(lax.max(x, lower_segment))), + _log_ndtr_lower(lax.min(x, lower_segment), + series_order))) + + +def _log_ndtr_lower(x, series_order): + """Asymptotic expansion version of `Log[cdf(x)]`, appropriate for `x<<-1`.""" + dtype = lax._dtype(x).type + x_2 = lax.square(x) + # Log of the term multiplying (1 + sum) + log_scale = -dtype(0.5) * x_2 - lax.log(-x) - dtype(0.5 * onp.log(2. * onp.pi)) + return log_scale + lax.log(_log_ndtr_asymptotic_series(x, series_order)) + + +def _log_ndtr_asymptotic_series(x, series_order): + """Calculates the asymptotic series used in log_ndtr.""" + dtype = lax._dtype(x).type + if series_order <= 0: + return onp.array(1, dtype) + x_2 = lax.square(x) + even_sum = np.zeros_like(x) + odd_sum = np.zeros_like(x) + x_2n = x_2 # Start with x^{2*1} = x^{2*n} with n = 1. + for n in range(1, series_order + 1): + y = onp.array(_double_factorial(2 * n - 1), dtype) / x_2n + if n % 2: + odd_sum += y + else: + even_sum += y + x_2n *= x_2 + return dtype(1.) + even_sum - odd_sum + + +def _double_factorial(n): + """The double factorial function for small Python integer `n`.""" + return onp.prod(onp.arange(n, 1, -2)) \ No newline at end of file diff --git a/jax/scipy/stats/norm.py b/jax/scipy/stats/norm.py --- a/jax/scipy/stats/norm.py +++ b/jax/scipy/stats/norm.py @@ -21,7 +21,7 @@ from ... import lax from ...numpy.lax_numpy import _promote_args_like, _constant_like, _wraps - +from .. import special @_wraps(osp_stats.norm.logpdf) def logpdf(x, loc=0, scale=1): @@ -32,6 +32,19 @@ def logpdf(x, loc=0, scale=1): quadratic = lax.div(lax.pow(lax.sub(x, loc), two), scale_sqrd) return lax.div(lax.neg(lax.add(log_normalizer, quadratic)), two) + @_wraps(osp_stats.norm.pdf) def pdf(x, loc=0, scale=1): return lax.exp(logpdf(x, loc, scale)) + + +@_wraps(osp_stats.norm.cdf) +def cdf(x, loc=0, scale=1): + x, loc, scale = _promote_args_like(osp_stats.norm.cdf, x, loc, scale) + return special.ndtr(lax.div(lax.sub(x, loc), scale)) + + +@_wraps(osp_stats.norm.logcdf) +def logcdf(x, loc=0, scale=1): + x, loc, scale = _promote_args_like(osp_stats.norm.logcdf, x, loc, scale) + return special.log_ndtr(lax.div(lax.sub(x, loc), scale)) \ No newline at end of file
diff --git a/tests/lax_scipy_test.py b/tests/lax_scipy_test.py --- a/tests/lax_scipy_test.py +++ b/tests/lax_scipy_test.py @@ -49,21 +49,27 @@ OpRecord = collections.namedtuple("OpRecord", ["name", "nargs", "dtypes", "rng", - "diff_modes", "test_name"]) + "test_autodiff", "test_name"]) -def op_record(name, nargs, dtypes, rng, diff_modes, test_name=None): +def op_record(name, nargs, dtypes, rng, test_grad, test_name=None): test_name = test_name or name - return OpRecord(name, nargs, dtypes, rng, diff_modes, test_name) + return OpRecord(name, nargs, dtypes, rng, test_grad, test_name) JAX_SPECIAL_FUNCTION_RECORDS = [ - op_record("gammaln", 1, float_dtypes, jtu.rand_positive(), ["rev"]), - op_record("digamma", 1, float_dtypes, jtu.rand_positive(), []), - op_record("erf", 1, float_dtypes, jtu.rand_small_positive(), ["rev"]), - op_record("erfc", 1, float_dtypes, jtu.rand_small_positive(), ["rev"]), - op_record("erfinv", 1, float_dtypes, jtu.rand_small_positive(), ["rev"]), - op_record("logit", 1, float_dtypes, jtu.rand_small_positive(), ["rev"]), - op_record("expit", 1, float_dtypes, jtu.rand_small_positive(), ["rev"]), + # TODO: digamma has no JVP implemented. + op_record("digamma", 1, float_dtypes, jtu.rand_positive(), False), + op_record("erf", 1, float_dtypes, jtu.rand_small_positive(), True), + op_record("erfc", 1, float_dtypes, jtu.rand_small_positive(), True), + op_record("erfinv", 1, float_dtypes, jtu.rand_small_positive(), True), + op_record("expit", 1, float_dtypes, jtu.rand_small_positive(), True), + # TODO: gammaln has slightly high error. + op_record("gammaln", 1, float_dtypes, jtu.rand_positive(), False), + # TODO: NaNs in gradient for logit. + op_record("logit", 1, float_dtypes, jtu.rand_small_positive(), False), + op_record("log_ndtr", 1, float_dtypes, jtu.rand_default(), True), + op_record("ndtri", 1, float_dtypes, jtu.rand_uniform(0., 1.), True), + op_record("ndtr", 1, float_dtypes, jtu.rand_default(), True), ] CombosWithReplacement = itertools.combinations_with_replacement @@ -100,15 +106,15 @@ def lax_fun(array_to_reduce): {"testcase_name": jtu.format_test_name_suffix( rec.test_name, shapes, dtypes), "rng": rec.rng, "shapes": shapes, "dtypes": dtypes, - "modes": rec.diff_modes, + "test_autodiff": rec.test_autodiff, "scipy_op": getattr(osp_special, rec.name), "lax_op": getattr(lsp_special, rec.name)} for rec in JAX_SPECIAL_FUNCTION_RECORDS for shapes in CombosWithReplacement(all_shapes, rec.nargs) for dtypes in CombosWithReplacement(rec.dtypes, rec.nargs))) - def testScipySpecialFun(self, scipy_op, lax_op, rng, shapes, dtypes, modes): + def testScipySpecialFun(self, scipy_op, lax_op, rng, shapes, dtypes, + test_autodiff): # TODO(mattjj): unskip this test combination when real() on tpu is improved - # TODO(mattjj): test autodiff if (FLAGS.jax_test_dut and FLAGS.jax_test_dut.startswith("tpu") and not shapes[0]): return absltest.unittest.skip("real() on scalar not supported on tpu") @@ -119,5 +125,9 @@ def testScipySpecialFun(self, scipy_op, lax_op, rng, shapes, dtypes, modes): check_dtypes=False) self._CompileAndCheck(lax_op, args_maker, check_dtypes=True) + if test_autodiff: + jtu.check_grads(lax_op, args, order=1, atol=1e-3, rtol=3e-3) + + if __name__ == "__main__": absltest.main() diff --git a/tests/scipy_stats_test.py b/tests/scipy_stats_test.py --- a/tests/scipy_stats_test.py +++ b/tests/scipy_stats_test.py @@ -134,6 +134,37 @@ def args_maker(): self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=True) self._CompileAndCheck(lax_fun, args_maker, 
check_dtypes=True) + + @genNamedParametersNArgs(3, jtu.rand_default()) + def testNormLogCdf(self, rng, shapes, dtypes): + scipy_fun = osp_stats.norm.logcdf + lax_fun = lsp_stats.norm.logcdf + + def args_maker(): + x, loc, scale = map(rng, shapes, dtypes) + # clipping to ensure that scale is not too low + scale = onp.clip(onp.abs(scale), a_min=0.1, a_max=None) + return [x, loc, scale] + + self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=True) + self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True) + + + @genNamedParametersNArgs(3, jtu.rand_default()) + def testNormCdf(self, rng, shapes, dtypes): + scipy_fun = osp_stats.norm.cdf + lax_fun = lsp_stats.norm.cdf + + def args_maker(): + x, loc, scale = map(rng, shapes, dtypes) + # clipping to ensure that scale is not too low + scale = onp.clip(onp.abs(scale), a_min=0.1, a_max=None) + return [x, loc, scale] + + self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=True) + self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True) + + @genNamedParametersNArgs(3, jtu.rand_default()) def testUniformLogPdf(self, rng, shapes, dtypes): scipy_fun = osp_stats.uniform.logpdf
Support for scipy.stats.norm.logcdf Hi there, Adding support for `scipy.stats.norm.logcdf` would be very useful to me (for probit in the likelihood). A workaround would be good too. I saw there is `erf`, but I could really do with a log version for numerical reasons. Thanks!
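The patch above adds `jax.scipy.stats.norm.logcdf` (backed by `log_ndtr`), so a probit log-likelihood can be written and differentiated directly. A small usage sketch with hypothetical variable names:

```python
import jax
import jax.numpy as np
from jax.scipy.stats import norm

def probit_loglik(w, x, y):
    # y in {-1, +1}; logcdf keeps log(Phi) numerically stable in the tails
    return np.sum(norm.logcdf(y * np.dot(x, w)))

grad_fn = jax.grad(probit_loglik)  # gradient with respect to w
```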
2019-02-28T21:14:59
google/jax
477
google__jax-477
[ "122" ]
ff89a90abba97a6363dce914ae73837135726422
diff --git a/jax/ops/__init__.py b/jax/ops/__init__.py new file mode 100644 --- /dev/null +++ b/jax/ops/__init__.py @@ -0,0 +1,17 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import + +from .scatter import index, index_add, index_update \ No newline at end of file diff --git a/jax/ops/scatter.py b/jax/ops/scatter.py new file mode 100644 --- /dev/null +++ b/jax/ops/scatter.py @@ -0,0 +1,246 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Helpers for indexed updates. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as onp + +from ..abstract_arrays import ShapedArray, ConcreteArray +from .. import core +from .. import lax +from ..numpy import lax_numpy as np + + +def _scatter_update(x, idx, y, scatter_op): + """Helper for indexed updates. + + Computes the value of x that would result from computing:: + x[idx] op= y + except in a pure functional way, with no in-place updating. + + Support NumPy-style basic indexing only, i.e., `idx` must be + `None`, an integer, a `slice` object, or ellipses, or a tuple of the above. + + TODO(phawkins): support advanced indexing. + """ + + x = np.asarray(x) + y = np.asarray(y) + x_shape = np.shape(x) + y_shape = np.shape(y) + y = lax.convert_element_type(y, lax._dtype(x)) + + if not isinstance(idx, tuple): + idx = (idx,) + + # Test for unsupported advanced indexing and report an error. + if any(onp.ndim(elt) != 0 for elt in idx): + raise NotImplementedError("Unimplemented case for indexed update. Advanced " + "indexing is not yet implemented.") + + # Remove ellipses and add trailing slice(None)s. + idx = np._canonicalize_tuple_index(x, idx) + + _int = lambda aval: not aval.shape and onp.issubdtype(aval.dtype, onp.integer) + + x_axis = 0 + y_axis = 0 # Current axis in y, before collapsing. See below. + collapsed_y_axis = 0 # Current axis in y, after collapsing. + + # Scatter dimension numbers. + update_window_dims = [] + inserted_window_dims = [] + scatter_dims_to_operand_dims = [] + + scatter_indices = np.zeros((0,), dtype=np.int32) + + # We perform three transformations to y before the scatter op, in order: + # First, y is broadcast to slice_shape. In general `y` only need broadcast to + # the right shape. + slice_shape = [] + # Next, y is reshaped to collapsed_slice_shape. This is to handle `None` + # indices, which the scatter cannot remove itself. 
+ collapsed_slice_shape = [] + # Finally, we reverse reversed_y_dims to handle slices with negative strides. + reversed_y_dims = [] + + for i in idx: + try: + abstract_i = core.get_aval(i) + except TypeError: + abstract_i = None + if (isinstance(abstract_i, ConcreteArray) or + isinstance(abstract_i, ShapedArray)) and _int(abstract_i): + i = np.mod(i, np._constant_like(i, x.shape[x_axis])) + i = lax.convert_element_type(i, np.int32) + i = np.broadcast_to(i, tuple(scatter_indices.shape[:-1]) + (1,)) + scatter_indices = np.concatenate((scatter_indices, i), -1) + inserted_window_dims.append(x_axis) + scatter_dims_to_operand_dims.append(x_axis) + x_axis += 1 + elif i is None: + slice_shape.append(1) + y_axis += 1 + elif np._is_slice_none(i): + slice_shape.append(x_shape[x_axis]) + collapsed_slice_shape.append(x_shape[x_axis]) + update_window_dims.append(collapsed_y_axis) + collapsed_y_axis += 1 + y_axis += 1 + x_axis += 1 + elif isinstance(i, slice): + start, limit, stride, needs_rev = np._static_idx(i, x.shape[x_axis]) + if needs_rev: + reversed_y_dims.append(collapsed_y_axis) + if stride == 1: + i = lax.convert_element_type(start, np.int32) + i = np.broadcast_to(i, tuple(scatter_indices.shape[:-1]) + (1,)) + scatter_indices = np.concatenate((scatter_indices, i), -1) + slice_shape.append(limit - start) + collapsed_slice_shape.append(limit - start) + update_window_dims.append(collapsed_y_axis) + scatter_dims_to_operand_dims.append(x_axis) + else: + i = np.arange(start, limit, stride, dtype=np.int32) + size = i.shape[0] + slice_shape.append(size) + collapsed_slice_shape.append(size) + scatter_indices_shape = tuple(scatter_indices.shape[:-1]) + (size,) + i = lax.broadcast_in_dim( + i, shape=scatter_indices_shape + (1,), + broadcast_dimensions=(len(scatter_indices_shape) - 1,)) + scatter_indices = lax.broadcast_in_dim( + scatter_indices, + shape=scatter_indices_shape + (len(scatter_dims_to_operand_dims),), + broadcast_dimensions=( + tuple(range(len(scatter_indices_shape) - 1)) + + (len(scatter_indices_shape),))) + scatter_indices = np.concatenate( + (scatter_indices, i), len(scatter_indices_shape)) + scatter_dims_to_operand_dims.append(x_axis) + inserted_window_dims.append(x_axis) + + collapsed_y_axis += 1 + y_axis += 1 + x_axis += 1 + else: + raise IndexError("Unknown index type ", i) + + y = np.broadcast_to(y, tuple(slice_shape)) + y = lax.reshape(y, collapsed_slice_shape) + if reversed_y_dims: + y = lax.rev(y, reversed_y_dims) + + dnums = lax.ScatterDimensionNumbers( + update_window_dims = tuple(update_window_dims), + inserted_window_dims = tuple(inserted_window_dims), + scatter_dims_to_operand_dims = tuple(scatter_dims_to_operand_dims) + ) + return scatter_op(x, scatter_indices, y, dnums) + + +class _Indexable(object): + """Helper object for building indexes for indexed update functions. + + This is a singleton object that overrides the :code:`__getitem__` method + to return the index it is passed. + + >>> jax.ops.index[1:2, 3, None, ..., ::2] + (slice(1, 2, None), 3, None, Ellipsis, slice(None, None, 2)) + """ + __slots__ = () + + def __getitem__(self, index): + return index + +#: Index object singleton +index = _Indexable() + + +def index_add(x, idx, y): + """Pure equivalent of :code:`x[idx] += y`. + + Returns the the value of `x` that would result from the + NumPy-style :mod:`indexed assignment <numpy.doc.indexing>`:: + x[idx] += y + + Note the `index_add` operator is pure; `x` itself is + not modified, instead the new value that `x` would have taken is returned. 
+ + Unlike the NumPy code :code:`x[idx] += y`, if multiple indices refer to the + same location the updates will be summed. (NumPy would only apply the last + update, rather than summing the updates.) The order in which conflicting + updates are applied is implementation-defined and may be nondeterministic + (e.g., due to concurrency on some hardware platforms). + + Args: + x: an array. + idx: a Numpy-style basic index, consisting of `None`, integers, `slice` + objects, ellipses, or a tuple of the above. A convenient syntactic sugar + for forming indices is via the :data:`jax.ops.index` object. + y: the array of updates. `y` must be broadcastable to the shape of the + array that would be returned by `x[idx]`. + + Returns: + An array. + + >>> x = jax.numpy.ones((5, 6)) + >>> jax.ops.index_add(x, jax.ops.index[2:4, 3:], 6.) + array([[1., 1., 1., 1., 1., 1.], + [1., 1., 1., 1., 1., 1.], + [1., 1., 1., 7., 7., 7.], + [1., 1., 1., 7., 7., 7.], + [1., 1., 1., 1., 1., 1.]], dtype=float32) + """ + return _scatter_update(x, idx, y, lax.scatter_add) + +def index_update(x, idx, y): + """Pure equivalent of :code:`x[idx] = y`. + + Returns the the value of `x` that would result from the + NumPy-style :mod:`indexed assignment <numpy.doc.indexing>`:: + x[idx] += y + + Note the `index_update` operator is pure; `x` itself is + not modified, instead the new value that `x` would have taken is returned. + + Unlike NumPy's :code:`x[idx] = y`, if multiple indices refer to the same + location it is undefined which update is chosen; JAX may choose the order of + updates arbitrarily and nondeterministically (e.g., due to concurrent + updates on some hardware platforms). + + Args: + x: an array. + idx: a Numpy-style basic index, consisting of `None`, integers, `slice` + objects, ellipses, or a tuple of the above. A convenient syntactic sugar + for forming indices is via the :data:`jax.ops.index` object. + y: the array of updates. `y` must be broadcastable to the shape of the + array that would be returned by `x[idx]`. + + Returns: + An array. + + >>> x = jax.numpy.ones((5, 6)) + >>> jax.ops.index_update(x, jax.ops.index[::2, 3:], 6.) + array([[1., 1., 1., 6., 6., 6.], + [1., 1., 1., 1., 1., 1.], + [1., 1., 1., 6., 6., 6.], + [1., 1., 1., 1., 1., 1.], + [1., 1., 1., 6., 6., 6.]], dtype=float32) + """ + return _scatter_update(x, idx, y, lax.scatter)
diff --git a/tests/lax_numpy_indexing_test.py b/tests/lax_numpy_indexing_test.py --- a/tests/lax_numpy_indexing_test.py +++ b/tests/lax_numpy_indexing_test.py @@ -17,9 +17,10 @@ from __future__ import print_function import collections +import enum from functools import partial import itertools -from unittest import skip +import unittest from absl.testing import absltest from absl.testing import parameterized @@ -29,6 +30,7 @@ from jax import api from jax import lax from jax import numpy as lnp +from jax import ops from jax import test_util as jtu from jax.config import config @@ -59,102 +61,172 @@ def check_grads(f, args, order, atol=None, rtol=None, eps=None): jtu.check_vjp(f, partial(api.vjp, f), args, atol, rtol, eps) +STATIC_INDEXING_TESTS = [ + ("OneIntIndex", [ + IndexSpec(shape=(3,), indexer=1), + IndexSpec(shape=(3, 3), indexer=0), + IndexSpec(shape=(3, 4, 5), indexer=2), + IndexSpec(shape=(3,), indexer=-1), + IndexSpec(shape=(3,), indexer=-2), + ]), + ("TwoIntIndices", [ + IndexSpec(shape=(3, 3), indexer=(2, 1)), + IndexSpec(shape=(3, 4, 5), indexer=(1, 2)), + IndexSpec(shape=(3, 4, 5), indexer=(-1, 2)), + ]), + ("ThreeIntIndices", [IndexSpec((3, 4, 5), indexer=(1, 2, 3))]), + ("OneSliceIndex", [ + IndexSpec(shape=(10,), indexer=slice(1, 3)), + IndexSpec(shape=(10,), indexer=slice(1, -1)), + IndexSpec(shape=(10,), indexer=slice(None, -1)), + IndexSpec(shape=(10,), indexer=slice(None, None, None)), + IndexSpec(shape=(10, 8), indexer=slice(1, 3)), + IndexSpec(shape=(10, 8), indexer=slice(1, None)), + IndexSpec(shape=(10, 8), indexer=slice(None, 3)), + IndexSpec(shape=(10, 8), indexer=slice(-3, None)), + ]), + ("OneSliceIndexNegativeStride", [ + IndexSpec(shape=(10,), indexer=slice(3, 1, -1)), + IndexSpec(shape=(10,), indexer=slice(1, 8, -1)), # empty result + IndexSpec(shape=(10,), indexer=slice(None, 1, -2)), + IndexSpec(shape=(10,), indexer=slice(None, None, -1)), + IndexSpec(shape=(10, 8), indexer=slice(3, 1, -1)), + IndexSpec(shape=(10, 8), indexer=slice(0, 8, -1)), # empty result + IndexSpec(shape=(10, 8), indexer=slice(None, None, -1)), + ]), + ("OneSliceIndexNonUnitStride", [ + IndexSpec(shape=(10,), indexer=slice(0, 8, 2)), + IndexSpec(shape=(10,), indexer=slice(0, 8, 3)), + IndexSpec(shape=(10,), indexer=slice(1, 3, 2)), + IndexSpec(shape=(10,), indexer=slice(1, None, 2)), + IndexSpec(shape=(10,), indexer=slice(None, 1, -2)), + IndexSpec(shape=(10, 8), indexer=slice(1, 8, 3)), + IndexSpec(shape=(10, 8), indexer=slice(None, None, 2)), + IndexSpec(shape=(10, 8), indexer=slice(None, 1, -2)), + IndexSpec(shape=(10, 8), indexer=slice(None, None, -2)), + ]), + ("TwoSliceIndices", [ + IndexSpec(shape=(10, 8), indexer=(slice(1, 3), slice(0, 2))), + IndexSpec(shape=(10, 8), indexer=(slice(1, None), slice(None, 2))), + IndexSpec( + shape=(10, 8), indexer=(slice(None, None, -1), slice(None, 2))), + IndexSpec(shape=(10, 8, 3), indexer=(slice(1, 3), slice(0, 2))), + IndexSpec(shape=(10, 8, 3), indexer=(slice(1, 3), slice(0, None))), + IndexSpec(shape=(10, 8, 3), indexer=(slice(1, None), slice(0, 2))), + ]), + ("OneColonIndex", [ + IndexSpec(shape=(3,), indexer=slice(None)), + IndexSpec(shape=(3, 4), indexer=slice(None)), + ]), + ("MultipleColonIndices", [ + IndexSpec(shape=(3, 4), indexer=(slice(None), slice(None))), + IndexSpec(shape=(3, 4, 5), indexer=(slice(None), slice(None))), + ]), + ("MixedSliceIndices", [ + IndexSpec(shape=(10, 4), indexer=(slice(None), slice(0, 2))), + IndexSpec(shape=(10, 4), indexer=(1, slice(None))), + ]), + ("EllipsisIndex", [ + IndexSpec(shape=(3,), 
indexer=Ellipsis), + IndexSpec(shape=(3, 4), indexer=Ellipsis), + IndexSpec(shape=(3, 4, 5), indexer=(0, Ellipsis)), + IndexSpec(shape=(3, 4, 5), indexer=(Ellipsis, 2, 3)), + ]), + ("NoneIndex", [ + IndexSpec(shape=(), indexer=None), + IndexSpec(shape=(), indexer=(None, None)), + IndexSpec(shape=(), indexer=(Ellipsis, None)), + IndexSpec(shape=(3,), indexer=None), + IndexSpec(shape=(3, 4), indexer=None), + IndexSpec(shape=(3, 4), indexer=(Ellipsis, None)), + IndexSpec(shape=(3, 4), indexer=(0, None, Ellipsis)), + IndexSpec(shape=(3, 4, 5), indexer=(1, None, Ellipsis)), + ]), + ("EmptyIndex", [ + IndexSpec(shape=(), indexer=()), + IndexSpec(shape=(3,), indexer=()), + IndexSpec(shape=(3, 4), indexer=()), + ]), +] + +STATIC_INDEXING_GRAD_TESTS = [ + ("OneIntIndex", [ + IndexSpec(shape=(3,), indexer=1), + IndexSpec(shape=(3, 3), indexer=0), + IndexSpec(shape=(3, 4, 5), indexer=2), + IndexSpec(shape=(3,), indexer=-1), + IndexSpec(shape=(3,), indexer=-2), + ]), + ("TwoIntIndices", [ + IndexSpec(shape=(3, 3), indexer=(2, 1)), + IndexSpec(shape=(3, 4, 5), indexer=(1, 2)), + IndexSpec(shape=(3, 4, 5), indexer=(-1, 2)), + ]), + ("ThreeIntIndices", [IndexSpec((3, 4, 5), indexer=(1, 2, 3))]), + ("OneSliceIndex", [ + IndexSpec(shape=(5,), indexer=slice(1, 3)), + IndexSpec(shape=(5,), indexer=slice(1, -1)), + IndexSpec(shape=(5,), indexer=slice(None, -1)), + IndexSpec(shape=(5,), indexer=slice(None, None, None)), + IndexSpec(shape=(5, 4), indexer=slice(1, 3)), + IndexSpec(shape=(5, 4), indexer=slice(1, None)), + IndexSpec(shape=(5, 4), indexer=slice(None, 3)), + IndexSpec(shape=(5, 4), indexer=slice(-3, None)), + ]), + ("TwoSliceIndices", [ + IndexSpec(shape=(5, 4), indexer=(slice(1, 3), slice(0, 2))), + IndexSpec(shape=(5, 4), indexer=(slice(1, None), slice(None, 2))), + IndexSpec(shape=(5, 4, 3), indexer=(slice(1, 3), slice(0, 2))), + IndexSpec(shape=(5, 4, 3), indexer=(slice(1, 3), slice(0, None))), + IndexSpec(shape=(5, 4, 3), indexer=(slice(1, None), slice(0, 2))), + ]), + ("OneColonIndex", [ + IndexSpec(shape=(3,), indexer=slice(None)), + IndexSpec(shape=(3, 4), indexer=slice(None)), + ]), + ("MultipleColonIndices", [ + IndexSpec(shape=(3, 4), indexer=(slice(None), slice(None))), + IndexSpec(shape=(3, 4, 5), indexer=(slice(None), slice(None))), + ]), + ("MixedSliceIndices", [ + IndexSpec(shape=(5, 4), indexer=(slice(None), slice(0, 2))), + IndexSpec(shape=(5, 4), indexer=(1, slice(None))), + ]), + ("EllipsisIndex", [ + IndexSpec(shape=(3,), indexer=Ellipsis), + IndexSpec(shape=(3, 4), indexer=Ellipsis), + IndexSpec(shape=(3, 4, 5), indexer=(0, Ellipsis)), + IndexSpec(shape=(3, 4, 5), indexer=(Ellipsis, 2, 3)), + ]), + ("NoneIndex", [ + IndexSpec(shape=(), indexer=None), + IndexSpec(shape=(), indexer=(None, None)), + IndexSpec(shape=(), indexer=(Ellipsis, None)), + IndexSpec(shape=(3,), indexer=None), + IndexSpec(shape=(3, 4), indexer=None), + IndexSpec(shape=(3, 4), indexer=(Ellipsis, None)), + IndexSpec(shape=(3, 4), indexer=(0, None, Ellipsis)), + IndexSpec(shape=(3, 4, 5), indexer=(1, None, Ellipsis)), + ]), + # TODO(mattjj): these fail for uninteresting dtype reasons + # ("EmptyIndex", + # [IndexSpec(shape=(), indexer=()), + # IndexSpec(shape=(3,), indexer=()), + # IndexSpec(shape=(3, 4), indexer=()), + # ]), +] + class IndexingTest(jtu.JaxTestCase): """Tests for Numpy indexing translation rules.""" - @parameterized.named_parameters({ + @parameterized.named_parameters(jtu.cases_from_list({ "testcase_name": "{}_inshape={}_indexer={}".format( name, jtu.format_shape_dtype_string( shape, dtype), 
indexer), "shape": shape, "dtype": dtype, "rng": rng, "indexer": indexer - } for name, index_specs in [ - ("OneIntIndex", [ - IndexSpec(shape=(3,), indexer=1), - IndexSpec(shape=(3, 3), indexer=0), - IndexSpec(shape=(3, 4, 5), indexer=2), - IndexSpec(shape=(3,), indexer=-1), - IndexSpec(shape=(3,), indexer=-2), - ]), - ("TwoIntIndices", [ - IndexSpec(shape=(3, 3), indexer=(2, 1)), - IndexSpec(shape=(3, 4, 5), indexer=(1, 2)), - IndexSpec(shape=(3, 4, 5), indexer=(-1, 2)), - ]), - ("ThreeIntIndices", [IndexSpec((3, 4, 5), indexer=(1, 2, 3))]), - ("OneSliceIndex", [ - IndexSpec(shape=(10,), indexer=slice(1, 3)), - IndexSpec(shape=(10,), indexer=slice(1, -1)), - IndexSpec(shape=(10,), indexer=slice(None, -1)), - IndexSpec(shape=(10,), indexer=slice(None, None, None)), - IndexSpec(shape=(10, 8), indexer=slice(1, 3)), - IndexSpec(shape=(10, 8), indexer=slice(1, None)), - IndexSpec(shape=(10, 8), indexer=slice(None, 3)), - IndexSpec(shape=(10, 8), indexer=slice(-3, None)), - ]), - ("OneSliceIndexNegativeStride", [ - IndexSpec(shape=(10,), indexer=slice(3, 1, -1)), - IndexSpec(shape=(10,), indexer=slice(1, 8, -1)), # empty result - IndexSpec(shape=(10,), indexer=slice(None, 1, -2)), - IndexSpec(shape=(10,), indexer=slice(None, None, -1)), - IndexSpec(shape=(10, 8), indexer=slice(3, 1, -1)), - IndexSpec(shape=(10, 8), indexer=slice(0, 8, -1)), # empty result - IndexSpec(shape=(10, 8), indexer=slice(None, None, -1)), - ]), - ("OneSliceIndexNonUnitStride", [ - IndexSpec(shape=(10,), indexer=slice(0, 8, 2)), - IndexSpec(shape=(10,), indexer=slice(0, 8, 3)), - IndexSpec(shape=(10,), indexer=slice(1, 3, 2)), - IndexSpec(shape=(10,), indexer=slice(1, None, 2)), - IndexSpec(shape=(10,), indexer=slice(None, 1, -2)), - IndexSpec(shape=(10, 8), indexer=slice(1, 8, 3)), - IndexSpec(shape=(10, 8), indexer=slice(None, None, 2)), - IndexSpec(shape=(10, 8), indexer=slice(None, 1, -2)), - IndexSpec(shape=(10, 8), indexer=slice(None, None, -2)), - ]), - ("TwoSliceIndices", [ - IndexSpec(shape=(10, 8), indexer=(slice(1, 3), slice(0, 2))), - IndexSpec(shape=(10, 8), indexer=(slice(1, None), slice(None, 2))), - IndexSpec( - shape=(10, 8), indexer=(slice(None, None, -1), slice(None, 2))), - IndexSpec(shape=(10, 8, 3), indexer=(slice(1, 3), slice(0, 2))), - IndexSpec(shape=(10, 8, 3), indexer=(slice(1, 3), slice(0, None))), - IndexSpec(shape=(10, 8, 3), indexer=(slice(1, None), slice(0, 2))), - ]), - ("OneColonIndex", [ - IndexSpec(shape=(3,), indexer=slice(None)), - IndexSpec(shape=(3, 4), indexer=slice(None)), - ]), - ("MultipleColonIndices", [ - IndexSpec(shape=(3, 4), indexer=(slice(None), slice(None))), - IndexSpec(shape=(3, 4, 5), indexer=(slice(None), slice(None))), - ]), - ("MixedSliceIndices", [ - IndexSpec(shape=(10, 4), indexer=(slice(None), slice(0, 2))), - IndexSpec(shape=(10, 4), indexer=(1, slice(None))), - ]), - ("EllipsisIndex", [ - IndexSpec(shape=(3,), indexer=Ellipsis), - IndexSpec(shape=(3, 4), indexer=Ellipsis), - IndexSpec(shape=(3, 4, 5), indexer=(0, Ellipsis)), - IndexSpec(shape=(3, 4, 5), indexer=(Ellipsis, 2, 3)), - ]), - ("NoneIndex", [ - IndexSpec(shape=(), indexer=None), - IndexSpec(shape=(), indexer=(None, None)), - IndexSpec(shape=(), indexer=(Ellipsis, None)), - IndexSpec(shape=(3,), indexer=None), - IndexSpec(shape=(3, 4), indexer=None), - IndexSpec(shape=(3, 4), indexer=(Ellipsis, None)), - IndexSpec(shape=(3, 4), indexer=(0, None, Ellipsis)), - IndexSpec(shape=(3, 4, 5), indexer=(1, None, Ellipsis)), - ]), - ("EmptyIndex", [ - IndexSpec(shape=(), indexer=()), - IndexSpec(shape=(3,), 
indexer=()), - IndexSpec(shape=(3, 4), indexer=()), - ]), - ] for shape, indexer in index_specs for dtype in all_dtypes - for rng in [jtu.rand_default()]) - @jtu.skip_on_devices("tpu") + } for name, index_specs in STATIC_INDEXING_TESTS + for shape, indexer in index_specs + for dtype in all_dtypes + for rng in [jtu.rand_default()])) def testStaticIndexing(self, shape, dtype, rng, indexer): args_maker = lambda: [rng(shape, dtype)] fun = lambda x: x[indexer] @@ -166,74 +238,10 @@ def testStaticIndexing(self, shape, dtype, rng, indexer): jtu.format_shape_dtype_string( shape, dtype), indexer), "shape": shape, "dtype": dtype, "rng": rng, "indexer": indexer - } for name, index_specs in [ - ("OneIntIndex", [ - IndexSpec(shape=(3,), indexer=1), - IndexSpec(shape=(3, 3), indexer=0), - IndexSpec(shape=(3, 4, 5), indexer=2), - IndexSpec(shape=(3,), indexer=-1), - IndexSpec(shape=(3,), indexer=-2), - ]), - ("TwoIntIndices", [ - IndexSpec(shape=(3, 3), indexer=(2, 1)), - IndexSpec(shape=(3, 4, 5), indexer=(1, 2)), - IndexSpec(shape=(3, 4, 5), indexer=(-1, 2)), - ]), - ("ThreeIntIndices", [IndexSpec((3, 4, 5), indexer=(1, 2, 3))]), - ("OneSliceIndex", [ - IndexSpec(shape=(5,), indexer=slice(1, 3)), - IndexSpec(shape=(5,), indexer=slice(1, -1)), - IndexSpec(shape=(5,), indexer=slice(None, -1)), - IndexSpec(shape=(5,), indexer=slice(None, None, None)), - IndexSpec(shape=(5, 4), indexer=slice(1, 3)), - IndexSpec(shape=(5, 4), indexer=slice(1, None)), - IndexSpec(shape=(5, 4), indexer=slice(None, 3)), - IndexSpec(shape=(5, 4), indexer=slice(-3, None)), - ]), - ("TwoSliceIndices", [ - IndexSpec(shape=(5, 4), indexer=(slice(1, 3), slice(0, 2))), - IndexSpec(shape=(5, 4), indexer=(slice(1, None), slice(None, 2))), - IndexSpec(shape=(5, 4, 3), indexer=(slice(1, 3), slice(0, 2))), - IndexSpec(shape=(5, 4, 3), indexer=(slice(1, 3), slice(0, None))), - IndexSpec(shape=(5, 4, 3), indexer=(slice(1, None), slice(0, 2))), - ]), - ("OneColonIndex", [ - IndexSpec(shape=(3,), indexer=slice(None)), - IndexSpec(shape=(3, 4), indexer=slice(None)), - ]), - ("MultipleColonIndices", [ - IndexSpec(shape=(3, 4), indexer=(slice(None), slice(None))), - IndexSpec(shape=(3, 4, 5), indexer=(slice(None), slice(None))), - ]), - ("MixedSliceIndices", [ - IndexSpec(shape=(5, 4), indexer=(slice(None), slice(0, 2))), - IndexSpec(shape=(5, 4), indexer=(1, slice(None))), - ]), - ("EllipsisIndex", [ - IndexSpec(shape=(3,), indexer=Ellipsis), - IndexSpec(shape=(3, 4), indexer=Ellipsis), - IndexSpec(shape=(3, 4, 5), indexer=(0, Ellipsis)), - IndexSpec(shape=(3, 4, 5), indexer=(Ellipsis, 2, 3)), - ]), - ("NoneIndex", [ - IndexSpec(shape=(), indexer=None), - IndexSpec(shape=(), indexer=(None, None)), - IndexSpec(shape=(), indexer=(Ellipsis, None)), - IndexSpec(shape=(3,), indexer=None), - IndexSpec(shape=(3, 4), indexer=None), - IndexSpec(shape=(3, 4), indexer=(Ellipsis, None)), - IndexSpec(shape=(3, 4), indexer=(0, None, Ellipsis)), - IndexSpec(shape=(3, 4, 5), indexer=(1, None, Ellipsis)), - ]), - # TODO(mattjj): these fail for uninteresting dtype reasons - # ("EmptyIndex", - # [IndexSpec(shape=(), indexer=()), - # IndexSpec(shape=(3,), indexer=()), - # IndexSpec(shape=(3, 4), indexer=()), - # ]), - ] for shape, indexer in index_specs for dtype in float_dtypes - for rng in [jtu.rand_default()]) - @jtu.skip_on_devices("tpu") + } for name, index_specs in STATIC_INDEXING_GRAD_TESTS + for shape, indexer in index_specs + for dtype in float_dtypes + for rng in [jtu.rand_default()]) def testStaticIndexingGrads(self, shape, dtype, rng, indexer): tol = 
1e-2 if onp.finfo(dtype).bits == 32 else None arg = rng(shape, dtype) @@ -322,7 +330,7 @@ def fun(x, unpacked_indexer): args_maker = lambda: [rng(shape, dtype), unpacked_indexer] self._CompileAndCheck(fun, args_maker, check_dtypes=True) - @skip + @unittest.skip @parameterized.named_parameters( {"testcase_name": "{}_inshape={}_indexer={}" .format(name, jtu.format_shape_dtype_string(shape, dtype), indexer), @@ -644,5 +652,91 @@ def testIssue187(self): self.assertAllClose(ans, expected, check_dtypes=False) +def _broadcastable_shapes(shape): + """Returns all shapes that broadcast to `shape`.""" + def f(rshape): + yield [] + if rshape: + for s in f(rshape[1:]): + yield rshape[0:1] + s + if rshape[0] != 1: + for s in f(rshape[1:]): + yield [1] + s + for x in f(list(reversed(shape))): + yield list(reversed(x)) + +def _update_shape(shape, indexer): + return onp.zeros(shape)[indexer].shape + + +class UpdateOps(enum.Enum): + UPDATE = 0 + ADD = 1 + +class IndexedUpdateTest(jtu.JaxTestCase): + + @parameterized.named_parameters(jtu.cases_from_list({ + "testcase_name": "{}_inshape={}_indexer={}_update={}_op={}".format( + name, jtu.format_shape_dtype_string(shape, dtype), indexer, + jtu.format_shape_dtype_string(update_shape, update_dtype), op.name), + "shape": shape, "dtype": dtype, "rng": rng, "indexer": indexer, + "update_shape": update_shape, "update_dtype": update_dtype, + "op": op + } for name, index_specs in STATIC_INDEXING_TESTS + for shape, indexer in index_specs + for op in [UpdateOps.UPDATE, UpdateOps.ADD] + for dtype in (all_dtypes if op == UpdateOps.UPDATE else default_dtypes) + for update_shape in _broadcastable_shapes(_update_shape(shape, indexer)) + for update_dtype in ([dtype] if op == UpdateOps.ADD else all_dtypes) + for rng in [jtu.rand_default()])) + def testStaticIndexing(self, shape, dtype, update_shape, update_dtype, + rng, indexer, op): + if FLAGS.jax_test_dut == "cpu" and not shape: + # TODO(b/127315062): this case causes an XLA crash on CPU. Reenable when + # fixed. + raise unittest.SkipTest("Test case crashes on CPU") + args_maker = lambda: [rng(shape, dtype), rng(update_shape, update_dtype)] + def onp_fn(x, y): + x = x.copy() + if op == UpdateOps.UPDATE: + x[indexer] = y + else: + x[indexer] += y + return x + + jax_op = ops.index_update if op == UpdateOps.UPDATE else ops.index_add + jax_fn = lambda x, y: jax_op(x, indexer, y) + self._CheckAgainstNumpy(onp_fn, jax_fn, args_maker, check_dtypes=True) + self._CompileAndCheck(jax_fn, args_maker, check_dtypes=True) + + + @parameterized.named_parameters(jtu.cases_from_list({ + "testcase_name": "{}_inshape={}_indexer={}_update={}_op={}".format( + name, jtu.format_shape_dtype_string(shape, dtype), indexer, + jtu.format_shape_dtype_string(update_shape, update_dtype), op.name), + "shape": shape, "dtype": dtype, "rng": rng, "indexer": indexer, + "update_shape": update_shape, "update_dtype": update_dtype, + "op": op + } for name, index_specs in STATIC_INDEXING_TESTS + for shape, indexer in index_specs + for op in [UpdateOps.UPDATE, UpdateOps.ADD] + for dtype in float_dtypes + for update_shape in _broadcastable_shapes(_update_shape(shape, indexer)) + for update_dtype in ([dtype] if op == UpdateOps.ADD else float_dtypes) + for rng in [jtu.rand_default()])) + def testStaticIndexingGrads(self, shape, dtype, update_shape, update_dtype, + rng, indexer, op): + if FLAGS.jax_test_dut == "cpu" and not shape: + # TODO(b/127315062): this case causes an XLA crash on CPU. Reenable when + # fixed. 
+ raise unittest.SkipTest("Test case crashes on CPU") + + jax_op = ops.index_update if op == UpdateOps.UPDATE else ops.index_add + jax_fn = lambda x, y: jax_op(x, indexer, y) + x = rng(shape, dtype) + y = rng(update_shape, update_dtype) + check_grads(jax_fn, (x, y), 2, rtol=1e-3, atol=1e-3, eps=1.) + + if __name__ == "__main__": absltest.main()
add convenience wrapper for array updating with fancy index expressions We should provide an `update` function that lets users write ```python A = lax.update(A, (slice(1, None, 2), Ellipsis, slice(None, -3)), value) ``` which would behave similarly to this kind of expression in regular NumPy ```python A[1::2, ..., :-3] = value ``` except without the in-place mutation that would update any aliases. This wrapper would be similar to the `lax_numpy._rewriting_take` function we already have for handling complex indexing expressions, and would ultimately call into primitives like `lax.dynamic_update_slice` and `lax.index_untake`. Better name suggestions welcome! This is a separate issue from whether to provide more convenient syntax for this kind of operation.
This is a really important operation for making it simple to express certain models. When using TF I have often had to spend a lot of time figuring out exactly how to use scatter_nd or whatever, so making a clear correspondence with numpy syntax is useful.
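A minimal sketch of the functional-update API this request led to, based on the `ops.index_update` / `ops.index_add` calls exercised in the test diff above (array names and shapes here are purely illustrative):

```python
import jax.numpy as np
from jax import ops

A = np.zeros((4, 6, 5))
value = 7.
# Functional analogue of `A[1::2, ..., :-3] = value`: returns a new array,
# leaving A (and any aliases of it) untouched.
B = ops.index_update(A, (slice(1, None, 2), Ellipsis, slice(None, -3)), value)
# Accumulating variant, analogous to `A[0] += 1.`:
C = ops.index_add(A, 0, np.ones((6, 5)))
```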
2019-03-04T20:14:12
google/jax
484
google__jax-484
[ "366" ]
c05d6e8796a62cb610cb49a3a89c55eab277fcfc
diff --git a/jax/api.py b/jax/api.py --- a/jax/api.py +++ b/jax/api.py @@ -169,7 +169,7 @@ def computation_maker(*args, **kwargs): return computation_maker -def grad(fun, argnums=0): +def grad(fun, argnums=0, has_aux=False): """Creates a function which evaluates the gradient of `fun`. Args: @@ -179,13 +179,17 @@ def grad(fun, argnums=0): arrays with shape `(1,)` etc.) argnums: Optional, integer or tuple of integers. Specifies which positional argument(s) to differentiate with respect to (default 0). + has_aux: Optional, bool. Indicates whether `fun` returns a pair where the + first element is considered the output of the mathematical function to be + differentiated and the second element is auxiliary data. Default False. Returns: A function with the same arguments as `fun`, that evaluates the gradient of `fun`. If `argnums` is an integer then the gradient has the same shape and type as the positional argument indicated by that integer. If argnums is a tuple of integers, the gradient is a tuple of values with the same shapes - and types as the corresponding arguments. + and types as the corresponding arguments. If `has_aux` is True then a pair + of (gradient, auxiliary_data) is returned. For example: @@ -194,7 +198,7 @@ def grad(fun, argnums=0): array(0.961043, dtype=float32) """ - value_and_grad_f = value_and_grad(fun, argnums) + value_and_grad_f = value_and_grad(fun, argnums, has_aux=has_aux) docstr = ("Gradient of {fun} with respect to positional argument(s) " "{argnums}. Takes the same arguments as {fun} but returns the " @@ -203,12 +207,16 @@ def grad(fun, argnums=0): @wraps(fun, docstr=docstr, argnums=argnums) def grad_f(*args, **kwargs): - ans, g = value_and_grad_f(*args, **kwargs) - return g + if not has_aux: + _, g = value_and_grad_f(*args, **kwargs) + return g + else: + (_, aux), g = value_and_grad_f(*args, **kwargs) + return g, aux return grad_f -def value_and_grad(fun, argnums=0): +def value_and_grad(fun, argnums=0, has_aux=False): """Creates a function which evaluates both `fun` and the gradient of `fun`. Args: @@ -218,6 +226,9 @@ def value_and_grad(fun, argnums=0): arrays with shape `(1,)` etc.) argnums: Optional, integer or tuple of integers. Specifies which positional argument(s) to differentiate with respect to (default 0). + has_aux: Optional, bool. Indicates whether `fun` returns a pair where the + first element is considered the output of the mathematical function to be + differentiated and the second element is auxiliary data. Default False. Returns: A function with the same arguments as `fun` that evaluates both `fun` and @@ -238,11 +249,17 @@ def value_and_grad(fun, argnums=0): def value_and_grad_f(*args, **kwargs): f = lu.wrap_init(fun, kwargs) f_partial, dyn_args = _argnums_partial(f, argnums, args) - ans, vjp_py = vjp(f_partial, *dyn_args) + if not has_aux: + ans, vjp_py = vjp(f_partial, *dyn_args) + else: + ans, vjp_py, aux = vjp(f_partial, *dyn_args, has_aux=True) _check_scalar(ans) g = vjp_py(onp.ones((), onp.result_type(ans))) g = g[0] if isinstance(argnums, int) else g - return (ans, g) + if not has_aux: + return ans, g + else: + return (ans, aux), g return value_and_grad_f @@ -529,7 +546,7 @@ def fun(*args): return apply_jaxtree_fun(fun, io_tree, *py_args) -def vjp(fun, *primals): +def vjp(fun, *primals, **kwargs): """Compute a (reverse-mode) vector-Jacobian product of `fun`. `grad` is implemented as a special case of `vjp`. @@ -542,6 +559,9 @@ def vjp(fun, *primals): should be evaluated. 
The length of `primals` should be equal to the number of positional parameters to `fun`. Each primal value should be a tuple of arrays, scalar, or standard Python containers thereof. + has_aux: Optional, bool. Indicates whether `fun` returns a pair where the + first element is considered the output of the mathematical function to be + differentiated and the second element is auxiliary data. Default False. Returns: A `(primals_out, vjpfun)` pair, where `primals_out` is `fun(*primals)`. @@ -556,20 +576,30 @@ def vjp(fun, *primals): >>> g((-0.7, 0.3)) (array(-0.61430776, dtype=float32), array(-0.2524413, dtype=float32)) """ + has_aux = kwargs.pop('has_aux', False) + assert not kwargs if not isinstance(fun, lu.WrappedFun): fun = lu.wrap_init(fun) primals_flat, in_trees = unzip2(map(pytree_to_jaxtupletree, primals)) _check_args(primals_flat) jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(fun, in_trees) - out_primal, out_vjp = ad.vjp(jaxtree_fun, primals_flat) + if not has_aux: + out_primal, out_vjp = ad.vjp(jaxtree_fun, primals_flat) + else: + out_primal, out_vjp, aux = ad.vjp(jaxtree_fun, primals_flat, has_aux=True) out_tree = out_tree() + if has_aux: + out_tree, aux_tree = out_tree.children out_primal_py = build_tree(out_tree, out_primal) ct_in_trees = [out_tree] ct_out_tree = PyTreeDef(node_types[tuple], None, in_trees) def out_vjp_packed(cotangent_in): return out_vjp(cotangent_in) vjp_py = partial(apply_jaxtree_fun, out_vjp_packed, (ct_in_trees, ct_out_tree)) - return out_primal_py, vjp_py + if not has_aux: + return out_primal_py, vjp_py + else: + return out_primal_py, vjp_py, build_tree(aux_tree, aux) def trace_to_jaxpr(traceable, py_pvals, **kwargs): diff --git a/jax/interpreters/ad.py b/jax/interpreters/ad.py --- a/jax/interpreters/ad.py +++ b/jax/interpreters/ad.py @@ -34,8 +34,12 @@ def identity(x): return x -def jvp(fun): - return jvpfun(jvp_subtrace(fun)) +def jvp(fun, has_aux=False): + if not has_aux: + return jvpfun(jvp_subtrace(fun)) + else: + fun, aux = jvp_subtrace_aux(fun) + return jvpfun(fun), aux @transformation def jvpfun(primals, tangents): @@ -56,13 +60,31 @@ def jvp_subtrace(master, primals, tangents): out_primal, out_tangent = out_tracer.primal, out_tracer.tangent yield (out_primal, out_tangent) +@transformation_with_aux +def jvp_subtrace_aux(master, primals, tangents): + trace = JVPTrace(master, core.cur_sublevel()) + for x in list(primals) + list(tangents): + if isinstance(x, Tracer): + assert x.trace.level < trace.level + ans, aux = yield map(partial(JVPTracer, trace), primals, tangents) + out_tracer, aux_tracer = map(trace.full_raise, (ans, aux)) + out_primal, out_tangent = out_tracer.primal, out_tracer.tangent + aux = aux_tracer.primal # ignore aux tangent + yield (out_primal, out_tangent), aux + + @transformation def pack_output(*args): ans = yield args yield pack(ans) -def linearize(traceable, *primals): - jvpfun = pack_output(jvp(traceable)) +def linearize(traceable, *primals, **kwargs): + has_aux = kwargs.pop('has_aux', False) + if not has_aux: + jvpfun = pack_output(jvp(traceable)) + else: + jvpfun, aux = jvp(traceable, has_aux=True) + jvpfun = pack_output(jvpfun) tangent_avals = [get_aval(p).at_least_vspace() for p in primals] in_pvals = (pe.PartialVal((None, pack(primals))), pe.PartialVal((core.AbstractTuple(tangent_avals), core.unit))) @@ -70,10 +92,16 @@ def linearize(traceable, *primals): pval_primal, pval_tangent = unpair_pval(out_pval) aval_primal, const_primal = pval_primal assert aval_primal is None - return const_primal, pval_tangent, jaxpr, 
consts + if not has_aux: + return const_primal, pval_tangent, jaxpr, consts + else: + return const_primal, pval_tangent, jaxpr, consts, aux() -def vjp(traceable, primals): - out_primal, pval, jaxpr, consts = linearize(traceable, *primals) +def vjp(traceable, primals, has_aux=False): + if not has_aux: + out_primal, pval, jaxpr, consts = linearize(traceable, *primals) + else: + out_primal, pval, jaxpr, consts, aux = linearize(traceable, *primals, has_aux=True) def vjp_(ct): ct = ignore_consts(ct, pval) dummy_primal_and_ct = pack((core.unit, ct)) @@ -81,7 +109,10 @@ def vjp_(ct): _, arg_cts = backward_pass(jaxpr, consts, (), dummy_args, dummy_primal_and_ct) return instantiate_zeros(pack(primals), arg_cts[1]) - return out_primal, vjp_ + if not has_aux: + return out_primal, vjp_ + else: + return out_primal, vjp_, aux def ignore_consts(ct, pval): aval, const = pval
diff --git a/tests/api_test.py b/tests/api_test.py --- a/tests/api_test.py +++ b/tests/api_test.py @@ -341,6 +341,41 @@ def test_large_device_constant(self): ans = jit(lambda x: 2 * x)(np.ones(int(2e6))) # doesn't crash self.assertAllClose(ans, 2., check_dtypes=False) + def test_grad_and_aux_basic(self): + g, aux = grad(lambda x: (x**3, [x**2]), has_aux=True)(3.) + self.assertEqual(g, grad(lambda x: x**3)(3.)) + self.assertEqual(aux, [9.]) + + def test_grad_and_aux_nested(self): + def f(x): + g, aux = grad(lambda x: (x**3, [x**3]), has_aux=True)(x) + return aux[0] + + f2 = lambda x: x**3 + + self.assertEqual(grad(f)(4.), grad(f2)(4.)) + self.assertEqual(jit(grad(f))(4.), grad(f2)(4.)) + self.assertEqual(jit(grad(jit(f)))(4.), grad(f2)(4.)) + + def f(x): + g, aux = grad(lambda x: (x**3, [x**3]), has_aux=True)(x) + return aux[0] * np.sin(x) + + f2 = lambda x: x**3 * np.sin(x) + + self.assertEqual(grad(f)(4.), grad(f2)(4.)) + self.assertEqual(jit(grad(f))(4.), grad(f2)(4.)) + self.assertEqual(jit(grad(jit(f)))(4.), grad(f2)(4.)) + + def test_grad_and_aux_constant(self): + g, aux = grad(lambda x: (x**3, [4.]), has_aux=True)(4.) + self.assertEqual(g, grad(lambda x: x**3)(4.)) + self.assertEqual(aux, [4.]) + + g, aux = grad(lambda x: (x**3, [x**2, 4.]), has_aux=True)(4.) + self.assertEqual(g, grad(lambda x: x**3)(4.)) + self.assertEqual(aux, [4.**2, 4.]) + if __name__ == '__main__': absltest.main()
add a way to return side-information from grad For example, a user may want to return a statistic or auxiliary data structure from the Python function to which `grad` is applied, not to be considered as part of the value of the mathematical function to be differentiated.
Following up from JAXers conversation: it's possible to do this by manually invoking `vjp` with more than one seed: ```python import jax def loss(x): return (x + x, x) def mygrad(x): (l, extra), pullback = jax.vjp(loss, x) dx, = pullback((1.0, 0.0)) return dx, extra ``` But as Dougal pointed out, even calling `jax.ad.vjp` with `jax.ad_util.zero` (rather than `0.0`) [doesn't avoid materializing](https://github.com/google/jax/blob/63608fb9715b33cfac64dc26548fcd7da35e97d3/jax/interpreters/ad.py#L79) the second seed (and performing unnecessary backward-pass computations). So it would be nice to have an API that either accepts real symbolic zeros or allows selecting a single scalar output to receive a seed. @jekbradbury as you probably know, that's [the approach taken in Autograd](https://github.com/HIPS/autograd/blob/master/autograd/differential_operators.py#L142..L147). Note that the extra flop cost will, I think, not be too bad for most use cases, because often `extra` is something that's computed during computation of `l` anyway (or doesn't take much to compute from some intermediate value during the computation of `l`). This is the case in the trivial example you give. That means that the zeros will only be back-propagated through a small piece of graph before being merged into the gradient computation that's going on anyway. Does that make sense? __EDIT:__ here's a couple of pictures to explain. Firstly the function itself: ``` inputs ---------+---------> outputs | | ↓ aux ``` then the backpropagation looks like this ``` inputs <--------+---------- outputs ↑ | < - zeros will only be propagated through this bit, | < - which is usually small aux ``` I still think there will probably be some cases where avoiding any extra FLOs would be beneficial.
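Judging from the tests in the diff above, the `has_aux` flag added by this PR is meant to be used roughly as follows (a sketch, not authoritative documentation; the loss function is made up for illustration):

```python
import jax
import jax.numpy as np

def loss(w):
    l = np.sum(w ** 2)
    return l, [l, np.max(w)]   # (value to differentiate, auxiliary data)

w = np.ones(3)
g, aux = jax.grad(loss, has_aux=True)(w)                 # gradient plus the aux list
(value, aux), g = jax.value_and_grad(loss, has_aux=True)(w)
```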
2019-03-07T22:13:50
google/jax
485
google__jax-485
[ "441" ]
c05d6e8796a62cb610cb49a3a89c55eab277fcfc
diff --git a/jax/interpreters/batching.py b/jax/interpreters/batching.py --- a/jax/interpreters/batching.py +++ b/jax/interpreters/batching.py @@ -35,28 +35,27 @@ map = safe_map -def batch(fun, in_vals, in_dims, out_dim_target): +def batch(fun, in_vals, in_dims, out_dim_dst): sizes = reduce(set.union, map(dimsize, in_dims, in_vals)) if not sizes: return fun.call_wrapped(*in_vals), None # no mapped dimensions elif len(sizes) == 1: - out_val, out_dim = batch_transform(fun).call_wrapped(in_vals, in_dims) - return moveaxis(sizes.pop(), out_dim_target, out_dim, out_val) + sz = sizes.pop() + return batch_transform(fun, sz, in_dims, out_dim_dst).call_wrapped(in_vals) else: raise TypeError("got inconsistent map dimension sizes: {}".format(sizes)) -# TODO(mattjj,dougalm): could call batch_subtrace here (a bit redundant) @transformation -def batch_transform(vals, dims): +def batch_transform(size, in_dims, out_dim_dst, vals): with new_master(BatchTrace) as master: trace = BatchTrace(master, core.cur_sublevel()) - in_tracers = map(partial(BatchTracer, trace), vals, dims) + in_tracers = map(partial(BatchTracer, trace), vals, in_dims) out_tracer = yield in_tracers out_tracer = trace.full_raise(out_tracer) out_val, out_dim = out_tracer.val, out_tracer.batch_dim del master - yield (out_val, out_dim) + yield moveaxis(size, out_dim_dst, out_dim, out_val) @transformation_with_aux @@ -308,13 +307,21 @@ def move_dim_to_front(x, dim): def dimsize(dim, x): aval = get_aval(x) if type(aval) is AbstractTuple: - return reduce(set.union, map(partial(dimsize, dim), x)) - elif type(dim) is int: - return {x.shape[dim]} - elif dim is None: - return set() + if type(dim) is tuple: + return reduce(set.union, map(dimsize, dim, x)) + elif type(dim) is int: + return reduce(set.union, map(partial(dimsize, dim), x)) + elif dim is None: + return set() + else: + raise TypeError(type(dim)) else: - raise TypeError(type(dim)) + if type(dim) is int: + return {x.shape[dim]} + elif dim is None: + return set() + else: + raise TypeError(type(dim)) def moveaxis(sz, dst, src, x): aval = get_aval(x) diff --git a/jax/lax.py b/jax/lax.py --- a/jax/lax.py +++ b/jax/lax.py @@ -3665,10 +3665,56 @@ def _while_loop_translation_rule(c, init_val, cond_consts, body_consts, full_ans = c.While(cond_computation, body_computation, loop_carry) return c.GetTupleElement(full_ans, 0) +def _while_loop_batching_rule(batched_args, batch_dims, aval_out, cond_jaxpr, + body_jaxpr): + # See https://github.com/google/jax/issues/441 for a discussion. + # To batch a while_loop, we need to do some masking, since the elements of the + # batch may run for different numbers of iterations. We perform that masking + # using lax.select, and keep the loop running so long as any of the batch + # elements need by effectively using an np.any(...) in the cond_fun. + # The basic strategy here is to lift `cond_jaxpr` and `body_jaxpr` back into + # traceable Python functions using `core.eval_jaxpr`. Then we can batch them + # using `batching.batch_transform` (the transform underlying `api.vmap`). This + # code also avoids broadcasting `cond_consts` and `body_consts`. 
+ init_val, cond_consts, body_consts = batched_args + init_val_bd, cond_consts_bd, body_consts_bd = batch_dims + + sizes = _reduce(set.union, map(batching.dimsize, batch_dims, batched_args)) + size = sizes.pop() + assert not sizes + + if init_val_bd is None: + # TODO(mattjj): if cond_consts_bd is also None, we could keep cond_fun + # unbatched and avoid the masking logic, but we ignore that optimiztaion + init_val = batching.bdim_at_front(init_val, init_val_bd, size, + force_broadcast=True) + init_val_bd = 0 + + def batched_cond_fun(batched_loop_carry): + @lu.wrap_init + def lifted(loop_carry, cond_consts): + return core.eval_jaxpr(cond_jaxpr, cond_consts, (), loop_carry) + f = batching.batch_transform(lifted, size, (init_val_bd, cond_consts_bd), 0) + preds = f.call_wrapped((batched_loop_carry, cond_consts)) + return reduce(preds, onp.array(False), bitwise_or, [0]) + + def batched_body_fun(batched_loop_carry): + @lu.wrap_init + def lifted(loop_carry, cond_consts, body_consts): + pred = core.eval_jaxpr(cond_jaxpr, cond_consts, (), loop_carry) + new_loop_carry = core.eval_jaxpr(body_jaxpr, body_consts, (), loop_carry) + return select(pred, new_loop_carry, loop_carry) + f = batching.batch_transform( + lifted, size, (init_val_bd, cond_consts_bd, body_consts_bd), init_val_bd) + return f.call_wrapped((batched_loop_carry, cond_consts, body_consts)) + + return while_loop(batched_cond_fun, batched_body_fun, init_val), init_val_bd + while_p = Primitive('while') while_p.def_impl(partial(xla.apply_primitive, while_p)) while_p.def_abstract_eval(_while_loop_abstract_eval) xla.translations[while_p] = _while_loop_translation_rule +batching.primitive_batchers[while_p] = _while_loop_batching_rule def _unpack_eqn(invar, outvars): return core.JaxprEqn([invar], outvars, core.identity_p, (), True, {})
diff --git a/tests/batching_test.py b/tests/batching_test.py --- a/tests/batching_test.py +++ b/tests/batching_test.py @@ -833,6 +833,35 @@ def f(R): H = hessian(f)(R) # don't crash on UnshapedArray + def testWhileLoop(self): + def fun(x): + return lax.while_loop(lambda x: x < 3, lambda x: x + 2, x) + + ans = vmap(fun)(onp.array([0, 1, 2, 3])) + expected = onp.array([4, 3, 4, 3]) + self.assertAllClose(ans, expected, check_dtypes=False) + + fun = jit(fun) + ans = vmap(fun)(onp.array([0, 1, 2, 3])) + expected = onp.array([4, 3, 4, 3]) + self.assertAllClose(ans, expected, check_dtypes=False) + + def testWhileLoopCondConstsBatched(self): + def fun(x, y): + return lax.while_loop(lambda x: x < y, lambda x: x + 2, x) + + ans = vmap(fun, in_axes=(None, 0))(0, onp.array([2, 3])) + expected = onp.array([2, 4]) + self.assertAllClose(ans, expected, check_dtypes=False) + + def testWhileLoopBodyConstsBatched(self): + def fun(x, y): + return lax.while_loop(lambda x: x < 3, lambda x: x + y, x) + + ans = vmap(fun, in_axes=(None, 0))(0, onp.array([2, 3])) + expected = onp.array([4, 3]) + self.assertAllClose(ans, expected, check_dtypes=False) + if __name__ == '__main__': absltest.main()
[FR] support batching rule for `while` Currently, `vmap` does not support a function which involves `lax.while_loop`. For example, the following script will throw `NotImplementedError: Batching rule for 'while' not implemented`: ``` import jax.numpy as np from jax import jit, lax, vmap @jit def f(x): return lax.while_loop(lambda x: x <= 0, lambda x: x + 1, x) g = vmap(f) y = g(np.arange(3.)) ``` In case it is complicated to support `while_loop` for `vmap`, could someone let me know about other alternatives? I know some alternatives from the *gufuncs* tutorial, such as using `onp.vectorize` or a Python for loop, but they are slow. To make `g` fast, can I write `f` in C/Cython code and use vmap with it? Or should I write the whole `g` in C/Cython code? I have little experience with C/Cython but will try to learn if that is the only option. I just want to use `g` as a primitive function and will define a `jvp_rule` for it separately. Thank you for any help in advance!
Thanks for requesting this! No need for C/Cython; we can add a batching rule for `lax.while_loop`. In general there are some rules we haven't gotten to implementing yet, and we tend to wait for users to ask for them as a natural way to prioritize our work. Thank you @mattjj ! That's would be a nice feature. I have been playing with JAX for a few days and really love it. Is it correct to assume that a batched `while_loop` would still require a `cond_fun` with scalar boolean output or do you intend to support a batched `cond_fun` that outputs a boolean for each sample in the batch for which the loop should be continued? That’s a great question! I was thinking the latter, but I haven’t thought through it all the way. If we wanted the whole loop to be vectorized then we should turn the condition into an `np.all` check along the batch dimension and use a mask to keep updating only those loops that are still running. Thoughts? That would be great! This is one of the reasons that I wasn't able to disentangle vectorization and masking in Matchbox; it might be reasonable to allow vmap of while (where the condition contains a vmapped dimension) to invoke the masking interpreter when that's ready. @jekbradbury Hmm perhaps, but in this case couldn't the masking stay at the level of the while loop primitive, rather than being pushed down into the body function (like a JAX interpreter would do)?
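To make the masking idea discussed above concrete, here is a hand-written version of what the new batching rule effectively does for a simple loop (a sketch only; the real rule in the diff works on jaxprs and also handles batched cond/body constants):

```python
import jax.numpy as np
from jax import lax

def f(x):  # the function one would like to vmap
    return lax.while_loop(lambda x: x < 3, lambda x: x + 2, x)

def batched_f(xs):  # manual equivalent over a whole batch xs
    def cond(xs):
        return np.any(xs < 3)                  # run while any lane is unfinished
    def body(xs):
        return np.where(xs < 3, xs + 2, xs)    # only advance unfinished lanes
    return lax.while_loop(cond, body, xs)

# batched_f(np.array([0, 1, 2, 3])) gives [4, 3, 4, 3], matching vmap(f) in the new test.
```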
2019-03-08T01:21:09
google/jax
494
google__jax-494
[ "492" ]
fab4dde12e7f992fe347b14606248b1de6d06a2b
diff --git a/jax/lax.py b/jax/lax.py --- a/jax/lax.py +++ b/jax/lax.py @@ -1114,6 +1114,25 @@ def broadcasted_eye(dtype, shape, axes): def stop_gradient(x): + """Stops gradient computation. + + Operationally `stop_gradient` is the identity function, that is, it returns + argument `x` unchanged. However, `stop_gradient` prevents the flow of + gradients during forward or reverse-mode automatic differentiation. If there + are multiple nested gradient computations, `stop_gradient` stops gradients + for all of them. + + For example: + + >>> jax.grad(lambda x: x**2)(3.) + array(6., dtype=float32) + >>> jax.grad(lambda x: jax.lax.stop_gradient(x)**2)(3.) + array(0., dtype=float32) + >>> jax.grad(jax.grad(lambda x: x**2))(3.) + array(2., dtype=float32) + >>> jax.grad(jax.grad(lambda x: jax.lax.stop_gradient(x)**2))(3.) + array(0., dtype=float32) + """ return stop_gradient_p.bind(x)
Document `lax.stop_gradient` Is there a function similar to TensorFlow's [tf.stop_gradient](https://www.tensorflow.org/api_docs/python/tf/stop_gradient)? How can I do this in JAX?
It's not documented, but I think you're looking for `jax.lax.stop_gradient`. We should document it!
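The examples in the new docstring boil down to the following (copied in spirit from the diff above):

```python
import jax
from jax import lax

jax.grad(lambda x: x ** 2)(3.)                               # 6.0
jax.grad(lambda x: lax.stop_gradient(x) ** 2)(3.)            # 0.0: gradient is blocked
jax.grad(jax.grad(lambda x: lax.stop_gradient(x) ** 2))(3.)  # 0.0 for nested grads too
```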
2019-03-10T22:09:10
google/jax
495
google__jax-495
[ "489" ]
1b408e3e28f516ff49787eb028c0c2261720154e
diff --git a/jax/interpreters/batching.py b/jax/interpreters/batching.py --- a/jax/interpreters/batching.py +++ b/jax/interpreters/batching.py @@ -285,24 +285,11 @@ def zeros_like_batched(batched_args, batch_dims): def bdim_at_front(x, bdim, broadcast_size=1, force_broadcast=False): - if bdim is None: - return broadcast(x, broadcast_size, force_broadcast=force_broadcast) - else: - return move_dim_to_front(x, bdim) + return moveaxis(broadcast_size, 0, bdim, x, force_broadcast=force_broadcast) def move_dim_to_front(x, dim): - aval = get_aval(x) - if type(aval) is AbstractTuple: - return pack(map(partial(move_dim_to_front, dim=dim), x)) - elif isinstance(aval, ShapedArray): - assert 0 <= dim < onp.ndim(x) - if dim == 0: - return x - else: - perm = (dim,) + tuple(range(dim)) + tuple(range(dim + 1, onp.ndim(x))) - return x.transpose(perm) - else: - raise TypeError(type(x)) + assert dim is not None + return moveaxis(None, 0, dim, x) def dimsize(dim, x): aval = get_aval(x) @@ -323,7 +310,7 @@ def dimsize(dim, x): else: raise TypeError(type(dim)) -def moveaxis(sz, dst, src, x): +def moveaxis(sz, dst, src, x, force_broadcast=True): aval = get_aval(x) if type(aval) is AbstractTuple: if type(src) is tuple and type(dst) is tuple: @@ -341,7 +328,7 @@ def moveaxis(sz, dst, src, x): return x else: if src is None: - x = broadcast(x, sz, force_broadcast=True) + x = broadcast(x, sz, force_broadcast=force_broadcast) src = 0 dst_ = dst % (aval.ndim + 1) if src == dst_: diff --git a/jax/lax.py b/jax/lax.py --- a/jax/lax.py +++ b/jax/lax.py @@ -3683,12 +3683,11 @@ def _while_loop_batching_rule(batched_args, batch_dims, aval_out, cond_jaxpr, size = sizes.pop() assert not sizes - if init_val_bd is None: - # TODO(mattjj): if cond_consts_bd is also None, we could keep cond_fun - # unbatched and avoid the masking logic, but we ignore that optimiztaion - init_val = batching.bdim_at_front(init_val, init_val_bd, size, - force_broadcast=True) - init_val_bd = 0 + # TODO(mattjj): if cond_consts_bd is also None, we could keep cond_fun + # unbatched and avoid the masking logic, but we ignore that optimiztaion + init_val = batching.bdim_at_front(init_val, init_val_bd, size, + force_broadcast=True) + init_val_bd = 0 def batched_cond_fun(batched_loop_carry): @lu.wrap_init
diff --git a/tests/batching_test.py b/tests/batching_test.py --- a/tests/batching_test.py +++ b/tests/batching_test.py @@ -913,6 +913,19 @@ def fun(x): expected = (onp.array([10, 11]), onp.array([20, 20])) self.assertAllClose(ans, expected, check_dtypes=False) + def testIssue489(self): + def f(key): + def body_fn(uk): + key = uk[1] + u = random.uniform(key, (), dtype=np.float64) + key, _ = random.split(key) + return u, key + + u, _ = lax.while_loop(lambda uk: uk[0] > 0.5, body_fn, (1., key)) + return u + + print(vmap(f)(random.split(random.PRNGKey(0), 2))) # no crash + if __name__ == '__main__': absltest.main()
while_loop vmap error This one is reported by @fehiepsi in [another thread](https://github.com/google/jax/pull/486#issuecomment-471140362). Quoting the repro from there: ``` # generate a random number in the interval [0, 0.5] def f(key): def body_fn(uk): key = uk[1] u = random.uniform(key, ()) key, _ = random.split(key) return u, key u, _ = lax.while_loop(lambda uk: uk[0] > 0.5, body_fn, (1., key)) #u = random.uniform(key, ()) # this is fine return u print(f(random.PRNGKey(0))) # no error print(vmap(f)(random.split(random.PRNGKey(0), 2))) # TypeError: 'NoneType' object cannot be interpreted as an integer ```
Thanks @mattjj !
2019-03-11T03:58:17
google/jax
509
google__jax-509
[ "461" ]
5592a063375a8feb4439e35edd8c335264dc1e6c
diff --git a/jax/experimental/stax.py b/jax/experimental/stax.py --- a/jax/experimental/stax.py +++ b/jax/experimental/stax.py @@ -130,12 +130,16 @@ def BatchNorm(axis=(0, 1, 2), epsilon=1e-5, center=True, scale=True, _gamma_init = lambda shape: gamma_init(shape) if scale else () axis = (axis,) if np.isscalar(axis) else axis def init_fun(input_shape): - shape = (1 if i in axis else d for i, d in enumerate(input_shape)) - shape = tuple(itertools.dropwhile(lambda x: x == 1, shape)) + shape = tuple(d for i, d in enumerate(input_shape) if i not in axis) beta, gamma = _beta_init(shape), _gamma_init(shape) return input_shape, (beta, gamma) def apply_fun(params, x, **kwargs): beta, gamma = params + # TODO(phawkins): np.expand_dims should accept an axis tuple. + # (https://github.com/numpy/numpy/issues/12290) + ed = tuple(None if i in axis else slice(None) for i in range(np.ndim(x))) + beta = beta[ed] + gamma = gamma[ed] mean, var = np.mean(x, axis, keepdims=True), fastvar(x, axis, keepdims=True) z = (x - mean) / np.sqrt(var + epsilon) if center and scale: return gamma * z + beta
diff --git a/tests/stax_test.py b/tests/stax_test.py --- a/tests/stax_test.py +++ b/tests/stax_test.py @@ -163,7 +163,7 @@ def testFanInConcat(self, input_shapes, axis): init_fun, apply_fun = stax.FanInConcat(axis) _CheckShapeAgreement(self, init_fun, apply_fun, input_shapes) - def testIsuse182(self): + def testIssue182(self): init_fun, apply_fun = stax.Softmax input_shape = (10, 3) inputs = onp.arange(30.).astype("float32").reshape(input_shape) @@ -175,5 +175,34 @@ def testIsuse182(self): assert onp.allclose(onp.sum(onp.asarray(out), -1), 1.) + def testBatchNormShapeNHWC(self): + init_fun, apply_fun = stax.BatchNorm(axis=(0, 1, 2)) + input_shape = (4, 5, 6, 7) + inputs = random_inputs(onp.random.RandomState(0), input_shape) + + out_shape, params = init_fun(input_shape) + out = apply_fun(params, inputs) + + self.assertEqual(out_shape, input_shape) + beta, gamma = params + self.assertEqual(beta.shape, (7,)) + self.assertEqual(gamma.shape, (7,)) + self.assertEqual(out_shape, out.shape) + + def testBatchNormShapeNCHW(self): + # Regression test for https://github.com/google/jax/issues/461 + init_fun, apply_fun = stax.BatchNorm(axis=(0, 2, 3)) + input_shape = (4, 5, 6, 7) + inputs = random_inputs(onp.random.RandomState(0), input_shape) + + out_shape, params = init_fun(input_shape) + out = apply_fun(params, inputs) + + self.assertEqual(out_shape, input_shape) + beta, gamma = params + self.assertEqual(beta.shape, (5,)) + self.assertEqual(gamma.shape, (5,)) + self.assertEqual(out_shape, out.shape) + if __name__ == "__main__": absltest.main()
BatchNorm breaks for non-default axis values The current BatchNorm implementation breaks if the `axis` argument takes values where the missing axis is not the last one, e.g. `axis=(0, 2, 3)`, because the axes are dropped for `gamma` and `beta` and not restored later. I guess this can be fixed by removing this line: https://github.com/google/jax/blob/7e93bff2b95e6f49be7e2cd310e8953bbbdcdc9d/jax/experimental/stax.py#L134 (and adding `tuple` in the line before)
opened issue based on discussions in #139
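A small shape walk-through of the fix, mirroring the NCHW regression test above (pure NumPy, just to show the parameter-shape bookkeeping):

```python
import numpy as onp

input_shape = (4, 5, 6, 7)      # N, C, H, W
axis = (0, 2, 3)                # normalize over batch and spatial dims
# Parameter shape keeps only the non-reduced axes (the channel dim here):
param_shape = tuple(d for i, d in enumerate(input_shape) if i not in axis)
assert param_shape == (5,)
# At apply time the params are re-expanded so they broadcast against x:
ed = tuple(None if i in axis else slice(None) for i in range(len(input_shape)))
beta = onp.zeros(param_shape)[ed]
assert beta.shape == (1, 5, 1, 1)
```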
2019-03-14T13:34:50
google/jax
512
google__jax-512
[ "511" ]
5592a063375a8feb4439e35edd8c335264dc1e6c
diff --git a/jax/lax.py b/jax/lax.py --- a/jax/lax.py +++ b/jax/lax.py @@ -2356,20 +2356,16 @@ def _pad_shape_rule(operand, padding_value, padding_config): def _pad_transpose(t, operand, padding_value, padding_config): lo, hi, interior = zip(*padding_config) - if onp.any(onp.less(lo, 0)) or onp.any(onp.less(hi, 0)): - msg = "pad transpose not implemented for negative padding, got {}." - raise NotImplementedError(msg.format(padding_config)) total = lambda x: _reduce_sum(x, list(range(t.ndim))) - t_op = lambda: slice(t, lo, onp.subtract(t.shape, hi), onp.add(interior, 1)) - t_operand = t_op() if operand is None else None + def t_op(): + unpad_config = zip(onp.negative(lo), onp.negative(hi), onp.zeros_like(interior)) + unpadded = pad(t, onp.array(0., t.dtype), unpad_config) + return slice(unpadded, onp.zeros_like(lo), unpadded.shape, onp.add(interior, 1)) - if padding_value is None: - t_operand = t_op() if t_operand is None else t_operand - t_padv = sub(total(t), total(t_operand)) - else: - t_padv = None + t_operand = t_op() if operand is None else None + t_padv = sub(total(t), total(t_operand)) if padding_value is None else None return [t_operand, t_padv]
diff --git a/tests/lax_test.py b/tests/lax_test.py --- a/tests/lax_test.py +++ b/tests/lax_test.py @@ -2007,7 +2007,7 @@ def testReshapeGrad(self, arg_shape, out_shape, dtype, rng): "shape": shape, "dtype": dtype, "pads": pads, "rng": jtu.rand_small()} for shape in [(2, 3)] for dtype in float_dtypes - for pads in [[(1, 2, 1), (0, 1, 0)]])) + for pads in [[(1, 2, 1), (0, 1, 0)], [(-1, 0, 0), (-1, 0, 2)]])) def testPadGrad(self, shape, dtype, pads, rng): tol = 1e-2 if onp.finfo(dtype).bits == 32 else None
Negative padding in pad_transpose This is a request to support negative padding in the gradient of `lax.pad`, see: https://github.com/google/jax/blob/5592a063375a8feb4439e35edd8c335264dc1e6c/jax/lax.py#L2360
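A minimal check in the spirit of the new `testPadGrad` case (a sketch; shapes and values are arbitrary):

```python
import numpy as onp
import jax.numpy as np
from jax import grad, lax

x = onp.ones((2, 3), onp.float32)
# Negative low padding crops the operand; interior padding of 2 on the last dim.
f = lambda x: np.sum(lax.pad(x, onp.float32(0.), [(-1, 0, 0), (-1, 0, 2)]))
g = grad(f)(x)          # with this change the transpose rule handles the cropping
assert g.shape == x.shape
```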
2019-03-14T16:36:27
google/jax
516
google__jax-516
[ "514" ]
70f127b200386bae329a1e51c68d79c934e47615
diff --git a/jax/interpreters/partial_eval.py b/jax/interpreters/partial_eval.py --- a/jax/interpreters/partial_eval.py +++ b/jax/interpreters/partial_eval.py @@ -296,7 +296,7 @@ def join_pvals(pval1, pval2): pvals1, pvals2 = zip(pv1, const1), zip(pv2, const2) join_pvs, join_consts = unzip2(map(join_pvals, pvals1, pvals2)) if all(isinstance(pv, AbstractValue) for pv in join_pvs): - return PartialVal(AbstractTuple(join_pvs), tuple(join_consts)) + return PartialVal((AbstractTuple(join_pvs), tuple(join_consts))) else: return PartialVal((JaxprTracerTuple(join_pvs), tuple(join_consts)))
diff --git a/tests/lax_test.py b/tests/lax_test.py --- a/tests/lax_test.py +++ b/tests/lax_test.py @@ -1421,6 +1421,12 @@ def cfun(x): self.assertEqual(fun(4), cfun(4)) self.assertEqual(cfun(4), (4, 2., 4.)) + def testIssue514(self): + # just check this doesn't crash + lax.cond(True, + (0, 0), lambda x: (x[0], 0), + (1, 1), lambda x: x) + def testScanAdd(self): def f(x, y): return x + y
[bug] using `lax.cond` with a partial-effect function gives TypeError In the master branch, the following script produces the error ```sh TypeError: could not merge true_fun and false_fun output pvals: ((ShapedArray(int32[]), None), JaxTuple(*,0)) and ((ShapedArray(int32[]),ShapedArray(int32[])), JaxTuple(*,*)). ``` ``` lax.cond(True, (0, 0), lambda x: (x[0], 0), (1, 1), lambda x: x) ``` While these versions work fine: ``` lax.cond(True, (0, 0), lambda x: (0, 0), (1, 1), lambda x: x) ``` or ``` lax.cond(True, (0, 0), lambda x: (x[0], x[1]), (1, 1), lambda x: x) ```
2019-03-19T15:33:31
google/jax
527
google__jax-527
[ "526", "526" ]
cefbea6a4220d0d1c074f1ed70b12808827fac3f
diff --git a/jax/api.py b/jax/api.py --- a/jax/api.py +++ b/jax/api.py @@ -523,10 +523,62 @@ def trim_arg(primal, tangent): out_primal, out_tangent = ad.jvp(jaxtree_fun).call_wrapped(ps_flat, ts_flat) return (build_tree(out_tree(), out_primal), build_tree(out_tree(), out_tangent)) -def linearize(traceable, *primals): - fun = lu.wrap_init(traceable) +def linearize(fun, *primals): + """Produce a linear approximation to `fun` using `jvp` and partial evaluation. + + Args: + fun: Function to be differentiated. Its arguments should be arrays, scalars, + or standard Python containers of arrays or scalars. It should return an + array, scalar, or standard python container of arrays or scalars. + primals: The primal values at which the Jacobian of `fun` should be + evaluated. Should be a tuple of arrays, scalar, or standard Python + container thereof. The length of the tuple is equal to the number of + positional parameters of `fun`. + + Returns: + A pair where the first element is the value of `f(*primals)` and the second + element is a function that evaluates the (forward-mode) Jacobian-vector + product of `fun` evaluated at `primals` without re-doing the linearization + work. + + In terms of values computed, `linearize` behaves much like a curried `jvp`:: + y, out_tangent = jax.jvp(f, (x,), (in_tangent,)) + + y, f_jvp = jax.linearize(f, x) + out_tangent = f_jvp(in_tangent) + + However, the difference is that `linearize` uses partial evaluation so that + the function `f` is not re-linearized on calls to `f_jvp`. In general that + means the memory usage scales with the size of the computation, much like in + reverse-mode. (Indeed, `linearize` has a similar signature to `vjp`!) + + This function is mainly useful if you want to apply `f_jvp` multiple times, + i.e. to evaluate a pushforward for many different input tangent vectors at the + same linearization point. Moreover if all the input tangent vectors are known + at once, it can be more efficient to vectorize using `vmap`, as in:: + pushfwd = partial(jvp, f, (x,)) + y, out_tangents = vmap(pushfwd, out_axes=(None, 0))((in_tangents,)) + By using `vmap` and `jvp` together like this we avoid the stored-linearization + memory cost that scales with the depth of the computation, which is incurred + by both `linearize` and `vjp`. + + Here's a more complete example of using `linearize`: + + >>> def f(x): return 3. * np.sin(x) + np.cos(x / 2.) + ... + >>> jax.jvp(f, (2.,), (3.,)) + (array(3.2681944, dtype=float32), array(-5.007528, dtype=float32)) + >>> y, f_jvp = jax.linearize(f, 2.) + >>> y + array(3.2681944, dtype=float32) + >>> f_jvp(3.) + array(-5.007528, dtype=float32) + >>> f_jvp(4.) + array(-5.007528, dtype=float32) + """ + f = lu.wrap_init(fun) primals_flat, in_trees = unzip2(map(pytree_to_jaxtupletree, primals)) - jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(fun, in_trees) + jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(f, in_trees) out_primal, out_pval, jaxpr, consts = ad.linearize(jaxtree_fun, *primals_flat) out_tree = out_tree() out_primal_py = build_tree(out_tree, out_primal)
Deferred pushforward evaluation in jvp Hi. I'm sorry if this question is due to a basic misunderstanding of how the AD in JAX works. I tried to work through the code but found it pretty tough to follow. Basically, for vjps we get back a function to evaluate the pullback of a cotangent vector without re-evaluating the original function (and, I imagine, without recomputing intermediate values). I'm wondering if there is something similar for jvps? I'd like to get back a function to evaluate the pushforward of a tangent vector at a specific point, ideally making use of intermediate values computed when evaluating the function at the chosen point. My goal is to evaluate the pushforward at the same point but for multiple tangent vectors, where these tangent vectors are not known a priori as they are in jacfwd, so vmap isn't an option. Is there a better way to do this than simply calling jvp every time? I see that there is a linearize function that looks like it gives me the pushforward function I want, but it's not documented, so I'm wondering if it's meant for internal use? Thank you in advance
Thanks for asking this! You're right, that's exactly what `linearize` is for. It composes forward-mode autodiff with partial evaluation, so that all the linearization points are stored (which costs memory, but means you don't have to re-do FLOPs for future JVP evaluations). Everything in api.py (where the name doesn't start with an underscore) is public and meant to be used. We just haven't gotten to documenting some things. @sschoenholz anything to add? Maybe we can use this issue to add a docstring to `linearize`. Here's some quick example usage: ```python from __future__ import print_function import jax.numpy as np from jax import jvp, linearize def f(x): return 3. * np.sin(x) + np.cos(x / 2.) x = 2. t1 = 1. t2 = 3. print(jvp(f, (x,), (t1,))) print(jvp(f, (x,), (t2,))) # nothing saved from first evaluation y, f_jvp = linearize(f, x) print(y) print(f_jvp(t1)) # all the linearization work is already done! print(f_jvp(t2)) ```
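For the complementary case where all tangent vectors are known up front, the new docstring in the diff suggests vectorizing `jvp` with `vmap` instead of paying `linearize`'s stored-linearization memory cost; concretely (values are illustrative, following the docstring's pattern):

```python
from functools import partial
import jax.numpy as np
from jax import jvp, vmap

f = lambda x: 3. * np.sin(x) + np.cos(x / 2.)
x = 2.
in_tangents = np.array([1., 3.])
pushfwd = partial(jvp, f, (x,))
# y is shared across the batch (out_axes=None); out_tangents has one row per tangent.
y, out_tangents = vmap(pushfwd, out_axes=(None, 0))((in_tangents,))
```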
2019-03-25T17:37:55
google/jax
542
google__jax-542
[ "536" ]
925cff05e9e8d07bfc62262492b348759e556331
diff --git a/jax/api.py b/jax/api.py --- a/jax/api.py +++ b/jax/api.py @@ -413,8 +413,7 @@ def vmap(fun, in_axes=0, out_axes=0): @wraps(fun, docstr=docstr) def batched_fun(*args, **kwargs): - if not isinstance(fun, lu.WrappedFun): - f = lu.wrap_init(fun, kwargs) + f = lu.wrap_init(fun, kwargs) if not isinstance(fun, lu.WrappedFun) else fun in_axes_ = in_axes if isinstance(in_axes, (list, tuple)) else (in_axes,) * len(args) in_flat, in_trees = unzip2(map(pytree_to_jaxtupletree, args)) jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(f, in_trees)
undefined f if vmap applied to lu.WrappedFun In `vmap`, `f` is not defined whenever the if condition is false: https://github.com/google/jax/blob/f8eda9bb645a1ed489f08be57bae380bbcf5fa0c/jax/api.py#L414-L420
2019-03-29T15:04:29
google/jax
574
google__jax-574
[ "571" ]
394d3e2f23e4074c4eff9956608e9064d73d8841
diff --git a/jax/config.py b/jax/config.py --- a/jax/config.py +++ b/jax/config.py @@ -73,6 +73,8 @@ def config_with_absl(self): flag_type, meta_args, meta_kwargs = self.meta[name] absl_defs[flag_type](name, val, *meta_args, **meta_kwargs) + app.call_after_init(lambda: self.complete_absl_config(absl_flags)) + def complete_absl_config(self, absl_flags): for name, _ in self.values.items(): self.update(name, getattr(absl_flags.FLAGS, name)) @@ -83,6 +85,7 @@ def parse_flags_with_absl(self): import absl.flags self.config_with_absl() absl.flags.FLAGS(sys.argv) + self.complete_absl_config(absl.flags) already_configured_with_absl = True
jax.config doesn't report command-line flags correctly Another one from @jmgilmer and I - I don't think the jax config is parsing command line flags correctly. I don't know if this is functionally important or just a reporting error - but it is certainly important for user scripts knowing what flags have been set. If I run this script: ``` from absl import app, flags from jax.config import config FLAGS = flags.FLAGS def main(_): print("FLAGS.jax_enable_x64", FLAGS.jax_enable_x64) print("FLAGS.jax_debug_nans", FLAGS.jax_debug_nans) print(config.values) if __name__ == "__main__": config.config_with_absl() app.run(main) ``` I get the following problem: jax.config doesn't report the correct flag settings. ``` > python jaxtest.py --jax_enable_x64=1 --jax_debug_nans=1 FLAGS.jax_enable_x64 True FLAGS.jax_debug_nans True {'jax_enable_x64': 0, 'jax_xla_backend': 'xla', 'jax_backend_target': 'local', 'jax_platform_name': '', 'jax_device_values': 1, 'jax_debug_nans': 0, 'jax_disable_jit': 0} ``` if I run the same with envvars instead it works: ``` JAX_ENABLE_X64=1 JAX_DEBUG_NANS=1 python jaxtest.py FLAGS.jax_enable_x64 True FLAGS.jax_debug_nans True {'jax_enable_x64': 1, 'jax_xla_backend': 'xla', 'jax_backend_target': 'local', 'jax_platform_name': '', 'jax_device_values': 1, 'jax_debug_nans': 1, 'jax_disable_jit': 0} ``` I've tried parsing the flags in different ways but nothing seems to fix the issue.
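With the `call_after_init` / `complete_absl_config` hook added in the patch, the script from the report should now print consistent values; schematically (a sketch of the intended flow, not output from a real run):

```python
from absl import app
from jax.config import config

def main(_):
    # After absl has parsed e.g. --jax_enable_x64=1, the parsed value is copied
    # back into jax's own config, so FLAGS and config.values now agree.
    print(config.values["jax_enable_x64"])

if __name__ == "__main__":
    config.config_with_absl()   # registers jax flags and schedules the sync
    app.run(main)
```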
2019-04-04T09:38:53
google/jax
580
google__jax-580
[ "579" ]
027c94da4c04521ed037b5b6753165477b23d05a
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -1509,6 +1509,11 @@ def einsum(*operands): contractions = tuple(data[:3] for data in contractions) return _einsum(operands, contractions) +@_wraps(onp.einsum_path) +def einsum_path(subscripts, *operands, **kwargs): + optimize = kwargs.pop('optimize', 'greedy') + # using einsum_call=True here is an internal api for opt_einsum + return opt_einsum.contract_path(subscripts, *operands, optimize=optimize) @partial(jit, static_argnums=(1,)) def _einsum(operands, contractions):
diff --git a/tests/lax_numpy_einsum_test.py b/tests/lax_numpy_einsum_test.py --- a/tests/lax_numpy_einsum_test.py +++ b/tests/lax_numpy_einsum_test.py @@ -277,6 +277,22 @@ def test_ordered_front_batch_dim_case(self): s = 'ijkl,ijml->ijkm' self._check(s, x, y) + def test_einsum_path(self): + # just check examples from onp.einsum_path docstring + a = onp.random.rand(2, 2) + b = onp.random.rand(2, 5) + c = onp.random.rand(5, 2) + + path_info = onp.einsum_path('ij,jk,kl->il', a, b, c, optimize='greedy') + self.assertEqual(str(path_info[0]), "['einsum_path', (1, 2), (0, 1)]") + self.assertEqual(path_info[1].split('\n')[0], + ' Complete contraction: ij,jk,kl->il') + + # check this doesn't crash + I = onp.random.rand(10, 10, 10, 10) + C = onp.random.rand(10, 10) + onp.einsum_path('ea,fb,abcd,gc,hd->efgh', C, C, I, C, C, optimize='greedy') + if __name__ == '__main__': absltest.main()
add jax.numpy.einsum_path Requested by @murphyk.
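Usage mirrors `numpy.einsum_path` and returns an `opt_einsum`-style contraction path, as the new test exercises (a small sketch):

```python
import numpy as onp
import jax.numpy as np

a, b, c = onp.random.rand(2, 2), onp.random.rand(2, 5), onp.random.rand(5, 2)
path, info = np.einsum_path('ij,jk,kl->il', a, b, c, optimize='greedy')
# path looks like ['einsum_path', (1, 2), (0, 1)]; info describes the contraction.
```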
2019-04-06T17:34:07
google/jax
586
google__jax-586
[ "459", "459" ]
108a2dbb9cbc7ad2809f339cf7930525f254b7ad
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -21,6 +21,7 @@ import re import string import warnings +import types import numpy as onp import opt_einsum @@ -1221,6 +1222,8 @@ def full_like(a, fill_value, dtype=None): @_wraps(onp.zeros) def zeros(shape, dtype=onp.dtype("float64")): + if isinstance(shape, types.GeneratorType): + raise TypeError("expected sequence object with len >= 0 or a single integer") shape = (shape,) if onp.isscalar(shape) else shape return lax.full(shape, 0, dtype)
generators should not be accepted as values for the shape argument of numpy functions ```python3 import numpy as np shape = (1 for _ in range(4)) np.zeros(shape) ``` results in `TypeError: expected sequence object with len >= 0 or a single integer`, whereas ```python3 import jax.numpy as np shape = (1 for _ in range(4)) np.zeros(shape) ``` just runs through and produces `array([[[[0.]]]], dtype=float32)`. This, however, can lead to bugs that are hard to find, because calling `np.zeros(shape)` a second time will create `array(0., dtype=float32)`. Conclusion: `jax.numpy` should behave like `numpy` and should also throw a TypeError.
2019-04-08T17:33:38
google/jax
599
google__jax-599
[ "598" ]
8a15d57bf39e0d2c0e1b00e8ed8e475464f031ab
diff --git a/jax/lax.py b/jax/lax.py --- a/jax/lax.py +++ b/jax/lax.py @@ -1000,94 +1000,6 @@ def _revise_cond_jaxpr(new_pval, old_pval, jaxpr, consts): return new_jaxpr, new_consts -def scan(f, a, bs): - """Scans over the leading axis of an array. - - Arguments: - f: function with signature `a -> b -> a` - a: `a` value, or a pytree of `a` values. - bs: an array of `b` values, or a pytree of arrays of `b` values with the - same leading axis size. - - Returns: - An array of `a` values, or a pytree of arrays of `a` values, representing - the result of scanning the function `f` over the leading axis of `bs`, with - each application producing an `a` for the next and collecting the results. - """ - a, a_tree = pytree_to_flatjaxtuple(a) - bs, b_tree = pytree_to_flatjaxtuple(bs) # b_tree is the same as bs_tree - f, out_tree = pytree_fun_to_flatjaxtuple_fun(lu.wrap_init(f), (a_tree, b_tree)) - - if not bs: - raise TypeError("bs argument to scan does not contain any arrays") - if any([b.ndim == 0 for b in bs]): - msg = "bs argument arrays must be rank >=1, got shapes {}." - raise TypeError(msg.format(", ".format(str(b.shape) for b in bs))) - if len({b.shape[0] for b in bs}) != 1: - msg = "arrays in bs must have equal most-major dimensions, got shapes {}." - raise TypeError(msg.format(", ".format(str(b.shape) for b in bs))) - - a_pval = a_aval, _ = _abstractify(a) - bs_aval, _ = _abstractify(bs) - b_aval = core.AbstractTuple([ShapedArray(b.shape[1:], b.dtype) for b in bs_aval]) - b_pval = pe.PartialVal((b_aval, core.unit)) - jaxpr, pval_out, consts = pe.trace_to_jaxpr(f, (a_pval, b_pval)) - aval_out, _ = pval_out - - if a_tree != out_tree(): - msg = "scanned function input and output must have identical structure" - raise TypeError(msg) - if a_aval != aval_out: - msg = "output shape mismatch for scanned function: {} vs {}" - raise TypeError(msg.format(a_aval, aval_out)) - - out = scan_p.bind(a, bs, core.pack(consts), aval_out=aval_out, jaxpr=jaxpr) - return tree_unflatten(out_tree(), out) - -def _scan_impl(a, bs, consts, aval_out, jaxpr): - length = tuple(bs)[0].shape[0] - state = [full((length,) + elt.shape, 0, _dtype(elt)) for elt in a] - - def body_fun(i, vals): - a, state = vals - assert len(a) == len(state) - b = [dynamic_index_in_dim(b, i, keepdims=False) for b in bs] - a_out = core.eval_jaxpr(jaxpr, consts, (), a, core.pack(b)) - state_out = [dynamic_update_index_in_dim(s, a[None, ...], i, axis=0) - for a, s in zip(a_out, state)] - return a_out, state_out - - _, out = fori_loop(0, length, body_fun, (a, state)) - return core.pack(out) - -# TODO(mattjj, phawkins): figure out what to do with consts_tangents, and the -# jaxtuple packing issues -def _scan_jvp(primals, tangents, aval_out, jaxpr): - a, bs, consts_primals = primals - a_dot, bs_dot, consts_tangents = tangents - - primal_out = scan_p.bind(a, bs, consts_primals, - aval_out=aval_out, jaxpr=jaxpr) - - def f_jvp(a_pt, b_pt): - a, a_dot = a_pt - b, b_dot = b_pt - f = lambda a, b, c: core.eval_jaxpr(jaxpr, c, (), a, b) - return api.jvp(f, (a, b, consts), (b, b_dot, consts_tangents)) - tangent_out = scan(f_jvp, (a, a_dot), (b, b_dot)) - - return primal_out, tangent_out - -def _scan_abstract_eval(a, bs, consts, aval_out, jaxpr): - return maybe_tracer_tuple_to_abstract_tuple(aval_out) - -scan_p = core.Primitive("scan") -scan_p.def_impl(_scan_impl) -scan_p.def_abstract_eval(_scan_abstract_eval) -xla.translations[scan_p] = partial(xla.lower_fun, _scan_impl) -# ad.primitive_jvps[scan_p] = _scan_jvp # TODO(mattjj, phawkins) - - def tie_in(x, y): 
return tie_in_p.bind(x, y)
diff --git a/tests/lax_test.py b/tests/lax_test.py --- a/tests/lax_test.py +++ b/tests/lax_test.py @@ -1562,45 +1562,6 @@ def testIssue514(self): (0, 0), lambda x: (x[0], 0), (1, 1), lambda x: x) - def testScanAdd(self): - def f(x, y): - return x + y - - g = partial(lax.scan, f) - a = onp.array(7, onp.float32) - bs = onp.array([2, 4, -2, 6], onp.float32) - out = g(a, bs) - self.assertAllClose(out, onp.array([9, 13, 11, 17], onp.float32), - check_dtypes=True) - - # jtu.check_jvp(g, partial(api.jvp, g), (a, bs)) - - def testScanMul(self): - def f(x, y): - return x * y - - g = partial(lax.scan, f) - a = onp.array(7, onp.float32) - bs = onp.array([2, 4, -2, 6], onp.float32) - out = g(a, bs) - self.assertAllClose(out, onp.array([14, 56, -112, -672], onp.float32), - check_dtypes=True) - - # jtu.check_jvp(g, partial(api.jvp, g), (a, bs)) - - def testScanJit(self): - @api.jit - def f(x, yz): - y, z = yz - return 5. * lax.exp(lax.sin(x) * lax.cos(y)) + z - - a = onp.array(7, onp.float32) - bs = (onp.array([3., 1., -4., 1.], onp.float32), - onp.array([5., 9., -2., 6.], onp.float32)) - ans = lax.scan(f, a, bs) - expected = onp.array([7.609, 17.445, 7.52596, 14.3389172], onp.float32) - self.assertAllClose(ans, expected, check_dtypes=True) - @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_lhs_shape={}_rhs_shape={}" .format(jtu.format_shape_dtype_string(lhs_shape, dtype),
Abstract shape computed incorrectly for `lax.scan` The abstract eval for scan ([`_scan_abstract_eval`](https://github.com/google/jax/blob/master/jax/lax.py#L1081-L1082)) returns `aval_out`, but in the case of scan, the `aval_out` should have an additional dimension corresponding to the dimension the scan is looping over (`scan` aggregates over this axis whereas `while_loop` and `fori_loop` don't). This crops up when using `jit` on `lax.scan` functions that use the output of `lax.scan` (if you just return the output of `lax.scan` it works fine). Here's how to reproduce an error: ```python import jax.numpy as np from jax import lax, jit def f1(bs): result = lax.scan(lambda a, _: a, np.ones(5), bs) return result def f2(bs): result = lax.scan(lambda a, _: a, np.ones(5), bs) return result.transpose((1, 0)).transpose((1, 0)) fast_f1 = jit(f1) fast_f2 = jit(f2) f1(np.ones(10)) #works f2(np.ones(10)) #works fast_f1(np.ones(10)) #works fast_f2(np.ones(10)) #errors ``` In theory all of these should be the same, but because we call `transpose` and the abstract result only has one dimension, it errors when it is traced. Any suggestions on the best way to fix? I came up with a hacky fix where I change ```python def _scan_abstract_eval(a, bs, consts, aval_out, jaxpr): return maybe_tracer_tuple_to_abstract_tuple(aval_out) ``` to ```python def _scan_abstract_eval(a, bs, consts, aval_out, jaxpr): aval_out = core.AbstractTuple(ShapedArray(shape=(bs[0].shape[0],) + a.shape, dtype=a.dtype) for a in aval_out) return maybe_tracer_tuple_to_abstract_tuple(aval_out) ```
Thanks for raising this! Actually we should probably delete `lax.scan` from master. The real one is coming in the differentiable-scan branch; it's still a work in progress, but we hope it will be ready in the next week or two. Unfortunately that means that, for now, there really is no scan...
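In the meantime, a rough stand-in can be written along the lines of the removed `_scan_impl` shown in the diff, using `lax.fori_loop` and `lax.dynamic_update_index_in_dim`. This is only a sketch under narrow assumptions: a single array carry `a` and a single stacked input `bs` (no pytrees), and `simple_scan` is an illustrative name, not a jax API:

```python
import jax.numpy as np
from jax import lax

def simple_scan(f, a, bs):
  # Accumulate f's outputs along the leading axis of bs, one step at a time.
  length = bs.shape[0]
  ys = np.zeros((length,) + np.shape(a), dtype=np.asarray(a).dtype)
  def body(i, carry):
    a, ys = carry
    b = lax.dynamic_index_in_dim(bs, i, keepdims=False)
    a = f(a, b)
    ys = lax.dynamic_update_index_in_dim(ys, a[None], i, axis=0)
    return a, ys
  _, ys = lax.fori_loop(0, length, body, (a, ys))
  return ys
```

Unlike the removed version, this makes no attempt to handle pytrees of carries or multiple scanned arrays.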
2019-04-12T03:32:52
google/jax
629
google__jax-629
[ "628" ]
9baf42d97841d015ab03f8960cffc2f81c877bcb
diff --git a/jax/lax/lax.py b/jax/lax/lax.py --- a/jax/lax/lax.py +++ b/jax/lax/lax.py @@ -3252,13 +3252,30 @@ def _select_and_scatter_add_translation( return c.SelectAndScatter(operand, select, window_dimensions, window_strides, padding, source, zero, scatter) +def _select_and_scatter_add_jvp( + primals, tangents, select_prim, window_dimensions, window_strides, + padding): + source, operand = primals + g_source, g_operand = tangents + val_out = _select_and_scatter_add( + source, operand, select_prim, window_dimensions, window_strides, + padding) + del g_operand + if g_source is ad_util.zero: + tangent_out = ad_util.zero + else: + tangent_out = _select_and_scatter_add( + g_source, operand, select_prim, window_dimensions, + window_strides, padding) + return val_out, tangent_out + def _select_and_scatter_add_transpose( t, source, operand, select_prim, window_dimensions, window_strides, padding): assert source is None and operand is not None - result = _select_and_gather_add(t, operand, select_prim, window_dimensions, - window_strides, padding) - return [result, None] + source_t = _select_and_gather_add(t, operand, select_prim, window_dimensions, + window_strides, padding) + return [source_t, None] def _select_and_scatter_add_batch_rule(batched_args, batch_dims, **kwargs): source, operand = batched_args @@ -3295,6 +3312,7 @@ def _select_and_scatter_add_batch_rule(batched_args, batch_dims, **kwargs): _select_and_scatter_add_translation) ad.primitive_transposes[select_and_scatter_add_p] = \ _select_and_scatter_add_transpose +ad.primitive_jvps[select_and_scatter_add_p] = _select_and_scatter_add_jvp batching.primitive_batchers[select_and_scatter_add_p] = \ _select_and_scatter_add_batch_rule @@ -3375,6 +3393,23 @@ def _select_and_gather_add_translation( out = c.ConvertElementType(out, uint_etype) return c.BitcastConvertType(out, etype) +def _select_and_gather_add_jvp( + primals, tangents, select_prim, window_dimensions, window_strides, + padding): + source, operand = primals + g_source, g_operand = tangents + val_out = _select_and_gather_add( + source, operand, select_prim, window_dimensions, window_strides, + padding) + del g_operand + if g_source is ad_util.zero: + tangent_out = ad_util.zero + else: + tangent_out = _select_and_gather_add( + g_source, operand, select_prim, window_dimensions, + window_strides, padding) + return val_out, tangent_out + def _select_and_gather_add_transpose( t, tangents, operand, select_prim, window_dimensions, window_strides, padding): @@ -3386,6 +3421,7 @@ def _select_and_gather_add_transpose( select_and_gather_add_p = standard_primitive( _select_and_gather_add_shape_rule, _input_dtype, 'select_and_gather_add', _select_and_gather_add_translation) +ad.primitive_jvps[select_and_gather_add_p] = _select_and_gather_add_jvp ad.primitive_transposes[select_and_gather_add_p] = \ _select_and_gather_add_transpose
diff --git a/tests/lax_test.py b/tests/lax_test.py --- a/tests/lax_test.py +++ b/tests/lax_test.py @@ -1945,9 +1945,7 @@ def fun(operand): self.assertEqual(onp.unique(operand).size, operand.size, msg="test requires operand elements to be unique.") jtu.check_vjp(fun, partial(api.vjp, fun), (operand,), 1e-2, 1e-2, 1e-2) - - # TODO(phawkins): enable both gradients after a jaxlib update. - # check_grads(fun, (operand,), 1, 1e-2, 1e-2, 1e-2) + check_grads(fun, (operand,), 3, 1e-2, 1e-2, 1e-2) # pylint: enable=cell-var-from-loop # TODO(b/205052657): enable more tests when supported
Forward-mode differentiation rule for 'select_and_scatter_add' not implemented Ran into a not implemented error when taking second order gradients through a max pool. Repro: ``` import jax.numpy as np from jax.api import jvp from jax import grad import jax.random from jax.experimental import stax import numpy as onp data = np.array(onp.random.normal(size=[1, 4, 4, 1])) model = stax.serial(stax.Conv(1, (1,1), (1,1), 'SAME'), stax.MaxPool((2,2), strides=(2,2))) model_init, model_predict = model key = jax.random.PRNGKey(0) params = model_init(key, list(data.shape))[1] def loss(params, data): return np.sum(model_predict(params, data)) def hvp(loss, params, batch, v): loss_fn = lambda x: loss(x, batch) return jvp(grad(loss_fn), [params], [v])[1] # Raises NotImplementedError: Forward-mode differentiation rule for 'select_and_scatter_add' not implemented hvp(loss, params, data, params) ```
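For reference, the JVP rules added in the patch above rely on `select_and_scatter_add` (and its gather counterpart) being linear in `source`, with the selection pattern fixed by `operand`, whose tangent contributes nothing almost everywhere, so the tangent output is just the primitive applied to the source tangent. A schematic sketch of that pattern (not the exact code from the patch; it omits the symbolic-zero handling the real rule has):

```python
def _linear_in_source_jvp(prim, primals, tangents, **params):
  # `prim` is the primitive (e.g. select_and_scatter_add_p). It is linear in
  # `source`, and piecewise constant in `operand`, so the operand tangent is
  # dropped.
  source, operand = primals
  g_source, _ = tangents
  out = prim.bind(source, operand, **params)
  t_out = prim.bind(g_source, operand, **params)  # linearity in `source`
  return out, t_out

# Registration would look roughly like:
#   ad.primitive_jvps[prim] = functools.partial(_linear_in_source_jvp, prim)
```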
2019-04-20T23:52:36
google/jax
633
google__jax-633
[ "106" ]
376ee6423f313ddb99f1cb28d8d30d53046ebe59
diff --git a/jax/random.py b/jax/random.py --- a/jax/random.py +++ b/jax/random.py @@ -32,7 +32,7 @@ from . import numpy as np from . import tree_util from .api import jit, vmap -from .numpy.lax_numpy import _constant_like +from .numpy.lax_numpy import _constant_like, asarray from jax.lib import xla_bridge from jax import core @@ -455,6 +455,30 @@ def _cauchy(key, shape, dtype): return lax.tan(lax.mul(pi, lax.sub(u, _constant_like(u, 0.5)))) +def dirichlet(key, alpha, shape=(), dtype=onp.float32): + """Sample Cauchy random values with given shape and float dtype. + + Args: + key: a PRNGKey used as the random key. + alpha: an array-like with `alpha.shape[:-1]` broadcastable to `shape` and + used as the concentration parameter of the random variables. + shape: optional, a tuple of nonnegative integers representing the batch + shape (defaults to `alpha.shape[:-1]`). + dtype: optional, a float dtype for the returned values (default float32). + + Returns: + A random array with the specified shape and dtype. + """ + return _dirichlet(key, alpha, shape, dtype) + +@partial(jit, static_argnums=(2, 3)) +def _dirichlet(key, alpha, shape, dtype): + alpha = asarray(alpha, dtype) + shape = shape or alpha.shape[:-1] + gamma_samples = gamma(key, alpha, shape + alpha.shape[-1:], dtype) + return gamma_samples / np.sum(gamma_samples, axis=-1, keepdims=True) + + def exponential(key, shape=(), dtype=onp.float32): """Sample Exponential random values with given shape and float dtype. diff --git a/jax/scipy/stats/dirichlet.py b/jax/scipy/stats/dirichlet.py new file mode 100644 --- /dev/null +++ b/jax/scipy/stats/dirichlet.py @@ -0,0 +1,45 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as onp +import scipy.stats as osp_stats + +from ... import lax +from ...numpy.lax_numpy import _promote_args_like, _constant_like, _wraps, all, sum +from ..special import gammaln, xlogy + + +def _is_simplex(x): + x_sum = sum(x, axis=-1) + return all(x > 0, axis=-1) & (x_sum <= 1) & (x_sum > 1 - 1e-6) + + +@_wraps(osp_stats.dirichlet.logpdf) +def logpdf(x, alpha): + args = (onp.ones((0,), lax.dtype(x)), onp.ones((1,), lax.dtype(alpha))) + to_dtype = lax.dtype(osp_stats.dirichlet.logpdf(*args)) + x, alpha = [lax.convert_element_type(arg, to_dtype) for arg in (x, alpha)] + one = _constant_like(x, 1) + normalize_term = sum(gammaln(alpha), axis=-1) - gammaln(sum(alpha, axis=-1)) + log_probs = lax.sub(sum(xlogy(lax.sub(alpha, one), x), axis=-1), normalize_term) + return where(_is_simplex(x), log_probs, -inf) + + +@_wraps(osp_stats.dirichlet.pdf) +def pdf(x, alpha): + return lax.exp(logpdf(x, alpha))
diff --git a/tests/random_test.py b/tests/random_test.py --- a/tests/random_test.py +++ b/tests/random_test.py @@ -212,6 +212,26 @@ def testCauchy(self, dtype): for samples in [uncompiled_samples, compiled_samples]: self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.cauchy().cdf) + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": "_alpha={}_{}".format(alpha, dtype), + "alpha": alpha, "dtype": onp.dtype(dtype).name} + for alpha in [[0.2, 1., 5.]] + for dtype in [onp.float32, onp.float64])) + @jtu.skip_on_devices("tpu") # TODO(phawkins): re-enable + def testDirichlet(self, alpha, dtype): + key = random.PRNGKey(0) + rand = lambda key, alpha: random.dirichlet(key, alpha, (10000,), dtype) + crand = api.jit(rand) + + uncompiled_samples = rand(key, alpha) + compiled_samples = crand(key, alpha) + + for samples in [uncompiled_samples, compiled_samples]: + self.assertAllClose(samples.sum(-1), onp.ones(10000, dtype=dtype), check_dtypes=True) + alpha_sum = sum(alpha) + for i, a in enumerate(alpha): + self._CheckKolmogorovSmirnovCDF(samples[..., i], scipy.stats.beta(a, alpha_sum - a).cdf) + @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_{}".format(dtype), "dtype": onp.dtype(dtype).name} for dtype in [onp.float32, onp.float64])) diff --git a/tests/scipy_stats_test.py b/tests/scipy_stats_test.py --- a/tests/scipy_stats_test.py +++ b/tests/scipy_stats_test.py @@ -93,6 +93,21 @@ def args_maker(): self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=True) self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True) + @genNamedParametersNArgs(2, jtu.rand_positive()) + def testDirichletLogPdf(self, rng, shapes, dtypes): + scipy_fun = osp_stats.cauchy.logpdf + lax_fun = lsp_stats.cauchy.logpdf + dim = 4 + shapes = (shapes[0] + (dim,), shapes[1] + (dim,)) + + def args_maker(): + x, alpha = map(rng, shapes, dtypes) + x = x / onp.sum(x, axis=-1, keepdims=True) + return [x, alpha] + + self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=True) + self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True) + @genNamedParametersNArgs(3, jtu.rand_positive()) def testExponLogPdf(self, rng, shapes, dtypes): scipy_fun = osp_stats.expon.logpdf
support for scipy.stats.dirichlet Registering interest in having support for this distribution (specifically logpdf() and rvs()).
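For reference, the sampler in the patch above uses the standard construction: draw independent Gamma(alpha_i, 1) variables and normalize along the last axis. A minimal sketch of that idea using the existing `jax.random.gamma` (the name `dirichlet_sample` is just illustrative, not the jax API):

```python
import jax.numpy as np
from jax import random

def dirichlet_sample(key, alpha, shape=()):
  # alpha has shape (..., k); gamma draws are normalized over the last axis.
  alpha = np.asarray(alpha)
  shape = shape or alpha.shape[:-1]
  g = random.gamma(key, alpha, shape + alpha.shape[-1:])
  return g / np.sum(g, axis=-1, keepdims=True)
```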
2019-04-22T15:57:01
google/jax
638
google__jax-638
[ "637", "637" ]
1fd077c77e63284844ffd315c64e7d169396660d
diff --git a/jax/api.py b/jax/api.py --- a/jax/api.py +++ b/jax/api.py @@ -91,11 +91,9 @@ def jit(fun, static_argnums=()): >>> >>> key = jax.random.PRNGKey(0) >>> x = jax.random.normal(key, (10,)) - >>> selu(x) - array([-0.54485154, 0.27744263, -0.29255125, -0.91421586, -0.62452525, - -0.2474813 , -0.8574326 , -0.7823267 , 0.7682731 , 0.59566754], - dtype=float32) - + >>> print(selu(x)) + [-0.54485154 0.27744263 -0.29255125 -0.91421586 -0.62452525 -0.2474813 + -0.8574326 -0.7823267 0.7682731 0.59566754] """ @wraps(fun) def f_jitted(*args, **kwargs): @@ -214,9 +212,8 @@ def grad(fun, argnums=0, has_aux=False, holomorphic=False): For example: >>> grad_tanh = jax.grad(jax.numpy.tanh) - >>> grad_tanh(0.2) - array(0.961043, dtype=float32) - + >>> print(grad_tanh(0.2)) + 0.961043 """ value_and_grad_f = value_and_grad(fun, argnums, has_aux=has_aux, holomorphic=holomorphic) @@ -320,11 +317,11 @@ def jacfwd(fun, argnums=0, holomorphic=False): >>> def f(x): >>> return jax.numpy.asarray( >>> [x[0], 5*x[2], 4*x[1]**2 - 2*x[2], x[2] * jax.numpy.sin(x[0])]) - >>> jax.jacfwd(f)(np.array([1., 2., 3.])) - array([[ 1. , 0. , 0. ], - [ 0. , 0. , 5. ], - [ 0. , 16. , -2. ], - [ 1.6209068 , 0. , 0.84147096]], dtype=float32) + >>> print(jax.jacfwd(f)(np.array([1., 2., 3.]))) + [[ 1. , 0. , 0. ], + [ 0. , 0. , 5. ], + [ 0. , 16. , -2. ], + [ 1.6209068 , 0. , 0.84147096]] """ def jacfun(*args, **kwargs): @@ -364,11 +361,11 @@ def jacrev(fun, argnums=0, holomorphic=False): >>> def f(x): >>> return jax.numpy.asarray( >>> [x[0], 5*x[2], 4*x[1]**2 - 2*x[2], x[2] * jax.numpy.sin(x[0])]) - >>> jax.jacrev(f)(np.array([1., 2., 3.])) - array([[ 1. , 0. , 0. ], - [ 0. , 0. , 5. ], - [ 0. , 16. , -2. ], - [ 1.6209068 , 0. , 0.84147096]], dtype=float32) + >>> print(jax.jacrev(f)(np.array([1., 2., 3.]))) + [[ 1. , 0. , 0. ], + [ 0. , 0. , 5. ], + [ 0. , 16. , -2. ], + [ 1.6209068 , 0. , 0.84147096]] """ def jacfun(*args, **kwargs): f = lu.wrap_init(fun, kwargs) @@ -408,9 +405,9 @@ def hessian(fun, argnums=0, holomorphic=False): `fun`. >>> g = lambda(x): x[0]**3 - 2*x[0]*x[1] - x[1]**6 - >>> jax.hessian(g)(jax.numpy.array([1., 2.])) - array([[ 6., -2.], - [ -2., -480.]], dtype=float32) + >>> print(jax.hessian(g)(jax.numpy.array([1., 2.]))) + [[ 6., -2.], + [ -2., -480.]] """ return jacfwd(jacrev(fun, argnums, holomorphic), argnums, holomorphic) @@ -463,7 +460,7 @@ def vmap(fun, in_axes=0, out_axes=0): >>> mv = vmap(vv, (0, None), 0) # ([a,b], [b]) -> [a] >>> mm = vmap(mv, (None, 1), 1) # ([a,b], [b,c]) -> [a,c] - (`[a,b]` indicates an array with shape (a,b)) + (here we use `[a,b]` to indicate an array with shape (a,b)) """ docstr = ("Vectorized version of {fun}. Takes similar arguments as {fun} " @@ -572,8 +569,11 @@ def jvp(fun, primals, tangents): For example: - >>> jax.jvp(jax.numpy.sin, (0.1,), (0.2,)) - (array(0.09983342, dtype=float32), array(0.19900084, dtype=float32)) + >>> y, v = jax.jvp(jax.numpy.sin, (0.1,), (0.2,)) + >>> print(y) + 0.09983342 + >>> print(v) + 0.19900084 """ def trim_arg(primal, tangent): primal_jtuple, tree_def = pytree_to_jaxtupletree(primal) @@ -635,12 +635,12 @@ def linearize(fun, *primals): >>> jax.jvp(f, (2.,), (3.,)) (array(3.2681944, dtype=float32), array(-5.007528, dtype=float32)) >>> y, f_jvp = jax.linearize(f, 2.) - >>> y - array(3.2681944, dtype=float32) - >>> f_jvp(3.) - array(-5.007528, dtype=float32) - >>> f_jvp(4.) 
- array(-6.676704, dtype=float32) + >>> print(y) + 3.2681944 + >>> print(f_jvp(3.)) + -5.007528 + >>> print(f_jvp(4.)) + -6.676704 """ f = lu.wrap_init(fun) primals_flat, in_trees = unzip2(map(pytree_to_jaxtupletree, primals)) @@ -686,9 +686,12 @@ def vjp(fun, *primals, **kwargs): >>> def f(x, y): >>> return jax.numpy.sin(x), jax.numpy.cos(y) - >>> primals, g = jax.vjp(f, 0.5, 1.0) - >>> g((-0.7, 0.3)) - (array(-0.61430776, dtype=float32), array(-0.2524413, dtype=float32)) + >>> primals, f_vjp = jax.vjp(f, 0.5, 1.0) + >>> xbar, ybar = f_vjp((-0.7, 0.3)) + >>> print(xbar) + -0.61430776 + >>> print(ybar) + -0.2524413 """ has_aux = kwargs.pop('has_aux', False) assert not kwargs @@ -752,8 +755,8 @@ def make_jaxpr(fun): instead give a few examples. >>> def f(x): return jax.numpy.sin(jax.numpy.cos(x)) - >>> f(3.0) - array(-0.83602184, dtype=float32) + >>> print(f(3.0)) + -0.83602184 >>> jax.make_jaxpr(f)(3.0) { lambda ; ; a. let b = cos a diff --git a/jax/interpreters/xla.py b/jax/interpreters/xla.py --- a/jax/interpreters/xla.py +++ b/jax/interpreters/xla.py @@ -347,8 +347,7 @@ def copy(self): return onp.asarray(self) def __repr__(self): - shape_str = ",".join(map(str, self.shape)) - return "DeviceArray{{{}[{}]}}".format(onp.dtype(self.dtype).name, shape_str) + return onp.array_repr(self) def __len__(self): try:
Inconsistent output from documentation for hessian I was trying to follow this example: https://jax.readthedocs.io/en/latest/jax.html#jax.hessian And I don't get the same output as it suggests. Instead I get "DeviceArray{float32[2,2]}". It seems related to issue #173, but I would have thought the documentation would work as advertised. I had some similar issues in https://colab.research.google.com/github/google/jax/blob/master/notebooks/autodiff_cookbook.ipynb#scrollTo=fuz9E2vzro5E ``` import jax g = lambda x: x[0]**3 - 2*x[0]*x[1] - x[1]**6 jax.hessian(g)(jax.numpy.array([1., 2.])) ``` #+RESULTS: :results: # Out [11]: # text/plain : DeviceArray{float32[2,2]} :end: I am pretty sure I am running the latest jax version: ``` print(jax.version.__version__) import sys print(sys.version) ``` #+RESULTS: :results: # Out [20]: # output 0.1.25 3.6.0 |Anaconda custom (x86_64)| (default, Dec 23 2016, 13:19:00) [GCC 4.2.1 Compatible Apple LLVM 6.0 (clang-600.0.57)] :end: Any ideas?
Thanks for raising this! We recently changed how these DeviceArray types are printed in a repl (i.e. we recently changed their `__repr__` method). DeviceArray is effectively a subclass of numpy.ndarray that is backed by device memory (e.g. GPU memory when you're using a GPU, or just CPU memory that XLA controls when using the CPU backend). When we wrote the documentation we had `__repr__` set up to print it out just like a regular ndarray, but we decided to make `__repr__` print out this other representation because (1) it's more explicit and less magical and (2) it avoids copying back to host memory. This is a documentation bug on our end, in that we should update the docstring to be less confusing.
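In the meantime, the numeric values can always be seen by printing or by copying back to a host numpy array, which is what the updated docstrings in the patch do; for example:

```python
import numpy as onp
import jax
import jax.numpy as np

g = lambda x: x[0]**3 - 2*x[0]*x[1] - x[1]**6
h = jax.hessian(g)(np.array([1., 2.]))
print(onp.asarray(h))  # copies back to a regular numpy array
# [[   6.   -2.]
#  [  -2. -480.]]
```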
2019-04-24T01:21:58
google/jax
640
google__jax-640
[ "634" ]
b2160fdc03b6f16e4c2dbbd8b66a8cc53dc7ea73
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -1180,7 +1180,7 @@ def column_stack(tup): def atleast_1d(*arys): if len(arys) == 1: arr = array(arys[0]) - return arr if arr.ndim >= 1 else arr.reshape(-1) + return arr if ndim(arr) >= 1 else reshape(arr, -1) else: return [atleast_1d(arr) for arr in arys] @@ -1189,7 +1189,7 @@ def atleast_1d(*arys): def atleast_2d(*arys): if len(arys) == 1: arr = array(arys[0]) - return arr if arr.ndim >= 2 else arr.reshape((1, -1)) + return arr if ndim(arr) >= 2 else reshape(arr, (1, -1)) else: return [atleast_2d(arr) for arr in arys] @@ -1199,9 +1199,9 @@ def atleast_3d(*arys): if len(arys) == 1: arr = array(arys[0]) if ndim(arr) <= 1: - arr = arr.reshape((1, -1, 1)) + arr = reshape(arr, (1, -1, 1)) elif ndim(arr) == 2: - arr = arr.reshape(shape(arr) + (1,)) + arr = reshape(arr, shape(arr) + (1,)) return arr else: return [atleast_3d(arr) for arr in arys]
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -1373,6 +1373,22 @@ def testIssue453(self): expected = onp.reshape(a, (3, 2), order='F') self.assertAllClose(ans, expected, check_dtypes=True) + + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": "_op={}_dtype={}".format( + op, {bool: "bool", int: "int", float: "float"}[dtype]), + "dtype": dtype, "op": op} + for dtype in [int, float, bool] + for op in ["atleast_1d", "atleast_2d", "atleast_3d"])) + def testAtLeastNdLiterals(self, dtype, op): + # Fixes: https://github.com/google/jax/issues/634 + onp_fun = lambda arg: getattr(onp, op)(arg) + lnp_fun = lambda arg: getattr(lnp, op)(arg) + args_maker = lambda: [dtype(2)] + self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True) + self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True) + + def testLongLong(self): # TODO(phawkins): enable after a Jaxlib update. return SkipTest("Test disabled until jaxlib 0.1.13 is released.")
atleast_{1,2,3}d fail for scalars `numpy.atleast_{1,2,3}d` add dimensions for scalars up to the necessary amount, but JAX implementations fail, since `jax.numpy.array` doesn't return 0-dim arrays for scalars (#121). Examples: ``` In [3]: onp.atleast_1d(1) Out[3]: array([1]) In [4]: np.atleast_1d(1) --------------------------------------------------------------------------- AttributeError Traceback (most recent call last) <ipython-input-4-84084c6642da> in <module> ----> 1 np.atleast_1d(1) ~/src/jax/jax/numpy/lax_numpy.py in atleast_1d(*arys) 1182 arr = array(arys[0]) -> 1183 return arr if arr.ndim >= 1 else arr.reshape(-1) 1184 else: 1185 return [atleast_1d(arr) for arr in arys] AttributeError: 'int' object has no attribute 'ndim' In [5]: np.atleast_1d(np.int64(1)) Out[5]: array([1]) ``` Numpy dtypes (like `np.int64`) have a `ndim` attribute, which allows them to work properly. The testing suite doesn't catch this because it seems to use Numpy dtypes instead of `int` or `float` literals.
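The patch above addresses this by going through the free functions rather than array attributes, which also accept plain Python scalars (e.g. `onp.ndim(1)` is 0 even though `1` has no `.ndim`). The shape of the fix, schematically (a sketch, not the full implementation):

```python
from jax.numpy import array, ndim, reshape

def atleast_1d_sketch(x):
  # ndim/reshape are free functions, so Python ints and floats work too.
  arr = array(x)
  return arr if ndim(arr) >= 1 else reshape(arr, -1)
```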
2019-04-25T06:04:46
google/jax
643
google__jax-643
[ "276" ]
b2160fdc03b6f16e4c2dbbd8b66a8cc53dc7ea73
diff --git a/build/build.py b/build/build.py --- a/build/build.py +++ b/build/build.py @@ -161,8 +161,6 @@ def check_bazel_version(bazel_path, min_version): build --action_env PYTHON_BIN_PATH="{python_bin_path}" build --python_path="{python_bin_path}" build --action_env TF_NEED_CUDA="{tf_need_cuda}" -build --action_env CUDA_TOOLKIT_PATH="{cuda_toolkit_path}" -build --action_env CUDNN_INSTALL_PATH="{cudnn_install_path}" build --distinct_host_configuration=false build --copt=-Wno-sign-compare build -c opt @@ -186,9 +184,16 @@ def check_bazel_version(bazel_path, min_version): """ -def write_bazelrc(**kwargs): + +def write_bazelrc(cuda_toolkit_path=None, cudnn_install_path=None, **kwargs): f = open("../.bazelrc", "w") f.write(BAZELRC_TEMPLATE.format(**kwargs)) + if cuda_toolkit_path: + f.write("build --action_env CUDA_TOOLKIT_PATH=\"{cuda_toolkit_path}\"\n" + .format(cuda_toolkit_path=cuda_toolkit_path)) + if cudnn_install_path: + f.write("build --action_env CUDNN_INSTALL_PATH=\"{cudnn_install_path}\"\n" + .format(cudnn_install_path=cudnn_install_path)) f.close() @@ -265,11 +270,11 @@ def main(): help_str="Should we build with CUDA enabled? Requires CUDA and CuDNN.") parser.add_argument( "--cuda_path", - default="/usr/local/cuda", + default=None, help="Path to the CUDA toolkit.") parser.add_argument( "--cudnn_path", - default="/usr/local/cuda", + default=None, help="Path to CUDNN libraries.") args = parser.parse_args() @@ -291,8 +296,10 @@ def main(): cudnn_install_path = args.cudnn_path print("CUDA enabled: {}".format("yes" if args.enable_cuda else "no")) if args.enable_cuda: - print("CUDA toolkit path: {}".format(cuda_toolkit_path)) - print("CUDNN library path: {}".format(cudnn_install_path)) + if cuda_toolkit_path: + print("CUDA toolkit path: {}".format(cuda_toolkit_path)) + if cudnn_install_path: + print("CUDNN library path: {}".format(cudnn_install_path)) write_bazelrc( python_bin_path=python_bin_path, tf_need_cuda=1 if args.enable_cuda else 0,
np.isnan doesn't work on CPU in fast-math mode; as a result, np.nan_to_num, np.nanmean, etc. all don't work. ``` import jax.numpy as np a = np.zeros(1) / np.zeros(1) print(a.__array__()) print(np.isnan(a).__array__()) ``` `[nan]` `[False]` This bug only happens with the CPU-only build of JAX, when I see this warning: "warnings.warn('No GPU found, falling back to CPU.')"
This happens because XLA's CPU backend defaults to enabling fast math mode, which does not preserve nan/inf semantics. The GPU backend does not. Note the comment here: https://github.com/google/jax/blob/master/jax/numpy/lax_numpy.py#L699 ``` # Caution: If fast math mode is enabled, the semantics of inf and nan are not # preserved by XLA/LLVM, and the behavior of inf/nan values is unpredictable. # To disable fast math mode on CPU, set the environment variable # XLA_FLAGS=--xla_cpu_enable_fast_math=false ``` The `XLA_FLAGS` environment variable above makes your example pass. I guess the important question is: should we disable fast math mode by default? Are exact NaN semantics important to you? I think consistency between CPU and GPU is more important than performance in this case. There can still be a _performance tips_ section that explains how to activate _fast math_. I just got surprised by this, too. Maybe another option is to print a warning at startup, adding to the "No GPU found"? A brief update on this bug: we tried disabling fastmath in XLA/CPU by default, but found it regressed performance for some neural network benchmarks significantly because it prevents vectorization in some important cases. https://reviews.llvm.org/D57728 apparently fixes the performance problem, but it isn't in yet. I'm hoping we can simply disable fast math by default when that change makes it into LLVM. A warning makes sense until we do so, I guess. I also got surprised by this (I am using a CPU). Here is a simple example: import numpy as onp # original numpy import jax.numpy as np print(np.isnan(np.nan)) #F print(onp.isnan(np.nan)) #T print(np.isnan(onp.nan)) #F print(onp.isnan(onp.nan)) #T Maybe worth mentioning the issue on the jax homepage (the comment is currently buried deep in the gotchas colab) I also tried to set the environment flag but to no avail (is my syntax correct?) import os os.environ["XLA_FLAGS"]="--xla_cpu_enable_fast_math=false" print(np.isnan(np.nan)) #F print(onp.isnan(np.nan)) #T print(np.isnan(onp.nan)) #F print(onp.isnan(onp.nan)) #T Did that `os.environ` come before importing anything from jax? That might be necessary. Great idea re: mentioning it in the readme. I'll add it now. yes, I did the os.environ thing first. I am running inside Spyder IDE. Full script: import os os.environ["XLA_FLAGS"]="--xla_cpu_enable_fast_math=false" import numpy as onp # original numpy import jax.numpy as np print(np.isnan(np.nan)) #F print(onp.isnan(np.nan)) #T print(np.isnan(onp.nan)) #F print(onp.isnan(onp.nan)) #T Thanks. Hrm I was unable to repro in my local environment (which I tried before my previous guess about `os.environ` going first): ``` In [1]: import os In [2]: os.environ["XLA_FLAGS"] = "--xla_cpu_enable_fast_math=false" In [3]: import jax.numpy as np In [4]: print(np.isnan(np.nan)) True ``` Not sure how to chase that down further. In any case, we'll fix CPU nan issues ASAP. weird. I am using python 3.7 and jax 0.1.23 (latest pip binary)
2019-04-25T23:22:21
google/jax
661
google__jax-661
[ "655" ]
0f495b65adbc7d3603e148dd2d5b044962de9728
diff --git a/jax/lax_linalg.py b/jax/lax_linalg.py --- a/jax/lax_linalg.py +++ b/jax/lax_linalg.py @@ -42,9 +42,12 @@ def cholesky(x, symmetrize_input=True): def eigh(x, lower=True, symmetrize_input=True): if symmetrize_input: x = symmetrize(x) - return eigh_p.bind(x, lower=lower) + v, w = eigh_p.bind(x, lower=lower) + return v, w -def lu(x): return lu_p.bind(x) +def lu(x): + lu, pivots = lu_p.bind(x) + return lu, pivots def qr(x, full_matrices=True): q, r = qr_p.bind(x, full_matrices=full_matrices) @@ -338,29 +341,31 @@ def lu_jvp_rule(primals, tangents): return core.pack((lu, pivots)), ad.TangentTuple((lu_dot, ad_util.zero)) +def lu_batching_rule(batched_args, batch_dims): + x, = batched_args + bd, = batch_dims + x = batching.bdim_at_front(x, bd) + return lu_p.bind(x), 0 + + lu_p = Primitive('lu') lu_p.def_impl(lu_impl) lu_p.def_abstract_eval(lu_abstract_eval) xla.translations[lu_p] = lu_translation_rule ad.primitive_jvps[lu_p] = lu_jvp_rule +batching.primitive_batchers[lu_p] = lu_batching_rule def lu_cpu_translation_rule(c, operand): shape = c.GetShape(operand) dtype = shape.element_type().type - if len(shape.dimensions()) == 2 and dtype in _cpu_lapack_types: - out = lapack.jax_getrf(c, operand) - lu = c.GetTupleElement(out, 0) - # Subtract 1 from the pivot to get 0-based indices. - pivot = c.Sub(c.GetTupleElement(out, 1), c.ConstantS32Scalar(1)) - # Throw away the `info` value, because we have no way to report errors. - return c.Tuple(lu, pivot) - else: - raise NotImplementedError("Only unbatched LU decomposition is implemented") + out = lapack.jax_getrf(c, operand) + lu = c.GetTupleElement(out, 0) + # Subtract 1 from the pivot to get 0-based indices. + pivot = c.Sub(c.GetTupleElement(out, 1), c.ConstantS32Scalar(1)) + # Throw away the `info` value, because we have no way to report errors. + return c.Tuple(lu, pivot) -# TODO(phawkins): The hasattr() test here is to avoid incompatibilities between -# jax and an older jaxlib. Remove after a jaxlib release includes jax_getrf. 
-if hasattr(lapack, "jax_getrf"): - xla.backend_specific_translations['cpu'][lu_p] = lu_cpu_translation_rule +xla.backend_specific_translations['cpu'][lu_p] = lu_cpu_translation_rule def lu_pivots_to_permutation(swaps, k): @@ -421,11 +426,19 @@ def qr_jvp_rule(primals, tangents, full_matrices): dr = np.matmul(qt_dx_rinv - domega, r) return core.pack((q, r)), core.pack((dq, dr)) +def qr_batching_rule(batched_args, batch_dims, full_matrices): + x, = batched_args + bd, = batch_dims + x = batching.bdim_at_front(x, bd) + q, r = qr(x, full_matrices=full_matrices) + return qr_p.bind(x, full_matrices=full_matrices), 0 + qr_p = Primitive('qr') qr_p.def_impl(qr_impl) qr_p.def_abstract_eval(qr_abstract_eval) xla.translations[qr_p] = qr_translation_rule ad.primitive_jvps[qr_p] = qr_jvp_rule +batching.primitive_batchers[qr_p] = qr_batching_rule # Singular value decomposition @@ -467,8 +480,15 @@ def svd_cpu_translation_rule(c, operand, full_matrices, compute_uv): raise NotImplementedError( "Only unbatched singular value decomposition is implemented on CPU") +def svd_batching_rule(batched_args, batch_dims, full_matrices, compute_uv): + x, = batched_args + bd, = batch_dims + x = batching.bdim_at_front(x, bd) + return svd_p.bind(x, full_matrices=full_matrices, compute_uv=compute_uv), 0 + svd_p = Primitive('svd') svd_p.def_impl(svd_impl) svd_p.def_abstract_eval(svd_abstract_eval) xla.translations[svd_p] = svd_translation_rule xla.backend_specific_translations['cpu'][svd_p] = svd_cpu_translation_rule +batching.primitive_batchers[svd_p] = svd_batching_rule
diff --git a/tests/linalg_test.py b/tests/linalg_test.py --- a/tests/linalg_test.py +++ b/tests/linalg_test.py @@ -27,7 +27,7 @@ from absl.testing import absltest from absl.testing import parameterized -from jax import jvp +from jax import jvp, vmap from jax import numpy as np from jax import scipy as jsp from jax import test_util as jtu @@ -340,6 +340,15 @@ def compare_orthogonal(q1, q2): if not full_matrices and m >= n: jtu.check_jvp(np.linalg.qr, partial(jvp, np.linalg.qr), (a,)) + @jtu.skip_on_devices("gpu", "tpu") + def testQrBatching(self): + shape = (10, 4, 5) + dtype = np.float32 + rng = jtu.rand_default() + args = rng(shape, np.float32) + qs, rs = vmap(jsp.linalg.qr)(args) + self.assertTrue(onp.all(onp.linalg.norm(args - onp.matmul(qs, rs)) < 1e-3)) + @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_lhs={}_rhs={}".format( @@ -419,6 +428,22 @@ def testLuGrad(self, shape, dtype, rng): jtu.check_grads(jsp.linalg.lu, (a,), 2, rtol=1e-1) + @jtu.skip_on_devices("gpu", "tpu") + def testLuBatching(self): + self.skipTest("Test disabled until Jaxlib 0.1.14 is released") + shape = (4, 5) + dtype = np.float32 + rng = jtu.rand_default() + args = [rng(shape, np.float32) for _ in range(10)] + expected = list(osp.linalg.lu(x) for x in args) + ps = onp.stack([out[0] for out in expected]) + ls = onp.stack([out[1] for out in expected]) + us = onp.stack([out[2] for out in expected]) + + actual_ps, actual_ls, actual_us = vmap(jsp.linalg.lu)(np.stack(args)) + self.assertAllClose(ps, actual_ps, check_dtypes=True) + self.assertAllClose(ls, actual_ls, check_dtypes=True) + self.assertAllClose(us, actual_us, check_dtypes=True) # TODO(phawkins): enable when there is an LU implementation for GPU/TPU. @parameterized.named_parameters(jtu.cases_from_list(
Implement vmap/batching support for LU decomposition I'm trying to solve linear equations Ax = b with vmap: ```python import jax.numpy as np import jax.random as random from jax import vmap key = random.PRNGKey(0) A = random.normal(key, shape=(100, 2, 2)) x = random.normal(key, shape=(100, 2)) b = vmap(lambda A, x: A @ x)(A, x) # works! shape (100, 2) x_hat = vmap(lambda A, x: np.linalg.solve(A, x))(A, x) ``` Here, using `np.linalg.solve` results in the following error message: ``` NotImplementedError: Batching rule for 'lu' not implemented ``` Opening this issue to request batching support for LU decomposition. Thanks in advance!
I can easily add a simple implementation that batches by performing the LU decompositions in a loop.
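The batching rules that actually landed in the patch above take an even simpler route: move the vmapped axis to the front and hand the stacked operand straight to the primitive, relying on the underlying implementation accepting leading batch dimensions. The same shape is used for `lu`, `qr`, and `svd` in the patch; a schematic sketch (illustrative helper name, not jax API):

```python
from jax.interpreters import batching

def _batch_via_leading_axis(prim, batched_args, batch_dims, **params):
  x, = batched_args
  bd, = batch_dims
  x = batching.bdim_at_front(x, bd)  # move the vmapped axis to position 0
  return prim.bind(x, **params), 0   # the output is batched along axis 0
```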
2019-04-30T17:22:59
google/jax
666
google__jax-666
[ "508" ]
a89fb264c2bd7675fc26caede053892460338c5e
diff --git a/jax/lax_linalg.py b/jax/lax_linalg.py --- a/jax/lax_linalg.py +++ b/jax/lax_linalg.py @@ -468,6 +468,32 @@ def svd_abstract_eval(operand, full_matrices, compute_uv): vt = operand return core.AbstractTuple((s, u, vt)) +def svd_jvp_rule(primals, tangents, full_matrices, compute_uv): + if full_matrices: + #TODO: implement full matrices case, documented here: https://people.maths.ox.ac.uk/gilesm/files/NA-08-01.pdf + raise NotImplementedError("Singular value decomposition JVP not implemented for full matrices") + A, = primals + dA, = tangents + s, U, Vt = svd_p.bind(A, full_matrices=False, compute_uv=True) + + k = s.shape[-1] + Ut, V = np.conj(U).T, np.conj(Vt).T + s_dim = s[..., None, :] + dS = np.dot(np.dot(Ut, dA), V) + ds = np.diag(dS) + F = 1 / (np.square(s_dim) - np.square(s_dim.T) + np.eye(k)) - np.eye(k) + dSS = s_dim * dS + SdS = s_dim.T * dS + dU = np.dot(U, F * (dSS + dSS.T)) + dV = np.dot(V, F * (SdS + SdS.T)) + + m, n = A.shape[-2], A.shape[-1] + if m > n: + dU = dU + np.dot(np.eye(m) - np.dot(U, Ut), np.dot(dA, V)) / s_dim + if n > m: + dV = dV + np.dot(np.eye(n) - np.dot(V, Vt), np.dot(np.conj(dA).T, U)) / s_dim + return core.pack((s, U, Vt)), core.pack((ds, dU, dV.T)) + def svd_cpu_translation_rule(c, operand, full_matrices, compute_uv): shape = c.GetShape(operand) dtype = shape.element_type().type @@ -491,4 +517,5 @@ def svd_batching_rule(batched_args, batch_dims, full_matrices, compute_uv): svd_p.def_abstract_eval(svd_abstract_eval) xla.translations[svd_p] = svd_translation_rule xla.backend_specific_translations['cpu'][svd_p] = svd_cpu_translation_rule +ad.primitive_jvps[svd_p] = svd_jvp_rule batching.primitive_batchers[svd_p] = svd_batching_rule
diff --git a/tests/linalg_test.py b/tests/linalg_test.py --- a/tests/linalg_test.py +++ b/tests/linalg_test.py @@ -197,7 +197,7 @@ def testEighGradVectorComplex(self, shape, dtype, rng, lower, eps): # Assert rtol eigenvalue delta between perturbed eigenvectors vs new true eigenvalues. RTOL=1e-2 assert onp.max( - onp.abs((onp.diag(onp.dot(onp.conj((v+dv).T), onp.dot(new_a,(v+dv)))) - new_w) / new_w)) < RTOL + onp.abs((onp.diag(onp.dot(onp.conj((v+dv).T), onp.dot(new_a,(v+dv)))) - new_w) / new_w)) < RTOL # Redundant to above, but also assert rtol for eigenvector property with new true eigenvalues. assert onp.max( onp.linalg.norm(onp.abs(new_w*(v+dv) - onp.dot(new_a, (v+dv))), axis=0) / @@ -284,6 +284,9 @@ def norm(x): self._CompileAndCheck(partial(np.linalg.svd, full_matrices=full_matrices, compute_uv=compute_uv), args_maker, check_dtypes=True) + if not full_matrices: + svd = partial(np.linalg.svd, full_matrices=False) + jtu.check_jvp(svd, partial(jvp, svd), (a,), atol=1e-1 if FLAGS.jax_enable_x64 else jtu.ATOL) @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_shape={}_fullmatrices={}".format(
Implement JVP for SVD when full_matrices=True
Eqns 16-18 of [this note](https://j-towns.github.io/papers/svd-derivative.pdf) give formulae for the jvp. I think it's probably also been implemented in Julia somewhere. It's been a while since I looked at this, but just to warn anyone implementing it: I think those formulae might be slightly more general than necessary, because they hold for a 'low-rank SVD'. This means that one or both of the terms at the end of 16 and at the end of 18 will be identically zero, and therefore not worth wasting computation on. You can infer which of the terms is zero by looking at the shapes of the inputs.
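For reference, in the real thin-SVD case with distinct singular values (the case the patch above implements), those identities reduce to the following, writing `A = U S V^T`, `dP = U^T dA V`, and `F_ij = 1/(s_j^2 - s_i^2)` for `i != j` (zero on the diagonal); the trailing term of `dU` vanishes when `m <= n`, and the trailing term of `dV` vanishes when `n <= m`:

$$
\begin{aligned}
ds &= \operatorname{diag}(dP), \\
dU &= U\left(F \circ (dP\,S + S\,dP^\top)\right) + (I_m - UU^\top)\,dA\,V\,S^{-1}, \\
dV &= V\left(F \circ (S\,dP + dP^\top S)\right) + (I_n - VV^\top)\,dA^\top U\,S^{-1}.
\end{aligned}
$$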
2019-05-03T00:48:27
google/jax
671
google__jax-671
[ "658" ]
b5d95f8b84a435aba511665b6f2f02e561b71c7a
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -2153,9 +2153,7 @@ def _is_slice_none(idx): def _is_advanced_int_indexer(idx): """Returns True if idx should trigger int array indexing, False otherwise.""" # https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing - if isinstance(idx, (tuple, list)): - # We assume this check comes *after* the check for non-advanced tuple index, - # and hence we already know at least one element is a sequence if it's a tuple + if isinstance(idx, (tuple, list)) and _any(onp.ndim(elt) != 0 for elt in idx): return _all(e is None or e is Ellipsis or isinstance(e, slice) or _is_int_arraylike(e) for e in idx) else: diff --git a/jax/ops/__init__.py b/jax/ops/__init__.py --- a/jax/ops/__init__.py +++ b/jax/ops/__init__.py @@ -14,4 +14,4 @@ from __future__ import absolute_import -from .scatter import index, index_add, index_update \ No newline at end of file +from .scatter import index, index_add, index_update, segment_sum diff --git a/jax/ops/scatter.py b/jax/ops/scatter.py --- a/jax/ops/scatter.py +++ b/jax/ops/scatter.py @@ -26,6 +26,19 @@ from ..numpy import lax_numpy as np +# TODO(mattjj): clean up this logic +def _is_advanced_int_indexer(idx): + _int = lambda aval: not aval.shape and onp.issubdtype(aval.dtype, onp.integer) + try: + abstract_idx = core.get_aval(idx) + except TypeError: + abstract_idx = None + out = not (isinstance(abstract_idx, ConcreteArray) and _int(abstract_idx) or + isinstance(abstract_idx, ShapedArray) and _int(abstract_idx) or + isinstance(idx, slice) or + isinstance(idx, tuple) and all(onp.ndim(elt) == 0 for elt in idx)) + return out and np._is_advanced_int_indexer(idx) + def _scatter_update(x, idx, y, scatter_op): """Helper for indexed updates. @@ -33,11 +46,19 @@ def _scatter_update(x, idx, y, scatter_op): x[idx] op= y except in a pure functional way, with no in-place updating. - Support NumPy-style basic indexing only, i.e., `idx` must be - `None`, an integer, a `slice` object, or ellipses, or a tuple of the above. + Args: + x: ndarray to be updated. + idx: None, an integer, a slice, an ellipsis, an ndarray with integer dtype, + or a tuple of those indicating the locations of `x` into which to scatter- + update the values in `y`. + y: values to be scattered. + scatter_op: callable, either lax.scatter or lax.scatter_add. - TODO(phawkins): support advanced indexing. + Returns: + An ndarray representing an updated `x` after performing the scatter-update. """ + # For more clues on the logic of this implementation, see the code for + # jax.numpy._rewriting_take (which has links to NumPy docs). x = np.asarray(x) y = np.asarray(y) @@ -45,14 +66,45 @@ def _scatter_update(x, idx, y, scatter_op): y_shape = np.shape(y) y = lax.convert_element_type(y, lax.dtype(x)) + # Check if there's advanced indexing going on, and handle differently based on + # whether it is or isn't mixed with basic indexing. + if _is_advanced_int_indexer(idx): + if np._is_advanced_int_indexer_without_slices(idx): + if isinstance(idx, (tuple, list)): + if any(onp.shape(e) for e in idx): + # At least one sequence element in the index list means broadcasting. + idx = np.broadcast_arrays(*idx) + else: + # The index list is a flat list of integers. + idx = [lax.concatenate([lax.reshape(e, (1,)) for e in idx], 0)] + else: + # The indexer is just a single integer array. 
+ idx = [idx] + + stacked_idx = np.concatenate( + [np.mod(np.reshape(a, (-1, 1)), np._constant_like(a, x.shape[i])) + for i, a in enumerate(idx)], axis=1) + + y = np.broadcast_to(y, idx[0].shape + onp.shape(x)[len(idx):]) + y = lax.reshape(y, (stacked_idx.shape[0],) + onp.shape(x)[len(idx):]) + + dnums = lax.ScatterDimensionNumbers( + update_window_dims=tuple(range(1, y.ndim)), + inserted_window_dims=tuple(range(len(idx))), + scatter_dims_to_operand_dims=tuple(range(len(idx)))) + return scatter_op(x, stacked_idx, y, dnums) + elif np._is_advanced_int_indexer(idx): + # TODO(mattjj, phawkins): one of us is going to implement this case someday + msg = "Unimplemented case for indexed update. Open a feature request!" + raise NotImplementedError(msg) + else: + assert False # unreachable + + # At this point there's no advanced indexing going on, so we process each + # element of the index one at a time to build up a scatter. if not isinstance(idx, tuple): idx = (idx,) - # Test for unsupported advanced indexing and report an error. - if any(onp.ndim(elt) != 0 for elt in idx): - raise NotImplementedError("Unimplemented case for indexed update. Advanced " - "indexing is not yet implemented.") - # Remove ellipses and add trailing slice(None)s. idx = np._canonicalize_tuple_index(x, idx) @@ -189,10 +241,11 @@ def index_add(x, idx, y): (e.g., due to concurrency on some hardware platforms). Args: - x: an array. - idx: a Numpy-style basic index, consisting of `None`, integers, `slice` - objects, ellipses, or a tuple of the above. A convenient syntactic sugar - for forming indices is via the :data:`jax.ops.index` object. + x: an array with the values to be updated. + idx: a Numpy-style index, consisting of `None`, integers, `slice` objects, + ellipses, ndarrays with integer dtypes, or a tuple of the above. A + convenient syntactic sugar for forming indices is via the + :data:`jax.ops.index` object. y: the array of updates. `y` must be broadcastable to the shape of the array that would be returned by `x[idx]`. @@ -225,10 +278,11 @@ def index_update(x, idx, y): updates on some hardware platforms). Args: - x: an array. - idx: a Numpy-style basic index, consisting of `None`, integers, `slice` - objects, ellipses, or a tuple of the above. A convenient syntactic sugar - for forming indices is via the :data:`jax.ops.index` object. + x: an array with the values to be updated. + idx: a Numpy-style index, consisting of `None`, integers, `slice` objects, + ellipses, ndarrays with integer dtypes, or a tuple of the above. A + convenient syntactic sugar for forming indices is via the + :data:`jax.ops.index` object. y: the array of updates. `y` must be broadcastable to the shape of the array that would be returned by `x[idx]`. @@ -244,3 +298,32 @@ def index_update(x, idx, y): [1., 1., 1., 6., 6., 6.]], dtype=float32) """ return _scatter_update(x, idx, y, lax.scatter) + +def segment_sum(data, segment_ids, num_segments=None): + """Computes the sum within segments of an array. + + Similar to TensorFlow's segment_sum: + https://www.tensorflow.org/api_docs/python/tf/math/segment_sum + + Args: + data: an array with the values to be summed. + segment_ids: an array with integer dtype that indicates the segments of + `data` (along its leading axis) to be summed. Values can be repeated and + need not be sorted. Values outside of the range [0, num_segments) are + wrapped into that range by applying np.mod. + num_segments: optional, an int with positive value indicating the number of + segments. 
The default is ``max(segment_ids % data.shape[0]) + 1`` but + since `num_segments` determines the size of the output, a static value + must be provided to use `segment_sum` in a `jit`-compiled function. + + Returns: + An array with shape ``(num_segments,) + data.shape[1:]`` representing the + segment sums. + """ + if num_segments is None: + num_segments = np.max(np.mod(segment_ids, data.shape[0])) + 1 + num_segments = int(num_segments) + + out = np.zeros((num_segments,) + data.shape[1:], dtype=data.dtype) + segment_ids = np.mod(segment_ids, num_segments) + return index_add(out, segment_ids, data)
diff --git a/tests/lax_numpy_indexing_test.py b/tests/lax_numpy_indexing_test.py --- a/tests/lax_numpy_indexing_test.py +++ b/tests/lax_numpy_indexing_test.py @@ -216,6 +216,157 @@ def check_grads(f, args, order, atol=None, rtol=None, eps=None): # ]), ] +ADVANCED_INDEXING_TESTS = [ + ("One1DIntArrayIndex", + [IndexSpec(shape=(3,), indexer=onp.array([0, 1])), + IndexSpec(shape=(3, 3), indexer=onp.array([1, 2, 1])), + IndexSpec(shape=(3, 4, 5), indexer=onp.array([0, 2, 0, 1])), + IndexSpec(shape=(3,), indexer=onp.array([-1, 1])), + IndexSpec(shape=(3,), indexer=onp.array([-2, -1])), + ]), + ("One2DIntArrayIndex", + [IndexSpec(shape=(3,), indexer=onp.array([[0, 0]])), + IndexSpec(shape=(3, 3), indexer=onp.array([[1, 2, 1], + [0, 1, -1]])), + IndexSpec(shape=(3, 4, 5), indexer=onp.array([[0, 2, 0, 1], + [-1, -2, 1, 0]])), + ]), + ("Two1DIntArrayIndicesNoBroadcasting", + [IndexSpec(shape=(3, 3), indexer=[onp.array([0, 1]), + onp.array([1, 2])]), + IndexSpec(shape=(3, 4, 5), indexer=[onp.array([0, 2, 0, 1]), + onp.array([-1, 0, -1, 2])]), + ]), + ("Two1DIntArrayIndicesWithBroadcasting", + [IndexSpec(shape=(3, 3), indexer=[onp.array([[0, 1]]), + onp.array([1, 2])]), + IndexSpec(shape=(3, 4, 5), indexer=[onp.array([[0, 2, 0, 1]]), + onp.array([-1, 0, -1, 2])]), + ]), + ("ListOfPythonInts", + [IndexSpec(shape=(3,), indexer=[0, 1, 0]), + IndexSpec(shape=(3, 4, 5), indexer=[0, -1]), + ]), + ("ListOfListsOfPythonInts", + [IndexSpec(shape=(3, 4, 5), indexer=[[0, 1]]), + IndexSpec(shape=(3, 4, 5), indexer=[[[0], [-1]], [[2, 3, 0, 3]]]), + ]), + ("TupleOfListsOfPythonInts", + [IndexSpec(shape=(3, 4, 5), indexer=([0, 1])), + IndexSpec(shape=(3, 4, 5), indexer=([[0], [-1]], [[2, 3, 0, 3]])), + ]), + ("ListOfPythonIntsAndIntArrays", + [IndexSpec(shape=(3, 4, 5), indexer=[0, onp.array([0, 1])]), + IndexSpec(shape=(3, 4, 5), indexer=[0, 1, + onp.array([[2, 3, 0, 3]])]), + ]), + ("ListOfListsOfPythonIntsAndIntArrays", + [IndexSpec(shape=(3, 4, 5), indexer=[[0, 1], onp.array([0])]), + IndexSpec(shape=(3, 4, 5), indexer=[[[0], [-1]], + onp.array([[2, 3, 0, 3]])]), + ]), +] + +ADVANCED_INDEXING_TESTS_NO_REPEATS = [ + ("One1DIntArrayIndex", + [IndexSpec(shape=(3,), indexer=onp.array([0, 1])), + IndexSpec(shape=(3, 3), indexer=onp.array([1, 2, 0])), + IndexSpec(shape=(3, 4, 5), indexer=onp.array([0, 2, 1])), + IndexSpec(shape=(3,), indexer=onp.array([-1, 1])), + IndexSpec(shape=(3,), indexer=onp.array([-2, -1])), + ]), + ("One2DIntArrayIndex", + [IndexSpec(shape=(3,), indexer=onp.array([[0, 1]])), + IndexSpec(shape=(6, 6), indexer=onp.array([[1, 2, 0], + [3, 4, -1]])), + ]), + ("Two1DIntArrayIndicesNoBroadcasting", + [IndexSpec(shape=(3, 3), indexer=[onp.array([0, 1]), + onp.array([1, 2])]), + IndexSpec(shape=(4, 5, 6), indexer=[onp.array([0, 2, 1, 3]), + onp.array([-1, 0, -2, 1])]), + ]), + ("Two1DIntArrayIndicesWithBroadcasting", + [IndexSpec(shape=(3, 3), indexer=[onp.array([[0, 1]]), + onp.array([1, 2])]), + IndexSpec(shape=(4, 5, 6), indexer=[onp.array([[0, 2, -1, 1]]), + onp.array([-1, 0, -2, 2])]), + ]), + ("ListOfPythonInts", + [IndexSpec(shape=(3,), indexer=[0, 2, 1]), + IndexSpec(shape=(3, 4, 5), indexer=[0, -1]), + ]), + ("ListOfListsOfPythonInts", + [IndexSpec(shape=(3, 4, 5), indexer=[[0, 1]]), + IndexSpec(shape=(3, 4, 5), indexer=[[[0], [-1]], [[2, 3, 0]]]), + ]), + ("TupleOfListsOfPythonInts", + [IndexSpec(shape=(3, 4, 5), indexer=([0, 1])), + IndexSpec(shape=(3, 4, 5), indexer=([[0], [-1]], [[2, 3, 0]])), + ]), + ("ListOfPythonIntsAndIntArrays", + [IndexSpec(shape=(3, 4, 5), indexer=[0, 
onp.array([0, 1])]), + IndexSpec(shape=(3, 4, 5), indexer=[0, 1, + onp.array([[2, 3, 0]])]), + ]), + ("ListOfListsOfPythonIntsAndIntArrays", + [IndexSpec(shape=(3, 4, 5), indexer=[[0, 1], onp.array([0])]), + IndexSpec(shape=(3, 4, 5), indexer=[[[0], [-1]], + onp.array([[2, 3, 0]])]), + ]), +] + +MIXED_ADVANCED_INDEXING_TESTS = [ + ("SlicesAndOneIntArrayIndex", + [IndexSpec(shape=(2, 3), indexer=(onp.array([0, 1]), slice(1, 2))), + IndexSpec(shape=(2, 3), indexer=(slice(0, 2), + onp.array([0, 2]))), + IndexSpec(shape=(3, 4, 5), indexer=(Ellipsis, + onp.array([0, 2]), + slice(None))), + IndexSpec(shape=(3, 4, 5), indexer=(Ellipsis, + onp.array([[0, 2], [1, 1]]), + slice(None))), + ]), + ("SlicesAndTwoIntArrayIndices", + [IndexSpec(shape=(3, 4, 5), indexer=(Ellipsis, + onp.array([0, 2]), + onp.array([-1, 2]))), + IndexSpec(shape=(3, 4, 5), indexer=(onp.array([0, 2]), + Ellipsis, + onp.array([-1, 2]))), + IndexSpec(shape=(3, 4, 5), indexer=(onp.array([0, 2]), + onp.array([-1, 2]), + Ellipsis)), + IndexSpec(shape=(3, 4, 5), indexer=(onp.array([0, 2]), + onp.array([-1, 2]), + slice(1, 3))), + IndexSpec(shape=(3, 4, 5), indexer=(onp.array([0, 2]), + slice(1, 3), + onp.array([-1, 2]))), + IndexSpec(shape=(3, 4, 5), indexer=(onp.array([0, 2, -2]), + slice(None, None, 2), + onp.array([-1, 2, -1]))), + IndexSpec(shape=(3, 4, 5), indexer=(onp.array([[0, 2], [2, 0]]), + Ellipsis, + onp.array([[1, 0], [1, 0]]))), + ]), + ("NonesAndIntArrayIndices", + [IndexSpec(shape=(3, 4, 5), indexer=[onp.array([0, 2]), + None, + onp.array([-1, 2])]), + IndexSpec(shape=(3, 4, 5), indexer=(onp.array([0, 2]), + None, + None, + onp.array([-1, 2]))), + IndexSpec(shape=(3, 4, 5), indexer=(Ellipsis, + onp.array([0, 2]), + None, + None, + onp.array([-1, 2]))), + ]), +] + class IndexingTest(jtu.JaxTestCase): """Tests for Numpy indexing translation rules.""" @@ -371,56 +522,7 @@ def fun(unpacked_indexer, x): {"testcase_name": "{}_inshape={}_indexer={}" .format(name, jtu.format_shape_dtype_string(shape, dtype), indexer), "shape": shape, "dtype": dtype, "rng": rng, "indexer": indexer} - for name, index_specs in [ - ("One1DIntArrayIndex", - [IndexSpec(shape=(3,), indexer=onp.array([0, 1])), - IndexSpec(shape=(3, 3), indexer=onp.array([1, 2, 1])), - IndexSpec(shape=(3, 4, 5), indexer=onp.array([0, 2, 0, 1])), - IndexSpec(shape=(3,), indexer=onp.array([-1, 1])), - IndexSpec(shape=(3,), indexer=onp.array([-2, -1])), - ]), - ("One2DIntArrayIndex", - [IndexSpec(shape=(3,), indexer=onp.array([[0, 0]])), - IndexSpec(shape=(3, 3), indexer=onp.array([[1, 2, 1], - [0, 1, -1]])), - IndexSpec(shape=(3, 4, 5), indexer=onp.array([[0, 2, 0, 1], - [-1, -2, 1, 0]])), - ]), - ("Two1DIntArrayIndicesNoBroadcasting", - [IndexSpec(shape=(3, 3), indexer=[onp.array([0, 1]), - onp.array([1, 2])]), - IndexSpec(shape=(3, 4, 5), indexer=[onp.array([0, 2, 0, 1]), - onp.array([-1, 0, -1, 2])]), - ]), - ("Two1DIntArrayIndicesWithBroadcasting", - [IndexSpec(shape=(3, 3), indexer=[onp.array([[0, 1]]), - onp.array([1, 2])]), - IndexSpec(shape=(3, 4, 5), indexer=[onp.array([[0, 2, 0, 1]]), - onp.array([-1, 0, -1, 2])]), - ]), - ("ListOfPythonInts", - [IndexSpec(shape=(3,), indexer=[0, 1, 0]), - IndexSpec(shape=(3, 4, 5), indexer=[0, -1]), - ]), - ("ListOfListsOfPythonInts", - [IndexSpec(shape=(3, 4, 5), indexer=[[0, 1]]), - IndexSpec(shape=(3, 4, 5), indexer=[[[0], [-1]], [[2, 3, 0, 3]]]), - ]), - ("TupleOfListsOfPythonInts", - [IndexSpec(shape=(3, 4, 5), indexer=([0, 1])), - IndexSpec(shape=(3, 4, 5), indexer=([[0], [-1]], [[2, 3, 0, 3]])), - ]), - 
("ListOfPythonIntsAndIntArrays", - [IndexSpec(shape=(3, 4, 5), indexer=[0, onp.array([0, 1])]), - IndexSpec(shape=(3, 4, 5), indexer=[0, 1, - onp.array([[2, 3, 0, 3]])]), - ]), - ("ListOfListsOfPythonIntsAndIntArrays", - [IndexSpec(shape=(3, 4, 5), indexer=[[0, 1], onp.array([0])]), - IndexSpec(shape=(3, 4, 5), indexer=[[[0], [-1]], - onp.array([[2, 3, 0, 3]])]), - ]), - ] + for name, index_specs in ADVANCED_INDEXING_TESTS for shape, indexer in index_specs for dtype in all_dtypes for rng in [jtu.rand_default()]) @@ -492,56 +594,7 @@ def testAdvancedIntegerIndexingGrads(self, shape, dtype, rng, indexer): {"testcase_name": "{}_inshape={}_indexer={}" .format(name, jtu.format_shape_dtype_string(shape, dtype), indexer), "shape": shape, "dtype": dtype, "rng": rng, "indexer": indexer} - for name, index_specs in [ - ("SlicesAndOneIntArrayIndex", - [IndexSpec(shape=(2, 3), indexer=(onp.array([0, 1]), slice(1, 2))), - IndexSpec(shape=(2, 3), indexer=(slice(0, 2), - onp.array([0, 2]))), - IndexSpec(shape=(3, 4, 5), indexer=(Ellipsis, - onp.array([0, 2]), - slice(None))), - IndexSpec(shape=(3, 4, 5), indexer=(Ellipsis, - onp.array([[0, 2], [1, 1]]), - slice(None))), - ]), - ("SlicesAndTwoIntArrayIndices", - [IndexSpec(shape=(3, 4, 5), indexer=(Ellipsis, - onp.array([0, 2]), - onp.array([-1, 2]))), - IndexSpec(shape=(3, 4, 5), indexer=(onp.array([0, 2]), - Ellipsis, - onp.array([-1, 2]))), - IndexSpec(shape=(3, 4, 5), indexer=(onp.array([0, 2]), - onp.array([-1, 2]), - Ellipsis)), - IndexSpec(shape=(3, 4, 5), indexer=(onp.array([0, 2]), - onp.array([-1, 2]), - slice(1, 3))), - IndexSpec(shape=(3, 4, 5), indexer=(onp.array([0, 2]), - slice(1, 3), - onp.array([-1, 2]))), - IndexSpec(shape=(3, 4, 5), indexer=(onp.array([0, 2, -2]), - slice(None, None, 2), - onp.array([-1, 2, -1]))), - IndexSpec(shape=(3, 4, 5), indexer=(onp.array([[0, 2], [2, 0]]), - Ellipsis, - onp.array([[1, 0], [1, 0]]))), - ]), - ("NonesAndIntArrayIndices", - [IndexSpec(shape=(3, 4, 5), indexer=[onp.array([0, 2]), - None, - onp.array([-1, 2])]), - IndexSpec(shape=(3, 4, 5), indexer=(onp.array([0, 2]), - None, - None, - onp.array([-1, 2]))), - IndexSpec(shape=(3, 4, 5), indexer=(Ellipsis, - onp.array([0, 2]), - None, - None, - onp.array([-1, 2]))), - ]), - ] + for name, index_specs in MIXED_ADVANCED_INDEXING_TESTS for shape, indexer in index_specs for dtype in all_dtypes for rng in [jtu.rand_default()]) @@ -706,6 +759,35 @@ def onp_fn(x, y): self._CheckAgainstNumpy(onp_fn, jax_fn, args_maker, check_dtypes=True) self._CompileAndCheck(jax_fn, args_maker, check_dtypes=True) + @parameterized.named_parameters(jtu.cases_from_list({ + "testcase_name": "{}_inshape={}_indexer={}_update={}_op={}".format( + name, jtu.format_shape_dtype_string(shape, dtype), indexer, + jtu.format_shape_dtype_string(update_shape, update_dtype), op.name), + "shape": shape, "dtype": dtype, "rng": rng, "indexer": indexer, + "update_shape": update_shape, "update_dtype": update_dtype, + "op": op + } for name, index_specs in ADVANCED_INDEXING_TESTS_NO_REPEATS + for shape, indexer in index_specs + for op in [UpdateOps.UPDATE, UpdateOps.ADD] + for dtype in (all_dtypes if op == UpdateOps.UPDATE else default_dtypes) + for update_shape in _broadcastable_shapes(_update_shape(shape, indexer)) + for update_dtype in ([dtype] if op == UpdateOps.ADD else all_dtypes) + for rng in [jtu.rand_default()])) + def testAdvancedIndexing(self, shape, dtype, update_shape, update_dtype, + rng, indexer, op): + args_maker = lambda: [rng(shape, dtype), rng(update_shape, update_dtype)] + def 
onp_fn(x, y): + x = x.copy() + if op == UpdateOps.UPDATE: + x[indexer] = y + else: + x[indexer] += y + return x + + jax_op = ops.index_update if op == UpdateOps.UPDATE else ops.index_add + jax_fn = lambda x, y: jax_op(x, indexer, y) + self._CheckAgainstNumpy(onp_fn, jax_fn, args_maker, check_dtypes=True) + self._CompileAndCheck(jax_fn, args_maker, check_dtypes=True) @parameterized.named_parameters(jtu.cases_from_list({ "testcase_name": "{}_inshape={}_indexer={}_update={}_op={}".format( @@ -729,6 +811,32 @@ def testStaticIndexingGrads(self, shape, dtype, update_shape, update_dtype, y = rng(update_shape, update_dtype) check_grads(jax_fn, (x, y), 2, rtol=1e-3, atol=1e-3, eps=1.) + def testSegmentSumBehavior(self): + # testAdvancedIndexing compares against NumPy, and as a result doesn't check + # repeated indices. This test is just a simple manual check, based on + # https://www.tensorflow.org/api_docs/python/tf/math/segment_sum + data = onp.array([5, 1, 7, 2, 3, 4, 1, 3]) + segment_ids = onp.array([0, 0, 0, 1, 2, 2, 3, 3]) + + ans = ops.index_add(onp.zeros(onp.max(segment_ids) + 1), segment_ids, data) + expected = onp.array([13, 2, 7, 4]) + self.assertAllClose(ans, expected, check_dtypes=False) + + def testSegmentSum(self): + data = onp.array([5, 1, 7, 2, 3, 4, 1, 3]) + segment_ids = onp.array([0, 0, 0, 1, 2, 2, 3, 3]) + + # test with explicit num_segments + ans = ops.segment_sum(data, segment_ids, num_segments=4) + expected = onp.array([13, 2, 7, 4]) + self.assertAllClose(ans, expected, check_dtypes=False) + + # test without explicit num_segments + ans = ops.segment_sum(data, segment_ids) + expected = onp.array([13, 2, 7, 4]) + self.assertAllClose(ans, expected, check_dtypes=False) + + if __name__ == "__main__": absltest.main()
segment_sum primitives / advanced indexing in jax.ops.index_add

Would it be useful to others (aside from me) to have better support for sorted and unsorted segment sums?
https://www.tensorflow.org/api_docs/python/tf/math/unsorted_segment_sum
https://www.tensorflow.org/api_docs/python/tf/math/segment_sum

In NumPy, one way to do unsorted segment sums is to sort and then call np.add.reduceat, but this doesn't seem to exist in jax or autograd:
```
>>> import jax.numpy as np
>>> np.add
<function _one_to_one_binop.<locals>.<lambda> at 0x1430cdbf8>
>>> np.add.reduceat
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
AttributeError: 'function' object has no attribute 'reduceat'
```
Does `jax.ops.index_add` do what you need? It seems roughly equivalent to `unsorted_segment_sum`.
https://jax.readthedocs.io/en/latest/_autosummary/jax.ops.index_add.html#jax.ops.index_add

Ah, I guess it does not *yet*, because it does not support advanced indexing. We should fix that.

Nice! Would it be hard to support the basic operators +, -, *, / on top of add? Not sure whether there are associativity assumptions in the underlying implementation or not.

Edit: never mind; pretty sure - and / will be very hard to do correctly for anything more than a single operation, now that I think about it a little more (since JAX says that it applies all the updates).

After playing around a little bit with the NumPy version of advanced index assignment, I take it the idea is to implement the unsorted/sorted segment sums using something similar to:
```python
ys = np.arange(5)
idxs = [0, 0, 1, 0, 1]
sums = np.zeros(2)  # two slots
seg_sum = jax.ops.index_add(sums, jax.ops.index[idxs], ys)
```
Edit: of course the jax version will work because it actually accumulates properly, as opposed to just applying the last element.

Yup, that's the idea!
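Putting the pieces of that thread together, here is a minimal sketch of a segment sum built on `jax.ops.index_add`; the `segment_sum` helper, its `num_segments` default, and the zero-initialized output buffer are illustrative assumptions rather than the API as merged:
```python
import numpy as onp
import jax.numpy as np
from jax import ops

def segment_sum(data, segment_ids, num_segments=None):
  # Sum the rows of `data` that share a segment id, like tf.math.segment_sum.
  # Relies on index_add accumulating contributions for repeated indices.
  if num_segments is None:
    num_segments = int(onp.max(segment_ids)) + 1
  zeros = np.zeros((num_segments,) + data.shape[1:], dtype=data.dtype)
  return ops.index_add(zeros, segment_ids, data)

data = onp.array([5, 1, 7, 2, 3, 4, 1, 3])
segment_ids = onp.array([0, 0, 0, 1, 2, 2, 3, 3])
print(segment_sum(data, segment_ids))  # expected: [13  2  7  4]
```
With the `ops.segment_sum` added in this PR the helper is unnecessary, but the construction is the same one exercised by the tests above.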
2019-05-04T23:27:01
google/jax
675
google__jax-675
[ "672" ]
8173e671ae55eb8d71ab4edef3a36904573e659b
diff --git a/jax/lax/lax.py b/jax/lax/lax.py --- a/jax/lax/lax.py +++ b/jax/lax/lax.py @@ -707,6 +707,8 @@ def _get_monoid_reducer(monoid_op, x): if (type(aval) is ConcreteArray) and aval.shape == (): if monoid_op is add: return aval.val == 0 and _reduce_sum + if monoid_op is mul: + return aval.val == 1 and _reduce_prod elif monoid_op is max: return aval.val == _get_max_identity(aval.dtype) and _reduce_max elif monoid_op is min: @@ -735,6 +737,9 @@ def _get_min_identity(dtype): def _reduce_sum(operand, axes): return reduce_sum_p.bind(operand, axes=tuple(axes), input_shape=operand.shape) +def _reduce_prod(operand, axes): + return reduce_prod_p.bind(operand, axes=tuple(axes)) + def _reduce_max(operand, axes): return reduce_max_p.bind(operand, axes=tuple(axes)) @@ -3029,6 +3034,54 @@ def _reduce_sum_transpose_rule(cotangent, input_shape, axes): batching.defreducer(reduce_sum_p) + + +def _reduce_prod_shape_rule(operand, axes): + return tuple(onp.delete(operand.shape, axes)) + +def _reduce_prod_translation_rule(c, operand, axes): + dtype = c.GetShape(operand).numpy_dtype() + scalar = xla_bridge.Shape.array_shape(dtype, ()) + return c.Reduce(operand, c.Constant(onp.array(1, dtype)), + xla.primitive_computation(mul_p, scalar, scalar), + axes) + +def _reduce_prod_jvp_rule(tangent, operand, axes): + input_shape = onp.array(operand.shape) + + n = onp.prod(input_shape[list(axes)]) + non_axes = onp.delete(onp.arange(len(input_shape)), axes) + + # Move the reduced axes to the front, and flatten them to 1D. + permutation = axes + tuple(non_axes) + new_shape = (n,) + tuple(input_shape[non_axes]) + operand = reshape(operand, new_shape, permutation) + tangent = reshape(tangent, new_shape, permutation) + + one = _const(operand, 1) + window_dims = [n] + [1] * len(non_axes) + window_strides = [1] * (len(non_axes) + 1) + + # Form the partial products of all elements to the left and right of each + # element. + left_padding = [(n, -1, 0)] + [(0, 0, 0)] * len(non_axes) + right_padding = [(-1, n, 0)] + [(0, 0, 0)] * len(non_axes) + left_products = _reduce_window_prod(pad(operand, one, left_padding), + window_dims, window_strides, + xla_client.PaddingType.VALID) + right_products = _reduce_window_prod(pad(operand, one, right_padding), + window_dims, window_strides, + xla_client.PaddingType.VALID) + + # Multiply partial products with the tangents and sum. + return _reduce_sum(mul(tangent, mul(left_products, right_products)), (0,)) + +reduce_prod_p = standard_primitive(_reduce_prod_shape_rule, _input_dtype, + 'reduce_prod', _reduce_prod_translation_rule) +ad.defjvp(reduce_prod_p, _reduce_prod_jvp_rule) +batching.defreducer(reduce_prod_p) + + def _reduce_chooser_shape_rule(operand, axes): return tuple(onp.delete(operand.shape, axes))
diff --git a/tests/lax_test.py b/tests/lax_test.py --- a/tests/lax_test.py +++ b/tests/lax_test.py @@ -1881,11 +1881,13 @@ def testTransposeGrad(self, shape, dtype, perm, rng): "dims": dims, "rng": rng} for init_val, op, dtypes in [ (0, lax.add, inexact_dtypes), + (1, lax.mul, inexact_dtypes), (-onp.inf, lax.max, inexact_dtypes), (onp.inf, lax.min, inexact_dtypes), ] for dtype in dtypes for shape, dims in [ + [(3, 4, 5), ()], [(3, 4, 5), (0,)], [(3, 4, 5), (1, 2)], [(3, 4, 5), (0, 2)],
Cannot take gradient of np.prod

With the latest git version of jax, I get an error trying to `grad` a basic `np.prod` because some underlying `reduce` is not implemented. Perhaps this is a known limitation, but I could not find it in the documentation and it's what currently blocks me. Here is a minimal example:
```
import jax.numpy as np
from jax import grad
g = grad(lambda x: np.prod(x))
g(np.ones((10,8)))
```
> Traceback (most recent call last):
>   File "/home/guy/jax/jax/interpreters/ad.py", line 210, in process_primitive
>     jvp = primitive_jvps[primitive]
> KeyError: reduce
> ...
> NotImplementedError: Forward-mode differentiation rule for 'reduce' not implemented
This should be easy enough to add. Note that the gradient of `np.sum` is implemented, so one possible workaround in the meantime is to compute `exp(sum(log(x)))`.
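Sketched out, that workaround looks like the following; note it assumes strictly positive entries, since it routes the product through `log`:
```python
import jax.numpy as np
from jax import grad

# prod(x) == exp(sum(log(x))) for strictly positive x, and sum/exp/log all
# have gradients, so this sidesteps the missing rule for `reduce`.
g = grad(lambda x: np.exp(np.sum(np.log(x))))
print(g(np.ones((10, 8))))  # all-ones gradient at an all-ones input
```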
2019-05-05T18:38:13
google/jax
682
google__jax-682
[ "244" ]
b226cbc5b51ffd8c390cf6f8657ea82262dffd29
diff --git a/jax/experimental/optimizers.py b/jax/experimental/optimizers.py --- a/jax/experimental/optimizers.py +++ b/jax/experimental/optimizers.py @@ -79,7 +79,8 @@ import jax.numpy as np from jax.util import partial, safe_zip, safe_map, unzip2 from jax import tree_util -from jax.tree_util import tree_flatten, tree_unflatten, register_pytree_node +from jax.tree_util import (tree_map, tree_flatten, tree_unflatten, + register_pytree_node) map = safe_map zip = safe_zip @@ -376,3 +377,17 @@ def make_schedule(scalar_or_schedule): return constant(scalar_or_schedule) else: raise TypeError(type(scalar_or_schedule)) + + +### utilities + +def l2_norm(tree): + """Compute the l2 norm of a pytree of arrays. Useful for weight decay.""" + leaves, _ = tree_flatten(tree) + return np.sqrt(sum(np.vdot(x, x) for x in leaves)) + +def clip_grads(grad_tree, max_norm): + """Clip gradients stored as a pytree of arrays to maximum norm `max_norm`.""" + norm = l2_norm(grad_tree) + normalize = lambda g: np.where(norm < max_norm, g, g * (max_norm / norm)) + return tree_map(normalize, grad_tree)
diff --git a/tests/optimizers_test.py b/tests/optimizers_test.py --- a/tests/optimizers_test.py +++ b/tests/optimizers_test.py @@ -20,6 +20,8 @@ import functools from absl.testing import absltest +import numpy as onp + import jax.numpy as np import jax.test_util as jtu from jax import jit, grad @@ -213,6 +215,26 @@ def get_params(opt_state): opt_state = init_fun(np.zeros(3)) self.assertRaises(TypeError, lambda: update_fun(opt_state)) + def testUtilityNorm(self): + x0 = (np.ones(2), (np.ones(3), np.ones(4))) + norm = optimizers.l2_norm(x0) + expected = onp.sqrt(onp.sum(onp.ones(2+3+4)**2)) + self.assertAllClose(norm, expected, check_dtypes=False) + + def testUtilityClipGrads(self): + g = (np.ones(2), (np.ones(3), np.ones(4))) + norm = optimizers.l2_norm(g) + + ans = optimizers.clip_grads(g, 1.1 * norm) + expected = g + self.assertAllClose(ans, expected, check_dtypes=False) + + ans = optimizers.l2_norm(optimizers.clip_grads(g, 0.9 * norm)) + expected = 0.9 * norm + self.assertAllClose(ans, expected, check_dtypes=False) + + + if __name__ == '__main__': absltest.main()
Adding regularizers to Stax

Some utilities for regularizing cost functions would be useful in Stax. I have not seen this mentioned in #137. Is this planned?

Also, I was wondering about the best way to implement this in pure JAX. For example, considering L2 regularization, with the flattening utility from #190 I could write:
```
w_flat, _ = ravel_pytree(w)
reg = np.sum(w_flat ** 2.0)
```
Looking a bit at the code of tree_util, I can also write:
```
reg = tree_reduce(lambda x, y: np.sum(x) + np.sum(y), tree_map(lambda wi: np.sum(wi**2.0), w))
```
Is there a simpler solution I am missing? More generally, it would be useful to document the tree utilities in the main docs.
Thanks for suggesting this! The `tree_reduce` version is better because otherwise all the arrays have to be concatenated together. It's possible that XLA could optimize away that explicit concatenation when under an `@jit`, but I'm not sure it will. Here's another way to write it which avoids traversing the tree twice:
```python
from jax.tree_util import tree_flatten
import jax.numpy as np

def l2_squared(pytree):
  leaves, _ = tree_flatten(pytree)
  return sum(np.vdot(x, x) for x in leaves)
```
We could add this to Stax, or perhaps to an MNIST example file. We'd like to avoid making Stax into a monolith; part of the point is to show how easy it is to write neural net "layers" libraries, so you can write one yourself with whatever features you want.

A notebook with some "advanced" use cases would be terrific! Especially on the parts of the library that are less documented at the moment.
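To make that concrete, here is a hypothetical weight-decay use of `l2_squared`; the parameter pytree, the coefficient, and the placeholder data loss are made up for illustration:
```python
import jax.numpy as np
from jax import grad
from jax.tree_util import tree_flatten

def l2_squared(pytree):
  leaves, _ = tree_flatten(pytree)
  return sum(np.vdot(x, x) for x in leaves)

weight_decay = 1e-4                         # illustrative coefficient
params = (np.ones((3, 2)), (np.ones(2),))   # stand-in for real network params

def loss(params):
  data_loss = 0.0  # stand-in for a real training loss over a batch
  return data_loss + weight_decay * l2_squared(params)

print(grad(loss)(params))  # a pytree of gradients, including the decay term
```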
2019-05-06T23:10:45
google/jax
685
google__jax-685
[ "683" ]
535773fe89f56b30bf94ab79e36d39ee07b5a8f7
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -850,12 +850,6 @@ def fix(x, out=None): zero = lax._const(x, 0) return where(lax.ge(x, zero), lax.floor(x), lax.ceil(x)) - -# Caution: If fast math mode is enabled, the semantics of inf and nan are not -# preserved by XLA/LLVM, and the behavior of inf/nan values is unpredictable. -# To disable fast math mode on CPU, set the environment variable -# XLA_FLAGS=--xla_cpu_enable_fast_math=false. - @_wraps(onp.isfinite) def isfinite(x): dtype = _dtype(x) @@ -870,17 +864,19 @@ def isfinite(x): def isinf(x): dtype = _dtype(x) if issubdtype(dtype, floating): - return lax.eq(lax.abs(x), inf) + return lax.eq(lax.abs(x), _constant_like(x, inf)) elif issubdtype(dtype, complexfloating): - return lax.bitwise_or(lax.eq(lax.abs(real(x)), inf), - lax.eq(lax.abs(imag(x)), inf)) + re = lax.real(x) + im = lax.imag(x) + return lax.bitwise_or(lax.eq(lax.abs(re), _constant_like(re, inf)), + lax.eq(lax.abs(im), _constant_like(im, inf))) else: return full_like(x, False, dtype=bool_) def _isposneginf(infinity, x): dtype = _dtype(x) if issubdtype(dtype, floating): - return lax.eq(x, infinity) + return lax.eq(x, _constant_like(x, infinity)) elif issubdtype(dtype, complexfloating): raise ValueError("isposinf/isneginf are not well defined for complex types") else: @@ -897,9 +893,10 @@ def isnan(x): @_wraps(onp.nan_to_num) def nan_to_num(x, copy=True): del copy - if iscomplexobj(x): - raise ValueError("nan_to_num is not well defined for complex types") - info = finfo(xla_bridge.canonicalize_dtype(_dtype(x))) + dtype = _dtype(x) + if issubdtype(dtype, complexfloating): + return lax.complex(nan_to_num(lax.real(x)), nan_to_num(lax.imag(x))) + info = finfo(xla_bridge.canonicalize_dtype(dtype)) x = where(isnan(x), _constant_like(x, 0), x) x = where(isposinf(x), _constant_like(x, info.max), x) x = where(isneginf(x), _constant_like(x, info.min), x)
diff --git a/jax/test_util.py b/jax/test_util.py --- a/jax/test_util.py +++ b/jax/test_util.py @@ -322,7 +322,6 @@ def post(x): return partial(_rand_dtype, randn, scale=100., post=post) -# TODO(mattjj): doesn't handle complex types def rand_some_inf(): """Return a random sampler that produces infinities in floating types.""" rng = npr.RandomState(1) @@ -334,6 +333,10 @@ def rand(shape, dtype): # only float types have inf return base_rand(shape, dtype) + if onp.issubdtype(dtype, onp.complexfloating): + base_dtype = onp.real(onp.array(0, dtype=dtype)).dtype + return rand(shape, base_dtype) + 1j * rand(shape, base_dtype) + dims = _dims_of_shape(shape) posinf_flips = rng.rand(*dims) < 0.1 neginf_flips = rng.rand(*dims) < 0.1 @@ -346,6 +349,34 @@ def rand(shape, dtype): return rand +def rand_some_inf_and_nan(): + """Return a random sampler that produces infinities in floating types.""" + rng = npr.RandomState(1) + base_rand = rand_default() + + def rand(shape, dtype): + """The random sampler function.""" + if not onp.issubdtype(dtype, onp.floating): + # only float types have inf + return base_rand(shape, dtype) + + if onp.issubdtype(dtype, onp.complexfloating): + base_dtype = onp.real(onp.array(0, dtype=dtype)).dtype + return rand(shape, base_dtype) + 1j * rand(shape, base_dtype) + + dims = _dims_of_shape(shape) + posinf_flips = rng.rand(*dims) < 0.1 + neginf_flips = rng.rand(*dims) < 0.1 + nan_flips = rng.rand(*dims) < 0.1 + + vals = base_rand(shape, dtype) + vals = onp.where(posinf_flips, onp.inf, vals) + vals = onp.where(neginf_flips, -onp.inf, vals) + vals = onp.where(nan_flips, onp.nan, vals) + + return _cast_to_shape(onp.asarray(vals, dtype=dtype), shape, dtype) + + return rand # TODO(mattjj): doesn't handle complex types def rand_some_zero(): diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -33,6 +33,7 @@ from jax import lax from jax import numpy as lnp from jax import test_util as jtu +from jax.lib import xla_bridge from jax.config import config config.parse_flags_with_absl() @@ -82,7 +83,6 @@ def op_record(name, nargs, dtypes, shapes, rng, diff_modes, test_name=None, op_record("floor", 1, float_dtypes, all_shapes, jtu.rand_default(), []), op_record("greater", 2, number_dtypes, all_shapes, jtu.rand_some_equal(), []), op_record("greater_equal", 2, number_dtypes, all_shapes, jtu.rand_some_equal(), []), - op_record("isfinite", 1, number_dtypes, all_shapes, jtu.rand_some_inf(), []), op_record("less", 2, number_dtypes, all_shapes, jtu.rand_some_equal(), []), op_record("less_equal", 2, number_dtypes, all_shapes, jtu.rand_some_equal(), []), op_record("log", 1, number_dtypes, all_shapes, jtu.rand_positive(), ["rev"]), @@ -137,6 +137,11 @@ def op_record(name, nargs, dtypes, shapes, rng, diff_modes, test_name=None, op_record("outer", 2, number_dtypes, all_shapes, jtu.rand_default(), []), op_record("imag", 1, number_dtypes, all_shapes, jtu.rand_some_inf(), []), op_record("iscomplex", 1, number_dtypes, all_shapes, jtu.rand_some_inf(), []), + op_record("isfinite", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan(), []), + op_record("isinf", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan(), []), + op_record("isnan", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan(), []), + op_record("isneginf", 1, float_dtypes, all_shapes, jtu.rand_some_inf_and_nan(), []), + op_record("isposinf", 1, float_dtypes, all_shapes, jtu.rand_some_inf_and_nan(), []), op_record("isreal", 1, number_dtypes, all_shapes, 
jtu.rand_some_inf(), []), op_record("isrealobj", 1, number_dtypes, all_shapes, jtu.rand_some_inf(), []), op_record("log2", 1, number_dtypes, all_shapes, jtu.rand_positive(), ["rev"]), @@ -1355,6 +1360,19 @@ def testVander(self, shape, dtype, n, increasing, rng): self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=False) self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=False) + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": jtu.format_test_name_suffix("nan_to_num", [shape], + [dtype]), + "rng": jtu.rand_some_inf_and_nan(), "shape": shape, "dtype": dtype} + for shape in all_shapes + for dtype in inexact_dtypes)) + def testNanToNum(self, rng, shape, dtype): + dtype = onp.dtype(xla_bridge.canonicalize_dtype(dtype)).type + args_maker = lambda: [rng(shape, dtype)] + self._CheckAgainstNumpy(onp.nan_to_num, lnp.nan_to_num, args_maker, + check_dtypes=True) + self._CompileAndCheck(lnp.nan_to_num, args_maker, check_dtypes=True) + def testIssue330(self): x = lnp.full((1, 1), lnp.array([1])[0]) # doesn't crash self.assertEqual(x[0, 0], 1)
Bug in np.nan_to_num in 64-bit mode when x is float32. Here is a repro: ` np.nan_to_num(np.array([1, 2, 3], dtype=np.float32)) ` which gives the stack trace: google3/third_party/py/jax/numpy/lax_numpy.py in nan_to_num(***failed resolving arguments***) 893 raise ValueError("nan_to_num is not well defined for complex types") 894 info = finfo(xla_bridge.canonicalize_dtype(_dtype(x))) --> 895 x = where(isnan(x), _constant_like(x, 0), x) 896 x = where(isposinf(x), _constant_like(x, info.max), x) 897 x = where(isneginf(x), _constant_like(x, info.min), x) google3/third_party/py/jax/numpy/lax_numpy.py in isnan(x) 885 def isnan(x): 886 return lax.bitwise_and(lax.bitwise_not(isfinite(x)), --> 887 lax.bitwise_not(isinf(x))) 888 889 @_wraps(onp.nan_to_num) google3/third_party/py/jax/numpy/lax_numpy.py in isinf(x) 863 dtype = _dtype(x) 864 if issubdtype(dtype, floating): --> 865 return lax.eq(lax.abs(x), inf) 866 elif issubdtype(dtype, complexfloating): 867 return lax.bitwise_or(lax.eq(lax.abs(real(x)), inf), google3/third_party/py/jax/lax/lax.py in eq(x, y) 270 def eq(x, y): 271 r"""Elementwise equals: :math:`x = y`.""" --> 272 return eq_p.bind(x, y) 273 274 def ne(x, y): google3/third_party/py/jax/core.py in bind(self, *args, **kwargs) 73 top_trace = find_top_trace(args) 74 if top_trace is None: ---> 75 return self.impl(*args, **kwargs) 76 77 tracers = map(top_trace.full_raise, args) google3/third_party/py/jax/interpreters/xla.py in apply_primitive(prim, *args, **kwargs) 49 def apply_primitive(prim, *args, **kwargs): 50 abstract_args = map(abstractify, args) ---> 51 compiled_fun = xla_primitive_callable(prim, *abstract_args, **kwargs) 52 return compiled_fun(*args) 53 google3/third_party/py/jax/util.py in memoized_fun(*args, **kwargs) 172 cache.popitem(last=False) 173 --> 174 ans = cache[key] = fun(*args, **kwargs) 175 return ans 176 return memoized_fun google3/third_party/py/jax/interpreters/xla.py in xla_primitive_callable(prim, *abstract_args, **kwargs) 59 handle_result = result_handler(result_shape) 60 compiled = built_c.Compile(shapes, xb.get_compile_options(), ---> 61 backend=xb.get_backend()) 62 return partial(execute_compiled_primitive, compiled, handle_result) 63 google3/third_party/tensorflow/compiler/xla/python/xla_client.py in Compile(self, argument_shapes, compile_options, backend) 695 if argument_shapes: 696 compile_options.argument_layouts = argument_shapes --> 697 c = backend.compile(self.computation, compile_options) 698 return Executable(c, backend=backend) 699 google3/third_party/tensorflow/compiler/xla/python/xla_client.py in compile(self, c_computation, compile_options) 147 options.debug_options.xla_cpu_fast_math_honor_nans = True 148 return _xla.LocalExecutable.Compile(c_computation, argument_layouts, --> 149 options, self.client) 150 151 def delete_executable(self, executable): RuntimeError: Internal: Seen floating point types of different precisions in %compare.4 = pred[3]{0} compare(f32[3]{0} %parameter.1, f64[3]{0} %broadcast.3), direction=EQ, but mixed precision is disallowed.
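For reference, a small sketch of the behavior the fix should restore; the values are illustrative and follow NumPy's documented `nan_to_num` semantics (NaN to 0, infinities to the dtype's extremes):
```python
import numpy as onp
import jax.numpy as np

x = np.array([1, 2, 3], dtype=np.float32)
print(np.nan_to_num(x))  # should be the identity: [1. 2. 3.]

y = np.array([onp.nan, onp.inf, -onp.inf], dtype=np.float32)
print(np.nan_to_num(y))  # nan -> 0, +inf -> float32 max, -inf -> float32 lowest
```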
2019-05-07T19:08:44
google/jax
690
google__jax-690
[ "696" ]
91d9a064547870ad3a8deb0060bbf3f1b479b13c
diff --git a/jax/lax/lax_parallel.py b/jax/lax/lax_parallel.py --- a/jax/lax/lax_parallel.py +++ b/jax/lax/lax_parallel.py @@ -20,8 +20,10 @@ from jax.core import Primitive from jax.interpreters import ad from jax.interpreters import parallel +from jax.interpreters import xla from jax.interpreters import pxla -from jax.util import partial +from jax.util import partial, unzip2 +from jax.lib import xla_bridge ### parallel traceables @@ -32,6 +34,12 @@ def psum(x, axis_name): def pmax(x, axis_name): return pmax_p.bind(x, axis_name=axis_name) +def pmin(x, axis_name): + return pmin_p.bind(x, axis_name=axis_name) + +def ppermute(x, axis_name, perm): + return ppermute_p.bind(x, axis_name=axis_name, perm=perm) + def pswapaxes(x, axis_name, axis): """Analogue to `np.swapaxes` involving a hidden axis. @@ -72,43 +80,75 @@ def _unbound_name_error(primitive_name, *args, **kwargs): def PmapPrimitive(name): prim = Primitive(name) prim.def_impl(partial(_unbound_name_error, name)) - prim.def_abstract_eval(lambda x, *args, **kwargs: x) + prim.def_abstract_eval(lambda x, *args, **params: x) return prim -def _psum_serial_pmap_rule(vals, axes): +def _allreduce_serial_pmap_rule(reducer, vals, axes): val, = vals axis, = axes - return lax._reduce_sum(val, [axis]), None + return reducer(val, [axis]), None -def _psum_transpose_rule(t, axis_name): - return [t] +def _allreduce_translation_rule(prim, c, val, device_groups): + dtype = c.GetShape(val).numpy_dtype() + scalar = xla_bridge.Shape.array_shape(dtype, ()) + computation = xla.primitive_computation(prim, scalar, scalar) + return c.AllReduce(val, computation, replica_groups=device_groups) -def _psum_parallel_translation_rule(c, val, device_groups): - if len(device_groups) > 1: - return c.CrossReplicaSum(val, device_groups) - else: - return c.CrossReplicaSum(val) psum_p = PmapPrimitive('psum') -psum_p.def_impl(partial(_unbound_name_error, 'psum')) -psum_p.def_abstract_eval(lambda x, *args, **kwargs: x) -parallel.serial_pmap_primitive_rules[psum_p] = _psum_serial_pmap_rule -pxla.parallel_translation_rules[psum_p] = _psum_parallel_translation_rule -ad.deflinear(psum_p, _psum_transpose_rule) parallel.defreducer(lax.reduce_sum_p, psum_p) +parallel.serial_pmap_primitive_rules[psum_p] = \ + partial(_allreduce_serial_pmap_rule, lax._reduce_sum) +# TODO(mattjj): replace translation rule when we update jaxlib +# pxla.parallel_translation_rules[psum_p] = \ +# partial(_allreduce_translation_rule, lax.add_p) +pxla.parallel_translation_rules[psum_p] = \ + lambda c, val, device_groups: c.CrossReplicaSum(val, device_groups) +ad.deflinear(psum_p, lambda t, axis_name: [t]) -def _pmax_serial_pmap_rule(vals, axes): - val, = vals - axis, = axes - return lax._reduce_max(val, [axis]), None - pmax_p = PmapPrimitive('pmax') -pmax_p.def_impl(partial(_unbound_name_error, 'pmax')) -pmax_p.def_abstract_eval(lambda x, *args, **kwargs: x) -parallel.serial_pmap_primitive_rules[pmax_p] = _pmax_serial_pmap_rule parallel.defreducer(lax.reduce_max_p, pmax_p) +parallel.serial_pmap_primitive_rules[pmax_p] = \ + partial(_allreduce_serial_pmap_rule, lax._reduce_max) +pxla.parallel_translation_rules[pmax_p] = \ + partial(_allreduce_translation_rule, lax.max_p) + + +pmin_p = PmapPrimitive('pmin') +parallel.defreducer(lax.reduce_min_p, pmin_p) +parallel.serial_pmap_primitive_rules[pmin_p] = \ + partial(_allreduce_serial_pmap_rule, lax._reduce_min) +pxla.parallel_translation_rules[pmin_p] = \ + partial(_allreduce_translation_rule, lax.min_p) + + +def _ppermute_translation_rule(c, x, device_groups, perm): + 
group_size = len(device_groups[0]) + if not all(0 <= i < group_size and 0 <= j < group_size for i, j in perm): + msg = ("ppermute permutation elements must take on values between 0 and " + "the group size {}, but got {}.") + raise ValueError(msg.format(group_size, perm)) + sources, dests = unzip2(perm) + if not (len(sources) == len(set(sources)) and len(dests) == len(set(dests))): + msg = "ppermute sources and destinations must be unique, got {}." + raise ValueError(msg.format(perm)) + + full_perm = [] + for grp in device_groups: + grp = list(sorted(grp)) + full_perm.extend((grp[src], grp[dst]) for src, dst in perm) + return c.CollectivePermute(x, full_perm) + +def _ppermute_transpose_rule(t, perm, axis_name): + sources, dests = unzip2(perm) + inverse_perm = zip(dests, srcs) + return ppermute(t, axis_name=axis_name, perm=inverse_perm) + +ppermute_p = PmapPrimitive('ppermute') +# ad.deflinear(ppermute_p, _ppermute_transpose_rule) # TODO(mattjj): test this +pxla.parallel_translation_rules[ppermute_p] = _ppermute_translation_rule def _pswapaxes_serial_pmap_rule(vals, axes, axis): diff --git a/jax/lib/xla_bridge.py b/jax/lib/xla_bridge.py --- a/jax/lib/xla_bridge.py +++ b/jax/lib/xla_bridge.py @@ -65,7 +65,7 @@ def _check_jaxlib_version(): FLAGS = flags.FLAGS flags.DEFINE_bool('jax_enable_x64', - strtobool(os.getenv('JAX_ENABLE_X64', "False")), + strtobool(os.getenv('JAX_ENABLE_X64', 'False')), 'Enable 64-bit types to be used.') flags.DEFINE_string( 'jax_xla_backend', 'xla', @@ -74,12 +74,11 @@ def _check_jaxlib_version(): 'jax_backend_target', 'local', 'Either "local" or "rpc:address" to connect to a remote service target.') flags.DEFINE_string( - 'jax_platform_name', '', - 'Platform name for XLA. The default is to attempt to use a ' - 'GPU if available, but fall back to CPU otherwise. To set ' - 'the platform manually, pass "cpu" for CPU or "gpu" for ' - 'GPU.') - + 'jax_platform_name', + os.getenv('JAX_PLATFORM_NAME', ''), + 'Platform name for XLA. The default is to attempt to use a GPU if ' + 'available, but fall back to CPU otherwise. To set the platform manually, ' + 'pass "cpu" for CPU or "gpu" for GPU.') def get_compile_options(num_replicas=None):
diff --git a/jax/test_util.py b/jax/test_util.py --- a/jax/test_util.py +++ b/jax/test_util.py @@ -20,6 +20,7 @@ import re import itertools as it import os +from unittest import SkipTest from absl.testing import absltest from absl.testing import parameterized @@ -33,15 +34,15 @@ from .config import flags from .util import partial from .tree_util import tree_multimap, tree_all, tree_map, tree_reduce +from .lib import xla_bridge # lbr tests placeholder FLAGS = flags.FLAGS flags.DEFINE_enum( - 'jax_test_dut', - 'cpu', - enum_values=['cpu', 'gpu', 'tpu'], + 'jax_test_dut', '', + enum_values=['', 'cpu', 'gpu', 'tpu'], help= 'Describes the device under test in case special consideration is required.' ) @@ -173,11 +174,11 @@ def skip_on_devices(*disabled_devices): def skip(test_method): @functools.wraps(test_method) def test_method_wrapper(self, *args, **kwargs): - device = FLAGS.jax_test_dut + device = FLAGS.jax_test_dut or xla_bridge.get_backend().platform if device in disabled_devices: test_name = getattr(test_method, '__name__', '[unknown test]') - return absltest.unittest.skip( - '{} not supported on {}.'.format(test_name, device.upper())) + raise SkipTest('{} not supported on {}.' + .format(test_name, device.upper())) return test_method(self, *args, **kwargs) return test_method_wrapper return skip @@ -191,9 +192,8 @@ def test_method_wrapper(self, *args, **kwargs): flag_value = getattr(FLAGS, flag_name) if flag_value == skip_value: test_name = getattr(test_method, '__name__', '[unknown test]') - return absltest.unittest.skip( - '{} not supported when FLAGS.{} is {}'.format( - test_name, flag_name, flag_value)) + raise SkipTest('{} not supported when FLAGS.{} is {}' + .format(test_name, flag_name, flag_value)) return test_method(self, *args, **kwargs) return test_method_wrapper return skip diff --git a/tests/pmap_test.py b/tests/pmap_test.py --- a/tests/pmap_test.py +++ b/tests/pmap_test.py @@ -280,6 +280,110 @@ def testShardedDeviceTuple(self): w = jit(lambda x: list(x)[0])(y) self.assertAllClose(w, x, check_dtypes=False) + @jtu.skip_on_devices("cpu", "gpu") + def testCollectivePermute(self): + device_count = xla_bridge.device_count() + rotation = [(i, i + 1 % device_count) for i in range(device_count)] + f = lambda x: lax.ppermute(x, perm=rotation, axis_name='i') + f = pmap(f, 'i') + + x = np.arange(4 * device_count).reshape((device_count, 4)) + ans = f(x) + expected = onp.roll(x, shift=1, axis=0) + self.assertAllClose(ans, expected, check_dtypes=False) + + @jtu.skip_on_devices("cpu", "gpu") + def testRule30(self): + # This is a test of collective_permute implementing a simple halo exchange + # to run a rule 30 simulation: https://en.wikipedia.org/wiki/Rule_30 + # Halo exchange should be useful in spatially-sharded convolutions and in + # other simulations. 
+ device_count = xla_bridge.device_count() + + def send_right(x, axis_name): + left_perm = [(i, (i + 1) % device_count) for i in range(device_count)] + return lax.ppermute(x, perm=left_perm, axis_name=axis_name) + + def send_left(x, axis_name): + left_perm = [((i + 1) % device_count, i) for i in range(device_count)] + return lax.ppermute(x, perm=left_perm, axis_name=axis_name) + + def update_board(board): + left = board[:-2] + right = board[2:] + center = board[1:-1] + return lax.bitwise_xor(left, lax.bitwise_or(center, right)) + + @partial(pmap, axis_name='i') + def step(board_slice): + left, right = board_slice[:1], board_slice[-1:] + right, left = send_left(left, 'i'), send_right(right, 'i') + enlarged_board_slice = np.concatenate([left, board_slice, right]) + return update_board(enlarged_board_slice) + + board = onp.zeros(40, dtype=bool) + board[board.shape[0] // 2] = True + reshaped_board = board.reshape((device_count, -1)) + + boards = [] + def print_board(board): + boards.append(''.join('*' if x else ' ' for x in board.ravel())) + + print_board(reshaped_board) + for _ in range(20): + reshaped_board = step(reshaped_board) + print_board(reshaped_board) + + ans = '\n'.join(boards) + expected = '\n'.join(( + ' * ', + ' *** ', + ' ** * ', + ' ** **** ', + ' ** * * ', + ' ** **** *** ', + ' ** * * * ', + ' ** **** ****** ', + ' ** * *** * ', + ' ** **** ** * *** ', + ' ** * * **** ** * ', + ' ** **** ** * * **** ', + ' ** * *** ** ** * * ', + ' ** **** ** *** *** ** *** ', + ' ** * * *** * *** * * ', + ' ** **** ** * * ***** ******* ', + ' ** * *** **** * *** * ', + ' ** **** ** *** ** ** * *** ', + ' ** * * *** * ** *** **** ** * ', + ' ** **** ** * ****** * * *** ****', + ' * * *** **** **** *** ** * ', + )) + + print(ans) + self.assertEqual(ans, expected) + + @jtu.skip_on_devices("cpu", "gpu") + def testReduceMax(self): + f = pmap(lambda x: x - lax.pmax(x, 'i'), axis_name='i') + + shape = (xla_bridge.device_count(), 4) + x = onp.arange(prod(shape), dtype=onp.float32).reshape(shape) + expected = x - onp.max(x, 0) + + ans = f(x) + self.assertAllClose(ans, expected, check_dtypes=False) + + @jtu.skip_on_devices("cpu", "gpu") + def testReduceMin(self): + f = pmap(lambda x: x - lax.pmin(x, 'i'), axis_name='i') + + shape = (xla_bridge.device_count(), 4) + x = onp.arange(prod(shape), dtype=onp.float32).reshape(shape) + expected = x - onp.min(x, 0) + + ans = f(x) + self.assertAllClose(ans, expected, check_dtypes=False) + if __name__ == '__main__': absltest.main()
Run tests with CPU mode when on CPU+GPU version

I have built the package with
```
python build/build.py --enable_march_native --enable_mkl_dnn --enable_cuda
pip install -e build
pip install -e .
```
I think this should build a CPU + GPU version. When I tried to run `pytest -n 2 tests examples -W ignore`, some of the tests failed. It appeared to me that some of the functions are only supported in CPU mode. I also tried `JAX_ENABLE_X64=1 JAX_NUM_GENERATED_CASES=100 pytest -n auto tests` and `JAX_ENABLE_X64=0 JAX_NUM_GENERATED_CASES=100 pytest -n auto tests`, but neither of them worked.
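A related sketch: the patch above wires up a `JAX_PLATFORM_NAME` environment variable, which lets a CPU+GPU build be pinned to the CPU backend when running CPU-only tests. Setting it has to happen before jax is imported; the snippet below is only an illustration of that flow:
```python
import os
os.environ["JAX_PLATFORM_NAME"] = "cpu"   # must be set before importing jax

from jax.lib import xla_bridge
print(xla_bridge.get_backend().platform)  # expected: "cpu"
```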
2019-05-09T22:48:32
google/jax
692
google__jax-692
[ "691" ]
91d9a064547870ad3a8deb0060bbf3f1b479b13c
diff --git a/jax/api.py b/jax/api.py --- a/jax/api.py +++ b/jax/api.py @@ -46,7 +46,7 @@ tree_map, tree_flatten, tree_unflatten, tree_structure, tree_transpose, leaf) from .util import (unzip2, unzip3, curry, partial, safe_map, safe_zip, - WrapHashably, prod) + WrapHashably, Hashable, prod) from .lib.xla_bridge import canonicalize_dtype, device_count from .abstract_arrays import ShapedArray from .interpreters import partial_eval as pe @@ -824,11 +824,19 @@ def _argnums_partial(f, dyn_argnums, args): dyn_argnums = (dyn_argnums,) else: dyn_argnums = tuple(dyn_argnums) - fixed_args = tuple([None if i in dyn_argnums else WrapHashably(arg) + fixed_args = tuple([None if i in dyn_argnums else _wrap_hashably(arg) for i, arg in enumerate(args)]) dyn_args = tuple(args[i] for i in dyn_argnums) return _argnums_partial_(f, dyn_argnums, fixed_args), dyn_args +def _wrap_hashably(arg): + try: + hash(arg) + except TypeError: + return WrapHashably(arg) + else: + return Hashable(arg) + @lu.transformation def _argnums_partial_(dyn_argnums, fixed_args, *dyn_args, **kwargs): args = [None if arg is None else arg.val for arg in fixed_args] diff --git a/jax/util.py b/jax/util.py --- a/jax/util.py +++ b/jax/util.py @@ -189,6 +189,8 @@ def prod(xs): class WrapHashably(object): + __slots__ = ["val"] + def __init__(self, val): self.val = val @@ -198,6 +200,18 @@ def __hash__(self): def __eq__(self, other): return self.val is other.val +class Hashable(object): + __slots__ = ["val"] + + def __init__(self, val): + self.val = val + + def __hash__(self): + return hash(self.val) + + def __eq__(self, other): + return self.val == other.val + def get_module_functions(module):
Regression in performance of samplers

With jax versions > 0.1.24, random samplers are slow. I think it is due to recent changes in how jit works with static_args/kwargs. A script to reproduce:
```
import time
from jax import random

t = time.time()
random.normal(random.PRNGKey(0), shape=(1,))
print(time.time() - t)

t = time.time()
random.normal(random.PRNGKey(0), shape=(1,))
print(time.time() - t)
```
which returns `0.12923526763916016` and `0.12221717834472656`. However, if we wrap these samplers in some function, then it is fast. For example,
```
def f():
    return random.normal(random.PRNGKey(0), shape=(1,))

t = time.time()
f()
print(time.time() - t)

t = time.time()
f()
print(time.time() - t)
```
will return `0.12787413597106934` and `0.0010831356048583984`. I think there is a small bug somewhere which forces the sampler to recompile. If this is expected behaviour, which function should we use to wrap these samplers so that they are not recompiled?

cc @neerajprad
Thanks so much for catching this! We will fix it asap. (Longer term we aim to set up a continuous benchmarking solution to avoid regressions.)
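A hypothetical regression check along the lines of the report above: wrap the sampler in a jitted function with a static shape argument and time the second call, which should hit the compilation cache rather than retrace:
```python
import time
from functools import partial
from jax import jit, random

@partial(jit, static_argnums=(1,))
def sample(key, shape):
  return random.normal(key, shape)

key = random.PRNGKey(0)
sample(key, (1,))        # first call traces and compiles
t = time.time()
sample(key, (1,))        # second call should hit the cache
print(time.time() - t)   # expected to be on the order of 1e-3 s, not 1e-1 s
```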
2019-05-10T03:01:20
google/jax
699
google__jax-699
[ "693" ]
620f63df1b665cbd6fe08b7034f06cedb515b27d
diff --git a/jax/lax_linalg.py b/jax/lax_linalg.py --- a/jax/lax_linalg.py +++ b/jax/lax_linalg.py @@ -143,14 +143,8 @@ def eigh_abstract_eval(operand, lower): return core.AbstractTuple((v, w)) def eigh_cpu_translation_rule(c, operand, lower): - shape = c.GetShape(operand) - dtype = shape.element_type().type - if len(shape.dimensions()) == 2 and dtype in _cpu_lapack_types: - out = lapack.jax_syevd(c, operand, lower=lower) - return c.Tuple(c.GetTupleElement(out, 0), c.GetTupleElement(out, 1)) - else: - raise NotImplementedError( - "Only unbatched eigendecomposition is implemented on CPU") + out = lapack.jax_syevd(c, operand, lower=lower) + return c.Tuple(c.GetTupleElement(out, 0), c.GetTupleElement(out, 1)) def eigh_jvp_rule(primals, tangents, lower): # Derivative for eigh in the simplest case of distinct eigenvalues. @@ -176,12 +170,19 @@ def eigh_jvp_rule(primals, tangents, lower): dw = np.diagonal(vdag_adot_v) return core.pack((v, w)), core.pack((dv, dw)) +def eigh_batching_rule(batched_args, batch_dims, lower): + x, = batched_args + bd, = batch_dims + x = batching.bdim_at_front(x, bd) + return eigh_p.bind(x, lower=lower), 0 + eigh_p = Primitive('eigh') eigh_p.def_impl(eigh_impl) eigh_p.def_abstract_eval(eigh_abstract_eval) xla.translations[eigh_p] = eigh_translation_rule ad.primitive_jvps[eigh_p] = eigh_jvp_rule xla.backend_specific_translations['cpu'][eigh_p] = eigh_cpu_translation_rule +batching.primitive_batchers[eigh_p] = eigh_batching_rule @@ -430,7 +431,6 @@ def qr_batching_rule(batched_args, batch_dims, full_matrices): x, = batched_args bd, = batch_dims x = batching.bdim_at_front(x, bd) - q, r = qr(x, full_matrices=full_matrices) return qr_p.bind(x, full_matrices=full_matrices), 0 qr_p = Primitive('qr')
diff --git a/tests/linalg_test.py b/tests/linalg_test.py --- a/tests/linalg_test.py +++ b/tests/linalg_test.py @@ -204,6 +204,23 @@ def testEighGradVectorComplex(self, shape, dtype, rng, lower, eps): onp.linalg.norm(onp.abs(new_w*(v+dv)), axis=0) ) < RTOL + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": + "_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)), + "shape": shape, "dtype": dtype, "rng": rng} + for shape in [(1, 1), (4, 4), (5, 5)] + for dtype in float_types() + complex_types() + for rng in [jtu.rand_default()])) + @jtu.skip_on_devices("gpu", "tpu") + def testEighBatching(self, shape, dtype, rng): + self.skipTest("Test disabled until Jaxlib 0.1.15 is released") # TODO(phawkins) + shape = (10,) + shape + args = rng(shape, dtype) + args = (args + onp.conj(T(args))) / 2 + ws, vs = vmap(jsp.linalg.eigh)(args) + self.assertTrue(onp.all(onp.linalg.norm( + onp.matmul(args, vs) - ws[..., None, :] * vs) < 1e-3)) + @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_shape={}_ord={}_axis={}_keepdims={}".format( jtu.format_shape_dtype_string(shape, dtype), ord, axis, keepdims), @@ -444,7 +461,6 @@ def testLuGrad(self, shape, dtype, rng): @jtu.skip_on_devices("gpu", "tpu") def testLuBatching(self): - self.skipTest("Test disabled until Jaxlib 0.1.14 is released") shape = (4, 5) dtype = np.float32 rng = jtu.rand_default()
add batching (vmap) rule for eigh
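What the requested rule enables, sketched with an illustrative batch of symmetric matrices:
```python
import numpy as onp
import jax.numpy as np
from jax import vmap

a = onp.random.RandomState(0).randn(10, 4, 4).astype(onp.float32)
a = (a + onp.swapaxes(a, -1, -2)) / 2   # symmetrize each matrix in the batch
ws, vs = vmap(np.linalg.eigh)(a)        # needs the eigh batching rule from this PR
print(ws.shape, vs.shape)               # (10, 4) (10, 4, 4)
```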
2019-05-10T19:17:04
google/jax
703
google__jax-703
[ "702" ]
78c804772e47f98721acd31a7e0953504aa504a8
diff --git a/build/build.py b/build/build.py --- a/build/build.py +++ b/build/build.py @@ -60,19 +60,19 @@ def get_python_bin_path(python_bin_path_flag): # Bazel -BAZEL_BASE_URI = "https://github.com/bazelbuild/bazel/releases/download/0.25.1/" +BAZEL_BASE_URI = "https://github.com/bazelbuild/bazel/releases/download/0.24.1/" BazelPackage = collections.namedtuple("BazelPackage", ["file", "sha256"]) bazel_packages = { "Linux": BazelPackage( - file="bazel-0.25.1-linux-x86_64", + file="bazel-0.24.1-linux-x86_64", sha256= - "9fe5a74fa319e771b0328b42f79bf00496592fd9c0989247b4dd322ce9a082e9"), + "e18e2877e18a447eb5d94f5efbec375366d82af6443c6a83a93c62657a7b1c32"), "Darwin": BazelPackage( - file="bazel-0.25.1-darwin-x86_64", + file="bazel-0.24.1-darwin-x86_64", sha256= - "436e34cf8cf47f43620a70927e3fcdb1f23659e3e0ae22e42ff8b6d8b7626cfa"), + "cf763752550050d117e03659aaa6ccd6f97da1f983a6029300a497fdaeaaec46"), } @@ -140,8 +140,8 @@ def get_bazel_path(bazel_path_flag): sys.exit(-1) -def check_bazel_version(bazel_path, min_version): - """Checks Bazel's version is at least `min_version`.""" +def check_bazel_version(bazel_path, min_version, max_version): + """Checks Bazel's version is in the range [`min_version`, `max_version`).""" version_output = shell([bazel_path, "--bazelrc=/dev/null", "version"]) match = re.search("Build label: *([0-9\\.]+)[^0-9\\.]", version_output) if match is None: @@ -155,6 +155,12 @@ def check_bazel_version(bazel_path, min_version): print("Outdated bazel revision (>= {} required, found {})".format( min_version, version)) sys.exit(0) + if max_version is not None: + max_ints = [int(x) for x in max_version.split(".")] + if actual_ints >= max_ints: + print("Please downgrade your bazel revision to build JAX (>= {} and < {}" + " required, found {})".format(min_version, max_version, version)) + sys.exit(0) BAZELRC_TEMPLATE = """ @@ -283,7 +289,7 @@ def main(): # Find a working Bazel. bazel_path = get_bazel_path(args.bazel_path) - check_bazel_version(bazel_path, "0.25.0") + check_bazel_version(bazel_path, min_version="0.24.0", max_version="0.25.0") print("Bazel binary path: {}".format(bazel_path)) python_bin_path = get_python_bin_path(args.python_bin_path)
Build on linux, got "@local_config_cuda//crosstool:cc-compiler-windows" error Built jax on master 78c804772e47f98721acd31a7e0953504aa504a8 gives ``` ➜ jax git:(master) python build/build.py --enable_march_native --enable_mkl_dnn --enable_cuda _ _ __ __ | | / \ \ \/ / _ | |/ _ \ \ / | |_| / ___ \/ \ \___/_/ \/_/\_\ Bazel binary path: /home/titan/bin/bazel Python binary path: /home/titan/frameworks/miniconda3/envs/ml/bin/python MKL-DNN enabled: yes -march=native: yes CUDA enabled: yes Building XLA and installing it in the jaxlib source tree... INFO: An error occurred during the fetch of repository 'local_config_python' INFO: Call stack for the definition of repository 'local_config_python': - /home/titan/.cache/bazel/_bazel_titan/0fa25a5a75e40e18875f78640c2a6d59/external/org_tensorflow/tensorflow/workspace.bzl:69:5 - /home/titan/resources/oss/machine-learning/Google/jax/WORKSPACE:41:1 ERROR: /home/titan/.cache/bazel/_bazel_titan/0fa25a5a75e40e18875f78640c2a6d59/external/local_config_cuda/crosstool/BUILD:64:1: in cc_toolchain rule @local_config_cuda//crosstool:cc-compiler-windows: attributes 'cpu' and 'compiler' have been deprecated, please remove them. See https://github.com/bazelbuild/bazel/issues/7075 for details. ERROR: Analysis of target '//build:install_xla_in_source_tree' failed; build aborted: Analysis of target '@local_config_cuda//crosstool:cc-compiler-windows' failed; build aborted INFO: Elapsed time: 0.095s INFO: 0 processes. FAILED: Build did NOT complete successfully (0 packages loaded, 1 target configured) FAILED: Build did NOT complete successfully (0 packages loaded, 1 target configured) Traceback (most recent call last): File "build/build.py", line 324, in <module> main() File "build/build.py", line 320, in main [":install_xla_in_source_tree", os.getcwd()]) File "build/build.py", line 50, in shell output = subprocess.check_output(cmd) File "/home/titan/frameworks/miniconda3/envs/ml/lib/python3.7/subprocess.py", line 395, in check_output **kwargs).stdout File "/home/titan/frameworks/miniconda3/envs/ml/lib/python3.7/subprocess.py", line 487, in run output=stdout, stderr=stderr) subprocess.CalledProcessError: Command '['/home/titan/bin/bazel', 'run', '--verbose_failures=true', '--config=opt', '--config=mkl_open_source_only', '--config=cuda', ':install_xla_in_source_tree', '/home/titan/resources/oss/machine-learning/Google/jax/build']' returned non-zero exit status 1. ``` I have successfully built jax before without encounter this error.
If I'm parsing the log correctly, it's picking up your own version of bazel (/home/titan/bin/bazel), rather than one it automatically downloads. What version of bazel is that? This is my bazel version: ``` Build label: 0.25.0 Build target: bazel-out/k8-opt/bin/src/main/java/com/google/devtools/build/lib/bazel/BazelServer_deploy.jar Build time: Wed May 1 21:45:01 2019 (1556747101) Build timestamp: 1556747101 Build timestamp as int: 1556747101 ``` Also try to remove my local bazel to let jax use a download bazel, still an issue: ``` jax git:(master) python build/build.py --enable_march_native --enable_mkl_dnn --enable_cuda _ _ __ __ | | / \ \ \/ / _ | |/ _ \ \ / | |_| / ___ \/ \ \___/_/ \/_/\_\ Downloading bazel from: https://github.com/bazelbuild/bazel/releases/download/0.25.1/bazel-0.25.1-linux-x86_64 bazel-0.25.1-linux-x86_64 [########################################] 100% Extracting Bazel installation... Starting local Bazel server and connecting to it... Bazel binary path: ./bazel-0.25.1-linux-x86_64 Python binary path: /home/titan/frameworks/miniconda3/envs/ml/bin/python MKL-DNN enabled: yes -march=native: yes CUDA enabled: yes Building XLA and installing it in the jaxlib source tree... ERROR: /home/titan/.cache/bazel/_bazel_titan/0fa25a5a75e40e18875f78640c2a6d59/external/local_config_cuda/crosstool/BUILD:34:1: in cc_toolchain rule @local_config_cuda//crosstool:cc-compiler-local: attributes 'cpu' and 'compiler' have been deprecated, please remove them. See https://github.com/bazelbuild/bazel/issues/7075 for details. INFO: An error occurred during the fetch of repository 'local_config_python' INFO: Call stack for the definition of repository 'local_config_python': - /home/titan/.cache/bazel/_bazel_titan/0fa25a5a75e40e18875f78640c2a6d59/external/org_tensorflow/tensorflow/workspace.bzl:69:5 - /home/titan/resources/oss/machine-learning/Google/jax/WORKSPACE:41:1 INFO: An error occurred during the fetch of repository 'cython' INFO: Call stack for the definition of repository 'cython': - /home/titan/.cache/bazel/_bazel_titan/0fa25a5a75e40e18875f78640c2a6d59/external/org_tensorflow/tensorflow/workspace.bzl:727:5 - /home/titan/resources/oss/machine-learning/Google/jax/WORKSPACE:41:1 INFO: Repository 'cython' used the following cache hits instead of downloading the corresponding file. * Hash 'bccc9aa050ea02595b2440188813b936eaf345e85fb9692790cecfe095cf91aa' for http://mirror.tensorflow.org/github.com/cython/cython/archive/0.28.4.tar.gz If the definition of 'cython' was updated, verify that the hashes were also updated. ERROR: Analysis of target '//build:install_xla_in_source_tree' failed; build aborted: Analysis of target '@local_config_cuda//crosstool:cc-compiler-local' failed; build aborted INFO: Elapsed time: 1.291s INFO: 0 processes. FAILED: Build did NOT complete successfully (27 packages loaded, 76 targets configured) FAILED: Build did NOT complete successfully (27 packages loaded, 76 targets configured) currently loading: @org_tensorflow//tensorflow/core ... 
(5 packages) Traceback (most recent call last): File "build/build.py", line 324, in <module> main() File "build/build.py", line 320, in main [":install_xla_in_source_tree", os.getcwd()]) File "build/build.py", line 50, in shell output = subprocess.check_output(cmd) File "/home/titan/frameworks/miniconda3/envs/ml/lib/python3.6/subprocess.py", line 336, in check_output **kwargs).stdout File "/home/titan/frameworks/miniconda3/envs/ml/lib/python3.6/subprocess.py", line 418, in run output=stdout, stderr=stderr) subprocess.CalledProcessError: Command '['./bazel-0.25.1-linux-x86_64', 'run', '--verbose_failures=true', '--config=opt', '--config=mkl_open_source_only', '--config=cuda', ':install_xla_in_source_tree', '/home/titan/resources/oss/machine-learning/Google/jax/build']' returned non-zero exit status 1. ``` I also tried to clean `.cache/bazel`, still the same error as above. Thanks for raising this, and for looking into it so thoroughly! Sure looks like jaxlib's building process is broken. I'm not very familiar with bazel, but [this recent pr by @hawkinsp](https://github.com/google/jax/pull/698/files) might have some clues. Presumably the build worked then... 7f9e1809bbd035b1998268cde68c1b9567c26a21 seems to be the issue. Hard reset to 60918077c285a787c697b694d8d7bb04da56a9e5 works for me.
2019-05-12T13:26:39
google/jax
707
google__jax-707
[ "639" ]
8c8314f7a304f06a1e95fa26a998dada42012a5b
diff --git a/jax/lax_linalg.py b/jax/lax_linalg.py --- a/jax/lax_linalg.py +++ b/jax/lax_linalg.py @@ -39,6 +39,10 @@ def cholesky(x, symmetrize_input=True): x = symmetrize(x) return np.tril(cholesky_p.bind(x)) +def eig(x): + w, vl, vr = eig_p.bind(x) + return w, vl, vr + def eigh(x, lower=True, symmetrize_input=True): if symmetrize_input: x = symmetrize(x) @@ -117,6 +121,47 @@ def cholesky_cpu_translation_rule(c, operand): xla.backend_specific_translations['cpu'][cholesky_p] = cholesky_cpu_translation_rule +# Asymmetric eigendecomposition + +def eig_impl(operand): + return xla.apply_primitive(eig_p, operand) + +def eig_translation_rule(c, operand): + raise NotImplementedError( + "Nonsymmetric eigendecomposition is only implemented on the CPU backend") + +def eig_abstract_eval(operand): + if isinstance(operand, ShapedArray): + if operand.ndim < 2 or operand.shape[-2] != operand.shape[-1]: + raise ValueError("Argument to nonsymmetric eigendecomposition must have " + "shape [..., n, n], got shape {}".format(operand.shape)) + + batch_dims = operand.shape[:-2] + n = operand.shape[-1] + vl = vr = ShapedArray(batch_dims + (n, n), operand.dtype) + w = ShapedArray(batch_dims + (n,), lax.lax._complex_basetype(operand.dtype)) + else: + w = vl = vr = operand + return core.AbstractTuple((w, vl, vr)) + +def eig_cpu_translation_rule(c, operand): + out = lapack.jax_geev(c, operand) + return c.Tuple(c.GetTupleElement(out, 0), c.GetTupleElement(out, 1), + c.GetTupleElement(out, 2)) + +def eig_batching_rule(batched_args, batch_dims): + x, = batched_args + bd, = batch_dims + x = batching.bdim_at_front(x, bd) + return eig_p.bind(x), 0 + +eig_p = Primitive('eig') +eig_p.def_impl(eig_impl) +eig_p.def_abstract_eval(eig_abstract_eval) +xla.translations[eig_p] = eig_translation_rule +xla.backend_specific_translations['cpu'][eig_p] = eig_cpu_translation_rule +batching.primitive_batchers[eig_p] = eig_batching_rule + # Symmetric/Hermitian eigendecomposition @@ -132,7 +177,8 @@ def eigh_abstract_eval(operand, lower): if isinstance(operand, ShapedArray): if operand.ndim < 2 or operand.shape[-2] != operand.shape[-1]: raise ValueError( - "Argument to symmetric eigendecomposition must have shape [..., n, n]") + "Argument to symmetric eigendecomposition must have shape [..., n, n]," + "got shape {}".format(operand.shape)) batch_dims = operand.shape[:-2] n = operand.shape[-1] diff --git a/jax/numpy/linalg.py b/jax/numpy/linalg.py --- a/jax/numpy/linalg.py +++ b/jax/numpy/linalg.py @@ -92,6 +92,13 @@ def det(a): return sign * np.exp(logdet) +@_wraps(onp.linalg.eig) +def eig(a): + a = _promote_arg_dtypes(np.asarray(a)) + w, vl, vr = lax_linalg.eig(a) + return w, vr + + @_wraps(onp.linalg.eigh) def eigh(a, UPLO=None, symmetrize_input=True): if UPLO is None or UPLO == "L":
diff --git a/tests/linalg_test.py b/tests/linalg_test.py --- a/tests/linalg_test.py +++ b/tests/linalg_test.py @@ -104,6 +104,49 @@ def testSlogdet(self, n, dtype, rng): check_dtypes=True, tol=1e-3) self._CompileAndCheck(np.linalg.slogdet, args_maker, check_dtypes=True) + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": "_shape={}".format( + jtu.format_shape_dtype_string(shape, dtype)), + "shape": shape, "dtype": dtype, "rng": rng} + for shape in [(0, 0), (4, 4), (5, 5), (50, 50), (2, 6, 6)] + for dtype in float_types() + complex_types() + for rng in [jtu.rand_default()])) + # TODO(phawkins): enable when there is an eigendecomposition implementation + # for GPU/TPU. + @jtu.skip_on_devices("gpu", "tpu") + def testEig(self, shape, dtype, rng): + self.skipTest("Test disabled until Jaxlib 0.1.15 is released") # TODO(phawkins) + n = shape[-1] + args_maker = lambda: [rng(shape, dtype)] + + # Norm, adjusted for dimension and type. + def norm(x): + norm = onp.linalg.norm(x, axis=(-2, -1)) + return norm / ((n + 1) * onp.finfo(dtype).eps) + + a, = args_maker() + w, v = np.linalg.eig(a) + self.assertTrue(onp.all(norm(onp.matmul(a, v) - w[..., None, :] * v) < 100)) + + self._CompileAndCheck(partial(np.linalg.eig), args_maker, + check_dtypes=True, rtol=1e-3) + + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": + "_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)), + "shape": shape, "dtype": dtype, "rng": rng} + for shape in [(1, 1), (4, 4), (5, 5)] + for dtype in float_types() + complex_types() + for rng in [jtu.rand_default()])) + @jtu.skip_on_devices("gpu", "tpu") + def testEigBatching(self, shape, dtype, rng): + self.skipTest("Test disabled until Jaxlib 0.1.15 is released") # TODO(phawkins) + shape = (10,) + shape + args = rng(shape, dtype) + ws, vs = vmap(np.linalg.eig)(args) + self.assertTrue(onp.all(onp.linalg.norm( + onp.matmul(args, vs) - ws[..., None, :] * vs) < 1e-3)) + @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_n={}_lower={}".format( jtu.format_shape_dtype_string((n,n), dtype), lower),
Feature request for computing eigenvalues of a non-Hermitian matrix. I'm basically looking for np.linalg.eig to be implemented in jax or jax.lax. I personally am looking for a speedup in the computation, and am not interested in the derivative at this time. Thanks!
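A sketch of the requested API as NumPy defines it; the matrix is illustrative, and per the patch above the decomposition is CPU-only and not yet differentiable:
```python
import numpy as onp
import jax.numpy as np

a = onp.array([[0., 1.], [-2., -3.]], dtype=onp.float32)
w, v = np.linalg.eig(a)          # eigenvalues and right eigenvectors
print(w)                         # eigenvalues -1 and -2 (order may vary)
print(np.matmul(a, v) - w * v)   # each column's residual should be ~0
```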
2019-05-13T20:01:22
google/jax
720
google__jax-720
[ "505" ]
c54ba8444d51cde3574fdab29970de1850a50c10
diff --git a/jax/lax/__init__.py b/jax/lax/__init__.py --- a/jax/lax/__init__.py +++ b/jax/lax/__init__.py @@ -19,4 +19,5 @@ _reduce_window_min, _reduce_window_prod, _float, _complex, _input_dtype, _const, _eq_meet, _safe_mul, _abstractify) from .lax_control_flow import * +from .lax_fft import * from .lax_parallel import * diff --git a/jax/lax/lax_fft.py b/jax/lax/lax_fft.py new file mode 100644 --- /dev/null +++ b/jax/lax/lax_fft.py @@ -0,0 +1,48 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from jax.abstract_arrays import ShapedArray +from jax.core import Primitive +from jax.interpreters import xla +from ..interpreters import ad + + +def fft(x, fft_type, fft_lengths=None): + if fft_lengths is None: + fft_lengths = x.shape + else: + fft_lengths = tuple(fft_lengths) + return fft_p.bind(x, fft_type=fft_type, fft_lengths=fft_lengths) + +def fft_impl(x, fft_type, fft_lengths): + return xla.apply_primitive(fft_p, x, fft_type=fft_type, fft_lengths=fft_lengths) + +def fft_abstract_eval(x, fft_type, fft_lengths): + return ShapedArray(x.shape, x.dtype) + +def fft_translation_rule(c, x, fft_type, fft_lengths): + return c.Fft(x, fft_type, fft_lengths) + +def fft_transpose_rule(t, fft_type, fft_lengths): + return fft(t, fft_type, fft_lengths), + +fft_p = Primitive('fft') +fft_p.def_impl(fft_impl) +fft_p.def_abstract_eval(fft_abstract_eval) +xla.translations[fft_p] = fft_translation_rule +ad.deflinear(fft_p, fft_transpose_rule) diff --git a/jax/numpy/fft.py b/jax/numpy/fft.py --- a/jax/numpy/fft.py +++ b/jax/numpy/fft.py @@ -18,8 +18,56 @@ import numpy as onp +from .. import lax +from ..lib.xla_bridge import xla_client, canonicalize_dtype from ..util import get_module_functions from .lax_numpy import _not_implemented +from .lax_numpy import _wraps +from . import lax_numpy as np + + +def _promote_to_complex(arg): + dtype = np.result_type(arg, onp.complex64) + # XLA's FFT op only supports C64. + if dtype == onp.complex128: + dtype = onp.complex64 + return lax.convert_element_type(arg, dtype) + +@_wraps(onp.fft.fftn) +def fftn(a, s=None, axes=None, norm=None): + # TODO(skye): implement padding/cropping based on 's'. + if s is not None: + raise NotImplementedError("jax.np.fftn only supports s=None, got %s" % s) + if norm is not None: + raise NotImplementedError("jax.np.fftn only supports norm=None, got %s" % norm) + if s is not None and axes is not None and len(s) != len(axes): + # Same error as numpy. + raise ValueError("Shape and axes have different lengths.") + + orig_axes = axes + if axes is None: + if s is None: + axes = range(a.ndim) + else: + axes = range(a.ndim - len(s), a.ndim) + + # XLA doesn't support 0-rank axes. + if len(axes) == 0: + return a + + if len(axes) != len(set(axes)): + raise ValueError( + "jax.np.fftn does not support repeated axes. Got axes %s." 
% axes) + + if any(axis in range(a.ndim - 3) for axis in axes): + raise ValueError( + "jax.np.fftn only supports 1D, 2D, and 3D FFTs over the innermost axes." + " Got axes %s with input rank %s." % (orig_axes, a.ndim)) + + if s is None: + s = [a.shape[axis] for axis in axes] + a = _promote_to_complex(a) + return lax.fft(a, xla_client.FftType.FFT, s) for func in get_module_functions(onp.fft): if func.__name__ not in globals():
diff --git a/tests/fft_test.py b/tests/fft_test.py new file mode 100644 --- /dev/null +++ b/tests/fft_test.py @@ -0,0 +1,90 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as onp + +from absl.testing import absltest +from absl.testing import parameterized + +from jax import numpy as np +from jax import test_util as jtu + +float_dtypes = [onp.float32, onp.float64] +complex_dtypes = [onp.complex64, onp.complex128] +inexact_dtypes = float_dtypes + complex_dtypes +int_dtypes = [onp.int32, onp.int64] +bool_dtypes = [onp.bool_] +all_dtypes = float_dtypes + complex_dtypes + int_dtypes + bool_dtypes + + +def _get_fftn_test_axes(shape): + axes = [[]] + ndims = len(shape) + # XLA's FFT op only supports up to 3 innermost dimensions. + if ndims <= 3: axes.append(None) + for naxes in range(1, min(ndims, 3) + 1): + axes.append(range(ndims - naxes, ndims)) + return axes + + +class FftTest(jtu.JaxTestCase): + + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": "_shape={}_axes={}".format( + jtu.format_shape_dtype_string(shape, dtype), axes), + "axes": axes, "shape": shape, "dtype": dtype, "rng": rng} + for rng in [jtu.rand_default()] + for dtype in all_dtypes + for shape in [(10,), (10, 10), (2, 3, 4), (2, 3, 4, 5)] + for axes in _get_fftn_test_axes(shape))) + def testFftn(self, shape, dtype, axes, rng): + args_maker = lambda: (rng(shape, dtype),) + np_fn = lambda a: np.fft.fftn(a, axes=axes) + onp_fn = lambda a: onp.fft.fftn(a, axes=axes) + self._CheckAgainstNumpy(onp_fn, np_fn, args_maker, check_dtypes=True, + tol=1e-4) + self._CompileAndCheck(np_fn, args_maker, check_dtypes=True) + # Test gradient for differentiable types. + if dtype in inexact_dtypes: + # TODO(skye): can we be more precise? + tol = 1e-1 + jtu.check_grads(np_fn, args_maker(), order=1, atol=tol, rtol=tol) + jtu.check_grads(np_fn, args_maker(), order=2, atol=tol, rtol=tol) + + def testFftnErrors(self): + rng = jtu.rand_default() + self.assertRaisesRegexp( + ValueError, + "jax.np.fftn only supports 1D, 2D, and 3D FFTs over the innermost axes. " + "Got axes None with input rank 4.", + lambda: np.fft.fftn(rng([2, 3, 4, 5], dtype=onp.float64), axes=None)) + self.assertRaisesRegexp( + ValueError, + "jax.np.fftn only supports 1D, 2D, and 3D FFTs over the innermost axes. " + "Got axes \[0\] with input rank 4.", + lambda: np.fft.fftn(rng([2, 3, 4, 5], dtype=onp.float64), axes=[0])) + self.assertRaisesRegexp( + ValueError, + "jax.np.fftn does not support repeated axes. Got axes \[1, 1\].", + lambda: np.fft.fftn(rng([2, 3], dtype=onp.float64), axes=[1, 1])) + self.assertRaises( + IndexError, lambda: np.fft.fftn(rng([2, 3], dtype=onp.float64), axes=[2])) + + +if __name__ == "__main__": + absltest.main()
numpy fft not yet implemented. As a fairly fundamental operation, I was surprised to find out this wasn't implemented: NotImplementedError: Numpy function <function fft at 0x7fdb56275510> not yet implemented. In the short term, is there a suggested workaround? I suppose I could use a full DFT matrix.
We haven't implemented any FFT functionality yet. It's probably best that we just implement it. We're trying to be fairly user-driven in the things that we add. Do you just need to compute FFTs (easy enough), or do you also need to differentiate through them? Being able to differentiate through the fft and ifft would be amazing, in particular for end-to-end optimization of frequency-domain audio processing algorithms. I would like to second this functionality; it will make a lot of signal processing users happy. I started working on this earlier and got distracted by other things, but I'll try to get something up for review soon!
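Until `np.fft` landed, the reporter's own idea of a full DFT matrix was a workable, differentiable stopgap. A minimal sketch of that workaround follows; the helper names `dft_matrix` and `fft_via_matmul` are my own illustration and are not part of this PR, and the dense matrix approach is only practical for small sizes.

```python
import numpy as onp
import jax.numpy as np

def dft_matrix(n):
  # Dense n-by-n DFT matrix: W[j, k] = exp(-2*pi*i*j*k / n).
  # O(n^2) memory and compute, so this is only a stopgap for small n.
  k = onp.arange(n)
  return onp.exp(-2j * onp.pi * onp.outer(k, k) / n).astype(onp.complex64)

def fft_via_matmul(x):
  # 1D DFT over the last axis expressed as a plain matrix multiply, which
  # makes it jit-able and differentiable with existing jax machinery.
  return np.dot(x, dft_matrix(x.shape[-1]))
```

Because the whole computation is a matrix multiply, `grad` and `jit` compose with it directly, which covers the differentiation use case raised in the discussion.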
2019-05-15T22:40:17
google/jax
730
google__jax-730
[ "728" ]
e3d4213e6d0c861a71f11fe9c090d9c7aa13a5de
diff --git a/jax/lax/lax.py b/jax/lax/lax.py --- a/jax/lax/lax.py +++ b/jax/lax/lax.py @@ -3646,7 +3646,7 @@ class _IotaConstant(xla.DeviceConstant): def __init__(self, dtype, shape, axis): self.shape = shape - self.dtype = dtype + self.dtype = onp.dtype(dtype) self.ndim = len(shape) self.size = prod(shape) self._npy_value = None @@ -3675,7 +3675,7 @@ class _EyeConstant(xla.DeviceConstant): def __init__(self, shape, axes, dtype): self.shape = shape - self.dtype = dtype + self.dtype = onp.dtype(dtype) self.ndim = len(shape) self.size = prod(shape) self._npy_value = None @@ -3700,7 +3700,7 @@ def constant_handler(c, diag_const, canonicalize_types=True): else: etype = xla_bridge.dtype_to_etype_exact(diag_const.dtype) etype = xla_bridge.dtype_to_etype(diag_const.dtype) - iotas = [c.BroadcastedIota(onp.bool_, diag_const.shape, axis) + iotas = [c.BroadcastedIota(onp.uint32, diag_const.shape, axis) for axis in diag_const.axes] eyes = [c.Eq(i1, i2) for i1, i2 in zip(iotas[:-1], iotas[1:])] return c.ConvertElementType(_reduce(c.And, eyes), etype)
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -1481,6 +1481,10 @@ def testArange(self): self.assertFalse(type(lnp.arange(77)) == type(onp.arange(77))) self.assertTrue(type(lnp.arange(77)) == type(lax.iota(onp.int32, 77))) + def testIssue728(self): + assert lnp.allclose(lnp.eye(5000), onp.eye(5000)) + self.assertEqual(0, onp.sum(lnp.eye(1050) - onp.eye(1050))) + if __name__ == "__main__": absltest.main() diff --git a/tests/lax_test.py b/tests/lax_test.py --- a/tests/lax_test.py +++ b/tests/lax_test.py @@ -1343,13 +1343,16 @@ def _CheckDeviceConstant(self, make_const, expected): self.assertAllClose(argument_result, expected, check_dtypes=True) self.assertAllClose(jit_result, expected, check_dtypes=True) + # ensure repr doesn't crash + repr(make_const()) + @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_{}_fill={}".format( jtu.format_shape_dtype_string(shape, dtype) if dtype else shape, fill_value), "shape": shape, "dtype": dtype, "fill_value": fill_value} for dtype in itertools.chain(default_dtypes, [None]) - for shape in [(), (3,), (2, 3), (2, 3, 4)] + for shape in [(), (3,), (2, 3), (2, 3, 4), (1001, 1001)] for fill_value in [0, 1, onp.pi])) def testFilledConstant(self, shape, fill_value, dtype): make_const = lambda: lax.full(shape, fill_value, dtype) @@ -1361,7 +1364,7 @@ def testFilledConstant(self, shape, fill_value, dtype): jtu.format_shape_dtype_string(shape, dtype), dimension), "shape": shape, "dtype": dtype, "dimension": dimension} for dtype in default_dtypes - for shape in [(), (3,), (2, 3), (2, 3, 4)] + for shape in [(), (3,), (2, 3), (2, 3, 4), (1001, 1001), (101, 101, 101)] for dimension in range(len(shape)))) def testIotaConstant(self, dtype, shape, dimension): make_const = lambda: lax.broadcasted_iota(dtype, shape, dimension) @@ -1386,6 +1389,7 @@ def testIotaConstant(self, dtype, shape, dimension): [(2, 3, 4), (0, 1, 2)], [(2, 3, 4, 2), (0, 1, 2)], [(2, 3, 4, 2), (0, 2, 3)], + [(1001, 1001), (0, 1)], ])) def testEyeConstant(self, dtype, shape, axes): make_const = lambda: lax.broadcasted_eye(dtype, shape, axes)
Bug in np.eye. As suggested, it seems like np.eye broke at some point. To repro, try `np.allclose(onp.eye(N), np.eye(N))`. Upon closer inspection, it seems like this only kicks in at large N; try e.g. N = 5000.
Interestingly, it seems that `onp.allclose` works and `np.allclose` doesn't...
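A quick way to see the discrepancy is to compare against NumPy on the host side for a few sizes; the sizes below just mirror the report and the regression test added in this PR (the loop itself is my own illustration).

```python
import numpy as onp
import jax.numpy as np

for n in (5, 1050, 5000):
  # convert to a host array and let NumPy do the comparison
  print(n, onp.allclose(onp.asarray(np.eye(n)), onp.eye(n)))
  # before the fix this printed False for the larger sizes
```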
2019-05-17T19:40:00
google/jax
733
google__jax-733
[ "732" ]
bb5cbaabaab033a78107987309161041cfa1780b
diff --git a/jax/lax/lax.py b/jax/lax/lax.py --- a/jax/lax/lax.py +++ b/jax/lax/lax.py @@ -735,7 +735,8 @@ def _get_min_identity(dtype): return onp.array(True, onp.bool_) def _reduce_sum(operand, axes): - return reduce_sum_p.bind(operand, axes=tuple(axes), input_shape=operand.shape) + return reduce_sum_p.bind(operand, axes=tuple(axes), + input_shape=onp.shape(operand)) def _reduce_prod(operand, axes): return reduce_prod_p.bind(operand, axes=tuple(axes)) diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -150,17 +150,11 @@ def _promote_dtypes(*args): if len(args) < 2: return args else: - from_dtypes = (x if type(x) in _builtin_numeric_types else _dtype(x) - for x in args) + from_dtypes = map(_dtype, args) to_dtype = xla_bridge.canonicalize_dtype(result_type(*from_dtypes)) return [lax.convert_element_type(x, to_dtype) if _dtype(x) != to_dtype else x for x in args] -if six.PY3: - _builtin_numeric_types = (int, float, complex) -else: - _builtin_numeric_types = (int, float, long, complex) - def _promote_to_result_dtype(op, *args): """Convenience function to promote args directly to the op's result dtype.""" to_dtype = _result_dtype(op, *args) @@ -1163,10 +1157,11 @@ def pad(array, pad_width, mode, constant_values=0): raise NotImplementedError(msg.format(mode)) array = asarray(array) - pad_width = onp.broadcast_to(onp.asarray(pad_width), (array.ndim, 2)) - constant_values = broadcast_to(asarray(constant_values), (array.ndim, 2)) - for i in xrange(array.ndim): - widths = [(0, 0, 0)] * array.ndim + nd = ndim(array) + pad_width = onp.broadcast_to(onp.asarray(pad_width), (nd, 2)) + constant_values = broadcast_to(asarray(constant_values), (nd, 2)) + for i in xrange(nd): + widths = [(0, 0, 0)] * nd widths[i] = (pad_width[i, 0], 0, 0) array = lax.pad(array, constant_values[i, 0], widths) widths[i] = (0, pad_width[i, 1], 0) @@ -1191,13 +1186,13 @@ def stack(arrays, axis=0): @_wraps(onp.tile) def tile(a, reps): - if isinstance(reps, int): - reps = (reps,) - a = a[(None,) * (len(reps) - a.ndim)] - reps = (1,) * (a.ndim - len(reps)) + reps - for i, rep in enumerate(reps): - a = concatenate([a] * rep, axis=i) - return a + if isinstance(reps, int): + reps = (reps,) + a = reshape(a, (1,) * (len(reps) - ndim(a)) + shape(a)) + reps = (1,) * (ndim(a) - len(reps)) + reps + for i, rep in enumerate(reps): + a = concatenate([a] * rep, axis=i) + return a @_wraps(onp.concatenate) def concatenate(arrays, axis=0): @@ -1338,9 +1333,7 @@ def array_equal(a1, a2): a1, a2 = asarray(a1), asarray(a2) except Exception: return False - if a1.shape != a2.shape: - return False - return asarray(a1==a2).all() + return shape(a1) == shape(a2) and all(asarray(a1 == a2)) # We can't create uninitialized arrays in XLA; use zeros for empty. 
@@ -1864,37 +1857,15 @@ def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None): @_wraps(onp.kron) def kron(a, b): - a_shape = shape(a) - b_shape = shape(b) - a_ndims = len(a_shape) - b_ndims = len(b_shape) - a = array(a) - b = array(b) - d = _min(a_ndims, b_ndims) - if d == 0: - return a * b - a_broadcast_dims = list(range(a_ndims - d, a_ndims + d, 2)) - a_broadcast_shape = onp.ones(a_ndims + d, dtype=onp.int64) - a_broadcast_shape[:-2*d] = a_shape[:-d] - a_broadcast_shape[a_broadcast_dims] = a_shape[-d:] - - b_broadcast_dims = list(range(b_ndims -d + 1, b_ndims + d + 1, 2)) - b_broadcast_shape = onp.ones(b_ndims + d, dtype=onp.int64) - b_broadcast_shape[:-2*d] = b_shape[:-d] - b_broadcast_shape[b_broadcast_dims] = b_shape[-d:] - - if a_ndims > b_ndims: - out_shape = onp.array(a_shape, dtype=onp.int64) - out_shape[-d:] *= onp.array(b_shape, dtype=onp.int64) - else: - out_shape = onp.array(b_shape, dtype=onp.int64) - out_shape[-d:] *= onp.array(a_shape, dtype=onp.int64) - - a_broadcast = lax.broadcast_in_dim( - a, a_broadcast_shape, list(range(a_ndims - d)) + a_broadcast_dims) - b_broadcast = lax.broadcast_in_dim( - b, b_broadcast_shape, list(range(b_ndims - d)) + b_broadcast_dims) - return lax.reshape(a_broadcast * b_broadcast, out_shape) + a, b = _promote_dtypes(a, b) + if ndim(a) < ndim(b): + a = reshape(a, (1,) * (ndim(b) - ndim(a)) + shape(a)) + elif ndim(b) < ndim(a): + b = reshape(b, (1,) * (ndim(a) - ndim(b)) + shape(b)) + a_reshaped = reshape(a, [i for d in shape(a) for i in (d, 1)]) + b_reshaped = reshape(b, [i for d in shape(b) for i in (1, d)]) + out_shape = tuple(onp.multiply(shape(a), shape(b))) + return reshape(lax.mul(a_reshaped, b_reshaped), out_shape) @_wraps(onp.vander) diff --git a/jax/scipy/special.py b/jax/scipy/special.py --- a/jax/scipy/special.py +++ b/jax/scipy/special.py @@ -50,7 +50,7 @@ def expit(x): x = asarray(x) one = lax._const(x, 1) return lax.div(one, lax.add(one, lax.exp(lax.neg(x)))) -ad.defjvp2(expit.primitive, lambda g, ans, x: g * ans * (1 - ans)) +ad.defjvp2(expit.primitive, lambda g, ans, x: g * ans * (lax._const(ans, 1) - ans)) batching.defvectorized(expit.primitive)
diff --git a/jax/test_util.py b/jax/test_util.py --- a/jax/test_util.py +++ b/jax/test_util.py @@ -205,29 +205,39 @@ def format_test_name_suffix(opname, shapes, dtypes): return '{}_{}'.format(opname.capitalize(), '_'.join(arg_descriptions)) -class _NumpyScalar(object): - - def __len__(self): - return 0 - -# A special singleton "shape" that denotes numpy scalars. Numpy scalars are not -# identical to 0-D arrays, and we want to write tests that exercise both paths. +# We use special symbols, represented as singleton objects, to distinguish +# between NumPy scalars, Python scalars, and 0-D arrays. +class ScalarShape(object): + def __len__(self): return 0 +class _NumpyScalar(ScalarShape): pass +class _PythonScalar(ScalarShape): pass NUMPY_SCALAR_SHAPE = _NumpyScalar() +PYTHON_SCALAR_SHAPE = _PythonScalar() def _dims_of_shape(shape): """Converts `shape` to a tuple of dimensions.""" - return shape if shape != NUMPY_SCALAR_SHAPE else () + if type(shape) in (list, tuple): + return shape + elif isinstance(shape, ScalarShape): + return () + else: + raise TypeError(type(shape)) def _cast_to_shape(value, shape, dtype): """Casts `value` to the correct Python type for `shape` and `dtype`.""" - if shape != NUMPY_SCALAR_SHAPE: + if shape is NUMPY_SCALAR_SHAPE: + # explicitly cast to NumPy scalar in case `value` is a Python scalar. + return dtype(value) + elif shape is PYTHON_SCALAR_SHAPE: + # explicitly cast to Python scalar via https://stackoverflow.com/a/11389998 + return onp.asarray(value).item() + elif type(shape) in (list, tuple): + assert onp.shape(value) == tuple(shape) return value else: - # A numpy scalar was requested. Explicitly cast in case `value` is a Python - # scalar. - return dtype(value) + raise TypeError(type(shape)) def dtype_str(dtype): @@ -235,14 +245,17 @@ def dtype_str(dtype): def format_shape_dtype_string(shape, dtype): - if shape == NUMPY_SCALAR_SHAPE: + if shape is NUMPY_SCALAR_SHAPE: return dtype_str(dtype) - - if onp.isscalar(shape): - shapestr = str(shape) + ',' - else: + elif shape is PYTHON_SCALAR_SHAPE: + return 'py' + dtype_str(dtype) + elif type(shape) in (list, tuple): shapestr = ','.join(str(dim) for dim in shape) - return '{}[{}]'.format(dtype_str(dtype), shapestr) + return '{}[{}]'.format(dtype_str(dtype), shapestr) + elif type(shape) is int: + return '{}[{},]'.format(dtype_str(dtype), shape) + else: + raise TypeError(type(shape)) def _rand_dtype(rand, shape, dtype, scale=1., post=lambda x: x): diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -20,6 +20,7 @@ import functools from functools import partial import itertools +import operator import unittest from unittest import SkipTest @@ -43,7 +44,7 @@ nonempty_array_shapes = [()] + nonempty_nonscalar_array_shapes empty_array_shapes = [(0,), (0, 4), (3, 0),] -scalar_shapes = [jtu.NUMPY_SCALAR_SHAPE] +scalar_shapes = [jtu.NUMPY_SCALAR_SHAPE, jtu.PYTHON_SCALAR_SHAPE] array_shapes = nonempty_array_shapes + empty_array_shapes nonzerodim_shapes = nonempty_nonscalar_array_shapes + empty_array_shapes nonempty_shapes = scalar_shapes + nonempty_array_shapes @@ -201,40 +202,43 @@ def op_record(name, nargs, dtypes, shapes, rng, diff_modes, test_name=None, JAX_OPERATOR_OVERLOADS = [ op_record("__add__", 2, number_dtypes, all_shapes, jtu.rand_default(), []), - op_record("__radd__", 2, number_dtypes, all_shapes, jtu.rand_default(), []), op_record("__sub__", 2, number_dtypes, all_shapes, jtu.rand_default(), []), - op_record("__rsub__", 2, number_dtypes, 
all_shapes, jtu.rand_default(), []), op_record("__mul__", 2, number_dtypes, all_shapes, jtu.rand_default(), []), - op_record("__rmul__", 2, number_dtypes, all_shapes, jtu.rand_default(), []), op_record("__eq__", 2, number_dtypes, all_shapes, jtu.rand_default(), []), op_record("__ne__", 2, number_dtypes, all_shapes, jtu.rand_default(), []), - op_record("__lt__", 2, number_dtypes, all_shapes, jtu.rand_default(), []), - op_record("__gt__", 2, number_dtypes, all_shapes, jtu.rand_default(), []), - op_record("__ge__", 2, number_dtypes, all_shapes, jtu.rand_default(), []), + op_record("__lt__", 2, default_dtypes, all_shapes, jtu.rand_default(), []), + op_record("__gt__", 2, default_dtypes, all_shapes, jtu.rand_default(), []), + op_record("__ge__", 2, default_dtypes, all_shapes, jtu.rand_default(), []), op_record("__neg__", 1, number_dtypes, all_shapes, jtu.rand_default(), []), op_record("__pow__", 2, inexact_dtypes, all_shapes, jtu.rand_positive(), []), - op_record("__rpow__", 2, inexact_dtypes, all_shapes, jtu.rand_positive(), []), op_record("__mod__", 2, default_dtypes, all_shapes, jtu.rand_nonzero(), []), - op_record("__rmod__", 2, default_dtypes, all_shapes, jtu.rand_nonzero(), []), - op_record("__floordiv__", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []), - op_record("__rfloordiv__", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []), + op_record("__floordiv__", 2, default_dtypes, all_shapes, jtu.rand_nonzero(), []), op_record("__truediv__", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []), - op_record("__rtruediv__", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []), op_record("__abs__", 1, number_dtypes, all_shapes, jtu.rand_default(), []), # TODO(mattjj): __invert__ fails on bool dtypes because ~True == -2 op_record("__invert__", 1, int_dtypes, all_shapes, jtu.rand_default(), []), # TODO(mattjj): investigate these failures # op_record("__or__", 2, number_dtypes, all_shapes, jtu.rand_bool(), []), - # op_record("__ror__", 2, number_dtypes, all_shapes, jtu.rand_bool(), []), # op_record("__and__", 2, number_dtypes, all_shapes, jtu.rand_default(), []), - # op_record("__rand__", 2, number_dtypes, all_shapes, jtu.rand_default(), []), # op_record("__xor__", 2, number_dtypes, all_shapes, jtu.rand_bool(), []), - # op_record("__rxor__", 2, number_dtypes, all_shapes, jtu.rand_bool(), []), # op_record("__divmod__", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []), - # op_record("__rdivmod__", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []), # TODO(mattjj): lshift, rshift ] +JAX_RIGHT_OPERATOR_OVERLOADS = [ + op_record("__radd__", 2, number_dtypes, all_shapes, jtu.rand_default(), []), + op_record("__rsub__", 2, number_dtypes, all_shapes, jtu.rand_default(), []), + op_record("__rmul__", 2, number_dtypes, all_shapes, jtu.rand_default(), []), + op_record("__rpow__", 2, inexact_dtypes, all_shapes, jtu.rand_positive(), []), + op_record("__rmod__", 2, default_dtypes, all_shapes, jtu.rand_nonzero(), []), + op_record("__rfloordiv__", 2, default_dtypes, all_shapes, jtu.rand_nonzero(), []), + op_record("__rtruediv__", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []), + # op_record("__ror__", 2, number_dtypes, all_shapes, jtu.rand_bool(), []), + # op_record("__rand__", 2, number_dtypes, all_shapes, jtu.rand_default(), []), + # op_record("__rxor__", 2, number_dtypes, all_shapes, jtu.rand_bool(), []), + # op_record("__rdivmod__", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []), +] + numpy_version = tuple(map(int, onp.version.version.split('.'))) if numpy_version >= (1, 15): 
JAX_COMPOUND_OP_RECORDS += [ @@ -249,6 +253,8 @@ def op_record(name, nargs, dtypes, shapes, rng, diff_modes, test_name=None, if six.PY2: JAX_OPERATOR_OVERLOADS += [ op_record("__div__", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []), + ] + JAX_RIGHT_OPERATOR_OVERLOADS += [ op_record("__rdiv__", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []), ] @@ -293,7 +299,7 @@ def _GetArgsMaker(self, rng, shapes, dtypes): dtypes), "rng": rec.rng, "shapes": shapes, "dtypes": dtypes, "onp_op": getattr(onp, rec.name), "lnp_op": getattr(lnp, rec.name), - "check_dtypes": rec.check_dtypes} + "check_dtypes": rec.check_dtypes} for shapes in filter( _shapes_are_broadcast_compatible, CombosWithReplacement(rec.shapes, rec.nargs)) @@ -302,8 +308,9 @@ def _GetArgsMaker(self, rng, shapes, dtypes): JAX_COMPOUND_OP_RECORDS))) def testOp(self, onp_op, lnp_op, rng, shapes, dtypes, check_dtypes): args_maker = self._GetArgsMaker(rng, shapes, dtypes) + py_scalar_arg = jtu.PYTHON_SCALAR_SHAPE in shapes self._CheckAgainstNumpy(onp_op, lnp_op, args_maker, - check_dtypes=check_dtypes) + check_dtypes=check_dtypes and not py_scalar_arg) self._CompileAndCheck(lnp_op, args_maker, check_dtypes=check_dtypes) @parameterized.named_parameters(itertools.chain.from_iterable( @@ -318,8 +325,27 @@ def testOp(self, onp_op, lnp_op, rng, shapes, dtypes, check_dtypes): for rec in JAX_OPERATOR_OVERLOADS)) def testOperatorOverload(self, name, rng, shapes, dtypes): args_maker = self._GetArgsMaker(rng, shapes, dtypes) - fun = lambda x, *xs: getattr(x, name)(*xs) - self._CompileAndCheck(fun, args_maker, check_dtypes=True) + fun = lambda *xs: getattr(operator, name.strip('_'))(*xs) + self._CompileAndCheck(fun, args_maker, + check_dtypes=jtu.PYTHON_SCALAR_SHAPE not in shapes) + + @parameterized.named_parameters(itertools.chain.from_iterable( + jtu.cases_from_list( + {"testcase_name": jtu.format_test_name_suffix(rec.test_name, shapes, + dtypes), + "rng": rec.rng, "shapes": shapes, "dtypes": dtypes, "name": rec.name} + for shapes in filter( + _shapes_are_broadcast_compatible, + CombosWithReplacement(rec.shapes, rec.nargs)) + for dtypes in CombosWithReplacement(rec.dtypes, rec.nargs)) + for rec in JAX_RIGHT_OPERATOR_OVERLOADS)) + def testRightOperatorOverload(self, name, rng, shapes, dtypes): + if shapes[1] is jtu.PYTHON_SCALAR_SHAPE: + raise SkipTest() # TODO(mattjj): clean up + args_maker = self._GetArgsMaker(rng, shapes, dtypes) + fun = lambda fst, snd: getattr(snd, name)(fst) + self._CompileAndCheck(fun, args_maker, + check_dtypes=jtu.PYTHON_SCALAR_SHAPE not in shapes) @parameterized.named_parameters(itertools.chain.from_iterable( jtu.cases_from_list( @@ -339,7 +365,8 @@ def testBitwiseOp(self, onp_op, lnp_op, rng, shapes, dtypes): onp.iinfo(dtype).bits == 64 for dtype in dtypes): self.skipTest("x64 types are disabled by jax_enable_x64") args_maker = self._GetArgsMaker(rng, shapes, dtypes) - self._CheckAgainstNumpy(onp_op, lnp_op, args_maker, check_dtypes=True) + self._CheckAgainstNumpy(onp_op, lnp_op, args_maker, + check_dtypes=jtu.PYTHON_SCALAR_SHAPE not in shapes) self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True) @parameterized.named_parameters(jtu.cases_from_list( @@ -1405,6 +1432,10 @@ def testIssue330(self): self.assertEqual(x[0, 0], 1) def testScalarDtypePromotion(self): + # disabled this test after https://github.com/google/jax/issues/732 + msg = ("jax.numpy differs from numpy in promotion rules for Python scalars." 
+ " See https://github.com/google/jax/issues/732.") + raise SkipTest(msg) orig_numpy_result = (1 + onp.eye(1, dtype=onp.float32)).dtype jax_numpy_result = (1 + lnp.eye(1, dtype=lnp.float32)).dtype self.assertEqual(orig_numpy_result, jax_numpy_result) diff --git a/tests/lax_test.py b/tests/lax_test.py --- a/tests/lax_test.py +++ b/tests/lax_test.py @@ -1364,7 +1364,10 @@ def testFilledConstant(self, shape, fill_value, dtype): jtu.format_shape_dtype_string(shape, dtype), dimension), "shape": shape, "dtype": dtype, "dimension": dimension} for dtype in default_dtypes - for shape in [(), (3,), (2, 3), (2, 3, 4), (1001, 1001), (101, 101, 101)] + for shape in [(), (3,), (2, 3), (2, 3, 4), + # TODO(mattjj): re-enable + # (1001, 1001), (101, 101, 101), + ] for dimension in range(len(shape)))) def testIotaConstant(self, dtype, shape, dimension): make_const = lambda: lax.broadcasted_iota(dtype, shape, dimension)
Inconsistent casting of floats with jit compilation. ``` def mul(R, c): return R * c R = np.array([1, 2, 3], dtype=np.float32) c = 0.1 print(mul(R, c).dtype) # Evaluates to np.float32 as expected. print(jit(mul)(R, c).dtype) # Evaluates to np.float64. ```
Thanks for catching this. I verified that this only happens with jax_enable_x64 switched on. I believe this is a Python scalars issue (which our tests don't cover).
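Until the Python-scalar promotion rules were reworked, one way to sidestep the discrepancy under `jax_enable_x64` is to give the scalar an explicit dtype instead of passing a bare Python float. This is my suggested workaround, not something this patch adds.

```python
import numpy as onp
import jax.numpy as np
from jax import jit

def mul(R, c):
  return R * c

R = np.array([1, 2, 3], dtype=np.float32)
c = onp.float32(0.1)   # explicit dtype instead of a bare Python float

print(mul(R, c).dtype)       # float32
print(jit(mul)(R, c).dtype)  # float32 too, even with jax_enable_x64
```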
2019-05-20T01:50:23
google/jax
735
google__jax-735
[ "711" ]
85881672158eeda9de2414776368d8d1ae5da599
diff --git a/jax/lax/lax_control_flow.py b/jax/lax/lax_control_flow.py --- a/jax/lax/lax_control_flow.py +++ b/jax/lax/lax_control_flow.py @@ -428,16 +428,25 @@ def _maybe_tracer_tuple_to_abstract_tuple(tup): ### scan -def _convert_zeros(convert_symbolic, example, tangent): - if tangent is ad.zero: - if not convert_symbolic: +def _convert_zeros(instantiate, example, tangent): + t = type(instantiate) + if t is bool: + if instantiate: + return ad.instantiate_zeros(example, tangent) + elif tangent is ad_util.zero: return core.unit else: - return ad.zeros_like_jaxval(example) - elif type(tangent) is ad.TangentTuple: - return core.pack(map(_convert_zeros, convert_symbolic, example, tangent)) + raise TypeError(tangent) # not clear if ever reachable + elif t is tuple: + if type(tangent) is ad.TangentTuple: + return core.pack(map(_convert_zeros, instantiate, example, tangent)) + elif tangent is ad_util.zero: + zeros = [ad_util.zero] * len(instantiate) + return core.pack(map(_convert_zeros, instantiate, example, zeros)) + else: + raise TypeError(tangent) else: - return tangent + raise TypeError(t) def _demote_aval_rank(xs): assert isinstance(xs, core.AbstractValue) @@ -641,7 +650,7 @@ def _scan_partial_eval(trace, *tracers, **kwargs): length = kwargs.pop('length') forward = kwargs.pop('forward') assert not kwargs - in_pvs, in_consts = unzip2([t.pval for t in tracers]) + in_pvs, _ = unzip2([t.pval for t in tracers]) sc_consts, sc_init, sc_xs = map(pe.unknown, in_pvs) sc_carry = sc_init @@ -819,7 +828,19 @@ def _make_typed_jaxpr(traceable, in_avals): class FixedPointError(Exception): pass +# We use a custom bind for scan just to add some error checks +def scan_bind(consts, init, xs, forward, length, jaxpr): + if not core.skip_checks: + assert type(jaxpr.in_avals) is tuple + consts_aval, init_aval, xs_aval = jaxpr.in_avals + assert type(jaxpr.out_aval) is core.AbstractTuple + carry_aval, y_aval = jaxpr.out_aval + assert init_aval == carry_aval + return core.Primitive.bind(scan_p, consts, init, xs, + forward=forward, length=length, jaxpr=jaxpr) + scan_p = core.Primitive("scan") +scan_p.def_custom_bind(scan_bind) scan_p.def_impl(_scan_impl) ad.primitive_jvps[scan_p] = _scan_jvp ad.primitive_transposes[scan_p] = _scan_transpose
diff --git a/tests/lax_control_flow_test.py b/tests/lax_control_flow_test.py --- a/tests/lax_control_flow_test.py +++ b/tests/lax_control_flow_test.py @@ -592,6 +592,37 @@ def loss(params, inputs, targets): expected = (onp.zeros_like(W_trans), onp.zeros_like(W_out)) self.assertAllClose(ans, expected, check_dtypes=False) + def testIssue711(self): + # Tests reverse-mode differentiation through a scan for which the scanned + # function also involves reverse-mode differentiation. + # See https://github.com/google/jax/issues/711 + def harmonic_bond(conf, params): + return np.sum(conf * params) + + def minimize_structure(test_params): + energy_fn = partial(harmonic_bond, params=test_params) + grad_fn = api.grad(energy_fn) + + def apply_carry(carry, _): + i, x = carry + new_x = x - 0.1 * api.grad(energy_fn)(x) + new_carry = (i+1, new_x) + return new_carry, _ + + x0 = np.array([1., 2., 3.]) + carry_final, _ = lax.scan(apply_carry, (0, x0), np.zeros((75, 0))) + _, x_final = carry_final + return x_final + + initial_params = 0.5 + minimize_structure(initial_params) # doesn't crash + + def loss(test_params): + x_final = minimize_structure(test_params) + return np.sum(np.sin(1.0 - x_final)) + + api.grad(loss)(0.25) # doesn't crash + if __name__ == '__main__': absltest.main()
lax_scan/lattice_join shape inconsistencies For certain native scalar types, the newly implemented lattice_join code doesn't seem to be able to match the types. This is a follow-up of #650 ``` python def lattice_join(x, y): if x is None: return y elif y is None: return x elif isinstance(x, type(y)): return y.join(x) elif isinstance(y, type(x)): return x.join(y) else: > raise TypeError((x, y)) E TypeError: (ShapedArray(int64[]), ()) ``` Code to reproduce: ``` python import unittest import numpy as onp import jax.numpy as np import functools import jax from jax.config import config; config.update("jax_enable_x64", True) from jax.experimental import optimizers from jax.test_util import check_grads def harmonic_bond(conf, params): return np.sum(conf * params) class TestOptimizeGeometry(unittest.TestCase): def test_case(self): opt_init, opt_update, get_params = optimizers.sgd(5e-2) x0 = onp.array([0.5], dtype=onp.float64) params = onp.array([0.3], dtype=onp.float64) def minimize_structure(test_params): energy_fn = functools.partial(harmonic_bond, params=test_params) grad_fn = jax.jit(jax.grad(energy_fn, argnums=(0,))) opt_state = opt_init(x0) # use lax.scan, way faster compilation times. def apply_carry(carry, _): i, x = carry g = grad_fn(get_params(x))[0] new_state = opt_update(i, g, x) new_carry = (i+1, new_state) return new_carry, _ carry_final, _ = jax.lax.scan(apply_carry, (np.array(0), opt_state), np.zeros((75, 0))) trip, opt_final = carry_final assert trip == 75 return opt_final initial_params = 0.5 minimize_structure(initial_params) def loss(test_params): opt_final = minimize_structure(test_params) return 1.0-opt_final loss_opt_init, loss_opt_update, loss_get_params = optimizers.sgd(5e-2) loss_grad_fn = jax.grad(loss, argnums=(0,)) loss_opt_state = loss_opt_init(initial_params) loss_params = loss_get_params(loss_opt_state) loss_grad = loss_grad_fn(loss_params)[0] ```
I'm seeing this happen in yet another test case. I'll keep looking. Thanks for raising this! Minimal test cases are easier for us to make progress on. Any chance you can pare that down? Sorry for being lazy/sloppy - I'll make more self-contained repros in the future. I've updated the original issue with a much smaller test case. I think the issue is that somewhere along the way the iteration count is being cast to a vanilla Python scalar (as opposed to a zero-sized jax IntArray[]).
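For reference, the pared-down repro that ended up in the regression test above drops the optimizer entirely and scans a plain gradient step; this sketch mirrors `testIssue711` from the test patch.

```python
from functools import partial

import jax.numpy as np
from jax import grad, lax

def harmonic_bond(conf, params):
  return np.sum(conf * params)

def minimize_structure(test_params):
  energy_fn = partial(harmonic_bond, params=test_params)

  def apply_carry(carry, _):
    i, x = carry
    # one gradient-descent step inside the scanned function
    return (i + 1, x - 0.1 * grad(energy_fn)(x)), _

  carry_final, _ = lax.scan(apply_carry, (0, np.array([1., 2., 3.])),
                            np.zeros((75, 0)))
  _, x_final = carry_final
  return x_final

def loss(test_params):
  return np.sum(np.sin(1.0 - minimize_structure(test_params)))

grad(loss)(0.25)  # crashed before this change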
2019-05-20T16:10:35
google/jax
736
google__jax-736
[ "446" ]
5cbaf75d2860720ca782551476d58eab7621cf3e
diff --git a/jax/tree_util.py b/jax/tree_util.py --- a/jax/tree_util.py +++ b/jax/tree_util.py @@ -56,7 +56,7 @@ def tree_map(f, tree): leaf given by `f(x)` where `x` is the value at the corresponding leaf in `tree`. """ - node_type = node_types.get(type(tree)) + node_type = _get_node_type(tree) if node_type: children, node_spec = node_type.to_iterable(tree) new_children = [tree_map(f, child) for child in children] @@ -79,12 +79,12 @@ def tree_multimap(f, tree, *rest): leaf given by `f(x, *xs)` where `x` is the value at the corresponding leaf in `tree` and `xs` is the tuple of values at corresponding leaves in `rest`. """ - node_type = node_types.get(type(tree)) + node_type = _get_node_type(tree) if node_type: children, aux_data = node_type.to_iterable(tree) all_children = [children] for other_tree in rest: - other_node_type = node_types.get(type(other_tree)) + other_node_type = _get_node_type(other_tree) if node_type != other_node_type: raise TypeError('Mismatch: {} != {}'.format(other_node_type, node_type)) other_children, other_aux_data = node_type.to_iterable(other_tree) @@ -113,7 +113,7 @@ def process_pytree(process_node, tree): def walk_pytree(f_node, f_leaf, tree): - node_type = node_types.get(type(tree)) + node_type = _get_node_type(tree) if node_type: children, node_spec = node_type.to_iterable(tree) proc_children, child_specs = unzip2([walk_pytree(f_node, f_leaf, child) @@ -236,3 +236,20 @@ def register_pytree_node(py_type, to_iterable, from_iterable): register_pytree_node(list, lambda xs: (tuple(xs), None), lambda _, xs: list(xs)) register_pytree_node(dict, dict_to_iterable, lambda keys, xs: dict(zip(keys, xs))) register_pytree_node(type(None), lambda z: ((), None), lambda _, xs: None) + + +# To handle namedtuples, we can't just use the standard table of node_types +# because every namedtuple creates its own type and thus would require its own +# entry in the table. Instead we use a heuristic check on the type itself to +# decide whether it's a namedtuple type, and if so treat it as a pytree node. +def _get_node_type(maybe_tree): + t = type(maybe_tree) + return node_types.get(t) or _namedtuple_node(t) + +def _namedtuple_node(t): + if t.__bases__ == (tuple,) and hasattr(t, '_fields'): + return NamedtupleNode + +NamedtupleNode = NodeType('namedtuple', + lambda xs: (tuple(xs), type(xs)), + lambda t, xs: t(*xs))
diff --git a/tests/api_test.py b/tests/api_test.py --- a/tests/api_test.py +++ b/tests/api_test.py @@ -16,11 +16,11 @@ from __future__ import division from __future__ import print_function -import six +import collections -import numpy as onp from absl.testing import absltest -from jax import test_util as jtu +import numpy as onp +import six import jax.numpy as np from jax import jit, grad, device_get, device_put, jacfwd, jacrev, hessian @@ -29,6 +29,7 @@ from jax.interpreters.ad import defjvp, defvjp, defvjp2, defvjp_all from jax.interpreters.xla import DeviceArray, DeviceTuple from jax.abstract_arrays import concretization_err_msg +from jax import test_util as jtu from jax.config import config config.parse_flags_with_absl() @@ -568,6 +569,22 @@ def test_devicearray_repr(self): self.assertIsInstance(x, DeviceArray) repr(x) # doesn't crash + def test_namedtuple_transparency(self): + # See https://github.com/google/jax/issues/446 + Point = collections.namedtuple("Point", ["x", "y"]) + + def f(pt): + return np.sqrt(pt.x ** 2 + pt.y ** 2) + + pt = Point(1., 2.) + + f(pt) # doesn't crash + g = api.grad(f)(pt) + self.assertIsInstance(g, Point) + + f_jit = api.jit(f) + self.assertAllClose(f(pt), f_jit(pt), check_dtypes=False) + if __name__ == '__main__': absltest.main()
namedtuple support in arguments to transformed functions. It would be great if `xla.abstractify` would also accept namedtuples. Loop states can consist of quite a lot of values, and organizing them in a namedtuple rather than a tuple would make things nicer.
There's actually a convenient way to add support for custom container types throughout JAX, not just in loop carries but also for `grad`, `jit`, `vmap`, etc, all at once. Of course it's not documented at all... :) You can register a custom type as a "pytree" (tree-like Python container) like this: ```python from collections import namedtuple from jax.tree_util import register_pytree_node from jax import grad, jit import jax.numpy as np Point = namedtuple("Point", ["x", "y"]) register_pytree_node( Point, lambda xs: (tuple(xs), None), # tell JAX how to unpack to an iterable lambda _, xs: Point(*xs) # tell JAX how to pack back into a Point ) def f(pt): return np.sqrt(pt.x**2 + pt.y**2) pt = Point(1., 2.) print f(pt) # 2.236068 print grad(f)(pt) # Point(x=..., y=...) g = jit(f) print g(pt) # 2.236068 ``` So that's an easy and general way to get your code working now. It also means you can have your namedtuple classes contain nested tuples/lists/dicts, or have them nested in other tuples/lists/dicts. (By the way, the extra data that can be returned by the to-iterable function and consumed by the to-pytree fun is for things like dict keys. In the above example, we're just returning None when mapping to an iterable and then ignoring it when reconstructing.) However, we should consider making JAX work with all namedtuple classes by default, without having to register them. Any thoughts on that, or objections to it? I revised the issue title because we'd handle the issue in api.py and xla.abstractify would never need to see these types (just like it never sees tuples/lists/dicts). Ha, that's awesome! Regarding namedtuple support: Given that namedtuple's are real subclasses of tuples, I think supporting all namedtuples out of the box would be the most intuitive solution. +1 to having JAX work with all namedtuple classes +1 Our existing codebase has been heavily relying on namedtuple and it would be great to support it in JAX.
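With this change, the explicit `register_pytree_node` step in the comment above becomes unnecessary for namedtuples; a minimal check mirroring the added `test_namedtuple_transparency` test:

```python
import collections

import jax.numpy as np
from jax import grad, jit

Point = collections.namedtuple("Point", ["x", "y"])

def f(pt):
  return np.sqrt(pt.x ** 2 + pt.y ** 2)

pt = Point(1., 2.)
print(f(pt))        # 2.236068
print(grad(f)(pt))  # a Point of gradients, no registration needed
print(jit(f)(pt))   # 2.236068
```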
2019-05-20T17:10:22
google/jax
737
google__jax-737
[ "121" ]
adb15b7f4f53f7977d42f81514475328b8d8d46b
diff --git a/jax/lax/lax.py b/jax/lax/lax.py --- a/jax/lax/lax.py +++ b/jax/lax/lax.py @@ -514,7 +514,7 @@ def reshape(operand, new_sizes, dimensions=None): """ same_shape = onp.shape(operand) == tuple(new_sizes) same_dims = dimensions is None or tuple(dimensions) == tuple(range(onp.ndim(operand))) - if same_shape and same_dims: + if onp.shape(operand) and same_shape and same_dims: return operand else: return reshape_p.bind(
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -1051,7 +1051,7 @@ def testAverage(self, shape, dtype, axis, weights_shape, returned, rng): @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_arg{}".format(i), "arg": arg} for i, arg in enumerate([ - [1, 2, 3], [1., 2., 3.], + 3., [1, 2, 3], [1., 2., 3.], [[1, 2], [3, 4], [5, 6]], [[1, 2.], [3, 4], [5, 6]], [[3, onp.array(2), 1], onp.arange(3.)], ]))) @@ -1060,6 +1060,9 @@ def testArray(self, arg): self._CheckAgainstNumpy(onp.array, lnp.array, args_maker, check_dtypes=True) self._CompileAndCheck(lnp.array, args_maker, check_dtypes=True) + def testIssue121(self): + assert not onp.isscalar(lnp.array(3)) + def testArrayMethod(self): class arraylike(object): dtype = onp.float32
Scalars passed into np.array should return 0-dim arrays. ``` import jax.numpy as np import numpy as onp onp.array(0).dtype # works, because onp.array(0) returns a 0-dim array np.array(0).dtype # doesn't work, because np.array(0) returns an int ``` This showed up when trying to do `lax.dynamic_update_slice(array, value, np.array(0))`
#124 says this was fixed, but I still get an error: ``` In [1]: import numpy as onp In [2]: import jax.numpy as np In [3]: onp.array(0).dtype Out[3]: dtype('int64') In [4]: np.array(0).dtype --------------------------------------------------------------------------- AttributeError Traceback (most recent call last) <ipython-input-4-05f7ef02f43f> in <module> ----> 1 np.array(0).dtype AttributeError: 'int' object has no attribute 'dtype' ``` I'm on commit 6476d5ffc6bee0e8d9d7b54914565d798b8dda29. I am not sure if this is a regression, but we noticed this recently as well when we found that `scan` does not work well with scalar arrays (https://github.com/pyro-ppl/numpyro/pull/91). The reason is that `jax.numpy.array(1.)` returns a python float instead of a `DeviceArray` unlike numpy. cc. @mattjj in case this needs to be reopened. @kroq-gar78 that PR was closed but not merged, so this bug still isn't fixed and remains open. @neerajprad we're going to check in a from-scratch rewrite of `scan` in the next week or two, though that new version will need testing and could have similar issues at first. Thanks for pointing that out!
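The shape of the fix is visible in the regression test above (`testIssue121`): after it, `np.array(0)` is a proper 0-d array rather than a Python int. A quick check, with the expected dtype hedged since it depends on the x64 flag:

```python
import numpy as onp
import jax.numpy as np

x = np.array(0)
print(onp.isscalar(x))        # False after the fix: x is a 0-d array
print(onp.shape(x), x.dtype)  # (), int32 (or int64 with jax_enable_x64)
```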
2019-05-20T18:52:19
google/jax
741
google__jax-741
[ "738" ]
6539c8e4fd8a89aeadc474ad8c5dadfad0233362
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -1444,6 +1444,9 @@ def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None): if out: raise NotImplementedError("The 'out' argument to trace is not supported.") + axis1 = axis1 % ndim(a) + axis2 = axis2 % ndim(a) + a_shape = shape(a) if dtype is None: dtype = _dtype(a)
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -820,7 +820,9 @@ def testIdentity(self, n, dtype): for dtype in default_dtypes for out_dtype in [None] + number_dtypes for shape in [shape for shape in all_shapes if len(shape) >= 2] - for (axis1, axis2) in itertools.combinations(range(len(shape)), 2) + for axis1 in range(-len(shape), len(shape)) + for axis2 in range(-len(shape), len(shape)) + if (axis1 % len(shape)) != (axis2 % len(shape)) for offset in list(range(-4, 4)))) def testTrace(self, shape, dtype, out_dtype, offset, axis1, axis2, rng): onp_fun = lambda arg: onp.trace(arg, offset, axis1, axis2, out_dtype)
np.trace is broken ``` import jax.numpy as np import numpy as onp # This works print(onp.trace(onp.ones((2, 3, 4, 4)), axis1=-1, axis2=-2)) # This does not work print(np.trace(np.ones((2, 3, 4, 4)), axis1=-1, axis2=-2)) ``` Error message: ``` >>> print(np.trace(np.ones((2, 3, 4, 4)), axis1=-1, axis2=-2)) Traceback (most recent call last): File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/interpreters/xla.py", line 70, in primitive_computation return c.Build() File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/lib/xla_bridge.py", line 267, in Build *args, **kwargs) File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jaxlib/xla_client.py", line 640, in Build return Computation(self._builder.Build(), backend=backend) RuntimeError: Invalid argument: Transpose dimensions [0,1,2,3,-1,-2] are not a permutation of the operand dimensions (operand shape is f32[2,3,4,4]).: During handling of the above exception, another exception occurred: Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/numpy/lax_numpy.py", line 1465, in trace a = lax.transpose(a, perm) File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/lax/lax.py", line 685, in transpose return transpose_p.bind(operand, permutation=permutation) File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/core.py", line 117, in bind return self.impl(*args, **kwargs) File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/interpreters/xla.py", line 51, in apply_primitive compiled_fun = xla_primitive_callable(prim, *abstract_args, **kwargs) File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/util.py", line 174, in memoized_fun ans = cache[key] = fun(*args, **kwargs) File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/interpreters/xla.py", line 57, in xla_primitive_callable built_c = primitive_computation(prim, *shapes, **kwargs) File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/util.py", line 174, in memoized_fun ans = cache[key] = fun(*args, **kwargs) File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/interpreters/xla.py", line 73, in primitive_computation prim.abstract_eval(*map(aval_from_xla_shape, shapes), **kwargs) File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/lax/lax.py", line 1295, in standard_abstract_eval return ShapedArray(shape_rule(*args, **kwargs), dtype_rule(*args, **kwargs)) File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/lax/lax.py", line 2268, in _transpose_shape_rule raise TypeError(msg.format(permutation, operand.shape)) TypeError: transpose permutation isn't a permutation of operand dimensions, got permutation (0, 1, 2, 3, -1, -2) for operand shape (2, 3, 4, 4). ```
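Until this fix lands, negative `axis1`/`axis2` values can be normalized by hand before calling `np.trace`, which is exactly what the patch does internally with `axis % ndim`; framing this as a user-side workaround is my own suggestion.

```python
import jax.numpy as np

x = np.ones((2, 3, 4, 4))
axis1, axis2 = -1, -2
# normalize negative axes the same way the fix does
out = np.trace(x, axis1=axis1 % x.ndim, axis2=axis2 % x.ndim)
print(out.shape)  # (2, 3)
```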
2019-05-21T00:11:42
google/jax
742
google__jax-742
[ "740" ]
6539c8e4fd8a89aeadc474ad8c5dadfad0233362
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -1585,11 +1585,16 @@ def tensordot(a, b, axes=2): raise TypeError(msg.format(ndim(a), ndim(b))) if type(axes) is int: - a, b = _promote_dtypes(a, b) - a_reshape = lax.reshape(a, (_prod(a.shape[:-axes]), _prod(a.shape[-axes:]))) - b_reshape = lax.reshape(b, (_prod(b.shape[:axes]), _prod(b.shape[axes:]))) - out_reshape = lax.dot(a_reshape, b_reshape) - return lax.reshape(out_reshape, a.shape[:-axes] + b.shape[axes:]) + if axes == 0: + a, b = _promote_dtypes(a, b) + return lax.mul(lax.reshape(a, shape(a) + (1,) * ndim(b)), + lax.reshape(b, (1,) * ndim(a) + shape(b))) + else: + a, b = _promote_dtypes(a, b) + a_reshape = lax.reshape(a, (_prod(a.shape[:-axes]), _prod(a.shape[-axes:]))) + b_reshape = lax.reshape(b, (_prod(b.shape[:axes]), _prod(b.shape[axes:]))) + out_reshape = lax.dot(a_reshape, b_reshape) + return lax.reshape(out_reshape, a.shape[:-axes] + b.shape[axes:]) elif type(axes) in (list, tuple) and len(axes) == 2: ax1, ax2 = axes if type(ax1) == type(ax2) == int:
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -539,6 +539,7 @@ def testMatmul(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, rng): "axes": axes, "rng": rng} for rng in [jtu.rand_default()] for lhs_shape, rhs_shape, axes in [ + [(2, 3, 4), (5, 6, 7), 0], # from issue #740 [(2, 3, 4), (3, 4, 5, 6), 2], [(2, 3, 4), (5, 4, 3, 6), [1, 2]], [(2, 3, 4), (5, 4, 3, 6), [[1, 2], [2, 1]]],
np.tensordot crashes when axes=0 ```python import jax.numpy as np import numpy as onp result = onp.tensordot(onp.ones((2, 3, 4)), onp.ones((5, 6, 7)), 0) print(result.shape) # should print (2, 3, 4, 5, 6, 7) # This errors out. result = np.tensordot(np.ones((2, 3, 4)), np.ones((5, 6, 7)), 0) ``` This is the error message I receive. ``` >>> result = np.tensordot(np.ones((2, 3, 4)), np.ones((5, 6, 7)), 0) /usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/lib/xla_bridge.py:130: UserWarning: No GPU/TPU found, falling back to CPU. warnings.warn('No GPU/TPU found, falling back to CPU.') Traceback (most recent call last): File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/interpreters/xla.py", line 70, in primitive_computation return c.Build() File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/lib/xla_bridge.py", line 267, in Build *args, **kwargs) File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jaxlib/xla_client.py", line 640, in Build return Computation(self._builder.Build(), backend=backend) RuntimeError: Invalid argument: Cannot infer shape for dot operation: f32[1,24] <dot> f32[1,210]. Contracting dimension sizes do not match.: During handling of the above exception, another exception occurred: Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/numpy/lax_numpy.py", line 1598, in tensordot out_reshape = lax.dot(a_reshape, b_reshape) File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/lax/lax.py", line 462, in dot return dot_p.bind(lhs, rhs) File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/core.py", line 117, in bind return self.impl(*args, **kwargs) File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/interpreters/xla.py", line 51, in apply_primitive compiled_fun = xla_primitive_callable(prim, *abstract_args, **kwargs) File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/util.py", line 174, in memoized_fun ans = cache[key] = fun(*args, **kwargs) File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/interpreters/xla.py", line 57, in xla_primitive_callable built_c = primitive_computation(prim, *shapes, **kwargs) File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/util.py", line 174, in memoized_fun ans = cache[key] = fun(*args, **kwargs) File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/interpreters/xla.py", line 73, in primitive_computation prim.abstract_eval(*map(aval_from_xla_shape, shapes), **kwargs) File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/lax/lax.py", line 1295, in standard_abstract_eval return ShapedArray(shape_rule(*args, **kwargs), dtype_rule(*args, **kwargs)) File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/lax/lax.py", line 1824, in _dot_shape_rule require(lhs.shape[1] == rhs.shape[0]) File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/lax/lax.py", line 1818, in require raise TypeError(msg.format(lhs.shape, rhs.shape)) TypeError: Incompatible shapes for dot: got (1, 24) and (1, 210). ```
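As the patch shows, `axes=0` is just an outer product, so it can be emulated with broadcasting until the fix is available. The sketch below uses the shapes from the report; treating this as a user-side workaround (rather than something the PR documents) is my own framing.

```python
import jax.numpy as np

a = np.ones((2, 3, 4))
b = np.ones((5, 6, 7))

# axes=0 is an outer product: add singleton axes and broadcast-multiply
out = (np.reshape(a, np.shape(a) + (1,) * np.ndim(b)) *
       np.reshape(b, (1,) * np.ndim(a) + np.shape(b)))
print(out.shape)  # (2, 3, 4, 5, 6, 7)
```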
2019-05-21T00:19:54
google/jax
745
google__jax-745
[ "744" ]
8cbc475a8b6f56130f7347a9a69e55568c3fc2eb
diff --git a/jax/lax/lax_control_flow.py b/jax/lax/lax_control_flow.py --- a/jax/lax/lax_control_flow.py +++ b/jax/lax/lax_control_flow.py @@ -464,9 +464,21 @@ def _promote_aval_rank(n, xs): def _leading_dim_size(xs): if isinstance(xs, core.JaxTuple): - return _leading_dim_size(xs[0]) + sizes = set(map(_leading_dim_size, xs)) + if len(sizes) == 1: + return sizes.pop() + elif len(sizes) > 1: + msg = "scan got inconsistent leading axis sizes: {}" + raise ValueError(msg.format(sizes)) + else: + raise ValueError("scan found no leading axis to scan over") else: - return xs.shape[0] + shape = onp.shape(xs) + if shape: + return shape[0] + else: + msg = "scan got value with no leading axis to scan over: {}" + raise ValueError(msg.format(xs)) def _empty_arrays(aval): assert isinstance(aval, core.AbstractValue) @@ -556,7 +568,7 @@ def scan(f, init, xs): carry_aval_out, y_aval = pv_out if carry_aval != carry_aval_out: msg = ("scanned function carry output does not match carry input: " - "input carry is {} and output carry is {}") + "input carry is {} and output carry is {}.") raise TypeError(msg.format(carry_aval, carry_aval_out)) lifted_jaxpr = pe._closure_convert_jaxpr(jaxpr) consts_aval, _ = _abstractify(core.pack(consts))
diff --git a/tests/lax_control_flow_test.py b/tests/lax_control_flow_test.py --- a/tests/lax_control_flow_test.py +++ b/tests/lax_control_flow_test.py @@ -16,6 +16,7 @@ from __future__ import division from __future__ import print_function +import collections from functools import partial from absl.testing import absltest @@ -623,6 +624,18 @@ def loss(test_params): api.grad(loss)(0.25) # doesn't crash + def testIssue744(self): + Point = collections.namedtuple('Point', ['x', 'y']) + p0 = Point(x=np.array(1), y=np.array(2)) + + def plus_one(p, iter_idx): + return Point(p.x+1, p.y+1), iter_idx + + self.assertRaisesRegexp( + ValueError, + 'scan got value with no leading axis to scan over.*', + lambda: lax.scan(plus_one, p0, list(range(5)))) + if __name__ == '__main__': absltest.main()
lax.scan errors when the input array is not a jax array. lax.scan incorrectly raises a TypeError: 'JaxTuple' object does not support indexing. A simple repro can be found here ``` import collections from jax import lax import jax.numpy as np Point = collections.namedtuple('Point', 'x y') p0 = Point(x=np.array(1), y=np.array(2)) def plus_one(p, iter_idx): return Point(p.x+1, p.y+1), iter_idx point, _ = lax.scan(plus_one, p0, list(range(5))) print(point) ``` or in this colab https://colab.research.google.com/drive/1_jmdeIjfxJwUElNpwEbBR1gnAQDe8WbP
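After this PR the list case raises a clear ValueError ("scan got value with no leading axis to scan over") instead of the confusing TypeError. The immediate workaround in the repro is to pass the scanned-over values as an array with a leading axis; using `np.arange(5)` here is my suggestion, and whether the namedtuple carry itself goes through depends on the pytree support added in #736, so treat this as a sketch.

```python
import collections

import jax.numpy as np
from jax import lax

Point = collections.namedtuple('Point', 'x y')
p0 = Point(x=np.array(1), y=np.array(2))

def plus_one(p, iter_idx):
  return Point(p.x + 1, p.y + 1), iter_idx

# scan over an array with a leading axis instead of list(range(5))
point, _ = lax.scan(plus_one, p0, np.arange(5))
print(point)
```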
2019-05-21T14:16:32
google/jax
749
google__jax-749
[ "748" ]
11c871b9a83b3b54ef77f823508a6bdcbb48d05a
diff --git a/jax/interpreters/ad.py b/jax/interpreters/ad.py --- a/jax/interpreters/ad.py +++ b/jax/interpreters/ad.py @@ -187,7 +187,11 @@ def write_primal(v, val): if cts_out is zero: cts_out = [zero for _ in eqn.invars] - map(write_cotangent, eqn.invars, cts_out) + if not eqn.restructure: + map(write_cotangent, eqn.invars, cts_out) + else: + [map(write_cotangent, v, ct) if type(v) is tuple + else write_cotangent(v, ct) for v, ct in zip(eqn.invars, cts_out)] freevar_cts = core.pat_fmap(read_cotangent, jaxpr.freevars) cotangents_out = core.pat_fmap(lambda v, _: read_cotangent(v), jaxpr.invars, None)
diff --git a/jax/test_util.py b/jax/test_util.py --- a/jax/test_util.py +++ b/jax/test_util.py @@ -152,21 +152,32 @@ def check_vjp(f, f_vjp, args, atol=ATOL, rtol=RTOL, eps=EPS): check_close(ip, ip_expected, atol=atol, rtol=rtol) -def check_grads(f, args, order, atol=None, rtol=None, eps=None): +def check_grads(f, args, order, + modes=["fwd", "rev"], atol=None, rtol=None, eps=None): args = tuple(args) - if order > 1: - def f_vjp(*args): - out_primal_py, vjp_py = api.vjp(f, *args) - return vjp_py(out_primal_py) - - check_grads(f_vjp, args, order - 1, atol=atol, rtol=rtol, eps=eps) - else: - default_tol = 1e-6 if FLAGS.jax_enable_x64 else 1e-2 - atol = atol or default_tol - rtol = rtol or default_tol - eps = eps or EPS - check_jvp(f, partial(api.jvp, f), args, atol, rtol, eps) - check_vjp(f, partial(api.vjp, f), args, atol, rtol, eps) + default_tol = 1e-6 if FLAGS.jax_enable_x64 else 1e-2 + atol = atol or default_tol + rtol = rtol or default_tol + eps = eps or EPS + + _check_jvp = partial(check_jvp, atol=atol, rtol=rtol, eps=eps) + _check_vjp = partial(check_vjp, atol=atol, rtol=rtol, eps=eps) + + def _check_grads(f, args, order): + if "fwd" in modes: + _check_jvp(f, partial(api.jvp, f), args) + if order > 1: + _check_grads(partial(api.jvp, f), (args, args), order - 1) + + if "rev" in modes: + _check_vjp(f, partial(api.vjp, f), args) + if order > 1: + def f_vjp(*args): + out_primal_py, vjp_py = api.vjp(f, *args) + return vjp_py(out_primal_py) + _check_grads(f_vjp, args, order - 1) + + _check_grads(f, args, order) def skip_on_devices(*disabled_devices): diff --git a/tests/lax_control_flow_test.py b/tests/lax_control_flow_test.py --- a/tests/lax_control_flow_test.py +++ b/tests/lax_control_flow_test.py @@ -636,6 +636,18 @@ def plus_one(p, iter_idx): 'scan got value with no leading axis to scan over.*', lambda: lax.scan(plus_one, p0, list(range(5)))) + def testScanHigherOrderDifferentiation(self): + d = 0.75 + def f(c, a): + b = np.sin(c * np.sum(np.cos(d * a))) + c = 0.9 * np.cos(d * np.sum(np.sin(c * a))) + return c, b + + as_ = np.arange(6.).reshape((3, 2)) + c = 1. 
+ + jtu.check_grads(lambda c: lax.scan(f, c, as_), (c,), modes=["fwd", "rev"], order=2) + if __name__ == '__main__': absltest.main() diff --git a/tests/lax_test.py b/tests/lax_test.py --- a/tests/lax_test.py +++ b/tests/lax_test.py @@ -1404,84 +1404,94 @@ def testEyeConstant(self, dtype, shape, axes): GradTestSpec = collections.namedtuple( - "GradTestSpec", ["op", "nargs", "order", "rng", "dtypes"]) + "GradTestSpec", ["op", "nargs", "order", "rng", "dtypes", "name"]) +def grad_test_spec(op, nargs, order, rng, dtypes, name=None): + return GradTestSpec(op, nargs, order, rng, dtypes, name or op.__name__) LAX_GRAD_OPS = [ - GradTestSpec(lax.neg, nargs=1, order=2, rng=jtu.rand_default(), - dtypes=[onp.float64, onp.complex64]), - GradTestSpec(lax.floor, nargs=1, order=2, rng=jtu.rand_default(), - dtypes=[onp.float64]), - GradTestSpec(lax.ceil, nargs=1, order=2, rng=jtu.rand_default(), - dtypes=[onp.float64]), - GradTestSpec(lax.round, nargs=1, order=2, rng=jtu.rand_default(), - dtypes=[onp.float64]), - # GradTestSpec(lax.rem, nargs=2, order=2, rng=jtu.rand_default(), - # dtypes=[onp.float64]), # TODO(mattjj): enable - - GradTestSpec(lax.exp, nargs=1, order=2, rng=jtu.rand_small(), - dtypes=[onp.float64, onp.complex64]), - GradTestSpec(lax.expm1, nargs=1, order=2, rng=jtu.rand_default(), - dtypes=[onp.float64, onp.complex64]), - GradTestSpec(lax.log, nargs=1, order=2, rng=jtu.rand_positive(), - dtypes=[onp.float64, onp.complex64]), - GradTestSpec(lax.log1p, nargs=1, order=2, rng=jtu.rand_positive(), - dtypes=[onp.float64, onp.complex64]), - GradTestSpec(lax.tanh, nargs=1, order=2, rng=jtu.rand_default(), - dtypes=[onp.float64, onp.complex64]), - GradTestSpec(lax.sin, nargs=1, order=2, rng=jtu.rand_default(), - dtypes=[onp.float64, onp.complex64]), - GradTestSpec(lax.cos, nargs=1, order=2, rng=jtu.rand_default(), - dtypes=[onp.float64, onp.complex64]), + grad_test_spec(lax.neg, nargs=1, order=2, rng=jtu.rand_default(), + dtypes=[onp.float64, onp.complex64]), + grad_test_spec(lax.floor, nargs=1, order=2, rng=jtu.rand_default(), + dtypes=[onp.float64]), + grad_test_spec(lax.ceil, nargs=1, order=2, rng=jtu.rand_default(), + dtypes=[onp.float64]), + grad_test_spec(lax.round, nargs=1, order=2, rng=jtu.rand_default(), + dtypes=[onp.float64]), + # grad_test_spec(lax.rem, nargs=2, order=2, rng=jtu.rand_default(), + # dtypes=[onp.float64]), # TODO(mattjj): enable + + grad_test_spec(lax.exp, nargs=1, order=2, rng=jtu.rand_small(), + dtypes=[onp.float64, onp.complex64]), + grad_test_spec(lax.expm1, nargs=1, order=2, rng=jtu.rand_default(), + dtypes=[onp.float64, onp.complex64]), + grad_test_spec(lax.log, nargs=1, order=2, rng=jtu.rand_positive(), + dtypes=[onp.float64, onp.complex64]), + grad_test_spec(lax.log1p, nargs=1, order=2, rng=jtu.rand_positive(), + dtypes=[onp.float64, onp.complex64]), + grad_test_spec(lax.tanh, nargs=1, order=2, rng=jtu.rand_default(), + dtypes=[onp.float64, onp.complex64]), + grad_test_spec(lax.sin, nargs=1, order=2, rng=jtu.rand_default(), + dtypes=[onp.float64, onp.complex64]), + grad_test_spec(lax.cos, nargs=1, order=2, rng=jtu.rand_default(), + dtypes=[onp.float64, onp.complex64]), # TODO(proteneer): atan2 input is already a representation of a # complex number. Need to think harder about what this even means # if each input itself is a complex number. 
- GradTestSpec(lax.atan2, nargs=2, order=2, rng=jtu.rand_default(), - dtypes=[onp.float64]), - - GradTestSpec(lax.erf, nargs=1, order=2, rng=jtu.rand_small(), - dtypes=[onp.float64]), - GradTestSpec(lax.erfc, nargs=1, order=2, rng=jtu.rand_small(), - dtypes=[onp.float64]), - GradTestSpec(lax.erf_inv, nargs=1, order=2, rng=jtu.rand_small(), - dtypes=[onp.float64]), - # GradTestSpec(lax.lgamma, nargs=1, order=2, rng=jtu.rand_small(), - # dtypes=[onp.float64]), # TODO(mattjj): enable - - GradTestSpec(lax.real, nargs=1, order=2, rng=jtu.rand_default(), - dtypes=[onp.complex64]), - GradTestSpec(lax.imag, nargs=1, order=2, rng=jtu.rand_default(), - dtypes=[onp.complex64]), - # GradTestSpec(lax.complex, nargs=2, order=2, rng=jtu.rand_default(), - # dtypes=[onp.float32]), # TODO(mattjj): enable - GradTestSpec(lax.conj, nargs=1, order=2, rng=jtu.rand_default(), - dtypes=[onp.float32, onp.complex64]), - GradTestSpec(lax.abs, nargs=1, order=2, rng=jtu.rand_positive(), - dtypes=[onp.float64, onp.complex64]), - GradTestSpec(lax.pow, nargs=2, order=2, rng=jtu.rand_positive(), - dtypes=[onp.float64, onp.complex64]), - - GradTestSpec(lax.add, nargs=2, order=2, rng=jtu.rand_default(), - dtypes=[onp.float64, onp.complex64]), - GradTestSpec(lax.sub, nargs=2, order=2, rng=jtu.rand_default(), - dtypes=[onp.float64, onp.complex64]), - GradTestSpec(lax.mul, nargs=2, order=2, rng=jtu.rand_default(), - dtypes=[onp.float64, onp.complex64]), - GradTestSpec(lax.div, nargs=2, order=1, rng=jtu.rand_not_small(), - dtypes=[onp.float64, onp.complex64]), - - GradTestSpec(lax.max, nargs=2, order=2, rng=jtu.rand_some_equal(), - dtypes=[onp.float64]), - GradTestSpec(lax.min, nargs=2, order=2, rng=jtu.rand_some_equal(), - dtypes=[onp.float64]), + grad_test_spec(lax.atan2, nargs=2, order=2, rng=jtu.rand_default(), + dtypes=[onp.float64]), + + grad_test_spec(lax.erf, nargs=1, order=2, rng=jtu.rand_small(), + dtypes=[onp.float64]), + grad_test_spec(lax.erfc, nargs=1, order=2, rng=jtu.rand_small(), + dtypes=[onp.float64]), + grad_test_spec(lax.erf_inv, nargs=1, order=2, rng=jtu.rand_small(), + dtypes=[onp.float64]), + # grad_test_spec(lax.lgamma, nargs=1, order=2, rng=jtu.rand_small(), + # dtypes=[onp.float64]), # TODO(mattjj): enable + + grad_test_spec(lax.real, nargs=1, order=2, rng=jtu.rand_default(), + dtypes=[onp.complex64]), + # grad_test_spec(lax.imag, nargs=1, order=2, rng=jtu.rand_default(), + # dtypes=[onp.complex64]), # TODO(mattjj): enable + # grad_test_spec(lax.complex, nargs=2, order=2, rng=jtu.rand_default(), + # dtypes=[onp.float32]), # TODO(mattjj): enable + grad_test_spec(lax.conj, nargs=1, order=2, rng=jtu.rand_default(), + dtypes=[onp.float32, onp.complex64]), + grad_test_spec(lax.abs, nargs=1, order=2, rng=jtu.rand_positive(), + dtypes=[onp.float64, onp.complex64]), + grad_test_spec(lax.pow, nargs=2, order=2, rng=jtu.rand_positive(), + dtypes=[onp.float64, onp.complex64]), + + grad_test_spec(lax.add, nargs=2, order=2, rng=jtu.rand_default(), + dtypes=[onp.float64, onp.complex64]), + grad_test_spec(lax.sub, nargs=2, order=2, rng=jtu.rand_default(), + dtypes=[onp.float64, onp.complex64]), + grad_test_spec(lax.mul, nargs=2, order=2, rng=jtu.rand_default(), + dtypes=[onp.float64, onp.complex64]), + grad_test_spec(lax.div, nargs=2, order=1, rng=jtu.rand_not_small(), + dtypes=[onp.float64, onp.complex64]), + + grad_test_spec(lax.max, nargs=2, order=2, rng=jtu.rand_default(), + dtypes=[onp.float64]), + grad_test_spec(lax.min, nargs=2, order=2, rng=jtu.rand_default(), + dtypes=[onp.float64]), + # TODO(mattjj): make 
some-equal checks more robust, enable second-order + # grad_test_spec(lax.max, nargs=2, order=1, rng=jtu.rand_some_equal(), + # dtypes=[onp.float64], name="MaxSomeEqual"), + # grad_test_spec(lax.min, nargs=2, order=1, rng=jtu.rand_some_equal(), + # dtypes=[onp.float64], name="MinSomeEqual"), ] -def check_grads_bilinear(f, args, order, atol=None, rtol=None): +def check_grads_bilinear(f, args, order, + modes=["fwd", "rev"], atol=None, rtol=None): # Can use large eps to make up for numerical inaccuracies since the op is # bilinear (relying on the fact that we only check one arg at a time) lhs, rhs = args - check_grads(lambda lhs: f(lhs, rhs), (lhs,), order, atol, rtol, eps=1.) - check_grads(lambda rhs: f(lhs, rhs), (rhs,), order, atol, rtol, eps=1.) + check_grads(lambda lhs: f(lhs, rhs), (lhs,), order, + modes=modes, atol=atol, rtol=rtol, eps=1.) + check_grads(lambda rhs: f(lhs, rhs), (rhs,), order, + modes=modes, atol=atol, rtol=rtol, eps=1.) class LaxAutodiffTest(jtu.JaxTestCase): @@ -1489,7 +1499,7 @@ class LaxAutodiffTest(jtu.JaxTestCase): @parameterized.named_parameters(itertools.chain.from_iterable( jtu.cases_from_list( {"testcase_name": jtu.format_test_name_suffix( - rec.op.__name__, shapes, itertools.repeat(dtype)), + rec.name, shapes, itertools.repeat(dtype)), "op": rec.op, "rng": rec.rng, "shapes": shapes, "dtype": dtype, "order": rec.order} for shape_group in compatible_shapes @@ -1502,7 +1512,7 @@ def testOpGrad(self, op, rng, shapes, dtype, order): raise SkipTest("pow grad imprecise on tpu") tol = 1e-1 if num_float_bits(dtype) == 32 else None args = tuple(rng(shape, dtype) for shape in shapes) - check_grads(op, args, order, tol, tol) + check_grads(op, args, order, ["fwd", "rev"], tol, tol) @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_from_dtype={}_to_dtype={}".format( @@ -1514,8 +1524,7 @@ def testOpGrad(self, op, rng, shapes, dtype, order): def testConvertElementTypeGrad(self, from_dtype, to_dtype, rng): args = (rng((2, 3), from_dtype),) convert_element_type = lambda x: lax.convert_element_type(x, to_dtype) - check_grads(convert_element_type, args, 1, 1e-3, 1e-3, 1e-3) - check_grads(convert_element_type, args, 2, 1e-3, 1e-3, 1e-3) + check_grads(convert_element_type, args, 2, ["fwd", "rev"], 1e-3, 1e-3, 1e-3) @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_min_shape={}_operand_shape={}_max_shape={}".format( @@ -1536,7 +1545,7 @@ def testClampGrad(self, min_shape, operand_shape, max_shape, dtype, rng): shapes = [min_shape, operand_shape, max_shape] min, operand, max = (rng(shape, dtype) for shape in shapes) min, max = onp.minimum(min, max), onp.maximum(min, max) # broadcast - check_grads(lax.clamp, (min, operand, max), 2, tol, tol, tol) + check_grads(lax.clamp, (min, operand, max), 2, ["fwd", "rev"], tol, tol, tol) @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_dim={}_baseshape=[{}]_dtype={}_narrs={}".format( @@ -1555,7 +1564,7 @@ def testConcatenateGrad(self, dim, base_shape, dtype, num_arrs, rng): for size, _ in zip(itertools.cycle([3, 1, 4]), range(num_arrs))] operands = tuple(rng(shape, dtype) for shape in shapes) concatenate = lambda *args: lax.concatenate(args, dim) - check_grads(concatenate, operands, 2, tol, tol, tol) + check_grads(concatenate, operands, 2, ["fwd", "rev"], tol, tol, tol) @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": @@ -1578,7 +1587,8 @@ def testConvGrad(self, lhs_shape, rhs_shape, dtype, strides, padding, rng): lhs = rng(lhs_shape, dtype) rhs = 
rng(rhs_shape, dtype) conv = partial(lax.conv, window_strides=strides, padding=padding) - check_grads_bilinear(conv, (lhs, rhs), order=2, atol=1e-2, rtol=1e-2) + check_grads_bilinear(conv, (lhs, rhs), order=2, modes=["fwd", "rev"], + atol=1e-2, rtol=1e-2) @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": @@ -1611,7 +1621,8 @@ def testConvWithGeneralPaddingGrad(self, lhs_shape, rhs_shape, dtype, strides, rhs = rng(rhs_shape, dtype) conv = partial(lax.conv_with_general_padding, window_strides=strides, padding=padding, lhs_dilation=lhs_dil, rhs_dilation=rhs_dil) - check_grads_bilinear(conv, (lhs, rhs), order=2, atol=1e-2, rtol=1e-2) + check_grads_bilinear(conv, (lhs, rhs), order=2, modes=["fwd", "rev"], + atol=1e-2, rtol=1e-2) @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": @@ -1654,7 +1665,8 @@ def testConvGeneralDilatedGrad(self, lhs_shape, rhs_shape, dtype, strides, conv = partial(lax.conv_general_dilated, window_strides=strides, padding=padding, lhs_dilation=lhs_dil, rhs_dilation=rhs_dil, dimension_numbers=dimension_numbers) - check_grads_bilinear(conv, (lhs, rhs), order=2, atol=tol, rtol=tol) + check_grads_bilinear(conv, (lhs, rhs), order=2, modes=["fwd", "rev"], + atol=tol, rtol=tol) @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_lhs_shape={}_rhs_shape={}".format( @@ -1670,7 +1682,8 @@ def testDotGrad(self, lhs_shape, rhs_shape, dtype, rng): tol = 1e-1 if num_float_bits(dtype) == 32 else 1e-3 lhs = rng(lhs_shape, dtype) rhs = rng(rhs_shape, dtype) - check_grads_bilinear(lax.dot, (lhs, rhs), order=2, atol=tol, rtol=tol) + check_grads_bilinear(lax.dot, (lhs, rhs), order=2, modes=["fwd", "rev"], + atol=tol, rtol=tol) @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": @@ -1694,7 +1707,8 @@ def testDotGeneralContractAndBatchGrads(self, lhs_shape, rhs_shape, dtype, lhs = rng(lhs_shape, dtype) rhs = rng(rhs_shape, dtype) dot_general = partial(lax.dot_general, dimension_numbers=dimension_numbers) - check_grads_bilinear(dot_general, (lhs, rhs), order=2, atol=tol, rtol=tol) + check_grads_bilinear(dot_general, (lhs, rhs), order=2, modes=["fwd", "rev"], + atol=tol, rtol=tol) @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_shape={}_dtype={}_broadcast_sizes={}".format( @@ -1709,7 +1723,7 @@ def testBroadcastGrad(self, shape, dtype, broadcast_sizes, rng): tol = 1e-2 if onp.finfo(dtype).bits == 32 else None args = (rng(shape, dtype),) broadcast = lambda x: lax.broadcast(x, broadcast_sizes) - check_grads(broadcast, args, 2, tol, tol, tol) + check_grads(broadcast, args, 2, ["fwd", "rev"], tol, tol, tol) @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_inshape={}_outshape={}_bcdims={}".format( @@ -1729,7 +1743,7 @@ def testBroadcastInDimGrad(self, inshape, dtype, outshape, dimensions, rng): tol = 1e-2 if onp.finfo(dtype).bits == 32 else None operand = rng(inshape, dtype) broadcast_in_dim = lambda x: lax.broadcast_in_dim(x, outshape, dimensions) - check_grads(broadcast_in_dim, (operand,), 2, tol, tol, tol) + check_grads(broadcast_in_dim, (operand,), 2, ["fwd", "rev"], tol, tol, tol) @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_inshape={}_outshape={}".format( @@ -1746,7 +1760,7 @@ def testReshapeGrad(self, arg_shape, out_shape, dtype, rng): tol = 1e-2 if onp.finfo(dtype).bits == 32 else None operand = rng(arg_shape, dtype) reshape = lambda x: lax.reshape(x, out_shape) - check_grads(reshape, (operand,), 2, tol, tol, tol) + check_grads(reshape, 
(operand,), 2, ["fwd", "rev"], tol, tol, tol) @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_inshape={}_pads={}" @@ -1760,12 +1774,12 @@ def testPadGrad(self, shape, dtype, pads, rng): operand = rng(shape, dtype) pad = lambda operand: lax.pad(operand, onp.array(0, dtype), pads) - check_grads(pad, (operand,), 2, tol, tol, tol) + check_grads(pad, (operand,), 2, ["fwd", "rev"], tol, tol, tol) operand = rng(shape, dtype) padding_value = onp.array(0., dtype) pad = lambda operand, padding_value: lax.pad(operand, padding_value, pads) - check_grads(pad, (operand, padding_value), 2, tol, tol, tol) + check_grads(pad, (operand, padding_value), 2, ["fwd", "rev"], tol, tol, tol) def testReverseGrad(self): rev = lambda operand: lax.rev(operand, dimensions) @@ -1792,7 +1806,7 @@ def testSelectGrad(self, pred_shape, arg_shape, dtype, rng): on_true = rng(arg_shape, dtype) on_false = rng(arg_shape, dtype) select = lambda on_true, on_false: lax.select(pred, on_true, on_false) - check_grads(select, (on_true, on_false), 2, tol, tol, tol) + check_grads(select, (on_true, on_false), 2, ["fwd", "rev"], tol, tol, tol) @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": @@ -1818,7 +1832,7 @@ def testSliceGrad(self, shape, dtype, starts, limits, strides, rng): tol = 1e-2 if onp.finfo(dtype).bits == 32 else None operand = rng(shape, dtype) slice = lambda x: lax.slice(x, starts, limits, strides) - check_grads(slice, (operand,), 2, tol, tol, tol) + check_grads(slice, (operand,), 2, ["fwd", "rev"], tol, tol, tol) @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_shape={}_start_indices={}_size_indices={}".format( @@ -1838,7 +1852,7 @@ def testDynamicSliceGrad(self, shape, dtype, start_indices, size_indices, tol = 1e-2 if onp.finfo(dtype).bits == 32 else None operand = rng(shape, dtype) dynamic_slice = lambda x: lax.dynamic_slice(x, start_indices, size_indices) - check_grads(dynamic_slice, (operand,), 2, tol, tol, tol) + check_grads(dynamic_slice, (operand,), 2, ["fwd", "rev"], tol, tol, tol) @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_shape={}_start_indices={}_update_shape={}".format( @@ -1861,13 +1875,13 @@ def testDynamicUpdateSliceGrad(self, shape, dtype, start_indices, start_indices = onp.array(start_indices) dus = lambda x, y: lax.dynamic_update_slice(x, y, start_indices) - check_grads(dus, (operand, update), 2, tol, tol, tol) + check_grads(dus, (operand, update), 2, ["fwd", "rev"], tol, tol, tol) dus = lambda x: lax.dynamic_update_slice(x, update, start_indices) - check_grads(dus, (operand,), 2, tol, tol, tol) + check_grads(dus, (operand,), 2, ["fwd", "rev"], tol, tol, tol) dus = lambda y: lax.dynamic_update_slice(operand, y, start_indices) - check_grads(dus, (update,), 2, tol, tol, tol) + check_grads(dus, (update,), 2, ["fwd", "rev"], tol, tol, tol) @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_shape={}_perm={}".format( @@ -1885,7 +1899,7 @@ def testTransposeGrad(self, shape, dtype, perm, rng): tol = 1e-2 if onp.finfo(dtype).bits == 32 else None operand = rng(shape, dtype) transpose = lambda x: lax.transpose(x, perm) - check_grads(transpose, (operand,), 2, tol, tol, tol) + check_grads(transpose, (operand,), 2, ["fwd", "rev"], tol, tol, tol) @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_op={}_inshape={}_reducedims={}" @@ -1914,7 +1928,7 @@ def testReduceGrad(self, op, init_val, shape, dtype, dims, rng): operand = rng(shape, dtype) init_val = onp.asarray(init_val, 
dtype=dtype) reduce = lambda operand: lax.reduce(operand, init_val, op, dims) - check_grads(reduce, (operand,), 1, tol, tol) + check_grads(reduce, (operand,), 1, ["fwd", "rev"], tol, tol) @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_op={}_dtype={}_padding={}" @@ -1965,7 +1979,7 @@ def fun(operand): msg="test requires operand elements to be unique.") jtu.check_vjp(fun, partial(api.vjp, fun), (operand,), 1e-2, 1e-2, 1e-2) if test_gradients: - check_grads(fun, (operand,), 3, 1e-2, 1e-2, 1e-2) + check_grads(fun, (operand,), 3, ["fwd", "rev"], 1e-2, 1e-2, 1e-2) # pylint: enable=cell-var-from-loop # TODO(b/205052657): enable more tests when supported @@ -1981,7 +1995,7 @@ def testSortGrad(self, shape, dtype, axis, rng): tol = 1e-2 if onp.finfo(dtype).bits == 32 else None operand = rng(shape, dtype) sort = lambda x: lax.sort(x, axis) - check_grads(sort, (operand,), 2, tol, tol, tol) + check_grads(sort, (operand,), 2, ["fwd", "rev"], tol, tol, tol) # TODO(b/205052657): enable more tests when supported @parameterized.named_parameters(jtu.cases_from_list( @@ -2009,7 +2023,7 @@ def args_maker(): keys, values = args_maker() fun = lambda keys, values: lax.sort_key_val(keys, values, axis) - check_grads(fun, (keys, values), 2, 1e-2, 1e-2, 1e-2) + check_grads(fun, (keys, values), 2, ["fwd", "rev"], 1e-2, 1e-2, 1e-2) @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_shape={}_idxs={}_axes={}".format( @@ -2027,7 +2041,9 @@ def testIndexTakeGrad(self, shape, dtype, idxs, axes, rng): idxs = tuple(rng(e.shape, e.dtype) for e in idxs) src = rng(shape, dtype) index_take = lambda src: lax.index_take(src, idxs, axes) - check_grads(index_take, (src,), 2, 1e-2, 1e-2, 1) + check_grads(index_take, (src,), 2, ["fwd"], 1e-2, 1e-2, 1) + # TODO(mattjj): fix rev mode failures here! + # check_grads(index_take, (src,), 2, ["fwd", "rev"], 1e-2, 1e-2, 1) @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_shape={}_idxs={}_dnums={}_slice_sizes={}".format( @@ -2054,7 +2070,7 @@ def testGatherGrad(self, shape, dtype, idxs, dnums, slice_sizes, rng, rng_idx): gather = lambda x: lax.gather(x, idxs, dimension_numbers=dnums, slice_sizes=slice_sizes) x = rng(shape, dtype) - check_grads(gather, (x,), 2, 1e-2, 1e-2, 1.) + check_grads(gather, (x,), 2, ["fwd", "rev"], 1e-2, 1e-2, 1.) @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_shape={}_idxs={}_update={}_dnums={}".format( @@ -2084,7 +2100,7 @@ def testScatterAddGrad(self, arg_shape, dtype, idxs, update_shape, dnums, rng, dimension_numbers=dnums) x = rng(arg_shape, dtype) y = rng(update_shape, dtype) - check_grads(scatter_add, (x, y), 2, 1e-2, 1e-2, 1.) + check_grads(scatter_add, (x, y), 2, ["fwd", "rev"], 1e-2, 1e-2, 1.) @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_shape={}_idxs={}_update={}_dnums={}".format( @@ -2113,7 +2129,7 @@ def testScatterGrad(self, arg_shape, dtype, idxs, update_shape, dnums, rng, scatter = lambda x, y: lax.scatter(x, idxs, y, dimension_numbers=dnums) x = rng(arg_shape, dtype) y = rng(update_shape, dtype) - check_grads(scatter, (x, y), 2, 1e-2, 1e-2, 1.) + check_grads(scatter, (x, y), 2, ["fwd", "rev"], 1e-2, 1e-2, 1.) 
def testStopGradient(self): def f(x): diff --git a/tests/linalg_test.py b/tests/linalg_test.py --- a/tests/linalg_test.py +++ b/tests/linalg_test.py @@ -499,8 +499,7 @@ def testLu(self, shape, dtype, rng): @jtu.skip_on_devices("gpu", "tpu") def testLuGrad(self, shape, dtype, rng): a = rng(shape, dtype) - - jtu.check_grads(jsp.linalg.lu, (a,), 2, rtol=1e-1) + jtu.check_grads(jsp.linalg.lu, (a,), 2, atol=5e-2, rtol=1e-1) @jtu.skip_on_devices("gpu", "tpu") def testLuBatching(self):
bug in scan higher-order autodiff
The test added in cf99673 exercises higher-order automatic differentiation through `lax.scan` and currently fails.
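For reference, the failing check reduces to the following standalone sketch, copied from the `testScanHigherOrderDifferentiation` regression test in the patch above (same `f`, `d`, and inputs; only the imports are added here):

```python
import jax.numpy as np
from jax import lax
from jax import test_util as jtu

d = 0.75

def f(c, a):
  # scan body: scalar carry c, one row a of the scanned array, scalar output b
  b = np.sin(c * np.sum(np.cos(d * a)))
  c = 0.9 * np.cos(d * np.sum(np.sin(c * a)))
  return c, b

as_ = np.arange(6.).reshape((3, 2))
c = 1.

# exercise second-order derivatives in both forward and reverse mode,
# exactly as the new test does
jtu.check_grads(lambda c: lax.scan(f, c, as_), (c,), modes=["fwd", "rev"], order=2)
```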
2019-05-22T01:10:06
google/jax
752
google__jax-752
[ "751" ]
42ea9fef4203d5acd73e732dbe0e4d8672e81d17
diff --git a/jax/lax/lax.py b/jax/lax/lax.py --- a/jax/lax/lax.py +++ b/jax/lax/lax.py @@ -662,6 +662,7 @@ def scatter(operand, scatter_indices, updates, dimension_numbers): def index_take(src, idxs, axes): indices = concatenate([reshape(i, [i.shape[0], 1]) for i in idxs], 1) + indices = indices % onp.array([src.shape[ax] for ax in axes]) slice_sizes = list(src.shape) for ax in axes: slice_sizes[ax] = 1
diff --git a/tests/lax_test.py b/tests/lax_test.py --- a/tests/lax_test.py +++ b/tests/lax_test.py @@ -2038,12 +2038,9 @@ def args_maker(): ] for rng in [jtu.rand_default()])) def testIndexTakeGrad(self, shape, dtype, idxs, axes, rng): - idxs = tuple(rng(e.shape, e.dtype) for e in idxs) src = rng(shape, dtype) index_take = lambda src: lax.index_take(src, idxs, axes) - check_grads(index_take, (src,), 2, ["fwd"], 1e-2, 1e-2, 1) - # TODO(mattjj): fix rev mode failures here! - # check_grads(index_take, (src,), 2, ["fwd", "rev"], 1e-2, 1e-2, 1) + check_grads(index_take, (src,), 2, ["fwd", "rev"], 1e-2, 1e-2, 1) @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_shape={}_idxs={}_dnums={}_slice_sizes={}".format(
fix higher-order rev-mode lax.index_take test failures
With the new-and-more-thorough `check_grads`, we started getting some failures in higher-order tests of `lax.index_take`. See the commented-out line in lax_test.py's testIndexTakeGrad.
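As an illustration of the check this change makes pass in reverse mode as well, here is a small standalone sketch; the concrete `src`, `idxs`, and `axes` values are illustrative stand-ins rather than the parameterized cases from the test suite:

```python
import numpy as onp
from jax import lax
from jax import test_util as jtu

rng = onp.random.RandomState(0)
src = rng.randn(5, 4).astype(onp.float32)  # example operand
idxs = (onp.array([0, 3, 1]),)             # one index array per gathered axis
axes = (0,)                                # take rows along the leading axis

index_take = lambda src: lax.index_take(src, idxs, axes)
# forward- and reverse-mode checks up to second order, as in the updated test
jtu.check_grads(index_take, (src,), 2, ["fwd", "rev"], 1e-2, 1e-2, 1.)
```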
2019-05-22T02:20:10
google/jax
754
google__jax-754
[ "746" ]
83a04c023b4a92b83f7a6a9c281c2017a2d460bb
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -649,6 +649,20 @@ def _reshape(a, newshape, order="C"): else: raise ValueError("Unexpected value for 'order' argument: {}.".format(order)) +def _reshape_method(a, *newshape, **kwargs): + order = kwargs.pop("order", "C") + if len(kwargs) == 1: + invalid_kwarg, = kwargs + msg = "'{}' is an invalid keyword argument for this function" + raise TypeError(msg.format(invalid_kwarg)) # same as NumPy error + elif kwargs: + invalid_kwargs = "'{}'".format("'".join(kwargs)) + msg = "{} are invalid keyword arguments for this function" + raise TypeError(msg.format(invalid_kwargs)) # different from NumPy error + if len(newshape) == 1 and not isinstance(newshape[0], int): + newshape = newshape[0] + return _reshape(a, newshape, order=order) + @_wraps(onp.ravel) def ravel(a, order="C"): @@ -2353,7 +2367,7 @@ def _swap_args(f): # Forward methods and properties using core.aval_method and core.aval_property: for method_name in _nondiff_methods + _diff_methods: setattr(ShapedArray, method_name, core.aval_method(globals()[method_name])) -setattr(ShapedArray, "reshape", core.aval_method(_reshape)) +setattr(ShapedArray, "reshape", core.aval_method(_reshape_method)) setattr(ShapedArray, "flatten", core.aval_method(ravel)) setattr(ShapedArray, "T", core.aval_property(transpose)) setattr(ShapedArray, "real", core.aval_property(real)) @@ -2367,7 +2381,7 @@ def _swap_args(f): setattr(DeviceArray, "__{}__".format(operator_name), function) for method_name in _nondiff_methods + _diff_methods: setattr(DeviceArray, method_name, globals()[method_name]) -setattr(DeviceArray, "reshape", _reshape) +setattr(DeviceArray, "reshape", _reshape_method) setattr(DeviceArray, "flatten", ravel) setattr(DeviceArray, "T", property(transpose)) setattr(DeviceArray, "real", property(real))
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -975,6 +975,25 @@ def testReshape(self, arg_shape, out_shape, dtype, order, rng): self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True) self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True) + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": "_inshape={}_outshape={}".format( + jtu.format_shape_dtype_string(arg_shape, dtype), + jtu.format_shape_dtype_string(out_shape, dtype)), + "arg_shape": arg_shape, "out_shape": out_shape, "dtype": dtype, + "rng": jtu.rand_default()} + for dtype in default_dtypes + for arg_shape, out_shape in [ + ((7, 0), (0, 42, 101)), + ((2, 1, 4), (-1,)), + ((2, 2, 4), (2, 8)) + ])) + def testReshapeMethod(self, arg_shape, out_shape, dtype, rng): + onp_fun = lambda x: onp.reshape(x, out_shape) + lnp_fun = lambda x: x.reshape(*out_shape) + args_maker = lambda: [rng(arg_shape, dtype)] + self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True) + self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True) + @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_inshape={}_expanddim={}".format( jtu.format_shape_dtype_string(arg_shape, dtype), dim), @@ -1522,6 +1541,9 @@ def testIssue728(self): assert lnp.allclose(lnp.eye(5000), onp.eye(5000)) self.assertEqual(0, onp.sum(lnp.eye(1050) - onp.eye(1050))) + def testIssue746(self): + lnp.arange(12).reshape(3, 4) # doesn't crash + if __name__ == "__main__": absltest.main()
DeviceArray.reshape(*shape) does not work NumPy supports both `.reshape(*shape)` and `.reshape(shape)`, but only the later works with jax.numpy: ``` >>> import jax.numpy as jnp >>> jnp.arange(12).reshape(3, 4) --------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-27-144a56eb5cb3> in <module>() ----> 1 jnp.arange(12).reshape(3, 4) /usr/local/lib/python3.6/dist-packages/jax/numpy/lax_numpy.py in _reshape(a, newshape, order) 638 def _reshape(a, newshape, order="C"): 639 dummy_val = onp.broadcast_to(0, shape(a)) # zero strides --> 640 computed_newshape = onp.reshape(dummy_val, newshape).shape 641 642 if order == "C": /usr/local/lib/python3.6/dist-packages/numpy/core/fromnumeric.py in reshape(a, newshape, order) 290 [5, 6]]) 291 """ --> 292 return _wrapfunc(a, 'reshape', newshape, order=order) 293 294 /usr/local/lib/python3.6/dist-packages/numpy/core/fromnumeric.py in _wrapfunc(obj, method, *args, **kwds) 54 def _wrapfunc(obj, method, *args, **kwds): 55 try: ---> 56 return getattr(obj, method)(*args, **kwds) 57 58 # An AttributeError occurs if the object does not have ValueError: cannot reshape array of size 12 into shape (3,) ```
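With the `_reshape_method` wrapper added in the patch above, both calling conventions are accepted; a short sketch of the now-supported usage, mirroring the `testIssue746` regression test (the extra `order` line is just to show that keyword arguments still pass through):

```python
import jax.numpy as jnp

x = jnp.arange(12)
a = x.reshape(3, 4)              # varargs form: previously raised the ValueError above
b = x.reshape((3, 4))            # tuple form: worked before and still works
c = x.reshape(3, 4, order="C")   # the 'order' keyword still passes through
assert a.shape == b.shape == c.shape == (3, 4)
```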
2019-05-22T04:39:11
google/jax
755
google__jax-755
[ "716" ]
ff481472de0f2544e1855a41a500f078f2738986
diff --git a/jax/interpreters/ad.py b/jax/interpreters/ad.py --- a/jax/interpreters/ad.py +++ b/jax/interpreters/ad.py @@ -557,9 +557,6 @@ def map_transpose(primitive, params, jaxpr, consts, freevar_vals, args, ct): freevar_cts = tree_map(lambda x: x.sum(0), freevar_cts) return cts_out, freevar_cts -def jaxpr_as_fun(jaxpr, consts, *args): - return core.eval_jaxpr(jaxpr, consts, (), *args) - def get_nonzeros(tangent): if tangent is zero: return False @@ -599,7 +596,7 @@ def jvp_jaxpr(jaxpr, nonzeros, instantiate): # jaxpr :: d -> a -> b -> (c1, c2) # avals = (d, a, b) # f :: d -> a -> b -> (c1, c2) - f = wrap_init(partial(jaxpr_as_fun, jaxpr.jaxpr, jaxpr.literals)) + f = wrap_init(core.jaxpr_as_fun(jaxpr)) f_jvp, out_nonzeros = f_jvp_traceable(jvp(f, instantiate=instantiate), nonzeros) # f_jvp :: (d, d') -> (a, a') -> (b, b') -> ((c1, c1'), (c2, c2')) tangent_avals = map(partial(strip_zeros, core.AbstractTuple(()), core.AbstractTuple), diff --git a/jax/interpreters/batching.py b/jax/interpreters/batching.py --- a/jax/interpreters/batching.py +++ b/jax/interpreters/batching.py @@ -29,9 +29,9 @@ from ..abstract_arrays import ShapedArray, make_shaped_array, array_types, raise_to_shaped from ..ad_util import add_jaxvals_p, zeros_like_p, zeros_like_jaxval from ..linear_util import transformation, transformation_with_aux, wrap_init -from ..tree_util import register_pytree_node from ..util import unzip2, partial, safe_map from . import xla +from . import partial_eval as pe map = safe_map @@ -52,10 +52,10 @@ def batch_transform(size, in_dims, out_dim_dst, vals): with new_master(BatchTrace) as master: trace = BatchTrace(master, core.cur_sublevel()) in_tracers = map(partial(BatchTracer, trace), vals, in_dims) - out_tracer = yield in_tracers, {} - out_tracer = trace.full_raise(out_tracer) + ans = yield in_tracers, {} + out_tracer = trace.full_raise(ans) out_val, out_dim = out_tracer.val, out_tracer.batch_dim - del master + del master, out_tracer yield moveaxis(size, out_dim_dst, out_dim, out_val) @@ -75,6 +75,7 @@ class BatchTracer(Tracer): __slots__ = ['val', 'batch_dim'] def __init__(self, trace, val, batch_dim): + assert core.skip_checks or type(batch_dim) in (int, tuple) self.trace = trace self.val = val self.batch_dim = batch_dim @@ -365,3 +366,150 @@ def handle_scalar_broadcasting(nd, x, bdim): return x else: return x.reshape(x.shape + (1,) * (nd - x.ndim)) + + +# TODO(mattjj): try to de-duplicate utility functions with above + +def where_batched(bdim): + t = type(bdim) + if t is tuple: + return tuple(map(where_batched, bdim)) + elif t in (int, type(None)): + return bdim is not None + else: + raise TypeError(t) + +def bools_to_bdims(bdim, batched_indicator_tree): + t = type(batched_indicator_tree) + if t is tuple: + return tuple(map(partial(bools_to_bdims, bdim), batched_indicator_tree)) + elif t is bool: + return bdim if batched_indicator_tree else None + else: + raise TypeError(t) + +def instantiate_bdim(size, axis, instantiate, bdim, x): + """Instantiate or move a batch dimension to position `axis`. + + Ensures that `x` is at least as high on the batched lattice as `instantiate`. + + Args: + size: int, size of the axis to instantiate. + axis: int, where to instantiate or move the batch dimension. + instantiate: tuple-tree of booleans, where the tree structure is a prefix of + the tree structure in x, indicating whether to instantiate the batch + dimension at the corresponding subtree in x. 
+ bdim: tuple-tree of ints or NoneTypes, with identical tree structure to + `instantaite`, indicating where the batch dimension exists in the + corresponding subtree of x. + x: JaxType value on which to instantiate or move batch dimensions. + + Returns: + A new version of `x` with instantiated batch dimensions. + """ + def _inst(instantiate, bdim, x): + if type(instantiate) is tuple: + assert type(bdim) is tuple # instantiate at same granularity as bdim + return core.pack(map(_inst, instantiate, bdim, x)) + elif type(instantiate) is bool: + if not instantiate: + if bdim is None: + return x + elif type(bdim) is int: + return moveaxis2(bdim, axis, x) + else: + raise TypeError(type(bdim)) + else: + if bdim is None: + return broadcast2(size, axis, x) + elif type(bdim) is int: + return moveaxis2(bdim, axis, x) + else: + raise TypeError(type(bdim)) + else: + raise TypeError(type(instantiate)) + + return _inst(instantiate, bdim, x) + +def moveaxis2(src, dst, x): + if src == dst: + return x + else: + return _moveaxis2(src, dst, x, get_aval(x)) + +def _moveaxis2(src, dst, x, aval): + if type(aval) is JaxTuple: + return core.pack(map(partial(_moveaxis2, src, dst), x, aval)) + else: + perm = [i for i in range(onp.ndim(x)) if i != src] + perm.insert(dst, src) + return x.transpose(perm) + +def broadcast2(size, axis, x): + return _broadcast2(size, axis, x, get_aval(x)) + +def _broadcast2(size, axis, x, aval): + if type(aval) is JaxTuple: + return core.pack(map(partial(_broadcast2, size, axis), x, aval)) + else: + # see comment at the top of this section + if isinstance(x, onp.ndarray) or onp.isscalar(x): + return onp.broadcast_to(x, (size,) + onp.shape(x)) + else: + return x.broadcast((size,)) # should be a JAX arraylike + +def _promote_aval_rank(n, batched, aval): + assert isinstance(aval, core.AbstractValue) + if isinstance(aval, core.AbstractTuple): + t = type(batched) + if t is tuple: + return core.AbstractTuple(map(partial(_promote_aval_rank, n), batched, aval)) + elif t is bool: + if batched: + return core.AbstractTuple(map(partial(_promote_aval_rank, n, batched), aval)) + else: + return aval + else: + raise TypeError(t) + else: + if batched: + return ShapedArray((n,) + aval.shape, aval.dtype) + else: + return aval + +def batch_jaxpr(jaxpr, size, is_batched, instantiate): + f = wrap_init(core.jaxpr_as_fun(jaxpr)) + f_batched, where_out_batched = batched_traceable(f, size, is_batched, instantiate) + in_avals = map(partial(_promote_aval_rank, size), is_batched, jaxpr.in_avals) + in_pvals = [pe.PartialVal((aval, core.unit)) for aval in in_avals] + jaxpr_out, pval_out, literals_out = pe.trace_to_jaxpr( + f_batched, in_pvals, instantiate=True) + out_aval, _ = pval_out + jaxpr_out = core.TypedJaxpr(jaxpr_out, literals_out, in_avals, out_aval) + return jaxpr_out, where_out_batched() + +@transformation_with_aux +def batched_traceable(size, is_batched, instantiate, *vals): + in_dims = bools_to_bdims(0, is_batched) + with new_master(BatchTrace) as master: + trace = BatchTrace(master, core.cur_sublevel()) + in_tracers = map(partial(BatchTracer, trace), vals, in_dims) + ans = yield in_tracers, {} + out_tracer = trace.full_raise(ans) + out_val, out_dim = out_tracer.val, out_tracer.batch_dim + del master, out_tracer + out_val = instantiate_bdim(size, 0, instantiate, out_dim, out_val) + yield out_val, _binary_lattice_join(where_batched(out_dim), instantiate) + +def _binary_lattice_join(a, b): + t = (type(a), type(b)) + if t == (tuple, tuple): + return tuple(map(_binary_lattice_join, a, b)) + elif t == (tuple, 
bool): + return tuple(map(_binary_lattice_join, a, (b,) * len(a))) + elif t == (bool, tuple): + return tuple(map(_binary_lattice_join, (a,) * len(b), b)) + elif t == (bool, bool): + return a or b + else: + raise TypeError((type(a), type(b))) diff --git a/jax/lax/lax_control_flow.py b/jax/lax/lax_control_flow.py --- a/jax/lax/lax_control_flow.py +++ b/jax/lax/lax_control_flow.py @@ -35,7 +35,7 @@ from jax.interpreters import xla from jax.interpreters import ad from jax.util import partial, unzip2, safe_map, safe_zip -from jax.tree_util import build_tree, tree_unflatten +from jax.tree_util import build_tree, tree_unflatten, tree_map from jax import ad_util map = safe_map @@ -236,6 +236,8 @@ def _while_loop_batching_rule(batched_args, batch_dims, cond_consts, # traceable Python functions using `core.eval_jaxpr`. Then we can batch them # using `batching.batch_transform` (the transform underlying `api.vmap`). This # code also avoids broadcasting `cond_tracer_consts` and `body_tracer_consts`. + # TODO(mattjj): Revise this using scan machinery (and fixed-point the loop + # carry instead of lifting it all the way!) init_val, cond_tracer_consts, body_tracer_consts = batched_args init_val_bd, cond_tracer_consts_bd, body_tracer_consts_bd = batch_dims @@ -501,6 +503,8 @@ def _update_arrays(i, aval, xs, x): else: return lax.dynamic_update_index_in_dim(xs, x[None, ...], i, axis=0) +class FixedPointError(Exception): pass + def scan(f, init, xs): """Scan a function over leading array axes while carrying along state. @@ -814,7 +818,6 @@ def _move_stuff_and_add_add(typed_jaxpr): def _add_any_eqn(tot, a, b): return core.JaxprEqn([a, b], [tot], ad_util.add_jaxvals_p, (), False, False, {}) - # transpose_jaxpr :: (res -> a -> b) -> (res -> CT b -> CT a) def _transpose_jaxpr(jaxpr): assert len(jaxpr.in_avals) == 2 @@ -837,7 +840,42 @@ def _make_typed_jaxpr(traceable, in_avals): return core.TypedJaxpr(jaxpr, consts, in_avals, out_aval) -class FixedPointError(Exception): pass +def _scan_batching_rule(batched_args, batch_dims, forward, length, jaxpr): + consts, init, xs = batched_args + consts_bdim, init_bdim, xs_bdim = batch_dims + + sizes = lax._reduce(set.union, map(batching.dimsize, batch_dims, batched_args)) + size = sizes.pop() + assert not sizes + + consts_batched = batching.where_batched(consts_bdim) + init_batched = batching.where_batched(init_bdim) + xs_batched = batching.where_batched(xs_bdim) + + carry_batched = init_batched + for _ in range(1000): + which_batched = (consts_batched, carry_batched, xs_batched) + jaxpr_batched, batched_out = batching.batch_jaxpr(jaxpr, size, which_batched, + instantiate=(carry_batched, False)) + carry_batched_out, ys_batched = batched_out + if carry_batched_out == carry_batched: + break + else: + carry_batched = _binary_lattice_join(carry_batched_out, carry_batched) + else: + raise FixedPointError + + consts_batched = batching.instantiate_bdim(size, 0, consts_batched, consts_bdim, consts) + init_batched = batching.instantiate_bdim(size, 0, carry_batched, init_bdim, init) + xs_batched = batching.instantiate_bdim(size, 1, xs_batched, xs_bdim, xs) + + carry_out, ys = scan_p.bind( + consts_batched, init_batched, xs_batched, + forward=forward, length=length, jaxpr=jaxpr_batched) + + carry_out_bdim = batching.bools_to_bdims(0, carry_batched) + ys_bdim = batching.bools_to_bdims(1, ys_batched) + return core.pack((carry_out, ys)), (carry_out_bdim, ys_bdim) # We use a custom bind for scan just to add some error checks @@ -858,3 +896,4 @@ def scan_bind(consts, init, xs, forward, 
length, jaxpr): ad.primitive_transposes[scan_p] = _scan_transpose pe.custom_partial_eval_rules[scan_p] = _scan_partial_eval xla.translations[scan_p] = partial(xla.lower_fun, _scan_impl) +batching.primitive_batchers[scan_p] = _scan_batching_rule
diff --git a/tests/batching_test.py b/tests/batching_test.py --- a/tests/batching_test.py +++ b/tests/batching_test.py @@ -889,66 +889,6 @@ def f(R): H = hessian(f)(R) # don't crash on UnshapedArray - def testWhileLoop(self): - def fun(x): - return lax.while_loop(lambda x: x < 3, lambda x: x + 2, x) - - ans = vmap(fun)(onp.array([0, 1, 2, 3])) - expected = onp.array([4, 3, 4, 3]) - self.assertAllClose(ans, expected, check_dtypes=False) - - fun = jit(fun) - ans = vmap(fun)(onp.array([0, 1, 2, 3])) - expected = onp.array([4, 3, 4, 3]) - self.assertAllClose(ans, expected, check_dtypes=False) - - def testWhileLoopCondConstsBatched(self): - def fun(x, y): - return lax.while_loop(lambda x: x < y, lambda x: x + 2, x) - - ans = vmap(fun, in_axes=(None, 0))(0, onp.array([2, 3])) - expected = onp.array([2, 4]) - self.assertAllClose(ans, expected, check_dtypes=False) - - def testWhileLoopBodyConstsBatched(self): - def fun(x, y): - return lax.while_loop(lambda x: x < 3, lambda x: x + y, x) - - ans = vmap(fun, in_axes=(None, 0))(0, onp.array([2, 3])) - expected = onp.array([4, 3]) - self.assertAllClose(ans, expected, check_dtypes=False) - - def testWhileLoopTuple(self): - def cond_fun(loop_carry): - x, y = loop_carry - return x + y < 5 - - def body_fun(loop_carry): - x, y = loop_carry - x = x + 1 - return x, y - - def fun(x, y): - return lax.while_loop(cond_fun, body_fun, (x, y)) - - ans = vmap(fun)(onp.array([0, 0]), onp.array([1, 2])) - expected = (onp.array([4, 3]), onp.array([1, 2])) - self.assertAllClose(ans, expected, check_dtypes=False) - - def testForiLoop(self): - def body_fun(i, loop_carry): - x, y = loop_carry - x = x + 1 - y = y + 2 - return x, y - - def fun(x): - return lax.fori_loop(0, 10, body_fun, (x, 0)) - - ans = vmap(fun)(onp.array([0, 1])) - expected = (onp.array([10, 11]), onp.array([20, 20])) - self.assertAllClose(ans, expected, check_dtypes=False) - def testIssue489(self): def f(key): def body_fn(uk): diff --git a/tests/lax_control_flow_test.py b/tests/lax_control_flow_test.py --- a/tests/lax_control_flow_test.py +++ b/tests/lax_control_flow_test.py @@ -18,6 +18,7 @@ import collections from functools import partial +import itertools from absl.testing import absltest from absl.testing import parameterized @@ -29,6 +30,7 @@ from jax import core from jax import lax from jax import test_util as jtu +from jax.util import unzip2 import jax.numpy as np # scan tests use numpy def scan_reference(f, init, xs): @@ -234,6 +236,66 @@ def body_fun(state): self.assertAllClose(cfun(x, num), onp.sum(x[:num]), check_dtypes=False) self.assertAllClose(cfun(x, num), onp.sum(x[:num]), check_dtypes=False) + def testWhileLoopBatched(self): + def fun(x): + return lax.while_loop(lambda x: x < 3, lambda x: x + 2, x) + + ans = api.vmap(fun)(onp.array([0, 1, 2, 3])) + expected = onp.array([4, 3, 4, 3]) + self.assertAllClose(ans, expected, check_dtypes=False) + + fun = api.jit(fun) + ans = api.vmap(fun)(onp.array([0, 1, 2, 3])) + expected = onp.array([4, 3, 4, 3]) + self.assertAllClose(ans, expected, check_dtypes=False) + + def testWhileLoopCondConstsBatched(self): + def fun(x, y): + return lax.while_loop(lambda x: x < y, lambda x: x + 2, x) + + ans = api.vmap(fun, in_axes=(None, 0))(0, onp.array([2, 3])) + expected = onp.array([2, 4]) + self.assertAllClose(ans, expected, check_dtypes=False) + + def testWhileLoopBodyConstsBatched(self): + def fun(x, y): + return lax.while_loop(lambda x: x < 3, lambda x: x + y, x) + + ans = api.vmap(fun, in_axes=(None, 0))(0, onp.array([2, 3])) + expected = onp.array([4, 3]) + 
self.assertAllClose(ans, expected, check_dtypes=False) + + def testWhileLoopTupleBatched(self): + def cond_fun(loop_carry): + x, y = loop_carry + return x + y < 5 + + def body_fun(loop_carry): + x, y = loop_carry + x = x + 1 + return x, y + + def fun(x, y): + return lax.while_loop(cond_fun, body_fun, (x, y)) + + ans = api.vmap(fun)(onp.array([0, 0]), onp.array([1, 2])) + expected = (onp.array([4, 3]), onp.array([1, 2])) + self.assertAllClose(ans, expected, check_dtypes=False) + + def testForiLoopBatched(self): + def body_fun(i, loop_carry): + x, y = loop_carry + x = x + 1 + y = y + 2 + return x, y + + def fun(x): + return lax.fori_loop(0, 10, body_fun, (x, 0)) + + ans = api.vmap(fun)(onp.array([0, 1])) + expected = (onp.array([10, 11]), onp.array([20, 20])) + self.assertAllClose(ans, expected, check_dtypes=False) + def testForiLoopBasic(self): def count(num): def body_fun(i, tot): @@ -432,12 +494,12 @@ def cond(x): self.assertEqual(out, (7, 10)) @parameterized.named_parameters( - {"testcase_name": "jit_scan={}_jit_f={}".format(jit_scan, jit_f), + {"testcase_name": "_jit_scan={}_jit_f={}".format(jit_scan, jit_f), "jit_scan": jit_scan, "jit_f": jit_f} for jit_scan in [False, True] for jit_f in [False, True]) def testScanImpl(self, jit_scan, jit_f): - d = np.zeros(2) + d = np.array([1., 2.]) def f(c, a): assert a.shape == (3,) assert c.shape == (4,) @@ -461,12 +523,12 @@ def f(c, a): self.assertAllClose(ans, expected, check_dtypes=False) @parameterized.named_parameters( - {"testcase_name": "jit_scan={}_jit_f={}".format(jit_scan, jit_f), + {"testcase_name": "_jit_scan={}_jit_f={}".format(jit_scan, jit_f), "jit_scan": jit_scan, "jit_f": jit_f} for jit_scan in [False, True] for jit_f in [False, True]) def testScanJVP(self, jit_scan, jit_f): - d = np.zeros(2) + d = np.array([1., 2.]) def f(c, a): assert a.shape == (3,) assert c.shape == (4,) @@ -490,12 +552,12 @@ def f(c, a): self.assertAllClose(ans, expected, check_dtypes=False) @parameterized.named_parameters( - {"testcase_name": "jit_scan={}_jit_f={}".format(jit_scan, jit_f), + {"testcase_name": "_jit_scan={}_jit_f={}".format(jit_scan, jit_f), "jit_scan": jit_scan, "jit_f": jit_f} for jit_scan in [False, True] for jit_f in [False, True]) def testScanLinearize(self, jit_scan, jit_f): - d = np.zeros(2) + d = np.array([1., 2.]) def f(c, a): assert a.shape == (3,) assert c.shape == (4,) @@ -519,12 +581,12 @@ def f(c, a): self.assertAllClose(ans, expected, check_dtypes=False) @parameterized.named_parameters( - {"testcase_name": "jit_scan={}_jit_f={}".format(jit_scan, jit_f), + {"testcase_name": "_jit_scan={}_jit_f={}".format(jit_scan, jit_f), "jit_scan": jit_scan, "jit_f": jit_f} for jit_scan in [False, True] for jit_f in [False, True]) def testScanGrad(self, jit_scan, jit_f): - d = np.zeros(2) + d = np.ones(2) def f(c, a): assert a.shape == (3,) assert c.shape == (4,) @@ -551,9 +613,9 @@ def testScanRnn(self): r = npr.RandomState(0) n_in = 4 - n_hid = 3 - n_out = 2 - length = 5 + n_hid = 2 + n_out = 1 + length = 3 W_trans = r.randn(n_hid, n_hid + n_in) W_out = r.randn(n_out, n_hid + n_in) @@ -587,11 +649,18 @@ def loss(params, inputs, targets): # gradient evaluation doesn't crash api.grad(loss)(params, inputs, targets) - # gradient is zero in the right place - predictions = rnn(params, inputs) - ans = api.grad(loss)(params, inputs, predictions) - expected = (onp.zeros_like(W_trans), onp.zeros_like(W_out)) - self.assertAllClose(ans, expected, check_dtypes=False) + # gradient check passes + jtu.check_grads(loss, (params, inputs, targets), order=1) + 
+ # we can vmap to batch things + batch_size = 7 + batched_inputs = r.randn(batch_size, length, n_in) + batched_targets = r.randn(batch_size, length, n_out) + batched_loss = api.vmap(lambda x, y: loss(params, x, y)) + losses = batched_loss(batched_inputs, batched_targets) + expected = onp.stack(list(map(lambda x, y: loss(params, x, y), + batched_inputs, batched_targets))) + self.assertAllClose(losses, expected, check_dtypes=False) def testIssue711(self): # Tests reverse-mode differentiation through a scan for which the scanned @@ -649,6 +718,75 @@ def f(c, a): jtu.check_grads(lambda c, as_: lax.scan(f, c, as_), (c, as_), modes=["fwd", "rev"], order=2) + @parameterized.named_parameters( + {"testcase_name": "_jit_scan={}_jit_f={}_in_axes={}".format( + jit_scan, jit_f, in_axes), + "jit_scan": jit_scan, "jit_f": jit_f, "in_axes": in_axes} + for jit_scan in [False, True] + for jit_f in [False, True] + for in_axes in itertools.product([None, 0, 1], [None, 0, 1, 2]) + if in_axes != (None, None)) + def testScanVmap(self, jit_scan, jit_f, in_axes): + d = np.array([1., 2.]) + def f(c, a): + assert a.shape == (3,) + assert c.shape == (4,) + b = np.sum(np.sin(a)) + np.sum(np.sin(c)) + np.sum(np.sin(d)) + c = np.sin(c * b) + assert b.shape == () + return c, b + + if jit_f: + f = api.jit(f) + if jit_scan: + scan = api.jit(lax.scan, (0,)) + else: + scan = lax.scan + + as_shape = [5, 3] + c_shape = [4] + + c_bdim, as_bdim = in_axes + if c_bdim is not None: + c_shape.insert(c_bdim, 7) + if as_bdim is not None: + as_shape.insert(as_bdim, 7) + + r = onp.random.RandomState(0) + as_ = r.randn(*as_shape) + c = r.randn(*c_shape) + + ans = api.vmap(lambda c, as_: scan(f, c, as_), in_axes)(c, as_) + expected = api.vmap(lambda c, as_: scan_reference(f, c, as_), in_axes)(c, as_) + self.assertAllClose(ans, expected, check_dtypes=False) + + def testScanVmapTuples(self): + def f(c, a): + a1, a2 = a + c1, c2 = c + b = np.sum(np.cos(a1)) * np.sum(np.tan(c2 * a2)) + c = c1 * np.sin(np.sum(a1 * a2)), c2 * np.cos(np.sum(a1)) + return c, b + + in_axes = (0, (1, 2)) + + r = onp.random.RandomState(0) + as_ = (r.randn(3, 7), r.randn(3, 4, 7)) + c = (r.randn(7, 2), r.randn(7)) + + expected_c_out, expected_bs = [], [] + for i in range(7): + c_out, bs = lax.scan(f, (c[0][i], c[1][i]), (as_[0][:,i], as_[1][:,:,i])) + expected_c_out.append(c_out) + expected_bs.append(bs) + expected_c_out_0, expected_c_out_1 = unzip2(expected_c_out) + expected_c_out = (np.stack(expected_c_out_0), np.stack(expected_c_out_1)) + expected_bs = np.stack(expected_bs) + expected = expected_c_out, expected_bs + + ans = api.vmap(lambda c, as_: lax.scan(f, c, as_), in_axes)(c, as_) + self.assertAllClose(ans, expected, check_dtypes=False) + if __name__ == '__main__': absltest.main()
Please enable vmap(scan())
The addition of scan() is great! I anticipate its usage will be much wider if one can call vmap() over scanned functions. It's not yet implemented. ```NotImplementedError: Batching rule for 'scan' not implemented``` Thank you!
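For reference, with the batching rule added in this PR the requested composition works; a minimal sketch (the cumulative-sum body and the shapes below are illustrative, not taken from the PR's tests):

```python
import numpy as onp
from jax import lax, vmap

def cumsum(xs):
  # scan a running total along the leading axis of xs
  def step(carry, x):
    total = carry + x
    return total, total          # (new carry, per-step output)
  _, partials = lax.scan(step, 0., xs)
  return partials

batch = onp.arange(12.).reshape(4, 3)  # four independent length-3 sequences
out = vmap(cumsum)(batch)              # batch the whole scan in one call
# each row of `out` is the running sum of the corresponding row of `batch`
```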
2019-05-22T18:12:10
google/jax
759
google__jax-759
[ "757" ]
3815662abc59d156297ecdb658e38ea6d2f20e67
diff --git a/jax/lax/lax_control_flow.py b/jax/lax/lax_control_flow.py --- a/jax/lax/lax_control_flow.py +++ b/jax/lax/lax_control_flow.py @@ -19,6 +19,8 @@ from __future__ import division from __future__ import print_function +import operator + import numpy as onp from jax import api @@ -619,7 +621,7 @@ def _scan_jvp(primals, tangents, forward, length, jaxpr): jaxpr_jvp, nonzeros_out = ad.jvp_jaxpr(jaxpr, nonzeros, instantiate=(carry_nonzeros, False)) carry_nonzeros_out, ys_nonzeros = nonzeros_out - if carry_nonzeros_out == carry_nonzeros: + if _binary_lattice_eq(carry_nonzeros_out, carry_nonzeros): break else: carry_nonzeros = _binary_lattice_join(carry_nonzeros_out, carry_nonzeros) @@ -647,19 +649,23 @@ def _scan_jvp(primals, tangents, forward, length, jaxpr): carry_out_dot = ad.put_zeros(ad.TangentTuple, carry_nonzeros_out, carry_out_dot) return core.pack((carry_out, ys)), ad.TangentTuple((carry_out_dot, ys_dot)) -def _binary_lattice_join(a, b): +def _binary_lattice_fold(f, pack, a, b): + recur = partial(_binary_lattice_fold, f, pack) t = (type(a), type(b)) if t == (tuple, tuple): - return tuple(map(_binary_lattice_join, a, b)) + return pack(map(recur, a, b)) elif t == (tuple, bool): - return tuple(map(_binary_lattice_join, a, (b,) * len(a))) + return pack(map(recur, a, (b,) * len(a))) elif t == (bool, tuple): - return tuple(map(_binary_lattice_join, (a,) * len(b), b)) + return pack(map(recur, (a,) * len(b), b)) elif t == (bool, bool): - return a or b + return f(a, b) else: raise TypeError((type(a), type(b))) +_binary_lattice_join = partial(_binary_lattice_fold, operator.or_, tuple) +_binary_lattice_eq = partial(_binary_lattice_fold, operator.eq, all) + def _scan_partial_eval(trace, *tracers, **kwargs): jaxpr = kwargs.pop('jaxpr') @@ -675,7 +681,7 @@ def _scan_partial_eval(trace, *tracers, **kwargs): jaxpr_1, jaxpr_2, sc_out = pe.partial_eval_jaxpr(jaxpr, second_components, instantiate=(sc_carry, False)) sc_carry_out, sc_ys = sc_out - if sc_carry_out == sc_carry: + if _binary_lattice_eq(sc_carry_out, sc_carry): break else: sc_carry = _binary_lattice_join(sc_carry, sc_carry_out) @@ -858,7 +864,7 @@ def _scan_batching_rule(batched_args, batch_dims, forward, length, jaxpr): jaxpr_batched, batched_out = batching.batch_jaxpr(jaxpr, size, which_batched, instantiate=(carry_batched, False)) carry_batched_out, ys_batched = batched_out - if carry_batched_out == carry_batched: + if _binary_lattice_eq(carry_batched_out, carry_batched): break else: carry_batched = _binary_lattice_join(carry_batched_out, carry_batched)
diff --git a/tests/lax_control_flow_test.py b/tests/lax_control_flow_test.py --- a/tests/lax_control_flow_test.py +++ b/tests/lax_control_flow_test.py @@ -787,6 +787,26 @@ def f(c, a): ans = api.vmap(lambda c, as_: lax.scan(f, c, as_), in_axes)(c, as_) self.assertAllClose(ans, expected, check_dtypes=False) + def testIssue757(self): + # code from https://github.com/google/jax/issues/757 + def fn(a): + return np.cos(a) + + def loop(val): + iterations = 10 + def apply_carry(x, i): + return api.grad(fn, argnums=(0,))(x)[0], i + + final_val, _ = lax.scan( + apply_carry, + val, + np.arange(iterations) + ) + return final_val + + arg = 0.5 + print(api.jit(api.jacfwd(loop, argnums=(0,)))(arg)) + if __name__ == '__main__': absltest.main()
jit of vmapped scan results in fixed point error ``` python import time import jax import jax.numpy as jnp def fn(a): return jnp.cos(a) def loop(val): iterations = 10 def apply_carry(x, i): return jax.grad(fn, argnums=(0,))(x)[0], i final_val, _ = jax.lax.scan( apply_carry, val, jnp.arange(iterations) ) return final_val if __name__ == "__main__": arg = 0.5 print(loop(arg)) print(jax.grad(loop, argnums=(0,))(arg)) # okay print(jax.jacrev(loop, argnums=(0,))(arg)) # okay print(jax.jacfwd(loop, argnums=(0,))(arg)) # okay print(jax.jit(jax.jacfwd(loop, argnums=(0,)))(arg)) # error: ``` ``` File "/home/yutong/venv/lib/python3.6/site-packages/jax/lax/lax_control_flow.py", line 584, in scan forward=True, length=length, jaxpr=jaxpr) File "/home/yutong/venv/lib/python3.6/site-packages/jax/lax/lax_control_flow.py", line 890, in scan_bind forward=forward, length=length, jaxpr=jaxpr) File "/home/yutong/venv/lib/python3.6/site-packages/jax/core.py", line 120, in bind out_tracer = top_trace.process_primitive(self, tracers, kwargs) File "/home/yutong/venv/lib/python3.6/site-packages/jax/interpreters/ad.py", line 252, in process_primitive primal_out, tangent_out = jvp(primals_in, tangents_in, **params) File "/home/yutong/venv/lib/python3.6/site-packages/jax/lax/lax_control_flow.py", line 641, in _scan_jvp forward=forward, length=length, jaxpr=jaxpr_jvp) File "/home/yutong/venv/lib/python3.6/site-packages/jax/lax/lax_control_flow.py", line 890, in scan_bind forward=forward, length=length, jaxpr=jaxpr) File "/home/yutong/venv/lib/python3.6/site-packages/jax/core.py", line 120, in bind out_tracer = top_trace.process_primitive(self, tracers, kwargs) File "/home/yutong/venv/lib/python3.6/site-packages/jax/interpreters/batching.py", line 123, in process_primitive val_out, dim_out = batched_primitive(vals_in, dims_in, **params) File "/home/yutong/venv/lib/python3.6/site-packages/jax/lax/lax_control_flow.py", line 874, in _scan_batching_rule forward=forward, length=length, jaxpr=jaxpr_batched) File "/home/yutong/venv/lib/python3.6/site-packages/jax/lax/lax_control_flow.py", line 890, in scan_bind forward=forward, length=length, jaxpr=jaxpr) File "/home/yutong/venv/lib/python3.6/site-packages/jax/core.py", line 120, in bind out_tracer = top_trace.process_primitive(self, tracers, kwargs) File "/home/yutong/venv/lib/python3.6/site-packages/jax/interpreters/partial_eval.py", line 93, in process_primitive return partial_eval(self, *tracers, **params) File "/home/yutong/venv/lib/python3.6/site-packages/jax/lax/lax_control_flow.py", line 683, in _scan_partial_eval raise FixedPointError jax.lax.lax_control_flow.FixedPointError ```
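The patch above swaps the structural `==` convergence test in scan's fixed-point loops for a lattice-wise comparison; below is a standalone sketch of that helper (simplified from `_binary_lattice_fold` in the patch) and the kind of mismatch it tolerates, where a bare bool and a tuple of bools describe the same lattice point:

```python
import operator
from functools import partial

def binary_lattice_fold(f, pack, a, b):
  # recurse over tuple structure, broadcasting a bare bool against a tuple
  recur = partial(binary_lattice_fold, f, pack)
  t = (type(a), type(b))
  if t == (tuple, tuple):
    return pack(map(recur, a, b))
  elif t == (tuple, bool):
    return pack(map(recur, a, (b,) * len(a)))
  elif t == (bool, tuple):
    return pack(map(recur, (a,) * len(b), b))
  elif t == (bool, bool):
    return f(a, b)
  else:
    raise TypeError(t)

lattice_join = partial(binary_lattice_fold, operator.or_, tuple)
lattice_eq = partial(binary_lattice_fold, operator.eq, all)

carry = (True, True)   # summary tracked by the fixed-point loop, tuple-shaped
carry_out = True       # same information, reported as a bare bool
assert carry_out != carry                        # structural == never reports convergence
assert lattice_join(carry_out, carry) == carry   # even though the join has stabilized
assert lattice_eq(carry_out, carry)              # the lattice-wise test detects the fixed point
```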
These are fantastic test cases :) Thanks for writing these, and helping us to find and squash bugs! Thanks - bugs are best squashed when fresh
2019-05-22T20:34:56
google/jax
760
google__jax-760
[ "758" ]
a193b3592bba6cc62a7ad4076f37f2f18507a8d6
diff --git a/jax/interpreters/batching.py b/jax/interpreters/batching.py --- a/jax/interpreters/batching.py +++ b/jax/interpreters/batching.py @@ -252,14 +252,16 @@ def reducer_batcher(prim, batched_args, batch_dims, axes, **params): def add_batched(batched_args, batch_dims): bdx, bdy = batch_dims - xs, ys = batched_args - if bdx == bdy: - return add_jaxvals_p.bind(xs, ys), bdx + x, y = batched_args + x_aval, y_aval = map(get_aval, batched_args) + assert core.skip_checks or x_aval == y_aval + if bdx == bdy or x_aval == AbstractTuple(()): + return add_jaxvals_p.bind(x, y), bdx else: - sz = (dimsize(bdx, xs) | dimsize(bdy, ys)).pop() + sz = (dimsize(bdx, x) | dimsize(bdy, y)).pop() move_bdim = partial(bdim_at_front, broadcast_size=sz, force_broadcast=True) - xs, ys = map(move_bdim, batched_args, batch_dims) - return add_jaxvals_p.bind(xs, ys), 0 + x, y = map(move_bdim, batched_args, batch_dims) + return add_jaxvals_p.bind(x, y), 0 primitive_batchers[add_jaxvals_p] = add_batched def zeros_like_batched(batched_args, batch_dims): @@ -409,23 +411,22 @@ def instantiate_bdim(size, axis, instantiate, bdim, x): """ def _inst(instantiate, bdim, x): if type(instantiate) is tuple: - assert type(bdim) is tuple # instantiate at same granularity as bdim - return core.pack(map(_inst, instantiate, bdim, x)) + if type(bdim) is tuple: + return core.pack(map(_inst, instantiate, bdim, x)) + elif type(bdim) is int or bdim is None: + bdims = (bdim,) * len(instantiate) + return core.pack(map(_inst, instantiate, bdims, x)) + else: + raise TypeError(type(bdim)) elif type(instantiate) is bool: - if not instantiate: - if bdim is None: - return x - elif type(bdim) is int: - return moveaxis2(bdim, axis, x) - else: - raise TypeError(type(bdim)) + if bdim is None: + return broadcast2(size, axis, x) if instantiate else x + elif type(bdim) is int: + return moveaxis2(bdim, axis, x) + elif type(bdim) is tuple: + return pack(map(partial(_inst, instantiate), bdim, x)) else: - if bdim is None: - return broadcast2(size, axis, x) - elif type(bdim) is int: - return moveaxis2(bdim, axis, x) - else: - raise TypeError(type(bdim)) + raise TypeError(type(bdim)) else: raise TypeError(type(instantiate)) @@ -449,7 +450,7 @@ def broadcast2(size, axis, x): return _broadcast2(size, axis, x, get_aval(x)) def _broadcast2(size, axis, x, aval): - if type(aval) is JaxTuple: + if type(aval) is AbstractTuple: return core.pack(map(partial(_broadcast2, size, axis), x, aval)) else: # see comment at the top of this section @@ -460,13 +461,13 @@ def _broadcast2(size, axis, x, aval): def _promote_aval_rank(n, batched, aval): assert isinstance(aval, core.AbstractValue) - if isinstance(aval, core.AbstractTuple): + if isinstance(aval, AbstractTuple): t = type(batched) if t is tuple: - return core.AbstractTuple(map(partial(_promote_aval_rank, n), batched, aval)) + return AbstractTuple(map(partial(_promote_aval_rank, n), batched, aval)) elif t is bool: if batched: - return core.AbstractTuple(map(partial(_promote_aval_rank, n, batched), aval)) + return AbstractTuple(map(partial(_promote_aval_rank, n, batched), aval)) else: return aval else:
diff --git a/tests/optimizers_test.py b/tests/optimizers_test.py --- a/tests/optimizers_test.py +++ b/tests/optimizers_test.py @@ -24,8 +24,9 @@ import jax.numpy as np import jax.test_util as jtu -from jax import jit, grad +from jax import jit, grad, jacfwd, jacrev from jax import core, tree_util +from jax import lax from jax.experimental import optimizers from jax.interpreters import xla @@ -244,7 +245,48 @@ def testUtilityClipGrads(self): expected = 0.9 * norm self.assertAllClose(ans, expected, check_dtypes=False) + def testIssue758(self): + # code from https://github.com/google/jax/issues/758 + # this is more of a scan + jacfwd/jacrev test, but it lives here to use the + # optimizers.py code + def harmonic_bond(conf, params): + return np.sum(conf * params) + + opt_init, opt_update, get_params = optimizers.sgd(5e-2) + + x0 = onp.array([0.5], dtype=onp.float64) + params = onp.array([0.3], dtype=onp.float64) + + def minimize_structure(test_params): + energy_fn = functools.partial(harmonic_bond, params=test_params) + grad_fn = grad(energy_fn, argnums=(0,)) + opt_state = opt_init(x0) + + def apply_carry(carry, _): + i, x = carry + g = grad_fn(get_params(x))[0] + new_state = opt_update(i, g, x) + new_carry = (i+1, new_state) + return new_carry, _ + + carry_final, _ = lax.scan(apply_carry, (0, opt_state), np.zeros((75, 0))) + trip, opt_final = carry_final + assert trip == 75 + return opt_final + + initial_params = 0.5 + minimize_structure(initial_params) + + def loss(test_params): + opt_final = minimize_structure(test_params) + return 1.0 - get_params(opt_final)[0] + + loss_opt_init, loss_opt_update, loss_get_params = optimizers.sgd(5e-2) + + J1 = jacrev(loss, argnums=(0,))(initial_params) + J2 = jacfwd(loss, argnums=(0,))(initial_params) + self.assertAllClose(J1, J2, check_dtypes=True) if __name__ == '__main__':
more scan jacrev/jacfwd bugs This is a follow up of #711 (different error from #757): ``` python import unittest import numpy as onp import jax.numpy as np import functools import jax from jax.config import config; config.update("jax_enable_x64", True) from jax.experimental import optimizers from jax.test_util import check_grads def harmonic_bond(conf, params): return np.sum(conf * params) opt_init, opt_update, get_params = optimizers.sgd(5e-2) x0 = onp.array([0.5], dtype=onp.float64) params = onp.array([0.3], dtype=onp.float64) def minimize_structure(test_params): energy_fn = functools.partial(harmonic_bond, params=test_params) grad_fn = jax.grad(energy_fn, argnums=(0,)) opt_state = opt_init(x0) # use lax.scan, way faster compilation times. def apply_carry(carry, _): i, x = carry g = grad_fn(get_params(x))[0] new_state = opt_update(i, g, x) new_carry = (i+1, new_state) return new_carry, _ carry_final, _ = jax.lax.scan(apply_carry, (0, opt_state), np.zeros((75, 0))) trip, opt_final = carry_final assert trip == 75 return opt_final initial_params = 0.5 minimize_structure(initial_params) def loss(test_params): opt_final = minimize_structure(test_params) return 1.0 - get_params(opt_final)[0] loss_opt_init, loss_opt_update, loss_get_params = optimizers.sgd(5e-2) jax.grad(loss, argnums=(0,))(initial_params) # okay jax.jacrev(loss, argnums=(0,))(initial_params) # AttributeError: 'JaxTuple' object has no attribute 'broadcast' jax.jacfwd(loss, argnums=(0,))(initial_params) # TypeError: <class 'tuple'> ```
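With the batching fixes in the patch above, both Jacobian transforms in this repro run and agree; the check below continues the snippet above (reusing its `loss`, `initial_params`, and imports) and mirrors the assertion in the PR's regression test:

```python
# continuing the repro above; both of these calls previously raised
J1 = jax.jacrev(loss, argnums=(0,))(initial_params)[0]
J2 = jax.jacfwd(loss, argnums=(0,))(initial_params)[0]
assert np.allclose(J1, J2)  # forward- and reverse-mode Jacobians match
```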
2019-05-22T23:01:15
google/jax
761
google__jax-761
[ "756" ]
a193b3592bba6cc62a7ad4076f37f2f18507a8d6
diff --git a/jax/random.py b/jax/random.py --- a/jax/random.py +++ b/jax/random.py @@ -226,19 +226,21 @@ def _check_shape(name, shape): raise ValueError(msg.format(name, shape)) -def uniform(key, shape, dtype=onp.float32, minval=0., maxval=1.): +def uniform(key, shape=(), dtype=onp.float64, minval=0., maxval=1.): """Sample uniform random values in [minval, maxval) with given shape/dtype. Args: key: a PRNGKey used as the random key. shape: a tuple of nonnegative integers representing the shape. - dtype: optional, a float dtype for the returned values (default float32). + dtype: optional, a float dtype for the returned values (default float64 if + jax_enable_x64 is true, otherwise float32). minval: optional, a minimum (inclusive) value for the range (default 0). maxval: optional, a maximum (exclusive) value for the range (default 1). Returns: A random array with the specified shape and dtype. """ + dtype = xla_bridge.canonicalize_dtype(dtype) return _uniform(key, shape, dtype, minval, maxval) @partial(jit, static_argnums=(1, 2)) @@ -247,7 +249,6 @@ def _uniform(key, shape, dtype, minval, maxval): if not onp.issubdtype(dtype, onp.floating): raise TypeError("uniform only accepts floating point dtypes.") - dtype = xla_bridge.canonicalize_dtype(dtype) minval = lax.convert_element_type(minval, dtype) maxval = lax.convert_element_type(maxval, dtype) finfo = onp.finfo(dtype) @@ -271,7 +272,7 @@ def _uniform(key, shape, dtype, minval, maxval): lax.reshape(floats * (maxval - minval) + minval, shape)) -def randint(key, shape, minval, maxval, dtype=onp.int32): +def randint(key, shape, minval, maxval, dtype=onp.int64): """Sample uniform random values in [minval, maxval) with given shape/dtype. Args: @@ -281,20 +282,21 @@ def randint(key, shape, minval, maxval, dtype=onp.int32): (inclusive) value for the range. maxval: int or array of ints broadcast-compatible with ``shape``, a maximum (exclusive) value for the range. - dtype: optional, an int dtype for the returned values (default int32). + dtype: optional, an int dtype for the returned values (default int64 if + jax_enable_x64 is true, otherwise int32). Returns: A random array with the specified shape and dtype. """ + dtype = xla_bridge.canonicalize_dtype(dtype) return _randint(key, shape, minval, maxval, dtype) @partial(jit, static_argnums=(1, 4)) -def _randint(key, shape, minval, maxval, dtype=onp.int32): +def _randint(key, shape, minval, maxval, dtype): _check_shape("randint", shape) if not onp.issubdtype(dtype, onp.integer): raise TypeError("randint only accepts integer dtypes.") - dtype = xla_bridge.canonicalize_dtype(dtype) minval = lax.convert_element_type(minval, dtype) maxval = lax.convert_element_type(maxval, dtype) nbits = onp.iinfo(dtype).bits @@ -371,17 +373,19 @@ def _shuffle(key, x, axis): return x -def normal(key, shape, dtype=onp.float32): +def normal(key, shape=(), dtype=onp.float64): """Sample standard normal random values with given shape and float dtype. Args: key: a PRNGKey used as the random key. shape: a tuple of nonnegative integers representing the shape. - dtype: optional, a float dtype for the returned values (default float32). + dtype: optional, a float dtype for the returned values (default float64 if + jax_enable_x64 is true, otherwise float32). Returns: A random array with the specified shape and dtype. 
""" + dtype = xla_bridge.canonicalize_dtype(dtype) return _normal(key, shape, dtype) @partial(jit, static_argnums=(1, 2)) @@ -398,28 +402,31 @@ def bernoulli(key, p=onp.float32(0.5), shape=()): Args: key: a PRNGKey used as the random key. - p: optional, an array-like broadcastable to `shape` for the mean of the - random variables (default 0.5). + p: optional, an array-like of floating dtype broadcastable to `shape` for + the mean of the random variables (default 0.5). shape: optional, a tuple of nonnegative integers representing the shape (default scalar). Returns: A random array with the specified shape and boolean dtype. """ + dtype = xla_bridge.canonicalize_dtype(lax.dtype(p)) + if not onp.issubdtype(dtype, onp.floating): + msg = "bernoulli probability `p` must have a floating dtype, got {}." + raise TypeError(msg.format(dtype)) + p = lax.convert_element_type(p, dtype) return _bernoulli(key, p, shape) @partial(jit, static_argnums=(2,)) def _bernoulli(key, p, shape): _check_shape("bernoulli", shape) shape = shape or onp.shape(p) - if not onp.issubdtype(onp.float32, lax.dtype(p)): - p = lax.convert_element_type(p, onp.float32) if onp.shape(p) != shape: p = np.broadcast_to(p, shape) return lax.lt(uniform(key, shape, lax.dtype(p)), p) -def beta(key, a, b, shape=(), dtype=onp.float32): +def beta(key, a, b, shape=(), dtype=onp.float64): """Sample Bernoulli random values with given shape and mean. Args: @@ -430,11 +437,13 @@ def beta(key, a, b, shape=(), dtype=onp.float32): beta of the random variables. shape: optional, a tuple of nonnegative integers representing the shape (default scalar). - dtype: optional, a float dtype for the returned values (default float32). + dtype: optional, a float dtype for the returned values (default float64 if + jax_enable_x64 is true, otherwise float32). Returns: A random array with the specified shape and dtype. """ + dtype = xla_bridge.canonicalize_dtype(dtype) return _beta(key, a, b, shape, dtype) @partial(jit, static_argnums=(3, 4)) @@ -449,18 +458,20 @@ def _beta(key, a, b, shape, dtype): return gamma_a / (gamma_a + gamma_b) -def cauchy(key, shape=(), dtype=onp.float32): +def cauchy(key, shape=(), dtype=onp.float64): """Sample Cauchy random values with given shape and float dtype. Args: key: a PRNGKey used as the random key. shape: optional, a tuple of nonnegative integers representing the shape (default scalar). - dtype: optional, a float dtype for the returned values (default float32). + dtype: optional, a float dtype for the returned values (default float64 if + jax_enable_x64 is true, otherwise float32). Returns: A random array with the specified shape and dtype. """ + dtype = xla_bridge.canonicalize_dtype(dtype) return _cauchy(key, shape, dtype) @partial(jit, static_argnums=(1, 2)) @@ -471,7 +482,7 @@ def _cauchy(key, shape, dtype): return lax.tan(lax.mul(pi, lax.sub(u, _constant_like(u, 0.5)))) -def dirichlet(key, alpha, shape=(), dtype=onp.float32): +def dirichlet(key, alpha, shape=(), dtype=onp.float64): """Sample Cauchy random values with given shape and float dtype. Args: @@ -480,11 +491,13 @@ def dirichlet(key, alpha, shape=(), dtype=onp.float32): used as the concentration parameter of the random variables. shape: optional, a tuple of nonnegative integers representing the batch shape (defaults to `alpha.shape[:-1]`). - dtype: optional, a float dtype for the returned values (default float32). + dtype: optional, a float dtype for the returned values (default float64 if + jax_enable_x64 is true, otherwise float32). 
Returns: A random array with the specified shape and dtype. """ + dtype = xla_bridge.canonicalize_dtype(dtype) return _dirichlet(key, alpha, shape, dtype) @partial(jit, static_argnums=(2, 3)) @@ -496,18 +509,20 @@ def _dirichlet(key, alpha, shape, dtype): return gamma_samples / np.sum(gamma_samples, axis=-1, keepdims=True) -def exponential(key, shape=(), dtype=onp.float32): +def exponential(key, shape=(), dtype=onp.float64): """Sample Exponential random values with given shape and float dtype. Args: key: a PRNGKey used as the random key. shape: optional, a tuple of nonnegative integers representing the shape (default scalar). - dtype: optional, a float dtype for the returned values (default float32). + dtype: optional, a float dtype for the returned values (default float64 if + jax_enable_x64 is true, otherwise float32). Returns: A random array with the specified shape and dtype. """ + dtype = xla_bridge.canonicalize_dtype(dtype) return _exponential(key, shape, dtype) @partial(jit, static_argnums=(1, 2)) @@ -568,7 +583,7 @@ def _body_fn(kXVU): return lax.select(lax.eq(z, zero), onp.finfo(z.dtype).tiny, z) -def gamma(key, a, shape=(), dtype=onp.float32): +def gamma(key, a, shape=(), dtype=onp.float64): """Sample Gamma random values with given shape and float dtype. Args: @@ -577,15 +592,17 @@ def gamma(key, a, shape=(), dtype=onp.float32): of the random variables. shape: optional, a tuple of nonnegative integers representing the shape (default scalar). - dtype: optional, a float dtype for the returned values (default float32). + dtype: optional, a float dtype for the returned values (default float64 if + jax_enable_x64 is true, otherwise float32). Returns: A random array with the specified shape and dtype. """ + dtype = xla_bridge.canonicalize_dtype(dtype) return _gamma(key, a, shape, dtype) @partial(jit, static_argnums=(2, 3)) -def _gamma(key, a, shape=(), dtype=onp.float32): +def _gamma(key, a, shape, dtype): _check_shape("gamma", shape) a = lax.convert_element_type(a, dtype) shape = shape or onp.shape(a) @@ -597,18 +614,20 @@ def _gamma(key, a, shape=(), dtype=onp.float32): return np.reshape(samples, shape) -def gumbel(key, shape=(), dtype=onp.float32): +def gumbel(key, shape=(), dtype=onp.float64): """Sample Gumbel random values with given shape and float dtype. Args: key: a PRNGKey used as the random key. shape: optional, a tuple of nonnegative integers representing the shape (default scalar). - dtype: optional, a float dtype for the returned values (default float32). + dtype: optional, a float dtype for the returned values (default float64 if + jax_enable_x64 is true, otherwise float32). Returns: A random array with the specified shape and dtype. """ + dtype = xla_bridge.canonicalize_dtype(dtype) return _gumbel(key, shape, dtype) @partial(jit, static_argnums=(1, 2)) @@ -617,18 +636,20 @@ def _gumbel(key, shape, dtype): return -np.log(-np.log(uniform(key, shape, dtype))) -def laplace(key, shape=(), dtype=onp.float32): +def laplace(key, shape=(), dtype=onp.float64): """Sample Laplace random values with given shape and float dtype. Args: key: a PRNGKey used as the random key. shape: optional, a tuple of nonnegative integers representing the shape (default scalar). - dtype: optional, a float dtype for the returned values (default float32). + dtype: optional, a float dtype for the returned values (default float64 if + jax_enable_x64 is true, otherwise float32). Returns: A random array with the specified shape and dtype. 
""" + dtype = xla_bridge.canonicalize_dtype(dtype) return _laplace(key, shape, dtype) @partial(jit, static_argnums=(1, 2)) @@ -638,7 +659,7 @@ def _laplace(key, shape, dtype): return lax.mul(lax.sign(u), lax.log1p(lax.neg(lax.abs(u)))) -def pareto(key, b, shape=(), dtype=onp.float32): +def pareto(key, b, shape=(), dtype=onp.float64): """Sample Pareto random values with given shape and float dtype. Args: @@ -647,11 +668,13 @@ def pareto(key, b, shape=(), dtype=onp.float32): of the random variables. shape: optional, a tuple of nonnegative integers representing the shape (default scalar). - dtype: optional, a float dtype for the returned values (default float32). + dtype: optional, a float dtype for the returned values (default float64 if + jax_enable_x64 is true, otherwise float32). Returns: A random array with the specified shape and dtype. """ + dtype = xla_bridge.canonicalize_dtype(dtype) return _pareto(key, b, shape, dtype) @partial(jit, static_argnums=(2, 3)) @@ -665,7 +688,7 @@ def _pareto(key, b, shape, dtype): return lax.exp(lax.div(e, b)) -def t(key, df, shape=(), dtype=onp.float32): +def t(key, df, shape=(), dtype=onp.float64): """Sample Student's t random values with given shape and float dtype. Args: @@ -674,11 +697,13 @@ def t(key, df, shape=(), dtype=onp.float32): of the random variables. shape: optional, a tuple of nonnegative integers representing the shape (default scalar). - dtype: optional, a float dtype for the returned values (default float32). + dtype: optional, a float dtype for the returned values (default float64 if + jax_enable_x64 is true, otherwise float32). Returns: A random array with the specified shape and dtype. """ + dtype = xla_bridge.canonicalize_dtype(dtype) return _t(key, df, shape, dtype) @partial(jit, static_argnums=(2, 3))
diff --git a/tests/random_test.py b/tests/random_test.py --- a/tests/random_test.py +++ b/tests/random_test.py @@ -358,6 +358,14 @@ def feature_map(n, d, sigma=1.0, seed=123): self.assertRaisesRegex(ValueError, re.compile(r'.*requires a concrete.*'), lambda: feature_map(5, 3)) + def testIssue756(self): + key = random.PRNGKey(0) + w = random.normal(key, ()) + if FLAGS.jax_enable_x64: + self.assertEqual(onp.result_type(w), onp.float64) + else: + self.assertEqual(onp.result_type(w), onp.float32) + if __name__ == "__main__": absltest.main()
Make samplers in jax.random use float64 as default when jax_enable_x64=True Currently, the behavior of samplers in `jax.random` is somewhat inconsistent as compared to other array constructors when `jax_enable_x64=True` because it still uses `np.float32` as the default, unless the dtype argument is explicitly also set to `np.float64`. The following code demonstrates the issue: ```python In [1]: from jax.config import config In [2]: import jax.numpy as np In [3]: from jax import random In [4]: config.update('jax_enable_x64', True) In [5]: np.array([1.]).dtype /Users/npradhan/miniconda3/envs/numpyro/lib/python3.6/site-packages/jax/lib/xla_bridge.py:130: UserWarning: No GPU/TPU found, falling back to CPU. warnings.warn('No GPU/TPU found, falling back to CPU.') Out[5]: dtype('float64') In [6]: np.ones(1).dtype Out[6]: dtype('float64') In [7]: random.normal(random.PRNGKey(0), (1,)).dtype Out[7]: dtype('float32') # Expected: float64 (default as in other constructors) ``` This will be really useful to just change one line in the code and run all the computations in float64, instead of having to temporarily insert `dtype`s in calls to all the samplers. This should be a small change, and we are happy to put out a PR with the fix, but I was not sure if there are other design considerations involved. cc. @fehiepsi Related: #470.
2019-05-22T23:23:35
google/jax
767
google__jax-767
[ "764" ]
41c2e9d4475ca57156486b64066cf0e5e28cc0ff
diff --git a/jax/lax/lax.py b/jax/lax/lax.py --- a/jax/lax/lax.py +++ b/jax/lax/lax.py @@ -1437,7 +1437,7 @@ def _brcast_to(x, shape): ad.defjvp(log1p_p, lambda g, x: div(g, add(x, _one(x)))) tanh_p = standard_unop(_float | _complex, 'tanh') -ad.defjvp(tanh_p, lambda g, x: div(g, pow(cosh(x), _two(x)))) +ad.defjvp2(tanh_p, lambda g, ans, x: mul(g, sub(_one(x), mul(ans, ans)))) sin_p = standard_unop(_float | _complex, 'sin') ad.defjvp(sin_p, lambda g, x: mul(g, cos(x)))
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -1544,6 +1544,14 @@ def testIssue728(self): def testIssue746(self): lnp.arange(12).reshape(3, 4) # doesn't crash + def testIssue764(self): + x = lnp.linspace(190, 200, 4) + f = api.grad(lambda x: lnp.sum(lnp.tanh(x))) + # Expected values computed with autograd in float64 precision. + expected = onp.array([3.71669453e-165, 4.72999108e-168, 6.01954653e-171, + 7.66067839e-174], onp.float64) + self.assertAllClose(f(x), expected, check_dtypes=False) + if __name__ == "__main__": absltest.main()
Gradient of `tanh` sometimes causes invalid values Hi, I'm using jax 0.1.35 on python 3.6.8. When evaluating gradients, I sometimes get `nan`s where I don't think `nan`s should be. This happens for some input values and not for others: ``` import jax import jax.numpy as np from jax import grad print(jax.version.__version__) x = np.linspace(190, 200, 4) f = grad(lambda x: np.sum(np.tanh(x))) print(x) f(x) ``` ``` 0.1.35 [190. 193.33333333 196.66666667 200. ] DeviceArray([nan, nan, nan, nan], dtype=float32) ``` When tracing the `nan`s with `jax_debug_nans`, I get errors about divisions: ``` --------------------------------------------------------------------------- FloatingPointError Traceback (most recent call last) <ipython-input-3-c4aba5ddce5a> in <module> 6 f = grad(lambda x: np.sum(np.tanh(x))) 7 print(x) ----> 8 f(x) /anaconda3/envs/py36_tf/lib/python3.6/site-packages/jax/api.py in grad_f(*args, **kwargs) 233 def grad_f(*args, **kwargs): 234 if not has_aux: --> 235 _, g = value_and_grad_f(*args, **kwargs) 236 return g 237 else: /anaconda3/envs/py36_tf/lib/python3.6/site-packages/jax/api.py in value_and_grad_f(*args, **kwargs) 287 "differentiation, pass holomorphic=True.") 288 raise TypeError(msg.format(dtype)) --> 289 g = vjp_py(onp.ones((), dtype=dtype)) 290 g = g[0] if isinstance(argnums, int) else g 291 if not has_aux: /anaconda3/envs/py36_tf/lib/python3.6/site-packages/jax/api_util.py in apply_jaxtree_fun(fun, io_tree, *py_args) 60 raise TypeError("Expected {}, got {}".format(expected, in_tree)) 61 ---> 62 ans = fun(*args) 63 return build_tree(out_tree, ans) 64 /anaconda3/envs/py36_tf/lib/python3.6/site-packages/jax/api.py in out_vjp_packed(cotangent_in) 820 ct_out_tree = PyTreeDef(node_types[tuple], None, in_trees) 821 def out_vjp_packed(cotangent_in): --> 822 return out_vjp(cotangent_in) 823 vjp_py = partial(apply_jaxtree_fun, out_vjp_packed, (ct_in_trees, ct_out_tree)) 824 if not has_aux: /anaconda3/envs/py36_tf/lib/python3.6/site-packages/jax/interpreters/ad.py in vjp_(ct) 110 dummy_primal_and_ct = pack((core.unit, ct)) 111 dummy_args = (None,) * len(jaxpr.invars) --> 112 _, arg_cts = backward_pass(jaxpr, consts, (), dummy_args, dummy_primal_and_ct) 113 return instantiate_zeros(pack(primals), arg_cts[1]) 114 /anaconda3/envs/py36_tf/lib/python3.6/site-packages/jax/interpreters/ad.py in backward_pass(jaxpr, consts, freevar_vals, args, cotangent_in) 184 map(write_cotangent, bound_vars, ct_free_vars_out) 185 else: --> 186 cts_out = get_primitive_transpose(eqn.primitive)(ct_in, *invals, **eqn.params) 187 188 if cts_out is zero: /anaconda3/envs/py36_tf/lib/python3.6/site-packages/jax/lax/lax.py in _div_transpose_rule(cotangent, x, y) 1565 def _div_transpose_rule(cotangent, x, y): 1566 assert x is None and y is not None -> 1567 res = ad_util.zero if cotangent is ad_util.zero else div(cotangent, y) 1568 return res, None 1569 div_p = standard_binop([_num, _num], 'div') /anaconda3/envs/py36_tf/lib/python3.6/site-packages/jax/lax/lax.py in div(x, y) 236 def div(x, y): 237 r"""Elementwise division: :math:`x \over y`.""" --> 238 return div_p.bind(x, y) 239 240 def rem(x, y): /anaconda3/envs/py36_tf/lib/python3.6/site-packages/jax/core.py in bind(self, *args, **kwargs) 115 top_trace = find_top_trace(args) 116 if top_trace is None: --> 117 return self.impl(*args, **kwargs) 118 119 tracers = map(top_trace.full_raise, args) /anaconda3/envs/py36_tf/lib/python3.6/site-packages/jax/interpreters/xla.py in apply_primitive(prim, *args, **kwargs) 50 abstract_args = map(abstractify, args) 51 
compiled_fun = xla_primitive_callable(prim, *abstract_args, **kwargs) ---> 52 return compiled_fun(*args) 53 54 @memoize /anaconda3/envs/py36_tf/lib/python3.6/site-packages/jax/interpreters/xla.py in execute_compiled_primitive(name, compiled, result_handler, *args) 83 input_bufs = [device_put(x) for x in args] 84 out_buf = compiled.Execute(input_bufs) ---> 85 check_nans(name, out_buf) 86 return result_handler(out_buf) 87 /anaconda3/envs/py36_tf/lib/python3.6/site-packages/jax/interpreters/xla.py in check_nans(name, buf) 87 88 def check_nans(name, buf): ---> 89 FLAGS.jax_debug_nans and _check_nans(name, buf.shape(), buf) 90 91 def _check_nans(name, xla_shape, buf): /anaconda3/envs/py36_tf/lib/python3.6/site-packages/jax/interpreters/xla.py in _check_nans(name, xla_shape, buf) 97 if onp.any(onp.isnan(pyval)): 98 msg = "invalid value (nan) encountered in {}" ---> 99 raise FloatingPointError(msg.format(name)) 100 101 def device_put(x, device_num=0): FloatingPointError: invalid value (nan) encountered in div ```
I think this is a precision problem. If you enable 64-bit mode in JAX: ``` import jax.config jax.config.update('jax_enable_x64', True) ``` and then run the code above, we get reasonable answers: ``` DeviceArray([3.71669453e-165, 4.72999108e-168, 6.01954653e-171, 7.66067839e-174]) ``` I believe the `cosh` in the gradient is yielding `inf` in 32-bit mode, and this leads to the nan. Is there a more stable JVP formula we could use? Here's [the current one](https://github.com/google/jax/blob/d63b8a4714c274600032705b3dc0f482f3a74967/jax/lax/lax.py#L1440): ```python ad.defjvp(tanh_p, lambda g, x: div(g, pow(cosh(x), _two(x)))) ``` In fact, why aren't we getting a divide-by-inf = zero here? We are, I misspoke previously. TF uses `(1-ans**2)*g` for its VJP.
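A minimal float32 illustration of the instability discussed above (plain NumPy, not the library's actual rule): `cosh` overflows to `inf` long before `tanh` stops being representable, so the quotient form of the derivative is fragile, while `1 - tanh(x)**2` degrades gracefully.

```python
import numpy as onp

x = onp.float32(200.0)
print(onp.cosh(x))            # inf: exp(200) overflows the float32 range
print(onp.tanh(x))            # 1.0: tanh saturates harmlessly
print(1 - onp.tanh(x) ** 2)   # 0.0: the true derivative (~8e-174) underflows to zero
```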
2019-05-24T15:09:10
google/jax
778
google__jax-778
[ "776" ]
90eda42e6efe6edab5bb8db330a88f173ef7c60f
diff --git a/jax/lax/lax.py b/jax/lax/lax.py --- a/jax/lax/lax.py +++ b/jax/lax/lax.py @@ -2804,6 +2804,9 @@ def _scatter_add_transpose_rule(t, operand, scatter_indices, updates, update_jaxpr, update_consts, dimension_numbers, updates_shape): assert scatter_indices is not None + if t is ad_util.zero: + return [ad_util.zero, None, ad_util.zero] + operand_t = update_t = None if operand is None: operand_t = t
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -30,6 +30,7 @@ import numpy as onp +import jax.ops from jax import api from jax import lax from jax import numpy as lnp @@ -1552,6 +1553,17 @@ def testIssue764(self): 7.66067839e-174], onp.float64) self.assertAllClose(f(x), expected, check_dtypes=False) + def testIssue776(self): + """Tests that the scatter-add transpose rule instantiates symbolic zeros.""" + def f(u): + y = jax.ops.index_add(onp.ones(10,), [2, 4, 5], u) + # The transpose rule for lax.tie_in returns a symbolic zero for its first + # argument. + return lax.tie_in(y, 7.) + + self.assertAllClose(onp.zeros(3,), api.grad(f)(onp.ones(3,)), + check_dtypes=True) + if __name__ == "__main__": absltest.main()
Scatter transpose rule doesn’t handle symbolic zeros Unfortunately, I don't have a terribly concise repro for this error. From playing around with it, it seems as though the error probably involves index_update, but it's hard to be sure. However, given the function ```python def nvt_nose_hoover(energy_fn, dt, T, chain_length=5, tau=0.01): force = grad(lambda R, *args, **kwargs: -energy_fn(R, *args, **kwargs)) dt_2 = dt / 2.0 dt_4 = dt_2 / 2.0 dt_8 = dt_4 / 2.0 dt, dt_2, dt_4, dt_8, tau = static_cast(dt, dt_2, dt_4, dt_8, tau) def init_fun(key, R): V = random.normal(key, R.shape, dtype=R.dtype) KE = 0.5 * np.mean(V ** 2) # Nose-Hoover parameters. xi = np.zeros(chain_length, R.dtype) v_xi = np.zeros(chain_length, R.dtype) DOF = R.shape[0] * R.shape[1] Q = tau ** f32(2) * np.ones(chain_length, dtype=R.dtype) Q = ops.index_update(Q, 0, Q[0] * DOF) return R, V, KE, xi, v_xi, Q def step_chain(KE, V, xi, v_xi, Q, DOF, T): """Applies a single update to the chain parameters and rescales velocity.""" M = chain_length - 1 G = (Q[M - 1] * v_xi[M - 1] ** f32(2) - T) / Q[M] v_xi = ops.index_add(v_xi, M, dt_4 * G) G = (f32(2.0) * KE - DOF * T) / Q[0] scale = np.exp(-dt_8 * v_xi[1]) v_xi = ops.index_update(v_xi, 0, scale * (scale * v_xi[0] + dt_4 * G)) return KE, V, xi, v_xi def apply_fun(state, t=0.0, **kwargs): R, V, KE, xi, v_xi, Q = state DOF = R.shape[0] * R.shape[1] KE, V, xi, v_xi = step_chain(KE, V, xi, v_xi, Q, DOF, T) R = R + dt_2 * V F = force(R, t=t, **kwargs) V = V + dt * F KE = 0.5 * np.mean(V ** 2) R = R + dt_2 * V KE, V, xi, v_xi = step_chain(KE, V, xi, v_xi, Q, DOF, T) return R, V, KE, xi, v_xi, Q return init_fun, apply_fun ``` The following code proceeds without error, ```python def do_sim(scale): key = random.PRNGKey(0) R_key, R0_key, V_key = random.split(key, 3) R = random.normal( R_key, (PARTICLE_COUNT, spatial_dimension), dtype=dtype) R0 = random.normal( R0_key, (PARTICLE_COUNT, spatial_dimension), dtype=dtype) E = functools.partial( lambda R, R0, **kwargs: scale * np.sum((R - R0) ** 2), R0=R0) init_fn, apply_fn = nvt_nose_hoover(E, 1e-3, 0.1) state = init_fn(V_key, R) for _ in range(2): state = apply_fn(state) return E(state[0]) assert grad(do_sim)(1.0) > 0.0 ``` However, when the apply_fn is jitted, as in ```python def do_sim(scale): key = random.PRNGKey(0) R_key, R0_key, V_key = random.split(key, 3) R = random.normal( R_key, (PARTICLE_COUNT, spatial_dimension), dtype=dtype) R0 = random.normal( R0_key, (PARTICLE_COUNT, spatial_dimension), dtype=dtype) _, shift = space.free() E = functools.partial( lambda R, R0, **kwargs: scale * np.sum((R - R0) ** 2), R0=R0) init_fn, apply_fn = simulate.nvt_nose_hoover(E, 1e-3, 0.1) apply_fn = jit(apply_fn) state = init_fn(V_key, R) for _ in range(2): state = apply_fn(state) return E(state[0]) assert grad(do_sim)(1.0) > 0.0 ``` The code throws an error with the following stack trace, ``` Traceback (most recent call last): File "/usr/local/google/home/schsam/.local/lib/python2.7/site-packages/absl/third_party/unittest3_backport/case.py", line 37, in testPartExecutor yield File "/usr/local/google/home/schsam/.local/lib/python2.7/site-packages/absl/third_party/unittest3_backport/case.py", line 162, in run testMethod() File "/usr/local/google/home/schsam/.local/lib/python2.7/site-packages/absl/testing/parameterized.py", line 262, in bound_param_test test_method(self, **testcase_params) File "simulate_test.py", line 164, in test_grad_through_nvt assert grad(do_sim)(1.0) > 0.0 File "/usr/local/google/home/schsam/Source/jax/jax/api.py", line 235, in 
grad_f _, g = value_and_grad_f(*args, **kwargs) File "/usr/local/google/home/schsam/Source/jax/jax/api.py", line 289, in value_and_grad_f g = vjp_py(onp.ones((), dtype=dtype)) File "/usr/local/google/home/schsam/Source/jax/jax/api_util.py", line 62, in apply_jaxtree_fun ans = fun(*args) File "/usr/local/google/home/schsam/Source/jax/jax/api.py", line 822, in out_vjp_packed return out_vjp(cotangent_in) File "/usr/local/google/home/schsam/Source/jax/jax/interpreters/ad.py", line 112, in vjp_ _, arg_cts = backward_pass(jaxpr, consts, (), dummy_args, dummy_primal_and_ct) File "/usr/local/google/home/schsam/Source/jax/jax/interpreters/ad.py", line 180, in backward_pass eqn.params, subjaxprs, sub_consts, sub_freevar_vals, invals, ct_in) File "/usr/local/google/home/schsam/Source/jax/jax/interpreters/ad.py", line 536, in call_transpose ans = primitive.bind(fun, all_args, **params) File "/usr/local/google/home/schsam/Source/jax/jax/core.py", line 636, in call_bind ans = primitive.impl(f, *args, **params) File "/usr/local/google/home/schsam/Source/jax/jax/interpreters/xla.py", line 591, in xla_call_impl compiled_fun = xla_callable(fun, device_values, *map(abstractify, args)) File "/usr/local/google/home/schsam/Source/jax/jax/linear_util.py", line 208, in memoized_fun ans = call(f, *args) File "/usr/local/google/home/schsam/Source/jax/jax/interpreters/xla.py", line 604, in xla_callable jaxpr, (pval, consts, env) = pe.trace_to_subjaxpr(fun, master, False).call_wrapped(pvals) File "/usr/local/google/home/schsam/Source/jax/jax/linear_util.py", line 147, in call_wrapped ans = self.f(*args, **dict(self.params, **kwargs)) File "/usr/local/google/home/schsam/Source/jax/jax/interpreters/ad.py", line 186, in backward_pass cts_out = get_primitive_transpose(eqn.primitive)(ct_in, *invals, **eqn.params) File "/usr/local/google/home/schsam/Source/jax/jax/lax/lax.py", line 2818, in _scatter_add_transpose_rule for i in xrange(len(t.shape)): AttributeError: 'Zero' object has no attribute 'shape' ```
Looks like just a bug in the scatter transpose rule; it doesn’t handle symbolic zeros like transpose rules need to. Totally for my own curiosity, is there a reason why this only affects the version where apply_fn was jitted?
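The pattern the fix adds, restated as a standalone sketch (illustration only, not the exact library code): a transpose rule must check for the symbolic `ad_util.zero` cotangent before touching `t.shape`, and propagate symbolic zeros for its outputs.

```python
from jax import ad_util

def _scatter_add_transpose_sketch(t, operand, scatter_indices, updates, **params):
  # `t` may be the symbolic Zero object, which has no `.shape` or `.dtype`
  if t is ad_util.zero:
    return [ad_util.zero, None, ad_util.zero]
  # ... otherwise fall through to the usual slicing-based transpose
  raise NotImplementedError("sketch only")
```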
2019-05-28T14:31:47
google/jax
786
google__jax-786
[ "779" ]
117749b7548cad68a3f4ba8bb75326372aa9ac40
diff --git a/jax/lax/lax.py b/jax/lax/lax.py --- a/jax/lax/lax.py +++ b/jax/lax/lax.py @@ -2860,8 +2860,10 @@ def _scatter_batching_rule( operand_bdim = 0 if scatter_indices_bdim is not None and updates_bdim is None: - raise NotImplementedError # TODO(mattjj,phawkins) - elif scatter_indices_bdim is None and updates_bdim is not None: + updates = broadcast(updates, (size,)) + updates_bdim = 0 + + if scatter_indices_bdim is None and updates_bdim is not None: updates = batching.move_dim_to_front(updates, updates_bdim) inserted_window_dims = tuple(onp.add(1, dimension_numbers.inserted_window_dims)) update_window_dims = (0,) + tuple(onp.add(1, dimension_numbers.update_window_dims))
diff --git a/tests/batching_test.py b/tests/batching_test.py --- a/tests/batching_test.py +++ b/tests/batching_test.py @@ -31,6 +31,7 @@ from jax.core import unit from jax.interpreters import partial_eval as pe from jax.util import partial, curry +import jax.ops from jax.config import config config.parse_flags_with_absl() @@ -911,6 +912,11 @@ def testEmptyTuples(self): self.assertAllClose(result, onp.array([1, 2]), check_dtypes=False) self.assertEqual((), empty_tuple) + def testIndexAddBatchedIndexesOnly(self): + f = lambda x, idx, y: jax.ops.index_add(x, jax.ops.index[idx], y) + result = vmap(f, (None, 0, None))(onp.zeros((10,)), onp.arange(10,), 1.) + self.assertAllClose(result, onp.eye(10), check_dtypes=False) + if __name__ == '__main__': absltest.main()
implement scatter batching rule when indices are batched and updates are not Implement _scatter_batching_rule when `scatter_indices_bdim is not None and updates_bdim is None`. See https://colab.corp.google.com/drive/1VBna662Pcz8zOL3SlzSJ6eETrN2x90FF#scrollTo=vLHx5BcHPykw (internal only) for the source code + context. When input_embeddings and unc_target do not have a batch dimension, `total_loss, grads = inner_loss_value_and_grad(net_params, input_embeddings, unc_target)` works fine. Adding a batch dimension to input_embeddings and unc_target and changing the line to `total_loss, grads = vmap(lambda args: inner_loss_value_and_grad(net_params, args[0], args[1]))([input_embeddings, unc_target])` gives this NotImplementedError error: """ ~/virtualenvs/_3/lib/python3.7/site-packages/jax/lax/lax.py in _scatter_batching_rule(***failed resolving arguments***) 2842 2843 if scatter_indices_bdim is not None and updates_bdim is None: -> 2844 raise NotImplementedError # TODO(mattjj,phawkins) 2845 elif scatter_indices_bdim is None and updates_bdim is not None: 2846 updates = batching.move_dim_to_front(updates, updates_bdim) NotImplementedError: """ Full stacktrace https://paste.googleplex.com/4748794925154304.
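A user-level repro of the previously unimplemented case, condensed from the regression test added in this PR: only the indices carry a batch dimension, while the operand and the update value are unbatched.

```python
import numpy as onp
import jax.ops
from jax import vmap

f = lambda x, idx, y: jax.ops.index_add(x, jax.ops.index[idx], y)
out = vmap(f, in_axes=(None, 0, None))(onp.zeros((10,)), onp.arange(10), 1.)
# with the new batching rule, `out` is the 10x10 identity matrix
```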
2019-05-29T21:14:50
google/jax
792
google__jax-792
[ "725" ]
6a59a567c30f9f5ecfa66f214e9eb98ce1c96a5b
diff --git a/jax/interpreters/xla.py b/jax/interpreters/xla.py --- a/jax/interpreters/xla.py +++ b/jax/interpreters/xla.py @@ -469,6 +469,8 @@ def __init__(self, result_shape, device_buffer): # TODO make device_buffer a property, make the _npy_value writeable, invalidate @property def _value(self): + if self.device_buffer is None: + raise ValueError("Cannot fetch the value of a deleted DeviceArray.") if self._npy_value is None: self._npy_value = self.device_buffer.to_py() self._npy_value.flags.writeable = False @@ -478,6 +480,21 @@ def copy(self): """Returns an ndarray (backed by host memory, not device memory).""" return onp.asarray(self) + def delete(self): + """Deletes the device array and any cached copy on the host. + + It is an error to access the contents of a `DeviceArray` after it has + been deleted. + + Use of this method is optional; device buffers will be reclaimed + automatically by Python when a DeviceArray object is garbage collected. + However, it is sometimes useful to have more explicit control over the + time of deletion. + """ + self.device_buffer.delete() + self.device_buffer = None + self._npy_value = None + def __repr__(self): return onp.array_repr(self)
diff --git a/tests/api_test.py b/tests/api_test.py --- a/tests/api_test.py +++ b/tests/api_test.py @@ -571,6 +571,12 @@ def test_devicearray_repr(self): self.assertIsInstance(x, DeviceArray) repr(x) # doesn't crash + def test_devicearray_delete(self): + x = device_put(1.) + x.delete() + jtu.check_raises_regexp(lambda: repr(x), ValueError, + "Cannot fetch the value of a deleted DeviceArray.") + def test_namedtuple_transparency(self): # See https://github.com/google/jax/issues/446 Point = collections.namedtuple("Point", ["x", "y"]) @@ -587,6 +593,5 @@ def f(pt): f_jit = api.jit(f) self.assertAllClose(f(pt), f_jit(pt), check_dtypes=False) - if __name__ == '__main__': absltest.main()
Cyclic references could lead to memory leaks Cyclic references (for example, in graph data structures) don't get garbage collected by python immediately. This can cause tensors to not be deallocated when all references are removed, causing memory leaks on device and possible OOM crashes.
Can you give a MWE of what you mean by in-graph data structures? You might be able to fix this with Python weakrefs if the cycles are created in data structures your code manages. Yeah that's what we ended up doing. But weakref is a fairly low level API to be using. Can you clarify what you mean? Is your concern that (a) references don't get collected by Python *immediately* or (b) that references don't get collected at all? I expect you mean the former, in which case there isn't a whole lot we can do about it at our level — it's something you'll have to fix in your code. The only thing I can possibly suggest is that we could add a Delete() method to our DeviceArrays to give you more explicit control over destruction. I'd be surprised if the latter is happening, but if you have a repro I'd be happy to look into it. Put another way: is this an issue with JAX specifically, or would e.g. regular NumPy have this problem? Yes adding a `Delete` method would be great. The issue is amplified with JAX when compared to numpy since python calls garbage collection when it starts to run out of runtime memory, but not when JAX runs out of device memory. Note this cyclic issue is only true outside of a `jit`.
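A small sketch of the two remedies discussed above: breaking the cycle with a `weakref`, and calling the `delete()` method this PR adds to `DeviceArray`. The `Node` class and array shapes are illustrative, not from the issue.

```python
import weakref
import jax.numpy as np
from jax import device_put

class Node(object):
  def __init__(self, value):
    self.value = value       # device-backed array held by the node
    self.parent = None

child = Node(np.ones(3))
parent = Node(np.zeros(3))
parent.child = child
child.parent = weakref.ref(parent)   # weakref breaks the reference cycle

x = device_put(1.)
x.delete()    # explicit, immediate release of the device buffer (new in this PR)
```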
2019-05-30T13:55:25
google/jax
800
google__jax-800
[ "798" ]
93e114337334fd0e42ac5bf0d5caecdab5344383
diff --git a/jax/api.py b/jax/api.py --- a/jax/api.py +++ b/jax/api.py @@ -1062,3 +1062,59 @@ def graphviz_maker(*args, **kwargs): graphviz_maker.__name__ = "make_graphviz({})".format(graphviz_maker.__name__) return graphviz_maker + + +def eval_shape(fun, *args, **kwargs): + """Compute the shape of ``fun(*args, **kwargs)`` without incurring any FLOPs. + + This utility function is useful for performing shape inference. Its + input/output behavior is defined by: + + def eval_shape(fun, *args, **kwargs): + out = fun(*args, **kwargs) + return jax.tree_util.tree_map(np.shape, out) + + But instead of applying ``fun`` directly, which might be expensive, it uses + JAX's abstract interpretation machinery to evaluate the shapes without doing + any FLOPs. + + Using ``eval_shape`` can also catch shape errors, and will raise same shape + errors as evaluating ``fun(*args, **kwargs)``. + + Args: + *args: a positional argument tuple of arrays, scalars, or (nested) standard + Python containers (tuples, lists, dicts, namedtuples, i.e. pytrees) of + those types. Since only the ``shape`` and ``dtype`` attributes are + accessed, only values that duck-type arrays are required, rather than real + ndarrays. The duck-typed objects cannot be namedtuples because those are + treated as standard Python containers. See the example below. + **kwargs: a keyword argument dict of arrays, scalars, or (nested) standard + Python containers (pytrees) of those types. As in ``args``, array values + need only be duck-typed to have ``shape`` and ``dtype`` attributes. + + For example: + + >>> f = lambda A, x: np.tanh(np.dot(A, x)) + >>> class MyArgArray(object): + ... def __init__(self, shape, dtype): + ... self.shape = shape + ... self.dtype = dtype + ... + >>> A = MyArgArray((2000, 3000), np.float32) + >>> x = MyArgArray((3000, 1000), np.float32) + >>> out_shape = jax.eval_shape(f, A, x) # no FLOPs performed + >>> print(out_shape) + (2000, 1000) + """ + def abstractify(x): + if type(x) is core.JaxTuple: + return core.AbstractTuple(map(abstractify, x)) + else: + return ShapedArray(onp.shape(x), onp.result_type(x)) + + jax_args, in_trees = unzip2(map(pytree_to_jaxtupletree, args)) + jax_kwargs, kwargs_tree = pytree_to_jaxtupletree(kwargs) + f, out_tree = pytree_fun_to_jaxtupletree_fun2(lu.wrap_init(fun), kwargs_tree, in_trees) + abstract_args = map(abstractify, (jax_kwargs,) + tuple(jax_args)) + out = pe.abstract_eval_fun(f.call_wrapped, *abstract_args) + return tree_map(onp.shape, build_tree(out_tree(), out))
diff --git a/tests/api_test.py b/tests/api_test.py --- a/tests/api_test.py +++ b/tests/api_test.py @@ -593,5 +593,82 @@ def f(pt): f_jit = api.jit(f) self.assertAllClose(f(pt), f_jit(pt), check_dtypes=False) + def test_eval_shape(self): + def fun(x, y): + return np.tanh(np.dot(x, y) + 3.) + + x = np.ones((2, 3)) + y = np.ones((3, 4)) + out_shape = api.eval_shape(fun, x, y) + + self.assertEqual(out_shape, (2, 4)) + + def test_eval_shape_constants(self): + def fun(): + x = np.ones((2, 3)) + y = np.ones((3, 4)) + return np.tanh(np.dot(x, y) + 3.) + + out_shape = api.eval_shape(fun) + + self.assertEqual(out_shape, (2, 4)) + + def test_eval_shape_tuple_unpacking(self): + def fun(x, y): + a, b = x + return a + b + y + + x = (np.ones(2), np.ones(2)) + y = 3. + out_shape = api.eval_shape(fun, x, y) + + self.assertEqual(out_shape, (2,)) + + def test_eval_shape_tuple_itemgetting(self): + def fun(x, y): + return x[0] + x[1] + y + + x = (np.ones(2), np.ones(2)) + y = 3. + out_shape = api.eval_shape(fun, x, y) + + self.assertEqual(out_shape, (2,)) + + def test_eval_shape_output_dict(self): + def fun(x, y): + return {'hi': x[0] + x[1] + y} + + x = (np.ones(2), np.ones(2)) + y = 3. + out_shape = api.eval_shape(fun, x, y) + + self.assertEqual(out_shape, {'hi': (2,)}) + + def test_eval_shape_shape_error(self): + def fun(x, y): + return np.tanh(np.dot(x, y) + 3.) + + x = np.ones((3, 3)) + y = np.ones((4, 4)) + + self.assertRaises(TypeError, lambda: api.eval_shape(fun, x, y)) + + def test_eval_shape_duck_typing(self): + def fun(A, b, x): + return np.dot(A, x) + b + + class MyArgArray(object): + def __init__(self, shape, dtype): + self.shape = shape + self.dtype = dtype + + A = MyArgArray((3, 4), np.float32) + b = MyArgArray((5,), np.float32) + x = MyArgArray((4, 5), np.float32) + out_shape = api.eval_shape(fun, A, b, x) + + self.assertEqual(out_shape, (3, 5)) + + if __name__ == '__main__': absltest.main()
add a shape inference function to api.py A couple users (including @lukaszkaiser) have requested a way to do shape inference. Maybe we can provide something like `eval_shape` (taking example arguments, as here, or maybe shape tuples): ```python from __future__ import print_function # Look at all these internal imports! Not for the faint of heart. from jax.abstract_arrays import ShapedArray, raise_to_shaped from jax.api_util import pytree_to_jaxtupletree, pytree_fun_to_jaxtupletree_fun from jax.core import get_aval from jax.interpreters import partial_eval as pe from jax.util import unzip2 import jax.linear_util as lu from jax.tree_util import build_tree def abstractify(x): return raise_to_shaped(get_aval(x)) def eval_shape(f, *args): jaxtuple_args, in_trees = unzip2(map(pytree_to_jaxtupletree, args)) f, out_tree = pytree_fun_to_jaxtupletree_fun(lu.wrap_init(f), in_trees) out = pe.abstract_eval_fun(f.call_wrapped, *map(abstractify, jaxtuple_args)) return build_tree(out_tree(), out) ### import jax.numpy as np def fun(x, y): return np.tanh(np.dot(x, y) + 3.) x = np.ones((2, 3)) y = np.ones((3, 4)) out = eval_shape(fun, x, y) print(out) def fun1(): x = np.ones((2, 3)) y = np.ones((3, 4)) return np.tanh(np.dot(x, y) + 3.) out = eval_shape(fun1) print(out) def fun2(x, y): a, b = x return a + b + y x = (np.ones(2), np.ones(2)) y = 3. out = eval_shape(fun2, x, y) print(out) def fun3(x, y): return x[0] + x[1] + y x = (np.ones(2), np.ones(2)) y = 3. out = eval_shape(fun3, x, y) print(out) def fun4(x, y): return {'hi': x[0] + x[1] + y} x = (np.ones(2), np.ones(2)) y = 3. out = eval_shape(fun4, x, y) print(out) ```
Yes, having a function like this would be amazing! But, more precisely, we'd like to have a function: eval_shape(fun, *args_shapes) return shape of f(run on args with shapes as in args_shapes) 2 things that the above code still doesn't do * using shapes and types instead of values (should be simple) * removing the need for type of input (not super important, but would be very clean; is that technically possible though?) One more bug to check would be with indexing: def fun3(x, y): a = x[0][0] b = x[0][1] c = x[1] return a + b + c + y But in general it's a great idea! Thanks Matt!! Thanks for catching these bugs, Lukasz. Just updated the code in the OP to handle the `fun3` case. I think it might be getting close now...
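The "shapes and dtypes instead of values" request is what the merged version supports via duck typing; this usage sketch is lifted from the docstring and test added in the PR:

```python
import jax
import jax.numpy as np

class ShapeDtypeOnly(object):     # any object with .shape and .dtype works
  def __init__(self, shape, dtype):
    self.shape = shape
    self.dtype = dtype

f = lambda A, x: np.tanh(np.dot(A, x))
A = ShapeDtypeOnly((2000, 3000), np.float32)
x = ShapeDtypeOnly((3000, 1000), np.float32)
print(jax.eval_shape(f, A, x))    # (2000, 1000), computed without any FLOPs
```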
2019-06-01T16:35:43
google/jax
807
google__jax-807
[ "806" ]
9dfe27880517d5583048e7a3384b504681968fb4
diff --git a/jax/tree_util.py b/jax/tree_util.py --- a/jax/tree_util.py +++ b/jax/tree_util.py @@ -247,7 +247,7 @@ def _get_node_type(maybe_tree): return node_types.get(t) or _namedtuple_node(t) def _namedtuple_node(t): - if t.__bases__ == (tuple,) and hasattr(t, '_fields'): + if issubclass(t, tuple) and hasattr(t, '_fields'): return NamedtupleNode NamedtupleNode = NodeType('namedtuple',
diff --git a/tests/api_test.py b/tests/api_test.py --- a/tests/api_test.py +++ b/tests/api_test.py @@ -593,6 +593,23 @@ def f(pt): f_jit = api.jit(f) self.assertAllClose(f(pt), f_jit(pt), check_dtypes=False) + def test_namedtuple_subclass_transparency(self): + # See https://github.com/google/jax/issues/806 + Point = collections.namedtuple("Point", ["x", "y"]) + + class ZeroPoint(Point): + def is_zero(self): + return (self.x == 0) and (self.y == 0) + + pt = ZeroPoint(0., 0.) + + def f(pt): + return 0. if pt.is_zero() else np.sqrt(pt.x ** 2 + pt.y ** 2) + + f(pt) # doesn't crash + g = api.grad(f)(pt) + self.assertIsInstance(pt, ZeroPoint) + def test_eval_shape(self): def fun(x, y): return np.tanh(np.dot(x, y) + 3.)
Class inheriting namedtuple is not treated as a valid JAX Type ``` import collections import jax.numpy as np from jax.lax import scan Point = collections.namedtuple( 'Point', ['x', 'y'] ) class ZeroPoint(Point): def is_zero(self): return (self.x == 0) and (self.y == 0) def loop(in_, idx): return in_ a = ZeroPoint(1., 0.) b = ZeroPoint(1., 1.) scan(loop, [a,b], np.arange(2)) ``` The error was `TypeError: <class '__main__.ZeroPoint'> is not a valid Jax type`
A related note would be, a class which inherits pytree class may be considered as a valid pytree type as well? So far we haven't followed the convention that a subclass of a pytree is a pytree. I think there are clear ways to implement that, but it's a bit more magic that can lead to less predictable behavior (plus potential overheads, though those would need to be profiled before we worry about them). So it's less clear whether we should add that complexity. When there's an ambiguous decision like whether pytrees should follow subclassing, we usually take the conservative route and keep things the way they are (more explicit, less magic) until a concrete use case comes along, or we get enough evidence that we're really violating user expectations. #506 explored one way to implement a pytree mechanism that follows subclassing, though I think it added significant complexity. If we decide to go that route, there might be simpler alternative mechanisms (like using `isinstance` instead of manually traversing the MRO). We might want to separate out the the narrower case of namedtuple subclasses, though. Namedtuples were requested several times to act like pytrees by default (without having to explicitly register them), and users expected them to work like other Python builtins. But [our special handling for namedtuples](https://github.com/google/jax/blob/9dfe27880517d5583048e7a3384b504681968fb4/jax/tree_util.py#L250) only works for direct subclasses of `tuple` (like namedtuple classes are, but not their children). I think we could make namedtuple subclasses work just by writing `isinstance` there. That would fix the original issue, but not address the broader question in the second comment. Is it worth handling all subclassing? Opinions welcome! By the way, for posterity (since I know @zhongwen already knows this), as a workaround in the meantime you can always register your class as a pytree using `jax.tree_util.register_pytree_node`, and for this special case of a namedtuple you can use this utility by @sschoenholz: ```python def register_pytree_namedtuple(cls): register_pytree_node( cls, lambda xs: (tuple(xs), None), lambda _, xs: cls(*xs)) ``` In particular, you'd just have to import that function and write after your class definition: ```python register_pytree_namedtuple(ZeroPoint) ``` As a tradeoff that requires more explicitness from the user but keeps the core system simpler and faster, that seems pretty good! I forgot to say, thanks for opening this with such a clear and concise explanation!
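For completeness, here is what works once the direct-subclass check is relaxed to `issubclass(t, tuple)` (condensed from the regression test in this PR): a namedtuple subclass behaves as a pytree without any explicit registration.

```python
import collections
import jax
import jax.numpy as np

Point = collections.namedtuple("Point", ["x", "y"])

class ZeroPoint(Point):
  def is_zero(self):
    return (self.x == 0) and (self.y == 0)

pt = ZeroPoint(0., 0.)
f = lambda pt: 0. if pt.is_zero() else np.sqrt(pt.x ** 2 + pt.y ** 2)
f(pt)                 # no "not a valid JAX type" error
g = jax.grad(f)(pt)   # differentiates through the namedtuple subclass
```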
2019-06-03T14:23:08
google/jax
822
google__jax-822
[ "814" ]
8309836dd0d07963c2d4fb9d2f7d32974f575043
diff --git a/jax/lax/lax.py b/jax/lax/lax.py --- a/jax/lax/lax.py +++ b/jax/lax/lax.py @@ -457,6 +457,18 @@ def dot(lhs, rhs): Returns: An array containing the product. """ + # TODO(b/134526360): XLA doesn't support integer dots, so we emit a sum of + # products instead. + if onp.issubdtype(lhs.dtype, onp.integer): + lhs_shape = onp.shape(lhs) + lhs_ndim = len(lhs_shape) + rhs_ndim = onp.ndim(rhs) + if rhs_ndim > 1: + lhs = broadcast_in_dim(lhs, lhs_shape + (1,), tuple(range(len(lhs_shape)))) + if lhs_ndim > 1: + rhs = broadcast(rhs, (1,)) + return reduce(mul(lhs, rhs), _zero(lhs), add, (len(lhs_shape) - 1,)) + return dot_p.bind(lhs, rhs) def dot_general(lhs, rhs, dimension_numbers): @@ -476,9 +488,36 @@ def dot_general(lhs, rhs, dimension_numbers): Returns: An array containing the result. """ - lhs_dims, rhs_dims = dimension_numbers - dimension_numbers = (tuple(map(tuple, lhs_dims)), tuple(map(tuple, rhs_dims))) - return dot_general_p.bind(lhs, rhs, dimension_numbers=dimension_numbers) + contract_dims, batch_dims = dimension_numbers + contract_dims = tuple(map(tuple, contract_dims)) + batch_dims = tuple(map(tuple, batch_dims)) + if onp.issubdtype(lhs.dtype, onp.integer): + # TODO(b/134526360): XLA doesn't support integer dots, so we emit a sum of + # products instead. + lhs_contract_dims, rhs_contract_dims = contract_dims + lhs_batch_dims, rhs_batch_dims = batch_dims + lhs_noncontract_dims = tuple(sorted( + set(range(onp.ndim(lhs))) - set(lhs_batch_dims) - set(lhs_contract_dims))) + rhs_noncontract_dims = tuple(sorted( + set(range(onp.ndim(rhs))) - set(rhs_batch_dims) - set(rhs_contract_dims))) + lhs = transpose(lhs, + lhs_batch_dims + lhs_noncontract_dims + lhs_contract_dims) + rhs = transpose(rhs, + rhs_batch_dims + rhs_noncontract_dims + rhs_contract_dims) + new_lhs_shape = onp.insert( + onp.shape(lhs), len(lhs_batch_dims) + len(lhs_noncontract_dims), + (1,) * len(rhs_noncontract_dims)) + new_rhs_shape = onp.insert(onp.shape(rhs), len(lhs_batch_dims), + (1,) * len(lhs_noncontract_dims)) + lhs = reshape(lhs, new_lhs_shape) + rhs = reshape(rhs, new_rhs_shape) + out_ndim = (len(lhs_batch_dims) + len(lhs_noncontract_dims) + + len(rhs_noncontract_dims)) + return reduce(mul(lhs, rhs), _zero(lhs), add, + tuple(range(out_ndim, out_ndim + len(lhs_contract_dims)))) + + return dot_general_p.bind(lhs, rhs, + dimension_numbers=(contract_dims, batch_dims)) def broadcast(operand, sizes): """Broadcasts an array, adding new major dimensions.
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -497,7 +497,7 @@ def testCross(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, axes, rng): ("tensor-matrix", (4, 3, 2), (2, 5)), ("matrix-tensor", (5, 2), (3, 2, 4)), ("tensor-tensor", (2, 3, 4), (5, 4, 1))] - for lhs_dtype, rhs_dtype in CombosWithReplacement(inexact_dtypes, 2))) + for lhs_dtype, rhs_dtype in CombosWithReplacement(number_dtypes, 2))) def testDot(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, rng): args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)] self._CheckAgainstNumpy(onp.dot, lnp.dot, args_maker, check_dtypes=True) @@ -523,7 +523,7 @@ def testDot(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, rng): ("tensor-matrix", (5, 2, 3), (3, 2)), ("tensor-tensor", (5, 3, 4), (5, 4, 1)), ("tensor-tensor-broadcast", (3, 1, 3, 4), (5, 4, 1))] - for lhs_dtype, rhs_dtype in CombosWithReplacement(inexact_dtypes, 2))) + for lhs_dtype, rhs_dtype in CombosWithReplacement(number_dtypes, 2))) def testMatmul(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, rng): args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)] self._CheckAgainstNumpy(onp.matmul, lnp.matmul, args_maker, @@ -546,7 +546,7 @@ def testMatmul(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, rng): [(2, 3, 4), (5, 4, 3, 6), [[1, 2], [2, 1]]], [(1, 2, 3, 4), (4, 5, 3, 6), [[2, 3], [2, 0]]], ] - for lhs_dtype, rhs_dtype in CombosWithReplacement(inexact_dtypes, 2))) + for lhs_dtype, rhs_dtype in CombosWithReplacement(number_dtypes, 2))) def testTensordot(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, axes, rng): args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)] lnp_fun = lambda a, b: lnp.tensordot(a, b, axes) diff --git a/tests/lax_test.py b/tests/lax_test.py --- a/tests/lax_test.py +++ b/tests/lax_test.py @@ -588,7 +588,7 @@ def fun_via_grad(lhs, rhs): "lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype, "rng": rng} for lhs_shape in [(3,), (4, 3)] for rhs_shape in [(3,), (3, 6)] - for dtype in float_dtypes + for dtype in default_dtypes for rng in [jtu.rand_default()])) def testDot(self, lhs_shape, rhs_shape, dtype, rng): args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)] @@ -601,7 +601,7 @@ def testDot(self, lhs_shape, rhs_shape, dtype, rng): "lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype, "rng": rng} for lhs_shape in [(3,), (4, 3)] for rhs_shape in [(3,), (3, 6)] - for dtype in float_dtypes + for dtype in default_dtypes for rng in [jtu.rand_default()])) def testDotAgainstNumpy(self, lhs_shape, rhs_shape, dtype, rng): args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)] @@ -626,7 +626,7 @@ def testDotAgainstNumpy(self, lhs_shape, rhs_shape, dtype, rng): # [(1, 2, 2, 3), (1, 2, 3, 1), [1], [1]], [(3, 2), (2, 4), [1], [0]], ] - for dtype in float_dtypes + for dtype in default_dtypes for rng in [jtu.rand_small()])) def testDotGeneralContractOnly(self, lhs_shape, rhs_shape, dtype, lhs_contracting, rhs_contracting, rng): @@ -650,7 +650,7 @@ def fun(lhs, rhs): ((3, 3, 2), (3, 2, 4), (([2], [1]), ([0], [0]))), ((3, 4, 2, 4), (3, 4, 3, 2), (([2], [3]), ([0, 1], [0, 1]))), ] - for dtype in float_dtypes + for dtype in default_dtypes for rng in [jtu.rand_small()])) def testDotGeneralContractAndBatch(self, lhs_shape, rhs_shape, dtype, dimension_numbers, rng): @@ -673,7 +673,7 @@ def fun(lhs, rhs): ((3, 3, 2), (3, 2, 4), (([2], [1]), ([0], [0]))), ((3, 4, 2, 4), (3, 4, 3, 2), 
(([2], [3]), ([0, 1], [0, 1]))), ] - for dtype in float_dtypes + for dtype in default_dtypes for rng in [jtu.rand_small()])) def testDotGeneralAgainstNumpy(self, lhs_shape, rhs_shape, dtype, dimension_numbers, rng):
np.dot for integer arrays ``` import jax.numpy as np x = np.array((1,2,3)) np.dot(x, x) ``` gives `RuntimeError: Unimplemented: unsupported operand type S32 in op dot` 1. The error isn't super clear since S32 isn't a Python/numpy type 2. This should work, right? In numpy this correctly gives 14: ``` import numpy as onp onp.dot(x, x) ```
It's surprising that people don't seem to complain about this more :) That error message is from XLA. Which backend are you on? Running with the CPU on my mac.
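The merged fix lowers integer `dot`/`dot_general` to an elementwise multiply followed by a reduce-sum, since XLA lacks integer dot support. The same idea works as a user-level workaround; a rough sketch with illustrative values:

```python
import jax.numpy as np

x = np.array((1, 2, 3))
print(np.sum(x * x))                      # 14: vector dot as multiply + sum

A = np.arange(6).reshape((2, 3))          # (m, k) @ (k, n) as a sum of products
B = np.arange(12).reshape((3, 4))
C = np.sum(A.reshape((2, 3, 1)) * B.reshape((1, 3, 4)), axis=1)
print(C.shape)                            # (2, 4), matches A @ B
```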
2019-06-06T21:25:20
google/jax
828
google__jax-828
[ "827" ]
d622e78e82a408a98334ea2548c0bf9b0e4787ff
diff --git a/jax/lax/lax_parallel.py b/jax/lax/lax_parallel.py --- a/jax/lax/lax_parallel.py +++ b/jax/lax/lax_parallel.py @@ -106,6 +106,9 @@ def ppermute(x, axis_name, perm): def pswapaxes(x, axis_name, axis): """Swap the pmapped axis ``axis_name`` with the unmapped axis ``axis``. + The mapped axis size must be equal to the size of the unmapped axis; that is, + we must have ``lax.psum(1, axis_name) == x.shape[axis]``. + This function is similar to ``psplit`` except the pmapped axis of the input is placed at the position ``axis`` in the output. @@ -121,6 +124,12 @@ def pswapaxes(x, axis_name, axis): where ``axis_size`` is the size of the mapped axis named ``axis_name`` in the input ``x``. """ + # TODO(mattjj): enable this check when _serial_pmap works with psum(1, ax) + # axis_size = psum(1, axis_name) + # if axis_size != x.shape[axis]: + # msg = ("pswapaxes requires the size of the mapped axis ``axis_name`` equal " + # "``x.shape[axis]``, but they are {} and {} respectively.") + # raise ValueError(msg.format(axis_size(axis_name), x.shape[axis])) return pswapaxes_p.bind(x, axis_name=axis_name, axis=axis) def psplit(x, axis_name, axis): @@ -233,7 +242,11 @@ def _pswapaxes_serial_pmap_rule(vals, axes, axis): perm[axis] = axis_in return lax.transpose(x, perm), axis_in +def _pswapaxes_translation_rule(c, xla_x, axis, replica_groups): + return c.AllToAll(xla_x, axis, axis, replica_groups) + pswapaxes_p = standard_pmap_primitive('pswapaxes') +pxla.parallel_translation_rules[pswapaxes_p] = _pswapaxes_translation_rule parallel.serial_pmap_primitive_rules[pswapaxes_p] = _pswapaxes_serial_pmap_rule
diff --git a/tests/pmap_test.py b/tests/pmap_test.py --- a/tests/pmap_test.py +++ b/tests/pmap_test.py @@ -519,6 +519,16 @@ def testVmapOfPmapTuple(self): self.assertAllClose(expected_bz1, bz1, check_dtypes=False) self.assertAllClose(bz2, bz2, check_dtypes=False) + @jtu.skip_on_devices("cpu", "gpu") + def testPswapaxes(self): + device_count = xla_bridge.device_count() + shape = (device_count, 3, device_count, 5) + x = onp.arange(prod(shape)).reshape(shape) + + ans = pmap(lambda x: lax.pswapaxes(x, 'i', 1), axis_name='i')(x) + expected = onp.swapaxes(x, 0, 2) + self.assertAllClose(ans, expected, check_dtypes=False) + if __name__ == '__main__': absltest.main()
add lax.pswapaxes pxla translation Needed by @thenerdstation.
2019-06-08T15:58:12
google/jax
832
google__jax-832
[ "831" ]
c061fd7b058759133aa56d48130fad1a0a0a311b
diff --git a/jax/interpreters/xla.py b/jax/interpreters/xla.py --- a/jax/interpreters/xla.py +++ b/jax/interpreters/xla.py @@ -470,9 +470,8 @@ def __eq__(self, other): canonicalize_dtype_handlers[DeviceTuple] = identity def _device_tuple_constant_handler(c, val, canonicalize_types=True): - py_val = pack(c.Constant(elt, canonicalize_types=canonicalize_types) - for elt in val) - return c.Constant(py_val) + const = partial(c.Constant, canonicalize_types=canonicalize_types) + return c.Tuple(*map(const, val)) xb.register_constant_handler(DeviceTuple, _device_tuple_constant_handler) # TODO(mattjj): could jit-compile a computation here
diff --git a/tests/lax_test.py b/tests/lax_test.py --- a/tests/lax_test.py +++ b/tests/lax_test.py @@ -1325,6 +1325,14 @@ def testLongConstantHandling(self): self.skipTest("Test is Python 2 specific") self.assertTrue(api.jit(lambda x: lax.lt(x, long(10)))(long(3))) + def testIssue831(self): + # Tests the DeviceTuple constant handler + def f(x): + g = lambda *args: args[1] + return api.jit(lax.fori_loop, static_argnums=(2,))( 0, 10, g, x) + + api.jit(f)(1.) # doesn't crash + class DeviceConstantTest(jtu.JaxTestCase): def _CheckDeviceConstant(self, make_const, expected):
DeviceTuple constant handler doesn't work (and is untested) From @fehiepsi in [this comment](https://github.com/google/jax/issues/804#issuecomment-500169961). ```python def f(x): return jit(lax.fori_loop, static_argnums=(2,))(0, 10, lambda *args: args[1], x) jit(f)(1.) ``` ``` TypeError: No constant handler for type: <class 'jaxlib.xla_extension.XlaOp'> ```
2019-06-09T16:50:19
google/jax
834
google__jax-834
[ "830" ]
532411880d8811f7e6c34e1f81d4c6f4657e7c57
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -1379,7 +1379,7 @@ def identity(n, dtype=None): @_wraps(onp.arange) def arange(*args, **kwargs): - dtype = kwargs.pop("dtype", None) + dtype = kwargs.get("dtype", None) if not args: raise TypeError("Required argument 'start' (pos 1) not found") # same as numpy error
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -1493,7 +1493,6 @@ def testIssue453(self): expected = onp.reshape(a, (3, 2), order='F') self.assertAllClose(ans, expected, check_dtypes=True) - @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_op={}_dtype={}".format( op, {bool: "bool", int: "int", float: "float"}[dtype]), @@ -1538,6 +1537,10 @@ def testArange(self): self.assertFalse(type(lnp.arange(77)) == type(onp.arange(77))) self.assertTrue(type(lnp.arange(77)) == type(lax.iota(onp.int32, 77))) + def testIssue830(self): + a = lnp.arange(4, dtype=lnp.complex64) + self.assertEqual(a.dtype, lnp.complex64) + def testIssue728(self): assert lnp.allclose(lnp.eye(5000), onp.eye(5000)) self.assertEqual(0, onp.sum(lnp.eye(1050) - onp.eye(1050)))
np.arange ignores dtype arange ignores the passed dtype and is always `int*` ```python a = np.arange(4, dtype=np.complex64) print(a.dtype) assert a.dtype == np.complex64 ``` Results in: ``` int64 --------------------------------------------------------------------------- AssertionError Traceback (most recent call last) <ipython-input-12-5b101f76f4c3> in <module>() 1 a = np.arange(4, dtype=np.complex64) 2 print(a.dtype) ----> 3 assert a.dtype is np.complex64 AssertionError: ```
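A quick check of the intended behaviour after the one-line `kwargs.get` fix above, mirroring the `testIssue830` case in the test patch; nothing here goes beyond the reported example:

```python
import jax.numpy as jnp

a = jnp.arange(4, dtype=jnp.complex64)
assert a.dtype == jnp.complex64  # previously the dtype argument was dropped and an integer array came back
```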
2019-06-10T03:19:07
google/jax
846
google__jax-846
[ "845" ]
619b033474912d3f0229ddd13d24c5d0af93911c
diff --git a/jax/interpreters/batching.py b/jax/interpreters/batching.py --- a/jax/interpreters/batching.py +++ b/jax/interpreters/batching.py @@ -349,6 +349,9 @@ def _moveaxis(force_bcast, sz, dst, src, aval, x): x = broadcast(x, sz, force_broadcast=force_bcast) src = 0 dst_ = dst % (aval.ndim + 1) + elif src >= aval.ndim: + raise ValueError( + "cannot move axis {} in {}-dimensional array".format(src, aval.ndim)) if src == dst_: return x else: diff --git a/jax/lax/lax.py b/jax/lax/lax.py --- a/jax/lax/lax.py +++ b/jax/lax/lax.py @@ -1927,12 +1927,12 @@ def _dot_batch_rule(batched_args, batch_dims): if lbd == 0: return dot(lhs, rhs), 0 else: - return dot(T(rhs), lhs), 1 + return dot(T(rhs), lhs), onp.ndim(rhs) - 1 if lbd is None: assert rbd in (0, 1) if rbd == onp.ndim(rhs) - 1: - return dot(lhs, rhs), 1 + return dot(lhs, rhs), onp.ndim(lhs) - 1 else: return dot(rhs, T(lhs)), 0
diff --git a/tests/batching_test.py b/tests/batching_test.py --- a/tests/batching_test.py +++ b/tests/batching_test.py @@ -290,6 +290,14 @@ def testDot3(self): expected = onp.einsum('inj,jk->nik', xs, ys) self.assertAllClose(ans, expected, check_dtypes=False) + def testDot4(self): + R = onp.random.RandomState(0).randn + xs = R(3, 2) + ys = R(3) + ans = vmap(np.dot, in_axes=(1, None))(xs, ys) + expected = onp.einsum('ij,i->j', xs, ys) + self.assertAllClose(ans, expected, check_dtypes=False) + def testPad(self): R = onp.random.RandomState(0).randn
vmap fails possibly due to internal mishandling of scalar ```python import jax import jax.numpy as np x = np.zeros([3,2], dtype=np.float32) y = np.zeros([3], dtype=np.float32) foo = jax.vmap(np.vdot, (1,None), 0) foo(x,y) ``` Gives `TypeError: transpose permutation isn't a permutation of operand dimensions, got permutation (1, 0) for operand shape (2L,).`
Inside `dot_batch_rule`, the two highlighted lines: ``` 1926 if rbd is None: 1927 assert lbd in (0, 1) 1928 if lbd == 0: 1929 return dot(lhs, rhs), 0 1930 else: 1931 -> return dot(T(rhs), lhs), 1 1932 1933 if lbd is None: 1934 assert rbd in (0, 1) 1935 if rbd == onp.ndim(rhs) - 1: 1936 -> return dot(lhs, rhs), 1 1937 else: 1938 return dot(rhs, T(lhs)), 0 ``` are wrong in cases with a 1D non-batched argument (where the output will also be 1D, so the output `batch_dim` should be 0). Will fix.
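A small sanity check of the corrected behaviour, mirroring the `testDot4` case added in the test patch above; the shapes are chosen to hit the 1D non-batched branch and the array values are arbitrary:

```python
import numpy as onp
import jax
import jax.numpy as np

xs = onp.random.RandomState(0).randn(3, 2)
ys = onp.random.RandomState(1).randn(3)

ans = jax.vmap(np.vdot, in_axes=(1, None))(xs, ys)  # batch over the second axis of xs only
expected = onp.einsum('ij,i->j', xs, ys)            # same contraction done column by column
assert ans.shape == (2,)                            # output batch dim is 0, result is 1D
assert onp.allclose(ans, expected, atol=1e-6)
```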
2019-06-13T01:03:19
google/jax
860
google__jax-860
[ "851" ]
fbe9affbff8d4bf6355e946f609ff53445e65987
diff --git a/jax/lax/lax.py b/jax/lax/lax.py --- a/jax/lax/lax.py +++ b/jax/lax/lax.py @@ -2692,7 +2692,7 @@ def _gather_transpose_rule(t, operand, start_indices, dimension_numbers, assert operand is None if t is ad_util.zero: return [ad_util.zero, ad_util.zero] - zeros = broadcast(_const(t, 0), operand_shape) + zeros = full(operand_shape, 0, dtype=t.dtype) scatter_dnums = ScatterDimensionNumbers( update_window_dims=dimension_numbers.offset_dims, inserted_window_dims=dimension_numbers.collapsed_slice_dims,
Backward pass of gather needlessly allocates global memory When using a gather-type operation in the forward pass for word embeddings, the backward pass pre-allocates a constant all-zero matrix that is the same size as the embedding matrix. My current fix is: ``` --- a/jax/lax/lax.py +++ b/jax/lax/lax.py @@ -2692,7 +2692,9 @@ def _gather_transpose_rule(t, operand, s assert operand is None if t is ad_util.zero: return [ad_util.zero, ad_util.zero] - zeros = broadcast(_const(t, 0), operand_shape) + zeros = full(operand_shape, 0, dtype=t.dtype) scatter_dnums = ScatterDimensionNumbers( update_window_dims=dimension_numbers.offset_dims, inserted_window_dims=dimension_numbers.collapsed_slice_dims, ```
This looks right to me. Please turn into a PR!
2019-06-17T12:22:12
google/jax
868
google__jax-868
[ "826" ]
0190684ee216f13d9a379ef55c94d866401f7281
diff --git a/jax/lax/lax.py b/jax/lax/lax.py --- a/jax/lax/lax.py +++ b/jax/lax/lax.py @@ -1164,7 +1164,7 @@ def index_in_dim(operand, index, axis=0, keepdims=True): def dynamic_slice_in_dim(operand, start_index, slice_size, axis=0): """Convenience wrapper around dynamic_slice applying to one dimension.""" - start_indices = [onp.array([0])] * operand.ndim + start_indices = [onp.array([0], dtype=_dtype(start_index))] * operand.ndim slice_sizes = list(operand.shape) axis = int(axis) diff --git a/jax/lax_linalg.py b/jax/lax_linalg.py --- a/jax/lax_linalg.py +++ b/jax/lax_linalg.py @@ -19,9 +19,10 @@ import numpy as onp from jax.numpy import lax_numpy as np +from jax import ad_util from jax import core from jax import lax -from jax import ad_util +from jax import ops from jax.interpreters import xla from jax.interpreters import ad from jax.interpreters import batching @@ -450,23 +451,34 @@ def lu_cpu_translation_rule(c, operand): xla.backend_specific_translations['cpu'][lu_p] = lu_cpu_translation_rule -def lu_pivots_to_permutation(swaps, k): - """Converts the pivots (row swaps) returned by LU to a permutation.""" - - def body_fn(i, loop_carry): - swaps, permutation = loop_carry - j = swaps[i] - x, y = np.ravel(permutation[i]), np.ravel(permutation[j]) - permutation = lax.dynamic_update_index_in_dim(permutation, y, i, axis=0) - permutation = lax.dynamic_update_index_in_dim(permutation, x, j, axis=0) - return swaps, permutation - - n, = np.shape(swaps) - permutation = np.arange(k) - _, permutation = lax.fori_loop( - onp.array(0, onp.int32), onp.array(n, onp.int32), body_fn, (swaps, permutation)) - return permutation - +def lu_pivots_to_permutation(swaps, m): + """Converts the pivots (row swaps) returned by LU to a permutation. + + We build a permutation rather than applying `swaps` directly to the rows + of a matrix because lax loops aren't differentiable. + + Args: + swaps: an array of shape (..., k) of row swaps to perform + m: the size of the output permutation. m should be >= k. + Returns: + An int32 array of shape (..., m). + """ + assert len(swaps.shape) >= 1 + batch_dims = swaps.shape[:-1] + k = swaps.shape[-1] + + def body_fn(i, permutation): + j = swaps[..., i] + iotas = np.ix_(*(lax.iota(np.int32, b) for b in batch_dims)) + x = permutation[..., i] + y = permutation[iotas + (j,)] + permutation = ops.index_update(permutation, ops.index[..., i], y) + return ops.index_update(permutation, ops.index[iotas + (j,)], x) + + permutation = lax.broadcasted_iota(np.int32, batch_dims + (m,), + len(batch_dims)) + return lax.fori_loop( + onp.array(0, onp.int32), onp.array(k, onp.int32), body_fn, permutation) # QR decomposition diff --git a/jax/numpy/linalg.py b/jax/numpy/linalg.py --- a/jax/numpy/linalg.py +++ b/jax/numpy/linalg.py @@ -229,30 +229,35 @@ def solve(a, b): b_shape = np.shape(b) a_ndims = len(a_shape) b_ndims = len(b_shape) - if not (a_ndims >= 2 and a_shape[-1] == a_shape[-2] and - (a_ndims == b_ndims or a_ndims == b_ndims + 1)): + if not (a_ndims >= 2 and a_shape[-1] == a_shape[-2] and b_ndims >= 1): msg = ("The arguments to solve must have shapes a=[..., m, m] and " "b=[..., m, k] or b=[..., m]; got a={} and b={}") raise ValueError(msg.format(a_shape, b_shape)) lu, pivots = lax_linalg.lu(a) dtype = lax.dtype(a) - # TODO(phawkins): add unit_diagonal support to solve_triangular, use it here - # instead of explicit masking of l. 
m = a_shape[-1] - l = np.tril(lu, -1)[:, :m] + np.eye(m, m, dtype=dtype) - # TODO(phawkins): triangular_solve only supports matrices on the RHS, so we - # add a dummy dimension. Extend it to support vectors and simplify this. - x = b if a_ndims == b_ndims else b[..., None] + # Numpy treats the RHS as a (batched) vector if the number of dimensions + # differ by 1. Otherwise, broadcasting rules apply. + x = b[..., None] if a_ndims == b_ndims + 1 else b + + batch_dims = lax.broadcast_shapes(lu.shape[:-2], x.shape[:-2]) + x = np.broadcast_to(x, batch_dims + x.shape[-2:]) + lu = np.broadcast_to(lu, batch_dims + lu.shape[-2:]) permutation = lax_linalg.lu_pivots_to_permutation(pivots, m) - x = x[..., permutation, :] + permutation = np.broadcast_to(permutation, batch_dims + (m,)) + iotas = np.ix_(*(lax.iota(np.int32, b) for b in batch_dims + (1,))) + x = x[iotas[:-1] + (permutation, slice(None))] + # TODO(phawkins): add unit_diagonal support to triangular_solve, use it here + # instead of explicit masking of l. + l = np.tril(lu, -1)[..., :, :m] + np.eye(m, m, dtype=dtype) x = lax_linalg.triangular_solve(l, x, left_side=True, lower=True) x = lax_linalg.triangular_solve(lu, x, left_side=True, lower=False) - return x[..., 0] if a_ndims != b_ndims else x + return x[..., 0] if a_ndims == b_ndims + 1 else x for func in get_module_functions(onp.linalg):
diff --git a/tests/lax_numpy_indexing_test.py b/tests/lax_numpy_indexing_test.py --- a/tests/lax_numpy_indexing_test.py +++ b/tests/lax_numpy_indexing_test.py @@ -365,6 +365,9 @@ def check_grads(f, args, order, atol=None, rtol=None, eps=None): None, onp.array([-1, 2]))), ]), + ("IntArrayWithInt32Type", + [IndexSpec(shape=(3, 4), indexer=(Ellipsis, onp.array(1, dtype=onp.int32))) + ]), ] class IndexingTest(jtu.JaxTestCase): diff --git a/tests/linalg_test.py b/tests/linalg_test.py --- a/tests/linalg_test.py +++ b/tests/linalg_test.py @@ -422,6 +422,8 @@ def testQrBatching(self): ((1, 1), (1, 1)), ((4, 4), (4,)), ((8, 8), (8, 4)), + ((1, 2, 2), (3, 2)), + ((2, 1, 3, 3), (2, 4, 3, 4)), ] for dtype in float_types() + complex_types() for rng in [jtu.rand_default()]))
linalg.solve doesn't respect broadcasting semantics I'm attempting a batched matrix solve, ```python import jax.numpy as jp import numpy as np A = np.eye(2)[np.newaxis] b = np.ones((3, 2)) print(A.shape, b.shape) print(np.linalg.solve(A, b)) print(jp.linalg.solve(A, b)) ``` but I'm getting the following: ``` (1, 2, 2) (3, 2) [[1. 1.] [1. 1.] [1. 1.]] /Users/skainswo/.local/share/virtualenvs/research-OGGq2tNy/lib/python3.7/site-packages/jax/lib/xla_bridge.py:130: UserWarning: No GPU/TPU found, falling back to CPU. warnings.warn('No GPU/TPU found, falling back to CPU.') --------------------------------------------------------------------------- ValueError Traceback (most recent call last) ~/nu/skainswo/research/gan_with_the_wind/mle_normal.py in <module> 31 print(A.shape, b.shape) 32 print(np.linalg.solve(A, b)) ---> 33 print(jp.linalg.solve(A, b)) ~/.local/share/virtualenvs/research-OGGq2tNy/lib/python3.7/site-packages/jax/numpy/linalg.py in solve(a, b) 247 x = b if a_ndims == b_ndims else b[..., None] 248 --> 249 permutation = lax_linalg.lu_pivots_to_permutation(pivots, m) 250 x = x[..., permutation, :] 251 ~/.local/share/virtualenvs/research-OGGq2tNy/lib/python3.7/site-packages/jax/lax_linalg.py in lu_pivots_to_permutation(swaps, k) 462 return swaps, permutation 463 --> 464 n, = np.shape(swaps) 465 permutation = np.arange(k) 466 _, permutation = lax.fori_loop( ValueError: too many values to unpack (expected 1) ``` I'm on jax 0.1.36 and numpy 1.16.4.
I also observed this problem and solved it by making `A` and `b` have the same batch_shape (using `np.broadcast_to`). @fehiepsi Hmm, that's not actually working for me. I now have shapes `(3, 2, 2)` and `(3, 2)` for A and b, respectively, but I'm still seeing the same error. And `A.shape == (3, 2, 2)` `b.shape == (3, 2, 1)` doesn't work either. Sorry, I used `jax.scipy.linalg.solve_triangular`, so the behaviour is a bit different. The case `(3, 2, 2)` and `(3, 2)`/`(3, 2, 1)` does not work with `jax.numpy.solve` on my system either. @fehiepsi Ah, that would explain it! Yes, `jax.numpy.linalg.solve` simply doesn't support broadcasting at the moment. As a workaround, you could most likely wrap a `vmap` around an unbatched solve; I see no reason why that wouldn't work. The underlying primitives should support batching; it's only the Python numpy layer on top that does not.
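A sketch of the `vmap` workaround suggested here, using the shapes from the report; this only covers the "single `A`, batched `b`" pattern and simply maps an unbatched solve over `b`'s leading axis:

```python
import numpy as onp
import jax
import jax.numpy as jnp

A = onp.eye(2)[onp.newaxis]   # (1, 2, 2)
b = onp.ones((3, 2))          # (3, 2)

batched_solve = jax.vmap(jnp.linalg.solve, in_axes=(None, 0))
x = batched_solve(A[0], b)    # reuse the single A for every row of b
assert x.shape == (3, 2)
```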
2019-06-18T01:21:44
google/jax
870
google__jax-870
[ "847" ]
e1ff5b2a93159b9ee17c40bb703944f45df64e0f
diff --git a/jax/lax/lax.py b/jax/lax/lax.py --- a/jax/lax/lax.py +++ b/jax/lax/lax.py @@ -822,19 +822,20 @@ def _reduction_jaxpr(computation, init_value): def _get_monoid_reducer(monoid_op, x): aval = core.get_aval(x) + dtype = _dtype(x) if (type(aval) is ConcreteArray) and aval.shape == (): if monoid_op is add: return aval.val == 0 and _reduce_sum if monoid_op is mul: return aval.val == 1 and _reduce_prod + elif monoid_op is bitwise_or and dtype == onp.bool_: + return aval.val == _get_max_identity(dtype) and _reduce_or + elif monoid_op is bitwise_and and dtype == onp.bool_: + return aval.val == _get_min_identity(dtype) and _reduce_and elif monoid_op is max: - return aval.val == _get_max_identity(aval.dtype) and _reduce_max + return aval.val == _get_max_identity(dtype) and _reduce_max elif monoid_op is min: - return aval.val == _get_min_identity(aval.dtype) and _reduce_min - elif monoid_op is bitwise_or and aval.dtype == onp.bool_: - return aval.val == _get_max_identity(aval.dtype) and _reduce_or - elif monoid_op is bitwise_and and aval.dtype == onp.bool_: - return aval.val == _get_min_identity(aval.dtype) and _reduce_and + return aval.val == _get_min_identity(dtype) and _reduce_min def _get_max_identity(dtype): if onp.issubdtype(dtype, onp.inexact): @@ -2267,11 +2268,10 @@ def _broadcast_in_dim_batch_rule(batched_args, batch_dims, shape, broadcast_dimensions): operand, = batched_args bdim, = batch_dims - new_shape = list(shape) - new_shape.insert(bdim, operand.shape[bdim]) - new_broadcast_dimensions = [d if d < bdim else d + 1 for d in broadcast_dimensions] - new_broadcast_dimensions.insert(bdim, bdim) - return broadcast_in_dim(operand, new_shape, new_broadcast_dimensions), bdim + new_operand = batching.move_dim_to_front(operand, bdim) + new_shape = (operand.shape[bdim],) + shape + new_broadcast_dimensions = (0,) + tuple(onp.add(1, broadcast_dimensions)) + return broadcast_in_dim(new_operand, new_shape, new_broadcast_dimensions), 0 broadcast_in_dim_p = standard_primitive( @@ -2569,8 +2569,7 @@ def _select_batch_rule(batched_args, batch_dims, **unused_kwargs): elif onp.ndim(pred) == 0 and ot_bdim is not None and of_bdim is not None: if ot_bdim == of_bdim: return select(pred, on_true, on_false), ot_bdim - else: - assert onp.shape(on_true) == onp.shape(on_false) + elif onp.shape(on_true) == onp.shape(on_false): on_false = batching.moveaxis(size, ot_bdim, of_bdim, on_false) return select(pred, on_true, on_false), ot_bdim @@ -3446,7 +3445,7 @@ def _reduce_window_batch_rule( operand = reduce_window( operand, window_dimensions, window_strides, padding) - return operand, 0 + return operand, bdim reduce_window_sum_p = standard_primitive( _reduce_window_sum_shape_rule, _input_dtype, 'reduce_window_sum',
diff --git a/tests/lax_test.py b/tests/lax_test.py --- a/tests/lax_test.py +++ b/tests/lax_test.py @@ -622,13 +622,11 @@ def testDotAgainstNumpy(self, lhs_shape, rhs_shape, dtype, rng): "lhs_contracting": lhs_contracting, "rhs_contracting": rhs_contracting, "rng": rng} for lhs_shape, rhs_shape, lhs_contracting, rhs_contracting in [ - # these all fail with "RuntimeError: Unimplemented: Dot with - # non-standard contracting dimensions not implemented." - # [(3, 5), (2, 5), [1], [1]], - # [(5, 3), (5, 2), [0], [0]], - # [(5, 3, 2), (5, 2, 4), [0], [0]], - # [(5, 3, 2), (5, 2, 4), [0,2], [0,1]], - # [(1, 2, 2, 3), (1, 2, 3, 1), [1], [1]], + [(3, 5), (2, 5), [1], [1]], + [(5, 3), (5, 2), [0], [0]], + [(5, 3, 2), (5, 2, 4), [0], [0]], + [(5, 3, 2), (5, 2, 4), [0,2], [0,1]], + [(1, 2, 2, 3), (1, 2, 3, 1), [1], [1]], [(3, 2), (2, 4), [1], [0]], ] for dtype in default_dtypes @@ -1034,8 +1032,9 @@ def testTransposeAgainstNumpy(self, shape, dtype, perm, rng): self._CheckAgainstNumpy(op, numpy_op, args_maker) @parameterized.named_parameters(jtu.cases_from_list( - {"testcase_name": "_op={}_inshape={}_reducedims={}" - .format(op.__name__, jtu.format_shape_dtype_string(shape, dtype), dims), + {"testcase_name": "_op={}_inshape={}_reducedims={}_initval={}" + .format(op.__name__, jtu.format_shape_dtype_string(shape, dtype), dims, + init_val), "op": op, "init_val": init_val, "shape": shape, "dtype": dtype, "dims": dims, "rng": rng} for init_val, op, dtypes in [ @@ -1043,12 +1042,12 @@ def testTransposeAgainstNumpy(self, shape, dtype, perm, rng): (1, lax.mul, default_dtypes), (-onp.inf, lax.max, float_dtypes), (onp.iinfo(onp.int32).min, lax.max, [onp.int32]), - (onp.iinfo(onp.int64).min, lax.max, [onp.int64]), + # (onp.iinfo(onp.int64).min, lax.max, [onp.int64]), # TODO fails (onp.iinfo(onp.uint32).min, lax.max, [onp.uint32]), (onp.iinfo(onp.uint64).min, lax.max, [onp.uint64]), (onp.inf, lax.min, float_dtypes), (onp.iinfo(onp.int32).max, lax.min, [onp.int32]), - (onp.iinfo(onp.int64).max, lax.min, [onp.int64]), + # (onp.iinfo(onp.int64).max, lax.min, [onp.int64]), # TODO fails (onp.iinfo(onp.uint32).max, lax.min, [onp.uint32]), (onp.iinfo(onp.uint64).max, lax.min, [onp.uint64]), ] @@ -1057,7 +1056,8 @@ def testTransposeAgainstNumpy(self, shape, dtype, perm, rng): [(3, 4, 5), (0,)], [(3, 4, 5), (1, 2)], [(3, 4, 5), (0, 2)], [(3, 4, 5), (0, 1, 2)] ] - for rng in [jtu.rand_small()])) + for rng in [jtu.rand_default() if onp.issubdtype(dtype, onp.integer) + else jtu.rand_small()])) def testReduce(self, op, init_val, shape, dtype, dims, rng): init_val = onp.asarray(init_val, dtype=dtype) fun = lambda operand, init_val: lax.reduce(operand, init_val, op, dims) @@ -2241,14 +2241,52 @@ def f2(x, y): self.assertAllClose(ans, expected, check_dtypes=False) +def all_bdims(*shapes): + bdims = (itertools.chain([None], range(len(shape) + 1)) for shape in shapes) + return (t for t in itertools.product(*bdims) if not all(e is None for e in t)) + +def add_bdim(bdim_size, bdim, shape): + shape = list(shape) + if bdim is not None: + shape.insert(bdim, bdim_size) + return tuple(shape) + def slicer(x, bdim): if bdim is None: return lambda _: x else: return lambda i: lax.index_in_dim(x, i, bdim, keepdims=False) +def args_slicer(args, bdims): + slicers = list(map(slicer, args, bdims)) + return lambda i: [sl(i) for sl in slicers] + class LaxVmapTest(jtu.JaxTestCase): + def _CheckBatching(self, op, bdim_size, bdims, shapes, dtype, rng, + rtol=None, atol=None): + batched_shapes = map(partial(add_bdim, bdim_size), bdims, shapes) + args = 
[rng(shape, dtype) for shape in batched_shapes] + args_slice = args_slicer(args, bdims) + ans = api.vmap(op, bdims)(*args) + expected = onp.stack([op(*args_slice(i)) for i in range(bdim_size)]) + self.assertAllClose(ans, expected, check_dtypes=True, rtol=rtol, atol=atol) + + @parameterized.named_parameters(itertools.chain.from_iterable( + jtu.cases_from_list( + {"testcase_name": "{}_bdims={}".format( + jtu.format_test_name_suffix(rec.op.__name__, shapes, + itertools.repeat(dtype)), bdims), + "op": rec.op, "rng": rec.rng, "shapes": shapes, "dtype": dtype, + "bdims": bdims} + for shape_group in compatible_shapes + for shapes in CombosWithReplacement(shape_group, rec.nargs) + for bdims in all_bdims(*shapes) + for dtype in rec.dtypes) + for rec in LAX_OPS)) + def testOp(self, op, rng, shapes, dtype, bdims): + self._CheckBatching(op, 10, bdims, shapes, dtype, rng) + @parameterized.named_parameters(jtu.cases_from_list( {"testcase_name": "_lhs_shape={}_rhs_shape={}_strides={}_padding={}_lhs_dilation={}_" @@ -2293,7 +2331,6 @@ def testConvGeneralDilatedBatching( self, lhs_shape, rhs_shape, dtype, strides, padding, lhs_dil, rhs_dil, dimension_numbers, perms, feature_group_count, lhs_bdim, rhs_bdim, rng): tol = 1e-1 if onp.finfo(dtype).bits == 32 else 1e-3 - bdim_size = 10 # permute shapes to match dim_spec, scale by feature_group_count lhs_perm, rhs_perm = perms @@ -2303,26 +2340,320 @@ def testConvGeneralDilatedBatching( lhs_shape[dim_spec.lhs_spec[1]] *= feature_group_count rhs_shape[dim_spec.rhs_spec[0]] *= feature_group_count - # add batch dimension - if lhs_bdim is not None: - lhs_shape.insert(lhs_bdim, bdim_size) - if rhs_bdim is not None: - rhs_shape.insert(rhs_bdim, bdim_size) - - # create arg values and sliced versions - lhs = rng(lhs_shape, dtype) - rhs = rng(rhs_shape, dtype) - lhs_slice = slicer(lhs, lhs_bdim) - rhs_slice = slicer(rhs, rhs_bdim) - conv = partial(lax.conv_general_dilated, window_strides=strides, padding=padding, lhs_dilation=lhs_dil, rhs_dilation=rhs_dil, dimension_numbers=dimension_numbers, feature_group_count=feature_group_count, precision=lax.Precision.HIGHEST) - ans = api.vmap(conv, (lhs_bdim, rhs_bdim))(lhs, rhs) - expected = onp.stack([conv(lhs_slice(i), rhs_slice(i)) for i in range(bdim_size)]) - self.assertAllClose(ans, expected, True, tol, tol) + self._CheckBatching(conv, 5, (lhs_bdim, rhs_bdim), (lhs_shape, rhs_shape), + dtype, rng) + + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": "_shape={}_from_dtype={}_to_dtype={}_bdims={}".format( + shape, from_dtype, to_dtype, bdims), + "shape": shape, "from_dtype": from_dtype, "to_dtype": to_dtype, + "bdims": bdims, "rng": rng} + for from_dtype, to_dtype in itertools.product( + [onp.float32, onp.int32, "float32", "int32"], repeat=2) + for shape in [(2, 3)] + for bdims in all_bdims(shape) + for rng in [jtu.rand_default()])) + def testConvertElementType(self, shape, from_dtype, to_dtype, bdims, rng): + op = lambda x: lax.convert_element_type(x, to_dtype) + self._CheckBatching(op, 10, bdims, (shape,), from_dtype, rng) + + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": "_shape={}_from_dtype={}_to_dtype={}_bdims={}".format( + shape, from_dtype, to_dtype, bdims), + "shape": shape, "from_dtype": from_dtype, "to_dtype": to_dtype, + "bdims": bdims, "rng": rng} + for from_dtype, to_dtype in itertools.product( + [onp.float32, onp.int32, "float32", "int32"], repeat=2) + for shape in [(2, 3)] + for bdims in all_bdims(shape) + for rng in [jtu.rand_default()])) + def 
testBitcastElementType(self, shape, from_dtype, to_dtype, bdims, rng): + op = lambda x: lax.bitcast_convert_type(x, to_dtype) + self._CheckBatching(op, 10, bdims, (shape,), from_dtype, rng) + + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": "_min_shape={}_operand_shape={}_max_shape={}_bdims={}" + .format(jtu.format_shape_dtype_string(min_shape, dtype), + jtu.format_shape_dtype_string(operand_shape, dtype), + jtu.format_shape_dtype_string(max_shape, dtype), + bdims), + "min_shape": min_shape, "operand_shape": operand_shape, + "max_shape": max_shape, "dtype": dtype, "bdims": bdims, "rng": rng} + for min_shape, operand_shape, max_shape in [ + [(), (2, 3), ()], + [(2, 3), (2, 3), ()], + [(), (2, 3), (2, 3)], + [(2, 3), (2, 3), (2, 3)], + ] + for dtype in default_dtypes + for bdims in all_bdims(min_shape, operand_shape, max_shape) + for rng in [jtu.rand_default()])) + def testClamp(self, min_shape, operand_shape, max_shape, dtype, bdims, rng): + raise SkipTest("batching rule for clamp not implemented") # TODO(mattj) + shapes = [min_shape, operand_shape, max_shape] + self._CheckBatching(lax.clamp, 10, bdims, shapes, dtype, rng) + + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": "_lhs_shape={}_rhs_shape={}_bdims={}".format( + jtu.format_shape_dtype_string(lhs_shape, dtype), + jtu.format_shape_dtype_string(rhs_shape, dtype), + bdims), + "lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype, + "bdims": bdims, "rng": rng} + for lhs_shape in [(3,), (4, 3)] for rhs_shape in [(3,), (3, 6)] + for bdims in all_bdims(lhs_shape, rhs_shape) + for dtype in default_dtypes + for rng in [jtu.rand_default()])) + def testDot(self, lhs_shape, rhs_shape, dtype, bdims, rng): + self._CheckBatching(lax.dot, 5, bdims, (lhs_shape, rhs_shape), dtype, rng) + + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": + "_lhs_shape={}_rhs_shape={}_lhs_contracting={}_rhs_contracting={}_bdims={}" + .format(jtu.format_shape_dtype_string(lhs_shape, dtype), + jtu.format_shape_dtype_string(rhs_shape, dtype), + lhs_contracting, rhs_contracting, bdims), + "lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype, + "lhs_contracting": lhs_contracting, "rhs_contracting": rhs_contracting, + "bdims": bdims, "rng": rng} + for lhs_shape, rhs_shape, lhs_contracting, rhs_contracting in [ + [(3, 5), (2, 5), [1], [1]], + [(5, 3), (5, 2), [0], [0]], + [(5, 3, 2), (5, 2, 4), [0], [0]], + [(5, 3, 2), (5, 2, 4), [0,2], [0,1]], + [(1, 2, 2, 3), (1, 2, 3, 1), [1], [1]], + [(3, 2), (2, 4), [1], [0]], + ] + for bdims in all_bdims(lhs_shape, rhs_shape) + for dtype in default_dtypes + for rng in [jtu.rand_small()])) + def testDotGeneralContractOnly(self, lhs_shape, rhs_shape, dtype, + lhs_contracting, rhs_contracting, bdims, rng): + dimension_numbers = ((lhs_contracting, rhs_contracting), ([], [])) + dot = partial(lax.dot_general, dimension_numbers=dimension_numbers) + self._CheckBatching(dot, 5, bdims, (lhs_shape, rhs_shape), dtype, rng) + + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": + "_lhs_shape={}_rhs_shape={}_dimension_numbers={}_bdims={}" + .format(jtu.format_shape_dtype_string(lhs_shape, dtype), + jtu.format_shape_dtype_string(rhs_shape, dtype), + dimension_numbers, bdims), + "lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype, + "dimension_numbers": dimension_numbers, "bdims": bdims, "rng": rng} + for lhs_shape, rhs_shape, dimension_numbers in [ + ((3, 3, 2), (3, 2, 4), (([2], [1]), ([0], [0]))), + ((3, 4, 2, 4), (3, 4, 3, 
2), (([2], [3]), ([0, 1], [0, 1]))), + ] + for bdims in all_bdims(lhs_shape, rhs_shape) + for dtype in default_dtypes + for rng in [jtu.rand_small()])) + def testDotGeneralContractAndBatch(self, lhs_shape, rhs_shape, dtype, + dimension_numbers, bdims, rng): + dot = partial(lax.dot_general, dimension_numbers=dimension_numbers) + self._CheckBatching(dot, 5, bdims, (lhs_shape, rhs_shape), dtype, rng) + + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": "_shape={}_dtype={}_broadcast_sizes={}_bdims={}".format( + shape, onp.dtype(dtype).name, broadcast_sizes, bdims), + "shape": shape, "dtype": dtype, "broadcast_sizes": broadcast_sizes, + "bdims": bdims, "rng": rng} + for shape in [(), (2, 3)] + for dtype in default_dtypes + for broadcast_sizes in [(), (2,), (1, 2)] + for bdims in all_bdims(shape) + for rng in [jtu.rand_default()])) + def testBroadcast(self, shape, dtype, broadcast_sizes, bdims, rng): + op = lambda x: lax.broadcast(x, broadcast_sizes) + self._CheckBatching(op, 5, bdims, (shape,), dtype, rng) + + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": "_inshape={}_outshape={}_bcdims={}_bdims={}".format( + jtu.format_shape_dtype_string(inshape, dtype), + outshape, broadcast_dimensions, bdims), + "inshape": inshape, "dtype": dtype, "outshape": outshape, + "dimensions": broadcast_dimensions, "bdims": bdims, "rng": rng} + for inshape, outshape, broadcast_dimensions in [ + ([2], [2, 2], [0]), + ([2], [2, 2], [1]), + ([2], [2, 3], [0]), + ([], [2, 3], []), + ] + for dtype in default_dtypes + for bdims in all_bdims(inshape) + for rng in [jtu.rand_default()])) + def testBroadcastInDim(self, inshape, dtype, outshape, dimensions, bdims, rng): + raise SkipTest("this test has failures in some cases") # TODO(mattjj) + op = lambda x: lax.broadcast_in_dim(x, outshape, dimensions) + self._CheckBatching(op, 5, bdims, (inshape,), dtype, rng) + + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": "_inshape={}_outshape={}_bdims={}".format( + jtu.format_shape_dtype_string(arg_shape, dtype), + jtu.format_shape_dtype_string(out_shape, dtype), + bdims), + "arg_shape": arg_shape, "out_shape": out_shape, "dtype": dtype, + "bdims": bdims, "rng": rng} + for dtype in default_dtypes + for arg_shape, out_shape in [ + [(3, 4), (12,)], [(2, 1, 4), (8,)], [(2, 2, 4), (2, 8)] + ] + for bdims in all_bdims(arg_shape) + for rng in [jtu.rand_default()])) + def testReshape(self, arg_shape, out_shape, dtype, bdims, rng): + op = lambda x: lax.reshape(x, out_shape) + self._CheckBatching(op, 10, bdims, (arg_shape,), dtype, rng) + + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": "_inshape={}_pads={}_bdims={}" + .format(jtu.format_shape_dtype_string(shape, dtype), pads, bdims), + "shape": shape, "dtype": dtype, "pads": pads, "rng": jtu.rand_small(), + "bdims": bdims} + for shape in [(2, 3)] + for bdims in all_bdims(shape) + for dtype in default_dtypes + for pads in [[(1, 2, 1), (0, 1, 0)]])) + def testPad(self, shape, dtype, pads, bdims, rng): + fun = lambda operand: lax.pad(operand, onp.array(0, dtype), pads) + self._CheckBatching(fun, 5, bdims, (shape,), dtype, rng) + + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": "_predshape={}_argshapes={}_bdims={}".format( + jtu.format_shape_dtype_string(pred_shape, onp.bool_), + jtu.format_shape_dtype_string(arg_shape, arg_dtype), + bdims), + "pred_shape": pred_shape, "arg_shape": arg_shape, "arg_dtype": arg_dtype, + "bdims": bdims, "rng": rng} + for arg_shape in [(), 
(3,), (2, 3)] + for pred_shape in ([(), arg_shape] if arg_shape else [()]) + for bdims in all_bdims(pred_shape, arg_shape, arg_shape) + for arg_dtype in default_dtypes + for rng in [jtu.rand_default()])) + def testSelect(self, pred_shape, arg_shape, arg_dtype, bdims, rng): + op = lambda c, x, y: lax.select(c < 0, x, y) + self._CheckBatching(op, 5, bdims, (pred_shape, arg_shape, arg_shape,), + arg_dtype, rng) + + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": + "_shape={}_start_indices={}_limit_indices={}_strides={}_bdims={}".format( + jtu.format_shape_dtype_string(shape, dtype), + start_indices, limit_indices, strides, bdims), + "shape": shape, "dtype": dtype, "starts": start_indices, + "limits": limit_indices, "strides": strides, "bdims": bdims, "rng": rng} + for shape, start_indices, limit_indices, strides in [ + [(3,), (1,), (2,), None], + [(7,), (4,), (7,), None], + [(5,), (1,), (5,), (2,)], + [(8,), (1,), (6,), (2,)], + [(5, 3), (1, 1), (3, 2), None], + [(5, 3), (1, 1), (3, 1), None], + [(7, 5, 3), (4, 0, 1), (7, 1, 3), None], + [(5, 3), (1, 1), (2, 1), (1, 1)], + [(5, 3), (1, 1), (5, 3), (2, 1)], + ] + for bdims in all_bdims(shape) + for dtype in default_dtypes + for rng in [jtu.rand_default()])) + def testSlice(self, shape, dtype, starts, limits, strides, bdims, rng): + op = lambda x: lax.slice(x, starts, limits, strides) + self._CheckBatching(op, 5, bdims, (shape,), dtype, rng) + + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": "_shape={}_perm={}_bdims={}".format( + jtu.format_shape_dtype_string(shape, dtype), perm, bdims), + "shape": shape, "dtype": dtype, "perm": perm, "bdims": bdims, "rng": rng} + for shape, perm in [ + [(3, 4), (1, 0)], + [(3, 4), (0, 1)], + [(3, 4, 5), (2, 1, 0)], + [(3, 4, 5), (1, 0, 2)], + ] + for bdims in all_bdims(shape) + for dtype in default_dtypes + for rng in [jtu.rand_default()])) + def testTranspose(self, shape, dtype, perm, bdims, rng): + op = lambda x: lax.transpose(x, perm) + self._CheckBatching(op, 5, bdims, (shape,), dtype, rng) + + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": "_op={}_inshape={}_reducedims={}_bdims={}" + .format(op.__name__, jtu.format_shape_dtype_string(shape, dtype), dims, + bdims), + "op": op, "init_val": init_val, "shape": shape, "dtype": dtype, + "dims": dims, "bdims": bdims, "rng": rng} + for init_val, op, dtypes in [ + (0, lax.add, default_dtypes), + (1, lax.mul, default_dtypes), + (-onp.inf, lax.max, float_dtypes), + (onp.iinfo(onp.int32).min, lax.max, [onp.int32]), + (onp.iinfo(onp.int64).min, lax.max, [onp.int64]), + (onp.iinfo(onp.uint32).min, lax.max, [onp.uint32]), + (onp.iinfo(onp.uint64).min, lax.max, [onp.uint64]), + (onp.inf, lax.min, float_dtypes), + (onp.iinfo(onp.int32).max, lax.min, [onp.int32]), + (onp.iinfo(onp.int64).max, lax.min, [onp.int64]), + (onp.iinfo(onp.uint32).max, lax.min, [onp.uint32]), + (onp.iinfo(onp.uint64).max, lax.min, [onp.uint64]), + ] + for dtype in dtypes + for shape, dims in [ + [(3, 4, 5), (0,)], [(3, 4, 5), (1, 2)], + [(3, 4, 5), (0, 2)], [(3, 4, 5), (0, 1, 2)] + ] + for bdims in all_bdims(shape) + for rng in [jtu.rand_small()])) + def testReduce(self, op, init_val, shape, dtype, dims, bdims, rng): + init_val = onp.asarray(init_val, dtype=dtype) + fun = lambda operand: lax.reduce(operand, init_val, op, dims) + self._CheckBatching(fun, 5, bdims, (shape,), dtype, rng) + + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": "_op={}_dtype={}_padding={}" + .format(op.__name__, 
onp.dtype(dtype).name, padding), + "op": op, "init_val": init_val, "dtype": dtype, "padding": padding, + "rng": rng} + for init_val, op, dtypes in [ + (0, lax.add, [onp.float32]), + (-onp.inf, lax.max, [onp.float32]), + (onp.inf, lax.min, [onp.float32]), + ] + for dtype in dtypes + for padding in ["VALID", "SAME"] + for rng in [jtu.rand_small()])) + def testReduceWindow(self, op, init_val, dtype, padding, rng): + init_val = onp.asarray(init_val, dtype=dtype) + + all_configs = itertools.chain( + itertools.product( + [(4, 6)], + [(2, 1), (1, 2)], + [(1, 1), (2, 1), (1, 2)]), + itertools.product( + [(3, 2, 4, 6)], [(1, 1, 2, 1), (2, 1, 2, 1)], + [(1, 2, 2, 1), (1, 1, 1, 1)])) + + def fun(operand): + return lax.reduce_window(operand, init_val, op, dims, strides, padding) + + for shape, dims, strides in all_configs: + for bdims in all_bdims(shape): + self._CheckBatching(fun, 3, bdims, (shape,), dtype, rng) + + # TODO Concatenate + # TODO Reverse + # TODO DynamicSlice + # TODO DynamicUpdateSlice + # TODO Sort + # TODO SortKeyVal + # TODO Collapse + # TODO ScatterAdd + # TODO Scatter if __name__ == '__main__':
Better vmap testing We should implement a test generator for checking batched primitives like the one we use for grad testing.
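The test patch above does this with a `_CheckBatching` helper plus per-primitive parameterizations; a self-contained sketch of the same idea follows (the `check_batching` name and the `lax.add` example are mine, not from the patch):

```python
import numpy as onp
import jax
from jax import lax

def check_batching(op, args, bdims, bdim_size):
    # Reference answer: apply `op` to the i-th slice of each batched argument and stack.
    def slice_arg(a, d, i):
        return a if d is None else onp.take(a, i, axis=d)
    expected = onp.stack([op(*[slice_arg(a, d, i) for a, d in zip(args, bdims)])
                          for i in range(bdim_size)])
    ans = jax.vmap(op, in_axes=bdims)(*args)
    assert onp.allclose(ans, expected)

x = onp.arange(15.).reshape(5, 3)
y = onp.arange(15.).reshape(3, 5)
check_batching(lax.add, (x, y), (0, 1), 5)  # batch dims 0 and 1 respectively
```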
2019-06-18T05:14:49
google/jax
873
google__jax-873
[ "824" ]
a46e01364f2bc5c14aac7f3a9776c2df8c449e51
diff --git a/jax/core.py b/jax/core.py --- a/jax/core.py +++ b/jax/core.py @@ -119,7 +119,10 @@ def __eq__(self, other): return self.val == other.val if self.hashable else self.val is other.val def __repr__(self): - return 'Literal(val={}, hashable={})'.format(self.val, self.hashable) + if self.hashable: + return '{}'.format(self.val) + else: + return 'Literal(val={}, hashable={})'.format(self.val, self.hashable) class Primitive(object): def __init__(self, name): diff --git a/jax/interpreters/partial_eval.py b/jax/interpreters/partial_eval.py --- a/jax/interpreters/partial_eval.py +++ b/jax/interpreters/partial_eval.py @@ -454,6 +454,7 @@ def tracers_to_jaxpr(in_tracers, out_tracer): eqns = [] env = {} consts = {} + const_to_var = defaultdict(newvar) destructuring_vars = {} for t in sorted_tracers: recipe = t.recipe @@ -464,7 +465,8 @@ def tracers_to_jaxpr(in_tracers, out_tracer): elif isinstance(recipe, FreeVar): env[var(t)] = recipe.val elif isinstance(recipe, ConstVar): - consts[var(t)] = recipe.val + v = t_to_var[id(t)] = const_to_var[id(recipe.val)] + consts[v] = recipe.val elif isinstance(recipe, Literal): t_to_var[id(t)] = recipe elif isinstance(recipe, Destructuring):
Memory explosion when jitting dynamic_slice The [following example][1] explodes in memory usage when calling the jitted functions. The culprit seems to be the dynamic_slice. It happens whether or not the GPU is used. Incidentally, it is also rather slow (twice as slow as the plain Python version), so I welcome comments if there is a jax-friendlier way to rewrite it, but perhaps that is what jit should do? My system: Fedora 29, 31 GB RAM, jax 0.1.36, jaxlib 0.1.16 installed from the googleapis wheel, CUDA 10.0, Nvidia driver 430.14. [1]: https://gist.github.com/Dapid/b33919674f6a3cd9839473bfd2821e28
Sorry for the long delay in response, most of us were away last week. I think the memory blow-up happens because the gradient of `dynamic_slice` constructs a dense array of zeros to scatter a value into, and we end up with one such constant for each `dynamic_slice` call. This is similar to #850 ; in general JAX probably needs to get smarter about not instantiating large constants. However, there's a better way to write your computation to use a single gather (via NumPy advanced indexing) rather than a long chain of `dynamic_slice` calls. The JIT-ted version no longer blows up in memory and is much faster: ``` def single_loss(self, datum): J = self.J N = self.N loss = 0. for r in range(N): x = np.sum(J[np.arange(N), r, datum[np.arange(N)], datum[r]]) loss -= np.sum(np.exp(x)) return loss ``` If you want to get even fancier you can use a single gather for the entire outer loop too: ``` xs = np.arange(N) ys = np.sum(J[xs[:, None], xs, datum[xs, None], datum[xs]], axis=1) loss = -np.sum(np.exp(ys)) ``` (The second version didn't seem faster to me but it's fun to code-golf these things!)
2019-06-18T15:27:13
google/jax
874
google__jax-874
[ "871" ]
4bd603103a3d3ab549e82f6574a3a50ad398becf
diff --git a/jax/api.py b/jax/api.py --- a/jax/api.py +++ b/jax/api.py @@ -762,13 +762,23 @@ def linearize(fun, *primals): out_primal, out_pval, jaxpr, consts = ad.linearize(jaxtree_fun, *primals_flat) out_tree = out_tree() out_primal_py = build_tree(out_tree, out_primal) - lifted_jvp = partial(lift_linearized, jaxpr, consts, (in_trees, out_tree), out_pval) + primal_avals = list(map(core.get_aval, primals_flat)) + lifted_jvp = partial(lift_linearized, jaxpr, primal_avals, consts, + (in_trees, out_tree), out_pval) return out_primal_py, lifted_jvp -def lift_linearized(jaxpr, consts, io_tree, out_pval, *py_args): - def fun(*args): - primals = pack(args) # doesn't matter what these are-they'll be ignored - tangents = pack(args) +def lift_linearized(jaxpr, primal_avals, consts, io_tree, out_pval, *py_args): + def fun(*tangents): + tangent_avals = list(map(core.get_aval, tangents)) + for primal_aval, tangent_aval in zip(primal_avals, tangent_avals): + try: + core.lattice_join(primal_aval, tangent_aval) + except TypeError: + msg = ("linearized function called on tangent values inconsistent with " + "the original primal values.") + raise ValueError(msg) + primals = pack(tangents) # doesn't matter what these are-they'll be ignored + tangents = pack(tangents) _, ans = eval_jaxpr(jaxpr, consts, (), primals, tangents) return pe.merge_pvals(ans, out_pval)
diff --git a/tests/api_test.py b/tests/api_test.py --- a/tests/api_test.py +++ b/tests/api_test.py @@ -784,6 +784,20 @@ def fun(x): assert len(api.make_jaxpr(fun)(1).eqns) == 0 + def test_issue_871(self): + T = np.array([[1., 2.], [3., 4.], [5., 6.]]) + x = np.array([1, 2, 3]) + + y, f_jvp = api.linearize(np.sum, x) + jtu.check_raises(lambda: f_jvp(T), ValueError, + ("linearized function called on tangent values " + "inconsistent with the original primal values.")) + + y, f_jvp = api.linearize(api.jit(np.sum), x) + jtu.check_raises(lambda: f_jvp(T), ValueError, + ("linearized function called on tangent values " + "inconsistent with the original primal values.")) + if __name__ == '__main__': absltest.main()
jvp of np.sum fails in shape assertion when jit'ed The following piece of code, which computes the jvp of the `(reduce_)sum` operation, fails with `AssertionError: (3, 2, 2) != (3,)` when the sum is jit'ed. It computes without problems and gives the expected result when the `sum` operation is not jit'ed. The assertion is thrown from the `_reduce_sum_shape_rule` (lax.py, line 3017) which appears to be only called during actual evaluation of the `f_jvp` function when jit'ed. ``` import jax import jax.numpy as np T = np.array([[1., 2.], [3., 4.], [5., 6.]]) x = np.array([1, 2, 3]) mysum = jax.jit(np.sum) # this doesn't work when jit'ed y, f_jvp = jax.linearize(mysum, x) Z = f_jvp(T) # this doesn't work either when jit'ed # y, Z = jax.jvp(mysum, (x, ), (T, )) print(y) print(Z) ``` Unfortunately, I haven't been able to work out a fix for this on my own so far due to the many layers of abstraction and indirections in between and would be grateful if someone could push me the right way on this.
Thanks for raising this! Actually, `x` and `T` should have the same shape here: in general, tangent vectors need to have the same shape as the corresponding input (primal) values. However, we're not raising this error in a good way, and it's leading to behavior that's only being caught way downstream.
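For reference, the contract the new error enforces: tangents passed to the function returned by `linearize` must match the primal input's shape (and dtype). A minimal correct call looks like:

```python
import jax
import jax.numpy as jnp

x = jnp.array([1., 2., 3.])
y, f_jvp = jax.linearize(jnp.sum, x)
print(y)                        # 6.0
print(f_jvp(jnp.ones_like(x)))  # 3.0 -- the tangent has the same shape as x
```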
2019-06-18T16:31:22
google/jax
875
google__jax-875
[ "850" ]
4bd603103a3d3ab549e82f6574a3a50ad398becf
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -739,7 +739,11 @@ def _maybe_numpy_1_13_isclose_behavior(a, out): return out +# The `jit` on `where` exists to avoid materializing constants in cases like +# `np.where(np.zeros(1000), 7, 4)`. In op-by-op mode, we don't want to +# materialize the broadcast forms of scalar arguments. @_wraps(onp.where) +@jit def where(condition, x=None, y=None): if x is None or y is None: raise ValueError("Must use the three-argument form of where().") @@ -1465,14 +1469,20 @@ def tri(N, M=None, k=0, dtype=None): @_wraps(onp.tril) def tril(m, k=0): - mask = tri(*shape(m)[-2:], k=k, dtype=bool) - return where(mask, m, zeros_like(m)) + m_shape = shape(m) + if len(m_shape) < 2: + raise ValueError("Argument to jax.numpy.tril must be at least 2D") + mask = tri(*m_shape[-2:], k=k, dtype=bool) + return lax.select(lax.broadcast(mask, m_shape[:-2]), m, zeros_like(m)) @_wraps(onp.triu) def triu(m, k=0): - mask = tri(*shape(m)[-2:], k=k - 1, dtype=bool) - return where(mask, zeros_like(m), m) + m_shape = shape(m) + if len(m_shape) < 2: + raise ValueError("Argument to jax.numpy.triu must be at least 2D") + mask = tri(*m_shape[-2:], k=k - 1, dtype=bool) + return lax.select(lax.broadcast(mask, m_shape[:-2]), zeros_like(m), m) @_wraps(onp.trace)
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -759,7 +759,7 @@ def testTri(self, m, n, k, dtype, rng): "dtype": dtype, "shape": shape, "op": op, "k": k, "rng": jtu.rand_default()} for dtype in default_dtypes - for shape in [shape for shape in all_shapes if len(shape) >= 1] + for shape in [shape for shape in all_shapes if len(shape) >= 2] for op in ["tril", "triu"] for k in list(range(-3, 3)))) def testTriLU(self, dtype, shape, op, k, rng):
np.where performs broadcasting in op-by-op mode The jax version of `np.where` needs to broadcast its arguments to the same shape. When one of the arguments is a constant, the broadcast operation isn't jitted. Examples: - `np.where(dropout_mask, x, 0)` creates an all-zero constant of the same shape as `x` and stores it in global memory. This potentially wastes hundreds of MB of global memory. - A similar problem occurs for the first argument (condition) of `np.where` if it has fewer dimensions than the other two arguments. The first instance can be worked around by doing `np.where(condition, x, np.zeros_like(x))`, but working around the second is trickier without resorting to `lax.tie_in`.
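A sketch of the first workaround described above (the array names are illustrative); after the patch, which wraps `where` in `jit`, the plain scalar spelling should no longer materialize the broadcast constant in op-by-op mode either:

```python
import jax.numpy as jnp

x = jnp.ones((1000, 1000))
mask = x > 0.5
out = jnp.where(mask, x, jnp.zeros_like(x))  # explicit zeros array instead of a Python scalar 0
```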
2019-06-18T23:10:00
google/jax
889
google__jax-889
[ "888" ]
6fae4d3c179702a007a6092ef2a766361085abef
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -231,6 +231,17 @@ def wrap(op): return op return wrap +# TODO(phawkins): use this helper everywhere. +def _canonicalize_axis(axis, num_dims): + """Canonicalize an axis in (-num_dims, num_dims) to [0, num_dims).""" + axis = int(axis) + if axis < 0: + axis = axis + num_dims + if axis < 0 or axis >= num_dims: + raise ValueError( + "axis {} is out of bounds for array of dimension {}".format( + axis, num_dims)) + return axis ### implementations of numpy functions in terms of lax @@ -944,9 +955,9 @@ def _reduction_dims(a, axis): if axis is None: return onp.arange(ndim(a)) elif isinstance(axis, (onp.ndarray, tuple, list)): - return onp.mod(onp.asarray(axis), ndim(a)) + return tuple(_canonicalize_axis(x, ndim(a)) for x in axis) elif isinstance(axis, int): - return onp.mod([axis], ndim(a)) + return (_canonicalize_axis(axis, ndim(a)),) else: raise TypeError("Unexpected type of axis argument: {}".format(type(axis))) @@ -1154,19 +1165,6 @@ def cumulative_reduction(a, axis=None, dtype=None): ### Array-creation functions -# TODO(phawkins): use this helper everywhere. -def _canonicalize_axis(axis, num_dims): - """Canonicalize an axis in (-num_dims, num_dims) to [0, num_dims).""" - axis = int(axis) - if axis < 0: - axis = axis + num_dims - if axis < 0 or axis >= num_dims: - raise ValueError( - "axis {} is out of bounds for array of dimension {}".format( - axis, num_dims)) - return axis - - @_wraps(onp.pad) def pad(array, pad_width, mode, constant_values=0): if mode != "constant":
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -1622,6 +1622,10 @@ def f(x, v): first_call = f(x, v) second_call = f(x, v) # doesn't crash + def testReductionOfOutOfBoundsAxis(self): # Issue 888 + x = lnp.ones((3, 4)) + self.assertRaises(ValueError, lambda: lnp.sum(x, axis=2)) + if __name__ == "__main__": absltest.main()
jax.numpy.sum behaving funny for non-existent axis When invoking the `sum` operation on a non-existent axis I don't get an error (as would be the expected result), but it sums over one of the existing axes. As far as I can tell, for any array `x` with `n` axes, calls to `sum` with axis `j >= n` behave as if the provided axis was `j % n`. E.g., if the array `x` has 3 axes, calls to `sum` with `j` being any multiple of three all sum over the first axis (j=0,3,6,9,... all give the same result). This is especially noteworthy for a one-dimensional array (only one axis), where calls to `sum` with any value for `axis` always sum over the whole array. It would be preferable if either an error were raised or summing over a non-existent axis were a no-op. Might be related to #278 example code ``` import jax.numpy as np x = np.array([ [ [ 1, 2 ], [ 3, 4 ] ], [ [ 5, 6 ], [ 7, 8] ], [ [ 9, 10 ], [ 11, 12 ] ] ]) assert(np.all(np.sum(x, axis=3) == np.sum(x, axis=0))) assert(np.all(np.sum(x, axis=4) == np.sum(x, axis=1))) assert(np.all(np.sum(x, axis=5) == np.sum(x, axis=2))) assert(np.all(np.sum(x, axis=6) == np.sum(x, axis=0))) # and so on.. ```
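For comparison, this is the NumPy behaviour the fix above adopts (the patched `jax.numpy` raises a `ValueError` with an equivalent message via the shared `_canonicalize_axis` helper):

```python
import numpy as onp

x = onp.ones((3, 2, 2))
try:
    onp.sum(x, axis=3)
except onp.AxisError as e:
    print(e)  # axis 3 is out of bounds for array of dimension 3
```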
2019-06-20T12:41:29
google/jax
897
google__jax-897
[ "424" ]
d93e05ca0e64ea2cd0aba48793bd8ad15dc31267
diff --git a/jax/api.py b/jax/api.py --- a/jax/api.py +++ b/jax/api.py @@ -784,6 +784,14 @@ def fun(*tangents): return apply_jaxtree_fun(fun, io_tree, *py_args) +def _check_inexact_input_vjp(x): + aval = core.get_aval(x) + if not onp.issubdtype(aval.dtype, onp.inexact): + msg = ("Primal inputs to reverse-mode differentiation must be of float " + "or complex type, got type {}") + raise TypeError(msg.format(aval.dtype.name)) + + def vjp(fun, *primals, **kwargs): """Compute a (reverse-mode) vector-Jacobian product of `fun`. @@ -823,6 +831,7 @@ def vjp(fun, *primals, **kwargs): fun = lu.wrap_init(fun) primals_flat, in_trees = unzip2(map(pytree_to_jaxtupletree, primals)) _check_args(primals_flat) + tree_map(_check_inexact_input_vjp, primals) jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(fun, in_trees) if not has_aux: out_primal, out_vjp = ad.vjp(jaxtree_fun, primals_flat)
diff --git a/tests/api_test.py b/tests/api_test.py --- a/tests/api_test.py +++ b/tests/api_test.py @@ -815,6 +815,13 @@ def f(a, b, c): if eqn.bound_subjaxprs) self.assertEqual(len(subjaxpr.eqns), 1) + def test_grad_of_int_errors(self): + dfn = grad(lambda x: x ** 2) + jtu.check_raises_regexp( + lambda: dfn(3), TypeError, + "Primal inputs to reverse-mode differentiation must be of float or " + "complex type, got type int..") + if __name__ == '__main__': absltest.main() diff --git a/tests/batching_test.py b/tests/batching_test.py --- a/tests/batching_test.py +++ b/tests/batching_test.py @@ -796,7 +796,7 @@ def testGatherBatchedBoth(self, op_axis, idxs_axis, shape, dtype, idxs, dnums, "op_axis": op_axis, "idxs_axis": idxs_axis, "shape": shape, "dtype": dtype, "idxs": idxs, "dnums": dnums, "slice_sizes": slice_sizes, "rng": rng, "rng_idx": rng_idx} - for dtype in [onp.float32, onp.int32] + for dtype in [onp.float32] for op_axis, idxs_axis, shape, idxs, dnums, slice_sizes in [ (0, 0, (2, 5), onp.array([[[0], [2]], [[1], [3]]]), lax.GatherDimensionNumbers(
Grad should raise an error for non-inexact types Continuation of https://github.com/google/jax/issues/48 . `grad` should report an error for integer/bool types (i.e., non-inexact types): e.g., this example should report an error: ``` def square(x): return x**2 val = 3 dfn = grad(square) print(dfn(val)) ```
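With the new check in place, the example above raises the error added in the patch; differentiating at a float input remains fine:

```python
from jax import grad

dfn = grad(lambda x: x ** 2)
print(dfn(3.0))  # 6.0 -- float inputs are fine
# dfn(3) now raises TypeError: "Primal inputs to reverse-mode differentiation
# must be of float or complex type, got type int32" (int64 when 64-bit mode is on)
```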
2019-06-21T15:07:07
google/jax
910
google__jax-910
[ "901" ]
96857771318e8b75991e9ed53edd8ed6a779c6eb
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -768,6 +768,20 @@ def where(condition, x=None, y=None): return lax.select(condition, *_promote_dtypes(x, y)) +@_wraps(onp.select) +def select(condlist, choicelist, default=0): + if len(condlist) != len(choicelist): + msg = "condlist must have length equal to choicelist ({} vs {})" + raise ValueError(msg.format(len(condlist), len(choicelist))) + if len(condlist) == 0: + raise ValueError("condlist must be non-empty") + + output = default + for cond, choice in zip(condlist[::-1], choicelist[::-1]): + output = where(cond, choice, output) + return output + + def broadcast_arrays(*args): """Like Numpy's broadcast_arrays but doesn't return views.""" shapes = [shape(arg) for arg in args]
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -1478,6 +1478,29 @@ def testIx_(self, rng, shapes, dtypes): check_dtypes=True) self._CompileAndCheck(lnp.ix_, args_maker, check_dtypes=True) + + @parameterized.named_parameters(jtu.cases_from_list( + {"testcase_name": jtu.format_test_name_suffix("select", shapes, + (onp.bool_,) * n + dtypes), + "rng": jtu.rand_default(), "shapes": shapes, "dtypes": dtypes} + for n in range(0, 3) + for shapes in filter( + _shapes_are_broadcast_compatible, + CombosWithReplacement(all_shapes, 2 * n + 1)) + for dtypes in CombosWithReplacement(all_dtypes, n + 1))) + def test(self, rng, shapes, dtypes): + n = len(dtypes) - 1 + def args_maker(): + condlist = [rng(shape, onp.bool_) for shape in shapes[:n]] + choicelist = [rng(shape, dtype) + for shape, dtype in zip(shapes[n:-1], dtypes[:n])] + default = rng(shapes[-1], dtypes[-1]) + return condlist, choicelist, default + self._CheckAgainstNumpy(onp.select, lnp.select, args_maker, + check_dtypes=True) + self._CompileAndCheck(lnp.select, args_maker, check_dtypes=True) + + def testIssue330(self): x = lnp.full((1, 1), lnp.array([1])[0]) # doesn't crash self.assertEqual(x[0, 0], 1)
Implement `np.select` It appears that JAX has not implemented `np.select`. One can work around this by using `lax.select`, but it doesn't implement numpy's broadcasting semantics, so it's not quite as nice.
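The patch above implements this as a right-to-left fold of `np.where`, so that earlier conditions take precedence as in `numpy.select`; condensed, the approach is as below (the toy `x` example is mine):

```python
import jax.numpy as jnp

def select(condlist, choicelist, default=0):
    # Apply later (cond, choice) pairs first so earlier conditions overwrite them,
    # matching numpy.select's "first matching condition wins" semantics.
    output = default
    for cond, choice in zip(condlist[::-1], choicelist[::-1]):
        output = jnp.where(cond, choice, output)
    return output

x = jnp.arange(6)
print(select([x < 2, x > 3], [x, x ** 2], default=-1))  # [ 0  1 -1 -1 16 25]
```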
2019-06-24T13:28:33
google/jax
911
google__jax-911
[ "902" ]
96857771318e8b75991e9ed53edd8ed6a779c6eb
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -2113,19 +2113,28 @@ def take(a, indices, axis=None, out=None, mode=None): @_wraps(getattr(onp, "take_along_axis", None)) def take_along_axis(arr, indices, axis): - if axis is None and ndim(arr) != 1: - return take_along_axis(arr.ravel(), indices.ravel(), 0) + if axis is None: + if ndim(indices) != 1: + msg = "take_along_axis indices must be 1D if axis=None, got shape {}" + raise ValueError(msg.format(shape(indices))) + return take_along_axis(arr.ravel(), indices, 0) + elif ndim(arr) != ndim(indices): + msg = "indices and arr must have the same number of dimensions; {} vs. {}" + raise ValueError(msg.format(ndim(indices), ndim(arr))) elif ndim(arr) == 1: return lax.index_take(arr, (indices,), (0,)) else: # TODO(mattjj): if we lower directly to lax.gather here, we might be able to # avoid the reshape on the output. - all_indices = [lax.broadcasted_iota(_dtype(indices), shape(indices), i) + arr_shape = list(shape(arr)) + arr_shape[axis] = 1 + out_shape = lax.broadcast_shapes(shape(indices), tuple(arr_shape)) + all_indices = [lax.broadcasted_iota(_dtype(indices), out_shape, i) for i in range(ndim(arr))] - all_indices[axis] = indices + all_indices[axis] = broadcast_to(indices, out_shape) all_indices = tuple(map(ravel, all_indices)) out_flat = lax.index_take(arr, all_indices, tuple(range(ndim(arr)))) - return reshape(out_flat, shape(indices)) + return reshape(out_flat, out_shape) ### Indexing
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -287,6 +287,9 @@ def _shapes_are_broadcast_compatible(shapes): return False return True +def _shapes_are_equal_length(shapes): + return all(len(shape) == len(shapes[0]) for shape in shapes[1:]) + class LaxBackedNumpyTests(jtu.JaxTestCase): """Tests for LAX-backed Numpy implementation.""" @@ -1410,17 +1413,29 @@ def args_maker(): self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True) @parameterized.named_parameters(jtu.cases_from_list( - {"testcase_name": "_{}_axis={}".format( - jtu.format_shape_dtype_string(shape, dtype), axis), - "rng": rng, "shape": shape, "dtype": dtype, "axis": axis} - for shape in [(3,), (3, 4), (3, 4, 5)] - for axis in itertools.chain(range(len(shape)), [-1], [None]) + {"testcase_name": "_{}_ishape={}_axis={}".format( + jtu.format_shape_dtype_string(x_shape, dtype), i_shape, axis), + "rng": rng, "x_shape": x_shape, "i_shape": i_shape, "dtype": dtype, + "axis": axis} + for x_shape, i_shape in filter( + _shapes_are_equal_length, + filter(_shapes_are_broadcast_compatible, + CombosWithReplacement(nonempty_nonscalar_array_shapes, 2))) + for axis in itertools.chain(range(len(x_shape)), [-1], [None]) for dtype in default_dtypes for rng in [jtu.rand_default()])) - def testTakeAlongAxis(self, shape, dtype, axis, rng): + def testTakeAlongAxis(self, x_shape, i_shape, dtype, axis, rng): + i_shape = onp.array(i_shape) + if axis is None: + i_shape = [onp.prod(i_shape, dtype=onp.int64)] + else: + # Test the case where the size of the axis doesn't necessarily broadcast. + i_shape[axis] *= 3 + i_shape = list(i_shape) def args_maker(): - x = rng(shape, dtype) - i = onp.argsort(x, axis=axis) + x = rng(x_shape, dtype) + n = onp.prod(x_shape, dtype=onp.int32) if axis is None else x_shape[axis] + i = rng(i_shape, onp.int32) % n return x, i lnp_op = lambda x, i: lnp.take_along_axis(x, i, axis=axis)
take_along_axis inconsistent with numpy Stumbled upon this while migrating some code for selecting a batch of main eigenvectors to jax: ```python import numpy as onp import jax.numpy as jnp B, M, N = 2, 3, 5 v, m = onp.random.rand(B, N), onp.random.rand(B, M, N) def foo(np=None): i = np.argmax(v, axis=-1)[..., None, None] return np.take_along_axis(m, i, axis=-1) print(foo(np=onp).shape) # => (2, 3, 1) print(foo(np=jnp).shape) # => (2, 1, 1) ``` Jax returns something of a different shape. I also found that I could omit one of the dummy axes on `i`, which numpy does not accept (`m` and `i` must have the same ndim and compatible shapes) but jax does.
For the record, explicitly tiling `i` with `i = np.argmax(v, axis=-1)[..., None, None] * np.ones(m.shape, dtype=int)` works. Thanks for catching this!
2019-06-24T14:35:43
google/jax
914
google__jax-914
[ "891" ]
a8cf0cd36d3e741196ebea829cb98dd76dda7f20
diff --git a/jax/lax/lax.py b/jax/lax/lax.py --- a/jax/lax/lax.py +++ b/jax/lax/lax.py @@ -2630,7 +2630,7 @@ def _dynamic_slice_jvp_rule(g, operand, start_indices, slice_sizes, def _dynamic_slice_transpose_rule(t, operand, start_indices, slice_sizes, operand_shape): assert operand is None - zeros = full(operand_shape, 0, dtype=_dtype(t)) + zeros = full(operand_shape, tie_in(t, _zero(t))) return [dynamic_update_slice(zeros, t, start_indices), ad_util.zero] def _dynamic_slice_batching_rule(batched_args, batch_dims, slice_sizes, @@ -2798,7 +2798,7 @@ def _gather_transpose_rule(t, operand, start_indices, dimension_numbers, assert operand is None if t is ad_util.zero: return [ad_util.zero, ad_util.zero] - zeros = full(operand_shape, 0, dtype=t.dtype) + zeros = full(operand_shape, tie_in(t, _zero(t))) scatter_dnums = ScatterDimensionNumbers( update_window_dims=dimension_numbers.offset_dims, inserted_window_dims=dimension_numbers.collapsed_slice_dims,
Large memory needed in gathering operations [Here][1] is a simplified example of my problem. While I can compute the gradients of a single input, I quickly run out of memory. Is there a way to make it more memory efficient? With a batch size of 6 I need 2157 MiB, but the gradients are of shapes (6, 50, 21) and (6, 50, 50, 21, 21), which together are about 25 MiB, so I am incurring roughly a 100x overhead. Am I doing something wrong? Can we help jax be smarter? Can I profile or inspect the graph to figure out which operations are taking the space? [1]: https://gist.github.com/Dapid/e980a26e063838d472f46db1b7df59bd
From a quick look, this looks very similar to #824. Can you try to use a single large gather (e.g., using advanced indexing) rather than multiple lookups in a loop? (That said, I suspect we can improve the memory usage of your code as written, too.) The problem of #824 was dynamic slice. I don't think I can get rid of the loop, since in the real case each iteration depends on `r`, but I will rethink it. Nice code! Under a `jit` I would expect the sparse gradients to be accumulated efficiently, but we should investigate if something is going wrong there. In Autograd we also accumulated sparse gradients that arose from indexing efficiently (without having `jit`) because Dougal added [a special SparseObject](https://github.com/HIPS/autograd/blob/541a70b8a754356fe52e2d6caa7f4aa94c18cf05/autograd/numpy/numpy_vjps.py#L700) that [the gradient-summing machinery knew how to deal with](https://github.com/HIPS/autograd/blob/541a70b8a754356fe52e2d6caa7f4aa94c18cf05/autograd/core.py#L158-L186). We could do that in JAX too if necessary, though I'd be interested to figure out why `jit` might not be doing the optimization here.
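A minimal sketch of the "single large gather" suggestion, using made-up shapes rather than the ones from the linked gist — the point is only that one vectorized advanced-indexing lookup replaces a Python loop of per-row lookups, so the traced graph contains a single gather instead of many:

```python
import numpy as onp
import jax.numpy as jnp

table = jnp.array(onp.random.randn(50, 21))
idx = jnp.array(onp.random.randint(0, 50, size=(6, 50)))

# Loop of small lookups: one gather traced per iteration.
rows_loop = [table[idx[b]] for b in range(6)]

# Single large gather: one op for the whole batch, result shape (6, 50, 21).
rows_vectorized = table[idx]
```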
2019-06-24T17:46:16
google/jax
920
google__jax-920
[ "653" ]
6c3cf13f659ef4002a92e486c0c59c37a9a6cd02
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -1270,7 +1270,16 @@ def concatenate(arrays, axis=0): raise ValueError("Need at least one array to concatenate.") if ndim(arrays[0]) == 0: raise ValueError("Zero-dimensional arrays cannot be concatenated.") - return lax.concatenate(_promote_dtypes(*arrays), axis % ndim(arrays[0])) + axis = _canonicalize_axis(axis, ndim(arrays[0])) + arrays = _promote_dtypes(*arrays) + # lax.concatenate can be slow to compile for wide concatenations, so form a + # tree of concatenations as a workaround especially for op-by-op mode. + # (https://github.com/google/jax/issues/653). + k = 16 + while len(arrays) > 1: + arrays = [lax.concatenate(arrays[i:i+k], axis) + for i in range(0, len(arrays), k)] + return arrays[0] @_wraps(onp.vstack)
Excessively long initialization time of plain old list integers Initialization of a modestly sized array seems to take a ridiculous amount of time that can be bypassed by first going to numpy. This isn't a blocker or anything since I first cast to numpy then jax.numpy, but thought I'd file it anyways. ``` python import time import numpy as onp import jax.numpy as np alist = [0, 0, 1, 0, 0, 2, 0, 0, 3, 0, 0, 4, 0, 0, 5, 0, 0, 6, 0, 0, 7, 0, 0, 8, 0, 0, 9, 0, 1, -9, 0, 1, -8, 0, 1, -7, 0, 1, -6, 0, 1, -5, 0, 1, -4, 0, 1, -3, 0, 1, -2, 0, 1, -1, 0, 1, 0, 0, 1, 1, 0, 1, 2, 0, 1, 3, 0, 1, 4, 0, 1, 5, 0, 1, 6, 0, 1, 7, 0, 1, 8, 0, 1, 9, 0, 2, -9, 0, 2, -8, 0, 2, -7, 0, 2, -6, 0, 2, -5, 0, 2, -4, 0, 2, -3, 0, 2, -2, 0, 2, -1, 0, 2, 0, 0, 2, 1, 0, 2, 2, 0, 2, 3, 0, 2, 4, 0, 2, 5, 0, 2, 6, 0, 2, 7, 0, 2, 8, 0, 2, 9, 0, 3, -9, 0, 3, -8, 0, 3, -7, 0, 3, -6, 0, 3, -5, 0, 3, -4, 0, 3, -3, 0, 3, -2, 0, 3, -1, 0, 3, 0, 0, 3, 1, 0, 3, 2, 0, 3, 3, 0, 3, 4, 0, 3, 5, 0, 3, 6, 0, 3, 7, 0, 3, 8, 0, 3, 9, 0, 4, -9, 0, 4, -8, 0, 4, -7, 0, 4, -6, 0, 4, -5, 0, 4, -4, 0, 4, -3, 0, 4, -2, 0, 4, -1, 0, 4, 0, 0, 4, 1, 0, 4, 2, 0, 4, 3, 0, 4, 4, 0, 4, 5, 0, 4, 6, 0, 4, 7, 0, 4, 8, 0, 4, 9, 0, 5, -9, 0, 5, -8, 0, 5, -7, 0, 5, -6, 0, 5, -5, 0, 5, -4, 0, 5, -3, 0, 5, -2, 0, 5, -1, 0, 5, 0, 0, 5, 1, 0, 5, 2, 0, 5, 3, 0, 5, 4, 0, 5, 5, 0, 5, 6, 0, 5, 7, 0, 5, 8, 0, 5, 9, 0, 6, -9, 0, 6, -8, 0, 6, -7, 0, 6, -6, 0, 6, -5, 0, 6, -4, 0, 6, -3, 0, 6, -2, 0, 6, -1, 0, 6, 0, 0, 6, 1, 0, 6, 2, 0, 6, 3, 0, 6, 4, 0, 6, 5, 0, 6, 6, 0, 6, 7, 0, 6, 8, 0, 6, 9, 0, 7, -9, 0, 7, -8, 0, 7, -7, 0, 7, -6, 0, 7, -5, 0, 7, -4, 0, 7, -3, 0, 7, -2, 0, 7, -1, 0, 7, 0, 0, 7, 1, 0, 7, 2, 0, 7, 3, 0, 7, 4, 0, 7, 5, 0, 7, 6, 0, 7, 7, 0, 7, 8, 0, 7, 9, 0, 8, -9, 0, 8, -8, 0, 8, -7, 0, 8, -6, 0, 8, -5, 0, 8, -4, 0, 8, -3, 0, 8, -2, 0, 8, -1, 0, 8, 0, 0, 8, 1, 0, 8, 2, 0, 8, 3, 0, 8, 4, 0, 8, 5, 0, 8, 6, 0, 8, 7, 0, 8, 8, 0, 8, 9, 0, 9, -9, 0, 9, -8, 0, 9, -7, 0, 9, -6, 0, 9, -5, 0, 9, -4, 0, 9, -3, 0, 9, -2, 0, 9, -1, 0, 9, 0, 0, 9, 1, 0, 9, 2, 0, 9, 3, 0, 9, 4, 0, 9, 5, 0, 9, 6, 0, 9, 7, 0, 9, 8, 0, 9, 9, 1, -9, -9, 1, -9, -8, 1, -9, -7, 1, -9, -6, 1, -9, -5, 1, -9, -4, 1, -9, -3, 1, -9, -2, 1, -9, -1, 1, -9, 0, 1, -9, 1, 1, -9, 2, 1, -9, 3, 1, -9, 4, 1, -9, 5, 1, -9, 6, 1, -9, 7, 1, -9, 8, 1, -9, 9, 1, -8, -9, 1, -8, -8, 1, -8, -7, 1, -8, -6, 1, -8, -5, 1, -8, -4, 1, -8, -3, 1, -8, -2, 1, -8, -1, 1, -8, 0, 1, -8, 1, 1, -8, 2, 1, -8, 3, 1, -8, 4, 1, -8, 5, 1, -8, 6, 1, -8, 7, 1, -8, 8, 1, -8, 9, 1, -7, -9, 1, -7, -8, 1, -7, -7, 1, -7, -6, 1, -7, -5, 1, -7, -4, 1, -7, -3, 1, -7, -2, 1, -7, -1, 1, -7, 0, 1, -7, 1, 1, -7, 2, 1, -7, 3, 1, -7, 4, 1, -7, 5, 1, -7, 6, 1, -7, 7, 1, -7, 8, 1, -7, 9, 1, -6, -9, 1, -6, -8, 1, -6, -7, 1, -6, -6, 1, -6, -5, 1, -6, -4, 1, -6, -3, 1, -6, -2, 1, -6, -1, 1, -6, 0, 1, -6, 1, 1, -6, 2, 1, -6, 3, 1, -6, 4, 1, -6, 5, 1, -6, 6, 1, -6, 7, 1, -6, 8, 1, -6, 9, 1, -5, -9, 1, -5, -8, 1, -5, -7, 1, -5, -6, 1, -5, -5, 1, -5, -4, 1, -5, -3, 1, -5, -2, 1, -5, -1, 1, -5, 0, 1, -5, 1, 1, -5, 2, 1, -5, 3, 1, -5, 4, 1, -5, 5, 1, -5, 6, 1, -5, 7, 1, -5, 8, 1, -5, 9, 1, -4, -9, 1, -4, -8, 1, -4, -7, 1, -4, -6, 1, -4, -5, 1, -4, -4, 1, -4, -3, 1, -4, -2, 1, -4, -1, 1, -4, 0, 1, -4, 1, 1, -4, 2, 1, -4, 3, 1, -4, 4, 1, -4, 5, 1, -4, 6, 1, -4, 7, 1, -4, 8, 1, -4, 9, 1, -3, -9, 1, -3, -8, 1, -3, -7, 1, -3, -6, 1, -3, -5, 1, -3, -4, 1, -3, -3, 1, -3, -2, 1, -3, -1, 1, -3, 0, 1, -3, 1, 1, -3, 2, 1, -3, 3, 1, -3, 4, 1, -3, 5, 1, -3, 6, 1, -3, 7, 1, -3, 8, 1, -3, 9, 1, -2, -9, 1, -2, -8, 1, -2, -7, 1, -2, -6, 1, -2, -5, 1, -2, -4, 1, -2, -3, 1, 
-2, -2, 1, -2, -1, 1, -2, 0, 1, -2, 1, 1, -2, 2, 1, -2, 3, 1, -2, 4, 1, -2, 5, 1, -2, 6, 1, -2, 7, 1, -2, 8, 1, -2, 9, 1, -1, -9, 1, -1, -8, 1, -1, -7, 1, -1, -6, 1, -1, -5, 1, -1, -4, 1, -1, -3, 1, -1, -2, 1, -1, -1, 1, -1, 0, 1, -1, 1, 1, -1, 2, 1, -1, 3, 1, -1, 4, 1, -1, 5, 1, -1, 6, 1, -1, 7, 1, -1, 8, 1, -1, 9, 1, 0, -9, 1, 0, -8, 1, 0, -7, 1, 0, -6, 1, 0, -5, 1, 0, -4, 1, 0, -3, 1, 0, -2, 1, 0, -1, 1, 0, 0, 1, 0, 1, 1, 0, 2, 1, 0, 3, 1, 0, 4, 1, 0, 5, 1, 0, 6, 1, 0, 7, 1, 0, 8, 1, 0, 9, 1, 1, -9, 1, 1, -8, 1, 1, -7, 1, 1, -6, 1, 1, -5, 1, 1, -4, 1, 1, -3, 1, 1, -2, 1, 1, -1, 1, 1, 0, 1, 1, 1, 1, 1, 2, 1, 1, 3, 1, 1, 4, 1, 1, 5, 1, 1, 6, 1, 1, 7, 1, 1, 8, 1, 1, 9, 1, 2, -9, 1, 2, -8, 1, 2, -7, 1, 2, -6, 1, 2, -5, 1, 2, -4, 1, 2, -3, 1, 2, -2, 1, 2, -1, 1, 2, 0, 1, 2, 1, 1, 2, 2, 1, 2, 3, 1, 2, 4, 1, 2, 5, 1, 2, 6, 1, 2, 7, 1, 2, 8, 1, 2, 9, 1, 3, -9, 1, 3, -8, 1, 3, -7, 1, 3, -6, 1, 3, -5, 1, 3, -4, 1, 3, -3, 1, 3, -2, 1, 3, -1, 1, 3, 0, 1, 3, 1, 1, 3, 2, 1, 3, 3, 1, 3, 4, 1, 3, 5, 1, 3, 6, 1, 3, 7, 1, 3, 8, 1, 3, 9, 1, 4, -9, 1, 4, -8, 1, 4, -7, 1, 4, -6, 1, 4, -5, 1, 4, -4, 1, 4, -3, 1, 4, -2, 1, 4, -1, 1, 4, 0, 1, 4, 1, 1, 4, 2, 1, 4, 3, 1, 4, 4, 1, 4, 5, 1, 4, 6, 1, 4, 7, 1, 4, 8, 1, 4, 9, 1, 5, -9, 1, 5, -8, 1, 5, -7, 1, 5, -6, 1, 5, -5, 1, 5, -4, 1, 5, -3, 1, 5, -2, 1, 5, -1, 1, 5, 0, 1, 5, 1, 1, 5, 2, 1, 5, 3, 1, 5, 4, 1, 5, 5, 1, 5, 6, 1, 5, 7, 1, 5, 8, 1, 5, 9, 1, 6, -9, 1, 6, -8, 1, 6, -7, 1, 6, -6, 1, 6, -5, 1, 6, -4, 1, 6, -3, 1, 6, -2, 1, 6, -1, 1, 6, 0, 1, 6, 1, 1, 6, 2, 1, 6, 3, 1, 6, 4, 1, 6, 5, 1, 6, 6, 1, 6, 7, 1, 6, 8, 1, 6, 9, 1, 7, -9, 1, 7, -8, 1, 7, -7, 1, 7, -6, 1, 7, -5, 1, 7, -4, 1, 7, -3, 1, 7, -2, 1, 7, -1, 1, 7, 0, 1, 7, 1, 1, 7, 2, 1, 7, 3, 1, 7, 4, 1, 7, 5, 1, 7, 6, 1, 7, 7, 1, 7, 8, 1, 7, 9, 1, 8, -9, 1, 8, -8, 1, 8, -7, 1, 8, -6, 1, 8, -5, 1, 8, -4, 1, 8, -3, 1, 8, -2, 1, 8, -1, 1, 8, 0, 1, 8, 1, 1, 8, 2, 1, 8, 3, 1, 8, 4, 1, 8, 5, 1, 8, 6, 1, 8, 7, 1, 8, 8, 1, 8, 9, 1, 9, -9, 1, 9, -8, 1, 9, -7, 1, 9, -6, 1, 9, -5, 1, 9, -4, 1, 9, -3, 1, 9, -2, 1, 9, -1, 1, 9, 0, 1, 9, 1, 1, 9, 2, 1, 9, 3, 1, 9, 4, 1, 9, 5, 1, 9, 6, 1, 9, 7, 1, 9, 8, 1, 9, 9, 2, -9, -9, 2, -9, -8, 2, -9, -7, 2, -9, -6, 2, -9, -5, 2, -9, -4, 2, -9, -3, 2, -9, -2, 2, -9, -1, 2, -9, 0, 2, -9, 1, 2, -9, 2, 2, -9, 3, 2, -9, 4, 2, -9, 5, 2, -9, 6, 2, -9, 7, 2, -9, 8, 2, -9, 9, 2, -8, -9, 2, -8, -8, 2, -8, -7, 2, -8, -6, 2, -8, -5, 2, -8, -4, 2, -8, -3, 2, -8, -2, 2, -8, -1, 2, -8, 0, 2, -8, 1, 2, -8, 2, 2, -8, 3, 2, -8, 4, 2, -8, 5, 2, -8, 6, 2, -8, 7, 2, -8, 8, 2, -8, 9, 2, -7, -9, 2, -7, -8, 2, -7, -7, 2, -7, -6, 2, -7, -5, 2, -7, -4, 2, -7, -3, 2, -7, -2, 2, -7, -1, 2, -7, 0, 2, -7, 1, 2, -7, 2, 2, -7, 3, 2, -7, 4, 2, -7, 5, 2, -7, 6, 2, -7, 7, 2, -7, 8, 2, -7, 9, 2, -6, -9, 2, -6, -8, 2, -6, -7, 2, -6, -6, 2, -6, -5, 2, -6, -4, 2, -6, -3, 2, -6, -2, 2, -6, -1, 2, -6, 0, 2, -6, 1, 2, -6, 2, 2, -6, 3, 2, -6, 4, 2, -6, 5, 2, -6, 6, 2, -6, 7, 2, -6, 8, 2, -6, 9, 2, -5, -9, 2, -5, -8, 2, -5, -7, 2, -5, -6, 2, -5, -5, 2, -5, -4, 2, -5, -3, 2, -5, -2, 2, -5, -1, 2, -5, 0, 2, -5, 1, 2, -5, 2, 2, -5, 3, 2, -5, 4, 2, -5, 5, 2, -5, 6, 2, -5, 7, 2, -5, 8, 2, -5, 9, 2, -4, -9, 2, -4, -8, 2, -4, -7, 2, -4, -6, 2, -4, -5, 2, -4, -4, 2, -4, -3, 2, -4, -2, 2, -4, -1, 2, -4, 0, 2, -4, 1, 2, -4, 2, 2, -4, 3, 2, -4, 4, 2, -4, 5, 2, -4, 6, 2, -4, 7, 2, -4, 8, 2, -4, 9, 2, -3, -9, 2, -3, -8, 2, -3, -7, 2, -3, -6, 2, -3, -5, 2, -3, -4, 2, -3, -3, 2, -3, -2, 2, -3, -1, 2, -3, 0, 2, -3, 1, 2, -3, 2, 2, -3, 3, 2, -3, 4, 2, -3, 5, 2, -3, 6, 2, -3, 7, 2, -3, 8, 2, -3, 9, 2, -2, -9, 2, -2, -8, 2, -2, -7, 2, -2, -6, 
2, -2, -5, 2, -2, -4, 2, -2, -3, 2, -2, -2, 2, -2, -1, 2, -2, 0, 2, -2, 1, 2, -2, 2, 2, -2, 3, 2, -2, 4, 2, -2, 5, 2, -2, 6, 2, -2, 7, 2, -2, 8, 2, -2, 9, 2, -1, -9, 2, -1, -8, 2, -1, -7, 2, -1, -6, 2, -1, -5, 2, -1, -4, 2, -1, -3, 2, -1, -2, 2, -1, -1, 2, -1, 0, 2, -1, 1, 2, -1, 2, 2, -1, 3, 2, -1, 4, 2, -1, 5, 2, -1, 6, 2, -1, 7, 2, -1, 8, 2, -1, 9, 2, 0, -9, 2, 0, -8, 2, 0, -7, 2, 0, -6, 2, 0, -5, 2, 0, -4, 2, 0, -3, 2, 0, -2, 2, 0, -1, 2, 0, 0, 2, 0, 1, 2, 0, 2, 2, 0, 3, 2, 0, 4, 2, 0, 5, 2, 0, 6, 2, 0, 7, 2, 0, 8, 2, 0, 9, 2, 1, -9, 2, 1, -8, 2, 1, -7, 2, 1, -6, 2, 1, -5, 2, 1, -4, 2, 1, -3, 2, 1, -2, 2, 1, -1, 2, 1, 0, 2, 1, 1, 2, 1, 2, 2, 1, 3, 2, 1, 4, 2, 1, 5, 2, 1, 6, 2, 1, 7, 2, 1, 8, 2, 1, 9, 2, 2, -9, 2, 2, -8, 2, 2, -7, 2, 2, -6, 2, 2, -5, 2, 2, -4, 2, 2, -3, 2, 2, -2, 2, 2, -1, 2, 2, 0, 2, 2, 1, 2, 2, 2, 2, 2, 3, 2, 2, 4, 2, 2, 5, 2, 2, 6, 2, 2, 7, 2, 2, 8, 2, 2, 9, 2, 3, -9, 2, 3, -8, 2, 3, -7, 2, 3, -6, 2, 3, -5, 2, 3, -4, 2, 3, -3, 2, 3, -2, 2, 3, -1, 2, 3, 0, 2, 3, 1, 2, 3, 2, 2, 3, 3, 2, 3, 4, 2, 3, 5, 2, 3, 6, 2, 3, 7, 2, 3, 8, 2, 3, 9, 2, 4, -9, 2, 4, -8, 2, 4, -7, 2, 4, -6, 2, 4, -5, 2, 4, -4, 2, 4, -3, 2, 4, -2, 2, 4, -1, 2, 4, 0, 2, 4, 1, 2, 4, 2, 2, 4, 3, 2, 4, 4, 2, 4, 5, 2, 4, 6, 2, 4, 7, 2, 4, 8, 2, 4, 9, 2, 5, -9, 2, 5, -8, 2, 5, -7, 2, 5, -6, 2, 5, -5, 2, 5, -4, 2, 5, -3, 2, 5, -2, 2, 5, -1, 2, 5, 0, 2, 5, 1, 2, 5, 2, 2, 5, 3, 2, 5, 4, 2, 5, 5, 2, 5, 6, 2, 5, 7, 2, 5, 8, 2, 5, 9, 2, 6, -9, 2, 6, -8, 2, 6, -7, 2, 6, -6, 2, 6, -5, 2, 6, -4, 2, 6, -3, 2, 6, -2, 2, 6, -1, 2, 6, 0, 2, 6, 1, 2, 6, 2, 2, 6, 3, 2, 6, 4, 2, 6, 5, 2, 6, 6, 2, 6, 7, 2, 6, 8, 2, 6, 9, 2, 7, -9, 2, 7, -8, 2, 7, -7, 2, 7, -6, 2, 7, -5, 2, 7, -4, 2, 7, -3, 2, 7, -2, 2, 7, -1, 2, 7, 0, 2, 7, 1, 2, 7, 2, 2, 7, 3, 2, 7, 4, 2, 7, 5, 2, 7, 6, 2, 7, 7, 2, 7, 8, 2, 7, 9, 2, 8, -9, 2, 8, -8, 2, 8, -7, 2, 8, -6, 2, 8, -5, 2, 8, -4, 2, 8, -3, 2, 8, -2, 2, 8, -1, 2, 8, 0, 2, 8, 1, 2, 8, 2, 2, 8, 3, 2, 8, 4, 2, 8, 5, 2, 8, 6, 2, 8, 7, 2, 8, 8, 2, 8, 9, 2, 9, -9, 2, 9, -8, 2, 9, -7, 2, 9, -6, 2, 9, -5, 2, 9, -4, 2, 9, -3, 2, 9, -2, 2, 9, -1, 2, 9, 0, 2, 9, 1, 2, 9, 2, 2, 9, 3, 2, 9, 4, 2, 9, 5, 2, 9, 6, 2, 9, 7, 2, 9, 8, 2, 9, 9, 3, -9, -9, 3, -9, -8, 3, -9, -7, 3, -9, -6, 3, -9, -5, 3, -9, -4, 3, -9, -3, 3, -9, -2, 3, -9, -1, 3, -9, 0, 3, -9, 1, 3, -9, 2, 3, -9, 3, 3, -9, 4, 3, -9, 5, 3, -9, 6, 3, -9, 7, 3, -9, 8, 3, -9, 9, 3, -8, -9, 3, -8, -8, 3, -8, -7, 3, -8, -6, 3, -8, -5, 3, -8, -4, 3, -8, -3, 3, -8, -2, 3, -8, -1, 3, -8, 0, 3, -8, 1, 3, -8, 2, 3, -8, 3, 3, -8, 4, 3, -8, 5, 3, -8, 6, 3, -8, 7, 3, -8, 8, 3, -8, 9, 3, -7, -9, 3, -7, -8, 3, -7, -7, 3, -7, -6, 3, -7, -5, 3, -7, -4, 3, -7, -3, 3, -7, -2, 3, -7, -1, 3, -7, 0, 3, -7, 1, 3, -7, 2, 3, -7, 3, 3, -7, 4, 3, -7, 5, 3, -7, 6, 3, -7, 7, 3, -7, 8, 3, -7, 9, 3, -6, -9, 3, -6, -8, 3, -6, -7, 3, -6, -6, 3, -6, -5, 3, -6, -4, 3, -6, -3, 3, -6, -2, 3, -6, -1, 3, -6, 0, 3, -6, 1, 3, -6, 2, 3, -6, 3, 3, -6, 4, 3, -6, 5, 3, -6, 6, 3, -6, 7, 3, -6, 8, 3, -6, 9, 3, -5, -9, 3, -5, -8, 3, -5, -7, 3, -5, -6, 3, -5, -5, 3, -5, -4, 3, -5, -3, 3, -5, -2, 3, -5, -1, 3, -5, 0, 3, -5, 1, 3, -5, 2, 3, -5, 3, 3, -5, 4, 3, -5, 5, 3, -5, 6, 3, -5, 7, 3, -5, 8, 3, -5, 9, 3, -4, -9, 3, -4, -8, 3, -4, -7, 3, -4, -6, 3, -4, -5, 3, -4, -4, 3, -4, -3, 3, -4, -2, 3, -4, -1, 3, -4, 0, 3, -4, 1, 3, -4, 2, 3, -4, 3, 3, -4, 4, 3, -4, 5, 3, -4, 6, 3, -4, 7, 3, -4, 8, 3, -4, 9, 3, -3, -9, 3, -3, -8, 3, -3, -7, 3, -3, -6, 3, -3, -5, 3, -3, -4, 3, -3, -3, 3, -3, -2, 3, -3, -1, 3, -3, 0, 3, -3, 1, 3, -3, 2, 3, -3, 3, 3, -3, 4, 3, -3, 5, 3, -3, 6, 3, -3, 7, 3, -3, 8, 3, -3, 9, 3, -2, 
-9, 3, -2, -8, 3, -2, -7, 3, -2, -6, 3, -2, -5, 3, -2, -4, 3, -2, -3, 3, -2, -2, 3, -2, -1, 3, -2, 0, 3, -2, 1, 3, -2, 2, 3, -2, 3, 3, -2, 4, 3, -2, 5, 3, -2, 6, 3, -2, 7, 3, -2, 8, 3, -2, 9, 3, -1, -9, 3, -1, -8, 3, -1, -7, 3, -1, -6, 3, -1, -5, 3, -1, -4, 3, -1, -3, 3, -1, -2, 3, -1, -1, 3, -1, 0, 3, -1, 1, 3, -1, 2, 3, -1, 3, 3, -1, 4, 3, -1, 5, 3, -1, 6, 3, -1, 7, 3, -1, 8, 3, -1, 9, 3, 0, -9, 3, 0, -8, 3, 0, -7, 3, 0, -6, 3, 0, -5, 3, 0, -4, 3, 0, -3, 3, 0, -2, 3, 0, -1, 3, 0, 0, 3, 0, 1, 3, 0, 2, 3, 0, 3, 3, 0, 4, 3, 0, 5, 3, 0, 6, 3, 0, 7, 3, 0, 8, 3, 0, 9, 3, 1, -9, 3, 1, -8, 3, 1, -7, 3, 1, -6, 3, 1, -5, 3, 1, -4, 3, 1, -3, 3, 1, -2, 3, 1, -1, 3, 1, 0, 3, 1, 1, 3, 1, 2, 3, 1, 3, 3, 1, 4, 3, 1, 5, 3, 1, 6, 3, 1, 7, 3, 1, 8, 3, 1, 9, 3, 2, -9, 3, 2, -8, 3, 2, -7, 3, 2, -6, 3, 2, -5, 3, 2, -4, 3, 2, -3, 3, 2, -2, 3, 2, -1, 3, 2, 0, 3, 2, 1, 3, 2, 2, 3, 2, 3, 3, 2, 4, 3, 2, 5, 3, 2, 6, 3, 2, 7, 3, 2, 8, 3, 2, 9, 3, 3, -9, 3, 3, -8, 3, 3, -7, 3, 3, -6, 3, 3, -5, 3, 3, -4, 3, 3, -3, 3, 3, -2, 3, 3, -1, 3, 3, 0, 3, 3, 1, 3, 3, 2, 3, 3, 3, 3, 3, 4, 3, 3, 5, 3, 3, 6, 3, 3, 7, 3, 3, 8, 3, 3, 9, 3, 4, -9, 3, 4, -8, 3, 4, -7, 3, 4, -6, 3, 4, -5, 3, 4, -4, 3, 4, -3, 3, 4, -2, 3, 4, -1, 3, 4, 0, 3, 4, 1, 3, 4, 2, 3, 4, 3, 3, 4, 4, 3, 4, 5, 3, 4, 6, 3, 4, 7, 3, 4, 8, 3, 4, 9, 3, 5, -9, 3, 5, -8, 3, 5, -7, 3, 5, -6, 3, 5, -5, 3, 5, -4, 3, 5, -3, 3, 5, -2, 3, 5, -1, 3, 5, 0, 3, 5, 1, 3, 5, 2, 3, 5, 3, 3, 5, 4, 3, 5, 5, 3, 5, 6, 3, 5, 7, 3, 5, 8, 3, 5, 9, 3, 6, -9, 3, 6, -8, 3, 6, -7, 3, 6, -6, 3, 6, -5, 3, 6, -4, 3, 6, -3, 3, 6, -2, 3, 6, -1, 3, 6, 0, 3, 6, 1, 3, 6, 2, 3, 6, 3, 3, 6, 4, 3, 6, 5, 3, 6, 6, 3, 6, 7, 3, 6, 8, 3, 6, 9, 3, 7, -9, 3, 7, -8, 3, 7, -7, 3, 7, -6, 3, 7, -5, 3, 7, -4, 3, 7, -3, 3, 7, -2, 3, 7, -1, 3, 7, 0, 3, 7, 1, 3, 7, 2, 3, 7, 3, 3, 7, 4, 3, 7, 5, 3, 7, 6, 3, 7, 7, 3, 7, 8, 3, 7, 9, 3, 8, -9, 3, 8, -8, 3, 8, -7, 3, 8, -6, 3, 8, -5, 3, 8, -4, 3, 8, -3, 3, 8, -2, 3, 8, -1, 3, 8, 0, 3, 8, 1, 3, 8, 2, 3, 8, 3, 3, 8, 4, 3, 8, 5, 3, 8, 6, 3, 8, 7, 3, 8, 8, 3, 8, 9, 3, 9, -9, 3, 9, -8, 3, 9, -7, 3, 9, -6, 3, 9, -5, 3, 9, -4, 3, 9, -3, 3, 9, -2, 3, 9, -1, 3, 9, 0, 3, 9, 1, 3, 9, 2, 3, 9, 3, 3, 9, 4, 3, 9, 5, 3, 9, 6, 3, 9, 7, 3, 9, 8, 3, 9, 9, 4, -9, -9, 4, -9, -8, 4, -9, -7, 4, -9, -6, 4, -9, -5, 4, -9, -4, 4, -9, -3, 4, -9, -2, 4, -9, -1, 4, -9, 0, 4, -9, 1, 4, -9, 2, 4, -9, 3, 4, -9, 4, 4, -9, 5, 4, -9, 6, 4, -9, 7, 4, -9, 8, 4, -9, 9, 4, -8, -9, 4, -8, -8, 4, -8, -7, 4, -8, -6, 4, -8, -5, 4, -8, -4, 4, -8, -3, 4, -8, -2, 4, -8, -1, 4, -8, 0, 4, -8, 1, 4, -8, 2, 4, -8, 3, 4, -8, 4, 4, -8, 5, 4, -8, 6, 4, -8, 7, 4, -8, 8, 4, -8, 9, 4, -7, -9, 4, -7, -8, 4, -7, -7, 4, -7, -6, 4, -7, -5, 4, -7, -4, 4, -7, -3, 4, -7, -2, 4, -7, -1, 4, -7, 0, 4, -7, 1, 4, -7, 2, 4, -7, 3, 4, -7, 4, 4, -7, 5, 4, -7, 6, 4, -7, 7, 4, -7, 8, 4, -7, 9, 4, -6, -9, 4, -6, -8, 4, -6, -7, 4, -6, -6, 4, -6, -5, 4, -6, -4, 4, -6, -3, 4, -6, -2, 4, -6, -1, 4, -6, 0, 4, -6, 1, 4, -6, 2, 4, -6, 3, 4, -6, 4, 4, -6, 5, 4, -6, 6, 4, -6, 7, 4, -6, 8, 4, -6, 9, 4, -5, -9, 4, -5, -8, 4, -5, -7, 4, -5, -6, 4, -5, -5, 4, -5, -4, 4, -5, -3, 4, -5, -2, 4, -5, -1, 4, -5, 0, 4, -5, 1, 4, -5, 2, 4, -5, 3, 4, -5, 4, 4, -5, 5, 4, -5, 6, 4, -5, 7, 4, -5, 8, 4, -5, 9, 4, -4, -9, 4, -4, -8, 4, -4, -7, 4, -4, -6, 4, -4, -5, 4, -4, -4, 4, -4, -3, 4, -4, -2, 4, -4, -1, 4, -4, 0, 4, -4, 1, 4, -4, 2, 4, -4, 3, 4, -4, 4, 4, -4, 5, 4, -4, 6, 4, -4, 7, 4, -4, 8, 4, -4, 9, 4, -3, -9, 4, -3, -8, 4, -3, -7, 4, -3, -6, 4, -3, -5, 4, -3, -4, 4, -3, -3, 4, -3, -2, 4, -3, -1, 4, -3, 0, 4, -3, 1, 4, -3, 2, 4, -3, 3, 4, -3, 4, 4, -3, 5, 4, -3, 6, 
4, -3, 7, 4, -3, 8, 4, -3, 9, 4, -2, -9, 4, -2, -8, 4, -2, -7, 4, -2, -6, 4, -2, -5, 4, -2, -4, 4, -2, -3, 4, -2, -2, 4, -2, -1, 4, -2, 0, 4, -2, 1, 4, -2, 2, 4, -2, 3, 4, -2, 4, 4, -2, 5, 4, -2, 6, 4, -2, 7, 4, -2, 8, 4, -2, 9, 4, -1, -9, 4, -1, -8, 4, -1, -7, 4, -1, -6, 4, -1, -5, 4, -1, -4, 4, -1, -3, 4, -1, -2, 4, -1, -1, 4, -1, 0, 4, -1, 1, 4, -1, 2, 4, -1, 3, 4, -1, 4, 4, -1, 5, 4, -1, 6, 4, -1, 7, 4, -1, 8, 4, -1, 9, 4, 0, -9, 4, 0, -8, 4, 0, -7, 4, 0, -6, 4, 0, -5, 4, 0, -4, 4, 0, -3, 4, 0, -2, 4, 0, -1, 4, 0, 0, 4, 0, 1, 4, 0, 2, 4, 0, 3, 4, 0, 4, 4, 0, 5, 4, 0, 6, 4, 0, 7, 4, 0, 8, 4, 0, 9, 4, 1, -9, 4, 1, -8, 4, 1, -7, 4, 1, -6, 4, 1, -5, 4, 1, -4, 4, 1, -3, 4, 1, -2, 4, 1, -1, 4, 1, 0, 4, 1, 1, 4, 1, 2, 4, 1, 3, 4, 1, 4, 4, 1, 5, 4, 1, 6, 4, 1, 7, 4, 1, 8, 4, 1, 9, 4, 2, -9, 4, 2, -8, 4, 2, -7, 4, 2, -6, 4, 2, -5, 4, 2, -4, 4, 2, -3, 4, 2, -2, 4, 2, -1, 4, 2, 0, 4, 2, 1, 4, 2, 2, 4, 2, 3, 4, 2, 4, 4, 2, 5, 4, 2, 6, 4, 2, 7, 4, 2, 8, 4, 2, 9, 4, 3, -9, 4, 3, -8, 4, 3, -7, 4, 3, -6, 4, 3, -5, 4, 3, -4, 4, 3, -3, 4, 3, -2, 4, 3, -1, 4, 3, 0, 4, 3, 1, 4, 3, 2, 4, 3, 3, 4, 3, 4, 4, 3, 5, 4, 3, 6, 4, 3, 7, 4, 3, 8, 4, 3, 9, 4, 4, -9, 4, 4, -8, 4, 4, -7, 4, 4, -6, 4, 4, -5, 4, 4, -4, 4, 4, -3, 4, 4, -2, 4, 4, -1, 4, 4, 0, 4, 4, 1, 4, 4, 2, 4, 4, 3, 4, 4, 4, 4, 4, 5, 4, 4, 6, 4, 4, 7, 4, 4, 8, 4, 4, 9, 4, 5, -9, 4, 5, -8, 4, 5, -7, 4, 5, -6, 4, 5, -5, 4, 5, -4, 4, 5, -3, 4, 5, -2, 4, 5, -1, 4, 5, 0, 4, 5, 1, 4, 5, 2, 4, 5, 3, 4, 5, 4, 4, 5, 5, 4, 5, 6, 4, 5, 7, 4, 5, 8, 4, 5, 9, 4, 6, -9, 4, 6, -8, 4, 6, -7, 4, 6, -6, 4, 6, -5, 4, 6, -4, 4, 6, -3, 4, 6, -2, 4, 6, -1, 4, 6, 0, 4, 6, 1, 4, 6, 2, 4, 6, 3, 4, 6, 4, 4, 6, 5, 4, 6, 6, 4, 6, 7, 4, 6, 8, 4, 6, 9, 4, 7, -9, 4, 7, -8, 4, 7, -7, 4, 7, -6, 4, 7, -5, 4, 7, -4, 4, 7, -3, 4, 7, -2, 4, 7, -1, 4, 7, 0, 4, 7, 1, 4, 7, 2, 4, 7, 3, 4, 7, 4, 4, 7, 5, 4, 7, 6, 4, 7, 7, 4, 7, 8, 4, 7, 9, 4, 8, -9, 4, 8, -8, 4, 8, -7, 4, 8, -6, 4, 8, -5, 4, 8, -4, 4, 8, -3, 4, 8, -2, 4, 8, -1, 4, 8, 0, 4, 8, 1, 4, 8, 2, 4, 8, 3, 4, 8, 4, 4, 8, 5, 4, 8, 6, 4, 8, 7, 4, 8, 8, 4, 8, 9, 4, 9, -9, 4, 9, -8, 4, 9, -7, 4, 9, -6, 4, 9, -5, 4, 9, -4, 4, 9, -3, 4, 9, -2, 4, 9, -1, 4, 9, 0, 4, 9, 1, 4, 9, 2, 4, 9, 3, 4, 9, 4, 4, 9, 5, 4, 9, 6, 4, 9, 7, 4, 9, 8, 4, 9, 9, 5, -9, -9, 5, -9, -8, 5, -9, -7, 5, -9, -6, 5, -9, -5, 5, -9, -4, 5, -9, -3, 5, -9, -2, 5, -9, -1, 5, -9, 0, 5, -9, 1, 5, -9, 2, 5, -9, 3, 5, -9, 4, 5, -9, 5, 5, -9, 6, 5, -9, 7, 5, -9, 8, 5, -9, 9, 5, -8, -9, 5, -8, -8, 5, -8, -7, 5, -8, -6, 5, -8, -5, 5, -8, -4, 5, -8, -3, 5, -8, -2, 5, -8, -1, 5, -8, 0, 5, -8, 1, 5, -8, 2, 5, -8, 3, 5, -8, 4, 5, -8, 5, 5, -8, 6, 5, -8, 7, 5, -8, 8, 5, -8, 9, 5, -7, -9, 5, -7, -8, 5, -7, -7, 5, -7, -6, 5, -7, -5, 5, -7, -4, 5, -7, -3, 5, -7, -2, 5, -7, -1, 5, -7, 0, 5, -7, 1, 5, -7, 2, 5, -7, 3, 5, -7, 4, 5, -7, 5, 5, -7, 6, 5, -7, 7, 5, -7, 8, 5, -7, 9, 5, -6, -9, 5, -6, -8, 5, -6, -7, 5, -6, -6, 5, -6, -5, 5, -6, -4, 5, -6, -3, 5, -6, -2, 5, -6, -1, 5, -6, 0, 5, -6, 1, 5, -6, 2, 5, -6, 3, 5, -6, 4, 5, -6, 5, 5, -6, 6, 5, -6, 7, 5, -6, 8, 5, -6, 9, 5, -5, -9, 5, -5, -8, 5, -5, -7, 5, -5, -6, 5, -5, -5, 5, -5, -4, 5, -5, -3, 5, -5, -2, 5, -5, -1, 5, -5, 0, 5, -5, 1, 5, -5, 2, 5, -5, 3, 5, -5, 4, 5, -5, 5, 5, -5, 6, 5, -5, 7, 5, -5, 8, 5, -5, 9, 5, -4, -9, 5, -4, -8, 5, -4, -7, 5, -4, -6, 5, -4, -5, 5, -4, -4, 5, -4, -3, 5, -4, -2, 5, -4, -1, 5, -4, 0, 5, -4, 1, 5, -4, 2, 5, -4, 3, 5, -4, 4, 5, -4, 5, 5, -4, 6, 5, -4, 7, 5, -4, 8, 5, -4, 9, 5, -3, -9, 5, -3, -8, 5, -3, -7, 5, -3, -6, 5, -3, -5, 5, -3, -4, 5, -3, -3, 5, -3, -2, 5, -3, -1, 5, -3, 0, 5, -3, 1, 5, -3, 2, 5, 
-3, 3, 5, -3, 4, 5, -3, 5, 5, -3, 6, 5, -3, 7, 5, -3, 8, 5, -3, 9, 5, -2, -9, 5, -2, -8, 5, -2, -7, 5, -2, -6, 5, -2, -5, 5, -2, -4, 5, -2, -3, 5, -2, -2, 5, -2, -1, 5, -2, 0, 5, -2, 1, 5, -2, 2, 5, -2, 3, 5, -2, 4, 5, -2, 5, 5, -2, 6, 5, -2, 7, 5, -2, 8, 5, -2, 9, 5, -1, -9, 5, -1, -8, 5, -1, -7, 5, -1, -6, 5, -1, -5, 5, -1, -4, 5, -1, -3, 5, -1, -2, 5, -1, -1, 5, -1, 0, 5, -1, 1, 5, -1, 2, 5, -1, 3, 5, -1, 4, 5, -1, 5, 5, -1, 6, 5, -1, 7, 5, -1, 8, 5, -1, 9, 5, 0, -9, 5, 0, -8, 5, 0, -7, 5, 0, -6, 5, 0, -5, 5, 0, -4, 5, 0, -3, 5, 0, -2, 5, 0, -1, 5, 0, 0, 5, 0, 1, 5, 0, 2, 5, 0, 3, 5, 0, 4, 5, 0, 5, 5, 0, 6, 5, 0, 7, 5, 0, 8, 5, 0, 9, 5, 1, -9, 5, 1, -8, 5, 1, -7, 5, 1, -6, 5, 1, -5, 5, 1, -4, 5, 1, -3, 5, 1, -2, 5, 1, -1, 5, 1, 0, 5, 1, 1, 5, 1, 2, 5, 1, 3, 5, 1, 4, 5, 1, 5, 5, 1, 6, 5, 1, 7, 5, 1, 8, 5, 1, 9, 5, 2, -9, 5, 2, -8, 5, 2, -7, 5, 2, -6, 5, 2, -5, 5, 2, -4, 5, 2, -3, 5, 2, -2, 5, 2, -1, 5, 2, 0, 5, 2, 1, 5, 2, 2, 5, 2, 3, 5, 2, 4, 5, 2, 5, 5, 2, 6, 5, 2, 7, 5, 2, 8, 5, 2, 9, 5, 3, -9, 5, 3, -8, 5, 3, -7, 5, 3, -6, 5, 3, -5, 5, 3, -4, 5, 3, -3, 5, 3, -2, 5, 3, -1, 5, 3, 0, 5, 3, 1, 5, 3, 2, 5, 3, 3, 5, 3, 4, 5, 3, 5, 5, 3, 6, 5, 3, 7, 5, 3, 8, 5, 3, 9, 5, 4, -9, 5, 4, -8, 5, 4, -7, 5, 4, -6, 5, 4, -5, 5, 4, -4, 5, 4, -3, 5, 4, -2, 5, 4, -1, 5, 4, 0, 5, 4, 1, 5, 4, 2, 5, 4, 3, 5, 4, 4, 5, 4, 5, 5, 4, 6, 5, 4, 7, 5, 4, 8, 5, 4, 9, 5, 5, -9, 5, 5, -8, 5, 5, -7, 5, 5, -6, 5, 5, -5, 5, 5, -4, 5, 5, -3, 5, 5, -2, 5, 5, -1, 5, 5, 0, 5, 5, 1, 5, 5, 2, 5, 5, 3, 5, 5, 4, 5, 5, 5, 5, 5, 6, 5, 5, 7, 5, 5, 8, 5, 5, 9, 5, 6, -9, 5, 6, -8, 5, 6, -7, 5, 6, -6, 5, 6, -5, 5, 6, -4, 5, 6, -3, 5, 6, -2, 5, 6, -1, 5, 6, 0, 5, 6, 1, 5, 6, 2, 5, 6, 3, 5, 6, 4, 5, 6, 5, 5, 6, 6, 5, 6, 7, 5, 6, 8, 5, 6, 9, 5, 7, -9, 5, 7, -8, 5, 7, -7, 5, 7, -6, 5, 7, -5, 5, 7, -4, 5, 7, -3, 5, 7, -2, 5, 7, -1, 5, 7, 0, 5, 7, 1, 5, 7, 2, 5, 7, 3, 5, 7, 4, 5, 7, 5, 5, 7, 6, 5, 7, 7, 5, 7, 8, 5, 7, 9, 5, 8, -9, 5, 8, -8, 5, 8, -7, 5, 8, -6, 5, 8, -5, 5, 8, -4, 5, 8, -3, 5, 8, -2, 5, 8, -1, 5, 8, 0, 5, 8, 1, 5, 8, 2, 5, 8, 3, 5, 8, 4, 5, 8, 5, 5, 8, 6, 5, 8, 7, 5, 8, 8, 5, 8, 9, 5, 9, -9, 5, 9, -8, 5, 9, -7, 5, 9, -6, 5, 9, -5, 5, 9, -4, 5, 9, -3, 5, 9, -2, 5, 9, -1, 5, 9, 0, 5, 9, 1, 5, 9, 2, 5, 9, 3, 5, 9, 4, 5, 9, 5, 5, 9, 6, 5, 9, 7, 5, 9, 8, 5, 9, 9, 6, -9, -9, 6, -9, -8, 6, -9, -7, 6, -9, -6, 6, -9, -5, 6, -9, -4, 6, -9, -3, 6, -9, -2, 6, -9, -1, 6, -9, 0, 6, -9, 1, 6, -9, 2, 6, -9, 3, 6, -9, 4, 6, -9, 5, 6, -9, 6, 6, -9, 7, 6, -9, 8, 6, -9, 9, 6, -8, -9, 6, -8, -8, 6, -8, -7, 6, -8, -6, 6, -8, -5, 6, -8, -4, 6, -8, -3, 6, -8, -2, 6, -8, -1, 6, -8, 0, 6, -8, 1, 6, -8, 2, 6, -8, 3, 6, -8, 4, 6, -8, 5, 6, -8, 6, 6, -8, 7, 6, -8, 8, 6, -8, 9, 6, -7, -9, 6, -7, -8, 6, -7, -7, 6, -7, -6, 6, -7, -5, 6, -7, -4, 6, -7, -3, 6, -7, -2, 6, -7, -1, 6, -7, 0, 6, -7, 1, 6, -7, 2, 6, -7, 3, 6, -7, 4, 6, -7, 5, 6, -7, 6, 6, -7, 7, 6, -7, 8, 6, -7, 9, 6, -6, -9, 6, -6, -8, 6, -6, -7, 6, -6, -6, 6, -6, -5, 6, -6, -4, 6, -6, -3, 6, -6, -2, 6, -6, -1, 6, -6, 0, 6, -6, 1, 6, -6, 2, 6, -6, 3, 6, -6, 4, 6, -6, 5, 6, -6, 6, 6, -6, 7, 6, -6, 8, 6, -6, 9, 6, -5, -9, 6, -5, -8, 6, -5, -7, 6, -5, -6, 6, -5, -5, 6, -5, -4, 6, -5, -3, 6, -5, -2, 6, -5, -1, 6, -5, 0, 6, -5, 1, 6, -5, 2, 6, -5, 3, 6, -5, 4, 6, -5, 5, 6, -5, 6, 6, -5, 7, 6, -5, 8, 6, -5, 9, 6, -4, -9, 6, -4, -8, 6, -4, -7, 6, -4, -6, 6, -4, -5, 6, -4, -4, 6, -4, -3, 6, -4, -2, 6, -4, -1, 6, -4, 0, 6, -4, 1, 6, -4, 2, 6, -4, 3, 6, -4, 4, 6, -4, 5, 6, -4, 6, 6, -4, 7, 6, -4, 8, 6, -4, 9, 6, -3, -9, 6, -3, -8, 6, -3, -7, 6, -3, -6, 6, -3, -5, 6, -3, -4, 6, -3, -3, 6, -3, -2, 6, -3, 
-1, 6, -3, 0, 6, -3, 1, 6, -3, 2, 6, -3, 3, 6, -3, 4, 6, -3, 5, 6, -3, 6, 6, -3, 7, 6, -3, 8, 6, -3, 9, 6, -2, -9, 6, -2, -8, 6, -2, -7, 6, -2, -6, 6, -2, -5, 6, -2, -4, 6, -2, -3, 6, -2, -2, 6, -2, -1, 6, -2, 0, 6, -2, 1, 6, -2, 2, 6, -2, 3, 6, -2, 4, 6, -2, 5, 6, -2, 6, 6, -2, 7, 6, -2, 8, 6, -2, 9, 6, -1, -9, 6, -1, -8, 6, -1, -7, 6, -1, -6, 6, -1, -5, 6, -1, -4, 6, -1, -3, 6, -1, -2, 6, -1, -1, 6, -1, 0, 6, -1, 1, 6, -1, 2, 6, -1, 3, 6, -1, 4, 6, -1, 5, 6, -1, 6, 6, -1, 7, 6, -1, 8, 6, -1, 9, 6, 0, -9, 6, 0, -8, 6, 0, -7, 6, 0, -6, 6, 0, -5, 6, 0, -4, 6, 0, -3, 6, 0, -2, 6, 0, -1, 6, 0, 0, 6, 0, 1, 6, 0, 2, 6, 0, 3, 6, 0, 4, 6, 0, 5, 6, 0, 6, 6, 0, 7, 6, 0, 8, 6, 0, 9, 6, 1, -9, 6, 1, -8, 6, 1, -7, 6, 1, -6, 6, 1, -5, 6, 1, -4, 6, 1, -3, 6, 1, -2, 6, 1, -1, 6, 1, 0, 6, 1, 1, 6, 1, 2, 6, 1, 3, 6, 1, 4, 6, 1, 5, 6, 1, 6, 6, 1, 7, 6, 1, 8, 6, 1, 9, 6, 2, -9, 6, 2, -8, 6, 2, -7, 6, 2, -6, 6, 2, -5, 6, 2, -4, 6, 2, -3, 6, 2, -2, 6, 2, -1, 6, 2, 0, 6, 2, 1, 6, 2, 2, 6, 2, 3, 6, 2, 4, 6, 2, 5, 6, 2, 6, 6, 2, 7, 6, 2, 8, 6, 2, 9, 6, 3, -9, 6, 3, -8, 6, 3, -7, 6, 3, -6, 6, 3, -5, 6, 3, -4, 6, 3, -3, 6, 3, -2, 6, 3, -1, 6, 3, 0, 6, 3, 1, 6, 3, 2, 6, 3, 3, 6, 3, 4, 6, 3, 5, 6, 3, 6, 6, 3, 7, 6, 3, 8, 6, 3, 9, 6, 4, -9, 6, 4, -8, 6, 4, -7, 6, 4, -6, 6, 4, -5, 6, 4, -4, 6, 4, -3, 6, 4, -2, 6, 4, -1, 6, 4, 0, 6, 4, 1, 6, 4, 2, 6, 4, 3, 6, 4, 4, 6, 4, 5, 6, 4, 6, 6, 4, 7, 6, 4, 8, 6, 4, 9, 6, 5, -9, 6, 5, -8, 6, 5, -7, 6, 5, -6, 6, 5, -5, 6, 5, -4, 6, 5, -3, 6, 5, -2, 6, 5, -1, 6, 5, 0, 6, 5, 1, 6, 5, 2, 6, 5, 3, 6, 5, 4, 6, 5, 5, 6, 5, 6, 6, 5, 7, 6, 5, 8, 6, 5, 9, 6, 6, -9, 6, 6, -8, 6, 6, -7, 6, 6, -6, 6, 6, -5, 6, 6, -4, 6, 6, -3, 6, 6, -2, 6, 6, -1, 6, 6, 0, 6, 6, 1, 6, 6, 2, 6, 6, 3, 6, 6, 4, 6, 6, 5, 6, 6, 6, 6, 6, 7, 6, 6, 8, 6, 6, 9, 6, 7, -9, 6, 7, -8, 6, 7, -7, 6, 7, -6, 6, 7, -5, 6, 7, -4, 6, 7, -3, 6, 7, -2, 6, 7, -1, 6, 7, 0, 6, 7, 1, 6, 7, 2, 6, 7, 3, 6, 7, 4, 6, 7, 5, 6, 7, 6, 6, 7, 7, 6, 7, 8, 6, 7, 9, 6, 8, -9, 6, 8, -8, 6, 8, -7, 6, 8, -6, 6, 8, -5, 6, 8, -4, 6, 8, -3, 6, 8, -2, 6, 8, -1, 6, 8, 0, 6, 8, 1, 6, 8, 2, 6, 8, 3, 6, 8, 4, 6, 8, 5, 6, 8, 6, 6, 8, 7, 6, 8, 8, 6, 8, 9, 6, 9, -9, 6, 9, -8, 6, 9, -7, 6, 9, -6, 6, 9, -5, 6, 9, -4, 6, 9, -3, 6, 9, -2, 6, 9, -1, 6, 9, 0, 6, 9, 1, 6, 9, 2, 6, 9, 3, 6, 9, 4, 6, 9, 5, 6, 9, 6, 6, 9, 7, 6, 9, 8, 6, 9, 9, 7, -9, -9, 7, -9, -8, 7, -9, -7, 7, -9, -6, 7, -9, -5, 7, -9, -4, 7, -9, -3, 7, -9, -2, 7, -9, -1, 7, -9, 0, 7, -9, 1, 7, -9, 2, 7, -9, 3, 7, -9, 4, 7, -9, 5, 7, -9, 6, 7, -9, 7, 7, -9, 8, 7, -9, 9, 7, -8, -9, 7, -8, -8, 7, -8, -7, 7, -8, -6, 7, -8, -5, 7, -8, -4, 7, -8, -3, 7, -8, -2, 7, -8, -1, 7, -8, 0, 7, -8, 1, 7, -8, 2, 7, -8, 3, 7, -8, 4, 7, -8, 5, 7, -8, 6, 7, -8, 7, 7, -8, 8, 7, -8, 9, 7, -7, -9, 7, -7, -8, 7, -7, -7, 7, -7, -6, 7, -7, -5, 7, -7, -4, 7, -7, -3, 7, -7, -2, 7, -7, -1, 7, -7, 0, 7, -7, 1, 7, -7, 2, 7, -7, 3, 7, -7, 4, 7, -7, 5, 7, -7, 6, 7, -7, 7, 7, -7, 8, 7, -7, 9, 7, -6, -9, 7, -6, -8, 7, -6, -7, 7, -6, -6, 7, -6, -5, 7, -6, -4, 7, -6, -3, 7, -6, -2, 7, -6, -1, 7, -6, 0, 7, -6, 1, 7, -6, 2, 7, -6, 3, 7, -6, 4, 7, -6, 5, 7, -6, 6, 7, -6, 7, 7, -6, 8, 7, -6, 9, 7, -5, -9, 7, -5, -8, 7, -5, -7, 7, -5, -6, 7, -5, -5, 7, -5, -4, 7, -5, -3, 7, -5, -2, 7, -5, -1, 7, -5, 0, 7, -5, 1, 7, -5, 2, 7, -5, 3, 7, -5, 4, 7, -5, 5, 7, -5, 6, 7, -5, 7, 7, -5, 8, 7, -5, 9, 7, -4, -9, 7, -4, -8, 7, -4, -7, 7, -4, -6, 7, -4, -5, 7, -4, -4, 7, -4, -3, 7, -4, -2, 7, -4, -1, 7, -4, 0, 7, -4, 1, 7, -4, 2, 7, -4, 3, 7, -4, 4, 7, -4, 5, 7, -4, 6, 7, -4, 7, 7, -4, 8, 7, -4, 9, 7, -3, -9, 7, -3, -8, 7, -3, -7, 7, -3, -6, 7, -3, -5, 7, 
-3, -4, 7, -3, -3, 7, -3, -2, 7, -3, -1, 7, -3, 0, 7, -3, 1, 7, -3, 2, 7, -3, 3, 7, -3, 4, 7, -3, 5, 7, -3, 6, 7, -3, 7, 7, -3, 8, 7, -3, 9, 7, -2, -9, 7, -2, -8, 7, -2, -7, 7, -2, -6, 7, -2, -5, 7, -2, -4, 7, -2, -3, 7, -2, -2, 7, -2, -1, 7, -2, 0, 7, -2, 1, 7, -2, 2, 7, -2, 3, 7, -2, 4, 7, -2, 5, 7, -2, 6, 7, -2, 7, 7, -2, 8, 7, -2, 9, 7, -1, -9, 7, -1, -8, 7, -1, -7, 7, -1, -6, 7, -1, -5, 7, -1, -4, 7, -1, -3, 7, -1, -2, 7, -1, -1, 7, -1, 0, 7, -1, 1, 7, -1, 2, 7, -1, 3, 7, -1, 4, 7, -1, 5, 7, -1, 6, 7, -1, 7, 7, -1, 8, 7, -1, 9, 7, 0, -9, 7, 0, -8, 7, 0, -7, 7, 0, -6, 7, 0, -5, 7, 0, -4, 7, 0, -3, 7, 0, -2, 7, 0, -1, 7, 0, 0, 7, 0, 1, 7, 0, 2, 7, 0, 3, 7, 0, 4, 7, 0, 5, 7, 0, 6, 7, 0, 7, 7, 0, 8, 7, 0, 9, 7, 1, -9, 7, 1, -8, 7, 1, -7, 7, 1, -6, 7, 1, -5, 7, 1, -4, 7, 1, -3, 7, 1, -2, 7, 1, -1, 7, 1, 0, 7, 1, 1, 7, 1, 2, 7, 1, 3, 7, 1, 4, 7, 1, 5, 7, 1, 6, 7, 1, 7, 7, 1, 8, 7, 1, 9, 7, 2, -9, 7, 2, -8, 7, 2, -7, 7, 2, -6, 7, 2, -5, 7, 2, -4, 7, 2, -3, 7, 2, -2, 7, 2, -1, 7, 2, 0, 7, 2, 1, 7, 2, 2, 7, 2, 3, 7, 2, 4, 7, 2, 5, 7, 2, 6, 7, 2, 7, 7, 2, 8, 7, 2, 9, 7, 3, -9, 7, 3, -8, 7, 3, -7, 7, 3, -6, 7, 3, -5, 7, 3, -4, 7, 3, -3, 7, 3, -2, 7, 3, -1, 7, 3, 0, 7, 3, 1, 7, 3, 2, 7, 3, 3, 7, 3, 4, 7, 3, 5, 7, 3, 6, 7, 3, 7, 7, 3, 8, 7, 3, 9, 7, 4, -9, 7, 4, -8, 7, 4, -7, 7, 4, -6, 7, 4, -5, 7, 4, -4, 7, 4, -3, 7, 4, -2, 7, 4, -1, 7, 4, 0, 7, 4, 1, 7, 4, 2, 7, 4, 3, 7, 4, 4, 7, 4, 5, 7, 4, 6, 7, 4, 7, 7, 4, 8, 7, 4, 9, 7, 5, -9, 7, 5, -8, 7, 5, -7, 7, 5, -6, 7, 5, -5, 7, 5, -4, 7, 5, -3, 7, 5, -2, 7, 5, -1, 7, 5, 0, 7, 5, 1, 7, 5, 2, 7, 5, 3, 7, 5, 4, 7, 5, 5, 7, 5, 6, 7, 5, 7, 7, 5, 8, 7, 5, 9, 7, 6, -9, 7, 6, -8, 7, 6, -7, 7, 6, -6, 7, 6, -5, 7, 6, -4, 7, 6, -3, 7, 6, -2, 7, 6, -1, 7, 6, 0, 7, 6, 1, 7, 6, 2, 7, 6, 3, 7, 6, 4, 7, 6, 5, 7, 6, 6, 7, 6, 7, 7, 6, 8, 7, 6, 9, 7, 7, -9, 7, 7, -8, 7, 7, -7, 7, 7, -6, 7, 7, -5, 7, 7, -4, 7, 7, -3, 7, 7, -2, 7, 7, -1, 7, 7, 0, 7, 7, 1, 7, 7, 2, 7, 7, 3, 7, 7, 4, 7, 7, 5, 7, 7, 6, 7, 7, 7, 7, 7, 8, 7, 7, 9, 7, 8, -9, 7, 8, -8, 7, 8, -7, 7, 8, -6, 7, 8, -5, 7, 8, -4, 7, 8, -3, 7, 8, -2, 7, 8, -1, 7, 8, 0, 7, 8, 1, 7, 8, 2, 7, 8, 3, 7, 8, 4, 7, 8, 5, 7, 8, 6, 7, 8, 7, 7, 8, 8, 7, 8, 9, 7, 9, -9, 7, 9, -8, 7, 9, -7, 7, 9, -6, 7, 9, -5, 7, 9, -4, 7, 9, -3, 7, 9, -2, 7, 9, -1, 7, 9, 0, 7, 9, 1, 7, 9, 2, 7, 9, 3, 7, 9, 4, 7, 9, 5, 7, 9, 6, 7, 9, 7, 7, 9, 8, 7, 9, 9, 8, -9, -9, 8, -9, -8, 8, -9, -7, 8, -9, -6, 8, -9, -5, 8, -9, -4, 8, -9, -3, 8, -9, -2, 8, -9, -1, 8, -9, 0, 8, -9, 1, 8, -9, 2, 8, -9, 3, 8, -9, 4, 8, -9, 5, 8, -9, 6, 8, -9, 7, 8, -9, 8, 8, -9, 9, 8, -8, -9, 8, -8, -8, 8, -8, -7, 8, -8, -6, 8, -8, -5, 8, -8, -4, 8, -8, -3, 8, -8, -2, 8, -8, -1, 8, -8, 0, 8, -8, 1, 8, -8, 2, 8, -8, 3, 8, -8, 4, 8, -8, 5, 8, -8, 6, 8, -8, 7, 8, -8, 8, 8, -8, 9, 8, -7, -9, 8, -7, -8, 8, -7, -7, 8, -7, -6, 8, -7, -5, 8, -7, -4, 8, -7, -3, 8, -7, -2, 8, -7, -1, 8, -7, 0, 8, -7, 1, 8, -7, 2, 8, -7, 3, 8, -7, 4, 8, -7, 5, 8, -7, 6, 8, -7, 7, 8, -7, 8, 8, -7, 9, 8, -6, -9, 8, -6, -8, 8, -6, -7, 8, -6, -6, 8, -6, -5, 8, -6, -4, 8, -6, -3, 8, -6, -2, 8, -6, -1, 8, -6, 0, 8, -6, 1, 8, -6, 2, 8, -6, 3, 8, -6, 4, 8, -6, 5, 8, -6, 6, 8, -6, 7, 8, -6, 8, 8, -6, 9, 8, -5, -9, 8, -5, -8, 8, -5, -7, 8, -5, -6, 8, -5, -5, 8, -5, -4, 8, -5, -3, 8, -5, -2, 8, -5, -1, 8, -5, 0, 8, -5, 1, 8, -5, 2, 8, -5, 3, 8, -5, 4, 8, -5, 5, 8, -5, 6, 8, -5, 7, 8, -5, 8, 8, -5, 9, 8, -4, -9, 8, -4, -8, 8, -4, -7, 8, -4, -6, 8, -4, -5, 8, -4, -4, 8, -4, -3, 8, -4, -2, 8, -4, -1, 8, -4, 0, 8, -4, 1, 8, -4, 2, 8, -4, 3, 8, -4, 4, 8, -4, 5, 8, -4, 6, 8, -4, 7, 8, -4, 8, 8, -4, 9, 8, -3, -9, 8, -3, -8, 
8, -3, -7, 8, -3, -6, 8, -3, -5, 8, -3, -4, 8, -3, -3, 8, -3, -2, 8, -3, -1, 8, -3, 0, 8, -3, 1, 8, -3, 2, 8, -3, 3, 8, -3, 4, 8, -3, 5, 8, -3, 6, 8, -3, 7, 8, -3, 8, 8, -3, 9, 8, -2, -9, 8, -2, -8, 8, -2, -7, 8, -2, -6, 8, -2, -5, 8, -2, -4, 8, -2, -3, 8, -2, -2, 8, -2, -1, 8, -2, 0, 8, -2, 1, 8, -2, 2, 8, -2, 3, 8, -2, 4, 8, -2, 5, 8, -2, 6, 8, -2, 7, 8, -2, 8, 8, -2, 9, 8, -1, -9, 8, -1, -8, 8, -1, -7, 8, -1, -6, 8, -1, -5, 8, -1, -4, 8, -1, -3, 8, -1, -2, 8, -1, -1, 8, -1, 0, 8, -1, 1, 8, -1, 2, 8, -1, 3, 8, -1, 4, 8, -1, 5, 8, -1, 6, 8, -1, 7, 8, -1, 8, 8, -1, 9, 8, 0, -9, 8, 0, -8, 8, 0, -7, 8, 0, -6, 8, 0, -5, 8, 0, -4, 8, 0, -3, 8, 0, -2, 8, 0, -1, 8, 0, 0, 8, 0, 1, 8, 0, 2, 8, 0, 3, 8, 0, 4, 8, 0, 5, 8, 0, 6, 8, 0, 7, 8, 0, 8, 8, 0, 9, 8, 1, -9, 8, 1, -8, 8, 1, -7, 8, 1, -6, 8, 1, -5, 8, 1, -4, 8, 1, -3, 8, 1, -2, 8, 1, -1, 8, 1, 0, 8, 1, 1, 8, 1, 2, 8, 1, 3, 8, 1, 4, 8, 1, 5, 8, 1, 6, 8, 1, 7, 8, 1, 8, 8, 1, 9, 8, 2, -9, 8, 2, -8, 8, 2, -7, 8, 2, -6, 8, 2, -5, 8, 2, -4, 8, 2, -3, 8, 2, -2, 8, 2, -1, 8, 2, 0, 8, 2, 1, 8, 2, 2, 8, 2, 3, 8, 2, 4, 8, 2, 5, 8, 2, 6, 8, 2, 7, 8, 2, 8, 8, 2, 9, 8, 3, -9, 8, 3, -8, 8, 3, -7, 8, 3, -6, 8, 3, -5, 8, 3, -4, 8, 3, -3, 8, 3, -2, 8, 3, -1, 8, 3, 0, 8, 3, 1, 8, 3, 2, 8, 3, 3, 8, 3, 4, 8, 3, 5, 8, 3, 6, 8, 3, 7, 8, 3, 8, 8, 3, 9, 8, 4, -9, 8, 4, -8, 8, 4, -7, 8, 4, -6, 8, 4, -5, 8, 4, -4, 8, 4, -3, 8, 4, -2, 8, 4, -1, 8, 4, 0, 8, 4, 1, 8, 4, 2, 8, 4, 3, 8, 4, 4, 8, 4, 5, 8, 4, 6, 8, 4, 7, 8, 4, 8, 8, 4, 9, 8, 5, -9, 8, 5, -8, 8, 5, -7, 8, 5, -6, 8, 5, -5, 8, 5, -4, 8, 5, -3, 8, 5, -2, 8, 5, -1, 8, 5, 0, 8, 5, 1, 8, 5, 2, 8, 5, 3, 8, 5, 4, 8, 5, 5, 8, 5, 6, 8, 5, 7, 8, 5, 8, 8, 5, 9, 8, 6, -9, 8, 6, -8, 8, 6, -7, 8, 6, -6, 8, 6, -5, 8, 6, -4, 8, 6, -3, 8, 6, -2, 8, 6, -1, 8, 6, 0, 8, 6, 1, 8, 6, 2, 8, 6, 3, 8, 6, 4, 8, 6, 5, 8, 6, 6, 8, 6, 7, 8, 6, 8, 8, 6, 9, 8, 7, -9, 8, 7, -8, 8, 7, -7, 8, 7, -6, 8, 7, -5, 8, 7, -4, 8, 7, -3, 8, 7, -2, 8, 7, -1, 8, 7, 0, 8, 7, 1, 8, 7, 2, 8, 7, 3, 8, 7, 4, 8, 7, 5, 8, 7, 6, 8, 7, 7, 8, 7, 8, 8, 7, 9, 8, 8, -9, 8, 8, -8, 8, 8, -7, 8, 8, -6, 8, 8, -5, 8, 8, -4, 8, 8, -3, 8, 8, -2, 8, 8, -1, 8, 8, 0, 8, 8, 1, 8, 8, 2, 8, 8, 3, 8, 8, 4, 8, 8, 5, 8, 8, 6, 8, 8, 7, 8, 8, 8, 8, 8, 9, 8, 9, -9, 8, 9, -8, 8, 9, -7, 8, 9, -6, 8, 9, -5, 8, 9, -4, 8, 9, -3, 8, 9, -2, 8, 9, -1, 8, 9, 0, 8, 9, 1, 8, 9, 2, 8, 9, 3, 8, 9, 4, 8, 9, 5, 8, 9, 6, 8, 9, 7, 8, 9, 8, 8, 9, 9, 9, -9, -9, 9, -9, -8, 9, -9, -7, 9, -9, -6, 9, -9, -5, 9, -9, -4, 9, -9, -3, 9, -9, -2, 9, -9, -1, 9, -9, 0, 9, -9, 1, 9, -9, 2, 9, -9, 3, 9, -9, 4, 9, -9, 5, 9, -9, 6, 9, -9, 7, 9, -9, 8, 9, -9, 9, 9, -8, -9, 9, -8, -8, 9, -8, -7, 9, -8, -6, 9, -8, -5, 9, -8, -4, 9, -8, -3, 9, -8, -2, 9, -8, -1, 9, -8, 0, 9, -8, 1, 9, -8, 2, 9, -8, 3, 9, -8, 4, 9, -8, 5, 9, -8, 6, 9, -8, 7, 9, -8, 8, 9, -8, 9, 9, -7, -9, 9, -7, -8, 9, -7, -7, 9, -7, -6, 9, -7, -5, 9, -7, -4, 9, -7, -3, 9, -7, -2, 9, -7, -1, 9, -7, 0, 9, -7, 1, 9, -7, 2, 9, -7, 3, 9, -7, 4, 9, -7, 5, 9, -7, 6, 9, -7, 7, 9, -7, 8, 9, -7, 9, 9, -6, -9, 9, -6, -8, 9, -6, -7, 9, -6, -6, 9, -6, -5, 9, -6, -4, 9, -6, -3, 9, -6, -2, 9, -6, -1, 9, -6, 0, 9, -6, 1, 9, -6, 2, 9, -6, 3, 9, -6, 4, 9, -6, 5, 9, -6, 6, 9, -6, 7, 9, -6, 8, 9, -6, 9, 9, -5, -9, 9, -5, -8, 9, -5, -7, 9, -5, -6, 9, -5, -5, 9, -5, -4, 9, -5, -3, 9, -5, -2, 9, -5, -1, 9, -5, 0, 9, -5, 1, 9, -5, 2, 9, -5, 3, 9, -5, 4, 9, -5, 5, 9, -5, 6, 9, -5, 7, 9, -5, 8, 9, -5, 9, 9, -4, -9, 9, -4, -8, 9, -4, -7, 9, -4, -6, 9, -4, -5, 9, -4, -4, 9, -4, -3, 9, -4, -2, 9, -4, -1, 9, -4, 0, 9, -4, 1, 9, -4, 2, 9, -4, 3, 9, -4, 4, 9, -4, 5, 9, -4, 6, 9, -4, 7, 9, 
-4, 8, 9, -4, 9, 9, -3, -9, 9, -3, -8, 9, -3, -7, 9, -3, -6, 9, -3, -5, 9, -3, -4, 9, -3, -3, 9, -3, -2, 9, -3, -1, 9, -3, 0, 9, -3, 1, 9, -3, 2, 9, -3, 3, 9, -3, 4, 9, -3, 5, 9, -3, 6, 9, -3, 7, 9, -3, 8, 9, -3, 9, 9, -2, -9, 9, -2, -8, 9, -2, -7, 9, -2, -6, 9, -2, -5, 9, -2, -4, 9, -2, -3, 9, -2, -2, 9, -2, -1, 9, -2, 0, 9, -2, 1, 9, -2, 2, 9, -2, 3, 9, -2, 4, 9, -2, 5, 9, -2, 6, 9, -2, 7, 9, -2, 8, 9, -2, 9, 9, -1, -9, 9, -1, -8, 9, -1, -7, 9, -1, -6, 9, -1, -5, 9, -1, -4, 9, -1, -3, 9, -1, -2, 9, -1, -1, 9, -1, 0, 9, -1, 1, 9, -1, 2, 9, -1, 3, 9, -1, 4, 9, -1, 5, 9, -1, 6, 9, -1, 7, 9, -1, 8, 9, -1, 9, 9, 0, -9, 9, 0, -8, 9, 0, -7, 9, 0, -6, 9, 0, -5, 9, 0, -4, 9, 0, -3, 9, 0, -2, 9, 0, -1, 9, 0, 0, 9, 0, 1, 9, 0, 2, 9, 0, 3, 9, 0, 4, 9, 0, 5, 9, 0, 6, 9, 0, 7, 9, 0, 8, 9, 0, 9, 9, 1, -9, 9, 1, -8, 9, 1, -7, 9, 1, -6, 9, 1, -5, 9, 1, -4, 9, 1, -3, 9, 1, -2, 9, 1, -1, 9, 1, 0, 9, 1, 1, 9, 1, 2, 9, 1, 3, 9, 1, 4, 9, 1, 5, 9, 1, 6, 9, 1, 7, 9, 1, 8, 9, 1, 9, 9, 2, -9, 9, 2, -8, 9, 2, -7, 9, 2, -6, 9, 2, -5, 9, 2, -4, 9, 2, -3, 9, 2, -2, 9, 2, -1, 9, 2, 0, 9, 2, 1, 9, 2, 2, 9, 2, 3, 9, 2, 4, 9, 2, 5, 9, 2, 6, 9, 2, 7, 9, 2, 8, 9, 2, 9, 9, 3, -9, 9, 3, -8, 9, 3, -7, 9, 3, -6, 9, 3, -5, 9, 3, -4, 9, 3, -3, 9, 3, -2, 9, 3, -1, 9, 3, 0, 9, 3, 1, 9, 3, 2, 9, 3, 3, 9, 3, 4, 9, 3, 5, 9, 3, 6, 9, 3, 7, 9, 3, 8, 9, 3, 9, 9, 4, -9, 9, 4, -8, 9, 4, -7, 9, 4, -6, 9, 4, -5, 9, 4, -4, 9, 4, -3, 9, 4, -2, 9, 4, -1, 9, 4, 0, 9, 4, 1, 9, 4, 2, 9, 4, 3, 9, 4, 4, 9, 4, 5, 9, 4, 6, 9, 4, 7, 9, 4, 8, 9, 4, 9, 9, 5, -9, 9, 5, -8, 9, 5, -7, 9, 5, -6, 9, 5, -5, 9, 5, -4, 9, 5, -3, 9, 5, -2, 9, 5, -1, 9, 5, 0, 9, 5, 1, 9, 5, 2, 9, 5, 3, 9, 5, 4, 9, 5, 5, 9, 5, 6, 9, 5, 7, 9, 5, 8, 9, 5, 9, 9, 6, -9, 9, 6, -8, 9, 6, -7, 9, 6, -6, 9, 6, -5, 9, 6, -4, 9, 6, -3, 9, 6, -2, 9, 6, -1, 9, 6, 0, 9, 6, 1, 9, 6, 2, 9, 6, 3, 9, 6, 4, 9, 6, 5, 9, 6, 6, 9, 6, 7, 9, 6, 8, 9, 6, 9, 9, 7, -9, 9, 7, -8, 9, 7, -7, 9, 7, -6, 9, 7, -5, 9, 7, -4, 9, 7, -3, 9, 7, -2, 9, 7, -1, 9, 7, 0, 9, 7, 1, 9, 7, 2, 9, 7, 3, 9, 7, 4, 9, 7, 5, 9, 7, 6, 9, 7, 7, 9, 7, 8, 9, 7, 9, 9, 8, -9, 9, 8, -8, 9, 8, -7, 9, 8, -6, 9, 8, -5, 9, 8, -4, 9, 8, -3, 9, 8, -2, 9, 8, -1, 9, 8, 0, 9, 8, 1, 9, 8, 2, 9, 8, 3, 9, 8, 4, 9, 8, 5, 9, 8, 6, 9, 8, 7, 9, 8, 8, 9, 8, 9, 9, 9, -9, 9, 9, -8, 9, 9, -7, 9, 9, -6, 9, 9, -5, 9, 9, -4, 9, 9, -3, 9, 9, -2, 9, 9, -1, 9, 9, 0, 9, 9, 1, 9, 9, 2, 9, 9, 3, 9, 9, 4, 9, 9, 5, 9, 9, 6, 9, 9, 7, 9, 9, 8, 9, 9, 9] start = time.time() np.array(onp.array(alist)) print(time.time()-start) # takes 0.00053 seconds start = time.time() np.array(alist) print(time.time()-start) # takes 23 seconds ```
Thanks for raising! What jaxlib version are you on? (I ask because this might have improved recently.) The slowness here is from two factors: (1) when called on a list of N integers like this, `jax.numpy.array` generates an XLA computation with N inputs, and (2) it's reasonable for XLA to take time that's linear in N (though at least on XLA:CPU there was a bug that made it scale super-linearly, and I _think_ that may have been squashed already). In general, if we had a list of N values backed by XLA device memory, we might need to generate a computation with N inputs and then pay the XLA compile time. But in this example paying the compile time feels silly because `jax.numpy.array` is just being called on a list of Python integers, and so it could just call `onp.array` under the hood here. We haven't implemented the logic to do that (or any more sophisticated policy), but maybe we should. jaxlib==0.1.12 Good to know you guys are aware of the issue. Again, this isn't a blocker or anything, was just surprised. This isn't any better at head, so updating jaxlib won't help. Looks like the XLA changes that could help haven't landed yet, though it's clear they're working on it for XLA:CPU at least (where this effect is particularly bad). @j-towns did a nice investigation of this issue in https://github.com/google/jax/issues/919 and saw that compilation is indeed superlinear. Thanks for the detailed analysis. This isn't a particularly huge blocker for me since I can always convert to a numpy array first, but glad you guys still dug into it.
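The patch attached to this record tackles the compile-time blow-up by chunking wide concatenations. A standalone sketch of that chunking idea — the chunk size of 16 mirrors the patch, everything else here is illustrative:

```python
import jax.numpy as jnp

def chunked_concatenate(arrays, axis=0, k=16):
    # Form a tree of narrow concatenations instead of one very wide one,
    # so no single XLA op has thousands of operands.
    arrays = list(arrays)
    while len(arrays) > 1:
        arrays = [jnp.concatenate(arrays[i:i + k], axis=axis)
                  for i in range(0, len(arrays), k)]
    return arrays[0]

parts = [jnp.ones((3,)) for _ in range(1000)]
out = chunked_concatenate(parts)   # shape (3000,)
```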
2019-06-25T14:01:08
google/jax
922
google__jax-922
[ "694" ]
6c3cf13f659ef4002a92e486c0c59c37a9a6cd02
diff --git a/jax/scipy/special.py b/jax/scipy/special.py --- a/jax/scipy/special.py +++ b/jax/scipy/special.py @@ -216,7 +216,7 @@ def _ndtr(x): lax.select(lax.gt(w, dtype(0.)), dtype(2.) - lax.erfc(z), lax.erfc(z))) - return 0.5 * y + return dtype(0.5) * y def ndtri(p): @@ -355,6 +355,7 @@ def _create_polynomial(var, coeffs): return x_nan_replaced +@custom_transforms def log_ndtr(x, series_order=3): r"""Log Normal distribution function. @@ -488,3 +489,14 @@ def _log_ndtr_asymptotic_series(x, series_order): def _double_factorial(n): """The double factorial function for small Python integer `n`.""" return onp.prod(onp.arange(n, 1, -2)) + + +_norm_logpdf_constant = onp.log(onp.sqrt(2 * onp.pi)) + +def _norm_logpdf(x): + neg_half = _constant_like(x, -0.5) + log_normalizer = _constant_like(x, _norm_logpdf_constant) + return lax.sub(lax.mul(neg_half, lax.square(x)), log_normalizer) + +defjvp(log_ndtr, + lambda g, ans, x: lax.mul(g, lax.exp(lax.sub(_norm_logpdf(x), ans))))
diff --git a/tests/lax_scipy_test.py b/tests/lax_scipy_test.py --- a/tests/lax_scipy_test.py +++ b/tests/lax_scipy_test.py @@ -65,8 +65,8 @@ def op_record(name, nargs, dtypes, rng, test_grad, test_name=None): # TODO: gammaln has slightly high error. op_record("gammaln", 1, float_dtypes, jtu.rand_positive(), False), op_record("logit", 1, float_dtypes, jtu.rand_small_positive(), False), - op_record("log_ndtr", 1, float_dtypes, jtu.rand_small(), True), - op_record("ndtri", 1, float_dtypes, jtu.rand_uniform(0., 1.), True), + op_record("log_ndtr", 1, float_dtypes, jtu.rand_default(), True), + op_record("ndtri", 1, float_dtypes, jtu.rand_uniform(0.05, 0.95), True), op_record("ndtr", 1, float_dtypes, jtu.rand_default(), True), ] @@ -119,7 +119,7 @@ def testScipySpecialFun(self, scipy_op, lax_op, rng, shapes, dtypes, self._CompileAndCheck(lax_op, args_maker, check_dtypes=True) if test_autodiff: - jtu.check_grads(lax_op, args, order=1, atol=1e-3, rtol=3e-3) + jtu.check_grads(lax_op, args, order=1, atol=1e-3, rtol=3e-3, eps=1e-3) if __name__ == "__main__":
Higher order derivatives of norm.logcdf seem to have numerical problems at low input values

Hi there, I think I might have found a problem while investigating https://github.com/google/jax/issues/475 (though still checking!). It looks like second derivatives of the logcdf and higher have problems for small input values (<-10) -- see reproducible code below:

```python
import os
os.environ['XLA_FLAGS'] = '--xla_cpu_enable_fast_math=false'

import jax.numpy as np
from jax.scipy.stats import norm
from jax import grad, vmap

x_vals = np.linspace(-30, 20, 100)

first_grad = vmap(grad(norm.logcdf))(x_vals)
second_grad = vmap(grad(grad(norm.logcdf)))(x_vals)
third_grad = vmap(grad(grad(grad(norm.logcdf))))(x_vals)

count_nans = lambda x: int(np.sum(np.isnan(x)))

print(f'Found {count_nans(first_grad)} NaNs in first derivative.')
print(f'Found {count_nans(second_grad)} NaNs in second derivative.')
print(f'Found {count_nans(third_grad)} NaNs in third derivative.')
```

Output is:

```
Found 0 NaNs in first derivative.
Found 40 NaNs in second derivative.
Found 42 NaNs in third derivative.
```

I need derivatives up to third order and have been able to get around the issue by defining the ratio:

```python
def pdf_cdf_ratio(x):
    return np.exp(norm.logpdf(x) - norm.logcdf(x))
```

which is the gradient of the logcdf (subtracting and then exponentiating is much more stable than computing the ratio directly). You can express the second and third derivatives in terms of the ratio. Here's the full code:

```python
def pdf_cdf_ratio(x):
    return np.exp(norm.logpdf(x) - norm.logcdf(x))

def pdf_cdf_ratio_grad(x):
    g = pdf_cdf_ratio(x)
    return -g * (x + g)

def pdf_cdf_ratio_grad_grad(x):
    g = pdf_cdf_ratio(x)
    gp = pdf_cdf_ratio_grad(x)
    return -gp * (x + g) - g * (1 + gp)
```

With these alternatives, I get:

```python
first_grad_alt = pdf_cdf_ratio(x_vals)
second_grad_alt = pdf_cdf_ratio_grad(x_vals)
third_grad_alt = pdf_cdf_ratio_grad_grad(x_vals)

count_nans = lambda x: int(np.sum(np.isnan(x)))

print(f'Found {count_nans(first_grad_alt)} NaNs in first derivative.')
print(f'Found {count_nans(second_grad_alt)} NaNs in second derivative.')
print(f'Found {count_nans(third_grad_alt)} NaNs in third derivative.')
```

```
Found 0 NaNs in first derivative.
Found 0 NaNs in second derivative.
Found 0 NaNs in third derivative.
```

Mind you, plotting the functions shows that although the third derivative has no `nan`s, it gets pretty wonky below values of -10, so maybe there's a better alternative. Anyway, I'm not sure what to do to fix this with autodiff, and whether that's even possible, but thought I'd raise this as a possible problem!
It may also be of interest (apologies if not!) that autograd doesn't have this problem:

```python
import autograd.numpy as np
from autograd.scipy.stats import norm
from autograd import elementwise_grad

inputs = np.linspace(-50, 20, 100)

fun = norm.logcdf
grad_fun = elementwise_grad(fun)
grad_grad_fun = elementwise_grad(grad_fun)
grad_grad_grad_fun = elementwise_grad(grad_grad_fun)

f = fun(inputs)
fp = grad_fun(inputs)
fpp = grad_grad_fun(inputs)
fppp = grad_grad_grad_fun(inputs)

[np.any(np.isnan(x)) for x in [f, fp, fpp, fppp]]
```

Gives:

```
[False, False, False, False]
```

And the plots actually look nicer than what my ratio trick gives, so it seems to have a nice solution there. Looks like there, the vjp is defined using exp(logpdf - logcdf): https://github.com/HIPS/autograd/blob/master/autograd/scipy/stats/norm.py

Sorry if this was all obvious already, but I thought I'd add this information in case it's helpful! EDIT: I could try to implement this using https://github.com/google/jax/pull/636 if that would be useful.

Our guess is that differentiating the (rather complicated!) implementation of `log_ndtr` isn't working that well, and that problem can be fixed by adding a derivative that treats `log_ndtr` as a black box, much like autograd does.
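A sketch of the "black box derivative" idea from the comment above, written against the present-day `jax.custom_jvp` API rather than the `custom_transforms`/`defjvp` machinery the patch in this record actually uses — the rule is the same one autograd applies, d/dx log Φ(x) = exp(logpdf(x) − logcdf(x)):

```python
import jax
import jax.numpy as jnp
from jax.scipy.stats import norm

@jax.custom_jvp
def log_ndtr(x):
    return norm.logcdf(x)

@log_ndtr.defjvp
def log_ndtr_jvp(primals, tangents):
    x, = primals
    x_dot, = tangents
    ans = norm.logcdf(x)
    # Stable derivative: phi(x) / Phi(x), computed in log space.
    return ans, x_dot * jnp.exp(norm.logpdf(x) - ans)
```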
2019-06-25T14:39:17
google/jax
942
google__jax-942
[ "940" ]
a681c9da7e353baa179135a51f0390e8c3a3e027
diff --git a/jax/lax/lax.py b/jax/lax/lax.py --- a/jax/lax/lax.py +++ b/jax/lax/lax.py @@ -1504,7 +1504,6 @@ def _brcast_to(x, shape): return broadcast(x, shape) -_f32 = {onp.float32} _float = {onp.floating} _complex = {onp.complexfloating} _complex_elem_types = {onp.float32, onp.float64} @@ -1825,8 +1824,8 @@ def _conv_general_dilated_shape_rule( def _conv_general_dilated_dtype_rule( lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation, dimension_numbers, **unused_kwargs): - return binop_dtype_rule(_input_dtype, [_f32, _f32], 'conv_general_dilated', - lhs, rhs) + return binop_dtype_rule(_input_dtype, [_float, _float], + 'conv_general_dilated', lhs, rhs) _conv_spec_transpose = lambda spec: (spec[1], spec[0]) + spec[2:] _conv_sdims = lambda spec: spec[2:]
diff --git a/tests/lax_test.py b/tests/lax_test.py --- a/tests/lax_test.py +++ b/tests/lax_test.py @@ -309,7 +309,7 @@ def testConcatenateAgainstNumpy(self, dim, base_shape, dtype, num_arrs, rng): for lhs_shape, rhs_shape in [ ((b, i, 9, 10), (j, i, 4, 5)) for b, i, j in itertools.product([2, 3], repeat=3)] - for dtype in [onp.float32] + for dtype in float_dtypes for strides in [(1, 1), (1, 2), (2, 1)] for padding in ["VALID", "SAME"] for rng in [jtu.rand_small()])) @@ -331,7 +331,7 @@ def fun(lhs, rhs): for lhs_shape, rhs_shape in [ ((b, i, 9, 10), (j, i, 4, 5)) for b, i, j in itertools.product([2, 3], repeat=3)] - for dtype in [onp.float32] + for dtype in float_dtypes for strides in [(1, 1), (1, 2), (2, 1)] for padding in ["VALID", "SAME"] for rng in [jtu.rand_small()])) @@ -354,7 +354,8 @@ def testConvAgainstNumpy(self, lhs_shape, rhs_shape, dtype, strides, padding, for lhs_shape, rhs_shape in [ ((b, i, 9, 10), (j, i, 4, 5)) for b, i, j in itertools.product([1, 2, 3], repeat=3)] - for dtype in [onp.float32] for strides in [(1, 1), (1, 2), (2, 1)] + for dtype in float_dtypes + for strides in [(1, 1), (1, 2), (2, 1)] for padding in [((0, 0), (0, 0)), ((1, 2), (2, 0))] for lhs_dilation, rhs_dilation in itertools.product( [(1, 1), (1, 2), (2, 2)], repeat=2) @@ -418,7 +419,7 @@ def numpy_fun(lhs, rhs): for lhs_shape, rhs_shape in [ ((b, i, 9, 10), (j, i, 4, 5)) for b, i, j in itertools.product([2, 3], repeat=3)] - for dtype in [onp.float32] for strides in [(1, 1), (2, 1)] + for dtype in float_dtypes for strides in [(1, 1), (2, 1)] for padding in [((1, 2), (2, 0))] for lhs_dilation, rhs_dilation in itertools.product( [(1, 1), (1, 2)], repeat=2) @@ -494,7 +495,7 @@ def _transpose_conv_kernel(data, kernel, dimension_numbers): for lhs_shape, rhs_shape in [ ((b, 9, 10, i), (k, k, j, i)) # NB: i,j flipped in RHS for transpose for b, i, j, k in itertools.product([2,3],[2,3],[2,3],[3,4,5])] - for dtype in [onp.float32] + for dtype in float_dtypes for strides in [(1, 1), (1, 2), (2, 1), (2, 2), (3, 3)] for padding in ["VALID", "SAME"] for dspec in [('NHWC', 'HWIO', 'NHWC'),] @@ -527,7 +528,7 @@ def fun_via_grad(lhs, rhs): for lhs_shape, rhs_shape in [ ((b, 9, 10, i), (k, k, i, j)) for b, i, j, k in itertools.product([2,3],[2,3],[2,3],[3,4,5])] - for dtype in [onp.float32] + for dtype in float_dtypes for strides in [(1, 1), (1, 2), (2, 1), (2, 2), (3, 3)] for padding in ["VALID", "SAME"] for dspec in [('NHWC', 'HWIO', 'NHWC'),] @@ -559,7 +560,7 @@ def fun_via_grad(lhs, rhs): for lhs_shape, rhs_shape in [ ((b, 10, i), (k, i, j)) for b, i, j, k in itertools.product([2,3],[2,3],[2,3],[3,4,5])] - for dtype in [onp.float32] + for dtype in float_dtypes for strides in [(1,), (2,), (3,)] for padding in ["VALID", "SAME"] for dspec in [('NHC', 'HIO', 'NHC'),] @@ -1646,7 +1647,7 @@ def testConcatenateGrad(self, dim, base_shape, dtype, num_arrs, rng): for b, i, j in itertools.product([2, 3], repeat=3)], [((4, 2, 1), (3, 2, 1), [(1,)])]) for strides in all_strides - for dtype in [onp.float32] + for dtype in float_dtypes for padding in ["VALID", "SAME"] for rng in [jtu.rand_small()])) @jtu.skip_on_devices("tpu") @@ -1678,7 +1679,7 @@ def testConvGrad(self, lhs_shape, rhs_shape, dtype, strides, padding, rng): for strides in all_strides for rhs_dil in rhs_dils for lhs_dil in lhs_dils - for dtype in [onp.float32] + for dtype in float_dtypes for padding in all_pads for rng in [jtu.rand_small()])) @jtu.skip_on_devices("tpu") @@ -1715,7 +1716,7 @@ def testConvWithGeneralPaddingGrad(self, lhs_shape, rhs_shape, dtype, 
strides, for strides in all_strides for rhs_dil in rhs_dils for lhs_dil in lhs_dils - for dtype in [onp.float32] + for dtype in float_dtypes for padding in all_pads for dim_nums, perms in [ (("NCHW", "OIHW", "NCHW"), ([0, 1, 2, 3], [0, 1, 2, 3])),
lax.conv does not support float64

The lax package currently does not support float64 convolutions. This appears to be an explicit constraint and not a bug.
https://github.com/google/jax/blob/master/jax/lax/lax.py#L1828

Feature request: allow convolutions with float64 -> float64 in full precision. TF supports this type of convolution, so I don't think there's anything inherent preventing this, but maybe it's something subtle.
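A minimal sketch of the requested behaviour, assuming the `jax_enable_x64` flag is on so the inputs really are float64 and relying on `lax.conv`'s default NCHW/OIHW layout; the shapes are arbitrary. Before the dtype-rule change in the patch above this raises a type error, afterwards it should return a float64 result:

```python
from jax.config import config
config.update("jax_enable_x64", True)

import numpy as onp
from jax import lax

lhs = onp.zeros((1, 3, 9, 10), dtype=onp.float64)   # batch, channels, H, W
rhs = onp.zeros((4, 3, 4, 5), dtype=onp.float64)    # out chan, in chan, kH, kW

out = lax.conv(lhs, rhs, window_strides=(1, 1), padding='VALID')
print(out.dtype)
```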
2019-06-27T21:18:36
google/jax
958
google__jax-958
[ "956" ]
acda3f398bc2930e6f4b9da68a973cced9478c4c
diff --git a/jax/numpy/lax_numpy.py b/jax/numpy/lax_numpy.py --- a/jax/numpy/lax_numpy.py +++ b/jax/numpy/lax_numpy.py @@ -82,7 +82,10 @@ def __instancecheck__(self, instance): # pylint: disable=invalid-name class ndarray(six.with_metaclass(_ArrayMeta, onp.ndarray)): - pass + def __init__(shape, dtype=None, buffer=None, offset=0, strides=None, + order=None): + raise TypeError("jax.numpy.ndarray() should not be instantiated explicitly." + " Use jax.numpy.array, or jax.numpy.zeros instead.") # pylint: enable=invalid-name
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py --- a/tests/lax_numpy_test.py +++ b/tests/lax_numpy_test.py @@ -1679,6 +1679,9 @@ def testReductionOfOutOfBoundsAxis(self): # Issue 888 x = lnp.ones((3, 4)) self.assertRaises(ValueError, lambda: lnp.sum(x, axis=2)) + def testIssue956(self): + self.assertRaises(TypeError, lambda: lnp.ndarray((1, 1))) + if __name__ == "__main__": absltest.main()
TypeError when taking inverse

In this case, taking the inverse in jax.numpy throws the error 'No abstraction handler for type: <class 'jax.numpy.lax_numpy.ndarray'>', while doing the same thing in numpy does not.

```
import jax.numpy as np
import numpy.random as random
import matplotlib.pyplot as plt

class KalmanFilter():
    def __init__(self):
        self.initialized = False

    def to_ndarray(self, x):
        if(type(x) is not np.ndarray):
            x_2D = np.ndarray((1, 1))
            x_2D[0, 0] = x
        else:
            x_2D = x
        return x_2D

    def initialize(self, x, A, B, H, P, Q, R):
        self.initialized = True
        x, A, B, H, P, Q, R = self.to_ndarray(x), self.to_ndarray(A), self.to_ndarray(B), self.to_ndarray(H), self.to_ndarray(P), self.to_ndarray(Q), self.to_ndarray(R)
        self.x, self.A, self.B, self.H, self.P, self.Q, self.R = x, A, B, H, P, Q, R
        self.K = np.ndarray(A.shape)

    def step(self, u, z, n = 1):
        u, z = self.to_ndarray(u), self.to_ndarray(z)
        for i in range(n):
            self.x = self.A @ self.x + self.B @ u
            self.P = self.A @ self.P @ self.A.T + self.Q
            self.K = self.P @ self.H.T @ np.linalg.inv(self.H @ self.P @ self.H.T + self.R)
            self.x = self.x + self.K @ (z - self.H @ self.x)
            self.P = self.P - self.K @ self.H @ self.P
        if(type(z) is float):
            return float(self.x)
        else:
            return self.x

    def predict(self, u, z, n = 1):
        u, z = self.to_ndarray(u), self.to_ndarray(z)
        for i in range(n):
            x_temp = self.A @ self.x + self.B @ u
            P_temp = self.A @ self.P @ self.A.T + self.Q
            K_temp = P_temp @ self.H.T @ np.linalg.inv(self.H @ P_temp @ self.H.T + self.R)
            x_temp = x_temp + K_temp @ (z - self.H @ x_temp)
        if(type(z) is not np.ndarray):
            return float(x_temp)
        else:
            return x_temp

def test_kalman_filter(steps=100, show_plot=True):
    T = steps
    x_true = 0.5
    env_noise = 0.1
    x0 = 0

    model = KalmanFilter()
    model.initialize(x0, 1, 0, 1, 1, 0, env_noise)
    loss = lambda x_true, x_pred: (x_true - x_pred)**2

    results = []
    for i in range(T):
        z = x_true + float(random.normal(0, env_noise, 1))
        x_pred = model.step(0, z)
        cur_loss = float(loss(x_true, x_pred))
        results.append(cur_loss)

    if show_plot:
        plt.plot(results)
        plt.title("KalmanFilter model on constant signal")
        plt.show(block=False)
        plt.pause(1)
        plt.close()
    print("test_kalman_filter passed")
    return

if __name__=="__main__":
    test_kalman_filter()
```
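The piece that actually trips JAX here is `np.ndarray((1, 1))`, which instantiates the abstract `jax.numpy.ndarray` class directly (the patch above turns that into an immediate TypeError); the in-place write `x_2D[0, 0] = x` would not work on immutable JAX arrays either. A hedged sketch of a `to_ndarray` that stays within supported API — the helper name is kept from the report, but the implementation is my own suggestion, not the fix adopted in the patch:

```python
import jax.numpy as np

def to_ndarray(x):
    # Build the (1, 1) array functionally instead of allocating an ndarray
    # and writing into it; jax.numpy arrays are immutable.
    x_2D = np.asarray(x)
    if x_2D.ndim == 0:
        x_2D = x_2D.reshape(1, 1)
    return x_2D

print(to_ndarray(3.0).shape)        # (1, 1)
print(to_ndarray(np.eye(2)).shape)  # (2, 2)
```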
2019-07-01T18:57:15
google/jax
960
google__jax-960
[ "947" ]
45b599017a120f6f28852da8b7e0f6aa332b5330
diff --git a/jax/api.py b/jax/api.py --- a/jax/api.py +++ b/jax/api.py @@ -989,7 +989,26 @@ def jaxpr_maker(*args, **kwargs): tree_to_pval_tuples = partial(process_pytree, pe.pack_pvals) -device_put = jit(lambda x: x) + +_traceable_device_put = jit(lambda x: x) + +def device_put(x, device_num=0): + def _device_put(x): + if isinstance(x, core.Tracer): + return _traceable_device_put(x) + + try: + a = xla.abstractify(x) + except TypeError: + raise TypeError("Argument '{}' of type {} is not a valid JAX type" + .format(x, type(x))) + + result_shape = xla.xla_shape_to_result_shape(xla.xla_shape(a)) + handler = xla.device_persistent_result_handler(result_shape) + return handler(xla.device_put(x, device_num)) + return tree_map(_device_put, x) + + device_get = _jit(lambda x: x, (), device_values=False) diff --git a/jax/interpreters/xla.py b/jax/interpreters/xla.py --- a/jax/interpreters/xla.py +++ b/jax/interpreters/xla.py @@ -179,11 +179,11 @@ class _ResultArray(tuple): pass def result_handler(result_shape): if FLAGS.jax_device_values: - return _device_persistent_result_handler(result_shape) + return device_persistent_result_handler(result_shape) else: return _pyval_result_handler(result_shape) -def _device_persistent_result_handler(result_shape): +def device_persistent_result_handler(result_shape): t = type(result_shape) if t is _ResultArray: return partial(DeviceArray, result_shape) @@ -416,7 +416,7 @@ def __init__(self, result_shape, device_buffer): def __iter__(self): bufs = self.device_buffer.destructure() - handlers = map(_device_persistent_result_handler, self.result_shapes) + handlers = map(device_persistent_result_handler, self.result_shapes) elts = [handler(buf) for handler, buf in zip(handlers, bufs)] return iter(elts) @@ -632,7 +632,7 @@ def _xla_callable(fun, device_values, *abstract_args): compiled, result_shape = _compile_jaxpr(jaxpr, consts, *abstract_args) del master, consts, jaxpr, env if device_values: - handle_result = _device_persistent_result_handler(result_shape) + handle_result = device_persistent_result_handler(result_shape) else: handle_result = _pyval_result_handler(result_shape) return partial(_execute_compiled, compiled, pval, handle_result)
`jax.device_put` has complexity superlinear in the number of arguments.

Reproduction on CPU:

```
import jax
import numpy as onp

In [9]: %time x = jax.device_put([onp.random.randn(10,5) for _ in range(100)])
CPU times: user 1.45 s, sys: 7.8 ms, total: 1.46 s
Wall time: 1.45 s

In [10]: %time x = jax.device_put([onp.random.randn(10,5) for _ in range(500)])
CPU times: user 24.8 s, sys: 0 ns, total: 24.8 s
Wall time: 24.8 s

In [11]: %time x = jax.device_put([onp.random.randn(10,5) for _ in range(700)])
CPU times: user 45.2 s, sys: 190 ms, total: 45.4 s
Wall time: 45.4 s
```
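A sketch of the shape of the fix in the patch above — transfer each leaf on its own instead of routing the whole pytree through a jitted identity, so the cost stays roughly linear in the number of leaves. This is an illustration in terms of the public tree utilities, not the patch's actual internals:

```python
import numpy as onp
import jax
from jax.tree_util import tree_map

xs = [onp.random.randn(10, 5) for _ in range(700)]

# One transfer per leaf; no single XLA computation with 700 parameters.
on_device = tree_map(jax.device_put, xs)
```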
2019-07-01T20:48:02
google/jax
965
google__jax-965
[ "946" ]
59be9b7a0450336f665becd0037baef6b588aa37
diff --git a/jax/interpreters/xla.py b/jax/interpreters/xla.py --- a/jax/interpreters/xla.py +++ b/jax/interpreters/xla.py @@ -526,13 +526,13 @@ def __iter__(self): if self.ndim == 0: raise TypeError("iteration over a 0-d array") # same as numpy error else: - return (self[i] for i in xrange(self.shape[0])) + return self._value.__iter__() def __reversed__(self): if self.ndim == 0: raise TypeError("iteration over a 0-d array") else: - return (self[i] for i in xrange(self.shape[0] - 1, -1, -1)) + return reversed(self._value) def __format__(self, format_spec): # Simulates behavior of https://github.com/numpy/numpy/pull/9883
`list(np.arange(10001))` is slow. We should improve this.
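A small sketch of the user-side workaround while iteration over device arrays is slow — pull the array back to the host once and iterate there; the patch above effectively does the same thing inside `__iter__`:

```python
import numpy as onp
import jax.numpy as jnp

x = jnp.arange(10001)

# Slow path: iterating the device array directly indexes it element by element.
# elems = list(x)

# Fast path: a single device-to-host transfer, then plain NumPy iteration.
elems = list(onp.asarray(x))
```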
2019-07-02T19:20:08