repo (string, 856 values) | pull_number (int64, 3 to 127k) | instance_id (string, 12 to 58 chars) | issue_numbers (sequence, 1 to 5 items) | base_commit (string, 40 chars) | patch (string, 67 to 1.54M chars) | test_patch (string, 0 to 107M chars) | problem_statement (string, 3 to 307k chars) | hints_text (string, 0 to 908k chars) | created_at (timestamp[s]) |
---|---|---|---|---|---|---|---|---|---|
google/jax | 6,822 | google__jax-6822 | [
"6788"
] | 070295494a31153161c68a6f4002c1dc6b0e563b | diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -4676,6 +4676,11 @@ def take(a, indices, axis: Optional[int] = None, out=None, mode=None):
index_dims = len(shape(indices))
slice_sizes = list(shape(a))
+ if slice_sizes[axis_idx] == 0:
+ if indices.size != 0:
+ raise IndexError("Cannot do a non-empty jnp.take() from an empty axis.")
+ return a
+
slice_sizes[axis_idx] = _min(indices.size, 1)
dnums = lax.GatherDimensionNumbers(
offset_dims=tuple(
| diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py
--- a/tests/lax_numpy_test.py
+++ b/tests/lax_numpy_test.py
@@ -3933,6 +3933,14 @@ def testTakeEmpty(self):
jnp.array([], dtype=jnp.float32),
jnp.take(jnp.array([], jnp.float32), jnp.array([], jnp.int32)))
+ np.testing.assert_array_equal(
+ jnp.ones((2, 0, 4), dtype=jnp.float32),
+ jnp.take(jnp.ones((2, 0, 4), dtype=jnp.float32), jnp.array([], jnp.int32),
+ axis=1))
+
+ with self.assertRaisesRegex(IndexError, "non-empty jnp.take"):
+ jnp.take(jnp.ones((2, 0, 4), dtype=jnp.float32),
+ jnp.array([0], jnp.int32), axis=1)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_index={}_axis={}".format(
| Shape checking rule issue
```pytb
---------------------------------------------------------------------------
LayerError Traceback (most recent call last)
<ipython-input-75-a4586cbd092a> in <module>
1 # sample prediction
----> 2 tmp_pred = model(x)
3 print(type(tmp_pred))
4 print(f"tmp_pred has shape: {tmp_pred.shape}")
/opt/conda/lib/python3.7/site-packages/trax/layers/base.py in __call__(self, x, weights, state, rng)
165 self.state = state # Needed if the model wasn't fully initialized.
166 state = self.state
--> 167 outputs, new_state = self.pure_fn(x, weights, state, rng)
168 self.state = new_state
169 self.weights = weights
/opt/conda/lib/python3.7/site-packages/trax/layers/base.py in pure_fn(self, x, weights, state, rng, use_cache)
448 name, trace = self._name, _short_traceback(skip=3)
449 raise LayerError(name, 'pure_fn',
--> 450 self._caller, signature(x), trace) from None
451
452 def output_signature(self, input_signature):
LayerError: Exception passing through layer Serial (in pure_fn):
layer created in file [...]/<ipython-input-68-50afc500007a>, line 16
layer input shapes: ShapeDtype{shape:(10, 27), dtype:int64}
File [...]/trax/layers/combinators.py, line 88, in forward
outputs, s = layer.pure_fn(inputs, w, s, rng, use_cache=True)
LayerError: Exception passing through layer Embedding_369_15 (in pure_fn):
layer created in file [...]/<ipython-input-68-50afc500007a>, line 13
layer input shapes: ShapeDtype{shape:(10, 27), dtype:int64}
File [...]/trax/layers/core.py, line 150, in forward
return jnp.take(self.weights, x, axis=0)
File [...]/jax/numpy/lax_numpy.py, line 3298, in take
slice_sizes=tuple(slice_sizes))
File [...]/jax/lax/lax.py, line 835, in gather
slice_sizes=canonicalize_shape(slice_sizes))
File [...]/site-packages/jax/core.py, line 273, in bind
return self.impl(*args, **kwargs)
File [...]/jax/interpreters/xla.py, line 228, in apply_primitive
compiled_fun = xla_primitive_callable(prim, *unsafe_map(arg_spec, args), **params)
File [...]/jax/interpreters/xla.py, line 262, in xla_primitive_callable
*avals, **params)
File [...]/jax/interpreters/xla.py, line 320, in primitive_computation
raise RuntimeError(msg) from e
RuntimeError: Invalid argument: Slice size at index 0 in gather op is out of range, must be within [0, 1), got 1.:
This is a bug in JAX's shape-checking rules; please report it!
```
| Thanks! The traceback on its own is not particularly helpful for diagnosing the problem. Can you provide a minimal code snippet that reproduces this error? | 2021-05-24T16:00:40 |
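A minimal sketch of the post-fix behavior, mirroring the `testTakeEmpty` cases added in the test patch above (shapes and the error message are taken from those tests):
```python
import jax.numpy as jnp

x = jnp.ones((2, 0, 4), dtype=jnp.float32)

# Empty indices along the zero-length axis: the (empty) input comes back unchanged.
out = jnp.take(x, jnp.array([], jnp.int32), axis=1)
assert out.shape == (2, 0, 4)

# Non-empty indices along the zero-length axis: a clear IndexError instead of the
# XLA "shape-checking rules" RuntimeError shown in the report above.
try:
    jnp.take(x, jnp.array([0], jnp.int32), axis=1)
except IndexError as err:
    print(err)  # Cannot do a non-empty jnp.take() from an empty axis.
```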
google/jax | 6,839 | google__jax-6839 | [
"6831"
] | 44fcd710919ff560f32fa647e308f48b0e3e4a21 | diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -1082,15 +1082,18 @@ def _sinc_maclaurin_jvp(k, primals, tangents):
(x,), (t,) = primals, tangents
return _sinc_maclaurin(k, x), _sinc_maclaurin(k + 1, x) * t
+_ARRAY_VIEW_DOC = """
+The JAX version of this function will return a copy rather than a view of the input.
+"""
-@_wraps(np.transpose)
+@_wraps(np.transpose, lax_description=_ARRAY_VIEW_DOC)
def transpose(a, axes=None):
_check_arraylike("transpose", a)
axes = np.arange(ndim(a))[::-1] if axes is None else axes
return lax.transpose(a, axes)
-@_wraps(np.rot90)
+@_wraps(np.rot90, lax_description=_ARRAY_VIEW_DOC)
def rot90(m, k=1, axes=(0, 1)):
_check_arraylike("rot90", m)
ax1, ax2 = axes
@@ -1112,7 +1115,7 @@ def rot90(m, k=1, axes=(0, 1)):
return flip(transpose(m, perm), ax2)
-@_wraps(np.flip)
+@_wraps(np.flip, lax_description=_ARRAY_VIEW_DOC)
def flip(m, axis: Optional[Union[int, Tuple[int, ...]]] = None):
_check_arraylike("flip", m)
if axis is None:
@@ -1121,12 +1124,12 @@ def flip(m, axis: Optional[Union[int, Tuple[int, ...]]] = None):
return lax.rev(m, [_canonicalize_axis(ax, ndim(m)) for ax in axis])
-@_wraps(np.fliplr)
+@_wraps(np.fliplr, lax_description=_ARRAY_VIEW_DOC)
def fliplr(m):
return flip(m, 1)
-@_wraps(np.flipud)
+@_wraps(np.flipud, lax_description=_ARRAY_VIEW_DOC)
def flipud(m):
return flip(m, 0)
@@ -1304,7 +1307,7 @@ def isrealobj(x):
return not iscomplexobj(x)
-@_wraps(np.reshape)
+@_wraps(np.reshape, lax_description=_ARRAY_VIEW_DOC)
def reshape(a, newshape, order="C"):
_check_arraylike("reshape", a)
try:
@@ -1353,7 +1356,7 @@ def _transpose(a, *args):
axis = _ensure_index_tuple(args)
return transpose(a, axis)
-@_wraps(np.ravel)
+@_wraps(np.ravel, lax_description=_ARRAY_VIEW_DOC)
def ravel(a, order="C"):
_check_arraylike("ravel", a)
if order == "K":
@@ -1433,7 +1436,7 @@ def resize(a, new_shape):
return reshape(a, new_shape)
-@_wraps(np.squeeze)
+@_wraps(np.squeeze, lax_description=_ARRAY_VIEW_DOC)
def squeeze(a, axis: Optional[Union[int, Tuple[int, ...]]] = None):
_check_arraylike("squeeze", a)
if axis is None:
@@ -1452,7 +1455,7 @@ def expand_dims(a, axis: Union[int, Tuple[int, ...]]):
return lax.expand_dims(a, axis)
-@_wraps(np.swapaxes)
+@_wraps(np.swapaxes, lax_description=_ARRAY_VIEW_DOC)
def swapaxes(a, axis1: int, axis2: int):
_check_arraylike("swapaxes", a)
perm = np.arange(ndim(a))
@@ -1460,7 +1463,7 @@ def swapaxes(a, axis1: int, axis2: int):
return lax.transpose(a, perm)
-@_wraps(np.moveaxis)
+@_wraps(np.moveaxis, lax_description=_ARRAY_VIEW_DOC)
def moveaxis(a, source: Union[int, Sequence[int]],
destination: Union[int, Sequence[int]]):
_check_arraylike("moveaxis", a)
@@ -1829,7 +1832,7 @@ def _split(op, ary, indices_or_sections, axis=0):
return [lax.slice(ary, _subval(starts, axis, start), _subval(ends, axis, end))
for start, end in zip(split_indices[:-1], split_indices[1:])]
-@_wraps(np.split)
+@_wraps(np.split, lax_description=_ARRAY_VIEW_DOC)
def split(ary, indices_or_sections, axis: int = 0):
return _split("split", ary, indices_or_sections, axis=axis)
@@ -2911,8 +2914,7 @@ def block(arrays):
out, _ = _block(arrays)
return out
-
-@_wraps(np.atleast_1d, update_doc=False)
+@_wraps(np.atleast_1d, update_doc=False, lax_description=_ARRAY_VIEW_DOC)
def atleast_1d(*arys):
if len(arys) == 1:
arr = asarray(arys[0])
@@ -2921,7 +2923,7 @@ def atleast_1d(*arys):
return [atleast_1d(arr) for arr in arys]
-@_wraps(np.atleast_2d, update_doc=False)
+@_wraps(np.atleast_2d, update_doc=False, lax_description=_ARRAY_VIEW_DOC)
def atleast_2d(*arys):
if len(arys) == 1:
arr = asarray(arys[0])
@@ -2935,7 +2937,7 @@ def atleast_2d(*arys):
return [atleast_2d(arr) for arr in arys]
-@_wraps(np.atleast_3d, update_doc=False)
+@_wraps(np.atleast_3d, update_doc=False, lax_description=_ARRAY_VIEW_DOC)
def atleast_3d(*arys):
if len(arys) == 1:
arr = asarray(arys[0])
@@ -3243,7 +3245,7 @@ def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis: int = 0):
return lax.convert_element_type(res, dtype)
-@_wraps(np.meshgrid)
+@_wraps(np.meshgrid, lax_description=_ARRAY_VIEW_DOC)
def meshgrid(*args, **kwargs):
indexing = kwargs.get("indexing", "xy")
sparse = kwargs.get("sparse", False)
@@ -3804,7 +3806,7 @@ def diag_indices_from(arr):
return diag_indices(arr.shape[0], ndim=arr.ndim)
-@_wraps(np.diagonal)
+@_wraps(np.diagonal, lax_description=_ARRAY_VIEW_DOC)
def diagonal(a, offset=0, axis1: int = 0, axis2: int = 1):
_check_arraylike("diagonal", a)
a_shape = shape(a)
@@ -3830,7 +3832,7 @@ def diagonal(a, offset=0, axis1: int = 0, axis2: int = 1):
return lax.slice_in_dim(d, 0, diag_size, axis=-1)
-@_wraps(np.diag)
+@_wraps(np.diag, lax_description=_ARRAY_VIEW_DOC)
def diag(v, k=0):
_check_arraylike("diag", v)
v_shape = shape(v)
@@ -4590,7 +4592,7 @@ def roll(a, shift, axis: Optional[Union[int, Sequence[int]]] = None):
return _roll(a, shift, axis)
-@_wraps(np.rollaxis)
+@_wraps(np.rollaxis, lax_description=_ARRAY_VIEW_DOC)
def rollaxis(a, axis: int, start=0):
_check_arraylike("rollaxis", a)
start = core.concrete_or_error(operator.index, start, "'start' argument of jnp.rollaxis()")
| Functions claim to return a view but return a copy
My understanding is that the immutable-array policy means that `jnp` functions (e.g., `transpose`) return a copy of an array, rather than a view.
There are a few places in the docs where a function claims to return a view: [atleast_2d](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.atleast_2d.html#jax.numpy.atleast_2d) and [atleast_3d](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.atleast_3d.html#jax.numpy.atleast_3d) state it in the top line of the docstring.
Other functions, like [ravel](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.ravel.html) or [transpose](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.transpose.html), have some verbiage about returning a view where possible in the "original docstring below" portion. I suppose this is inevitable if the docstring is copied directly from the numpy functions.
Perhaps the view/copy distinction could be added to the bulleted list in the `jax.numpy` [API doc page ](https://jax.readthedocs.io/en/latest/jax.numpy.html)?
| Thanks - the key phrase there (admittedly easy to miss) is *Original docstring below.*
Rather than creating new docstrings for all numpy/scipy functions, JAX wraps the original numpy/scipy docstrings, with automated modifications to the extent that is possible. This means that some details about the implementation will differ.
There's not really any way to fix this without rewriting all the docstrings from scratch, and the cost/benefit tradeoff has so far not favored that.
Updating the API doc page is a good idea
Hey, sorry, I realized I didn't read your initial report very closely... I guess I shouldn't be responding to issues so late in the day
Along with the change in #6836, I think we could also add an extra doc note to the particular functions you mention, where the incorrect information is in the top-line function description. I'll get on that as well. | 2021-05-26T16:56:40 |
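A small sketch of the copy-versus-view difference being documented here (assumes NumPy for comparison; the variable names are illustrative only):
```python
import numpy as np
import jax.numpy as jnp

base = np.arange(6).reshape(2, 3)

np_view = base.T                # NumPy: a view that shares memory with `base`
jnp_copy = jnp.transpose(base)  # JAX: always a new, immutable array

base[0, 0] = 100
print(np_view[0, 0])   # 100 -- reflects the mutation of `base`
print(jnp_copy[0, 0])  # 0   -- unaffected; the JAX result does not alias `base`
```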
|
google/jax | 6,851 | google__jax-6851 | [
"6599"
] | 4ad332e83f9f7ff2bae8b91808a341de6513a39e | diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -1805,7 +1805,8 @@ def _split(op, ary, indices_or_sections, axis=0):
size = ary.shape[axis]
if isinstance(indices_or_sections, (tuple, list) + _arraylike_types):
indices_or_sections = np.array(
- [core.concrete_or_error(np.int64, i_s, f"in jax.numpy.{op} argument 1")
+ [_canonicalize_axis(core.concrete_or_error(np.int64, i_s,
+ f"in jax.numpy.{op} argument 1"), size)
for i_s in indices_or_sections], np.int64)
split_indices = np.concatenate([[np.int64(0)], indices_or_sections,
[np.int64(size)]])
| diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py
--- a/tests/lax_numpy_test.py
+++ b/tests/lax_numpy_test.py
@@ -2925,8 +2925,8 @@ def testSliceWeakTypes(self, shape, dtype, weak_type, slc):
"shape": shape, "num_sections": num_sections, "axis": axis,
"dtype": dtype}
for shape, axis, num_sections in [
- ((3,), 0, 3), ((12,), 0, 3), ((12, 4), 0, 4), ((12, 4), 1, 2),
- ((2, 3, 4), -1, 2), ((2, 3, 4), -2, 3)]
+ ((3,), 0, 3), ((12,), 0, 3), ((12, 4), 0, 4), ((3,), 0, [-1]),
+ ((12, 4), 1, 2), ((2, 3, 4), -1, 2), ((2, 3, 4), -2, 3)]
for dtype in default_dtypes))
def testSplitStaticInt(self, shape, num_sections, axis, dtype):
rng = jtu.rand_default(self.rng())
| Support negative numpy.split indices
```python
import numpy as np
np.split([1, 2, 3], [-1])
```
works, while
```python
import jax.numpy as jnp
jnp.split(jnp.array([1, 2, 3]), [-1])
```
fails.
| Hi! I've got some time. I'd like to take up this issue.
Awesome! Please take a look at the [contributing](https://jax.readthedocs.io/en/latest/contributing.html#contributing-code-using-pull-requests) doc, and please feel free to reach out if you have questions.
You can open a draft pull request if that would be helpful :grin: | 2021-05-27T16:41:47 |
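A short sketch of the behavior after the patch above: negative split points are canonicalized against the axis length, matching NumPy.
```python
import numpy as np
import jax.numpy as jnp

np_parts = np.split(np.array([1, 2, 3]), [-1])
jnp_parts = jnp.split(jnp.array([1, 2, 3]), [-1])

for expected, actual in zip(np_parts, jnp_parts):
    assert np.array_equal(expected, actual)  # [1, 2] and then [3]
```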
google/jax | 6,877 | google__jax-6877 | [
"6715"
] | 46cc65453753c81773f8a24861cdf99fb4769534 | diff --git a/jax/_src/config.py b/jax/_src/config.py
--- a/jax/_src/config.py
+++ b/jax/_src/config.py
@@ -250,7 +250,10 @@ def define_enum_state(
See docstring for ``define_bool_state``.
"""
name = name.lower()
- self.DEFINE_enum(name, os.getenv(name.upper(), default),
+ default = os.getenv(name.upper(), default)
+ if default is not None and default not in enum_values:
+ raise ValueError(f"Invalid value \"{default}\" for JAX flag {name}")
+ self.DEFINE_enum(name, default,
enum_values=enum_values, help=help,
update_hook=update_global_hook)
self._contextmanager_flags.add(name)
@@ -517,3 +520,17 @@ def _update_disable_jit_thread_local(val):
update_global_jit_state(default_matmul_precision=val),
update_thread_local_hook=lambda val: \
update_thread_local_jit_state(default_matmul_precision=val))
+
+traceback_filtering = config.define_enum_state(
+ name = 'jax_traceback_filtering',
+ enum_values=["off", "tracebackhide", "remove_frames", "auto"],
+ default="auto",
+ help="Controls how JAX filters internal frames out of tracebacks.\n\n"
+ "Valid values are:\n"
+ " * \"off\": disables traceback filtering.\n"
+ " * \"auto\": use \"tracebackhide\" if running under a sufficiently "
+ "new IPython, or \"remove_frames\" otherwise.\n"
+ " * \"tracebackhide\": adds \"__tracebackhide__\" annotations to "
+ " hidden stack frames, which some traceback printers support.\n"
+ " * \"remove_frames\": removes hidden frames from tracebacks, and adds "
+ " the unfiltered traceback as a __cause__ of the exception.\n")
diff --git a/jax/_src/traceback_util.py b/jax/_src/traceback_util.py
--- a/jax/_src/traceback_util.py
+++ b/jax/_src/traceback_util.py
@@ -16,7 +16,9 @@
import sys
import traceback
import types
+import warnings
+import jax
from jax.lib import xla_extension
from jax._src import util
@@ -56,6 +58,11 @@ def include_frame(f):
def ignore_known_hidden_frame(f):
return 'importlib._bootstrap' in f.f_code.co_filename
+def add_tracebackhide_to_hidden_frames(tb):
+ for f, lineno in traceback.walk_tb(tb):
+ if not include_frame(f):
+ f.f_locals["__tracebackhide__"] = True
+
def filter_traceback(tb):
out = None
# Scan the traceback and collect relevant frames.
@@ -111,6 +118,38 @@ class UnfilteredStackTrace(Exception): pass
def filtered_tracebacks_supported():
return make_traceback is not None
+def running_under_ipython():
+ """Returns true if we appear to be in an IPython session."""
+ try:
+ get_ipython() # type: ignore
+ return True
+ except NameError:
+ return False
+
+def python_supports_tracebackhide():
+ """Returns true we can add __tracebackhide__ to frames."""
+ # TODO(phawkins): remove this test after droppping Python 3.6 support.
+ return sys.version_info[:2] >= (3, 7)
+
+def ipython_supports_tracebackhide():
+ """Returns true if the IPython version supports __tracebackhide__."""
+ import IPython # type: ignore
+ return IPython.version_info[:2] >= (7, 17)
+
+def filtering_mode():
+ mode = jax.config.jax_traceback_filtering
+ if mode is None or mode == "auto":
+ if (running_under_ipython() and ipython_supports_tracebackhide() and
+ python_supports_tracebackhide()):
+ mode = "tracebackhide"
+ else:
+ mode = "remove_frames"
+ if mode == "tracebackhide" and not python_supports_tracebackhide():
+ warnings.warn("--jax_traceback_filtering=tracebackhide requires Python 3.7 "
+ "or newer.")
+ mode = "remove_frames"
+ return mode
+
def api_boundary(fun):
'''Wraps ``fun`` to form a boundary for filtering exception tracebacks.
@@ -139,39 +178,47 @@ def api_boundary(fun):
@util.wraps(fun)
def reraise_with_filtered_traceback(*args, **kwargs):
+ __tracebackhide__ = True
try:
return fun(*args, **kwargs)
except Exception as e:
- if not is_under_reraiser(e):
- filtered_tb, unfiltered = None, None
- try:
- filtered_tb = filter_traceback(e.__traceback__)
- if filtered_tb is None:
- raise
- msg = format_exception_only(e)
- msg = f'{msg}\n\n{_jax_message_append}'
- unfiltered = UnfilteredStackTrace(msg)
- unfiltered.with_traceback(add_call_stack_frames(e.__traceback__))
- unfiltered.__context__ = e.__context__
- unfiltered.__cause__ = e.__cause__
- unfiltered.__suppress_context__ = e.__suppress_context__
- e.__context__ = None
- e.__cause__ = unfiltered
- # There seems to be no way to alter the currently raised exception's
- # traceback, except via the C API. The currently raised exception
- # is part of the interpreter's thread state: value `e` is a copy.
- if hasattr(xla_extension, 'replace_thread_exc_traceback'):
- xla_extension.replace_thread_exc_traceback(filtered_tb)
- raise
- else:
- # TODO(phawkins): remove this case when jaxlib 0.1.66 is the
- # minimum.
-
- # Fallback case for older jaxlibs; includes the current frame.
- raise e.with_traceback(filtered_tb)
- finally:
- del filtered_tb
- del unfiltered
- else:
+ mode = filtering_mode()
+ if is_under_reraiser(e) or mode == "off":
+ raise
+ if mode == "tracebackhide":
+ add_tracebackhide_to_hidden_frames(e.__traceback__)
raise
+ assert mode == "remove_frames", mode
+
+ filtered_tb, unfiltered, mode = None, None, None
+ try:
+ filtered_tb = filter_traceback(e.__traceback__)
+ if filtered_tb is None:
+ raise
+ msg = format_exception_only(e)
+ msg = f'{msg}\n\n{_jax_message_append}'
+ unfiltered = UnfilteredStackTrace(msg)
+ unfiltered.with_traceback(add_call_stack_frames(e.__traceback__))
+ unfiltered.__context__ = e.__context__
+ unfiltered.__cause__ = e.__cause__
+ unfiltered.__suppress_context__ = e.__suppress_context__
+ e.__context__ = None
+ e.__cause__ = unfiltered
+
+ # There seems to be no way to alter the currently raised exception's
+ # traceback, except via the C API. The currently raised exception
+ # is part of the interpreter's thread state: value `e` is a copy.
+ if hasattr(xla_extension, 'replace_thread_exc_traceback'):
+ xla_extension.replace_thread_exc_traceback(filtered_tb)
+ raise
+ else:
+ # TODO(phawkins): remove this case when jaxlib 0.1.66 is the
+ # minimum.
+
+ # Fallback case for older jaxlibs; includes the current frame.
+ raise e.with_traceback(filtered_tb)
+ finally:
+ del filtered_tb
+ del unfiltered
+ del mode
return reraise_with_filtered_traceback
| diff --git a/tests/errors_test.py b/tests/errors_test.py
--- a/tests/errors_test.py
+++ b/tests/errors_test.py
@@ -13,6 +13,7 @@
# limitations under the License.
import re
+import sys
import traceback
import unittest
@@ -40,16 +41,31 @@ def get_exception(etype, f):
return e
assert False
-def check_filtered_stack_trace(test, etype, f, frame_patterns=[]):
- test.assertRaises(etype, f)
- e = get_exception(etype, f)
+def check_filtered_stack_trace(test, etype, f, frame_patterns=[],
+ filter_mode="remove_frames"):
+ with jax._src.config.traceback_filtering(filter_mode):
+ test.assertRaises(etype, f)
+ e = get_exception(etype, f)
c = e.__cause__
- test.assertIsInstance(c, traceback_util.UnfilteredStackTrace)
- c_tb = traceback.format_tb(e.__traceback__)
- # TODO(phawkins): remove this condition after jaxlib 0.1.66 is the minimum.
- if not hasattr(xla_extension, "replace_thread_exc_traceback"):
- c_tb = [t for t in c_tb if "reraise_with_filtered_traceback" not in t]
+ if filter_mode == "remove_frames":
+ test.assertIsInstance(c, traceback_util.UnfilteredStackTrace)
+ else:
+ test.assertFalse(isinstance(c, traceback_util.UnfilteredStackTrace))
+
if frame_patterns:
+ frames = []
+ for frame, lineno in traceback.walk_tb(e.__traceback__):
+ if filter_mode == "tracebackhide":
+ if "__tracebackhide__" in frame.f_locals.keys():
+ continue
+ elif filter_mode == "remove_frames":
+ # TODO(phawkins): remove this condition after jaxlib 0.1.66 is the minimum.
+ if (not hasattr(xla_extension, "replace_thread_exc_traceback") and
+ frame.f_code.co_name == "reraise_with_filtered_traceback"):
+ continue
+ frames.append((frame, lineno))
+
+ c_tb = traceback.format_list(traceback.StackSummary.extract(frames))
for (fname_pat, line_pat), frame_fmt in zip(
reversed(frame_patterns), reversed(c_tb)):
file = re.escape(__file__)
@@ -60,12 +76,21 @@ def check_filtered_stack_trace(test, etype, f, frame_patterns=[]):
f', in {fname_pat}' r'\n\s*' f'{line_pat}')
test.assertRegex(frame_fmt, full_pat)
+def skip_if_unsupported_filter_mode(filter_mode):
+ if (filter_mode == "remove_frames" and
+ not traceback_util.filtered_tracebacks_supported()):
+ raise unittest.SkipTest('Filtered tracebacks not supported')
+ elif filter_mode == "tracebackhide" and sys.version_info[:2] < (3, 7):
+ raise unittest.SkipTest('Tracebackhide requires Python 3.7 or newer')
+
[email protected]_parameters(
+ {"testcase_name": f"_{f}", "filter_mode": f}
+ for f in ("tracebackhide", "remove_frames"))
class FilteredTracebackTest(jtu.JaxTestCase):
- def test_nested_jit(self):
- if not traceback_util.filtered_tracebacks_supported():
- raise unittest.SkipTest('Filtered tracebacks not supported')
+ def test_nested_jit(self, filter_mode):
+ skip_if_unsupported_filter_mode(filter_mode)
@jit
def innermost(x):
@@ -83,11 +108,11 @@ def outermost(x):
('<lambda>', 'f = lambda: outermost'),
('outermost', 'return 2 + inbetween(x)'),
('inbetween', 'return 1 + innermost(x)'),
- ('innermost', 'assert False')])
+ ('innermost', 'assert False')],
+ filter_mode=filter_mode)
- def test_nested_jit_and_vmap(self):
- if not traceback_util.filtered_tracebacks_supported():
- raise unittest.SkipTest('Filtered tracebacks not supported')
+ def test_nested_jit_and_vmap(self, filter_mode):
+ skip_if_unsupported_filter_mode(filter_mode)
@jit
def innermost(x):
@@ -105,11 +130,11 @@ def outermost(x):
('<lambda>', 'f = lambda: outermost'),
('outermost', 'return 2 + inbetween(x)'),
('inbetween', 'return 1 + vmap(innermost)(x)'),
- ('innermost', 'assert False')])
+ ('innermost', 'assert False')],
+ filter_mode=filter_mode)
- def test_nested_jit_and_grad(self):
- if not traceback_util.filtered_tracebacks_supported():
- raise unittest.SkipTest('Filtered tracebacks not supported')
+ def test_nested_jit_and_grad(self, filter_mode):
+ skip_if_unsupported_filter_mode(filter_mode)
@jit
def innermost(x):
@@ -127,11 +152,10 @@ def outermost(x):
('<lambda>', 'f = lambda: outermost'),
('outermost', 'return 2 + inbetween(x)'),
('inbetween', 'return 1 + grad(innermost)(x)'),
- ])
+ ], filter_mode=filter_mode)
- def test_lax_cond(self):
- if not traceback_util.filtered_tracebacks_supported():
- raise unittest.SkipTest('Filtered tracebacks not supported')
+ def test_lax_cond(self, filter_mode):
+ skip_if_unsupported_filter_mode(filter_mode)
def err(_):
assert False
@@ -142,11 +166,11 @@ def f():
check_filtered_stack_trace(self, AssertionError, f, [
('f', 'return lax.cond(True, err, lambda _: (), ())'),
- ('err', 'assert False')])
+ ('err', 'assert False')],
+ filter_mode=filter_mode)
- def test_lax_switch(self):
- if not traceback_util.filtered_tracebacks_supported():
- raise unittest.SkipTest('Filtered tracebacks not supported')
+ def test_lax_switch(self, filter_mode):
+ skip_if_unsupported_filter_mode(filter_mode)
def err(_):
assert False
@@ -158,11 +182,10 @@ def f():
check_filtered_stack_trace(self, AssertionError, f, [
('f', 'return lax.switch(1, branches, ())'),
- ('err', 'assert False')])
+ ('err', 'assert False')], filter_mode=filter_mode)
- def test_lax_scan(self):
- if not traceback_util.filtered_tracebacks_supported():
- raise unittest.SkipTest('Filtered tracebacks not supported')
+ def test_lax_scan(self, filter_mode):
+ skip_if_unsupported_filter_mode(filter_mode)
def err(*_):
assert False
@@ -173,11 +196,10 @@ def f():
check_filtered_stack_trace(self, AssertionError, f, [
('f', 'return lax.scan(err, (), (), 3)'),
- ('err', 'assert False')])
+ ('err', 'assert False')], filter_mode=filter_mode)
- def test_lax_fori_loop(self):
- if not traceback_util.filtered_tracebacks_supported():
- raise unittest.SkipTest('Filtered tracebacks not supported')
+ def test_lax_fori_loop(self, filter_mode):
+ skip_if_unsupported_filter_mode(filter_mode)
def err(*_):
assert False
@@ -188,11 +210,10 @@ def f():
check_filtered_stack_trace(self, AssertionError, f, [
('f', 'return lax.fori_loop(0, 3, err, ())'),
- ('err', 'assert False')])
+ ('err', 'assert False')], filter_mode=filter_mode)
- def test_lax_while_loop(self):
- if not traceback_util.filtered_tracebacks_supported():
- raise unittest.SkipTest('Filtered tracebacks not supported')
+ def test_lax_while_loop(self, filter_mode):
+ skip_if_unsupported_filter_mode(filter_mode)
def err(*_):
assert False
@@ -204,11 +225,10 @@ def f():
check_filtered_stack_trace(self, AssertionError, f, [
('f', 'return lax.while_loop(pred, err, ())'),
- ('err', 'assert False')])
+ ('err', 'assert False')], filter_mode=filter_mode)
- def test_lax_map(self):
- if not traceback_util.filtered_tracebacks_supported():
- raise unittest.SkipTest('Filtered tracebacks not supported')
+ def test_lax_map(self, filter_mode):
+ skip_if_unsupported_filter_mode(filter_mode)
def err(_):
assert False
@@ -220,11 +240,10 @@ def f():
check_filtered_stack_trace(self, AssertionError, f, [
('f', 'return lax.map(err, xs)'),
- ('err', 'assert False')])
+ ('err', 'assert False')], filter_mode=filter_mode)
- def test_lax_custom_root(self):
- if not traceback_util.filtered_tracebacks_supported():
- raise unittest.SkipTest('Filtered tracebacks not supported')
+ def test_lax_custom_root(self, filter_mode):
+ skip_if_unsupported_filter_mode(filter_mode)
def err(*_):
assert False
@@ -242,17 +261,16 @@ def f3():
check_filtered_stack_trace(self, AssertionError, f1, [
('f1', 'return lax.custom_root(g, 0., err, solve)'),
- ('err', 'assert False')])
+ ('err', 'assert False')], filter_mode=filter_mode)
check_filtered_stack_trace(self, AssertionError, f2, [
('f2', 'return lax.custom_root(g, 0., solve, err)'),
- ('err', 'assert False')])
+ ('err', 'assert False')], filter_mode=filter_mode)
check_filtered_stack_trace(self, AssertionError, f3, [
('f3', 'return lax.custom_root(err, 0., solve, solve)'),
- ('err', 'assert False')])
+ ('err', 'assert False')], filter_mode=filter_mode)
- def test_lax_custom_linear_solve(self):
- if not traceback_util.filtered_tracebacks_supported():
- raise unittest.SkipTest('Filtered tracebacks not supported')
+ def test_lax_custom_linear_solve(self, filter_mode):
+ skip_if_unsupported_filter_mode(filter_mode)
def err(*_):
assert False
@@ -269,14 +287,13 @@ def f2():
check_filtered_stack_trace(self, AssertionError, f1, [
('f1', 'return lax.custom_linear_solve(err, b, solve)'),
- ('err', 'assert False')])
+ ('err', 'assert False')], filter_mode=filter_mode)
check_filtered_stack_trace(self, AssertionError, f2, [
('f2', 'return lax.custom_linear_solve(matvec, b, err)'),
- ('err', 'assert False')])
+ ('err', 'assert False')], filter_mode=filter_mode)
- def test_lax_associative_scan(self):
- if not traceback_util.filtered_tracebacks_supported():
- raise unittest.SkipTest('Filtered tracebacks not supported')
+ def test_lax_associative_scan(self, filter_mode):
+ skip_if_unsupported_filter_mode(filter_mode)
def err(*_):
assert False
@@ -288,11 +305,10 @@ def f():
check_filtered_stack_trace(self, AssertionError, f, [
('f', 'return lax.associative_scan(err, xs)'),
- ('err', 'assert False')])
+ ('err', 'assert False')], filter_mode=filter_mode)
- def test_cause_chain(self):
- if not traceback_util.filtered_tracebacks_supported():
- raise unittest.SkipTest('Filtered tracebacks not supported')
+ def test_cause_chain(self, filter_mode):
+ skip_if_unsupported_filter_mode(filter_mode)
@jit
def inner(x):
@@ -308,7 +324,7 @@ def outer(x):
check_filtered_stack_trace(self, TypeError, f, [
('<lambda>', 'f = lambda: outer'),
- ('outer', 'raise TypeError')])
+ ('outer', 'raise TypeError')], filter_mode=filter_mode)
e = get_exception(TypeError, f)
self.assertIsInstance(e.__cause__, traceback_util.UnfilteredStackTrace)
self.assertIsInstance(e.__cause__.__cause__, ValueError)
| Add configuration option for disabling filtered stack traces
Some users report filtered stack traces interact badly with `pdb`.
We should add a `jax.config` option for disabling filtered stack traces, and hint at its existence in the filtered stack trace messages.
| Other users would like to turn off the unfiltered stack traces completely (https://github.com/google/jax/issues/6271). Probably the best way to do this would be to make this a tristate option. | 2021-06-02T19:23:53 |
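A hedged usage sketch of the option introduced in the diff above; the flag name and enum values ("off", "tracebackhide", "remove_frames", "auto") are taken from the patch, everything else is illustrative.
```python
import jax

# Turn traceback filtering off entirely, e.g. when stepping through failures
# with pdb:
jax.config.update("jax_traceback_filtering", "off")

# Since define_enum_state reads os.getenv(name.upper()), the same setting can
# presumably be supplied via the environment before Python starts:
#   JAX_TRACEBACK_FILTERING=off python my_script.py
```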
google/jax | 6,885 | google__jax-6885 | [
"6884"
] | 39526a0d08b216c411aafd58c9654560b6ecdea7 | diff --git a/jax/_src/lax/control_flow.py b/jax/_src/lax/control_flow.py
--- a/jax/_src/lax/control_flow.py
+++ b/jax/_src/lax/control_flow.py
@@ -2510,7 +2510,7 @@ def combine(a_flat, b_flat):
if not all(int(elem.shape[axis]) == num_elems for elem in elems_flat[1:]):
raise ValueError('Array inputs to associative_scan must have the same '
'first dimension. (saw: {})'
- .format([elems.shape for elem in elems_flat]))
+ .format([elem.shape for elem in elems_flat]))
# Summary of algorithm:
| Possible bug in shape mismatch error message
https://github.com/google/jax/blob/ecab743e5c4a4d87a6a6031cc2cbc599bb2e3383/jax/_src/lax/control_flow.py#L2513
Shouldn't it be elem.shape?
| Definitely should be `elem.shape`. Thanks for letting us know! | 2021-06-03T11:54:05 |
|
google/jax | 6,917 | google__jax-6917 | [
"6907"
] | 737efb5908d86b0c1c9f74c7751d15ccaac19044 | diff --git a/jax/interpreters/xla.py b/jax/interpreters/xla.py
--- a/jax/interpreters/xla.py
+++ b/jax/interpreters/xla.py
@@ -1043,6 +1043,7 @@ class Token(object): pass
xla_shape_handlers[AbstractToken] = lambda _: (xc.Shape.token_shape(),)
xla_result_handlers[AbstractToken] = lambda _, __: lambda _: token
canonicalize_dtype_handlers[Token] = identity
+device_put_handlers[Token] = lambda x, _: (x,)
def _forward_method(attrname, self, fun, *args):
| diff --git a/tests/api_test.py b/tests/api_test.py
--- a/tests/api_test.py
+++ b/tests/api_test.py
@@ -369,6 +369,16 @@ def test_trivial_computations(self):
self.assertIs(z3, x1)
self.assertEqual(z2, 1)
+ def test_trivial_computations_with_tokens(self):
+ @self.jit
+ def noop(arr, token):
+ return arr, token
+
+ arr = jax.numpy.ones(10)
+ token = jax.lax.create_token()
+
+ self.assertEqual(token, noop(arr, token)[1])
+
def test_jit_bad_input(self):
def f(x):
return x
| JITing no-ops breaks with tokens
Reproducer:
```python
>>> import jax
>>> @jax.jit
>>> def noop(arr, token):
... return arr, token
>>> arr = jax.numpy.ones(10)
>>> token = jax.lax.create_token()
>>> noop(arr, token)
Traceback (most recent call last):
File "/groups/ocean/dhaefner/miniconda3/envs/veros/lib/python3.8/site-packages/jax/interpreters/xla.py", line 119, in device_put
return device_put_handlers[type(x)](x, device)
KeyError: <class 'jax.interpreters.xla.Token'>
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/groups/ocean/dhaefner/miniconda3/envs/veros/lib/python3.8/site-packages/jax/_src/traceback_util.py", line 143, in reraise_with_filtered_traceback
return fun(*args, **kwargs)
File "/groups/ocean/dhaefner/miniconda3/envs/veros/lib/python3.8/site-packages/jax/_src/api.py", line 426, in cache_miss
out_flat = xla.xla_call(
File "/groups/ocean/dhaefner/miniconda3/envs/veros/lib/python3.8/site-packages/jax/core.py", line 1565, in bind
return call_bind(self, fun, *args, **params)
File "/groups/ocean/dhaefner/miniconda3/envs/veros/lib/python3.8/site-packages/jax/core.py", line 1556, in call_bind
outs = primitive.process(top_trace, fun, tracers, params)
File "/groups/ocean/dhaefner/miniconda3/envs/veros/lib/python3.8/site-packages/jax/core.py", line 1568, in process
return trace.process_call(self, fun, tracers, params)
File "/groups/ocean/dhaefner/miniconda3/envs/veros/lib/python3.8/site-packages/jax/core.py", line 609, in process_call
return primitive.impl(f, *tracers, **params)
File "/groups/ocean/dhaefner/miniconda3/envs/veros/lib/python3.8/site-packages/jax/interpreters/xla.py", line 581, in _xla_call_impl
return compiled_fun(*args)
File "/groups/ocean/dhaefner/miniconda3/envs/veros/lib/python3.8/site-packages/jax/interpreters/xla.py", line 905, in _execute_trivial
return [_copy_device_array_to_device(x, device) if type_is_device_array(x)
File "/groups/ocean/dhaefner/miniconda3/envs/veros/lib/python3.8/site-packages/jax/interpreters/xla.py", line 906, in <listcomp>
else h(*device_put(x, device)) for h, x in zip(handlers, outs)]
File "/groups/ocean/dhaefner/miniconda3/envs/veros/lib/python3.8/site-packages/jax/interpreters/xla.py", line 121, in device_put
raise TypeError(f"No device_put handler for type: {type(x)}") from err
jax._src.traceback_util.UnfilteredStackTrace: TypeError: No device_put handler for type: <class 'jax.interpreters.xla.Token'>
The stack trace below excludes JAX-internal frames.
The preceding is the original exception that occurred, unmodified.
--------------------
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/groups/ocean/dhaefner/miniconda3/envs/veros/lib/python3.8/site-packages/jax/interpreters/xla.py", line 121, in device_put
raise TypeError(f"No device_put handler for type: {type(x)}") from err
TypeError: No device_put handler for type: <class 'jax.interpreters.xla.Token'>
```
| 2021-06-07T20:20:43 |
|
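A sketch mirroring the test added in the diff above: with the one-line `device_put_handlers` entry in place, a trivially-jitted function can pass a `Token` through unchanged.
```python
import jax

@jax.jit
def noop(arr, token):
    return arr, token

arr = jax.numpy.ones(10)
token = jax.lax.create_token()
_, out_token = noop(arr, token)  # no "No device_put handler" error post-fix
```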
google/jax | 6,926 | google__jax-6926 | [
"6922"
] | 72cd6d0072daf756ae44eae06b12fe96e1f263a3 | diff --git a/jax/_src/random.py b/jax/_src/random.py
--- a/jax/_src/random.py
+++ b/jax/_src/random.py
@@ -25,7 +25,7 @@
from jax._src import dtypes
from jax.core import NamedShape
from jax._src.api import jit, vmap
-from jax._src.numpy.lax_numpy import _constant_like, _convert_and_clip_integer, asarray
+from jax._src.numpy.lax_numpy import _constant_like, _convert_and_clip_integer, _check_arraylike
from jax.lib import xla_bridge
from jax.lib import xla_client
from jax.lib import cuda_prng
@@ -86,14 +86,6 @@ def _is_prng_key(key: jnp.ndarray) -> bool:
### utilities
-# TODO(mattjj,jakevdp): add more info to error message, use this utility more
-def _asarray(x):
- """A more restrictive jnp.asarray, only accepts JAX arrays and np.ndarrays."""
- if not isinstance(x, (np.ndarray, jnp.ndarray)):
- raise TypeError(f"Function requires array input, got {x} of type {type(x)}.")
- return jnp.asarray(x)
-
-
def _make_rotate_left(dtype):
if not jnp.issubdtype(dtype, np.integer):
raise TypeError("_rotate_left only accepts integer dtypes.")
@@ -143,7 +135,7 @@ def rolled_loop_step(i, state):
x, ks, rotations = state
for r in rotations[0]:
x = apply_round(x, r)
- new_x = [x[0] + ks[0], x[1] + ks[1] + asarray(i + 1, dtype=np.uint32)]
+ new_x = [x[0] + ks[0], x[1] + ks[1] + jnp.asarray(i + 1, dtype=np.uint32)]
return new_x, rotate_list(ks), rotate_list(rotations)
def _threefry2x32_lowering(key1, key2, x1, x2, use_rolled_loops=True):
@@ -445,8 +437,9 @@ def _randint(key, shape, minval, maxval, dtype):
if not jnp.issubdtype(dtype, np.integer):
raise TypeError(f"randint only accepts integer dtypes, got {dtype}")
- minval = _asarray(minval)
- maxval = _asarray(maxval)
+ _check_arraylike("randint", minval, maxval)
+ minval = jnp.asarray(minval)
+ maxval = jnp.asarray(maxval)
if not jnp.issubdtype(minval.dtype, np.integer):
minval = minval.astype(int)
if not jnp.issubdtype(maxval.dtype, np.integer):
@@ -603,10 +596,11 @@ def choice(key: jnp.ndarray,
f"got {shape}")
if np.ndim(a) not in [0, 1]:
raise ValueError("a must be an integer or 1-dimensional")
+ _check_arraylike("choice", a)
if np.ndim(a) == 0:
- a = int(a)
+ a = core.concrete_or_error(int, a, "The error occurred in jax.random.choice()")
else:
- a = _asarray(a)
+ a = jnp.asarray(a)
n_inputs = int(a) if np.ndim(a) == 0 else len(a) # type: ignore[arg-type]
n_draws = prod(shape)
if n_draws == 0:
| jax.random bug if "jax_disable_jit" == True
jax.random.choice does not work as expected if jit is disabled.
```python
import jax
jax.config.update('jax_disable_jit', True)
jax.random.choice(jax.random.PRNGKey(1234), 8, shape=(100,))
```
Output:
```
WARNING:absl:No GPU/TPU found, falling back to CPU. (Set TF_CPP_MIN_LOG_LEVEL=0 and rerun for more info.)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/usr/anaconda3/lib/python3.7/site-packages/jax/_src/random.py", line 619, in choice
ind = randint(key, shape, 0, n_inputs)
File "/home/usr/anaconda3/lib/python3.7/site-packages/jax/_src/random.py", line 438, in randint
return _randint(key, shape, minval, maxval, dtype)
File "/home/usr/anaconda3/lib/python3.7/site-packages/jax/_src/random.py", line 446, in _randint
minval = _asarray(minval)
File "/home/usr/anaconda3/lib/python3.7/site-packages/jax/_src/random.py", line 92, in _asarray
raise TypeError(f"Function requires array input, got {x} of type {type(x)}.")
TypeError: Function requires array input, got 0 of type <class 'int'>.
```
| Thanks for the report - I think this check is too restrictive. We should probably use the `_check_arraylike` utility from `lax_numpy.py`. | 2021-06-08T20:38:17 |
|
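A sketch of the reporter's snippet, which is expected to succeed after the patch because Python scalars now go through `_check_arraylike` / `concrete_or_error` instead of the stricter `_asarray` helper it removes.
```python
import jax

jax.config.update('jax_disable_jit', True)
samples = jax.random.choice(jax.random.PRNGKey(1234), 8, shape=(100,))
assert samples.shape == (100,)
```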
google/jax | 6,940 | google__jax-6940 | [
"6936"
] | 888db31edefa4dd1a791f61792f52d334074fc7d | diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -1064,7 +1064,7 @@ def sinc(x):
x, = _promote_dtypes_inexact(x)
eq_zero = lax.eq(x, lax._const(x, 0))
pi_x = lax.mul(lax._const(x, pi), x)
- safe_pi_x = where(eq_zero, lax._const(x, 0), pi_x)
+ safe_pi_x = where(eq_zero, lax._const(x, 1), pi_x)
return where(eq_zero, _sinc_maclaurin(0, pi_x),
lax.div(lax.sin(safe_pi_x), safe_pi_x))
| diff --git a/tests/debug_nans_test.py b/tests/debug_nans_test.py
--- a/tests/debug_nans_test.py
+++ b/tests/debug_nans_test.py
@@ -37,6 +37,10 @@ def setUp(self):
def tearDown(self):
config.update("jax_debug_nans", self.cfg)
+ def testSinc(self):
+ # Regression test for #6936
+ self.assertEqual(jnp.sinc(0.0), 1.0)
+
def testSingleResultPrimitiveNoNaN(self):
A = jnp.array([[1., 2.], [2., 3.]])
ans = jnp.tanh(A)
| `jnp.sinc` causes error with NaNs debugging enabled
The [current implementation of `sinc`](https://github.com/google/jax/blob/d39261497c12488632cf7c5d5f19d2c074a565dd/jax/_src/numpy/lax_numpy.py#L1061) function generates NaNs that are detected when `jax_debug_nans` is activated.
**To reproduce:**
```python
import jax
import jax.numpy as jnp
from jax.config import config
config.update("jax_debug_nans", True)
x = jnp.array([0.])
x = jnp.sinc(x)
```
Output:
```python
---------------------------------------------------------------------------
FloatingPointError Traceback (most recent call last)
<ipython-input-5-ca7132c5c117> in <module>()
6 x = jnp.array([0.])
7
----> 8 x = jnp.sinc(x)
[...]
/usr/local/lib/python3.7/dist-packages/jax/interpreters/xla.py in _check_special(name, xla_shape, buf)
374 if dtypes.issubdtype(xla_shape.element_type(), np.inexact):
375 if config.jax_debug_nans and np.any(np.isnan(buf.to_py())):
--> 376 raise FloatingPointError(f"invalid value (nan) encountered in {name}")
377 if config.jax_debug_infs and np.any(np.isinf(buf.to_py())):
378 raise FloatingPointError(f"invalid value (inf) encountered in {name}")
FloatingPointError: invalid value (nan) encountered in div
```
The error comes from the second branch of the `jnp.where` function [here](https://github.com/google/jax/blob/d39261497c12488632cf7c5d5f19d2c074a565dd/jax/_src/numpy/lax_numpy.py#L1068), which has NaNs where `safe_pi_x` is `0`. Those NaNs are filtered out by the `where` function but are nevertheless produced and detected by the NaNs debugger.
| Thanks for the report - this should be fixed by #6940 | 2021-06-10T16:07:11 |
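A simplified sketch of the "safe denominator" pattern behind the one-character fix above (the real implementation uses a Maclaurin-series term in the zero branch; the constant `1.0` here is a simplification). The point is that values masked out by `where` must still be finite, because `jax_debug_nans` inspects every intermediate, not just the selected branch.
```python
import jax.numpy as jnp

def sinc_like(x):
    x = jnp.asarray(x, dtype=float)
    eq_zero = (x == 0)
    # Substitute 1.0 (not 0.0) where x == 0, so the discarded branch evaluates
    # sin(1)/1 rather than 0/0.
    safe_pi_x = jnp.where(eq_zero, 1.0, jnp.pi * x)
    return jnp.where(eq_zero, 1.0, jnp.sin(safe_pi_x) / safe_pi_x)
```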
google/jax | 6,952 | google__jax-6952 | [
"6859"
] | 3550732a74a16f1aba2d08e8dc82fe43d4cdaedb | diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -2801,15 +2801,19 @@ def stack(arrays, axis: int =0, out=None):
raise ValueError("Need at least one array to stack.")
if out is not None:
raise NotImplementedError("The 'out' argument to jnp.stack is not supported.")
- _check_arraylike("stack", *arrays)
- shape0 = shape(arrays[0])
- axis = _canonicalize_axis(axis, len(shape0) + 1)
- new_arrays = []
- for a in arrays:
- if shape(a) != shape0:
- raise ValueError("All input arrays must have the same shape.")
- new_arrays.append(expand_dims(a, axis))
- return concatenate(new_arrays, axis=axis)
+ if isinstance(arrays, ndarray):
+ axis = _canonicalize_axis(axis, arrays.ndim)
+ return concatenate(expand_dims(arrays, axis + 1), axis=axis)
+ else:
+ _check_arraylike("stack", *arrays)
+ shape0 = shape(arrays[0])
+ axis = _canonicalize_axis(axis, len(shape0) + 1)
+ new_arrays = []
+ for a in arrays:
+ if shape(a) != shape0:
+ raise ValueError("All input arrays must have the same shape.")
+ new_arrays.append(expand_dims(a, axis))
+ return concatenate(new_arrays, axis=axis)
@_wraps(np.tile)
def tile(A, reps):
@@ -2868,32 +2872,41 @@ def concatenate(arrays, axis: int = 0):
@_wraps(np.vstack)
def vstack(tup):
- return concatenate([atleast_2d(m) for m in tup], axis=0)
+ if isinstance(tup, ndarray):
+ arrs = jax.vmap(atleast_2d)(tup)
+ else:
+ arrs = [atleast_2d(m) for m in tup]
+ return concatenate(arrs, axis=0)
row_stack = vstack
@_wraps(np.hstack)
def hstack(tup):
- arrs = [atleast_1d(m) for m in tup]
- if arrs[0].ndim == 1:
- return concatenate(arrs, 0)
- return concatenate(arrs, 1)
+ if isinstance(tup, ndarray):
+ arrs = jax.vmap(atleast_1d)(tup)
+ arr0_ndim = arrs.ndim - 1
+ else:
+ arrs = [atleast_1d(m) for m in tup]
+ arr0_ndim = arrs[0].ndim
+ return concatenate(arrs, axis=0 if arr0_ndim == 1 else 1)
@_wraps(np.dstack)
def dstack(tup):
- return concatenate([atleast_3d(m) for m in tup], axis=2)
+ if isinstance(tup, ndarray):
+ arrs = jax.vmap(atleast_3d)(tup)
+ else:
+ arrs = [atleast_3d(m) for m in tup]
+ return concatenate(arrs, axis=2)
@_wraps(np.column_stack)
def column_stack(tup):
- arrays = []
- for v in tup:
- arr = asarray(v)
- if arr.ndim < 2:
- arr = atleast_2d(arr).T
- arrays.append(arr)
- return concatenate(arrays, 1)
+ if isinstance(tup, ndarray):
+ arrs = jax.vmap(lambda x: atleast_2d(x).T)(tup) if tup.ndim < 3 else tup
+ else:
+ arrs = [atleast_2d(arr).T if arr.ndim < 2 else arr for arr in map(asarray, tup)]
+ return concatenate(arrs, 1)
@_wraps(np.choose, skip_params=['out'])
| diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py
--- a/tests/lax_numpy_test.py
+++ b/tests/lax_numpy_test.py
@@ -2762,9 +2762,9 @@ def testDigitize(self, xshape, binshape, right, reverse, dtype):
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
- {"testcase_name": "_{}".format(
- jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes)),
- "shape": shape, "dtypes": dtypes}
+ {"testcase_name": "_{}_array={}".format(
+ jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes), array_input),
+ "shape": shape, "dtypes": dtypes, "array_input": array_input}
for dtypes in [
[np.float32],
[np.float32, np.float32],
@@ -2772,19 +2772,23 @@ def testDigitize(self, xshape, binshape, right, reverse, dtype):
[np.float32, np.int64, np.float32],
[np.float32, np.int32, np.float64],
]
- for shape in [(), (2,), (3, 4), (1, 5)]))
- def testColumnStack(self, shape, dtypes):
+ for shape in [(), (2,), (3, 4), (1, 5)]
+ for array_input in [True, False]))
+ def testColumnStack(self, shape, dtypes, array_input):
rng = jtu.rand_default(self.rng())
- args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]]
+ if array_input:
+ args_maker = lambda: [np.array([rng(shape, dtype) for dtype in dtypes])]
+ else:
+ args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]]
np_fun = _promote_like_jnp(np.column_stack)
jnp_fun = jnp.column_stack
self._CheckAgainstNumpy(jnp_fun, np_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
- {"testcase_name": "_{}_axis={}".format(
- jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes), axis),
- "shape": shape, "axis": axis, "dtypes": dtypes}
+ {"testcase_name": "_{}_axis={}_array={}".format(
+ jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes), axis, array_input),
+ "shape": shape, "axis": axis, "dtypes": dtypes, "array_input": array_input}
for dtypes in [
[np.float32],
[np.float32, np.float32],
@@ -2793,19 +2797,23 @@ def testColumnStack(self, shape, dtypes):
[np.float32, np.int32, np.float64],
]
for shape in [(), (2,), (3, 4), (1, 100)]
- for axis in range(-len(shape), len(shape) + 1)))
- def testStack(self, shape, axis, dtypes):
+ for axis in range(-len(shape), len(shape) + 1)
+ for array_input in [True, False]))
+ def testStack(self, shape, axis, dtypes, array_input):
rng = jtu.rand_default(self.rng())
- args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]]
+ if array_input:
+ args_maker = lambda: [np.array([rng(shape, dtype) for dtype in dtypes])]
+ else:
+ args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]]
np_fun = _promote_like_jnp(partial(np.stack, axis=axis))
jnp_fun = partial(jnp.stack, axis=axis)
self._CheckAgainstNumpy(jnp_fun, np_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
- {"testcase_name": "_op={}_{}".format(
- op, jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes)),
- "shape": shape, "op": op, "dtypes": dtypes}
+ {"testcase_name": "_op={}_{}_array={}".format(
+ op, jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes), array_input),
+ "shape": shape, "op": op, "dtypes": dtypes, "array_input": array_input}
for op in ["hstack", "vstack", "dstack"]
for dtypes in [
[np.float32],
@@ -2814,10 +2822,14 @@ def testStack(self, shape, axis, dtypes):
[np.float32, np.int64, np.float32],
[np.float32, np.int32, np.float64],
]
- for shape in [(), (2,), (3, 4), (1, 100), (2, 3, 4)]))
- def testHVDStack(self, shape, op, dtypes):
+ for shape in [(), (2,), (3, 4), (1, 100), (2, 3, 4)]
+ for array_input in [True, False]))
+ def testHVDStack(self, shape, op, dtypes, array_input):
rng = jtu.rand_default(self.rng())
- args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]]
+ if array_input:
+ args_maker = lambda: [np.array([rng(shape, dtype) for dtype in dtypes])]
+ else:
+ args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]]
np_fun = _promote_like_jnp(getattr(np, op))
jnp_fun = getattr(jnp, op)
self._CheckAgainstNumpy(jnp_fun, np_fun, args_maker)
| hstack and vstack produce very inefficient jaxpr and jit slowly; possible fix with reshape?
`hstack` is very inefficient for tensors: it produces jaxpr code whose length is proportional to the size of the traced array.
Compare:
```
{ lambda ; a b c.
let d = broadcast_in_dim[ broadcast_dimensions=(1, 2, 3, 4)
shape=(1, 2, 2, 3, 3) ] a
e = broadcast_in_dim[ broadcast_dimensions=(1, 2, 3, 4)
shape=(1, 2, 2, 3, 3) ] b
f = concatenate[ dimension=0 ] d e
g = broadcast_in_dim[ broadcast_dimensions=(1, 2, 3, 4)
shape=(1, 2, 2, 3, 3) ] b
h = broadcast_in_dim[ broadcast_dimensions=(1, 2, 3, 4)
shape=(1, 2, 2, 3, 3) ] c
i = concatenate[ dimension=0 ] g h
j = broadcast_in_dim[ broadcast_dimensions=(1, 2, 3, 4, 5)
shape=(1, 2, 2, 2, 3, 3) ] f
k = broadcast_in_dim[ broadcast_dimensions=(1, 2, 3, 4, 5)
shape=(1, 2, 2, 2, 3, 3) ] i
l = concatenate[ dimension=0 ] j k
m = slice[ limit_indices=(1, 2, 2, 2, 3, 3)
start_indices=(0, 0, 0, 0, 0, 0)
strides=(1, 1, 1, 1, 1, 1) ] l
n = squeeze[ dimensions=(0,) ] m
o = slice[ limit_indices=(2, 2, 2, 2, 3, 3)
start_indices=(1, 0, 0, 0, 0, 0)
strides=(1, 1, 1, 1, 1, 1) ] l
p = squeeze[ dimensions=(0,) ] o
q = concatenate[ dimension=1 ] n p
r = slice[ limit_indices=(1, 4, 2, 3, 3)
start_indices=(0, 0, 0, 0, 0)
strides=(1, 1, 1, 1, 1) ] q
s = squeeze[ dimensions=(0,) ] r
t = slice[ limit_indices=(2, 4, 2, 3, 3)
start_indices=(1, 0, 0, 0, 0)
strides=(1, 1, 1, 1, 1) ] q
u = squeeze[ dimensions=(0,) ] t
v = concatenate[ dimension=1 ] s u
w = slice[ limit_indices=(1, 4, 3, 3)
start_indices=(0, 0, 0, 0)
strides=(1, 1, 1, 1) ] v
x = squeeze[ dimensions=(0,) ] w
y = slice[ limit_indices=(2, 4, 3, 3)
start_indices=(1, 0, 0, 0)
strides=(1, 1, 1, 1) ] v
z = squeeze[ dimensions=(0,) ] y
ba = slice[ limit_indices=(3, 4, 3, 3)
start_indices=(2, 0, 0, 0)
strides=(1, 1, 1, 1) ] v
bb = squeeze[ dimensions=(0,) ] ba
bc = slice[ limit_indices=(4, 4, 3, 3)
start_indices=(3, 0, 0, 0)
strides=(1, 1, 1, 1) ] v
bd = squeeze[ dimensions=(0,) ] bc
be = concatenate[ dimension=1 ] x z bb bd
bf = slice[ limit_indices=(1, 12, 3)
start_indices=(0, 0, 0)
strides=(1, 1, 1) ] be
bg = squeeze[ dimensions=(0,) ] bf
bh = slice[ limit_indices=(2, 12, 3)
start_indices=(1, 0, 0)
strides=(1, 1, 1) ] be
bi = squeeze[ dimensions=(0,) ] bh
bj = slice[ limit_indices=(3, 12, 3)
start_indices=(2, 0, 0)
strides=(1, 1, 1) ] be
bk = squeeze[ dimensions=(0,) ] bj
bl = slice[ limit_indices=(4, 12, 3)
start_indices=(3, 0, 0)
strides=(1, 1, 1) ] be
bm = squeeze[ dimensions=(0,) ] bl
bn = concatenate[ dimension=1 ] bg bi bk bm
in (bn,) }
```
to a better, equivalent code that can be achieved using jnp.reshape
```
{ lambda ; a b c.
let d = broadcast_in_dim[ broadcast_dimensions=(1, 2, 3, 4)
shape=(1, 2, 2, 3, 3) ] a
e = broadcast_in_dim[ broadcast_dimensions=(1, 2, 3, 4)
shape=(1, 2, 2, 3, 3) ] b
f = concatenate[ dimension=0 ] d e
g = broadcast_in_dim[ broadcast_dimensions=(1, 2, 3, 4)
shape=(1, 2, 2, 3, 3) ] b
h = broadcast_in_dim[ broadcast_dimensions=(1, 2, 3, 4)
shape=(1, 2, 2, 3, 3) ] c
i = concatenate[ dimension=0 ] g h
j = broadcast_in_dim[ broadcast_dimensions=(1, 2, 3, 4, 5)
shape=(1, 2, 2, 2, 3, 3) ] f
k = broadcast_in_dim[ broadcast_dimensions=(1, 2, 3, 4, 5)
shape=(1, 2, 2, 2, 3, 3) ] i
l = concatenate[ dimension=0 ] j k
m = reshape[ dimensions=(0, 2, 4, 1, 3, 5)
new_sizes=(12, 12) ] l
in (m,) }
```
----
Probably `hstack` can be re-expressed in terms of reshape in general. I'm new to `jax` so maybe there are some negative side effects to such an approach?
----
Code to reproduce issue:
```python
import jax
import jax.numpy as jnp
n = 2
mAA = 1.0*jnp.arange(3*n*3*n).reshape((n,n,3,3))
mBB = 10.0*jnp.arange(3*n*3*n).reshape((n,n,3,3))
mAB = 2.0*jnp.arange(3*n*3*n).reshape((n,n,3,3))
def stack_hard(AA,AB,BB):
return jnp.hstack(
jnp.hstack(
jnp.hstack(
jnp.hstack(
jnp.array(
[[AA,AB],[AB,BB]]
)
)
)
)
)
def stack_easy(AA,AB,BB):
return jax.lax.reshape(
jnp.array([[AA,AB],[AB,BB]]),
(6*n,6*n),
dimensions = (0,2,4,1,3,5)
)
# JIT is very slow in case of larger n
# fast_stack = jax.jit(stack_hard)
# fast_stack(mAA,mBB,mAB)
print('===========================')
print(
jax.make_jaxpr(stack_hard)(mAA,mAB,mBB)
)
print('===========================')
print(
jax.make_jaxpr(stack_easy)(mAA,mAB,mBB)
)
print(stack_easy(mAA,mAB,mBB))
print(stack_hard(mAA,mAB,mBB))
```
| Hi - the issue here is that `hstack`'s call signature accepts a single argument, which is expected to be a tuple of arrays.
A tuple is a Python concept, not an XLA concept, so when you pass an array to something that expects a tuple, it must be converted into `N` array objects that are then passed back to XLA.
I'm not sure what we could do to "fix" this - maybe we could raise an error in the case that a single array is passed to `hstack`, to prevent this sort of silent conversion back to a numpy tuple, and require users to pass `tuple(arr)` explicitly. It would be less convenient, but it would make more apparent the computational cost implicit in the function's signature.
What do you think?
Also, I don't think it's generally true that `hstack` of a single array can be expressed in terms of a reshape. Here's a counter-example:
```python
>>> import jax.numpy as jnp
>>> x = jnp.arange(12).reshape(3, 2, 2)
>>> jnp.hstack(x)
DeviceArray([[ 0, 1, 4, 5, 8, 9],
[ 2, 3, 6, 7, 10, 11]], dtype=int32)
```
I'm not sure if there's any alternative here other than to split the array into three and pass them to ``lax.concat``, which is what `hstack` currently does.
Oh, I understand better why this happened. Perhaps we can improve just the case where only one `jnp` array is passed as an argument.
I'm pretty sure in all cases `jnp.hstack` can be expressed with `jax.lax.reshape` (note: *not* `jnp.reshape`) thanks to its optional `dimensions` argument.
In case of your example it would be:
``` python
>>> import jax.numpy as jnp
>>> import jax
>>> x = jnp.arange(12).reshape(3, 2, 2)
>>> jax.lax.reshape(x,(2,6),dimensions=(1,0,2)) - jnp.hstack(x)
DeviceArray([[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]], dtype=int32)
```
Nice, I didn't know about that! If it's not too complicated, I think special-casing the single array case in terms of these kinds of reshapes would be worthwhile - maybe it could all be handled at the level of `jnp.concatenate`
> Hi - the issue here is that the call signature of `hstack` is that it accepts a single argument, which is a tuple of arrays.
>
> A tuple is a Python concept, not an XLA concept, so when you pass an array to something that expects a tuple, it must be converted into `N` array objects that are then passed back to XLA.
>
> I'm not sure what we could do to "fix" this β maybe we could raise an error in the case that a single array is passed to `hstack`, to prevent this sort of silent conversion back to a numpy tuple, and require users to pass `tuple(arr)` explicitly. It would be less convenient, but it would make more apparent the computational cost implicit in the function's signature.
>
> What do you think?
Is there a way to see which operations in a program might be undergoing such a forced conversion from XLA to numpy back to XLA? I am currently suffering a significant slow down in a program I have written and I am wondering if it due to similar issues going on in the background.
The main source of this kind of thing is calling `__iter__` on a `DeviceArray`.
> The main source of this kind of thing is calling `__iter__` on a `DeviceArray`.
How can I find out if this is the case?
Alternatively, I think it might make sense to just raise an error when a single array is passed into `hstack`/`vstack`/`stack`/`concatenate`. We would force the user to write something more explicit like `jnp.vstack(list(array))`, which hopefully has more obvious performance implications.
That's two votes for erroring on implicit tuple conversion - maybe that's a cleaner route
> That's two votes for erroring on implicit tuple conversion - maybe that's a cleaner route
Oh, I see you did suggest that already
Erroring implicit tuple conversion :+1:
As for `hstack` and `vstack` I'd still prefer if there was a special case with reshape, especially since the workaround is not immediately obvious.
I think I have working code for this (up to edge cases such as passing a scalar etc.)
``` python
import jax.numpy as jnp
import jax
import numpy as np
def hstack_alternative(array):
shp = array.shape
return jax.lax.reshape(
array,
(shp[1], shp[0] * shp[2]) + shp[3:],
dimensions=((1, 0) + tuple(range(2, len(shp)))),
)
def vstack_alternative(array):
shp = array.shape
return jax.lax.reshape(array, (shp[0] * shp[1],) + shp[2:])
x = jnp.arange(12).reshape(3, 2, 2)
y = jnp.arange(2 * 3 * 5).reshape(2, 3, 5)
z = jnp.arange(2 * 3 * 5 * 7).reshape(2, 3, 5, 7)
def test_hstack_alternative():
assert np.allclose(jnp.hstack(x), hstack_alternative(x))
assert np.allclose(jnp.hstack(y), hstack_alternative(y))
assert np.allclose(jnp.hstack(z), hstack_alternative(z))
def test_vstack_alternative():
assert np.allclose(jnp.vstack(x), vstack_alternative(x))
assert np.allclose(jnp.vstack(y), vstack_alternative(y))
assert np.allclose(jnp.vstack(z), vstack_alternative(z))
```
That's very cool - it would be worth adding this code-path to `vstack` and `hstack` in my opinion.
Are you interested in putting together a PR?
I was playing with this a bit - here's the implementation of `jnp.concatenate` for array inputs in terms of `lax.reshape`:
```python
import jax.numpy as jnp
from jax import lax
from jax._src.util import canonicalize_axis
def _concatenate(x, axis=0):
assert isinstance(x, jnp.ndarray)
if x.ndim == 0:
raise ValueError("Need at least one array to concatenate.")
if x.ndim == 1:
raise ValueError("Zero-dimensional arrays cannot be concatenated.")
axis = canonicalize_axis(axis, x.ndim - 1)
shape = x.shape[1:axis + 1] + (x.shape[0] * x.shape[axis + 1],) + x.shape[axis + 2:]
dimensions = [*range(1, axis + 1), 0, *range(axis + 1, x.ndim)]
return lax.reshape(x, shape, dimensions)
```
Quickly tested with:
```python
import numpy as np
x = jnp.arange(2*3*4*5*4*3*2).reshape(2, 3, 4, 5, 4, 3, 2)
for axis in range(1 - x.ndim, x.ndim - 1):
c1 = jnp.concatenate(x, axis=axis)
c2 = _concatenate(x, axis=axis)
np.testing.assert_array_equal(c1, c2)
```
The implementation is simple enough that I think we should add it to JAX, along with similar approaches for `hstack`, `vstack`, and perhaps other related functions.
> That's very cool - it would be worth adding this code-path to `vstack` and `hstack` in my opinion.
>
> Are you interested in putting together a PR?
Sure :)
I'm new here, so if you could help with the GitHub side of things I'd be thankful.
Great! We have a bit of contribution information here: https://jax.readthedocs.io/en/latest/contributing.html#contributing-code-using-pull-requests
Feel free to open a work-in-progress PR if that would be helpful, and let me know if you have any questions
Hi @RadostW - just checking in. Is this still something you'd like to work on? If not, I can plan to put together the fix.
Either way, please let me know - thanks! | 2021-06-11T17:37:22 |
google/jax | 7,012 | google__jax-7012 | [
"7011"
] | 1d0c461ea67db683206d2643a5b377f2f8ff2e84 | diff --git a/jax/_src/random.py b/jax/_src/random.py
--- a/jax/_src/random.py
+++ b/jax/_src/random.py
@@ -304,14 +304,15 @@ def _random_bits(key, bit_width, shape):
max_count = int(np.ceil(bit_width * size / 32))
nblocks, rem = divmod(max_count, jnp.iinfo(np.uint32).max)
+
if not nblocks:
bits = threefry_2x32(key, lax.iota(np.uint32, rem))
else:
- *subkeys, last_key = split(key, nblocks + 1)
- blocks = [threefry_2x32(k, lax.iota(np.uint32, jnp.iinfo(np.uint32).max))
- for k in subkeys]
+ keys = split(key, nblocks + 1)
+ subkeys, last_key = keys[:-1], keys[-1]
+ blocks = vmap(threefry_2x32, in_axes=(0, None))(subkeys, lax.iota(np.uint32, jnp.iinfo(np.uint32).max))
last = threefry_2x32(last_key, lax.iota(np.uint32, rem))
- bits = lax.concatenate(blocks + [last], 0)
+ bits = lax.concatenate([blocks.ravel(), last], 0)
dtype = _UINT_DTYPES[bit_width]
if bit_width == 64:
| eval_shape appears to hang when tracing a large random sample
Calling
```python
import jax
from jax import random
jax.eval_shape(lambda: random.normal(key=random.PRNGKey(0), shape=[int(1e11)]))
```
returns `ShapeDtypeStruct(shape=(100000000000,), dtype=float32)`. I'd expect that calling
```python
jax.eval_shape(lambda: random.normal(key=random.PRNGKey(0), shape=[int(1e11), int(1e11)]))
```
would similarly return `ShapeDtypeStruct(shape=(100000000000,100000000000), dtype=float32)` (of course, if we *actually* ran the function we'd get an OOM error).
But instead, this call seems to end up in an ~infinite loop (at least, it hangs for > 2 minutes). Reproduction here:
https://colab.research.google.com/drive/14IZF3WUTaq976jaUC5tehKTPwt3YhNTe?usp=sharing
Is this expected?
| It looks like this comes from the implementation of `_random_bits`, specifically this part: https://github.com/google/jax/blob/1d0c461ea67db683206d2643a5b377f2f8ff2e84/jax/_src/random.py#L306-L312
If the number of generated random values is larger than the largest uint32, the implementation stacks multiple calls to the underlying random number generator in a Python loop. Since these calls are required for abstract evaluation, they are generated even for `jax.eval_shape`. And if the size is large enough, the abstract evaluation graph becomes quite large and slow to evaluate.
You can see this if you wish by calling `make_jaxpr()` for your input.
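For a concrete illustration of that inspection (an editor-added sketch, deliberately using a small shape so tracing stays fast):

```python
import jax
from jax import random

# With an ordinary shape the traced graph stays small; once the flat element
# count exceeds the uint32 maximum, the Python loop in _random_bits unrolls
# into one block per ~2**32 random words, so the traced graph (and hence
# eval_shape) grows with the requested size.
print(jax.make_jaxpr(lambda key: random.normal(key, (8,)))(random.PRNGKey(0)))
```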
We could maybe think about fixing this (perhaps compute it via `vmap`?), but it does seem like a pretty rare corner case. In what context did you encounter the problem? | 2021-06-17T23:11:58 |
|
google/jax | 7,027 | google__jax-7027 | [
"7026"
] | 9450b8f8f96cb4d57b3f6337dcc18d3d104ecf6b | diff --git a/jax/_src/lax/lax.py b/jax/_src/lax/lax.py
--- a/jax/_src/lax/lax.py
+++ b/jax/_src/lax/lax.py
@@ -1216,7 +1216,7 @@ def transpose(operand: Array, permutation: Sequence[int]) -> Array:
operator.
"""
permutation = tuple(permutation)
- if permutation == tuple(range(len(permutation))):
+ if permutation == tuple(range(np.ndim(operand))):
return operand
else:
return transpose_p.bind(operand, permutation=permutation)
| jax.numpy.transpose does not sufficiently validate inputs
For example:
```python
>>> import numpy as np
>>> import jax.numpy as jnp
>>> x = np.zeros((2, 3, 4))
# jnp.transpose is silent on invalid permutations
>>> jnp.transpose(x, (0, 1, 2, 3))
array([[[0., 0., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 0., 0.]],
[[0., 0., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 0., 0.]]])
# numpy transpose raises an error.
>>> np.transpose(x, (0, 1, 2, 3))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "<__array_function__ internals>", line 5, in transpose
File "/Users/vanderplas/.local/share/virtualenvs/jax-LBbfM5ix/lib/python3.8/site-packages/numpy/core/fromnumeric.py", line 658, in transpose
return _wrapfunc(a, 'transpose', axes)
File "/Users/vanderplas/.local/share/virtualenvs/jax-LBbfM5ix/lib/python3.8/site-packages/numpy/core/fromnumeric.py", line 58, in _wrapfunc
return bound(*args, **kwds)
ValueError: axes don't match array
```
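(Editor note: per the patch above, the reason the check was missed is that the early return in `lax.transpose` compared the permutation against `range(len(permutation))` rather than `range(operand.ndim)`, so an identity permutation that is too long returned the operand without ever reaching shape validation.)

```python
permutation = (0, 1, 2, 3)
print(permutation == tuple(range(len(permutation))))  # True  -> old code returned x unvalidated
print(permutation == tuple(range(3)))                 # False -> fixed code falls through and validates
```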
| 2021-06-18T23:57:41 |
||
google/jax | 7,100 | google__jax-7100 | [
"6098"
] | 597879752889277adeaf36c26816b532918fee53 | diff --git a/jax/_src/api.py b/jax/_src/api.py
--- a/jax/_src/api.py
+++ b/jax/_src/api.py
@@ -2408,7 +2408,8 @@ def __init__(self, shape, dtype):
return tree_unflatten(out_tree(), out)
-def checkpoint(fun: Callable, concrete: bool = False) -> Callable:
+def checkpoint(fun: Callable, concrete: bool = False, prevent_cse: bool = True,
+ ) -> Callable:
"""Make ``fun`` recompute internal linearization points when differentiated.
The :func:`jax.checkpoint` decorator, aliased to ``jax.remat``, provides a
@@ -2446,6 +2447,14 @@ def checkpoint(fun: Callable, concrete: bool = False) -> Callable:
control flow is optional, and disabled by default, because in some
edge-case compositions with :func:`jax.jit` it can lead to some extra
computation.
+ prevent_cse: Optional, boolean indicating whether to prevent common
+ subexpression elimination (CSE) optimizations in the HLO generated from
+ differentiation. This CSE prevention has costs because it can foil other
+ optimizations, and because it can incur high overheads on some backends,
+ especially GPU. The default is True because otherwise, under a ``jit`` or
+ ``pmap``, CSE can defeat the purpose of this decorator. But in some
+ settings, like when used inside a ``scan``, this CSE prevention mechanism
+ is unnecessary, in which case ``prevent_cse`` can be set to False.
Returns:
A function (callable) with the same input/output behavior as ``fun`` but
@@ -2496,7 +2505,8 @@ def fun_remat(*args, **kwargs):
args_flat, in_tree = tree_flatten((args, kwargs))
flat_fun, out_tree = flatten_fun(lu.wrap_init(fun), in_tree)
out_flat = pe.remat_call(flat_fun, *args_flat, name=flat_fun.__name__,
- concrete=concrete)
+ concrete=concrete, prevent_cse=prevent_cse,
+ differentiated=False)
return tree_unflatten(out_tree(), out_flat)
return fun_remat
remat = checkpoint
diff --git a/jax/interpreters/partial_eval.py b/jax/interpreters/partial_eval.py
--- a/jax/interpreters/partial_eval.py
+++ b/jax/interpreters/partial_eval.py
@@ -814,12 +814,12 @@ def _remat_partial_eval(trace, _, f, tracers, params):
# dce jaxpr outputs
new_jaxpr = _dce_jaxpr(closed_jaxpr, out_unknowns, drop_outputs=True).jaxpr
- new_params = dict(params, call_jaxpr=new_jaxpr)
+ new_params = dict(params, call_jaxpr=new_jaxpr, differentiated=True)
# set up eqn for unknown outputs
in_tracers = (*const_tracers, *env_tracers, *instantiated_tracers)
- eqn = new_eqn_recipe(in_tracers, unknown_output_tracers, remat_call_p, new_params,
- source_info_util.current())
+ eqn = new_eqn_recipe(in_tracers, unknown_output_tracers, remat_call_p,
+ new_params, source_info_util.current())
for t in unknown_output_tracers: t.recipe = eqn
return _zip_knowns(known_output_tracers, unknown_output_tracers, out_unknowns)
call_partial_eval_rules[remat_call_p] = _remat_partial_eval
diff --git a/jax/interpreters/xla.py b/jax/interpreters/xla.py
--- a/jax/interpreters/xla.py
+++ b/jax/interpreters/xla.py
@@ -1464,14 +1464,18 @@ def _remat_using_while(
def _remat_translation_rule(c, axis_env, in_nodes,
name_stack, backend, name, call_jaxpr,
- device=None, concrete=None):
+ prevent_cse, differentiated, concrete, device=None):
del device, concrete # Unused.
- if backend == "gpu":
- return _remat_using_while(
- c, axis_env, in_nodes, name_stack, backend, name, call_jaxpr)
+ if differentiated and prevent_cse:
+ if backend == "gpu":
+ return _remat_using_while(
+ c, axis_env, in_nodes, name_stack, backend, name, call_jaxpr)
+ else:
+ return _remat_using_cond(
+ c, axis_env, in_nodes, name_stack, backend, name, call_jaxpr)
else:
- return _remat_using_cond(
- c, axis_env, in_nodes, name_stack, backend, name, call_jaxpr)
+ outs = jaxpr_subcomp(c, call_jaxpr, backend, axis_env, (), "", *in_nodes)
+ return xops.Tuple(c, outs)
call_translations[pe.remat_call_p] = _remat_translation_rule # type: ignore
| diff --git a/tests/api_test.py b/tests/api_test.py
--- a/tests/api_test.py
+++ b/tests/api_test.py
@@ -3214,6 +3214,40 @@ def g():
with self.assertRaisesRegex(core.UnexpectedTracerError, "global state"):
api.jit(f)()
+ def test_no_cse_widget_on_primals(self):
+ @api.remat
+ def g(x):
+ return lax.sin(lax.sin(x)), 3.
+
+ def f(x):
+ x, _ = g(x)
+ return x
+
+ c = api.xla_computation(f)(2.)
+ self.assertNotIn('while', c.as_hlo_text())
+ self.assertNotIn('conditional', c.as_hlo_text())
+
+ c = api.xla_computation(grad(f))(2.)
+ text = c.as_hlo_text()
+ self.assertTrue('while' in text or 'conditional' in text)
+
+ def test_no_cse_widget_with_prevent_cse_false(self):
+ @partial(api.remat, prevent_cse=False)
+ def g(x):
+ return lax.sin(lax.sin(x)), 3.
+
+ def f(x):
+ x, _ = g(x)
+ return x
+
+ c = api.xla_computation(f)(2.)
+ self.assertNotIn('while', c.as_hlo_text())
+ self.assertNotIn('conditional', c.as_hlo_text())
+
+ c = api.xla_computation(grad(f))(2.)
+ self.assertNotIn('while', c.as_hlo_text())
+ self.assertNotIn('conditional', c.as_hlo_text())
+
class JaxprTest(jtu.JaxTestCase):
| remat_call results in unnecessary CSE foiling when used without partial_eval.
With remat, we want the forward pass to be lowered unmodified, and the backward pass to include both a recomputed forward and the backward computation. However, XLA does common subexpression elimination (CSE), which threatens this - so the `remat_call` primitive lowers with a CSE-foiling gadget.
However, foiling CSE has a cost, e.g. missed fusions and other optimizations, so we don't want to do it unless necessary. So, at the moment, the partial-eval-of-remat machinery removes the `remat_call` from the forward pass jaxpr, but keeps it in the backwards pass.
Lost in this logic is that sometimes we just... don't partial eval the `remat_call`, so it sticks around into the final jaxpr and is lowered with CSE-foiling even though it's not needed.
```python
import functools
import jax
import jax.numpy as jnp
@jax.remat
def f(x):
return jnp.sin(x)
print('Without partial_eval.')
print(jax.make_jaxpr(f)(jnp.zeros(4)))
print('\nWith partial_eval.')
print(jax.make_jaxpr(functools.partial(jax.vjp, f))(jnp.zeros(4)))
```
```
Without partial_eval.
{ lambda ; a.
let b = remat_call[ call_jaxpr={ lambda ; a.
let b = sin a
in (b,) }
concrete=False
name=f ] a
in (b,) }
With partial_eval.
{ lambda ; a.
let b = sin a
in (b, a) }
```
The conceptual error here is that one primitive is taking care of two concepts at once:
- Indicating that something is meant to be rematerialized.
- Indicating that we should foil XLA CSE around a certain block.
One solution: separate this into two calls, one is `remat_call` and the other is `atomic_call`. `remat_call` is inert when lowering; `atomic_call` isn't. Partial-evaling `remat_call` produces an `atomic_call` in the backwards pass.
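The `prevent_cse` option in the patch above takes a related route: the CSE-prevention widget is only emitted once the call has actually been differentiated, and callers can opt out entirely. A short editor-added usage sketch:

```python
import functools
import jax
import jax.numpy as jnp

# Per the docstring in the patch: inside a scan the CSE-foiling while/cond
# wrapper is unnecessary, so it can be switched off explicitly.
@functools.partial(jax.checkpoint, prevent_cse=False)
def block(x):
    return jnp.sin(jnp.sin(x))

y, vjp_fn = jax.vjp(block, jnp.ones(4))
```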
| As discussed with @mattjj | 2021-06-24T22:17:56 |
google/jax | 7,177 | google__jax-7177 | [
"7174"
] | c97d63dec33845a13c9243a8f821a7d093ff5cfa | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -46,7 +46,7 @@
# CPU-only jaxlib can be installed via:
# $ pip install jax[cpu]
- 'cpu': [f'jaxlib>={_minimum_jaxlib_version}'],
+ 'cpu': [f'jaxlib=={_current_jaxlib_version}'],
# Cloud TPU VM jaxlib can be installed via:
# $ pip install jax[tpu] -f https://storage.googleapis.com/jax-releases/jax_releases.html
| Constraints for jaxlib version during jax installation
In v0.2.11 jax added the nice optional installation targets `cpu`, `cudaXXX` and `tpu` which install jaxlib for the desired platform. Currently, the setup script guarantees that the installed version of jaxlib is compatible by constraining the version number from below to the earliest known compatible version. However, no similar constraint is put for a maximum compatible version number. Given that jaxlib does not appear to necessarily maintain backward compatibility between updates, this is an issue for installing older versions of jax (as these will always pull the latest available jaxlib version, leading to crashes due to incompatibility).
Jax installation should always result in a working setup, i.e., only the latest compatible version of jaxlib should be installed when installing jax. This either requires version constraints in jax's setup.py or backward compatibility guarantees in jaxlib (or a combination of both).
Please:
- [x] Check for duplicate requests.
- [x] Describe your goal, and if possible provide a code snippet with a motivating example.
| Thanks for the report β I believe we do pin jaxlib to the current version at the time of the given JAX release for GPU and TPU installation; see the source here: https://github.com/google/jax/blob/c97d63dec33845a13c9243a8f821a7d093ff5cfa/setup.py#L43-L60
That said, pip's behavior surrounding extra requirements and version specifications has surprised me before, so it may be that pip doesn't respect these version pinnings during actual installation, but I would consider that a pip bug.
A potential TODO is to pin the extra requirements for cpu to `jaxlib==_current_jaxlib_version` rather than `jaxlib>=_minimum_jaxlib_version`. Would that fix your issue?
Ah, I was apparently focusing only on the `cpu` target and didn't notice that for GPU and TPU this is indeed pinned. The todo you suggest would prevent the incompatibility problem I was worrying about, at the (small?) cost of also preventing users from benefitting from updates to jaxlib that would still be compatible to the version of jax they are installing. But I think this latter point is something that cannot be helped without some backward-compatibility policy reflected in version numbering for jaxlib/jax, so for now I'd settle for this pinning.
Sounds good - are you interested in sending a PR? If not I'm happy to take care of it.
I don't have the time right now, so please go ahead :) | 2021-07-02T16:51:56 |
|
google/jax | 7,188 | google__jax-7188 | [
"6445"
] | d349086ca5c676f5bcbb26b07b2c8e5946598431 | diff --git a/jax/_src/api.py b/jax/_src/api.py
--- a/jax/_src/api.py
+++ b/jax/_src/api.py
@@ -2328,7 +2328,35 @@ def _device_get(x):
else:
return copy()
-def device_get(x):
+def device_get(x: Any):
+ """Transfer ``x`` to host.
+
+ Args:
+ x: An array, scalar, DeviceArray or (nested) standard Python container thereof
+ representing the array to be transferred to host.
+
+ Returns:
+ An array or (nested) Python container thereof representing the
+ value of ``x``.
+
+ Examples:
+ Passing a DeviceArray:
+
+ >>> import jax
+ >>> x = jax.numpy.array([1., 2., 3.])
+ >>> jax.device_get(x)
+ array([1., 2., 3.], dtype=float32)
+
+ Passing a scalar (has no effect):
+
+ >>> jax.device_get(1)
+ 1
+
+ See Also:
+ - device_put
+ - device_put_sharded
+ - device_put_replicated
+ """
for y in tree_leaves(x):
try:
y.copy_to_host_async()
| Document jax.device_get
The function `jax.device_get` is not documented. Preferably, it should appear on this page
https://jax.readthedocs.io/en/latest/jax.html
The code for the function also does not provide any doc-string.
https://github.com/google/jax/blob/26e9ebcdae5e610d1173fac1bc3504a9c3f2011f/jax/_src/api.py#L2258
| 2021-07-04T12:13:49 |
||
google/jax | 7,258 | google__jax-7258 | [
"7256"
] | 651ddb5aa265fc7b13ec504f3cdc493ab3fdf7e1 | diff --git a/jax/_src/scipy/stats/gamma.py b/jax/_src/scipy/stats/gamma.py
--- a/jax/_src/scipy/stats/gamma.py
+++ b/jax/_src/scipy/stats/gamma.py
@@ -18,7 +18,7 @@
from jax._src.numpy.util import _wraps
from jax._src.numpy.lax_numpy import (_promote_args_inexact, _constant_like,
where, inf)
-from jax.scipy.special import gammaln
+from jax.scipy.special import gammaln, xlogy
@_wraps(osp_stats.gamma.logpdf, update_doc=False)
@@ -26,7 +26,7 @@ def logpdf(x, a, loc=0, scale=1):
x, a, loc, scale = _promote_args_inexact("gamma.logpdf", x, a, loc, scale)
one = _constant_like(x, 1)
y = lax.div(lax.sub(x, loc), scale)
- log_linear_term = lax.sub(lax.mul(lax.sub(a, one), lax.log(y)), y)
+ log_linear_term = lax.sub(xlogy(lax.sub(a, one), y), y)
shape_terms = lax.add(gammaln(a), lax.log(scale))
log_probs = lax.sub(log_linear_term, shape_terms)
return where(lax.lt(x, loc), -inf, log_probs)
| diff --git a/tests/scipy_stats_test.py b/tests/scipy_stats_test.py
--- a/tests/scipy_stats_test.py
+++ b/tests/scipy_stats_test.py
@@ -231,6 +231,11 @@ def args_maker():
tol=5e-4)
self._CompileAndCheck(lax_fun, args_maker)
+ def testGammaLogPdfZero(self):
+ # Regression test for https://github.com/google/jax/issues/7256
+ self.assertAllClose(
+ osp_stats.gamma.pdf(0.0, 1.0), lsp_stats.gamma.pdf(0.0, 1.0), atol=1E-6)
+
@genNamedParametersNArgs(3)
def testLaplaceLogPdf(self, shapes, dtypes):
rng = jtu.rand_positive(self.rng())
| jax.scipy.stats.gamma.pdf(x, a=1.) handles x=0 incorrectly
```python
scipy.stats.gamma.pdf(0., 1.) # = exp(0.)
# 1.0
jax.scipy.stats.gamma.pdf(0., 1.)
# DeviceArray(nan, dtype=float32)
```
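(Editor note, not part of the original report: the log-pdf implementation computes `(a - 1) * log(y)`, which at `x == 0, a == 1` becomes `0 * (-inf)` and hence `nan`, whereas `xlogy` - used in the patch above - defines `xlogy(0, 0) == 0`.)

```python
import jax.numpy as jnp
from jax.scipy.special import xlogy

a, y = 1.0, 0.0
print((a - 1) * jnp.log(y))  # 0 * (-inf) -> nan
print(xlogy(a - 1, y))       # defined to be 0.0
```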
| It's probably a simple matter of the implementation not using `xlogy`; I can look later. | 2021-07-12T21:37:02 |
google/jax | 7,267 | google__jax-7267 | [
"7186"
] | 208dd1ac3f5c4835bc3e3986427ec703e5699e73 | diff --git a/build/build.py b/build/build.py
--- a/build/build.py
+++ b/build/build.py
@@ -417,6 +417,12 @@ def main():
parser,
"enable_rocm",
help_str="Should we build with ROCm enabled?")
+ add_boolean_argument(
+ parser,
+ "enable_nccl",
+ default=True,
+ help_str="Should we build with NCCL enabled? Has non effect for non-CUDA "
+ "builds.")
parser.add_argument(
"--cuda_path",
default=None,
@@ -505,6 +511,7 @@ def main():
print("CUDA version: {}".format(args.cuda_version))
if args.cudnn_version:
print("CUDNN version: {}".format(args.cudnn_version))
+ print("NCCL enabled: {}".format("yes" if args.enable_nccl else "no"))
print("TPU enabled: {}".format("yes" if args.enable_tpu else "no"))
@@ -545,6 +552,8 @@ def main():
if args.enable_cuda:
config_args += ["--config=cuda"]
config_args += ["--define=xla_python_enable_gpu=true"]
+ if not args.enable_nccl:
+ config_args += ["--config=nonccl"]
if args.enable_tpu:
config_args += ["--define=with_tpu_support=true"]
if args.enable_rocm:
| Doesn't compile on Nvidia Jetson Nano
| There's no action we can take here without more information, given we don't have access to such a device. What happens when you try? Can you share the logs from the build?
> There's no action we can take here without more information, given we don't have access to such a device. What happens when you try? Can you share the logs from the build?
Actually, I got it working! Instructions in my message below.
Main problem with the Jetsons is that they're very slow on updating their Ubuntu version so they're still on 18.04 and so Python 3.6. By the way these are great devices (RPi on steroids basically) and I think they have a great future. The linked instructions above should also work on the higher end Jetsons such as Xavier AGX, Xavier NX, and TX1/2.
### INSTRUCTIONS FOR INSTALLING GOOGLE JAX ML ON 4GB JETSON NANO WITH SSD ###
SORTING OUT SWAP SPACE
Before you do any compiling of Jax, it's really important to have plenty of swap space, because the Jetson Nano's 4GB is not nearly enough. Even 5GB of swap space is not enough, because gcc got killed for out of memory. I've gone with 10GB. For this reason you will almost certainly have to be running your Jetson off a USB3.0 SSD rather than the SD-CARD (If you want to do this, JetsonHacks has a good tutorial and helper utilities: [Jetson Nano - Boot from USB - JetsonHacks](https://www.jetsonhacks.com/2021/03/10/jetson-nano-boot-from-usb/) HOWEVER, do NOT take the final bit of advice to remove the SD-CARD. With the latest jetpack this causes SSD boot to fail. You have to keep the SD card in at the same time as the SSD is plugged into USB).
Unfortunately, the Jetson is natively configured to use 2 GB of ZRAM swap, which is almost useless (other than a mild amount of compression). You need to get rid of ZRAM:
`sudo systemctl disable nvzramconfig`
Probably best to reboot the Nano after this step, just in case (I did). Then use htop to check that you have no swap. Finally, create a 10GB swap file using these instructions (make sure you replace `1G` with `10G` in the fallocate stage).
 [linuxize.com β 28 Nov 18](https://linuxize.com/post/how-to-add-swap-space-on-ubuntu-18-04/)

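In case that link goes stale, here is a condensed copy of the commands from that guide (editor-added, already adjusted to 10G as noted above; double-check against the guide before running):

```
sudo fallocate -l 10G /swapfile
sudo chmod 600 /swapfile
sudo mkswap /swapfile
sudo swapon /swapfile
echo '/swapfile swap swap defaults 0 0' | sudo tee -a /etc/fstab
```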
Now that you have a 10GB swap file (check it with htop), there's one more thing I recommend: unless you have a 4-amp or greater power supply (most are 3A), move the Jetson Nano over to its low power 5 watt mode. That's because the MAXN 10 watt mode will cause compilation on all 4 cores at 100%, for many hours, and especially if you have a fan and an SSD, the power draw might be bigger than your power supply can handle. That was certainly the case for me, where it crashed twice mid-compile. So use the Nvidia menu in the taskbar to turn it over to 5 watt mode. No more problems.
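(Editor-added note: if you prefer the command line to the taskbar menu, the equivalent on the Nano is usually the following - double-check the mode numbers for your JetPack release.)

```
sudo nvpmodel -q      # show the current power mode
sudo nvpmodel -m 1    # 5W mode on the Nano (mode 0 is MAXN)
```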
CUDA
Now ensure your CUDA PATH and LD_LIBRARY_PATH are set for CUDA support. Add these to your `.bashrc` :
```
export PATH=/usr/local/cuda/bin${PATH:+:${PATH}}
export LD_LIBRARY_PATH=/usr/local/cuda/lib64${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}
```
And re-source it with `source ~/.bashrc` .
FIXING PYTHON
Python 3.6.9 and the related stack on the Jetson will result in a build, but it won't work. You have to run a later version of Python, and I succeeded using Python 3.9. However, note that you don't want to replace the Jetson's system Python 3.6.9 as the default python3, as this will break a lot of stuff. Instead, we'll use a **virtualenv**. But before we do that, let's install Python 3.9 without making it the default Python:
```
sudo add-apt-repository ppa:deadsnakes/ppa
sudo apt update
sudo apt install python3.9 python3.9-dev
```
This will install `python3.9` in your /usr/bin directory. However, you'll notice that when you type `python3` you still get the system Python 3.6.9, which is what we want. To use Python 3.9 you have to explicitly run `python3.9`. However, we'll need a bunch of other things for Python 3.9, and for that we'll use a virtualenv. To install virtualenv we must first install pip3:
```
sudo apt install python3-pip
```
Now:
```
sudo pip3 install virtualenv
```
Remember to sudo here because we need it for all users.
Next we create a python3.9 virtualenv; here I'll create one called "py39":
```
virtualenv -p /usr/bin/python3.9 py39
```
Activate it:
```
source ./py39/bin/activate
```
This is now virgin territory, with no numpy, scipy, or anything else (even though they're installed in the system Python). So:
```
python3 -m pip install numpy scipy six wheel
```
BUILD
Now you're ready to install `jaxlib` from source. Install the necessary prerequisites with apt:
```
sudo apt install g++
```
Clone the jax repository and cd into it:
```
git clone https://github.com/google/jax
cd jax
```
You're finally ready to compile:
```
python3 build/build.py --enable_cuda
```
This step will take 12 hours. It's a humongous codebase. Your Jetson's swap usage will go above 5GB at times.
Once compiled:
`pip3 install dist/*.whl`
… but all this was just to get jaxlib up and running, not jax itself. There's one last step:
```
pip3 install -e .
```
Done.
ADDENDUM:
Couple of things:
a) The first time you do anything with Jax on the Nano, (such as using `PRNGKey` function) it'll take a _really_ long time for it to respond. You might think it's crashed. It hasn't. It's doing some weird warmup in the background but once that's done, it'll work fine.
b) I highly recommend running LXDE and not Unity as this will free up a full gigabyte of RAM. Remember the Jetsons use system ram for the GPU as well (https://www.jetsonhacks.com/2020/11/07/save-1gb-of-memory-use-lxde-on-your-jetson/)
Awesome!
Let us know if there's something we could do to improve our documentation.
I think the things we could improve here might be: (a) to support cross-compilation so you can build jaxlib on some other beefier machine but target it for the Jetson device, and (b) look into the long delay on first computation. (a) might work right now - we've never tried. For (b), if you had a Python profile of running a script that runs a single op that might tell us something. It's not obvious to me what would take a lot of time but only on the first execution.
> It's not obvious to me what would take a lot of time but only on the first execution.
In other frameworks this is PTX jit-compilation for kernels that weren't compiled for the right GPU architecture, but JAX uses its own JIT and doesn't ship many precompiled kernels at all. Maybe NCCL (which contains the vast majority of CUDA code that JAX compiles from source) was built for the wrong set of compute capabilities?
> Awesome!
>
> Let us know if there's something we could do to improve our documentation.
>
> I think the things we could improve here might be: (a) to support cross-compilation so you can build jaxlib on some other beefier machine but target it for the Jetson device, and (b) look into the long delay on first computation. (a) might work right now - we've never tried. For (b), if you had a Python profile of running a script that runs a single op that might tell us something. It's not obvious to me what would take a lot of time but only on the first execution.
Yeah I thought the documentation was pretty good actually and the bazel-based build worked really smoothly. Certainly not always the case when building other libraries on Nvidia's Jetson family because they are "quirky" in some respects. Yes not sure why the warmup takes so long and I haven't played around enough with it in the last few days but as soon as I get more insight I'll post it here, with a profile dump. I can confirm though that Jax is working well as I'm getting a 5x speedup on big dot products versus the native, NEON-optimized numpy. 5x is great if not huge, but it certainly proves it's working and means people can play with CUDA-enabled JAX for 99 bucks (though of course, colab is even cheaper!). | 2021-07-13T13:15:17 |
|
google/jax | 7,281 | google__jax-7281 | [
"5201"
] | 79c8259e91536542cb142468561757d8e3ad82dc | diff --git a/jax/_src/api.py b/jax/_src/api.py
--- a/jax/_src/api.py
+++ b/jax/_src/api.py
@@ -1320,16 +1320,17 @@ def batched_fun(*args, **kwargs):
return batched_fun
def _mapped_axis_size(tree, vals, dims, name, *, kws=False):
- def _get_axis_size(name: str, i:int, shape: Tuple[int, ...], axis: int):
+ def _get_axis_size(name: str, shape: Tuple[int, ...], axis: int):
try:
return shape[axis]
except (IndexError, TypeError) as e:
- ranks = tree_unflatten(tree, [np.ndim(x) for x, d in zip(vals, dims)])
- raise ValueError(f"{name} got arg {i} of rank {len(shape)} but axis to be "
- f"mapped {axis}. The tree of ranks is:\n{ranks}") from e
+ min_rank = axis + 1 if axis >= 0 else -axis
+ raise ValueError(f"{name} was requested to map its argument along axis {axis}, "
+ f"which implies that its rank should be at least {min_rank}, "
+ f"but is only {len(shape)} (its shape is {shape})") from e
- mapped_axis_sizes = {_get_axis_size(name, i, np.shape(x), d)
- for i, (x, d) in enumerate(zip(vals, dims))
+ mapped_axis_sizes = {_get_axis_size(name, np.shape(x), d)
+ for x, d in zip(vals, dims)
if d is not None}
try:
size, = mapped_axis_sizes
| diff --git a/tests/api_test.py b/tests/api_test.py
--- a/tests/api_test.py
+++ b/tests/api_test.py
@@ -1968,8 +1968,10 @@ def h(a, b):
r"\(10, \[2, 2\]\)"):
api.vmap(h, in_axes=(0, 1))(X, [U, U])
- with self.assertRaisesRegex(
- ValueError, "vmap got arg 0 of rank 0 but axis to be mapped 0"):
+ error = (r"vmap was requested to map its argument along axis 0, which "
+ r"implies that its rank should be at least 1, but is only 0 "
+ r"\(its shape is \(\)\)")
+ with self.assertRaisesRegex(ValueError, error):
# The mapped inputs cannot be scalars
api.vmap(lambda x: x)(1.)
@@ -1978,8 +1980,10 @@ def h(a, b):
# If the output is mapped, there must be a non-None in_axes
api.vmap(lambda x: x, in_axes=None)(jnp.array([1., 2.]))
- with self.assertRaisesRegex(
- ValueError, "vmap got arg 0 of rank 1 but axis to be mapped 1"):
+ error = (r"vmap was requested to map its argument along axis 1, which "
+ r"implies that its rank should be at least 2, but is only 1 "
+ r"\(its shape is \(2,\)\)")
+ with self.assertRaisesRegex(ValueError, error):
api.vmap(lambda x: x, in_axes=1)(jnp.array([1., 2.]))
# Error is: TypeError: only integer scalar arrays can be converted to a scalar index
diff --git a/tests/batching_test.py b/tests/batching_test.py
--- a/tests/batching_test.py
+++ b/tests/batching_test.py
@@ -1059,7 +1059,11 @@ def testNegativeAxes(self):
self.assertAllClose(jax.vmap(jnp.sum, in_axes=-1)(x),
jnp.sum(x, axis=(0, 1)))
- with self.assertRaisesRegex(ValueError, "vmap got arg 0 of rank 3 but axis to be mapped -4"):
+
+ error = (r"vmap was requested to map its argument along axis -4, which "
+ r"implies that its rank should be at least 4, but is only 3 "
+ r"\(its shape is \(3, 4, 5\)\)")
+ with self.assertRaisesRegex(ValueError, error):
jax.vmap(jnp.sum, in_axes=-4)(x)
id = lambda y: y
| Unclear error message phrasing in pmap (and possibly vmap and soft_pmap)
_mapped_axis_size will give an error like the following if you attempt to inappropriately pmap a computation: "ValueError: pmap got arg 0 of rank 0 but axis to be mapped 0. The tree of ranks is:..." It is difficult to understand what the next steps for debugging are from the way this is phrased.
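A minimal repro (mirroring the regression test in the patch above) that triggers the message:

```python
import jax

jax.vmap(lambda x: x)(1.)
# ValueError: vmap got arg 0 of rank 0 but axis to be mapped 0. The tree of ranks is: ...
```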
| I agree with @cleichner. It is very hard to understand what it means and what is to be done in case this error appears. | 2021-07-14T11:40:28 |
google/jax | 7,325 | google__jax-7325 | [
"7287"
] | 2ba686ca194e7606f16a4722810b66c903971249 | diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -1103,7 +1103,8 @@ def _sinc_maclaurin_jvp(k, primals, tangents):
return _sinc_maclaurin(k, x), _sinc_maclaurin(k + 1, x) * t
_ARRAY_VIEW_DOC = """
-The JAX version of this function will return a copy rather than a view of the input.
+The JAX version of this function may in some cases return a copy rather than a
+view of the input.
"""
@_wraps(np.transpose, lax_description=_ARRAY_VIEW_DOC)
| ravel, flatten don't return copies of 1D arrays
The docstring for ravel states:
```
Return a contiguous flattened array.
LAX-backend implementation of :func:`ravel`.
The JAX version of this function will return a copy rather than a view of the input.
```
But this isn't true if the array is already flattened:
```python
import jax.numpy as jnp
x = jnp.array([1, 2, 3])
x.ravel().unsafe_buffer_pointer() == x.unsafe_buffer_pointer() #False
```
Why does this matter? It probably doesn't, but the behavior doesn't match the documentation.
| 2021-07-19T13:50:16 |
||
google/jax | 7,345 | google__jax-7345 | [
"5636"
] | b6e25fa00c31c12cc4d302e80a44baf6fee3327d | diff --git a/jax/core.py b/jax/core.py
--- a/jax/core.py
+++ b/jax/core.py
@@ -18,6 +18,7 @@
from contextlib import contextmanager
from collections import namedtuple
from functools import total_ordering
+import gc
import itertools as it
from weakref import ref
import threading
@@ -736,6 +737,17 @@ def reset_trace_state() -> bool:
def cur_sublevel() -> Sublevel:
return thread_local_state.trace_state.substack[-1]
+def maybe_find_leaked_tracers(x: Optional[Union[MainTrace, Sublevel]]):
+ """Find the leaked tracers holding a reference to the MainTrace or SubLevel.
+
+ It's possible there's none! eg. there's some cases where JAX itself holds a
+ reference to `x` inside of a lambda closure, and no tracers were leaked
+ by the user. In this case an empty list is returned.
+ """
+ traces = list(filter(lambda x: isinstance(x, Trace), gc.get_referrers(x)))
+ tracers = list(filter(lambda x: isinstance(x, Tracer), gc.get_referrers(*traces)))
+ return tracers
+
@contextmanager
def new_main(trace_type: Type[Trace],
dynamic: bool = False,
@@ -761,7 +773,9 @@ def new_main(trace_type: Type[Trace],
t = ref(main)
del main
if t() is not None:
- raise Exception(f'Leaked trace {t()}')
+ leaked_tracers = maybe_find_leaked_tracers(t())
+ if leaked_tracers:
+ raise Exception(f'Leaked level {t()}. Leaked tracer(s): {leaked_tracers}.')
@contextmanager
def new_base_main(trace_type: Type[Trace]) -> Generator[MainTrace, None, None]:
@@ -782,7 +796,9 @@ def new_base_main(trace_type: Type[Trace]) -> Generator[MainTrace, None, None]:
t = ref(main)
del main
if t() is not None:
- raise Exception('Leaked trace {}'.format(t()))
+ leaked_tracers = maybe_find_leaked_tracers(t())
+ if leaked_tracers:
+ raise Exception(f'Leaked level {t()}. Leaked tracer(s): {leaked_tracers}.')
@contextmanager
def eval_context():
@@ -802,7 +818,9 @@ def new_sublevel() -> Generator[None, None, None]:
t = ref(sublevel)
del sublevel
if t() is not None:
- raise Exception(f'Leaked sublevel {t()}.')
+ leaked_tracers = maybe_find_leaked_tracers(t())
+ if leaked_tracers:
+ raise Exception(f'Leaked sublevel {t()}. Leaked tracer(s): {leaked_tracers}.')
def full_lower(val):
if isinstance(val, Tracer):
| diff --git a/tests/api_test.py b/tests/api_test.py
--- a/tests/api_test.py
+++ b/tests/api_test.py
@@ -2596,6 +2596,23 @@ def g(x):
with self.assertRaisesRegex(Exception, r"Leaked sublevel"):
f(3)
+ def test_leak_checker_avoids_false_positive_custom_jvp(self):
+ # see https://github.com/google/jax/issues/5636
+ with jax.checking_leaks():
+ @api.custom_jvp
+ def t(y):
+ return y
+
+ def t_jvp(p, t):
+ pass
+
+ t.defjvp(t_jvp)
+
+ @jit
+ def s(y):
+ return t(y)
+ s(3) # doesn't crash
+
def test_default_backend(self):
first_local_device = api.local_devices()[0]
self.assertEqual(first_local_device.platform, api.default_backend())
| Spurious leak checker error with custom JVP
I'd love to be able to use the (great-looking) new leaked tracer tools, but I'm running into a problem. After I set
```
export JAX_CHECK_TRACER_LEAKS=1
```
and then run:
```python
import jax
@jax.custom_jvp
def t(y):
return y
def t_jvp(primals, tangents):
assert False
t.defjvp(t_jvp)
@jax.jit
def s(y):
return t(y)
s(0.0)
```
I unfortunately get
```
Exception: Leaked trace MainTrace(0,DynamicJaxprTrace)
```
There's possibly a deeper bug here because I'm getting leaked tracers in my code wtihout the flag, so I'm hoping this is a spurious error.
| Thanks for this repro! The leak checker definitely tends towards false positives right now, so it's a good a priori guess.
Tricky! There are at least two issues, both pertaining to [the same line](https://github.com/google/jax/blob/0f7cc80d38822c8753e076ff6d12022d969c2dbb/jax/interpreters/partial_eval.py#L1131-L1132):
1. That `_memoize` decorator is acting like a cache, and it's a cache we forgot to disable along with the others in #5492 when the leak checker is active. It may be a simple matter of disabling that memoization; I don't think it's necessary for correctness, though I have paged this out and could be wrong about that. All the tests pass, at least!
2. That [`lambda`](https://github.com/google/jax/blob/0f7cc80d38822c8753e076ff6d12022d969c2dbb/jax/interpreters/partial_eval.py#L1132) being created on that line keeps a reference to the MainTrace instance, and it's not being cleaned up fast enough, so the leak checker spots it.
I think both may only arise in the case where the jvp rule raises an exception.
The fix to (1) is easy, as with the other caches:
```python
def _memoize(thunk):
if core.debug_state.check_leaks:
return thunk
cell = []
saved_state = core.thread_local_state.trace_state.copy()
def memoized():
nonlocal thunk
if not cell:
prev_state = core.thread_local_state.trace_state
core.thread_local_state.trace_state = saved_state
try:
cell.append(thunk())
finally:
del thunk
core.thread_local_state.trace_state = prev_state
return cell[0]
return memoized
```
The fix to (2) I'm not sure about yet...
(I can't work on this more at the moment but wanted to spend a few mins on an initial investigation!)
Thanks for looking into this! Just so you know, I did find the problem (I was using `jax.jit` instead of `hk.jit`), so this isn't holding me up anymore. Hope the MWE is useful to you though :)
Woo, that's great to hear! Sounds like a common issue as well. We're working with the Haiku folks to improve things.
cc @LenaMartens just to bring this leak checker thread to her attention
@mattjj That's great to hear! Just to keep you informed, there's a beautiful solution to my problem by shoyer here https://github.com/google/jax/pull/4117. I'm just now trying to code up the Haiku side of things to get it to work locally. | 2021-07-21T14:51:14 |
google/jax | 7,369 | google__jax-7369 | [
"6415"
] | d1e1d65631adb9b197b0fcce3f34179d66df6473 | diff --git a/jax/_src/custom_derivatives.py b/jax/_src/custom_derivatives.py
--- a/jax/_src/custom_derivatives.py
+++ b/jax/_src/custom_derivatives.py
@@ -19,7 +19,6 @@
from typing import Callable, Generic, Optional, Sequence, Tuple, TypeVar, Any
from jax import core
-from jax._src import dtypes
from jax import linear_util as lu
from jax.tree_util import (tree_flatten, tree_unflatten, tree_map,
tree_multimap, treedef_is_leaf, treedef_tuple,
@@ -831,18 +830,21 @@ def rev(objective_fn, res, g):
else:
return _closure_convert_for_avals(fun, in_tree, in_avals)
+def _is_perturbed(x: Any) -> bool:
+ if isinstance(x, ad.JVPTracer):
+ return True
+ elif isinstance(x, core.Tracer):
+ return any(_is_perturbed(attr) for name, attr in x._contents())
+ else:
+ return False
+
@cache()
def _closure_convert_for_avals(fun, in_tree, in_avals):
wrapped_fun, out_tree = flatten_fun_nokwargs(lu.wrap_init(fun), in_tree)
jaxpr, out_pvals, consts = pe.trace_to_jaxpr_dynamic(wrapped_fun, in_avals)
out_tree = out_tree()
- # We only want to closure convert for constants with respect to which we're
- # differentiating. As a proxy for that, we hoist consts with float dtype.
- # TODO(frostig,mattjj): revise this approach
- from jax.numpy import inexact
- is_float = lambda c: dtypes.issubdtype(dtypes.dtype(c), inexact)
- (closure_consts, hoisted_consts), merge = partition_list(is_float, consts)
+ (closure_consts, hoisted_consts), merge = partition_list(_is_perturbed, consts)
num_consts = len(hoisted_consts)
def converted_fun(*args_hconsts):
diff --git a/jax/core.py b/jax/core.py
--- a/jax/core.py
+++ b/jax/core.py
@@ -568,7 +568,7 @@ def __getattr__(self, name):
def __repr__(self):
base = pp('Traced<{}>with<{}>'.format(self.aval, self._trace))
- contents = self._contents()
+ contents = [(name, pp(repr(attr))) for name, attr in self._contents()]
if contents:
base += pp(' with ') >> vcat(pp('{} = '.format(name)) >> pp_payload
for name, pp_payload in contents)
@@ -576,7 +576,7 @@ def __repr__(self):
def _contents(self):
try:
- return [(name, pp(repr(getattr(self, name)))) for name in self.__slots__]
+ return [(name, getattr(self, name)) for name in self.__slots__]
except AttributeError:
return ()
| diff --git a/tests/api_test.py b/tests/api_test.py
--- a/tests/api_test.py
+++ b/tests/api_test.py
@@ -5211,40 +5211,82 @@ def f(x):
self.assertAllClose(api.grad(api.grad(f))(3.), 1., check_dtypes=False)
def test_closure_convert(self):
- def minimize(objective_fn, x0):
- converted_fn, aux_args = api.closure_convert(objective_fn, x0)
- return _minimize(converted_fn, x0, *aux_args)
+ def cos_after(fn, x):
+ converted_fn, aux_args = api.closure_convert(fn, x)
+ self.assertLessEqual(len(aux_args), 1)
+ return _cos_after(converted_fn, x, *aux_args)
@partial(api.custom_vjp, nondiff_argnums=(0,))
- def _minimize(objective_fn, x0, *args):
- _ = objective_fn(x0, *args)
- return jnp.cos(x0)
+ def _cos_after(fn, x, *args):
+ return jnp.cos(fn(x, *args))
- def fwd(objective_fn, x0, *args):
- y = _minimize(objective_fn, x0, *args)
- return y, (y, args)
+ def fwd(fn, x, *args):
+ y = _cos_after(fn, x, *args)
+ return y, (x, args)
- def rev(objective_fn, res, g):
- y, args = res
- x0_bar = 17. * y
+ def rev(fn, res, g):
+ x, args = res
+ x_bar = 17. * x
args_bars = [42. * a for a in args]
- return (x0_bar, *args_bars)
+ return (x_bar, *args_bars)
- _minimize.defvjp(fwd, rev)
+ _cos_after.defvjp(fwd, rev)
- def obj(c, x):
+ def dist(c, x):
return jnp.sum((x - c) ** 2.)
def solve(c, x):
def closure(x):
- return obj(c, x)
- return jnp.sum(minimize(closure, x))
+ return dist(c, x)
+ return cos_after(closure, x)
- c, x = jnp.ones(2), jnp.zeros(2)
- self.assertAllClose(solve(c, x), 2.0, check_dtypes=False)
+ c, x = 2. * jnp.ones(2), jnp.ones(2)
+ expected = jnp.cos(dist(c, x))
+ self.assertAllClose(solve(c, x), expected, check_dtypes=False)
g_c, g_x = api.grad(solve, argnums=(0, 1))(c, x)
- self.assertAllClose(g_c, 42. * jnp.ones(2), check_dtypes=False)
- self.assertAllClose(g_x, 17. * jnp.ones(2), check_dtypes=False)
+ self.assertAllClose(g_c, 42. * c, check_dtypes=False)
+ self.assertAllClose(g_x, 17. * x, check_dtypes=False)
+
+ def test_closure_convert_mixed_consts(self):
+ # Like test_closure_convert, but close over values that
+ # participate in AD as well as values that do not.
+ # See https://github.com/google/jax/issues/6415
+
+ def cos_after(fn, x):
+ converted_fn, aux_args = api.closure_convert(fn, x)
+ self.assertLessEqual(len(aux_args), 1)
+ return _cos_after(converted_fn, x, *aux_args)
+
+ @partial(api.custom_vjp, nondiff_argnums=(0,))
+ def _cos_after(fn, x, *args):
+ return jnp.cos(fn(x, *args))
+
+ def fwd(fn, x, *args):
+ y = _cos_after(fn, x, *args)
+ return y, (x, args)
+
+ def rev(fn, res, g):
+ x, args = res
+ x_bar = 17. * x
+ args_bars = [42. * a for a in args]
+ return (x_bar, *args_bars)
+
+ _cos_after.defvjp(fwd, rev)
+
+ def dist(c, s, x):
+ return jnp.sum(s * (x - c) ** 2.)
+
+ def solve(c, s, x):
+ def closure(x):
+ return dist(c, s, x)
+ return cos_after(closure, x)
+
+ c, s, x = 2. * jnp.ones(2), 3. * jnp.ones(2), jnp.ones(2)
+ expected = jnp.cos(dist(c, s, x))
+ self.assertAllClose(solve(c, s, x), expected, check_dtypes=False)
+ g_c, g_x = api.grad(solve, argnums=(0, 2))(c, s, x)
+ self.assertAllClose(g_c, 42. * c, check_dtypes=False)
+ self.assertAllClose(g_x, 17. * x, check_dtypes=False)
class CustomTransposeTest(jtu.JaxTestCase):
| Hiding constants from closure_convert and the AD system
Use case: Higher order functions (like ODE solvers and optimizers) with custom VJP rules.
Consider this example form the JAX documentation for `closure_convert`:
```
def minimize(objective_fn, x0):
converted_fn, aux_args = closure_convert(objective_fn, x0)
return _minimize(converted_fn, x0, *aux_args)
@partial(custom_vjp, nondiff_argnums=(0,))
def _minimize(objective_fn, x0, *args):
z = objective_fn(x0, *args)
# ... find minimizer x_opt ...
return x_opt
def fwd(objective_fn, x0, *args):
y = _minimize(objective_fn, x0, *args)
return y, (y, args)
def rev(objective_fn, res, g):
y, args = res
y_bar = g
# ... custom reverse-mode AD ...
return x0_bar, *args_bars
_minimize.defvjp(fwd, rev)
```
Imagine now that `objective_fn` depends on a large matrix of constants `c`, e.g. captured via closure. Then `closure_convert` pulls this matrix out and makes it an explicit argument of `converted_fn`.
During the reverse pass, `c_bar` has to be calculated as part of `args_bar`. This computation can be expensive and complex but it is not needed if differentiation with respect to `c` is never required or does not make sense.
There should be a way for the author of `objective_fn` to annotate `c` accordingly. This cannot be done at the level of the minimizer, where it is not known which `args` are needed in the VJP and which ones are not.
In simple cases, it is conceivable that XLA manages to optimize out the unused `c_bar`. That does not work for more complex cases, e.g. [the backward pass of an ODE integrator that has to integrate an augmented system of ODEs](https://github.com/google/jax/blob/3ac809ede30d961b873ca62f9828090709ced6be/jax/experimental/ode.py#L221).
In our concrete application (ODE integration where the dynamics function captures a large matrix), the impact of calculating `c_bar` is such that XLA consumes >44 GB of RAM during the compilation stage while attempting to generate code for the Jacobian of the dynamics function with respect to each and every element of `c`.
Can `closure_convert` be modified to not extract certain captured variables? At the moment, it ignores integer arrays, for example.
I also see parallels to https://github.com/google/jax/issues/5913, which is more focused on the JVP case.
| The `custom_vjp` convention is to return `None` for a symbolic zero cotangent. See [the example here](https://jax.readthedocs.io/en/latest/notebooks/Custom_derivative_rules_for_Python_code.html#gradient-clipping). This would amount to setting the `c_bar` that you describe to `None` instead of filling a large matrix.
In principle, this would still define a cotangent (at very low cost), even in cases where there ought to be no meaningful derivative at all (zero or otherwise). Still, would this suffice as a workaround in your case?
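To make that concrete, an editor-added sketch (with a hypothetical `compute_x0_bar` helper standing in for whatever the real rule computes) of what the `rev` rule from the example above would look like under the `None` convention:

```python
def rev(objective_fn, res, g):
    y, args = res
    x0_bar = compute_x0_bar(y, g)   # hypothetical helper, for illustration only
    args_bars = [None] * len(args)  # symbolic-zero cotangents for the hoisted constants
    return (x0_bar, *args_bars)
```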
Even if so, possible improvements from our end:
1. Mention the `None` convention in the `custom_vjp` docstring directly.
2. Allow for custom derivative arguments that do not participate in differentiation.
The custom VJP is defined on `_minimize`. The author of `_minimize` or some other higher-order function in a library cannot determine for which elements in `args` to return `None`. As `closure_convert` has to be used for correctness, the forward pass is handed a fairly opaque list of `args`.
Only the author of `objective_fn`, i.e. the user of the library, can determine which arguments or captured values should be ignored.
For comparison: [PyTorch autograd allows tensors to be excluded from the autograd DAG](https://pytorch.org/tutorials/beginner/blitz/autograd_tutorial.html#exclusion-from-the-dag).
Arguably, JAX autograd automatically opts in every floating point tensor. For the scenario described here, I see two solutions:
- An opt-out facility for the matrix of constants `c`.
- I believe that tracing should reveal that `c_bar` is unused. If that information could be made available to the backward pass, `None` could be returned for the appropriate arguments.
I wonder if it would help to dynamically create the `custom_vjp` decorated function _inside_ `minimize`? This might even be reasonably maintainable, as long as the library provides suitable helper functions for implementing the gradient rules.
> For comparison: [PyTorch autograd allows tensors to be excluded from the autograd DAG](https://pytorch.org/tutorials/beginner/blitz/autograd_tutorial.html#exclusion-from-the-dag).
>
> Arguably, JAX autograd automatically opts in every floating point tensor. For the scenario described here, I see two solutions:
>
> * An opt-out facility for the matrix of constants `c`.
> * I believe that tracing should reveal that `c_bar` is unused. If that information could be made available to the backward pass, `None` could be returned for the appropriate arguments.
JAX does have functionality like this, via `lax.stop_gradient`.
The problem is that it only works for normal JAX autodiff rules, i.e., with VJP implemented via transpose of JVP. This currently requires the lower-level JAX primitive API.
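For reference, a small editor-added sketch of the `stop_gradient` route with ordinary JAX autodiff (no `custom_vjp` involved):

```python
import jax
import jax.numpy as jnp
from jax import lax

def objective(c, x):
    # c is hidden from AD entirely; no cotangent is ever computed for it.
    return jnp.sum((x - lax.stop_gradient(c)) ** 2)

print(jax.grad(objective, argnums=0)(jnp.ones(3), jnp.zeros(3)))  # all zeros
```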
If possible, let's separate:
1. concerns about AD efficiency (unwittingly computing a cotangent that is never read downstream); from
2. concerns about AD correctness (a cotangent is mathematically ill-defined, but jax still requires a definition); from
3. limitations of the `closure_convert` utility.
Is this request captured entirely by any one of the above? A complete example would help.
Based on more recent comments, my new guess is that this is scoped to item 3. Today's `closure_convert` extracts all floating-point closure-captured arrays as a heuristic. If `closure_convert` hoisted only arguments that could possibly participate in differentiation instead, this would reduce the concerns of items 1 and 2 when using `closure_convert` together with `custom_vjp`. Does that sound right?
If so, then that's what [this TODO comment](https://github.com/google/jax/blob/ad342419b87fbe08afe9a5f959704dee3a654a0d/jax/custom_derivatives.py#L825-L827) from @mattjj and me is about, and we can use this issue to track it.
From my point of view:
The need to use `closure_convert` and its limitations (3) are negatively affecting AD efficiency (1).
OK, let's track that here then. I suspect it will be easiest to tackle `closure_convert`'s heuristic first. If not, we can consider our options for changing the calling convention of a `custom_jvp`'s forward and reverse steps, or maybe improving DCE in AD somehow.
By the way, #5913 is about the JVP analogue of (2).
Have any paths forward to modify `closure_convert` been identified? | 2021-07-24T00:38:46 |
google/jax | 7,412 | google__jax-7412 | [
"7411"
] | fcda67fb2acd72d2aaebb25b6501204ea7b7a58e | diff --git a/jax/core.py b/jax/core.py
--- a/jax/core.py
+++ b/jax/core.py
@@ -307,11 +307,13 @@ def extract_call_jaxpr(
return (params["call_jaxpr"], new_params)
+# TODO(mattjj): replace this approach with a primitive-keyed table of rules
def traverse_jaxpr_params(f, params):
"""Applies f to each jaxpr parameter and returns a tuple of returned values."""
- return {name: f(param)
+ return {name: f(p)
for name, param in params.items()
- if type(param) in (Jaxpr, ClosedJaxpr)}
+ for p in (param if isinstance(param, (tuple, list)) else [param])
+ if type(p) in (Jaxpr, ClosedJaxpr)}
def eval_jaxpr(jaxpr: Jaxpr, consts, *args):
| diff --git a/tests/pmap_test.py b/tests/pmap_test.py
--- a/tests/pmap_test.py
+++ b/tests/pmap_test.py
@@ -1701,6 +1701,17 @@ def func(_):
out_dtype = func(unused_arg).dtype
self.assertEqual(out_dtype, dtype)
+ def test_num_replicas_with_switch(self):
+ # https://github.com/google/jax/issues/7411
+ def identity(x):
+ return x
+
+ def cond_of_pmap(x):
+ y = lax.cond(True, jax.pmap(identity), jax.pmap(identity), x)
+ return y
+
+ cond_of_pmap(jnp.zeros((xla_bridge.device_count(), 2)))
+
class VmapOfPmapTest(jtu.JaxTestCase):
| num replicas not correctly detected with `lax.switch` / `lax.cond`
Thanks to @LenaMartens for the repro!
```python
import os
os.environ['XLA_FLAGS'] = '--xla_force_host_platform_device_count=32'
import jax
def identity(x):
return x
def cond_of_pmap(x):
y = jax.lax.cond(True, lambda x: jax.pmap(identity)(x), lambda x: jax.pmap(identity)(x), x)
return y
cond_of_pmap(jax.numpy.zeros((8, 2)))
```
```
AssertionError Traceback (most recent call last)
google3/third_party/py/jax/interpreters/xla.py in axis_groups(axis_env, name)
533 mesh_axes = tuple(unsafe_map(partial(axis_read, axis_env), name))
534 trailing_size, ragged = divmod(axis_env.nreps, prod(axis_env.sizes))
--> 535 assert not ragged
536 mesh_spec = axis_env.sizes + (trailing_size,)
537 return _axis_groups(mesh_spec, mesh_axes)
AssertionError:
```
The issue was in `core.traverse_jaxpr_params`, which assumed that all subjaxprs appear as values in the params dict, while `lax.switch` (aka `lax.cond`) actually has a param value which is a _tuple_ of jaxprs. Those tuple elements were being ignored!
I'm going to make a quick fix which just flattens params to one level, but I think a better approach here would be to have a primitive-keyed table of rules for traversing subjaxprs.
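A purely hypothetical sketch (editor-added, not the implemented fix) of what such a primitive-keyed table could look like; the names here are made up for illustration:

```python
from jax.core import Jaxpr, ClosedJaxpr

# Primitives with unusual param layouts register how to find their subjaxprs;
# everything else falls back to scanning top-level param values.
subjaxpr_rules = {}

def subjaxprs_of(primitive, params):
    rule = subjaxpr_rules.get(primitive)
    if rule is not None:
        return list(rule(params))
    return [p for p in params.values() if isinstance(p, (Jaxpr, ClosedJaxpr))]
```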
| 2021-07-29T17:35:13 |
|
google/jax | 7,447 | google__jax-7447 | [
"7421"
] | 5c3fc6c9f0ec10d46ce6878caecbab53220a4b61 | diff --git a/jax/experimental/optimizers.py b/jax/experimental/optimizers.py
--- a/jax/experimental/optimizers.py
+++ b/jax/experimental/optimizers.py
@@ -478,22 +478,24 @@ def broadcast_into(ndim, x, axis):
return x[tuple(idx)]
def init(x0):
+ x_shape = x0.shape
+ x0 = jnp.atleast_1d(x0)
vs = [jnp.zeros(sz, dtype=x0.dtype) for sz in x0.shape]
- return x0, jnp.zeros_like(x0), vs
+ return x0, jnp.zeros_like(x0), vs, x_shape
def update(i, g, state):
- x, m, vs = state
+ x, m, vs, x_shape = state
vs = [broadcast_into(g.ndim, v, i) for i, v in enumerate(vs)]
accum = functools.reduce(jnp.minimum, vs) + jnp.square(g)
accum_inv_sqrt = jnp.where(accum > 0, 1. / jnp.sqrt(accum), 0)
m = (1. - momentum) * (g * accum_inv_sqrt) + momentum * m
x = x - step_size(i) * m
vs = [accum.max(splice(range(x.ndim), j, [])) for j in range(x.ndim)]
- return x, m, vs
+ return x, m, vs, x_shape
def get_params(state):
- x, _, _ = state
- return x
+ x, _, _, x_shape = state
+ return x.reshape(x_shape)
return init, update, get_params
| diff --git a/tests/optimizers_test.py b/tests/optimizers_test.py
--- a/tests/optimizers_test.py
+++ b/tests/optimizers_test.py
@@ -138,7 +138,14 @@ def loss(xs):
x0 = (jnp.ones(2), jnp.ones((2, 2)))
self._CheckOptimizer(optimizers.adagrad, loss, x0, num_iters, step_size)
- def testSM3(self):
+ def testSM3Scalar(self):
+ def loss(x): return x**2
+ x0 = jnp.array(1.)
+ num_iters = 100
+ step_size = 0.1
+ self._CheckOptimizer(optimizers.sm3, loss, x0, num_iters, step_size)
+
+ def testSM3Vector(self):
def loss(xs):
x1, x2 = xs
return jnp.sum(x1 ** 2) + jnp.sum(x2 ** 2)
| sm3 optimizer doesnt work with scalar data
This is almost tutorial code; it doesn't work with sm3 because of scalar data.
```python
import jax
import jax.numpy as jnp
import numpy as np
from typing import NamedTuple
import functools
class Params(NamedTuple):
weight: jnp.ndarray
bias: jnp.ndarray
def init(rng) -> Params:
"""Returns the initial model params."""
weights_key, bias_key = jax.random.split(rng)
weight = jax.random.normal(weights_key, ())
bias = jax.random.normal(bias_key, ())
return Params(weight, bias)
def loss_fn(params: Params, xs: jnp.ndarray, ys: jnp.ndarray) -> jnp.ndarray:
"""Computes the least squares error of the model's predictions on x against y."""
pred = params.weight * xs + params.bias
return jnp.mean((pred - ys) ** 2)
LEARNING_RATE = 0.005
true_w, true_b = 2, -1
xs = np.random.normal(size=(128, 1))
noise = 0.5 * np.random.normal(size=(128, 1))
ys = xs * true_w + true_b + noise
# Initialise parameters and replicate across devices.
params = init(jax.random.PRNGKey(123))
from jax.experimental.optimizers import sm3, adam
opt_init, opt_update, get_params = sm3(step_size=LEARNING_RATE)
opt_state = opt_init(params)
def update(params: Params, opt_state, xs: jnp.ndarray, ys: jnp.ndarray, i) -> Params:
"""Performs one SGD update step on params using the given data."""
# Compute the gradients on the given minibatch (individually on each device).
loss, grads = jax.value_and_grad(loss_fn)(params, xs, ys)
# Each device performs its own update, but since we start with the same params
# and synchronise gradients, the params stay in sync.
opt_state = opt_update(i, grads, opt_state)
params = get_params(opt_state)
return params, opt_state, loss
def type_after_update(name, obj):
print(f"after first `update()`, `{name}` is a", type(obj))
# Actual training loop.
for i in range(1000):
# This is where the params and data gets communicated to devices:
params, opt_state, loss = update(params, opt_state, xs, ys, i)
# The returned `replicated_params` and `loss` are now both ShardedDeviceArrays,
# indicating that they're on the devices.
# `x_split`, of course, remains a NumPy array on the host.
if i == 0:
type_after_update('replicated_params.weight', params.weight)
type_after_update('loss', loss)
type_after_update('x_split', xs)
if i % 100 == 0:
# Note that loss is actually an array of shape [num_devices], with identical
# entries, because each device returns its copy of the loss.
# So, we take the first element to print it.
print(f"Step {i:3d}, loss: {loss:.3f}")
```
The issue is in this line of code in the sm3 optimizer:
```
vs = [jnp.zeros(sz, dtype=x0.dtype) for sz in x0.shape]
```
If x0 is a scalar, then `x0.shape` is empty and this generates an empty list of accumulators. It's possible to fix that with something like this:
```
def s2v(s):
return s if s.ndim > 0 else jnp.array([s])
def init(x0):
vs = [jnp.zeros(sz, dtype=x0.dtype) for sz in s2v(x0).shape]
return x0, jnp.zeros_like(x0), vs
```
but it will lead to outputs containing 1-element arrays instead of scalars.
| If you define it this way:
```python
def s2v(s):
return jnp.asarray(s)
```
it should work even for scalar inputs.
Perhaps the optimizer should call `jnp.asarray` on its inputs to avoid this issue... would you be interested in contributing a PR?
Hello @jakevdp, and thank you for the reply. I believe JAX treats scalars as 0-dimensional arrays, so calling `jnp.asarray` won't change anything. It's probably better to add `jnp.zeros(1)` to the input to use broadcasting.
```
def init(x0):
vs = [jnp.zeros(sz, dtype=x0.dtype) for sz in (x0+jnp.zeros(1)).shape]
return x0, jnp.zeros_like(x0), vs
```
I'll submit a PR for that. | 2021-08-02T16:01:25 |
google/jax | 7,455 | google__jax-7455 | [
"7390"
] | 6984f30d5ea701b7eff360a0724d0b1fb88ce59d | diff --git a/jax/_src/scipy/special.py b/jax/_src/scipy/special.py
--- a/jax/_src/scipy/special.py
+++ b/jax/_src/scipy/special.py
@@ -121,7 +121,7 @@ def logsumexp(a, axis=None, b=None, keepdims=False, return_sign=False):
axis=dims, keepdims=keepdims)),
amax)
sign = jnp.where(jnp.isnan(out), np.nan, 1.0).astype(out.dtype)
- sign = jnp.where(out == -np.inf, 0.0, sign)
+ sign = jnp.where(jnp.isneginf(out), 0.0, sign)
else:
expsub = lax.exp(lax.sub(a, amax_with_dims))
if b is not None:
| diff --git a/tests/lax_scipy_test.py b/tests/lax_scipy_test.py
--- a/tests/lax_scipy_test.py
+++ b/tests/lax_scipy_test.py
@@ -25,6 +25,7 @@
import numpy as np
import scipy.special as osp_special
+import jax
from jax._src import api
from jax import numpy as jnp
from jax import lax
@@ -205,6 +206,13 @@ def testLogSumExpZeros(self):
self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker)
self._CompileAndCheck(lax_fun, args_maker)
+ def testLogSumExpOnes(self):
+ # Regression test for https://github.com/google/jax/issues/7390
+ args_maker = lambda: [np.ones(4, dtype='float32')]
+ with jax.debug_infs(True):
+ self._CheckAgainstNumpy(osp_special.logsumexp, lsp_special.logsumexp, args_maker)
+ self._CompileAndCheck(lsp_special.logsumexp, args_maker)
+
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(
| Debug infs and logsumexp
I've found a difference in behavior between the latest versions and a (recent) previous version of the library I was using. I found this trying to debug a larger issue on an hpc application being build using jax but when trying to debug that, this was thrown which might point to the root cause of the larger issue.
I wanted to clarify if the following is expected:
Specifically it appears that when trying to use the debug_infs config option, with a special function that does some checking for np.inf itself (in this case logsumexp), the latest version of the library from pip will throw an error that was passed previously
MWE:
```python
from jax.scipy.special import logsumexp
from jax.config import config
config.update("jax_debug_infs", True)
import numpy as np
x=np.ones(10)
y=logsumexp(x)
print(y)
```
Using jaxlib 0.1.64 and jax 0.2.10 this prints the value fine (note that none of the values in the array being summed are inf).
After updating to jaxlib 0.1.69 and jax 0.2.18, the following error is shown:
```
Traceback (most recent call last):
File "test_inf.py", line 9, in <module>
y=logsumexp(x)
File "/rds/user/dy297/hpc-work/install/python/lib/python3.8/site-packages/jax/_src/scipy/special.py", line 124, in logsumexp
sign = jnp.where(out == -np.inf, 0.0, sign)
File "/rds/user/dy297/hpc-work/install/python/lib/python3.8/site-packages/jax/_src/numpy/lax_numpy.py", line 5871, in deferring_binary_op
return binary_op(self, other)
File "/rds/user/dy297/hpc-work/install/python/lib/python3.8/site-packages/jax/_src/numpy/lax_numpy.py", line 421, in <lambda>
fn = lambda x1, x2: lax_fn(*_promote_args(numpy_fn.__name__, x1, x2))
File "/rds/user/dy297/hpc-work/install/python/lib/python3.8/site-packages/jax/_src/numpy/lax_numpy.py", line 326, in _promote_args
return _promote_shapes(fun_name, *_promote_dtypes(*args))
File "/rds/user/dy297/hpc-work/install/python/lib/python3.8/site-packages/jax/_src/numpy/lax_numpy.py", line 272, in _promote_dtypes
return [lax._convert_element_type(x, to_dtype, weak_type) for x in args]
File "/rds/user/dy297/hpc-work/install/python/lib/python3.8/site-packages/jax/_src/numpy/lax_numpy.py", line 272, in <listcomp>
return [lax._convert_element_type(x, to_dtype, weak_type) for x in args]
File "/rds/user/dy297/hpc-work/install/python/lib/python3.8/site-packages/jax/_src/lax/lax.py", line 454, in _convert_element_type
return convert_element_type_p.bind(operand, new_dtype=new_dtype,
File "/rds/user/dy297/hpc-work/install/python/lib/python3.8/site-packages/jax/core.py", line 264, in bind
out = top_trace.process_primitive(self, tracers, params)
File "/rds/user/dy297/hpc-work/install/python/lib/python3.8/site-packages/jax/core.py", line 604, in process_primitive
return primitive.impl(*tracers, **params)
File "/rds/user/dy297/hpc-work/install/python/lib/python3.8/site-packages/jax/interpreters/xla.py", line 262, in apply_primitive
return compiled_fun(*args)
File "/rds/user/dy297/hpc-work/install/python/lib/python3.8/site-packages/jax/interpreters/xla.py", line 379, in _execute_compiled_primitive
check_special(prim.name, out_bufs)
File "/rds/user/dy297/hpc-work/install/python/lib/python3.8/site-packages/jax/interpreters/xla.py", line 398, in check_special
_check_special(name, buf.xla_shape(), buf)
File "/rds/user/dy297/hpc-work/install/python/lib/python3.8/site-packages/jax/interpreters/xla.py", line 406, in _check_special
raise FloatingPointError(f"invalid value (inf) encountered in {name}")
FloatingPointError: invalid value (inf) encountered in convert_element_type
```
And it appears to be flagging the -np.inf literal used for the check inside `jnp.where(out == -np.inf, 0.0, sign)`.
Is it expected that these sorts of condition checks inside special functions should trip the debug configs?
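For reference, here is a minimal standalone sketch of the distinction that the patch at the top of this entry relies on. The array below is illustrative (not from my real code), and the failing line is left commented out; it is the same pattern the traceback above flags:
```python
import numpy as np
import jax
import jax.numpy as jnp

out = jnp.zeros(3)  # ordinary finite values, no infs anywhere
with jax.debug_infs(True):
    # out == -np.inf           # raises: promoting the -inf literal is itself a
    #                          # convert_element_type whose output is inf
    print(jnp.isneginf(out))   # the patched check: [False False False]
```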
| Thanks for the report - I'm not sure why this changed between versions, but I think I know the fix. I'll try to get it in today | 2021-08-02T22:28:26 |
google/jax | 7,456 | google__jax-7456 | [
"7329"
] | 10bbd628e9c98af67fbd3d93dd16371b6d85d09b | diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -4922,6 +4922,7 @@ def _unique_axis_sorted_mask(ar, axis):
size, *out_shape = aux.shape
aux = aux.reshape(size, _prod(out_shape)).T
if aux.shape[0] == 0:
+ size = 1
perm = zeros(1, dtype=int)
else:
perm = lexsort(aux[::-1])
@@ -5005,7 +5006,7 @@ def _rewriting_take(arr, idx, indices_are_sorted=False, unique_indices=False):
# All supported cases of indexing can be implemented as an XLA gather,
# followed by an optional reverse and broadcast_in_dim.
arr = asarray(arr)
- treedef, static_idx, dynamic_idx = _split_index_for_jit(idx)
+ treedef, static_idx, dynamic_idx = _split_index_for_jit(idx, arr.shape)
return _gather(arr, treedef, static_idx, dynamic_idx, indices_are_sorted,
unique_indices)
@@ -5065,7 +5066,7 @@ def _gather(arr, treedef, static_idx, dynamic_idx, indices_are_sorted,
"newaxis_dims",
])
-def _split_index_for_jit(idx):
+def _split_index_for_jit(idx, shape):
"""Splits indices into necessarily-static and dynamic parts.
Used to pass indices into `jit`-ted function.
@@ -5075,7 +5076,7 @@ def _split_index_for_jit(idx):
# Expand any (concrete) boolean indices. We can then use advanced integer
# indexing logic to handle them.
- idx = _expand_bool_indices(idx)
+ idx = _expand_bool_indices(idx, shape)
leaves, treedef = tree_flatten(idx)
dynamic = [None] * len(leaves)
@@ -5328,16 +5329,16 @@ def _eliminate_deprecated_list_indexing(idx):
idx = (idx,)
return idx
-def _expand_bool_indices(idx):
+def _expand_bool_indices(idx, shape):
"""Converts concrete bool indexes into advanced integer indexes."""
out = []
- for i in idx:
+ for dim_number, i in enumerate(idx):
try:
abstract_i = core.get_aval(i)
except TypeError:
abstract_i = None
if (isinstance(abstract_i, ShapedArray) and issubdtype(abstract_i.dtype, bool_)
- or isinstance(i, list) and _all(_is_scalar(e) and issubdtype(_dtype(e), np.bool_) for e in i)):
+ or isinstance(i, list) and i and _all(_is_scalar(e) and issubdtype(_dtype(e), np.bool_) for e in i)):
if isinstance(i, list):
i = array(i)
abstract_i = core.get_aval(i)
@@ -5346,6 +5347,11 @@ def _expand_bool_indices(idx):
# TODO(mattjj): improve this error by tracking _why_ the indices are not concrete
raise errors.NonConcreteBooleanIndexError(abstract_i)
else:
+ i_shape = _shape(i)
+ expected_shape = shape[len(out): len(out) + _ndim(i)]
+ if i_shape != expected_shape:
+ raise IndexError("boolean index did not match shape of indexed array in index "
+ f"{dim_number}: got {i_shape}, expected {expected_shape}")
out.extend(np.where(i))
else:
out.append(i)
diff --git a/jax/_src/ops/scatter.py b/jax/_src/ops/scatter.py
--- a/jax/_src/ops/scatter.py
+++ b/jax/_src/ops/scatter.py
@@ -64,7 +64,7 @@ def _scatter_update(x, idx, y, scatter_op, indices_are_sorted,
y = jnp.asarray(y)
# XLA gathers and scatters are very similar in structure; the scatter logic
# is more or less a transpose of the gather equivalent.
- treedef, static_idx, dynamic_idx = jnp._split_index_for_jit(idx)
+ treedef, static_idx, dynamic_idx = jnp._split_index_for_jit(idx, x.shape)
return _scatter_impl(x, y, scatter_op, treedef, static_idx, dynamic_idx,
indices_are_sorted, unique_indices, normalize_indices)
| diff --git a/tests/lax_numpy_indexing_test.py b/tests/lax_numpy_indexing_test.py
--- a/tests/lax_numpy_indexing_test.py
+++ b/tests/lax_numpy_indexing_test.py
@@ -833,6 +833,25 @@ def testBooleanIndexingWithEmptyResult(self):
expected = np.array([-1])[np.array([False])]
self.assertAllClose(ans, expected, check_dtypes=False)
+ def testBooleanIndexingShapeMismatch(self):
+ # Regression test for https://github.com/google/jax/issues/7329
+ x = jnp.arange(4)
+ idx = jnp.array([True, False])
+ with self.assertRaisesRegex(IndexError, "boolean index did not match shape.*"):
+ x[idx]
+
+ def testNontrivialBooleanIndexing(self):
+ # Test nontrivial corner case in boolean indexing shape validation
+ rng = jtu.rand_default(self.rng())
+ index = (rng((2, 3), np.bool_), rng((6,), np.bool_))
+
+ args_maker = lambda: [rng((2, 3, 6), np.int32)]
+ np_fun = lambda x: np.asarray(x)[index]
+ jnp_fun = lambda x: jnp.asarray(x)[index]
+
+ self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
+ self._CompileAndCheck(jnp_fun, args_maker)
+
def testFloatIndexingError(self):
BAD_INDEX_TYPE_ERROR = "Indexer must have integer or boolean type, got indexer with type"
with self.assertRaisesRegex(TypeError, BAD_INDEX_TYPE_ERROR):
| Boolean indexing/masking with the wrong shape should fail
This example results in an error with NumPy but not jax.numpy:
```python
import jax.numpy as jnp
x = jnp.arange(4)
y = jnp.array([True, False])
x[y] # DeviceArray([0], dtype=int32)
```
It looks like JAX converts the boolean indexer array into an integer array for indexing without checking the shape.
We should raise an error in this case: indexing an array of shape (4,) with a boolean array of shape (2,) is never valid.
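For illustration, a standalone sketch of the kind of shape check the patch above adds. The helper name and the exact message below are made up for this example rather than taken from the JAX source:
```python
import numpy as np

def check_bool_index(arr_shape, idx, dim_start=0):
    # A boolean index must match the shape of the dimensions it consumes.
    expected = tuple(arr_shape[dim_start: dim_start + np.ndim(idx)])
    if np.shape(idx) != expected:
        raise IndexError("boolean index did not match shape of indexed array: "
                         f"got {np.shape(idx)}, expected {expected}")

try:
    check_bool_index((4,), np.array([True, False]))
except IndexError as e:
    print(e)  # got (2,), expected (4,)
```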
| 2021-08-02T23:17:33 |
|
google/jax | 7,462 | google__jax-7462 | [
"7431"
] | c149cc92361b731bdc1ababee1fdad571cd91460 | diff --git a/jax/_src/lax/lax.py b/jax/_src/lax/lax.py
--- a/jax/_src/lax/lax.py
+++ b/jax/_src/lax/lax.py
@@ -3552,6 +3552,23 @@ def _dot_general_translation_rule(c, lhs, rhs, *, dimension_numbers, precision,
precision_config=_precision_config(precision),
preferred_element_type=preferred_element_type)
+def _dot_general_cpu_translation_rule(c, lhs, rhs, *, dimension_numbers, precision,
+ preferred_element_type: Optional[DType]):
+ if preferred_element_type is not None:
+ preferred_element_type = xla_client.dtype_to_etype(preferred_element_type)
+
+ # TODO(b/195364460): Work around slow XLA/CPU implementation of float16 matmul
+ if c.get_shape(lhs).numpy_dtype() == np.float16:
+ lhs = xops.ConvertElementType(lhs, xla_client.dtype_to_etype(np.float32))
+ rhs = xops.ConvertElementType(rhs, xla_client.dtype_to_etype(np.float32))
+ preferred_element_type = (preferred_element_type or
+ xla_client.dtype_to_etype(np.float16))
+
+ return xops.DotGeneral(lhs, rhs,
+ xc.make_dot_dimension_numbers(dimension_numbers),
+ precision_config=_precision_config(precision),
+ preferred_element_type=preferred_element_type)
+
def _dot_general_masking_rule(padded_vals, logical_shapes, *, dimension_numbers,
precision,
preferred_element_type: Optional[DType]):
@@ -3572,6 +3589,9 @@ def _dot_general_masking_rule(padded_vals, logical_shapes, *, dimension_numbers,
_dot_general_transpose_lhs, _dot_general_transpose_rhs)
batching.primitive_batchers[dot_general_p] = _dot_general_batch_rule
masking.masking_rules[dot_general_p] = _dot_general_masking_rule
+xla.backend_specific_translations["cpu"][dot_general_p] = \
+ _dot_general_cpu_translation_rule
+
def _broadcast_in_dim_shape_rule(operand, *, shape, broadcast_dimensions):
_check_shapelike('broadcast_in_dim', 'shape', shape)
| diff --git a/jax/experimental/jax2tf/tests/jax2tf_limitations.py b/jax/experimental/jax2tf/tests/jax2tf_limitations.py
--- a/jax/experimental/jax2tf/tests/jax2tf_limitations.py
+++ b/jax/experimental/jax2tf/tests/jax2tf_limitations.py
@@ -405,7 +405,11 @@ def dot_general(cls, harness: primitive_harness.Harness):
devices="gpu",
modes=("eager", "graph", "compiled"),
enabled=(harness.params["preferred_element_type"] is not None),
- skip_comparison=True)
+ skip_comparison=True),
+ # JAX performs float16 matmuls in float32 on CPU, so the JAX result
+ # may be more precise.
+ custom_numeric(dtypes=[np.float16], devices=["cpu"], tol=1e-2,
+ modes=("eager", "graph", "compiled")),
]
@classmethod
| jnp.float16 hangs matmul
I'm not sure if this is a bug or expected behavior, but I couldn't find a similar issue here, so I thought I'd post.
Repro
The code below seemingly runs forever, whereas the `float32` and `bfloat16` variants run just fine. Is `float16` supported? I tried both a GPU and a TPU in Colab and noticed the same issue.
```python
import jax.numpy as jnp
from jax import grad, jit, vmap
from jax import random
import jax; jax.config.update('jax_platform_name', 'cpu')
key = random.PRNGKey(0)
x = random.normal(key, (10,))
print(x)
size = 3000
x = random.normal(key, (size, size), dtype=jnp.float16)
%timeit jnp.dot(x, x.T).block_until_ready()  # with float16 this seemingly never finishes (platform forced to CPU above)
```
`x = random.normal(key, (size, size), dtype=jnp.float32)` runs just fine
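A possible user-level workaround, sketched under the assumption that the slowness comes from the XLA/CPU float16 matmul path; it mirrors what the patch above does inside the CPU lowering (do the accumulation in float32 and cast back):
```python
import jax.numpy as jnp
from jax import random

key = random.PRNGKey(0)
x = random.normal(key, (3000, 3000), dtype=jnp.float16)
# Matmul in float32, then cast the result back to float16.
y = jnp.dot(x.astype(jnp.float32), x.T.astype(jnp.float32)).astype(jnp.float16)
```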
| Thanks for the report & the clear repro. This seems like something that @hawkinsp might have ideas about. | 2021-08-03T04:04:57 |
google/jax | 7,516 | google__jax-7516 | [
"7507"
] | 4152237da7270fe2b5b2203af99cc58cfa6f6b91 | diff --git a/build/build.py b/build/build.py
--- a/build/build.py
+++ b/build/build.py
@@ -209,7 +209,7 @@ def get_bazel_path(bazel_path_flag):
def check_bazel_version(bazel_path):
try:
- version_output = shell([bazel_path, "--bazelrc=/dev/null", "version"])
+ version_output = shell([bazel_path, "--version"])
except subprocess.CalledProcessError:
return False
match = re.search("Build label: *([0-9\\.]+)[^0-9\\.]", version_output)
| Use bazel --version to check the bazel version
Please use `bazel --version` to check the version of Bazel. The currently used variant ignores the Bazel startup options and actually starts a full Bazel server, which is a) overkill and b) wrong in the case of options like `--output-user-root` and missing or read-only home folders.
Potential change:
```
try:
version_output = shell([bazel_path, "--version"])
except subprocess.CalledProcessError:
return False
match = re.search("(Build label:|bazel) *([0-9\\.]+)", version_output)
```
I kept the regexp generic enough that you can still fall back to the current `bazel version` call and parse both kinds of output.
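As a quick sanity check of the proposed pattern, a small sketch; the sample strings are assumptions about typical `bazel --version` and `bazel version` output rather than captured from a real run:
```python
import re

pattern = r"(Build label:|bazel) *([0-9\.]+)"
for sample in ("bazel 4.1.0", "Build label: 4.1.0"):
    match = re.search(pattern, sample)
    print(match.group(2))  # 4.1.0 in both cases
```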
| 2021-08-05T16:46:01 |
||
google/jax | 7,560 | google__jax-7560 | [
"7533"
] | 17a606a95d8f059e2a069dd1cfa8b2dfb8e93255 | diff --git a/jaxlib/cusparse.py b/jaxlib/cusparse.py
--- a/jaxlib/cusparse.py
+++ b/jaxlib/cusparse.py
@@ -137,8 +137,9 @@ def csr_matmat(c, data, indices, indptr, B, *, shape, transpose=False, compute_d
dtype = np.dtype(c.get_shape(data).element_type())
index_dtype = np.dtype(c.get_shape(indices).element_type())
B_dtype = np.dtype(c.get_shape(B).element_type())
+ B_shape = c.get_shape(B).dimensions()
rows, cols = shape
- _, Ccols = c.get_shape(B).dimensions()
+ _, Ccols = B_shape
nnz, = c.get_shape(data).dimensions()
if compute_dtype is None:
@@ -154,11 +155,10 @@ def csr_matmat(c, data, indices, indptr, B, *, shape, transpose=False, compute_d
b"cusparse_csr_matmat",
operands=(data, indices, indptr, B),
operand_shapes_with_layout=(
- # All are 1D, so no layout necessary
c.get_shape(data),
c.get_shape(indices),
c.get_shape(indptr),
- c.get_shape(B),
+ _Shape.array_shape(B_dtype, B_shape, (1, 0)),
),
shape_with_layout=_Shape.tuple_shape((
_Shape.array_shape(compute_dtype, (out_size, Ccols), (1, 0)),
@@ -272,8 +272,9 @@ def coo_matmat(c, data, row, col, B, *, shape, transpose=False, compute_dtype=No
dtype = np.dtype(c.get_shape(data).element_type())
index_dtype = np.dtype(c.get_shape(row).element_type())
B_dtype = np.dtype(c.get_shape(B).element_type())
+ B_shape = c.get_shape(B).dimensions()
rows, cols = shape
- _, Ccols = c.get_shape(B).dimensions()
+ _, Ccols = B_shape
nnz, = c.get_shape(data).dimensions()
if compute_dtype is None:
@@ -289,11 +290,10 @@ def coo_matmat(c, data, row, col, B, *, shape, transpose=False, compute_dtype=No
b"cusparse_coo_matmat",
operands=(data, row, col, B),
operand_shapes_with_layout=(
- # All are 1D, so no layout necessary
c.get_shape(data),
c.get_shape(row),
c.get_shape(col),
- c.get_shape(B),
+ _Shape.array_shape(B_dtype, B_shape, (1, 0)),
),
shape_with_layout=_Shape.tuple_shape((
_Shape.array_shape(compute_dtype, (out_size, Ccols), (1, 0)),
| diff --git a/tests/sparse_test.py b/tests/sparse_test.py
--- a/tests/sparse_test.py
+++ b/tests/sparse_test.py
@@ -202,7 +202,6 @@ def test_coo_matvec(self, shape, dtype, transpose):
self.assertAllClose(op(M) @ v, matvec(*args), rtol=MATMUL_TOL)
self.assertAllClose(op(M) @ v, jit(matvec)(*args), rtol=MATMUL_TOL)
- @unittest.skipIf(jtu.device_under_test() != "gpu", "test requires GPU")
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_T={}".format(jtu.format_shape_dtype_string(shape, dtype), transpose),
"shape": shape, "dtype": dtype, "transpose": transpose}
@@ -229,6 +228,23 @@ def test_coo_matmat(self, shape, dtype, transpose):
y, dy = jvp(lambda x: sparse.coo_matmat(x, M.row, M.col, B, shape=shape, transpose=transpose).sum(), (M.data, ), (jnp.ones_like(M.data), ))
self.assertAllClose((op(M) @ B).sum(), y, rtol=MATMUL_TOL)
+ def test_coo_matmat_layout(self):
+ # Regression test for https://github.com/google/jax/issues/7533
+ d = jnp.array([1.0, 2.0, 3.0, 4.0])
+ i = jnp.array([0, 0, 1, 2])
+ j = jnp.array([0, 2, 0, 0])
+ shape = (3, 3)
+
+ x = jnp.arange(9).reshape(3, 3).astype(d.dtype)
+
+ def f(x):
+ return sparse.coo_matmat(d, i, j, x.T, shape=shape)
+
+ result = f(x)
+ result_jit = jit(f)(x)
+
+ self.assertAllClose(result, result_jit)
+
@unittest.skipIf(jtu.device_under_test() != "gpu", "test requires GPU")
def test_gpu_translation_rule(self):
version = xla_bridge.get_backend().platform_version
| jit changes behaviour of sparse.coo_matmat with transposed argument
JIT gives erroneous results for the multiplication of a sparse matrix with a transposed dense matrix.
```python
import jax
import jax.numpy as jnp
import jax.experimental.sparse as jsparse
d = jnp.r_[0.1, 2., 3., -2]
x = jax.random.normal(jax.random.PRNGKey(0), (3, 3))
def ok(d, x):
i = jnp.r_[0, 0, 1, 2]
j = jnp.r_[0, 2, 0, 0]
shape = (3, 3)
return jsparse.coo_matmat(d, i, j, x, shape=shape)
def NOT_ok(d, x):
i = jnp.r_[0, 0, 1, 2]
j = jnp.r_[0, 2, 0, 0]
shape = (3, 3)
return jsparse.coo_matmat(d, i, j, x.T, shape=shape)
e_ok = ok(d, x) - jax.jit(ok)(d, x)
e_not_ok = NOT_ok(d, x) - jax.jit(NOT_ok)(d, x)
print('ok: error = \n', e_ok)
print('\nNOT ok: error = \n',e_not_ok)
## OUTPUT
#ok: error =
# [[0. 0. 0.]
# [0. 0. 0.]
# [0. 0. 0.]]
#
#NOT ok: error =
# [[ 0.97765154 0.7773348 -0.0488826 ]
# [ 0. -3.0031524 -1.4664774 ]
# [ 0. 2.0021017 0.9776516 ]]
```
| Are you seeing this only on a GPU backend?
I don't reproduce the issue on CPU; I suspect it has to do with the GPU-only lowering to cusparse. We need to change the layout definition here for a transposed matrix: https://github.com/google/jax/blob/c94f41290cc5c28fba059e172279994107c3103e/jaxlib/cusparse.py#L285
But I'm not certain how to tell at this level whether the input buffer is transposed... perhaps XLA operators somehow expose their strides?
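For the record, the merged patch above sidesteps stride detection entirely: it pins the dense operand to a row-major (1, 0) layout in the custom-call signature, so XLA should hand the kernel a row-major copy even when the caller passes a transposed (column-major) buffer. A rough NumPy analogy of what that layout requirement means (this is not the XLA API, just an illustration):
```python
import numpy as np

x = np.arange(9, dtype=np.float32).reshape(3, 3)
print(x.flags['C_CONTIGUOUS'])                          # True: row-major, layout (1, 0)
print(x.T.flags['C_CONTIGUOUS'])                        # False: the transpose is column-major
print(np.ascontiguousarray(x.T).flags['C_CONTIGUOUS'])  # True: copied back to row-major
```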
Yes, I'm seeing this on a GPU backend only.
I took a closer look at this and realized that in the transposed version, you're computing the matrix product of a (3,3) matrix and a (4, 3) matrix, which should be an error! The issue is poor input validation, which should be fixed by #7557.
The reason that there are different answers between the GPU/JIT and non-jit approaches is that the two proceed based on different sets of assumptions about the input, both of which are incorrect in their own way. With #7557, this should correctly result in an assertion error in both cases.
> I took a closer look at this and realized that in the transposed version, you're computing the matrix product of a (3,3) matrix and a (4, 3) matrix, which should be an error! The issue is poor input validation, which should be fixed by #7557.
Sorry, I think I was distracted while making this minimum working example; in my application, where this bug first surfaced, the dimensions matched and JIT messed up the results. I've changed the dimensions of the `x` matrix in the example to a square matrix and the error persists.
> The reason that there are different answers between GPU/JIT and non-jit approaches is that the two proceed based on different sets assumptions about the input, both of which are incorrect in their own way. With #7557, this should correctly result in an assertion error in both cases.
On the upside, I guess this helped improve the package in other ways, with the added validation.
Great, thanks for checking! I'll try to get to the bottom of the new & improved repro.
I think I found the issue - should be fixed by #7560 | 2021-08-09T23:50:46 |
google/jax | 7,572 | google__jax-7572 | [
"7561"
] | 5d93680adbb68bcc608715358f79aba21bcb7213 | diff --git a/examples/advi.py b/examples/advi.py
--- a/examples/advi.py
+++ b/examples/advi.py
@@ -86,8 +86,8 @@ def objective(params, t):
ax = fig.add_subplot(111, frameon=False)
plt.ion()
plt.show(block=False)
- x_limits = [-2, 2]
- y_limits = [-4, 2]
+ x_limits = (-2, 2)
+ y_limits = (-4, 2)
target_dist = lambda x, _: jnp.exp(funnel_log_density(x))
approx_dist = lambda x, params: jnp.exp(diag_gaussian_logpdf(x, *params))
| script "examples/advi.py" throws "ValueError" exception
Run:
```python3 jax/examples/advi.py```
Output:
```
Optimizing variational parameters...
Iteration 0 lower bound 0.4957694113254547
Traceback (most recent call last):
File "jax/examples/advi.py", line 138, in <module>
callback(params, t)
File "jax/examples/advi.py", line 98, in callback
X, Y, Z = mesh_eval(target_dist, x_limits, y_limits, 1)
File "jax/examples/advi.py", line 67, in mesh_eval
return _mesh_eval(func, x_limits, y_limits, params, num_ticks)
ValueError: Non-hashable static arguments are not supported. An error occured while trying to hash an object of type <class 'list'>, [-2, 2]. The error was:
TypeError: unhashable type: 'list'
```
| 2021-08-10T17:43:06 |
||
google/jax | 7,585 | google__jax-7585 | [
"6614"
] | 67b190eec399bbd98fa378b90475960853dc044c | diff --git a/jax/interpreters/xla.py b/jax/interpreters/xla.py
--- a/jax/interpreters/xla.py
+++ b/jax/interpreters/xla.py
@@ -619,7 +619,7 @@ def _xla_call_impl(fun: lu.WrappedFun, *args, device, backend, name,
compiled_fun = _xla_callable(fun, device, backend, name, donated_invars,
*unsafe_map(arg_spec, args))
try:
- return compiled_fun(*args)
+ out = compiled_fun(*args)
except FloatingPointError:
assert config.jax_debug_nans or config.jax_debug_infs # compiled_fun can only raise in this case
print("Invalid value encountered in the output of a jit function. "
@@ -630,11 +630,13 @@ def _xla_call_impl(fun: lu.WrappedFun, *args, device, backend, name,
# but which config.jax_debug_nans is meant to opt into, we'll be re-executing
# any linear_util.py-style side effects, i.e. re-populating Stores created
# by any transformation_with_aux's applied to fun. Since this is
- # intentional here, to avoid "Store occupied" errors we reset the stores to
- # be empty.
- for store in fun.stores: store and store.reset()
+ # intentional here, to avoid "Store occupied" errors we clone the WrappedFun
+ # with empty stores.
+ stores = [lu.Store() for _ in fun.stores]
+ clone = lu.WrappedFun(fun.f, fun.transforms, stores, fun.params)
with core.new_sublevel():
- return fun.call_wrapped(*args) # probably won't return
+ _ = clone.call_wrapped(*args) # probably won't return
+ return out
def flatten_shape(s: XlaShape) -> Sequence[Tuple[Sequence[int], XlaShape]]:
"""Expands a given shape tree into a flat list of indices to arrays.
| diff --git a/tests/debug_nans_test.py b/tests/debug_nans_test.py
--- a/tests/debug_nans_test.py
+++ b/tests/debug_nans_test.py
@@ -201,5 +201,18 @@ def f(x):
with self.assertRaisesRegex(FloatingPointError, msg):
f(1)
+ def testDebugNansDoesntCorruptCaches(self):
+ # https://github.com/google/jax/issues/6614
+ @jax.jit
+ def f(x):
+ return jnp.divide(x, x)
+
+ for _ in range(2):
+ try:
+ with jax.debug_nans(True):
+ jax.grad(f)(0.)
+ except FloatingPointError:
+ pass
+
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
| Store empty exception when rerunning a computation that failed a NaN check
```
import jax
try:
with jax.debug_nans(True):
jax.grad(jax.numpy.linalg.norm)(jax.numpy.zeros((3, 3)))
except FloatingPointError as e:
pass
with jax.debug_nans(True):
jax.grad(jax.numpy.linalg.norm)(jax.numpy.zeros((3, 3)))
```
```
Traceback (most recent call last):
File "/Users/phawkins/p/jax/tt.py", line 8, in <module>
jax.grad(jax.numpy.linalg.norm)(jax.numpy.zeros((3, 3)))
File "/Users/phawkins/p/jax/jax/_src/traceback_util.py", line 140, in reraise_with_filtered_traceback
return fun(*args, **kwargs)
File "/Users/phawkins/p/jax/jax/_src/api.py", line 810, in grad_f
_, g = value_and_grad_f(*args, **kwargs)
File "/Users/phawkins/p/jax/jax/_src/traceback_util.py", line 140, in reraise_with_filtered_traceback
return fun(*args, **kwargs)
File "/Users/phawkins/p/jax/jax/_src/api.py", line 879, in value_and_grad_f
g = vjp_py(np.ones((), dtype=dtype))
File "/Users/phawkins/p/jax/jax/_src/api.py", line 1872, in _vjp_pullback_wrapper
ans = fun(*args)
File "/Users/phawkins/p/jax/jax/interpreters/ad.py", line 121, in unbound_vjp
arg_cts = backward_pass(jaxpr, consts, dummy_args, cts)
File "/Users/phawkins/p/jax/jax/interpreters/ad.py", line 215, in backward_pass
cts_out = get_primitive_transpose(eqn.primitive)(
File "/Users/phawkins/p/jax/jax/interpreters/ad.py", line 542, in call_transpose
out_flat = primitive.bind(fun, *all_args, **new_params)
File "/Users/phawkins/p/jax/jax/core.py", line 1561, in bind
return call_bind(self, fun, *args, **params)
File "/Users/phawkins/p/jax/jax/core.py", line 1552, in call_bind
outs = primitive.process(top_trace, fun, tracers, params)
File "/Users/phawkins/p/jax/jax/core.py", line 1564, in process
return trace.process_call(self, fun, tracers, params)
File "/Users/phawkins/p/jax/jax/core.py", line 605, in process_call
return primitive.impl(f, *tracers, **params)
File "/Users/phawkins/p/jax/jax/interpreters/xla.py", line 577, in _xla_call_impl
compiled_fun = _xla_callable(fun, device, backend, name, donated_invars,
File "/Users/phawkins/p/jax/jax/linear_util.py", line 262, in memoized_fun
fun.populate_stores(stores)
File "/Users/phawkins/p/jax/jax/linear_util.py", line 150, in populate_stores
self_store.store(other_store.val)
File "/Users/phawkins/p/jax/jax/linear_util.py", line 106, in val
raise StoreException("Store empty")
jax.linear_util.StoreException: Store empty
```
| Simplified repro:
```python
import jax
import jax.numpy as jnp
@jax.jit
def f(x):
return jnp.divide(x, x)
try:
with jax.debug_nans(True):
jax.grad(f)(0.)
except FloatingPointError as e:
pass
with jax.debug_nans(True):
jax.grad(f)(0.)
```
We're corrupting the cache by [clearing the stores here](https://github.com/google/jax/blob/712d5f7e2bc99d2a1856a453f190da69d855b98b/jax/interpreters/xla.py#L593). I am dumb for not realizing this earlier.
We could remove the cache entry when we clear the stores. Or we could find some other way to suppress the 'store occupied' errors that this line was originally meant to avoid. | 2021-08-11T14:48:55 |
google/jax | 7,591 | google__jax-7591 | [
"7586"
] | 8f0edbb9af74af3e938066b9a51ff72959a130e7 | diff --git a/jax/_src/image/scale.py b/jax/_src/image/scale.py
--- a/jax/_src/image/scale.py
+++ b/jax/_src/image/scale.py
@@ -253,7 +253,8 @@ def _resize(image, shape: Sequence[int], method: Union[str, ResizeMethod],
# since all of the current resize methods (kernels) are interpolating, so the
# output = input under an identity warp.
spatial_dims = tuple(np.nonzero(np.not_equal(image.shape, shape))[0])
- scale = [float(shape[d]) / image.shape[d] for d in spatial_dims]
+ scale = [1.0 if shape[d] == 0 else float(shape[d]) / image.shape[d]
+ for d in spatial_dims]
return _scale_and_translate(image, shape, spatial_dims,
scale, [0.] * len(spatial_dims), kernel,
antialias, precision)
| diff --git a/tests/image_test.py b/tests/image_test.py
--- a/tests/image_test.py
+++ b/tests/image_test.py
@@ -178,6 +178,29 @@ def testResizeGradients(self, dtype, image_shape, target_shape, method,
antialias=antialias)
jtu.check_grads(jax_fn, args_maker(), order=2, rtol=1e-2, eps=1.)
+ @parameterized.named_parameters(jtu.cases_from_list(
+ {"testcase_name": "_shape={}_target={}_method={}_antialias={}".format(
+ jtu.format_shape_dtype_string(image_shape, dtype),
+ jtu.format_shape_dtype_string(target_shape, dtype), method,
+ antialias),
+ "dtype": dtype, "image_shape": image_shape,
+ "target_shape": target_shape,
+ "method": method, "antialias": antialias}
+ for dtype in [np.float32]
+ for image_shape, target_shape in [
+ ([1], [0]),
+ ([5, 5], [5, 0]),
+ ([5, 5], [0, 1]),
+ ([5, 5], [0, 0])
+ ]
+ for method in ["nearest", "linear", "lanczos3", "lanczos5", "cubic"]
+ for antialias in [False, True]))
+ def testResizeEmpty(self, dtype, image_shape, target_shape, method, antialias):
+ # Regression test for https://github.com/google/jax/issues/7586
+ image = np.ones(image_shape, dtype)
+ out = jax.image.resize(image, shape=target_shape, method=method, antialias=antialias)
+ self.assertArraysEqual(out, jnp.zeros(target_shape, dtype))
+
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_target={}_method={}".format(
jtu.format_shape_dtype_string(image_shape, dtype),
| `jax.image.resize` fails with `linear` interpolation if the output shape is `0`
```python
import jax
from jax import numpy as np
image = np.ones((1,))
b = jax.image.resize(image, shape=(0,), method='linear')
```
Gives
```python
---------------------------------------------------------------------------
UnfilteredStackTrace Traceback (most recent call last)
<ipython-input-2-2ad1b76cc7df> in <module>()
4 image = np.ones((1,))
----> 5 b = jax.image.resize(image, shape=(0,), method='linear')
20 frames
UnfilteredStackTrace: ZeroDivisionError: float division by zero
The stack trace below excludes JAX-internal frames.
The preceding is the original exception that occurred, unmodified.
--------------------
The above exception was the direct cause of the following exception:
ZeroDivisionError Traceback (most recent call last)
/usr/local/lib/python3.7/dist-packages/jax/_src/image/scale.py in compute_weight_mat(input_size, output_size, scale, translation, kernel, antialias)
48 kernel: Callable,
49 antialias: bool):
---> 50 inv_scale = 1. / scale
51 # When downsampling the kernel should be scaled since we want to low pass
52 # filter and interpolate, but when upsampling it should not be since we only
ZeroDivisionError: float division by zero
```
Although arguably the output should just be an empty array of the same dtype as the input (and it does work with `method="nearest"`).
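With the guard in the patch above (a zero-sized output dimension gets scale 1.0), that is what happens. A quick check, run against a version that includes the fix and mirroring the new test:
```python
import jax
import jax.numpy as jnp

out = jax.image.resize(jnp.ones((5, 5)), shape=(5, 0), method='linear')
print(out.shape)  # (5, 0): an empty array with the input's dtype
```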
| 2021-08-11T18:20:54 |
|
google/jax | 7,592 | google__jax-7592 | [
"7129"
] | 8d6ff968af83983cf9cedc8b4c031a5c6ab6ff0a | diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -2321,11 +2321,12 @@ def count_nonzero(a, axis: Optional[Union[int, Tuple[int, ...]]] = None,
typically compatible with JIT. The JAX version adds the optional `size` argument which
specifies the size of the output arrays: it must be specified statically for ``jnp.nonzero``
to be traced. If specified, the first `size` nonzero elements will be returned; if there
-are fewer nonzero elements than `size` indicates, the index arrays will be zero-padded.
+are fewer nonzero elements than `size` indicates, the result will be padded with ``fill_value``,
+which defaults to zero.
"""
@_wraps(np.nonzero, lax_description=_NONZERO_DOC)
-def nonzero(a, *, size=None):
+def nonzero(a, *, size=None, fill_value=None):
a = atleast_1d(a)
mask = a != 0
if size is None:
@@ -2337,7 +2338,13 @@ def nonzero(a, *, size=None):
return tuple(zeros(size, int) for dim in a.shape)
flat_indices = cumsum(bincount(cumsum(mask), length=size))
strides = np.cumprod(a.shape[::-1])[::-1] // a.shape
- return tuple((flat_indices // stride) % size for stride, size in zip(strides, a.shape))
+ out = tuple((flat_indices // stride) % size for stride, size in zip(strides, a.shape))
+ if size is not None and fill_value is not None:
+ if ndim(fill_value) != 0:
+ raise ValueError(f"fill_value must be a scalar; got {fill_value}")
+ fill_mask = arange(size) >= mask.sum()
+ out = tuple(where(fill_mask, fill_value, entry) for entry in out)
+ return out
@_wraps(np.flatnonzero, lax_description=_NONZERO_DOC)
def flatnonzero(a, *, size=None):
| diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py
--- a/tests/lax_numpy_test.py
+++ b/tests/lax_numpy_test.py
@@ -940,13 +940,14 @@ def testNonzero(self, shape, dtype):
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
- {"testcase_name": "_shape={}_size={}".format(
- jtu.format_shape_dtype_string(shape, dtype), size),
- "shape": shape, "dtype": dtype, "size": size}
+ {"testcase_name": "_shape={}_size={}_fill_value={}".format(
+ jtu.format_shape_dtype_string(shape, dtype), size, fill_value),
+ "shape": shape, "dtype": dtype, "size": size, "fill_value": fill_value}
for shape in nonempty_array_shapes
for dtype in all_dtypes
+ for fill_value in [None, -1]
for size in [1, 5, 10]))
- def testNonzeroSize(self, shape, dtype, size):
+ def testNonzeroSize(self, shape, dtype, size, fill_value):
rng = jtu.rand_some_zero(self.rng())
args_maker = lambda: [rng(shape, dtype)]
@jtu.ignore_warning(category=DeprecationWarning, message="Calling nonzero on 0d arrays.*")
@@ -955,9 +956,9 @@ def np_fun(x):
if size <= len(result[0]):
return tuple(arg[:size] for arg in result)
else:
- return tuple(np.concatenate([arg, np.zeros(size - len(arg), arg.dtype)])
+ return tuple(np.concatenate([arg, np.full(size - len(arg), fill_value or 0, arg.dtype)])
for arg in result)
- jnp_fun = lambda x: jnp.nonzero(x, size=size)
+ jnp_fun = lambda x: jnp.nonzero(x, size=size, fill_value=fill_value)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
self._CompileAndCheck(jnp_fun, args_maker)
| `jnp.nonzero` zero padding behavior is confusing
I noticed that `jax.numpy.nonzero` now accepts the `size` parameter, which allows jitting: #6501. This is wonderful! Thanks!
However, the semantics is that missing entries are zero-padded. This seems unworkable in some situations; consider:
```python
x = jnp.zeros(5)
print(jnp.nonzero(x, size=1)) # Prints [ 0 ]
x = jnp.ones(5)
print(jnp.nonzero(x, size=1)) # Prints [ 0 ]
```
There's no way to tell these two cases apart.
Instead, perhaps you could consider something like `-1`-padding?
Though, performance-wise, I'm not sure how hard it would be to make that on par with the current implementation.
| Note that this only occurs when `size=1`.
So perhaps at least we should document this corner case, since this seems like a problem that could cause unexpected program behavior.
Thanks for the question - we chose to make the padding zero because it's the natural value that falls out of the algorithm used to implement the function. Zero padding makes sense in many applications, and any other fill value would require additional post-processing of the result. If you wish, you can do this post-processing yourself. For example:
```python
import jax.numpy as jnp
def nonzero_with_configurable_padding(arr, *, size=None, fill_value=-1):
out = jnp.nonzero(arr, size=size)
if size is not None:
num_nonzero = (arr != 0).sum()
mask = jnp.arange(len(out[0])) < num_nonzero
out = tuple(jnp.where(mask, entry, fill_value) for entry in out)
return out
x = jnp.zeros(5)
print(nonzero_with_configurable_padding(x, size=1))
# [ -1 ]
x = jnp.ones(5)
print(nonzero_with_configurable_padding(x, size=1))
# [ 0 ]
```
@jakevdp Thanks for the explanation and your example.
A couple of questions for this overall issue:
1) Is there an alternative algorithm that can a) use an arbitrary `fill_value` and b) retain the same efficiency?
2) Should we document this behavior for `size=1`? I would say that we could even ban `size=1`, since it produces an ambiguous return value.
3) Perhaps you can add this implementation of the `fill_value` parameter to `nonzero`? The default would be zero, so the fast code path stays enabled.
Ideally, someone will think of a solution to 1), but before that, I think we should do either 2) or 3).
WDYT?
I think the behavior for size=1 is already documented, no? The true size is zero, so the result is padded with zeros up to the requested size.
> The true size is zero, so the result is padded with zeros up to the requested size.
Well, sure. I'm simply thinking that we should add a note that `size=1` will have this problem of ambiguous output.
I mean, from a user's perspective, I do hope that when I write some code with `size=1`, I get a fair amount of warning about this behavior. Maybe it's just me, but I only realized this issue after I observed unexpected output.
I can do a PR on the docs.
The only reason I haven't done so is that I'm not sure whether 1) or 3) is a viable/better option.
I think a `fill_value` option may be a reasonable feature to add. The reason I didn't add it before is that it adds a maintenance/testing burden for something that didn't have any concrete use-case to motivate it, and the relevant information (the true number of nonzeros) is straightforward to determine separately.
Can you give an example of code you might write with a configurable fill value?
Sure. The code I had which ran into this problem was a jittable [Schur-Transform](https://en.wikipedia.org/wiki/Lehmer–Schur_algorithm). The implementation is pretty short so I'll paste it here (note that most of it is just comments).
```python
import jax.numpy as jnp
def _first_non_zero(x):
return jnp.nonzero(x, size=1)[0][0]
# return jnp.nonzero(x, size=1, fill_value=x.size)[0][0]
def schur_transform(coeffs,
n_leading_zeros: int):
"""Schur Transform of a polynomial.
Computes
..math::
T(p) = \\bar{p(0)}p - \\bar{p^*(0)}p^*
where :math:`p^*(z)` is the reciprocal adjoint polynomial
..math::
p^*(z) = z^n \cdot \\bar{p(z^{-1})}
and bar denotes complex conjugation.
  See https://en.wikipedia.org/wiki/Lehmer–Schur_algorithm for more details.
Args:
    coeffs: a complex array of polynomial coefficients, with possibly some
leading zero terms.
n_leading_zeros: the number of leading zeros in the polynomial.
Returns:
the transformed polynomial coefficients, and the number of leading zeros in
it.
"""
# The reciprocal adjoint polynomial
coeffs_ra = jnp.roll(coeffs.conj(), shift=-n_leading_zeros, axis=0)[::-1]
# Schur Transformation
coeffs = (coeffs[-1].conj() * coeffs - coeffs_ra[-1].conj() * coeffs_ra)
# Update number of leading zeros
n_leading_zeros = _first_non_zero(coeffs)
return coeffs, n_leading_zeros
```
The test could be
```
coeffs = jnp.array([1, 2, 1], dtype=jnp.complex64) # f(x) = x^2 + 2x + 1
transformed_coeffs, n_leading_zeros = schur_transform(coeffs, n_leading_zeros=0)
assert jnp.all(transformed_coeffs == 0)
assert n_leading_zeros == 3 # Currently fails because `fill_value` is not available.
``` | 2021-08-11T18:55:36 |
google/jax | 7,626 | google__jax-7626 | [
"7621"
] | a3387aeb0c26e8334d874160a00db7d5e174264b | diff --git a/jax/_src/api.py b/jax/_src/api.py
--- a/jax/_src/api.py
+++ b/jax/_src/api.py
@@ -1256,8 +1256,6 @@ def vmap(fun: F, in_axes=0, out_axes=0, axis_name=None) -> F:
docstr += "\n\nOriginal documentation:\n\n"
docstr += fun.__doc__
- axis_name = core._TempAxisName(fun) if axis_name is None else axis_name
-
if isinstance(in_axes, list):
# To be a tree prefix of the positional args tuple, in_axes can never be a
# list: if in_axes is not a leaf, it must be a tuple of trees. However,
diff --git a/jax/interpreters/batching.py b/jax/interpreters/batching.py
--- a/jax/interpreters/batching.py
+++ b/jax/interpreters/batching.py
@@ -66,7 +66,7 @@ def _match_axes(axis_size, axis_name, in_dims, out_dims_thunk, out_dim_dests,
out_dims = out_dims_thunk()
for od, od_dest in zip(out_dims, out_dim_dests):
if od is not None and not isinstance(od_dest, int):
- if not isinstance(axis_name, core._TempAxisName):
+ if not isinstance(axis_name, core._TempAxisName) and axis_name is not None:
msg = f"vmap has mapped output (axis_name={axis_name}) but out_axes is {od_dest}"
else:
msg = f"vmap has mapped output but out_axes is {od_dest}"
| diff --git a/tests/api_test.py b/tests/api_test.py
--- a/tests/api_test.py
+++ b/tests/api_test.py
@@ -2949,6 +2949,20 @@ def loss(A, x):
with jax.checking_leaks():
_ = jax.grad(loss)(A, x) # doesn't crash
+ def test_vmap_caching(self):
+ # https://github.com/google/jax/issues/7621
+
+ f = lambda x: jnp.square(x).mean()
+ jf = jax.jit(f)
+ x = jax.random.uniform(jax.random.PRNGKey(0), shape=(8, 4))
+
+ with jtu.count_jit_and_pmap_compiles() as count: # noqa: F841
+ jax.hessian(jf)(x).block_until_ready()
+ jax.hessian(jf)(x).block_until_ready()
+ jax.hessian(jf)(x).block_until_ready()
+
+ self.assertEqual(count[0], 2)
+
class RematTest(jtu.JaxTestCase):
| Memory leak when computing the hessian of a jitted function
Please:
- [x] Check for duplicate issues.
- [x] Provide a complete example of how to reproduce the bug, wrapped in triple backticks like this:
```python
import jax
import jax.numpy as jnp
f = lambda x: jnp.square(x).mean()
jf = jax.jit(f)
x = jax.random.uniform(jax.random.PRNGKey(0), shape=(8, 4))
# Start monitoring memory with htop
while True:
try:
# let this run for a bit, notice memory usage doesn't increase
y = jax.hessian(f)(x)
except KeyboardInterrupt:
pass
while True:
# let this run, notice memory usage starts increasing immediately
# also, the memory usage doesn't appear to stop increasing ever
y = jax.hessian(jf)(x)
```
Also, note that the equivalent code with `jax.grad` (or `jax.jacfwd` for multidimensional functions) replacing `jax.hessian` *does not* cause a memory leak.
As a result of this leak, my python process ends up getting killed by my OS (due to OOM), so I don't ever see any python or jax traces.
| Thanks for ~the question~ raising this, and with a clear reproducer!
At first I thought this could be an artifact of [asynchronous dispatch](https://jax.readthedocs.io/en/latest/async_dispatch.html). (The `jit` dispatch path is different from the non-`jit` dispatch path, so that may play a role in why `f` and `jf` behave differently.)
But I ran this version, which forces synchronizations, and it seems like memory is still growing without bound (on the CPU backend):
```python
while True:
jax.hessian(jf)(x).block_until_ready()
```
I then tried running the script like this:
```
env JAX_LOG_COMPILES=1 python issue7621.py
```
and saw that we're doing lots of recompiles here:
```
...
WARNING:absl:Compiling backward_pass (140099626713344) for args (ShapedArray(float32[8,4]), ShapedArray(float32[]), ShapedArray(float32[1]), ShapedArray(float32[32,8,4])).
WARNING:absl:Compiling backward_pass (140099626782592) for args (ShapedArray(float32[8,4]), ShapedArray(float32[]), ShapedArray(float32[1]), ShapedArray(float32[32,8,4])).
WARNING:absl:Compiling backward_pass (140099626820800) for args (ShapedArray(float32[8,4]), ShapedArray(float32[]), ShapedArray(float32[1]), ShapedArray(float32[32,8,4])).
WARNING:absl:Compiling backward_pass (140099626782144) for args (ShapedArray(float32[8,4]), ShapedArray(float32[]), ShapedArray(float32[1]), ShapedArray(float32[32,8,4])).
WARNING:absl:Compiling backward_pass (140099626862272) for args (ShapedArray(float32[8,4]), ShapedArray(float32[]), ShapedArray(float32[1]), ShapedArray(float32[32,8,4])).
WARNING:absl:Compiling backward_pass (140099626817664) for args (ShapedArray(float32[8,4]), ShapedArray(float32[]), ShapedArray(float32[1]), ShapedArray(float32[32,8,4])).
WARNING:absl:Compiling backward_pass (140099626755712) for args (ShapedArray(float32[8,4]), ShapedArray(float32[]), ShapedArray(float32[1]), ShapedArray(float32[32,8,4])).
WARNING:absl:Compiling backward_pass (140099626837568) for args (ShapedArray(float32[8,4]), ShapedArray(float32[]), ShapedArray(float32[1]), ShapedArray(float32[32,8,4])).
WARNING:absl:Compiling backward_pass (140099626919552) for args (ShapedArray(float32[8,4]), ShapedArray(float32[]), ShapedArray(float32[1]), ShapedArray(float32[32,8,4])).
WARNING:absl:Compiling backward_pass (140099626921344) for args (ShapedArray(float32[8,4]), ShapedArray(float32[]), ShapedArray(float32[1]), ShapedArray(float32[32,8,4])).
WARNING:absl:Compiling backward_pass (140099626884480) for args (ShapedArray(float32[8,4]), ShapedArray(float32[]), ShapedArray(float32[1]), ShapedArray(float32[32,8,4])).
WARNING:absl:Compiling backward_pass (140099626944512) for args (ShapedArray(float32[8,4]), ShapedArray(float32[]), ShapedArray(float32[1]), ShapedArray(float32[32,8,4])).
WARNING:absl:Compiling backward_pass (140099626883712) for args (ShapedArray(float32[8,4]), ShapedArray(float32[]), ShapedArray(float32[1]), ShapedArray(float32[32,8,4])).
WARNING:absl:Compiling backward_pass (140099626911936) for args (ShapedArray(float32[8,4]), ShapedArray(float32[]), ShapedArray(float32[1]), ShapedArray(float32[32,8,4])).
...
```
Maybe that should've been obvious from your clue about `grad`/`jacfwd` behaving differently.
Somehow `hessian` is causing compilation cache misses here...
@mattjj Nice catch; I wasn't aware of the `JAX_LOG_COMPILES` variable. I wonder if declaring the function `h = hessian(jf)` outside the loop and calling `h(x)` within the loop would still cause the issue. I'll give it a try (though admittedly I don't know much about how `jit` works, so this may not make sense ;) )
**Edit**: the hessian outside the loop trick doesn't work, though that probably isn't surprising.
This _should_ be getting cache hits, so it's definitely a bug. The situation should be basically the same as for `grad`, so there's some difference between `grad` and `hessian` which we need to pin down...
I see the issue. It's a problem with `vmap`. It's a bit hard to unpack in full detail, so for now I'm just going to leave some notes for myself and other jax devs: in [this cache key](https://github.com/google/jax/blob/a3387aeb0c26e8334d874160a00db7d5e174264b/jax/linear_util.py#L255), on each invocation we're getting different values because `key[0][7][1][0].payload` is a different [temporary axis name](https://github.com/google/jax/blob/a3387aeb0c26e8334d874160a00db7d5e174264b/jax/_src/api.py#L1259) each time. That's happening in turn because `jacrev's` [`pullback`](https://github.com/google/jax/blob/a3387aeb0c26e8334d874160a00db7d5e174264b/jax/_src/api.py#L1033-L1035), which gets vmapped, is a fresh Python callable each time.
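A tiny illustration of that last point (simplified, and not the actual cache-key machinery): the pullback returned by reverse-mode autodiff is a new Python object on every call, so anything keyed on its identity changes every time.
```python
import jax
import jax.numpy as jnp

f = lambda x: jnp.square(x).sum()
_, pullback1 = jax.vjp(f, jnp.ones(3))
_, pullback2 = jax.vjp(f, jnp.ones(3))
print(pullback1 is pullback2)  # False: a fresh closure each call
```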
More notes to self:
We added temporary axis names in #917 for soft_pmap (now replaced by xmap). But the issue here is about vmap using temporary axis names.
The reason we needed them for soft_pmap/xmap is that those implementations split axes, and so they need to generate new names which could be used in collectives (and hence have to be distinguished from one another, etc).
But here with vmap, when we aren't given a name, the axis can't be involved in any collectives. So it seems like we should just leave it as None. Unless I'm forgetting something...
Adam and I added this TempAxisName stuff to vmap in #4005. I wonder if that was just a bug... | 2021-08-13T21:55:15 |
google/jax | 7,646 | google__jax-7646 | [
"7645"
] | f6f1debf700b55ad8c0ecc1daa0ed1f677c2bf00 | diff --git a/jax/_src/scipy/stats/beta.py b/jax/_src/scipy/stats/beta.py
--- a/jax/_src/scipy/stats/beta.py
+++ b/jax/_src/scipy/stats/beta.py
@@ -18,7 +18,7 @@
from jax._src.numpy.util import _wraps
from jax._src.numpy.lax_numpy import (_promote_args_inexact, _constant_like,
where, inf, logical_or)
-from jax.scipy.special import betaln
+from jax.scipy.special import betaln, xlogy, xlog1py
@_wraps(osp_stats.beta.logpdf, update_doc=False)
@@ -27,8 +27,8 @@ def logpdf(x, a, b, loc=0, scale=1):
one = _constant_like(x, 1)
shape_term = lax.neg(betaln(a, b))
y = lax.div(lax.sub(x, loc), scale)
- log_linear_term = lax.add(lax.mul(lax.sub(a, one), lax.log(y)),
- lax.mul(lax.sub(b, one), lax.log1p(lax.neg(y))))
+ log_linear_term = lax.add(xlogy(lax.sub(a, one), y),
+ xlog1py(lax.sub(b, one), lax.neg(y)))
log_probs = lax.sub(lax.add(shape_term, log_linear_term), lax.log(scale))
return where(logical_or(lax.gt(x, lax.add(loc, scale)),
lax.lt(x, loc)), -inf, log_probs)
| diff --git a/tests/scipy_stats_test.py b/tests/scipy_stats_test.py
--- a/tests/scipy_stats_test.py
+++ b/tests/scipy_stats_test.py
@@ -148,6 +148,13 @@ def args_maker():
self._CompileAndCheck(lax_fun, args_maker,
rtol={np.float32: 2e-3, np.float64: 1e-4})
+ def testBetaLogPdfZero(self):
+ # Regression test for https://github.com/google/jax/issues/7645
+ a = b = 1.
+ x = np.array([0., 1.])
+ self.assertAllClose(
+ osp_stats.beta.pdf(x, a, b), lsp_stats.beta.pdf(x, a, b), atol=1E-6)
+
@genNamedParametersNArgs(3)
def testCauchyLogPdf(self, shapes, dtypes):
rng = jtu.rand_default(self.rng())
| jax.scipy.stats.beta.pdf handles x=0, x=1 incorrectly
```
a = b = 1.
x = np.array([0., 1.])
scipy.stats.beta.pdf(x, a, b)
# array([1., 1.])
jax.scipy.stats.beta.pdf(x, a, b)
# DeviceArray([nan, nan], dtype=float32)
```
Basically the same issue and resolution as #7256: use xlogy/xlog1py here:
https://github.com/google/jax/blob/f6f1debf700b55ad8c0ecc1daa0ed1f677c2bf00/jax/_src/scipy/stats/beta.py#L30-L31
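The reason, in one line, using the a = b = 1 case from the example above: the log-linear term evaluates 0 * log(0) at the endpoints, and `xlogy` is defined to return 0 there.
```python
import jax.numpy as jnp
from jax.scipy.special import xlogy

print(0.0 * jnp.log(0.0))  # nan, from 0 * (-inf)
print(xlogy(0.0, 0.0))     # 0.0
```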
| Thanks for the report! | 2021-08-17T16:52:40 |
google/jax | 7,648 | google__jax-7648 | [
"7634"
] | 476642578be3092db77adf0bdeca5cd4867f12e1 | diff --git a/jax/_src/scipy/special.py b/jax/_src/scipy/special.py
--- a/jax/_src/scipy/special.py
+++ b/jax/_src/scipy/special.py
@@ -120,8 +120,8 @@ def logsumexp(a, axis=None, b=None, keepdims=False, return_sign=False):
out = lax.add(lax.log(jnp.sum(lax.exp(lax.sub(a, amax_with_dims)),
axis=dims, keepdims=keepdims)),
amax)
- sign = jnp.where(jnp.isnan(out), np.nan, 1.0).astype(out.dtype)
- sign = jnp.where(jnp.isneginf(out), 0.0, sign)
+ sign = jnp.where(jnp.isnan(out), out, 1.0)
+ sign = jnp.where(jnp.isneginf(out), 0.0, sign).astype(out.dtype)
else:
expsub = lax.exp(lax.sub(a, amax_with_dims))
if b is not None:
| diff --git a/tests/lax_scipy_test.py b/tests/lax_scipy_test.py
--- a/tests/lax_scipy_test.py
+++ b/tests/lax_scipy_test.py
@@ -214,6 +214,13 @@ def testLogSumExpOnes(self):
self._CheckAgainstNumpy(osp_special.logsumexp, lsp_special.logsumexp, args_maker)
self._CompileAndCheck(lsp_special.logsumexp, args_maker)
+ def testLogSumExpNans(self):
+ # Regression test for https://github.com/google/jax/issues/7634
+ with jax.debug_nans(True):
+ with jax.disable_jit():
+ result = lsp_special.logsumexp(1.0)
+ self.assertEqual(result, 1.0)
+
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(
| logsumexp: "invalid value (nan) encountered in convert_element_type"
I get the following error in logsumexp:
```
FloatingPointError: invalid value (nan) encountered in convert_element_type
/home/foo/miniconda3/envs/sica/lib/python3.7/site-packages/jax/_src/scipy/special.py(126)logsumexp()
```
Unfortunately, I have not been able to reproduce this in any reasonably sized version -- I only seem to encounter it within much larger jitted and scanned code that I am not able to share at the moment due to anonymity requirements (an earlier version of the code is currently under review). I tried to figure out what's going on by debugging the logsumexp function and found something strange:
```python
> out = lax.add(lax.log(jnp.sum(lax.exp(lax.sub(a, amax_with_dims))
> (Pdb) out
> Traced<ShapedArray(float64[2])>with<BatchTrace(level=1/0)>
> with val = DeviceArray([[-0.8201989 , -0.8201989 ],
> [-0.82572116, -0.82572116],
> [-0.86567182, -0.86567182]], dtype=float64)
> batch_dim = 0
> (Pdb) jnp.where(jnp.isnan(out), np.nan, 1.0).astype(out.dtype)
>*** FloatingPointError: invalid value (nan) encountered in convert_element_type
> (Pdb) jnp.where(jnp.isnan(out), 0.0, 1.0).astype(out.dtype)
```
Yet
```python
>jnp.where(jnp.isnan(out), 0.0, 1.0).astype(out.dtype)
> Traced<ShapedArray(float64[2])>with<BatchTrace(level=1/0)>
> with val = DeviceArray([[1., 1.],
> [1., 1.],
> [1., 1.]], dtype=float64)
> batch_dim = 0
```
What makes this stranger is that this only happens sometimes, e.g. if `out` is 2d rather than 3d then I never get this error.
Any ideas as to what might be going on? I will try to produce a minimal example somehow.
| Thanks for the report. One point of clarification: is this with `jax_debug_nans` enabled?
It is indeed.
(The issue happened originally without jax_debug_nans.) However, I just updated JAX & jaxlib in the hope that it might help, but am now instead getting
> Segmentation fault (core dumped)
So it seems like there is maybe something else going on as well -- I will try to fix that first and then see if this issue still persists.
So what seems to have happened:
1. there was a NaN coming from somewhere
2. I turned on jax_debug_nans in an attempt to find out where it was coming from
3. this pointed to the logsumexp function
4. as per your comments, I turned off jax_debug_nans and debugged manually (by first disabling jit)
5. I found out the NaN was coming from matrix inversion -- a completely different place than where jax_debug_nans pointed to...
Ah, thanks for the update. Sorry for the suboptimal behavior in `debug_nans`. It sounds like an issue we should investigate - is it possible to reproduce the issue compactly?
I created a much simplified version of my code that captures similar behaviour. If I run this as it is, it points to the error being in logsumexp, but if I comment out `with jax.disable_jit():` then it appropriately shows the error being in Cholesky. Now I suppose the issue is the interplay between debug_nans=True and disable_jit(), and a much simpler example would have sufficed -- should I not be using these two simultaneously? The reason I did is that, in my code with loads of scans and nested functions, I have in the past often only found debug_nans useful if jit is disabled...
```
import jax
import jax.numpy as jnp
from jax import jit
import pdb
from jax.lax import scan
from jax.scipy.special import logsumexp
from jax.config import config
config.update("jax_debug_nans", True)
# inv(L*L.T)*Y
def invcholp(L, Y):
D = jax.scipy.linalg.solve_triangular(L, Y, lower=True)
B = jax.scipy.linalg.solve_triangular(L.T, D, lower=False)
return B
# inv(X)*Y
def invmp(X, Y):
return invcholp(jnp.linalg.cholesky(X), Y)
def tree_prepend(prep, tree):
preprended = jax.tree_multimap(
lambda a, b: jnp.vstack((a[None], b)), prep, tree
)
return preprended
def scan_body(msg_in, msg_current):
eta_A, eta_B = msg_current
fwd_t = logsumexp(eta_A.T + msg_in[None, :], axis=1)
msg_out = fwd_t+eta_B
return msg_out, fwd_t
def tree_stuff(eta_pi, eta_A):
msg_in = jnp.array([0.04, 0.98])
eta_B = jnp.log(eta_A + 1000)
tree = tree_prepend(eta_pi, scan(scan_body, msg_in, (eta_A, eta_B))[1])
return tree
@jit
def run(V, eta_pi, eta_A):
eta_A = jnp.repeat(eta_A, 1000, 0)
tree_stuff(eta_pi, eta_A)
Q = invmp(V, jnp.eye(V.shape[0]))
return Q
if __name__ == "__main__":
V = jnp.array([[-10, 20], [-10, 20]])
eta_pi = jnp.array([0.1])
eta_A = jnp.array([[0.5, 0.08],
[0.08, 0.3]])
with jax.disable_jit():
run(V, eta_pi, eta_A)
```
Also, if instead of `jax.disable_jit()` I comment out the jit decorator of the `run` function, as a way to disable jit, then behaviour is as expected.
Are you using complex numbers by any chance? I have somewhat recently made a PR (#6855) to support complex numbers in `logsumexp`. The code path used for `complex64/128` is different from the one used for the other dtypes.
Thanks for the repro: I think the issue is that `tree_stuff(eta_pi, eta_A)` is dead code (it returns a value that is unused within the function). One optimization done by `jit` is removal of dead code.
So, without `jit` this dead code executes and gives an error. With `jit`, the dead code is removed and a different error comes from the next line, `Q = invmp(V, jnp.eye(V.shape[0]))`
This appears to all be working as expected.
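To illustrate the dead-code point with a standalone sketch (the toy function `g` below is hypothetical, not taken from the original code):
```python
import jax
import jax.numpy as jnp
from jax.config import config
config.update("jax_debug_nans", True)

def g(x):
    _ = jnp.log(x - 10.0)  # NaN for x < 10, but the result is never used (dead code)
    return x + 1.0

# Under jit, XLA removes the unused computation, so no NaN is ever produced and
# jax_debug_nans stays quiet; run eagerly, the dead line executes and raises.
print(jax.jit(g)(1.0))  # fine
# g(1.0)                # would raise FloatingPointError with jax_debug_nans enabled
```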
> Are you using complex numbers by any chance? I have somewhat recently made a PR (#6855) to support complex numbers in `logsumexp`. The code path used for `complex64/128` is different by the one used for the other dtypes.
No complex numbers.
> Thanks for the repro: I think the issue is that tree_stuff(eta_pi, eta_A) is dead code (it returns a value that is unused within the function). One optimization done by jit is removal of dead code.
>
> So, without jit this dead code executes and gives an error. With jit, the dead code is removed and a different error comes from the next line, Q = invmp(V, jnp.eye(V.shape[0]))
>
> This appears to all be working as expected.
Hmm, interesting. That does sound like it makes sense here, but in my original code the output of `tree_stuff` is definitely used within `run`.
Thanks - trying to simplify things a bit I do find something strange:
```python
import jax
import jax.numpy as jnp
from jax import jit
from jax.lax import scan
from jax.scipy.special import logsumexp
from jax.config import config
config.update("jax_debug_nans", True)
def scan_body(msg_in, msg_current):
eta_A, eta_B = msg_current
fwd_t = logsumexp(eta_A + msg_in, axis=0)
return fwd_t + eta_B, fwd_t
@jit
def run(V, eta_pi, eta_A):
eta_A = jnp.repeat(eta_A, 1000, 0)
msg_in = jnp.array([0.04, 0.98])
eta_B = jnp.log(eta_A + 1000)
return scan(scan_body, msg_in, (eta_A, eta_B))[1]
V = jnp.array([[-10, 20], [-10, 20]])
eta_pi = jnp.array([0.1])
eta_A = jnp.array([[0.5, 0.08],
[0.08, 0.3]])
# runs without issue
print(run(V, eta_pi, eta_A))
# runs without issue
print(jit(run)(V, eta_pi, eta_A))
# raises an error due to NaN in convert_element_type
with jax.disable_jit():
print(jit(run)(V, eta_pi, eta_A))
```
When the function is run without jit or with jit, it succeeds. But when it is jitted and jit is disabled, it fails. I'm not sure what is going on with that.
Yes, I only saw issues whenever I had `with jax.disable_jit():`. Disabling all jit decorators manually didn't produce any issues.
(though when I run your simplified code I don't see any errors, perhaps because the matrix V is not inverted anywhere)
> (though when I run your simplified code I don't see any errors, perhaps because the matrix V is not inverted anywhere)
That's my point: the error does not seem related to a matrix inversion, but rather to the interaction between `scan` and `disable_jit` (note that even in your original code, the `logsumexp` error is raised before any matrix inversion takes place).
The simplified code as written errors when I run it on CPU with JAX v0.2.19. What version of JAX are you using, on which backend?
Apologies, I had not activated my proper jax conda env, so it was running with a much older 0.1.55. Switching to CPU JAX 0.2.19 errors out precisely as you describe (unfortunately I don't have GPU access at the moment, so I can't check whether that would change things).
Here's an even more stripped-down version of the error, showing that `debug_nans` triggers only if `scan` is run under `disable_jit`:
```python
import jax
from jax.lax import scan
import jax.numpy as jnp
from jax.scipy.special import logsumexp
from jax.config import config
config.update("jax_debug_nans", True)
def scan_body(carry, x):
return logsumexp(x), x
carry = 0.0
x = jnp.ones(2)
print(scan(scan_body, carry, x))
# (DeviceArray(1., dtype=float32), DeviceArray([1., 1.], dtype=float32))
with jax.disable_jit():
print(scan(scan_body, carry, x))
# FloatingPointError: invalid value (nan) encountered in convert_element_type
```
This seems to be a consequence of the alternate implementation of `scan` under `disable_jit`: https://github.com/google/jax/blob/476642578be3092db77adf0bdeca5cd4867f12e1/jax/_src/lax/control_flow.py#L1252-L1263
Though I'm not entirely certain why this would be different in terms of generating nans.
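For context, the `disable_jit` path behaves roughly like a plain Python loop over the leading axis -- a paraphrase for intuition, not the exact library code (`scan_reference` is a made-up name):
```python
import jax.numpy as jnp

def scan_reference(f, init, xs):
    # Rough stand-in for what lax.scan degenerates to under jax.disable_jit():
    # run the body eagerly step by step and stack the per-step outputs.
    carry = init
    ys = []
    for i in range(xs.shape[0]):
        carry, y = f(carry, xs[i])
        ys.append(y)
    return carry, jnp.stack(ys)
```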
Why would it be that changing the "official" `logsumexp` code from
`sign = jnp.where(jnp.isnan(out), np.nan, 1.0).astype(out.dtype)`
to something else like
`sign = jnp.where(jnp.isnan(out), 0., 1.0).astype(out.dtype)`
doesn't cause an error in your new simplified example? Is there some problem with using `np.nan` from original numpy?
Fix is in #7648.
The issue is that if you use `np.nan` in `_where()` with `disable_jit` and `debug_nans`, it triggers a NaN error because the value is treated as a constant rather than being traced. We can work around it by explicitly casting the value to a JAX type with `jnp.array`
Awesome that makes sense -- thanks very much! | 2021-08-17T20:29:59 |
google/jax | 7,715 | google__jax-7715 | [
"7714"
] | 7a40aa0114e46a7a8975e04b175dfbc7b0b388ef | diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -1037,6 +1037,11 @@ def histogramdd(sample, bins=10, range=None, weights=None, density=None):
if weights is not None and weights.shape != (N,):
raise ValueError("should have one weight for each sample.")
+ if range is not None and (
+ len(range) != D or _any(r is not None and len(r) != 2 for r in range)):
+ raise ValueError(f"For sample.shape={(N, D)}, range must be a sequence "
+ f"of {D} pairs or Nones; got range={range}")
+
try:
num_bins = len(bins)
if num_bins != D:
@@ -1051,7 +1056,8 @@ def histogramdd(sample, bins=10, range=None, weights=None, density=None):
dedges = D*[None]
for i in builtins.range(D):
- bin_edges = histogram_bin_edges(sample[:, i], bins[i], range, weights)
+ range_i = None if range is None else range[i]
+ bin_edges = histogram_bin_edges(sample[:, i], bins[i], range_i, weights)
bin_idx = searchsorted(bin_edges, sample[:, i], side='right')
bin_idx = where(sample[:, i] == bin_edges[-1], bin_idx - 1, bin_idx)
bin_idx_by_dim[i] = bin_idx
| diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py
--- a/tests/lax_numpy_test.py
+++ b/tests/lax_numpy_test.py
@@ -3147,25 +3147,23 @@ def testHistogram(self, shape, dtype, bins, density, weights):
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
- {"testcase_name": "_{}_bins={}_weights={}_density={}".format(
- jtu.format_shape_dtype_string(shape, dtype), bins, weights, density),
- "shape": shape,
- "dtype": dtype,
- "bins": bins,
- "weights": weights,
- "density": density
+ {"testcase_name": "_{}_bins={}_weights={}_density={}_range={}".format(
+ jtu.format_shape_dtype_string(shape, dtype), bins, weights, density, range),
+ "shape": shape, "dtype": dtype, "bins": bins, "weights": weights, "density": density, "range": range,
}
for shape in [(5,), (12,)]
for dtype in int_dtypes
for bins in [2, [2, 2], [[0, 1, 3, 5], [0, 2, 3, 4, 6]]]
for weights in [False, True]
for density in [False, True]
+ for range in [None, [(-1, 1), None], [(-1, 1), (-2, 2)]]
))
- def testHistogram2d(self, shape, dtype, bins, weights, density):
+ def testHistogram2d(self, shape, dtype, bins, weights, density, range):
rng = jtu.rand_default(self.rng())
_weights = lambda w: abs(w) if weights else None
- np_fun = lambda a, b, w: np.histogram2d(a, b, bins=bins, weights=_weights(w), density=density)
- jnp_fun = lambda a, b, w: jnp.histogram2d(a, b, bins=bins, weights=_weights(w), density=density)
+ np_fun = jtu.ignore_warning(category=RuntimeWarning, message="invalid value.*")(
+ lambda a, b, w: np.histogram2d(a, b, bins=bins, weights=_weights(w), density=density, range=range))
+ jnp_fun = lambda a, b, w: jnp.histogram2d(a, b, bins=bins, weights=_weights(w), density=density, range=range)
args_maker = lambda: [rng(shape, dtype), rng(shape, dtype), rng(shape, dtype)]
tol = {jnp.bfloat16: 2E-2, np.float16: 1E-1}
# np.searchsorted errors on bfloat16 with
@@ -3177,25 +3175,23 @@ def testHistogram2d(self, shape, dtype, bins, weights, density):
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
- {"testcase_name": "_{}_bins={}_weights={}_density={}".format(
- jtu.format_shape_dtype_string(shape, dtype), bins, weights, density),
- "shape": shape,
- "dtype": dtype,
- "bins": bins,
- "weights": weights,
- "density": density
+ {"testcase_name": "_{}_bins={}_weights={}_density={}_range={}".format(
+ jtu.format_shape_dtype_string(shape, dtype), bins, weights, density, range),
+ "shape": shape, "dtype": dtype, "bins": bins, "weights": weights, "density": density, "range": range,
}
for shape in [(5, 3), (10, 3)]
for dtype in int_dtypes
for bins in [(2, 2, 2), [[-5, 0, 4], [-4, -1, 2], [-6, -1, 4]]]
for weights in [False, True]
for density in [False, True]
+ for range in [None, [(-1, 1), None, None], [(-1, 1), (-2, 2), (-3, 3)]]
))
- def testHistogramdd(self, shape, dtype, bins, weights, density):
+ def testHistogramdd(self, shape, dtype, bins, weights, density, range):
rng = jtu.rand_default(self.rng())
_weights = lambda w: abs(w) if weights else None
- np_fun = lambda a, w: np.histogramdd(a, bins=bins, weights=_weights(w), density=density)
- jnp_fun = lambda a, w: jnp.histogramdd(a, bins=bins, weights=_weights(w), density=density)
+ np_fun = jtu.ignore_warning(category=RuntimeWarning, message="invalid value.*")(
+ lambda a, w: np.histogramdd(a, bins=bins, weights=_weights(w), density=density, range=range))
+ jnp_fun = lambda a, w: jnp.histogramdd(a, bins=bins, weights=_weights(w), density=density, range=range)
args_maker = lambda: [rng(shape, dtype), rng((shape[0],), dtype)]
tol = {jnp.bfloat16: 2E-2, np.float16: 1E-1}
# np.searchsorted errors on bfloat16 with
| histogram2d error when specifying `range`
```python
from jax import numpy as jnp
a = [-2,2]
b=jnp.array([a,a])
print(b)
jnp.histogram2d(jnp.ones(10), jnp.ones(10), bins=10, range=b)
```
Suggested fix from the user: [this line](https://github.com/google/jax/blob/7a40aa0114e46a7a8975e04b175dfbc7b0b388ef/jax/_src/numpy/lax_numpy.py#L1054) should probably have `range[i]`. (That seems to fix it, but I didn't try writing tests!)
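For reference, a usage sketch of the behavior the per-dimension fix should enable (values here are just illustrative), mirroring NumPy's `range` argument:
```python
import jax.numpy as jnp

x = jnp.ones(10)
y = jnp.ones(10)
# One (min, max) pair per dimension, as in np.histogram2d:
H, xedges, yedges = jnp.histogram2d(x, y, bins=10, range=[(-2, 2), (-2, 2)])
```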
@jakevdp could you take a look?
| 2021-08-25T16:31:11 |
|
google/jax | 7,781 | google__jax-7781 | [
"7063"
] | 7fbbb9585853e3d2c53336d41a38b15d3fe26e6a | diff --git a/jax/_src/lax/control_flow.py b/jax/_src/lax/control_flow.py
--- a/jax/_src/lax/control_flow.py
+++ b/jax/_src/lax/control_flow.py
@@ -373,7 +373,7 @@ def _pred_bcast_select(c, pred, x, y, x_y_aval: core.AbstractValue):
elif x_y_aval is core.abstract_token:
return xops.AfterAll(c, [x, y])
else:
- assert pred_shape == x_shape[:len(pred_shape)] == y_shape[:len(pred_shape)]
+ assert pred_shape == x_shape[:len(pred_shape)] == y_shape[:len(pred_shape)], (pred_shape, x_shape, y_shape)
bcast_pred = xops.BroadcastInDim(pred, x_shape, list(range(len(pred_shape))))
return xops.Select(bcast_pred, x, y)
@@ -383,43 +383,76 @@ def _while_loop_batching_rule(args, dims, axis_name, main_type,
size, = {x.shape[d] for x, d in zip(args, dims) if d is not batching.not_mapped}
orig_batched = [d is not batching.not_mapped for d in dims]
cconst_bat, bconst_bat, init_bat = split_list(orig_batched, [cond_nconsts, body_nconsts])
+ cconsts, bconsts, init = split_list(args, [cond_nconsts, body_nconsts])
+ cconst_dims, bconst_dims, init_dims = split_list(dims, [cond_nconsts, body_nconsts])
+ carry_bat = init_bat
# Fixpoint computation of which carry are batched: either
# batched from init, or the carry out is batched. Each iteration promotes
- # at least one carry to batched. We need at most len(carry) iterations,
- # but we need one last iteration to prepare the jaxpr based on the final
- # carry_bat.
- carry_bat = init_bat
+ # at least one carry to batched. We need at most len(carry) iterations to
+ # reach a fixpoint.
for _ in range(1 + len(carry_bat)):
- batched = bconst_bat + carry_bat
- body_jaxpr_batched, carry_bat_out = batching.batch_jaxpr(
- body_jaxpr, size, batched, instantiate=carry_bat,
+ _, carry_bat_out = batching.batch_jaxpr(
+ body_jaxpr, size, bconst_bat + carry_bat, instantiate=False,
axis_name=axis_name, main_type=main_type)
- cond_jaxpr_batched, (pred_bat,) = batching.batch_jaxpr(
- cond_jaxpr, size, cconst_bat + carry_bat,
- instantiate=bool(cond_jaxpr.out_avals[0].shape),
- axis_name=axis_name, main_type=main_type)
- carry_bat_out = _map(partial(operator.or_, pred_bat), carry_bat_out)
- if carry_bat_out == carry_bat:
+ if carry_bat == carry_bat_out:
break
- else:
- carry_bat = _map(operator.or_, carry_bat, carry_bat_out)
+ carry_bat = safe_map(operator.or_, carry_bat, carry_bat_out)
else:
assert False, "Fixpoint not reached"
- consts, init = split_list(args, [cond_nconsts + body_nconsts])
- const_dims, init_dims = split_list(dims, [cond_nconsts + body_nconsts])
- new_consts = [batching.moveaxis(x, d, 0) if d is not batching.not_mapped and d != 0
- else x for x, d in zip(consts, const_dims)]
- new_init = [batching.broadcast(x, size, 0) if now_bat and not was_bat
- else batching.moveaxis(x, d, 0) if now_bat and d != 0 else x
- for x, d, was_bat, now_bat in zip(init, init_dims, init_bat, carry_bat)]
+ # Knowing how the carry is batched now, we can determine if the predicate is
+ # batched.
+ _, (pred_bat,) = batching.batch_jaxpr(
+ cond_jaxpr, size, cconst_bat + carry_bat, instantiate=False,
+ axis_name=axis_name, main_type=main_type)
+
+ if pred_bat:
+ # If the predicate is batched, we have to batch *all* of the carry
+ # regardless of if the body needs it.
+ carry_bat = [True] * len(carry_bat)
+ carry_dims = [0] * len(carry_bat)
+ body_jaxpr_batched, _ = batching.batch_jaxpr_axes(
+ body_jaxpr, size, bconst_dims + carry_dims,
+ carry_dims, axis_name=axis_name, main_type=main_type)
+ cond_jaxpr_batched, _ = batching.batch_jaxpr_axes(
+ cond_jaxpr, size, cconst_dims + carry_dims, [0],
+ axis_name=axis_name, main_type=main_type)
+ else:
+ # If the predicate is not batched, we can look at the `cond_jaxpr`'s out
+ # shape to determine the rank of the predicate. From this rank
+ # we pick the dims of the carry to be batched to ensure that the predicate
+ # shape is a prefix of the carry in and out shapes. We can then batch
+ # the `body_jaxpr` according to these new batch dims.
+ cond_rank = len(cond_jaxpr.out_avals[0].shape)
+ carry_dims = [cond_rank if b else None for b in carry_bat]
+ body_jaxpr_batched, _ = batching.batch_jaxpr_axes(
+ body_jaxpr, size, bconst_dims + carry_dims, carry_dims,
+ axis_name=axis_name, main_type=main_type)
+ # Now we need to rebatch the `cond_jaxpr` according to the new dims of the
+ # carry.
+ cond_jaxpr_batched, _ = batching.batch_jaxpr_axes(
+ cond_jaxpr, size, cconst_dims + carry_dims, (None,),
+ axis_name=axis_name, main_type=main_type)
+
+ # To prepare the `init` to the `while_p`, we broadcast values if they are
+ # unbatched and need to have an out axis. If their current batch axis does not
+ # match the one it needs to be for the translation rule to work, we move it
+ # into place.
+ new_init = []
+ for x, old_axis, new_axis in zip(init, init_dims, carry_dims):
+ if old_axis is batching.not_mapped and new_axis is not batching.not_mapped:
+ new_init.append(batching.broadcast(x, size, new_axis))
+ elif old_axis is batching.not_mapped and new_axis is batching.not_mapped:
+ new_init.append(x)
+ else:
+ assert new_axis is not batching.not_mapped
+ new_init.append(batching.moveaxis(x, old_axis, new_axis))
- outs = while_p.bind(*(new_consts + new_init),
+ outs = while_p.bind(*(cconsts + bconsts + new_init),
cond_nconsts=cond_nconsts, cond_jaxpr=cond_jaxpr_batched,
body_nconsts=body_nconsts, body_jaxpr=body_jaxpr_batched)
- out_bdims = [0 if b else batching.not_mapped for b in carry_bat]
- return outs, out_bdims
+ return outs, carry_dims
def _while_loop_jvp(primals, tangents, cond_nconsts, cond_jaxpr, body_nconsts,
body_jaxpr):
@@ -551,7 +584,7 @@ def _while_transpose_error(*_, **kwargs):
"lax.while_loop or lax.fori_loop. "
"Try using lax.scan instead.")
-while_p = lax.Primitive('while')
+while_p = core.Primitive('while')
while_p.multiple_results = True
while_p.def_impl(partial(xla.apply_primitive, while_p))
while_p.def_abstract_eval(_while_loop_abstract_eval)
diff --git a/jax/interpreters/batching.py b/jax/interpreters/batching.py
--- a/jax/interpreters/batching.py
+++ b/jax/interpreters/batching.py
@@ -470,17 +470,31 @@ def bdim_at_front(x, bdim, size):
return moveaxis(x, bdim, 0)
+zero_if_mapped = object()
+
def batch_jaxpr(closed_jaxpr, axis_size, in_batched, instantiate, axis_name, main_type):
+ if instantiate is None:
+ instantiate = False
+ if isinstance(instantiate, bool):
+ instantiate = [instantiate] * len(closed_jaxpr.out_avals)
+ out_axes = [0 if inst else zero_if_mapped for inst in instantiate]
+ return batch_jaxpr_axes(
+ closed_jaxpr, axis_size,
+ [0 if b else not_mapped for b in in_batched],
+ out_axes,
+ axis_name, main_type)
+
+def batch_jaxpr_axes(closed_jaxpr, axis_size, in_axes, out_axes, axis_name, main_type):
f = lu.wrap_init(core.jaxpr_as_fun(closed_jaxpr))
- f, out_batched = batch_subtrace_instantiate(f, instantiate, axis_size)
- f = batchfun(f, axis_name, axis_size, [0 if b else None for b in in_batched], main_type)
- avals_in = [core.unmapped_aval(axis_size, 0, aval) if b else aval
- for aval, b in zip(closed_jaxpr.in_avals, in_batched)]
+ f, out_batched = batch_subtrace_instantiate(f, axis_size, out_axes)
+ f = batchfun(f, axis_name, axis_size, in_axes, main_type)
+ avals_in = [core.unmapped_aval(axis_size, b, aval) if b is not not_mapped
+ else aval for aval, b in zip(closed_jaxpr.in_avals, in_axes)]
jaxpr_out, _, consts = pe.trace_to_jaxpr_dynamic(f, avals_in)
return core.ClosedJaxpr(jaxpr_out, consts), out_batched()
@lu.transformation_with_aux
-def batch_subtrace_instantiate(instantiate, axis_size, main, in_dims, *in_vals):
+def batch_subtrace_instantiate(axis_size, out_axes, main, in_dims, *in_vals):
# this is like `batch_subtrace` but we take an extra `instantiate` arg
# analogue of `jvp_subtrace` in ad.py
trace = main.with_cur_sublevel()
@@ -490,13 +504,12 @@ def batch_subtrace_instantiate(instantiate, axis_size, main, in_dims, *in_vals):
out_tracers = map(trace.full_raise, outs)
out_vals, out_dims = unzip2((t.val, t.batch_dim) for t in out_tracers)
- if type(instantiate) is bool:
- instantiate = [instantiate] * len(out_vals)
- out_vals = [moveaxis(x, d, 0) if d is not not_mapped and d != 0
- else broadcast(x, axis_size, 0) if d is not_mapped and inst else x
- for x, d, inst in zip(out_vals, out_dims, instantiate)]
- out_batched = [d is not not_mapped or inst
- for d, inst in zip(out_dims, instantiate)]
+ out_axes = [(None if od is not_mapped else 0) if out_axis is zero_if_mapped else out_axis
+ for od, out_axis in zip(out_dims, out_axes)]
+ out_vals = [moveaxis(x, d, od) if d is not not_mapped
+ else broadcast(x, axis_size, od) if od is not None else x
+ for x, d, od in zip(out_vals, out_dims, out_axes)]
+ out_batched = [od is not None for od in out_axes]
yield out_vals, out_batched
@lu.transformation_with_aux
| diff --git a/tests/lax_control_flow_test.py b/tests/lax_control_flow_test.py
--- a/tests/lax_control_flow_test.py
+++ b/tests/lax_control_flow_test.py
@@ -36,6 +36,7 @@
from jax import test_util as jtu
from jax import tree_util
from jax._src.util import unzip2
+from jax.experimental import maps
from jax.lib import xla_bridge
from jax.interpreters import xla
import jax.numpy as jnp # scan tests use numpy
@@ -2739,5 +2740,30 @@ def side_effecting_scan(carry, val):
lax.scan(side_effecting_scan, None, jnp.ones((2, 2)))
lst[0] += 1
+ def test_while_loop_fixed_point_with_nested_named_axes(self):
+ def f(x):
+ z = x + lax.axis_index('a')
+ y = x + lax.axis_index('b')
+ def cond(carry):
+ i, x = carry
+ return x < 5
+ def body(carry):
+ i, x = carry
+ return i + 1, x + lax.psum(y, 'b')
+ return lax.while_loop(cond, body, (0, z))[1]
+ maps.xmap(f, axis_sizes=dict(a=2, b=10), out_axes=(['a']), in_axes={})(1.)
+
+ def test_while_loop_fixed_point_with_batched_pred_and_consts(self):
+ def f(i, x):
+ def cond(carry):
+ i, x = carry
+ return i < 5
+ def body(carry):
+ i, z = carry
+ # Close over const with batch dim = 1
+ return i + 1, z + x
+ return lax.while_loop(cond, body, (i, jnp.ones(3)))[1]
+ jax.vmap(f, in_axes=(0, 1))(jnp.arange(4), jnp.ones((3, 4)))
+
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
| Loops don't track axis names correctly
As reported by @sharadmv:
```py
import jax
from jax import lax
from jax.experimental import maps
def f(x):
z = x + lax.axis_index('a')
y = x + lax.axis_index('b')
def cond(carry):
i, x = carry
return x < 5
def body(carry):
i, x = carry
return i + 1, x + lax.psum(y, 'b')
return lax.while_loop(cond, body, (0, z))[1]
maps.xmap(f, axis_sizes=dict(a=2, b=10), out_axes=(['a']), in_axes={})(1.)
```
| 2021-09-01T22:10:06 |
|
google/jax | 7,824 | google__jax-7824 | [
"7823"
] | 0dee3550250cf8e0afe8f1c0d24399844648584f | diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -5946,6 +5946,20 @@ def _nbytes(arr):
return size(arr) * _dtype(arr).itemsize
+def _clip(number, min=None, max=None, out=None, *, a_min=None, a_max=None):
+ # ndarray.clip has a slightly different API from clip (min -> a_min, max -> a_max)
+ # TODO: remove after deprecation window
+ if a_min is not None or a_max is not None:
+ warnings.warn('`a_min` and `a_max` keyword arguments to ndarray.clip are deprecated '
+ 'in favor of `min` and `max` for compatibility with numpy. '
+ 'They will be removed in JAX 0.22.2', FutureWarning)
+ if min is None and a_min is not None:
+ min = a_min
+ if max is None and a_max is not None:
+ max = a_max
+ return clip(number, a_min=min, a_max=max, out=out)
+
+
def _view(arr, dtype=None, type=None):
lax._check_user_dtype_supported(dtype, "view")
if type is not None:
@@ -6082,7 +6096,7 @@ def _operator_round(number, ndigits=None):
# These numpy.ndarray methods are just refs to an equivalent numpy function
_nondiff_methods = ["all", "any", "argmax", "argmin", "argpartition", "argsort",
"nonzero", "searchsorted", "round"]
-_diff_methods = ["choose", "clip", "conj", "conjugate", "cumprod", "cumsum",
+_diff_methods = ["choose", "conj", "conjugate", "cumprod", "cumsum",
"diagonal", "dot", "max", "mean", "min", "prod", "ptp",
"ravel", "repeat", "sort", "squeeze", "std", "sum",
"swapaxes", "take", "tile", "trace", "var"]
@@ -6313,6 +6327,7 @@ def _set_shaped_array_attributes(shaped_array):
setattr(shaped_array, "astype", core.aval_method(_astype))
setattr(shaped_array, "view", core.aval_method(_view))
setattr(shaped_array, "nbytes", core.aval_property(_nbytes))
+ setattr(shaped_array, "clip", core.aval_method(_clip))
setattr(shaped_array, "_array_module", staticmethod(__array_module__))
setattr(shaped_array, "broadcast", core.aval_method(lax.broadcast))
@@ -6340,6 +6355,7 @@ def _set_device_array_base_attributes(device_array):
setattr(device_array, "astype", _astype)
setattr(device_array, "view", _view)
setattr(device_array, "nbytes", property(_nbytes))
+ setattr(device_array, "clip", _clip)
_set_device_array_base_attributes(DeviceArray)
| Keyword arguments to jnp.ndarray.clip() do not follow the documentation
The following should work according to the docs (https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.ndarray.html), and it does work when using `numpy` instead of `jax.numpy`:
```python
import jax.numpy as jnp
jnp.zeros(3).clip(min=1)
```
However, in contrast to the docs, `jnp.ndarray.clip` actually expects the arguments to be called `a_min` and `a_max` respectively, just like `jnp.clip` does β i.e. `jnp.zeros(3).clip(a_min=1)` works.
| Thanks for the report β currently the DeviceArray `clip` method delegates directly to `jnp.clip`:
https://github.com/google/jax/blob/0dee3550250cf8e0afe8f1c0d24399844648584f/jax/_src/numpy/lax_numpy.py#L6085
https://github.com/google/jax/blob/0dee3550250cf8e0afe8f1c0d24399844648584f/jax/_src/numpy/lax_numpy.py#L6332-L6333
It looks like that has to be changed, given the different API between the function and the method. Are you interested in sending a PR? | 2021-09-06T16:25:24 |
|
google/jax | 7,879 | google__jax-7879 | [
"7821"
] | 8ca275abbcddb141936a6f6739b19157c4e2a39b | diff --git a/jax/_src/prng.py b/jax/_src/prng.py
--- a/jax/_src/prng.py
+++ b/jax/_src/prng.py
@@ -125,6 +125,7 @@ def dtype(self):
'deprecated `dtype` attribute of PRNG key arrays', FutureWarning)
return np.uint32
+ @property
def shape(self):
# TODO(frostig): simplify once we always enable_custom_prng
if config.jax_enable_custom_prng:
| diff --git a/tests/random_test.py b/tests/random_test.py
--- a/tests/random_test.py
+++ b/tests/random_test.py
@@ -1095,6 +1095,22 @@ class LaxRandomWithCustomPRNGTest(LaxRandomTest):
def seed_prng(self, seed):
return prng.seed_with_impl(double_threefry_prng_impl, seed)
+ def test_split_shape(self):
+ key = self.seed_prng(73)
+ keys = random.split(key, 10)
+ self.assertEqual(keys.shape, (10,))
+
+ def test_vmap_fold_in_shape(self):
+ key = self.seed_prng(73)
+ keys = vmap(lambda i: random.fold_in(key, i))(jnp.arange(3))
+ self.assertEqual(keys.shape, (3,))
+
+ def test_cannot_add(self):
+ key = self.seed_prng(73)
+ self.assertRaisesRegex(
+ TypeError, r'unsupported operand type\(s\) for \+*',
+ lambda: key + 47)
+
def _sampler_unimplemented_with_custom_prng(*args, **kwargs):
raise SkipTest('sampler only implemented for default RNG')
| Jax PRNGKeyArray.shape is a method, not a property
Was this an intended change?
```python
>>> import jax
>>> jax.config.update('jax_enable_custom_prng', True)
>>> k=jax.random.PRNGKey(0)
>>> k.shape
<bound method PRNGKeyArray.shape of PRNGKeyArray:
shape = ()
impl = PRNGImpl:
key_shape = (2,)
seed = <function threefry_seed at 0x12e6f00d0>
split = <function threefry_split at 0x10cc2aaf0>
random_bits = <CompiledFunction object at 0x12e6c8880>
fold_in = <function threefry_fold_in at 0x10cc2a790>>
>>> k.shape()
()
```
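For comparison, a minimal sketch of the property-style behavior the patch above restores (a toy class, not the real `PRNGKeyArray`):
```python
class ToyKeyArray:
    @property
    def shape(self):  # a property, like np.ndarray.shape
        return ()

assert ToyKeyArray().shape == ()
```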
| 2021-09-11T01:41:47 |
|
google/jax | 7,956 | google__jax-7956 | [
"7955"
] | 62230f65256728f580c5ecfa8867cac69a681cb1 | diff --git a/build/build.py b/build/build.py
--- a/build/build.py
+++ b/build/build.py
@@ -248,13 +248,13 @@ def write_bazelrc(python_bin_path=None, remote_build=None,
.format(cudnn_version=cudnn_version))
if cuda_compute_capabilities:
f.write(
- f'build:cuda --action_env TF_CUDA_COMPUTE_CAPABILITIES="{cuda_compute_capabilities}"')
+ f'build:cuda --action_env TF_CUDA_COMPUTE_CAPABILITIES="{cuda_compute_capabilities}"\n')
if rocm_toolkit_path:
f.write("build --action_env ROCM_PATH=\"{rocm_toolkit_path}\"\n"
.format(rocm_toolkit_path=rocm_toolkit_path))
if rocm_amdgpu_targets:
f.write(
- f'build:rocm --action_env TF_ROCM_AMDGPU_TARGETS="{rocm_amdgpu_targets}"')
+ f'build:rocm --action_env TF_ROCM_AMDGPU_TARGETS="{rocm_amdgpu_targets}"\n')
if cpu is not None:
f.write("build --distinct_host_configuration=true\n")
f.write(f"build --cpu={cpu}\n")
| Cuda Configuration Error: Invalid compute capability: compute_70build:rocm
> python3 build/build.py --enable_cuda
```
Cuda Configuration Error: Invalid compute capability: compute_70build:rocm
INFO: Found applicable config definition build:cuda in file /home/skoonce/jax/jax/.bazelrc: --repo_env TF_NEED_CUDA=1 --action_env TF_CUDA_COMPUTE_CAPABILITIES=3.5,5.2,6.0,6.1,7.0 --crosstool_top=@local_config_cuda//crosstool:toolchain --@local_config_cuda//:enable_cuda --define=xla_python_enable_gpu=true
ERROR: @local_config_cuda//:enable_cuda :: Error loading option @local_config_cuda//:enable_cuda:
Cuda Configuration Error: Invalid compute capability: compute_70build:rocm
b''
Traceback (most recent call last):
File "build/build.py", line 521, in <module>
main()
File "build/build.py", line 516, in main
shell(command)
File "build/build.py", line 53, in shell
output = subprocess.check_output(cmd)
File "/usr/lib/python3.8/subprocess.py", line 415, in check_output
return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,
File "/usr/lib/python3.8/subprocess.py", line 516, in run
raise CalledProcessError(retcode, process.args,
subprocess.CalledProcessError: Command '['./bazel-4.1.0-linux-x86_64', 'run', '--verbose_failures=true', '--config=avx_posix', '--config=mkl_open_source_only', '--config=cuda', ':build_wheel', '--', '--output_path=/home/skoonce/jax/jax/dist', '--cpu=x86_64']' returned non-zero exit status 2.
```
ping @yashk2810 eg d0acd9f3435a7eff924c9be937dc2abb1716e79a
| Ahh, there's a `\n` missing. I'll fix it when I have access to my computer, or you can send a PR too.
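For illustration, a toy reproduction of the failure mode (hypothetical file path and values, not the real build script):
```python
# Without the trailing "\n", the next write lands on the same line, which is how
# ...TF_CUDA_COMPUTE_CAPABILITIES="...7.0"build:rocm ends up fused in the .bazelrc.
with open("/tmp/example.bazelrc", "w") as f:
    f.write('build:cuda --action_env TF_CUDA_COMPUTE_CAPABILITIES="7.0"')   # missing \n
    f.write('build:rocm --action_env TF_ROCM_AMDGPU_TARGETS="gfx906"\n')
print(open("/tmp/example.bazelrc").read())
```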
@yashk2810 kk, give me a sec!
Can you test it too with the command you ran?
Thank you! | 2021-09-18T19:31:15 |
|
google/jax | 7,983 | google__jax-7983 | [
"7949"
] | 484b5af5c915fb6356f2cd1de8c9c826eb7d1938 | diff --git a/jax/_src/api.py b/jax/_src/api.py
--- a/jax/_src/api.py
+++ b/jax/_src/api.py
@@ -1118,9 +1118,8 @@ def hessian(fun: Callable, argnums: Union[int, Sequence[int]] = 0,
def _std_basis(pytree):
leaves, _ = tree_flatten(pytree)
ndim = sum(map(np.size, leaves))
- # TODO(mattjj): use a symbolic identity matrix here
dtype = dtypes.result_type(*leaves)
- flat_basis = np.eye(ndim, dtype=dtype)
+ flat_basis = jax.numpy.eye(ndim, dtype=dtype)
return _unravel_array_into_pytree(pytree, 1, flat_basis)
def _unravel_array_into_pytree(pytree, axis, arr):
| Slow compilation when slicing
I'm trying to reduce the compilation time of a big program (currently 30s-60s). Here are three minimal examples of code that might be the reason for some of the overhead.
#### Snippet 1:
`jacrev` for slicing takes a lot longer than for `jacfwd`. Is this expected? Without slicing this effect doesn't occur.
```python
import time
import jax.numpy as jnp
from jax import jacfwd, jacrev, jit
def f1(x):
return x
def f2(x):
return x[1:]
def jitting(f, *args):
s = time.time()
f(*args).block_until_ready()
print(f"{f.__name__} jit time: {time.time() - s}")
s = time.time()
f(*args).block_until_ready()
print(f"{f.__name__} cached time: {time.time() - s}")
x = jnp.ones([1000])
print("jit(f1)")
jitting(jit(f1), x)
print("jit(f2)")
jitting(jit(f2), x)
print("jit(jacrev(f1))")
jitting(jit(jacrev(f1)), x)
print("jit(jacrev(f2))")
jitting(jit(jacrev(f2)), x)
print("jit(jacfwd(f1))")
jitting(jit(jacfwd(f1)), x)
print("jit(jacfwd(f2))")
jitting(jit(jacfwd(f2)), x)
```
```
WARNING:absl:No GPU/TPU found, falling back to CPU. (Set TF_CPP_MIN_LOG_LEVEL=0 and rerun for more info.)
jit(f1)
f1 jit time: 0.0003452301025390625
f1 cached time: 8.082389831542969e-05
jit(f2)
f2 jit time: 0.02623915672302246
f2 cached time: 0.00027489662170410156
jit(jacrev(f1))
jacfun jit time: 0.0028450489044189453
jacfun cached time: 0.00012683868408203125
jit(jacrev(f2))
jacfun jit time: 3.8852109909057617
jacfun cached time: 0.0007808208465576172
jit(jacfwd(f1))
jacfun jit time: 0.0038161277770996094
jacfun cached time: 0.0013599395751953125
jit(jacfwd(f2))
jacfun jit time: 0.08190584182739258
jacfun cached time: 0.001171112060546875
```
#### Snippet 2:
The compilation time of `vjp_fun` with `vmap` depends heavily on the size of the matrix that is mapped over. Is this expected?
```python
import time
import jax.numpy as jnp
from jax import jit, jvp, vjp, vmap
from matplotlib import pyplot as plt
def f(x):
return x + x
def jvp_vmap(x, v):
y, vjp_fun = vjp(f, x)
J, = vmap(vjp_fun)(v)
return J
x = jnp.ones([250])
timings = []
for i in range(1, 50):
V = jnp.ones([i, x.size])
s = time.time()
jit(jvp_vmap)(x, V).block_until_ready()
timings.append(time.time() - s)
plt.plot(timings)
plt.show()
```

#### Snippet 3:
The same happens for a Gauss-Newton vector product:
```python
import time
import jax.numpy as jnp
from jax import jit, jvp, vjp, vmap
from matplotlib import pyplot as plt
def f1(x):
return x[1:]
def gnvp_fun(
x: jnp.ndarray, vs: jnp.ndarray
) -> jnp.ndarray:
jvp_fun = lambda input: jvp(f1, (x,), (input,))[1]
y, vjp_fun = vjp(f1, x)
return vmap(lambda input: vjp_fun(jvp_fun(input))[0], in_axes=1)(vs).T
x = jnp.ones([100])
timings = []
for i in range(1, 50):
V = jnp.ones([x.size, i])
s = time.time()
jit(gnvp_fun)(x, V).block_until_ready()
timings.append(time.time() - s)
plt.plot(timings)
plt.show()
```

Additionally I have some loops of size 2-4 which are unrolled and increase compilation time. When using `jax.vmap` instead the compilation time reduces but the runtime (CPU) increases significantly. Is this common?
| Seems like I am not alone in complaining that [`vmap` is slower than a `lax.scan` loop](https://github.com/google/jax/issues/7449#issuecomment-922076745), maybe `vmap` is not made for doing auto-batching for small batch size?
> maybe vmap is not made for doing auto-batching for small batch size?
I think that's correct: `vmap` does not really have anything to do with auto-batching, it's more about auto-vectorization.
@tetterl thanks for the report. The shape-dependent compilation time is strange, but note that it has nothing to do with vmap or vjp; you can get the same behavior with the unadorned function `f`:
```python
import time
import jax.numpy as jnp
from jax import jit
from matplotlib import pyplot as plt
def f(x):
return x + x
timings = []
for i in range(1, 50):
V = jnp.ones([i, 250])
s = time.time()
jit(f)(V).block_until_ready()
timings.append(time.time() - s)
plt.plot(timings)
plt.show()
```

This is quite surprising -- I'm not certain what might be causing it, but I'll ask around to see if anyone has ideas.
@mattjj points out that it may be due to some kind of shape-dependent loop unrolling within LLVM.
@jakevdp thanks for the info. I guess that won't be a quick fix then.
Do you have any clue why Snippet 1 is that slow? I still haven't found a way to speed up compilation for code where I use such slicing.
I looked into that -- it seems the issue is that for `jacrev` on a slice of size `N`, the HLO embeds a literal encoding of an `N x N` matrix. For example:
```python
import jax
import jax.numpy as jnp
def make_hlo(f, optimize=False, metadata=False, platform=None):
client = jax.lib.xla_bridge.get_backend(platform)
print_opts = jax.lib.xla_client._xla.HloPrintOptions.short_parsable()
print_opts.print_metadata = metadata
def wrapped_fn(*args, **kwargs):
c = jax.xla_computation(f)(*args, **kwargs)
if optimize:
out = client.compile(c).hlo_modules()[0].to_string(print_opts)
else:
out = c.as_hlo_module().to_string(print_opts)
return out.strip()
return wrapped_fn
f = jax.jacrev(lambda x: x[1:])
x = jnp.arange(10.0)
print(make_hlo(f)(x))
```
```
HloModule xla_computation_jacfun__8.25
primitive_computation_add.9 {
parameter.10 = f32[] parameter(0)
parameter.11 = f32[] parameter(1)
ROOT add.12 = f32[] add(parameter.10, parameter.11)
}
scatter_add_reducer__11.18 {
parameter.19 = f32[] parameter(0)
parameter.20 = f32[] parameter(1)
ROOT add.21 = f32[] add(parameter.19, parameter.20)
}
ENTRY xla_computation_jacfun__8.25 {
constant.3 = pred[] constant(false)
parameter.2 = f32[10]{0} parameter(0)
constant.4 = s32[] constant(1)
broadcast.5 = s32[1]{0} broadcast(constant.4), dimensions={}
gather.6 = f32[9]{0} gather(parameter.2, broadcast.5), offset_dims={0}, collapsed_slice_dims={}, start_index_map={0}, index_vector_dim=0, slice_sizes={9}, indices_are_sorted=true
broadcast.7 = f32[9]{0} broadcast(gather.6), dimensions={0}
constant.15 = f32[] constant(0)
broadcast.16 = f32[10]{0} broadcast(constant.15), dimensions={}
broadcast.17 = f32[9,10]{1,0} broadcast(broadcast.16), dimensions={1}
constant.1 = f32[9,9]{1,0} constant({ { 1, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 1, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 1, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 1, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 1, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 1, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 1, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 1, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 1 } })
constant.8 = f32[] constant(0)
reduce.13 = f32[9,9]{1,0} reduce(constant.1, constant.8), dimensions={}, to_apply=primitive_computation_add.9
broadcast.14 = f32[9,9]{1,0} broadcast(reduce.13), dimensions={0,1}
scatter.22 = f32[9,10]{1,0} scatter(broadcast.17, broadcast.5, broadcast.14), update_window_dims={0,1}, inserted_window_dims={}, scatter_dims_to_operand_dims={1}, index_vector_dim=0, indices_are_sorted=true, unique_indices=true, to_apply=scatter_add_reducer__11.18
slice.23 = f32[9,10]{1,0} slice(scatter.22), slice={[0:9], [0:10]}
ROOT tuple.24 = (f32[9,10]{1,0}) tuple(slice.23)
}
```
In your case, `N` is approaching 1000, so the size of the generated HLO becomes quite large. I suspect that is what's causing the slow compilation.
I'm not entirely sure where this large constant would be coming from, but I think this could probably be considered a bug.
@jakevdp Thanks for looking into this and for providing an example how I can look at the HLO by myself :)
Haven't looked at the inner workings of JAX yet but I suspect that's the matrix that is vmapped over (`jnp.eye(10)[1:, 1:]`) in the implementation of `jacrev`: `J, = vmap(vjp_fun, in_axes=0)(jnp.eye(len(y)))`?
When using `jacfwd` we get:
```
WARNING:absl:No GPU/TPU found, falling back to CPU. (Set TF_CPP_MIN_LOG_LEVEL=0 and rerun for more info.)
HloModule xla_computation_jacfun.13
ENTRY xla_computation_jacfun.13 {
constant.3 = pred[] constant(false)
parameter.2 = f32[10]{0} parameter(0)
constant.4 = s32[] constant(1)
broadcast.5 = s32[1]{0} broadcast(constant.4), dimensions={}
gather.6 = f32[9]{0} gather(parameter.2, broadcast.5), offset_dims={0}, collapsed_slice_dims={}, start_index_map={0}, index_vector_dim=0, slice_sizes={9}, indices_are_sorted=true
broadcast.8 = f32[9]{0} broadcast(gather.6), dimensions={0}
constant.1 = f32[10,10]{1,0} constant({ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 1, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 1, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 1, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 1, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 1, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 1, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 1, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 } })
gather.7 = f32[10,9]{1,0} gather(constant.1, broadcast.5), offset_dims={0,1}, collapsed_slice_dims={}, start_index_map={1}, index_vector_dim=0, slice_sizes={10,9}, indices_are_sorted=true
broadcast.9 = f32[10,9]{1,0} broadcast(gather.7), dimensions={0,1}
transpose.10 = f32[9,10]{0,1} transpose(broadcast.9), dimensions={1,0}
slice.11 = f32[9,10]{1,0} slice(transpose.10), slice={[0:9], [0:10]}
ROOT tuple.12 = (f32[9,10]{1,0}) tuple(slice.11)
}
```
So the size of the HLO is also getting large with `jacfwd` but compilation time is still fast. So this doesn't seem to be the main issue? But I agree that embedding a constant like an identity matrix doesn't seem to be optimal, especially since knowledge about the fact that it's an identity matrix could help optimizing.
PS: The constant is also embedded without the slicing.
OK, the slicing issue should be fixed by #7983 - thanks for the report! | 2021-09-22T22:04:02 |
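A sketch of the direction that fix takes (based on the patch in this record): build the standard basis with `jnp.eye`, so during tracing the identity should show up as iota/comparison ops rather than an `N x N` literal the way a `np.eye` constant would:
```python
import jax
import jax.numpy as jnp

# The printed jaxpr should show iota/eq/convert ops rather than a large literal constant.
print(jax.make_jaxpr(lambda: jnp.eye(4, dtype=jnp.float32))())
```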
|
google/jax | 8,016 | google__jax-8016 | [
"8007"
] | f3b9cac75d8eb9c7f139e057d440318362c08536 | diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -3515,8 +3515,16 @@ def atleast_3d(*arys):
return [atleast_3d(arr) for arr in arys]
-@_wraps(np.array)
-def array(object, dtype=None, copy=True, order="K", ndmin=0):
+_ARRAY_DOC = """
+This function will create arrays on JAX's default device. For control of the
+device placement of data, see :func:`jax.device_put`. More information is
+available in the JAX FAQ at :ref:`faq-data-placement` (full FAQ at
+https://jax.readthedocs.io/en/latest/faq.html).
+"""
+
+
+@_wraps(np.array, lax_description=_ARRAY_DOC)
+def array(object, dtype=None, copy=True, order="K", ndmin=0, *, device=None):
if order is not None and order != "K":
raise NotImplementedError("Only implemented for order='K'")
@@ -3577,7 +3585,7 @@ def _can_call_numpy_array(x):
for l in tree_leaves(x))
-@_wraps(np.asarray)
+@_wraps(np.asarray, lax_description=_ARRAY_DOC)
def asarray(a, dtype=None, order=None):
lax._check_user_dtype_supported(dtype, "asarray")
dtype = dtypes.canonicalize_dtype(dtype) if dtype is not None else dtype
| jax.numpy.array should allow specifying placement properties (or docs should say why not)
A jax.numpy.array has a device and a committed/uncommitted state, so it seems strange that jax.numpy.array() does not have a way to specify these aspects of jax array state when an array is created.
The specific use case I have is that I want to convert a very large regular numpy array into a jax.numpy.array placed on the CPU device, ideally without copying it.
So it seems that I ought to be able to do something like:
```python
jax_array = jax.numpy.array(numpy_array, copy=False, device=jax.devices("cpu")[0])
```
Thus, this feature request is to provide new parameters for jax.numpy.array to allow the device and committed/uncommitted state to be specified.
If it's impossible or difficult to provide this functionality, the jax.numpy.array documentation ought to be amended to discuss this point, since it is a "surprise". Ideally the docs would also suggest the appropriate idiom(s) for handling this case, particularly in Eager mode.
I think I can find other workarounds for this problem in my particular case, so it's not a blocking issue, but I'm filing the bug because this situation feels like bad API ergonomics to me.
| Thanks for the report. I think the issue here is that the bare-metal way to create & control `DeviceArray` properties is using [`jax.device_put`](https://jax.readthedocs.io/en/latest/jax.html#jax.device_put), whereas `jnp.array` and similar functions are just wrappers of the NumPy API for convenience and familiarity. `np.array` doesn't have any `device` argument so `jnp.array` does not either.
Given that `jnp.array` has become by far the most used API for creating JAX arrays, we could talk about extending the API in this case rather than making people use the less familiar `device_put` API. What do you think? If `device_put` were mentioned in the `jnp.array` docstring, would that be sufficient to solve the issue you encountered?
Either fix would be acceptable. Looking just at this specific issue, I would slightly prefer that the jnp.array() call be extended, but I don't know what your global policy is on "extending" the np.XXX calls. If you consistently avoid extending those calls, then I wouldn't advocate breaking consistency for this case, since there is an acceptable doc fix.
One more thought: I understand that it's possible to use a jnp.array followed by a jax.device_put. But in eager mode, won't that lead to the array being temporarily stored on the "wrong" device? So perhaps it is necessary to extend jnp.array() to avoid this problem.
The recommended approach would be to avoid `jnp.array` and use `device_put` directly (assuming your input is a numpy array). If you don't have a numpy array, you can first convert your input to a numpy array before passing it to `device_put`. This is roughly what `jnp.array` does.
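A minimal sketch of that recommended pattern (assuming a CPU device is available):
```python
import numpy as np
import jax

host = np.ones((1000, 1000), dtype=np.float32)   # large host-side numpy array
cpu = jax.devices("cpu")[0]
x = jax.device_put(host, cpu)                    # DeviceArray committed to `cpu`
```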
In case it's helpful, you can find more information on device placement in the FAQ: https://jax.readthedocs.io/en/latest/faq.html#controlling-data-and-computation-placement-on-devices | 2021-09-27T17:41:26 |
|
google/jax | 8,030 | google__jax-8030 | [
"8027"
] | c949c3045dd8db0451b7db7d84e568c8167dbf02 | diff --git a/jax/_src/random.py b/jax/_src/random.py
--- a/jax/_src/random.py
+++ b/jax/_src/random.py
@@ -550,10 +550,10 @@ def _multivariate_normal(key, mean, cov, shape, dtype, method) -> jnp.ndarray:
if method == 'svd':
(u, s, _) = svd(cov)
- factor = u * jnp.sqrt(s)
+ factor = u * jnp.sqrt(s[..., None, :])
elif method == 'eigh':
(w, v) = eigh(cov)
- factor = v * jnp.sqrt(w)
+ factor = v * jnp.sqrt(w[..., None, :])
else: # 'cholesky'
factor = cholesky(cov)
normal_samples = normal(key, shape + mean.shape[-1:], dtype)
| diff --git a/tests/random_test.py b/tests/random_test.py
--- a/tests/random_test.py
+++ b/tests/random_test.py
@@ -771,18 +771,19 @@ def testMultivariateNormal(self, dim, dtype, method):
self._CheckKolmogorovSmirnovCDF(whitened.ravel(), scipy.stats.norm().cdf)
@parameterized.named_parameters(jtu.cases_from_list(
- {"testcase_name": "_dim={}_mean_batch_size={}_cov_batch_size={}_shape={}"\
- .format(dim, mean_batch_size, cov_batch_size, shape),
+ {"testcase_name": "_dim={}_mean_batch_size={}_cov_batch_size={}_shape={}_method={}"\
+ .format(dim, mean_batch_size, cov_batch_size, shape, method),
"dim": dim,
"mean_batch_size": mean_batch_size,
"cov_batch_size": cov_batch_size,
- "shape": shape}
+ "shape": shape, "method": method}
for dim in [1, 2, 4]
for mean_batch_size in [(), (3,), (2, 3)]
for cov_batch_size in [(), (3,), (2, 3)]
- for shape in [(), (1,), (5,)]))
+ for shape in [(), (1,), (5,)]
+ for method in ['cholesky', 'svd', 'eigh']))
def testMultivariateNormalShapes(self, dim, mean_batch_size, cov_batch_size,
- shape):
+ shape, method):
r = np.random.RandomState(0)
key = self.seed_prng(0)
eff_batch_size = mean_batch_size \
@@ -793,7 +794,7 @@ def testMultivariateNormalShapes(self, dim, mean_batch_size, cov_batch_size,
cov += 1e-3 * np.eye(dim)
shape = shape + eff_batch_size
with jax.numpy_rank_promotion('allow'):
- samples = random.multivariate_normal(key, mean, cov, shape=shape)
+ samples = random.multivariate_normal(key, mean, cov, shape=shape, method=method)
assert samples.shape == shape + (dim,)
def testMultivariateNormalCovariance(self):
| random.multivariate_normal broadcasting error
There appears to be a broadcasting bug in jax.random.multivariate_normal for the SVD and eigh methods that does not exist for the Cholesky method. Example code and the resulting error message (for the SVD case) are appended below.
```python
import jax.numpy as jnp
from jax import random
key=random.PRNGKey(42)
key, subkey =random.split(key) # Jax random number generator key
process_dim = 100
nsamp = 10
mean = jnp.zeros((nsamp, process_dim))
covar = (jnp.ones(nsamp)[..., None, None])*(jnp.diag(jnp.ones(process_dim))[None, ...])
print('mean.shape = ', mean.shape)
print('covar.shape = ', covar.shape)
# Cholesky works as expected
norm_samp_cholesky = random.multivariate_normal(subkey, mean, covar, shape=(nsamp,), method='cholesky')
# SVD and eigh fail with broadcasting error message
norm_samp_svd = random.multivariate_normal(subkey, mean, covar, shape=(nsamp,), method='svd')
norm_samp_eigh = random.multivariate_normal(subkey, mean, covar, shape=(nsamp,), method='eigh')
```
/opt/miniconda3/envs/qso_fitting/lib/python3.9/site-packages/jax/_src/numpy/lax_numpy.py in fn(x1, x2)
683 def _maybe_bool_binop(numpy_fn, lax_fn, bool_lax_fn, lax_doc=False):
684 def fn(x1, x2):
--> 685 x1, x2 = _promote_args(numpy_fn.__name__, x1, x2)
686 return lax_fn(x1, x2) if x1.dtype != bool_ else bool_lax_fn(x1, x2)
687 fn = jit(fn, inline=True)
/opt/miniconda3/envs/qso_fitting/lib/python3.9/site-packages/jax/_src/numpy/lax_numpy.py in _promote_args(fun_name, *args)
575 _check_arraylike(fun_name, *args)
576 _check_no_float0s(fun_name, *args)
--> 577 return _promote_shapes(fun_name, *_promote_dtypes(*args))
578
579 def _promote_args_inexact(fun_name, *args):
/opt/miniconda3/envs/qso_fitting/lib/python3.9/site-packages/jax/_src/numpy/lax_numpy.py in _promote_shapes(fun_name, *args)
494 if config.jax_numpy_rank_promotion != "allow":
495 _rank_promotion_warning_or_error(fun_name, shapes)
--> 496 result_rank = len(lax.broadcast_shapes(*shapes))
497 return [broadcast_to(arg, (1,) * (result_rank - len(shp)) + shp)
498 for arg, shp in zip(args, shapes)]
[... skipping hidden 2 frame]
/opt/miniconda3/envs/qso_fitting/lib/python3.9/site-packages/jax/_src/lax/lax.py in broadcast_shapes(*shapes)
92 result_shape = _try_broadcast_shapes(shapes)
93 if result_shape is None:
---> 94 raise ValueError("Incompatible shapes for broadcasting: {}"
95 .format(tuple(map(tuple, shapes))))
96 return result_shape
ValueError: Incompatible shapes for broadcasting: ((10, 100, 100), (1, 10, 100))
| 2021-09-28T16:59:36 |
|
google/jax | 8,033 | google__jax-8033 | [
"6478"
] | d697ce2047e1da3e29f67983d5f020e6031498f6 | diff --git a/jax/_src/lax/linalg.py b/jax/_src/lax/linalg.py
--- a/jax/_src/lax/linalg.py
+++ b/jax/_src/lax/linalg.py
@@ -42,6 +42,7 @@
from jax._src.lib import xla_client
from jax._src.lib import xla_bridge as xb
+from jax._src.lib import version as jaxlib_version
xops = xla_client.ops
@@ -1441,3 +1442,119 @@ def tridiagonal_solve(dl, d, du, b):
raise ValueError(f'Only f32/f64 are supported, got {t}')
return tridiagonal_solve_p.bind(dl, d, du, b, m=m, n=n, ldb=ldb, t=t)
+
+
+# Schur Decomposition
+
+
+def schur(x,
+ compute_schur_vectors=True,
+ sort_eig_vals=False,
+ select_callable=None):
+ return schur_p.bind(
+ x,
+ compute_schur_vectors=compute_schur_vectors,
+ sort_eig_vals=sort_eig_vals,
+ select_callable=select_callable)
+
+
+def _schur_impl(operand, *, compute_schur_vectors, sort_eig_vals,
+ select_callable):
+ return xla.apply_primitive(
+ schur_p,
+ operand,
+ compute_schur_vectors=compute_schur_vectors,
+ sort_eig_vals=sort_eig_vals,
+ select_callable=select_callable)
+
+
+def _schur_translation_rule(c, operand, *, compute_schur_vectors,
+ sort_eig_vals):
+ raise NotImplementedError(
+ "Schur decomposition is only implemented on the CPU backend.")
+
+
+def _schur_abstract_eval(operand, *, compute_schur_vectors, sort_eig_vals,
+ select_callable):
+
+ if operand.ndim < 2 or operand.shape[-2] != operand.shape[-1]:
+ raise ValueError("Argument to Schur decomposition must have "
+ "shape [..., n, n], got shape {}".format(operand.shape))
+
+ batch_dims = operand.shape[:-2]
+ n = operand.shape[-1]
+ dtype = operand.dtype
+ dtype = dtypes.canonicalize_dtype(dtype)
+ T = operand.update(shape=batch_dims + (n, n), dtype=dtype)
+ vs = operand.update(shape=batch_dims + (n, n), dtype=dtype)
+
+ return (T, vs) if compute_schur_vectors else (T,)
+
+
+def _schur_cpu_translation_rule(c, operand, *, compute_schur_vectors,
+ sort_eig_vals, select_callable):
+ shape = c.get_shape(operand)
+ batch_dims = shape.dimensions()[:-2]
+
+ if jaxlib_version < (0, 1, 72):
+ raise NotImplementedError(
+ "The Schur primitive is only implemented for jaxlib versions >= 0.1.72"
+ )
+
+ _cpu_gees = lapack.gees
+
+ if sort_eig_vals:
+ T, vs, sdim, info = _cpu_gees(
+ c,
+ operand,
+ jobvs=compute_schur_vectors,
+ sort=sort_eig_vals,
+ select=select_callable)
+ else:
+ T, vs, info = _cpu_gees(
+ c,
+ operand,
+ jobvs=compute_schur_vectors,
+ sort=sort_eig_vals,
+ select=select_callable)
+
+ ok = xops.Eq(info, xops.ConstantLiteral(c, np.array(0, np.int32)))
+ T = _broadcasting_select(c, xops.Reshape(ok, batch_dims + (1, 1)), T,
+ _nan_like(c, T))
+ output = [T]
+ if compute_schur_vectors:
+ vs = _broadcasting_select(c, xops.Reshape(ok, batch_dims + (1, 1)), vs,
+ _nan_like(c, vs))
+
+ output.append(vs)
+
+ return xops.Tuple(c, output)
+
+
+def _schur_batching_rule(batched_args, batch_dims, *, compute_schur_vectors,
+ sort_eig_vals, select_callable):
+ x, = batched_args
+ bd, = batch_dims
+ x = batching.moveaxis(x, bd, 0)
+
+ return schur_p.bind(
+ x,
+ compute_schur_vectors=compute_schur_vectors,
+ sort_eig_vals=sort_eig_vals,
+ select_callable=select_callable), (0,) * (1 + compute_schur_vectors)
+
+
+def _schur_jvp_rule(primals, tangents, *, compute_schur_vectors, sort_eig_vals):
+ raise NotImplementedError(
+ 'The differentiation rules for the Schur factorization have not been implemented.'
+ )
+
+
+schur_p = Primitive('schur')
+schur_p.multiple_results = True
+schur_p.def_impl(_schur_impl)
+schur_p.def_abstract_eval(_schur_abstract_eval)
+xla.translations[schur_p] = _schur_translation_rule
+xla.backend_specific_translations['cpu'][schur_p] = _schur_cpu_translation_rule
+batching.primitive_batchers[schur_p] = _schur_batching_rule
+ad.primitive_jvps[schur_p] = _schur_jvp_rule
diff --git a/jax/lax/linalg.py b/jax/lax/linalg.py
--- a/jax/lax/linalg.py
+++ b/jax/lax/linalg.py
@@ -31,4 +31,6 @@
triangular_solve_p,
tridiagonal_solve,
tridiagonal_solve_p,
+ schur,
+ schur_p
)
diff --git a/jaxlib/lapack.py b/jaxlib/lapack.py
--- a/jaxlib/lapack.py
+++ b/jaxlib/lapack.py
@@ -584,3 +584,79 @@ def geev(c, a, jobvl=True, jobvr=True):
else:
return (_ops.GetTupleElement(out, 2), _ops.GetTupleElement(out, 3),
_ops.GetTupleElement(out, 4), _ops.GetTupleElement(out, 5))
+
+# # gees : Schur factorization
+
+def gees(c, a, jobvs=True, sort=False, select=None):
+ a_shape = c.get_shape(a)
+ dtype = a_shape.element_type()
+ dims = a_shape.dimensions()
+ assert len(dims) >= 2
+ m, n = dims[-2:]
+ assert m == n
+ batch_dims = tuple(dims[:-2])
+ num_bd = len(batch_dims)
+ b = 1
+ for d in batch_dims:
+ b *= d
+ layout = (num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1))
+
+ if sort:
+ raise NotImplementedError(
+ "The sort feature of LAPACK's gees routine is not implemented.")
+
+ jobvs = ord('V' if jobvs else 'N')
+ sort = ord('S' if sort else 'N')
+
+ if dtype == np.float32 or dtype == np.float64:
+ fn = b"lapack_sgees" if dtype == np.float32 else b"lapack_dgees"
+ schurvecs_type = dtype
+ workspaces = (Shape.array_shape(np.dtype(schurvecs_type), dims, layout),)
+ eigvals = (Shape.array_shape(
+ np.dtype(dtype), batch_dims + (n,), tuple(range(num_bd, -1, -1))),
+ Shape.array_shape(
+ np.dtype(dtype), batch_dims + (n,),
+ tuple(range(num_bd, -1, -1))))
+ elif dtype == np.complex64 or dtype == np.complex128:
+ fn = b"lapack_cgees" if dtype == np.complex64 else b"lapack_zgees"
+ schurvecs_type = dtype
+ workspaces = (
+ Shape.array_shape(np.dtype(schurvecs_type), dims, layout),
+ Shape.array_shape(
+ np.dtype(np.float32 if dtype == np.complex64 else np.float64),
+ (n,), (0,)))
+ eigvals = (Shape.array_shape(
+ np.dtype(dtype), batch_dims + (n,), tuple(range(num_bd, -1, -1))),)
+ else:
+ raise NotImplementedError("Unsupported dtype {}".format(dtype))
+
+ out = _ops.CustomCallWithLayout(
+ c,
+ fn,
+ operands=(
+ _constant_s32_scalar(c, b),
+ _constant_s32_scalar(c, n),
+ _ops.Constant(c, np.uint8(jobvs)),
+ _ops.Constant(c, np.uint8(sort)),
+ #figure out how to put the callable select function here
+ a),
+ shape_with_layout=Shape.tuple_shape(workspaces + eigvals + (
+ Shape.array_shape(np.dtype(schurvecs_type), dims, layout),
+ Shape.array_shape(
+ np.dtype(np.int32), batch_dims, tuple(range(num_bd - 1, -1, -1))),
+ Shape.array_shape(
+ np.dtype(np.int32), batch_dims, tuple(range(num_bd -
+ 1, -1, -1))))),
+ operand_shapes_with_layout=(
+ Shape.array_shape(np.dtype(np.int32), (), ()),
+ Shape.array_shape(np.dtype(np.int32), (), ()),
+ Shape.array_shape(np.dtype(np.uint8), (), ()),
+ Shape.array_shape(np.dtype(np.uint8), (), ()),
+ Shape.array_shape(dtype, dims, layout),
+ ))
+ if sort == ord('S'):
+ return (_ops.GetTupleElement(out, 0), _ops.GetTupleElement(out, 3),
+ _ops.GetTupleElement(out, 4), _ops.GetTupleElement(out, 5))
+ else:
+ return (_ops.GetTupleElement(out, 0), _ops.GetTupleElement(out, 3),
+ _ops.GetTupleElement(out, 5))
| diff --git a/tests/linalg_test.py b/tests/linalg_test.py
--- a/tests/linalg_test.py
+++ b/tests/linalg_test.py
@@ -1440,5 +1440,42 @@ def test_tridiagonal_solve(self, dtype):
A[[0, 1], [1, 2]] = du[:-1]
np.testing.assert_allclose(A @ X, B, rtol=1e-6, atol=1e-6)
+ @parameterized.named_parameters(
+ jtu.cases_from_list({
+ "testcase_name":
+ "_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)),
+ "shape": shape, "dtype": dtype
+ } for shape in [(4, 4), (15, 15), (50, 50), (100, 100)]
+ for dtype in float_types + complex_types))
+ @jtu.skip_on_devices("gpu", "tpu")
+ def testSchur(self, shape, dtype):
+ if jax._src.lib.version < (0, 1, 72):
+ self.skipTest("Schur LAPACK wrapper only implemented for jaxlib versions >= 0.1.72")
+ rng = jtu.rand_default(self.rng())
+ args_maker = lambda: [rng(shape, dtype)]
+
+ self._CheckAgainstNumpy(osp.linalg.schur, lax.linalg.schur, args_maker)
+ self._CompileAndCheck(lax.linalg.schur, args_maker)
+
+ @parameterized.named_parameters(
+ jtu.cases_from_list({
+ "testcase_name":
+ "_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)),
+ "shape": shape, "dtype": dtype
+ } for shape in [(2, 2), (4, 4), (15, 15), (50, 50), (100, 100)]
+ for dtype in float_types + complex_types))
+ @jtu.skip_on_devices("gpu", "tpu")
+ def testSchurBatching(self, shape, dtype):
+ if jax._src.lib.version < (0, 1, 72):
+ self.skipTest("Schur LAPACK wrapper only implemented for jaxlib versions >= 0.1.72")
+ rng = jtu.rand_default(self.rng())
+ batch_size = 10
+ shape = (batch_size, ) + shape
+ args = rng(shape, dtype)
+ reconstruct = vmap(lambda S, T: S @ T @ jnp.conj(S.T))
+
+ Ts, Ss = vmap(lax.linalg.schur)(args)
+ self.assertAllClose(reconstruct(Ss, Ts), args, atol=1e-4)
+
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| scipy.linalg.schur decomposition
Looks like schur decomposition isn't implemented at the moment. Are there any plans to support this? Also, anything specific that might block me from implementing this?
| This sounds pretty reasonable to me. I don't think there are any current plans to implement this, but we would welcome contributions.
See these files for examples of how we wrap LAPACK/CuSolver for new low-level solvers:
https://github.com/google/jax/blob/master/jaxlib/lapack.pyx
https://github.com/google/jax/blob/master/jaxlib/cusolver.py
(I'm guessing we don't want to implement this method ourselves)
Implemented a wrapper for the required lapack routine, but I'm facing several issues while building jaxlib from source to incorporate these changes. Are jaxlib wheels generated by the CI/CD if I open a PR? That way I can avoid generating them locally.
Tested in ubuntu 18.04 (docker).
No, the github CI uses the `_minimum_jaxlib_version` specified in `setup.py` and does not cover changes to jaxlib sources. There is some testing involving jaxlib compilation outside github CI once the `Pull Ready` tag is added, but that is unfortunately fairly opaque to non-Google employees. The best course would be to figure out how to test your changes locally.
@ayush-1506 But we can probably help you figure out what's going wrong with your jaxlib build. Have you seen https://jax.readthedocs.io/en/latest/developer.html#building-from-source ?
@hawkinsp Yes, I'm following the same instructions. The error seems trivial:
```
Server terminated abruptly (error code: 14, error message: 'Socket closed', log file: '/root/.cache/bazel/_bazel_root/deb80d6610824a92deeac7b7fd0f3e3c/server/jvm.out')
```
My guess is that this is happening due to oom.
@ayush-1506 Yes, that is plausible. You might try setting the `--jobs` argument to Bazel; using fewer jobs than you have cores might help if you are RAM constrained. You can pass extra Bazel options through the `build.py` script using `--bazel_options="--jobs 4"` or something like that.
@hawkinsp Thanks, reducing the number of jobs seems to work. However, I ran into another issue after this. (This doesn't seem to be related to oom). I'm building this without rocm/cuda/tpu.
```
[0 / 17] [Prepa] BazelWorkspaceStatusAction stable-status.txt
[136 / 1,670] Compiling com_google_protobuf/src/google/protobuf/compiler/java/java_message_lite.cc; 1s local ... (2 actions, 1 running)
[348 / 1,871] Compiling com_google_absl/absl/time/internal/cctz/src/time_zone_info.cc; 1s local ... (2 actions, 1 running)
[534 / 1,871] Compiling org_tensorflow/tensorflow/core/platform/hash.cc; 1s local ... (2 actions, 1 running)
[805 / 1,871] Compiling external/org_tensorflow/tensorflow/core/framework/full_type.pb.cc; 0s local ... (2 actions, 1 running)
[933 / 1,871] Compiling org_tensorflow/tensorflow/core/platform/default/posix_file_system.cc; 2s local ... (2 actions, 1 running)
ERROR: /root/.cache/bazel/_bazel_root/deb80d6610824a92deeac7b7fd0f3e3c/external/org_tensorflow/tensorflow/core/BUILD:1593:16: C++ compilation of rule '@org_tensorflow//tensorflow/core:framework_internal_impl' failed (Exit 1): gcc failed: error executing command
(cd /root/.cache/bazel/_bazel_root/deb80d6610824a92deeac7b7fd0f3e3c/execroot/__main__ && \
exec env - \
PATH=/usr/local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin \
PWD=/proc/self/cwd \
TF_CUDA_COMPUTE_CAPABILITIES=3.5,5.2,6.0,6.1,7.0 \
TF_ROCM_AMDGPU_TARGETS=gfx803,gfx900,gfx906,gfx1010 \
/usr/bin/gcc -U_FORTIFY_SOURCE -fstack-protector -Wall -Wunused-but-set-parameter -Wno-free-nonheap-object -fno-omit-frame-pointer -g0 -O2 '-D_FORTIFY_SOURCE=1' -DNDEBUG -ffunction-sections -fdata-sections '-std=c++11' -MD -MF bazel-out/k8-opt/bin/external/org_tensorflow/tensorflow/core/_objs/framework_internal_impl/batch_util.pic.d '-frandom-seed=bazel-out/k8-opt/bin/external/org_tensorflow/tensorflow/core/_objs/framework_internal_impl/batch_util.pic.o' -fPIC -DHAVE_SYS_UIO_H -DTF_USE_SNAPPY -DEIGEN_MPL2_ONLY '-DEIGEN_MAX_ALIGN_BYTES=64' -iquoteexternal/org_tensorflow -iquotebazel-out/k8-opt/bin/external/org_tensorflow -iquoteexternal/com_google_protobuf -iquotebazel-out/k8-opt/bin/external/com_google_protobuf -iquoteexternal/zlib -iquotebazel-out/k8-opt/bin/external/zlib -iquoteexternal/eigen_archive -iquotebazel-out/k8-opt/bin/external/eigen_archive -iquoteexternal/com_google_absl -iquotebazel-out/k8-opt/bin/external/com_google_absl -iquoteexternal/nsync -iquotebazel-out/k8-opt/bin/external/nsync -iquoteexternal/gif -iquotebazel-out/k8-opt/bin/external/gif -iquoteexternal/libjpeg_turbo -iquotebazel-out/k8-opt/bin/external/libjpeg_turbo -iquoteexternal/com_googlesource_code_re2 -iquotebazel-out/k8-opt/bin/external/com_googlesource_code_re2 -iquoteexternal/farmhash_archive -iquotebazel-out/k8-opt/bin/external/farmhash_archive -iquoteexternal/fft2d -iquotebazel-out/k8-opt/bin/external/fft2d -iquoteexternal/highwayhash -iquotebazel-out/k8-opt/bin/external/highwayhash -iquoteexternal/double_conversion -iquotebazel-out/k8-opt/bin/external/double_conversion -iquoteexternal/snappy -iquotebazel-out/k8-opt/bin/external/snappy -isystem external/com_google_protobuf/src -isystem bazel-out/k8-opt/bin/external/com_google_protobuf/src -isystem external/zlib -isystem bazel-out/k8-opt/bin/external/zlib -isystem external/org_tensorflow/third_party/eigen3/mkl_include -isystem bazel-out/k8-opt/bin/external/org_tensorflow/third_party/eigen3/mkl_include -isystem external/eigen_archive -isystem bazel-out/k8-opt/bin/external/eigen_archive -isystem external/nsync/public -isystem bazel-out/k8-opt/bin/external/nsync/public -isystem external/gif -isystem bazel-out/k8-opt/bin/external/gif -isystem external/farmhash_archive/src -isystem bazel-out/k8-opt/bin/external/farmhash_archive/src -isystem external/double_conversion -isystem bazel-out/k8-opt/bin/external/double_conversion -Wno-sign-compare -Wno-stringop-truncation -mavx '-std=c++14' -DEIGEN_AVOID_STL_ARRAY -Iexternal/gemmlowp -Wno-sign-compare '-ftemplate-depth=900' -fno-exceptions -DINTEL_MKL -msse3 -DTENSORFLOW_MONOLITHIC_BUILD -pthread '-DINTEL_MKL=1' -fno-canonical-system-headers -Wno-builtin-macro-redefined '-D__DATE__="redacted"' '-D__TIMESTAMP__="redacted"' '-D__TIME__="redacted"' -c external/org_tensorflow/tensorflow/core/util/batch_util.cc -o bazel-out/k8-opt/bin/external/org_tensorflow/tensorflow/core/_objs/framework_internal_impl/batch_util.pic.o)
Execution platform: @local_execution_config_platform//:platform
In file included from external/org_tensorflow/third_party/eigen3/unsupported/Eigen/CXX11/FixedPoint:46,
from external/org_tensorflow/tensorflow/core/framework/numeric_types.h:24,
from external/org_tensorflow/tensorflow/core/framework/allocator.h:26,
from external/org_tensorflow/tensorflow/core/framework/tensor.h:23,
from external/org_tensorflow/tensorflow/core/util/batch_util.h:18,
from external/org_tensorflow/tensorflow/core/util/batch_util.cc:16:
external/org_tensorflow/third_party/eigen3/unsupported/Eigen/CXX11/src/FixedPoint/PacketMathAVX.h:14:41: warning: ignoring attributes on template argument '__m256i' {aka '__vector(4) long long int'} [-Wignored-attributes]
typedef eigen_packet_wrapper<__m256i, 10> Packet32q8i;
^
external/org_tensorflow/third_party/eigen3/unsupported/Eigen/CXX11/src/FixedPoint/PacketMathAVX.h:15:41: warning: ignoring attributes on template argument '__m128i' {aka '__vector(2) long long int'} [-Wignored-attributes]
typedef eigen_packet_wrapper<__m128i, 11> Packet16q8i;
^
gcc: fatal error: Killed signal terminated program cc1plus
compilation terminated.
Target //build:build_wheel failed to build
INFO: Elapsed time: 1050.784s, Critical Path: 92.65s
INFO: 974 processes: 43 internal, 931 local.
FAILED: Build did NOT complete successfully
ERROR: Build failed. Not running target
FAILED: Build did NOT complete successfully
Traceback (most recent call last):
File "build/build.py", line 521, in <module>
main()
File "build/build.py", line 516, in main
shell(command)
File "build/build.py", line 51, in shell
output = subprocess.check_output(cmd)
File "/usr/local/lib/python3.8/subprocess.py", line 411, in check_output
return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,
File "/usr/local/lib/python3.8/subprocess.py", line 512, in run
raise CalledProcessError(retcode, process.args,
subprocess.CalledProcessError: Command '['./bazel-3.7.2-linux-x86_64', 'run', '--verbose_failures=true', '--jobs=2', '--config=short_logs', '--config=avx_posix', '--config=mkl_open_source_only', ':build_wheel', '--', '--output_path=/root/jax/dist']' returned non-zero exit status 1.
```
Wait, I think I figured it out. Generated the wheels successfully. However `pip3 install dist/jaxlib-0.1.66-cp36-none-manylinux2010_x86_64.whl` now gives me `jaxlib-0.1.66-cp36-none-manylinux2010_x86_64.whl is not a supported wheel on this platform.`.
I'm on x86-64 and python3.6.9
Why could this be happening?
@hawkinsp
Ok, never mind that. Everything works now.
I'm nearly done with the lapack wrapper (was away for a couple of weeks). Is `CustomCallWithLayout` documented somewhere? Can't find any docs for it. I'm trying to understand what its arguments mean. Can't wrap my head around what shape `shape_with_layout` refers to.
@jakevdp @hawkinsp
I don't think there is any documentation for `CustomCallWithLayout`; your best bet is probably to take a look at examples within the JAX codebase.
As for `shape_with_layout`, this specifies the shape of the outputs. It's essentially a set of abstract shapes that specify the dtype, the dimensions, and the memory layout, given in minor-to-major order: e.g. for a 2D array, column-major layout is `(0, 1)` (what LAPACK expects) and row-major layout is `(1, 0)`.
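To make that concrete, here is a small sketch of how a couple of output shapes could be described for a hypothetical custom call; the `m`, `n` values are arbitrary placeholders, and the helpers are the same `Shape.array_shape`/`Shape.tuple_shape` used in the gees wrapper above:
```python
import numpy as np
from jaxlib import xla_client

Shape = xla_client.Shape
m, n = 3, 4  # placeholder dimensions for this sketch

# Layouts are minor-to-major: (0, 1) makes dimension 0 the fastest-varying
# (column-major, as LAPACK expects); (1, 0) would be row-major.
out_matrix = Shape.array_shape(np.dtype(np.float64), (m, n), (0, 1))
out_vector = Shape.array_shape(np.dtype(np.float64), (n,), (0,))
shape_with_layout = Shape.tuple_shape((out_matrix, out_vector))
```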
Hope that helps
Hi everyone,
I landed on this issue trying to implement the `logm` matrix function for https://github.com/google/jax/issues/5469.
I didn't know if it was still active so went ahead wrote a wrapper for LAPACK's gees routine (which computes the Schur decomposition). It uses C++ templates like the refactored lapack wrappers in `jaxlib/lapack_kernels.cc`.
With these additions, on my macOS machine, jaxlib compiles. I wrote a schur primitive (by closely following the eig primitive code) to use the wrapper.
On preliminary tests it outputs the same decomposition as scipy's schur function, so it appears to work.
I am currently working on writing more tests for this decomposition. The current version of my code passes the existing linalg tests so it looks like nothing has broken. I thought I would write before writing the test suite to maybe get some early feedback.
The signatures for the wrappers are [here](https://github.com/SaturdayGenfo/jax/blob/3c17096bc2dfa4aa9c04028f61f58f3b2b8e8df5/jaxlib/lapack_kernels.h#L152). (For reference, the LAPACK signatures I followed are [here](https://software.intel.com/content/www/us/en/develop/documentation/onemkl-developer-reference-fortran/top/lapack-routines/lapack-least-squares-and-eigenvalue-problem-routines/lapack-least-squares-and-eigenvalue-problem-driver-routines/nonsymmetric-eigenvalue-problems-lapack-driver-routines/gees.html))
The implementation of the wrappers is [here](https://github.com/SaturdayGenfo/jax/blob/dfd69b3cc20000058366f1d8858c4bfcbae17f8a/jaxlib/lapack_kernels.cc#L631).
The tie-in to scipy's LAPACK is [here](https://github.com/SaturdayGenfo/jax/blob/3c17096bc2dfa4aa9c04028f61f58f3b2b8e8df5/jaxlib/lapack.cc#L118).
The python gees function is [here](https://github.com/SaturdayGenfo/jax/blob/dfd69b3cc20000058366f1d8858c4bfcbae17f8a/jaxlib/lapack.py#L588)
The primitive is [here](https://github.com/SaturdayGenfo/jax/blob/dfd69b3cc20000058366f1d8858c4bfcbae17f8a/jax/_src/lax/linalg.py#L1446).
I used function pointers for the *optional* callable argument that `gees` uses. Is there a way to pass a callable through `CustomCallWithLayout`? Since I didn't know how to refer to a callable in the `operands` argument, using the callable argument currently raises an error.
| 2021-09-28T19:31:42 |
google/jax | 8,063 | google__jax-8063 | [
"7810"
] | 7b4e977cbe0be4b9a5aefe173d9513cd8036ec77 | diff --git a/jax/experimental/sparse/ops.py b/jax/experimental/sparse/ops.py
--- a/jax/experimental/sparse/ops.py
+++ b/jax/experimental/sparse/ops.py
@@ -1394,7 +1394,15 @@ def __init__(self, args, *, shape):
self.shape = shape
def __repr__(self):
- repr_ = f"{self.__class__.__name__}({self.dtype}{list(self.shape)}, nse={self.nse})"
+ name = self.__class__.__name__
+ try:
+ nse = self.nse
+ dtype = self.dtype
+ shape = list(self.shape)
+ except:
+ repr_ = f"{name}(<invalid>)"
+ else:
+ repr_ = f"{name}({dtype}{shape}, nse={nse})"
if isinstance(self.data, core.Tracer):
repr_ = f"{type(self.data).__name__}[{repr_}]"
return repr_
| diff --git a/tests/sparse_test.py b/tests/sparse_test.py
--- a/tests/sparse_test.py
+++ b/tests/sparse_test.py
@@ -1113,6 +1113,13 @@ def f(X, y):
class SparseObjectTest(jtu.JaxTestCase):
+ def test_repr(self):
+ M = sparse.BCOO.fromdense(jnp.arange(5, dtype='float32'))
+ assert repr(M) == "BCOO(float32[5], nse=4)"
+
+ M_invalid = sparse.BCOO(([], []), shape=100)
+ assert repr(M_invalid) == "BCOO(<invalid>)"
+
@parameterized.named_parameters(
{"testcase_name": "_{}".format(Obj.__name__), "Obj": Obj}
for Obj in [sparse.CSR, sparse.CSC, sparse.COO, sparse.BCOO])
| `repr` error on empty BCOO sparse matrix
I tried to initialize an empty BCOO matrix by:
```
from jax.experimental import sparse as jsparse
import jax.numpy as jnp
M = jsparse.BCOO((jnp.array([]), jnp.array([])), shape=(100, 100))
```
Just using the repr on `M` gives the following error: `<repr(<jax.experimental.sparse.ops.BCOO at 0x7f15fb4c9590>) failed: IndexError: tuple index out of range>`, which is unexpected.
On the other hand, initializing `M` with
```M = jsparse.BCOO((jnp.zeros(0, dtype='float32'), jnp.zeros((0, 2), dtype='int32')), shape=(100, 100)) ```
gives the correct repr output
```
M
BCOO(float32[100, 100], nse=0)
```
| Thanks for the report!
I think we need some input validation on `__init__` - this is a bit difficult, though, because jax transforms sometimes instantiate pytrees with placeholder objects... so we'd have to somehow skip the validation in those cases.
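To sketch the idea (a hypothetical helper, not the actual implementation, and ignoring batch/dense dimensions): validate only when real shape information is available, and skip otherwise:
```python
def _maybe_validate_bcoo(data, indices, shape):
    # Transforms may pass tracers or placeholder objects here, so only
    # validate when the buffers actually carry shape information.
    if not (hasattr(data, "ndim") and hasattr(indices, "ndim")):
        return
    if indices.ndim != 2:
        raise ValueError(f"indices must have shape [nse, rank]; got shape {indices.shape}")
    nse, rank = indices.shape
    if rank != len(shape):
        raise ValueError(f"indices imply rank {rank}, but shape={shape} has rank {len(shape)}")
    if data.shape != (nse,):
        raise ValueError(f"data must have shape ({nse},) to match indices; got {data.shape}")
```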
I've also noted the following strange behavior:
```python
q = jsparse.BCOO.fromdense(jnp.zeros(100, dtype=jnp.float32))
q.data, q.indices
# (DeviceArray([], dtype=float32), DeviceArray([], dtype=int32))
jsparse.BCOO((jnp.array([], dtype=jnp.float32), jnp.array([], dtype=jnp.int32)), shape=(100,))
# <repr(<jax.experimental.sparse.ops.BCOO at 0x7f4337d49950>) failed: IndexError: tuple index out of range>
```
while
```python
jsparse.BCOO((q.data, q.indices), shape=(100,))
# BCOO(float32[100], nse=0)
```
I think this is more of a bug on the `repr` side. Inspecting the shapes manually gives:
```python
q.data.shape, q.indices.shape
# (0,), (0,1)
```
Initializing the BCOO accordingly gives the expected output
```python
q = jsparse.BCOO((jnp.zeros(0), jnp.zeros((0,1))), shape=(100,))
q
# BCOO(float32[100], nse=0)
```
but this cannot be guessed by simply printing `q.data, q.indices`, as maybe it should?
Yeah, `indices` need to be of shape `[nse, rank]`. You can see this is the case, despite the empty array repr not being clear on this:
```python
import jax.experimental.sparse as jsparse
import jax.numpy as jnp
q = jsparse.BCOO.fromdense(jnp.zeros(100, dtype=jnp.float32))
q.indices.shape
# [0, 1]
```
As with the original error, if the indices are the wrong shape it will lead to issues.
And yes, I think that instantiating `BCOO` with an incorrectly-shaped index should result in an immediate error. But as mentioned above this is difficult because of how JAX transforms use placeholders within pytrees. | 2021-10-01T17:40:44 |
google/jax | 8,065 | google__jax-8065 | [
"8064"
] | 6cc730707f3877d414e47912f9cd63b4f3257050 | diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -6690,22 +6690,43 @@ class _IndexUpdateHelper:
The ``at`` property is syntactic sugar for calling the indexed update functions
defined in :mod:`jax.ops`, and acts as a pure equivalent of in-place
- modificatons. For further information, see `Indexed Update Operators
- <https://jax.readthedocs.io/en/latest/jax.ops.html#indexed-update-operators>`_.
+ modificatons.
In particular:
- - ``x = x.at[idx].set(y)`` is a pure equivalent of ``x[idx] = y``.
- - ``x = x.at[idx].add(y)`` is a pure equivalent of ``x[idx] += y``.
- - ``x = x.at[idx].multiply(y)`` (aka ``mul``) is a pure equivalent of
- ``x[idx] *= y``.
- - ``x = x.at[idx].divide(y)`` is a pure equivalent of ``x[idx] /= y``.
- - ``x = x.at[idx].power(y)`` is a pure equivalent of ``x[idx] **= y``.
- - ``x = x.at[idx].min(y)`` is a pure equivalent of
- ``x[idx] = minimum(x[idx], y)``.
- - ``x = x.at[idx].max(y)`` is a pure equivalent of
- ``x[idx] = maximum(x[idx], y)``.
+ ============================== ================================
+ Alternate syntax Equivalent In-place expression
+ ============================== ================================
+ ``x = x.at[idx].set(y)`` ``x[idx] = y``
+ ``x = x.at[idx].add(y)`` ``x[idx] += y``
+ ``x = x.at[idx].multiply(y)`` ``x[idx] *= y``
+ ``x = x.at[idx].divide(y)`` ``x[idx] /= y``
+ ``x = x.at[idx].power(y)`` ``x[idx] **= y``
+ ``x = x.at[idx].min(y)`` ``x[idx] = minimum(x[idx], y)``
+ ``x = x.at[idx].max(y)`` ``x[idx] = maximum(x[idx], y)``
+ ``x = x.at[idx].get()`` ``x = x[idx]``
+ ============================== ================================
+
+ None of these expressions modify the original ``x``; instead they return
+ a modified copy of ``x``. However, inside a :py:func:`jax.jit` compiled function,
+ expressions like ``x = x.at[idx].set(y)`` are guaranteed to be applied in-place.
+
+ Unlike NumPy in-place operations such as :code:`x[idx] += y`, if multiple
+ indices refer to the same location, all updates will be applied (NumPy would
+ only apply the last update, rather than applying all updates.) The order
+ in which conflicting updates are applied is implementation-defined and may be
+ nondeterministic (e.g., due to concurrency on some hardware platforms).
+
+ By default, JAX assumes that all indices are in-bounds. There is experimental
+ support for giving more precise semantics to out-of-bounds indexed accesses,
+ via the ``mode`` parameter to functions such as ``get`` and ``set``. Valid
+ values for ``mode`` include ``"clip"``, which means that out-of-bounds indices
+ will be clamped into range, and ``"fill"``/``"drop"``, which are aliases and
+ mean that out-of-bounds reads will be filled with a scalar ``fill_value``,
+ and out-of-bounds writes will be discarded.
"""
+ # TODO(jakevdp): document additional arguments to the methods, including
+ # `indices_are_sorted`, `unique_indices`, `mode`, and `fill_value`.
__slots__ = ("array",)
def __init__(self, array):
@@ -6716,6 +6737,7 @@ def __getitem__(self, index):
def __repr__(self):
return f"_IndexUpdateHelper({repr(self.array)})"
+ndarray.at.__doc__ = _IndexUpdateHelper.__doc__
_power_fn = power
_divide_fn = divide
diff --git a/jax/_src/ops/scatter.py b/jax/_src/ops/scatter.py
--- a/jax/_src/ops/scatter.py
+++ b/jax/_src/ops/scatter.py
@@ -114,6 +114,9 @@ def _scatter_impl(x, y, scatter_op, treedef, static_idx, dynamic_idx,
class _Indexable(object):
"""Helper object for building indexes for indexed update functions.
+ .. deprecated:: 0.2.22
+ Prefer the use of :attr:`jax.numpy.ndarray.at`.
+
This is a singleton object that overrides the :code:`__getitem__` method
to return the index it is passed.
@@ -136,6 +139,9 @@ def index_add(x: Array,
unique_indices: bool = False) -> Array:
"""Pure equivalent of :code:`x[idx] += y`.
+ .. deprecated:: 0.2.22
+ Prefer the use of :attr:`jax.numpy.ndarray.at`.
+
Returns the value of `x` that would result from the
NumPy-style :mod:`indexed assignment <numpy.doc.indexing>`::
@@ -183,6 +189,9 @@ def index_mul(x: Array,
unique_indices: bool = False) -> Array:
"""Pure equivalent of :code:`x[idx] *= y`.
+ .. deprecated:: 0.2.22
+ Prefer the use of :attr:`jax.numpy.ndarray.at`.
+
Returns the value of `x` that would result from the
NumPy-style :mod:`indexed assignment <numpy.doc.indexing>`::
@@ -230,6 +239,9 @@ def index_min(x: Array,
unique_indices: bool = False) -> Array:
"""Pure equivalent of :code:`x[idx] = minimum(x[idx], y)`.
+ .. deprecated:: 0.2.22
+ Prefer the use of :attr:`jax.numpy.ndarray.at`.
+
Returns the value of `x` that would result from the
NumPy-style :mod:`indexed assignment <numpy.doc.indexing>`::
@@ -274,6 +286,9 @@ def index_max(x: Array,
unique_indices: bool = False) -> Array:
"""Pure equivalent of :code:`x[idx] = maximum(x[idx], y)`.
+ .. deprecated:: 0.2.22
+ Prefer the use of :attr:`jax.numpy.ndarray.at`.
+
Returns the value of `x` that would result from the
NumPy-style :mod:`indexed assignment <numpy.doc.indexing>`::
@@ -318,6 +333,9 @@ def index_update(x: Array,
unique_indices: bool = False) -> Array:
"""Pure equivalent of :code:`x[idx] = y`.
+ .. deprecated:: 0.2.22
+ Prefer the use of :attr:`jax.numpy.ndarray.at`.
+
Returns the value of `x` that would result from the
NumPy-style :mod:`indexed assignment <numpy.doc.indexing>`::
| Update jax.ops.index_* documentation to include current preferred index notation
If you search on Google for how to update array indices in JAX, you end up on [this documentation page](https://jax.readthedocs.io/en/latest/_autosummary/jax.ops.index_update.html#jax-ops-index-update) for `jax.ops.index_update`.
`jax.ops.index_update` is no longer the best way to update indices -- in most cases users should instead write code like `x.at[idx].set(y)`, which is a really great improvement!
The documentation for the `jax.ops.index_*` should also describe and/or link to the alternate, simpler, and preferred notation for working with indices.
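For concreteness, the two styles look like this (minimal sketch):
```python
import jax.numpy as jnp
from jax import ops

x = jnp.zeros(5)

# Older style, found via the documentation page above:
y_old = ops.index_update(x, ops.index[2], 1.0)

# Preferred style:
y_new = x.at[2].set(1.0)
```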
| Thanks for the report - agreed this is something that should be addressed. Are you interested in contributing a fix? If not I'm happy to take care of it.
I am not going to have time to submit a fix in a reasonable timeframe. If you are willing, you should definitely do it! Thank you!
OK, will do. Thanks! | 2021-10-01T18:10:00 |
|
google/jax | 8,115 | google__jax-8115 | [
"7965"
] | 6aa61260d124bc85beeaa2d5a67a9436e0bc79fd | diff --git a/jax/_src/lax/parallel.py b/jax/_src/lax/parallel.py
--- a/jax/_src/lax/parallel.py
+++ b/jax/_src/lax/parallel.py
@@ -896,6 +896,12 @@ def _all_to_all_batched_collective(axis_size, frame_name, _, vals_in, dims_in,
raise NotImplementedError("Please open a feature request!")
x, = vals_in
d, = dims_in
+ if d is batching.not_mapped:
+ # TODO(sharadmv,apaszke): Remove this broadcast that comes from
+ # all_gather_transpose and instead avoid using all_to_all in
+ # all_gather_transpose.
+ x = lax.broadcast(x, (axis_size, *x.shape))
+ d = 0
if isinstance(axis_name, (list, tuple)):
pos = axis_name.index(frame_name)
major_axes, minor_axes = axis_name[:pos], axis_name[pos + 1:]
@@ -1087,6 +1093,9 @@ def _all_gather_transpose_rule(cts, x, *, all_gather_dimension, axis_name, axis_
cts, axis_name=axis_name, split_axis=all_gather_dimension,
concat_axis=concat_axis, axis_index_groups=axis_index_groups),
axis=concat_axis),)
+ # TODO(sharadmv,apaszke): re-enable this when we can properly detect
+ # replication.
+ # return (lax.dynamic_index_in_dim(cts, idx, axis=all_gather_dimension, keepdims=False) * axis_size,)
def _all_gather_batcher(vals_in, dims_in, *, all_gather_dimension, axis_name, axis_index_groups, axis_size, tiled):
if tiled:
| `vmap(grad(all_gather))` errors
Repro:
```python
import functools
import jax
import jax.numpy as jnp
from jax import lax
jax.config.update('jax_traceback_filtering', 'off')
@functools.partial(jax.vmap, in_axes=(0, None), axis_name='a')
def run(x, y):
def f(x, y):
return lax.all_gather(x, 'a')[y].sum()
return jax.grad(f)(x, y)
run(jnp.ones(5), jnp.ones(10, jnp.int32))
```
Traceback:
There are a lot of frames (45), so I have kept only the last few:
```
/usr/local/lib/python3.7/dist-packages/jax/core.py in bind(self, *args, **params)
263 args, used_axis_names(self, params) if self._dispatch_on_params else None)
264 tracers = map(top_trace.full_raise, args)
--> 265 out = top_trace.process_primitive(self, tracers, params)
266 return map(full_lower, out) if self.multiple_results else full_lower(out)
267
/usr/local/lib/python3.7/dist-packages/jax/interpreters/batching.py in process_primitive(self, primitive, tracers, params)
138 _main_trace_for_axis_names(self.main, core.used_axis_names(primitive, params))):
139 frame = core.axis_frame(self.axis_name)
--> 140 val_out, dim_out = collective_rules[primitive](frame, vals_in, dims_in, **params)
141 elif all(bdim is not_mapped for bdim in dims_in):
142 return primitive.bind(*vals_in, **params)
/usr/local/lib/python3.7/dist-packages/jax/_src/lax/parallel.py in _all_to_all_batched_collective(frame, vals_in, dims_in, axis_name, split_axis, concat_axis, axis_index_groups)
901 if not major_axes and not minor_axes:
902 if split_axis == concat_axis:
--> 903 axis = split_axis + (d <= split_axis)
904 d_pre_split = d
905 x = _splitaxis(axis, frame.size, x)
TypeError: '<=' not supported between instances of 'NoneType' and 'int'
```
@apaszke
| Some further investigation indicates that the `all_to_all_p` axis primitive batcher does not currently handle unbatched (`dim=None`) inputs. | 2021-10-06T15:24:06 |
|
google/jax | 8,160 | google__jax-8160 | [
"8152"
] | a8ce40be94f67e180b7a5414ee2afa9eb40486f0 | diff --git a/jax/_src/lax/control_flow.py b/jax/_src/lax/control_flow.py
--- a/jax/_src/lax/control_flow.py
+++ b/jax/_src/lax/control_flow.py
@@ -210,6 +210,10 @@ def fori_loop(lower, upper, body_fun, init_val):
use_scan = False
if use_scan:
+ if config.jax_disable_jit and upper_ == lower_:
+ # non-jit implementation of scan does not support length=0
+ return init_val
+
(_, result), _ = scan(_fori_scan_body_fun(body_fun), (lower_, init_val),
None, length=upper_ - lower_)
else:
@@ -1284,6 +1288,8 @@ def scan(f, init, xs, length=None):
length, = unique_lengths
if config.jax_disable_jit:
+ if length == 0:
+ raise ValueError("zero-length scan is not supported in disable_jit() mode because the output type is unknown.")
carry = init
ys = []
maybe_reversed = reversed if reverse else lambda x: x
| diff --git a/tests/lax_control_flow_test.py b/tests/lax_control_flow_test.py
--- a/tests/lax_control_flow_test.py
+++ b/tests/lax_control_flow_test.py
@@ -542,6 +542,23 @@ def body_fun(i, state):
self.assertAllClose(cfun(x, num), np.sum(x[:num]), check_dtypes=False)
self.assertAllClose(cfun(x, num), np.sum(x[:num]), check_dtypes=False)
+ def testForiLoopIssue8152(self):
+ y = lax.fori_loop(lower=0, upper=0, body_fun=lambda x, i: x + i, init_val=1.)
+ self.assertAllClose(y, 1., check_dtypes=False)
+
+ # trivial fori_loop should work - even when jit is disabled
+ with jax.disable_jit():
+ y = lax.fori_loop(lower=0, upper=0, body_fun=lambda x, i: x + i, init_val=1.)
+ self.assertAllClose(y, 1., check_dtypes=False)
+
+ # scan with length 0 should work with jit, but raise an error without
+ def should_raise_wo_jit():
+ carry, out = lax.scan(lambda c, x: (c + x, x), 0., np.array([]))
+ return carry
+ self.assertAllClose(should_raise_wo_jit(), 0., check_dtypes=False)
+ with jax.disable_jit():
+ self.assertRaises(ValueError, should_raise_wo_jit)
+
def testCond(self):
def fun(x):
if x < 3:
| fori_loop with disable_jit fails when no actual iterations happen
In a trivial call of `fori_loop`, i.e. when `upper == lower`, a normal call works just fine,
but a call with `disable_jit` causes a `TypeError`.
Can be reproduced by
```python
from jax import lax, disable_jit
def test():
non_trivial = lax.fori_loop(lower=0, upper=3, body_fun=lambda x, i: x + i, init_val=1)
print(f'non-trivial result: {non_trivial}')
trivial = lax.fori_loop(lower=0, upper=0, body_fun=lambda x, i: x + i, init_val=1)
print(f'trivial result: {trivial}')
test()
# non-trivial result: 4
# trivial result: 1
with disable_jit():
test()
# non-trivial result: 4
# TypeError: tree_map() missing 1 required positional argument: 'tree'
```
This is because the [non-jit implementation of `lax.scan`](https://github.com/google/jax/blob/cd3d37f4d14bdce8501795c759be7a5b9f79e157/jax/_src/lax/control_flow.py#L1286)
calls `tree_multimap` to stack the outputs, but there aren't any outputs to stack.
I suggest checking for this special case (a trivial array to scan over) in the non-jit branch of scan and directly returning the initial carry and a trivial stacked_y.
Happy to make a PR if this is the way you want to do it.
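To make the failure mode concrete: with zero iterations the list of per-step outputs is empty, so the stacking step effectively calls the tree-mapping utility with no trees at all, which is where the `TypeError` above comes from (minimal illustration):
```python
from jax import tree_util

ys = []  # per-iteration outputs collected by the non-jit scan; empty when no iterations run
tree_util.tree_map(lambda *leaves: leaves, *ys)
# TypeError: tree_map() missing 1 required positional argument: 'tree'
```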
| Thanks for the report - it looks like you've diagnosed the issue. Would you like to prepare a PR? If you're not able to at the moment, I'm happy to look into a fix. Thanks! | 2021-10-11T17:12:06 |
google/jax | 8,164 | google__jax-8164 | [
"8163"
] | d17633413deb0122fc8dedb334f70233a2c583c4 | diff --git a/jax/experimental/sparse/bcoo.py b/jax/experimental/sparse/bcoo.py
--- a/jax/experimental/sparse/bcoo.py
+++ b/jax/experimental/sparse/bcoo.py
@@ -67,17 +67,20 @@ def _dedupe_bcoo_one(data, indices):
if indices.shape[1] == 0:
return data, indices
- # This is a fixed-size version of jnp.unique() with return_indices=True
- # unique values are zero-filled at the end.
- perm = jnp.lexsort(indices.T[::-1])
+ # The following is similar to
+ # indices_unique, inv_idx = jnp.unique(indices, axis=0, return_inverse=True,
+ # size=indices.shape[0], fill_value=0)
+ # but modified to keep padding at the end of the resulting arrays.
+ is_padding = (indices == 0).all(1) & (data == 0)
+ perm = jnp.lexsort(indices[:, ::-1].T)
aux = indices[perm]
mask = jnp.ones(indices.shape[0], dtype=bool)
mask = mask.at[1:].set(jnp.any(aux[1:] != aux[:-1], 1))
+ mask = mask & ~is_padding[perm] # this is the padding modification.
imask = jnp.cumsum(mask) - 1
indices_unique = jnp.where(mask[:, None], aux, 0)[jnp.argsort(~mask)]
inv_idx = jnp.zeros_like(imask).at[perm].set(imask)
- # With the above, de-duping is easy.
data_unique = jnp.zeros_like(data).at[inv_idx].add(data)
return data_unique, indices_unique
| diff --git a/tests/sparse_test.py b/tests/sparse_test.py
--- a/tests/sparse_test.py
+++ b/tests/sparse_test.py
@@ -1059,6 +1059,16 @@ def test_bcoo_dedupe(self, shape, dtype, n_batch, n_dense):
M_dedup = M._dedupe()
self.assertAllClose(M.todense(), M_dedup.todense())
+ def test_bcoo_dedupe_padding(self):
+ # Regression test for https://github.com/google/jax/issues/8163
+ data = jnp.array([1, 0, 0])
+ indices = jnp.array([1, 0, 0])[:, None]
+ x = sparse.BCOO((data, indices), shape=(3,))
+ y = x._dedupe()
+ self.assertArraysEqual(x.todense(), y.todense())
+ self.assertArraysEqual(x.indices, y.indices)
+ self.assertArraysEqual(x.data, y.data)
+
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_nbatch={}_ndense={}_axes={}".format(
jtu.format_shape_dtype_string(shape, dtype), n_batch, n_dense, axes),
| [sparse] BCOO._dedupe moves padded indices to the front
Example:
```python
import jax.numpy as jnp
from jax.experimental import sparse
data = jnp.array([1, 1])
indices = jnp.array([1, 1])[:, None]
shape = (2,)
x = sparse.BCOO((data, indices), shape=shape)
x = x._dedupe()
print(x.data, x.indices.ravel())
# [2 0] [1 0]
x = x._dedupe()
print(x.data, x.indices.ravel())
# [0 2] [0 1]
```
The second `_dedupe()` call should leave the representation unchanged.
| 2021-10-11T19:47:26 |
|
google/jax | 8,203 | google__jax-8203 | [
"7693"
] | 792a89ba5ffa952d2a30e6e7a61e70aa9c8f1f8e | diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -6478,6 +6478,16 @@ def piecewise(x, condlist, funclist, *args, **kw):
funclist = [0] + list(funclist)
else:
raise ValueError(f"with {nc} condition(s), either {nc} or {nc+1} functions are expected; got {nf}")
+ consts = {i: c for i, c in enumerate(funclist) if not callable(c)}
+ funcs = {i: f for i, f in enumerate(funclist) if callable(f)}
+ return _piecewise(x, condlist, consts,
+ frozenset(funcs.items()), # dict is not hashable.
+ *args, **kw)
+
+@partial(jit, static_argnames=['funcs'])
+def _piecewise(x, condlist, consts, funcs, *args, **kw):
+ funcs = dict(funcs)
+ funclist = [consts.get(i, funcs.get(i)) for i in range(len(condlist) + 1)]
indices = argmax(cumsum(concatenate([zeros_like(condlist[:1]), condlist], 0), 0), 0)
dtype = _dtype(x)
def _call(f):
| diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py
--- a/tests/lax_numpy_test.py
+++ b/tests/lax_numpy_test.py
@@ -1826,6 +1826,15 @@ def testPiecewise(self, shape, dtype, ncond, nfunc):
# This is a higher-order function, so the cache miss check will fail.
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True, check_cache_misses=False)
+ def testPiecewiseRecompile(self):
+ def g(x):
+ g.num_traces += 1
+ return x
+ g.num_traces = 0
+ x = jnp.arange(10.0)
+ for i in range(5):
+ jnp.piecewise(x, [x < 0], [g, 0.])
+ self.assertEqual(g.num_traces, 1)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "{}_perm={}_{}".format(
| jnp.piecewise compiles on every call in some cases
Arose when submitting PR #7692.
```
def g(x):
one = jnp.ones_like(x)
return jnp.where(x>0, one, -one)
def f1(x):
one = jnp.ones_like(x)
return jnp.piecewise(x, [x < 0, x > 0], [g, g, 0.])
def f2(x):
return jnp.piecewise(x, [x < 0, x > 0], [-1., 1., 0.])
x = jnp.arange(-2, 3)
for i in range(5):
print(i, 'f1', f1(x))
for i in range(5):
print(i, 'f2', f2(x))
```
Checking the logs, I see:
```
DEBUG:absl:Compiling _cumulative_reduction (5315785472) for args (ShapedArray(bool[3,5]),).
DEBUG:absl:Compiling <unnamed wrapped function> (5316489152) for args (ShapedArray(bool[5]), ShapedArray(int32[]), ShapedArray(int32[])).
DEBUG:absl:Compiling <unnamed wrapped function> (5316499392) for args (ShapedArray(bool[5]), ShapedArray(int32[]), ShapedArray(int32[])).
DEBUG:absl:Compiling <lambda> (5316499904) for args (ShapedArray(int32[5]), ShapedArray(int32[])).
0 f1 [-1 -1 0 1 1]
DEBUG:absl:Compiling <unnamed wrapped function> (5316615552) for args (ShapedArray(bool[5]), ShapedArray(int32[]), ShapedArray(int32[])).
DEBUG:absl:Compiling <unnamed wrapped function> (5316614720) for args (ShapedArray(bool[5]), ShapedArray(int32[]), ShapedArray(int32[])).
1 f1 [-1 -1 0 1 1]
DEBUG:absl:Compiling <unnamed wrapped function> (5316651968) for args (ShapedArray(bool[5]), ShapedArray(int32[]), ShapedArray(int32[])).
DEBUG:absl:Compiling <unnamed wrapped function> (5316606656) for args (ShapedArray(bool[5]), ShapedArray(int32[]), ShapedArray(int32[])).
2 f1 [-1 -1 0 1 1]
DEBUG:absl:Compiling <unnamed wrapped function> (5316694400) for args (ShapedArray(bool[5]), ShapedArray(int32[]), ShapedArray(int32[])).
DEBUG:absl:Compiling <unnamed wrapped function> (5316706816) for args (ShapedArray(bool[5]), ShapedArray(int32[]), ShapedArray(int32[])).
3 f1 [-1 -1 0 1 1]
DEBUG:absl:Compiling <unnamed wrapped function> (5316745024) for args (ShapedArray(bool[5]), ShapedArray(int32[]), ShapedArray(int32[])).
DEBUG:absl:Compiling <unnamed wrapped function> (5316666944) for args (ShapedArray(bool[5]), ShapedArray(int32[]), ShapedArray(int32[])).
4 f1 [-1 -1 0 1 1]
0 f2 [-1 -1 0 1 1]
1 f2 [-1 -1 0 1 1]
2 f2 [-1 -1 0 1 1]
3 f2 [-1 -1 0 1 1]
4 f2 [-1 -1 0 1 1]
```
Every call to `f1` causes a compilation, whereas the equivalent `f2` does not.
| Marking `f1` with `@jit` makes the problem go away. So is this a bug? Unsure.
I think the bigger question is why even a jitted `g` gets traced twice when `jnp.piecewise` is present.
Thanks for the report! The reason for the re-compilation when `piecewise` is not jitted is from these lines: https://github.com/google/jax/blob/d349086ca5c676f5bcbb26b07b2c8e5946598431/jax/_src/numpy/lax_numpy.py#L5807-L5808
Each function passed to `piecewise` is wrapped before passing it to `lax.switch` in order to pass args and kwargs.
I think the easiest fix for this would probably be to JIT-compile `jnp.piecewise` by default.
Hmm, turns out it's not so simple as that, because `funclist` can contain a mix of static and non-static arguments (either functions or possibly-traced constants). Still doable, but not as easy as adding `@jit`
I think the root cause of this is from the `switch` statement: each time `switch` encounters a function it re-traces it:
```python
In [1]: from jax.lax import switch
In [2]: def g(x):
...: print("tracing", g, x)
...: return x
In [3]: def f(x):
...: return switch(0, [g], 1)
In [4]: f(1)
tracing <function g at 0x7feb1ee56ee0> 1
Out[4]: 1
In [5]: f(1)
tracing <function g at 0x7feb1ee56ee0> 1
Out[5]: 1
```
I'm not sure whether this is intended or not.
If you change `f` to
```python
def f(x):
return switch(0, [g, g], 1)
```
subsequent calls don't retrace anything.
> Hmm, turns out it's not so simple as that, because `funclist` can contain a mix of static and non-static arguments (either functions or possibly-traced constants). Still doable, but not as easy as adding `@jit`
I may be oversimplifying the situation, but finding a workaround seems rather easy. Changing
```python
funclist = [_call(f) if callable(f) else _const(f) for f in funclist]
```
to
```python
d = {}
funclist = [d.setdefault(f, _call(f)) if callable(f) else _const(f) for f in funclist]
```
works, with the added benefit (unexpected behaviour?) that even non-jitted functions don't get traced twice. Consider
```python
@jax.jit
def g(x):
print('g')
return x
def f(x):
return jnp.piecewise(x, [x < 0], [g, g])
x = jnp.r_[1, 1, -1, -1, 0]
f(x)
```
cc @froystig for what seems like potentially missed caching in `lax.switch`
It's not tracing the single branch at all, it's [simply calling it](https://github.com/google/jax/blob/bbfd8f7cfc540d7a93b9c8e7b4460fef4af9f560/jax/_src/lax/control_flow.py#L636-L637) in line. That's a correct optimization, and I think a desirable one.
In what sense is this a root cause?
Revisiting this. With better understanding of JAX now, I think the root cause of the issue is that `jnp.piecewise` is creating temporary functions... https://github.com/google/jax/blob/10af170a85a20b86b44d6d026e9ef1669e9af0ce/jax/_src/numpy/lax_numpy.py#L6489-L6494
...and passing them to `lax.switch`, which traces the functions it receives... https://github.com/google/jax/blob/7fa6b1b5fafaf22503bc194ba66a00a6859bdadd/jax/_src/lax/control_flow.py#L656-L657
... and since all trace caching is keyed on the function ID, temporary functions are essentially never cached. So the `jnp.piecewise` implementation is similar to the second block here:
```python
import jax.numpy as jnp
from jax import lax
def g(x):
print(f'tracing g({x})')
return x
def anon(func):
return lambda *args: func(*args)
# g is traced once here:
lax.switch(0, [g, g], 1)
# tracing g(Traced<ShapedArray(int32[], weak_type=True)>with<DynamicJaxprTrace(level=1/0)>)
# g is traced multiple times here:
lax.switch(0, [anon(g), anon(g)], 1)
# tracing g(Traced<ShapedArray(int32[], weak_type=True)>with<DynamicJaxprTrace(level=1/0)>)
# tracing g(Traced<ShapedArray(int32[], weak_type=True)>with<DynamicJaxprTrace(level=1/0)>)
```
I think the best solution here would be to make `jnp.piecewise` a wrapper for an underlying JIT-compiled function that separates out the static and non-static arguments it receives, so that the entire `lax.switch` call would be JIT compiled and appropriately cached. | 2021-10-13T18:28:03 |
google/jax | 8,222 | google__jax-8222 | [
"8220"
] | 1bafdb6d7e87c2a8ae8310cd06fd73a182aa81d8 | diff --git a/jax/_src/api.py b/jax/_src/api.py
--- a/jax/_src/api.py
+++ b/jax/_src/api.py
@@ -1329,6 +1329,8 @@ def vmap(fun: F, in_axes=0, out_axes=0, axis_name=None) -> F:
(axes) of the array returned by the :func:`vmap`-ed function, which is one
more than the number of dimensions (axes) of the corresponding array
returned by ``fun``.
+ axis_name: Optional, a hashable Python object used to identify the mapped
+ axis so that parallel collectives can be applied.
Returns:
Batched/vectorized version of ``fun`` with arguments that correspond to
@@ -1403,6 +1405,16 @@ def vmap(fun: F, in_axes=0, out_axes=0, axis_name=None) -> F:
If the ``out_axes`` is specified for a mapped result, the result is transposed
accordingly.
+
+ Finally, here's an example using ``axis_name`` together with collectives:
+
+ >>> xs = jnp.arange(3. * 4.).reshape(3, 4)
+ >>> print(vmap(lambda x: lax.psum(x, 'i'), axis_name='i')(xs))
+ [[12. 15. 18. 21.]
+ [12. 15. 18. 21.]
+ [12. 15. 18. 21.]]
+
+ See the :py:func:`jax.pmap` docstring for more examples involving collectives.
"""
_check_callable(fun)
docstr = ("Vectorized version of {fun}. Takes similar arguments as {fun} "
| vmap documentation does not document "axis_name" parameter
vmap() accepts an axis_name parameter (as verified by looking at the code), but this parameter is not documented at all in the docstring.
(From visiting https://jax.readthedocs.io/en/latest/_modules/jax/_src/api.html#vmap just prior to filing this bug)
| Thanks for the report - this was added in https://github.com/google/jax/pull/4005. I'm going to assign @apaszke as the author of that PR.
I'm stealing this! | 2021-10-14T20:10:00 |
|
google/jax | 8,234 | google__jax-8234 | [
"8221"
] | aaf3bb789e03f8529b839dc29986da167831f1e8 | diff --git a/jax/_src/lax/parallel.py b/jax/_src/lax/parallel.py
--- a/jax/_src/lax/parallel.py
+++ b/jax/_src/lax/parallel.py
@@ -1089,12 +1089,10 @@ def _all_gather_transpose_rule(cts, x, *, all_gather_dimension, axis_name, axis_
axis=concat_axis),)
def _all_gather_batcher(vals_in, dims_in, *, all_gather_dimension, axis_name, axis_index_groups, axis_size, tiled):
- if tiled:
- raise NotImplementedError("Please open a feature request!")
(x,), (d,) = vals_in, dims_in
if d <= all_gather_dimension:
all_gather_dimension += 1
- else:
+ elif not tiled: # Tiled all-gather doesn't modify the set of dimensions
d += 1
result = all_gather_p.bind(
x,
@@ -1107,8 +1105,6 @@ def _all_gather_batcher(vals_in, dims_in, *, all_gather_dimension, axis_name, ax
def _all_gather_batched_collective(frame_size, frame_name, _, vals_in, dims_in, all_gather_dimension, axis_name,
axis_index_groups, axis_size, tiled):
- if tiled:
- raise NotImplementedError("Please open a feature request!")
assert axis_index_groups is None, "axis_index_groups not supported in vmap"
assert axis_size == frame_size, "axis size doesn't match"
if not isinstance(axis_name, tuple):
@@ -1121,8 +1117,12 @@ def _all_gather_batched_collective(frame_size, frame_name, _, vals_in, dims_in,
out_shape = list(np.shape(x))
out_shape.insert(all_gather_dimension, axis_size)
broadcast_dims = [i for i in range(len(out_shape)) if i != all_gather_dimension]
- return lax.broadcast_in_dim(x, out_shape, broadcast_dims), batching.not_mapped
- return _moveaxis(d, all_gather_dimension, x), batching.not_mapped
+ y = lax.broadcast_in_dim(x, out_shape, broadcast_dims)
+ else:
+ y = _moveaxis(d, all_gather_dimension, x)
+ if tiled:
+ y = _foldaxis(all_gather_dimension, y)
+ return y, batching.not_mapped
all_gather_p = core.AxisPrimitive('all_gather')
all_gather_p.def_abstract_eval(_all_gather_abstract_eval)
| diff --git a/tests/batching_test.py b/tests/batching_test.py
--- a/tests/batching_test.py
+++ b/tests/batching_test.py
@@ -1193,6 +1193,22 @@ def f(x):
res = vmap(vmap(f, axis_name='j'), axis_name='i', out_axes=None)(x)
self.assertAllClose(res, x.T)
+ def testAllGatherTiled(self):
+ def f(x):
+ return lax.all_gather(x, axis_name='i', tiled=True)
+
+ x = jnp.arange(60).reshape((4, 3, 5))
+ res = vmap(f, axis_name='i', in_axes=(1,), out_axes=None)(x)
+ self.assertAllClose(res, x.transpose((1, 0, 2)).reshape(-1, 5))
+
+ def testBatchedAllGatherTiled(self):
+ def f(x):
+ return lax.all_gather(x, axis_name='i', tiled=True)
+
+ x = jnp.arange(60).reshape((4, 3, 5))
+ res = vmap(vmap(f, in_axes=1, out_axes=1), axis_name='i', in_axes=1, out_axes=None)(x)
+ self.assertAllClose(res, x.transpose((1, 0, 2)).reshape(-1, 5))
+
def testAllGatherVjp(self):
def f(x):
return lax.all_gather(x, axis_name='i')
| Implement tiled=True for all_gather inside a vmap (to address NotImplementedError exception)
My scenario:
I use an all_gather with tiled=True inside a pmap. I hit an error which is nearly impossible to understand because pmap JITs its code. So I tried the recommended strategy of substituting "vmap" for "pmap". I then hit the following exception: raise NotImplementedError("Please open a feature request!")
This exception is raised because "tiled" is set to True for my all_gather. The exception is raised by the second line of _all_gather_batched_collective() in parallel.py
| 2021-10-15T14:38:29 |
|
google/jax | 8,268 | google__jax-8268 | [
"8267"
] | 0f0bfcaef56c3f44473ff7f6b7ef95ca6576b3e1 | diff --git a/jax/experimental/sparse/transform.py b/jax/experimental/sparse/transform.py
--- a/jax/experimental/sparse/transform.py
+++ b/jax/experimental/sparse/transform.py
@@ -55,6 +55,7 @@
from jax import core
from jax import lax
from jax import linear_util as lu
+import jax.numpy as jnp
from jax._src.api_util import flatten_fun_nokwargs
from jax.interpreters import partial_eval as pe
from jax.interpreters import xla
@@ -78,7 +79,7 @@ def __init__(self, bufs=()):
self._buffers = list(bufs)
def push(self, arr: Array) -> int:
- self._buffers.append(np.array(arr) if np.isscalar(arr) else arr) # type: ignore
+ self._buffers.append(jnp.asarray(arr)) # type: ignore
return len(self._buffers) - 1
def get(self, ind: int) -> Array:
@@ -155,7 +156,8 @@ def argspec_to_aval(argspec):
if argspec.is_unit():
return core.abstract_unit
else:
- return core.ShapedArray(argspec.shape, argspec.data(spenv).dtype)
+ data = argspec.data(spenv)
+ return core.ShapedArray(argspec.shape, data.dtype, data.aval.weak_type)
return tree_map(argspec_to_aval, argspecs, is_leaf=_is_argspec)
| diff --git a/tests/sparsify_test.py b/tests/sparsify_test.py
--- a/tests/sparsify_test.py
+++ b/tests/sparsify_test.py
@@ -343,6 +343,16 @@ def func(x, y):
with self.assertRaisesRegex(TypeError, "sparsified true_fun and false_fun output.*"):
func(x_bcoo, y)
+ def testWeakTypes(self):
+ # Regression test for https://github.com/google/jax/issues/8267
+ M = jnp.arange(12, dtype='int32').reshape(3, 4)
+ Msp = BCOO.fromdense(M)
+ self.assertArraysEqual(
+ operator.mul(2, M),
+ sparsify(operator.mul)(2, Msp).todense(),
+ check_dtypes=True,
+ )
+
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| [sparse] sparsify transform does not respect weak types
```python
from jax import config
config.update('jax_enable_x64', True)
import jax.numpy as jnp
from jax.experimental import sparse
M = jnp.arange(12, dtype='int32').reshape(3, 4)
Msp = sparse.BCOO.fromdense(M)
print((2 * M).dtype) # int32
print((2 * Msp).dtype) # int64
```
Found in the process of #8180
| 2021-10-18T20:35:58 |
|
google/jax | 8,288 | google__jax-8288 | [
"8282"
] | 9fee130d6bb4be754a2dcbe459698f340dc2f1a8 | diff --git a/jax/_src/scipy/stats/multivariate_normal.py b/jax/_src/scipy/stats/multivariate_normal.py
--- a/jax/_src/scipy/stats/multivariate_normal.py
+++ b/jax/_src/scipy/stats/multivariate_normal.py
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from functools import partial
import numpy as np
import scipy.stats as osp_stats
@@ -42,7 +43,10 @@ def logpdf(x, mean, cov, allow_singular=None):
if cov.ndim < 2 or cov.shape[-2:] != (n, n):
raise ValueError("multivariate_normal.logpdf got incompatible shapes")
L = lax.linalg.cholesky(cov)
- y = lax.linalg.triangular_solve(L, x - mean, lower=True, transpose_a=True)
+ y = jnp.vectorize(
+ partial(lax.linalg.triangular_solve, lower=True, transpose_a=True),
+ signature="(n,n),(n)->(n)"
+ )(L, x - mean)
return (-1/2 * jnp.einsum('...i,...i->...', y, y) - n/2*np.log(2*np.pi)
- jnp.log(L.diagonal(axis1=-1, axis2=-2)).sum(-1))
| diff --git a/tests/scipy_stats_test.py b/tests/scipy_stats_test.py
--- a/tests/scipy_stats_test.py
+++ b/tests/scipy_stats_test.py
@@ -528,13 +528,7 @@ def testIssue972(self):
[(3,), (3,), ()],
[(3,), (3,), (3, 3)],
[(3, 4), (4,), (4, 4)],
-
- # # These test cases are where scipy flattens things, which has
- # # different batch semantics than some might expect
- # [(5, 3, 2), (5, 3, 2,), ()],
- # [(5, 3, 2), (5, 3, 2,), (5, 3, 2, 2)],
- # [(5, 3, 2), (3, 2,), (5, 3, 2, 2)],
- # [(5, 3, 2), (3, 2,), (2, 2)],
+ [(2, 3, 4), (4,), (4, 4)],
]
for x_dtype, mean_dtype, cov_dtype in itertools.combinations_with_replacement(jtu.dtypes.floating, 3)
if (mean_shape is not None or mean_dtype == np.float32)
@@ -561,6 +555,56 @@ def args_maker():
self._CompileAndCheck(lsp_stats.multivariate_normal.logpdf, args_maker,
rtol=1e-4, atol=1e-4)
+
+ @parameterized.named_parameters(jtu.cases_from_list(
+ {"testcase_name": "_x={}_mean={}_cov={}".format(
+ jtu.format_shape_dtype_string(x_shape, x_dtype),
+ jtu.format_shape_dtype_string(mean_shape, mean_dtype)
+ if mean_shape is not None else None,
+ jtu.format_shape_dtype_string(cov_shape, cov_dtype)
+ if cov_shape is not None else None),
+ "x_shape": x_shape, "x_dtype": x_dtype,
+ "mean_shape": mean_shape, "mean_dtype": mean_dtype,
+ "cov_shape": cov_shape, "cov_dtype": cov_dtype}
+ for x_shape, mean_shape, cov_shape in [
+ # These test cases are where scipy flattens things, which has
+ # different batch semantics than some might expect, so we manually
+ # vectorize scipy's outputs for the sake of testing.
+ [(5, 3, 2), (5, 3, 2), (5, 3, 2, 2)],
+ [(2,), (5, 3, 2), (5, 3, 2, 2)],
+ [(5, 3, 2), (2,), (5, 3, 2, 2)],
+ [(5, 3, 2), (5, 3, 2,), (2, 2)],
+ [(1, 3, 2), (3, 2,), (5, 1, 2, 2)],
+ [(5, 3, 2), (1, 2,), (2, 2)],
+ ]
+ for x_dtype, mean_dtype, cov_dtype in itertools.combinations_with_replacement(jtu.dtypes.floating, 3)
+ if (mean_shape is not None or mean_dtype == np.float32)
+ and (cov_shape is not None or cov_dtype == np.float32)))
+ def testMultivariateNormalLogpdfBroadcasted(self, x_shape, x_dtype, mean_shape,
+ mean_dtype, cov_shape, cov_dtype):
+ rng = jtu.rand_default(self.rng())
+ def args_maker():
+ args = [rng(x_shape, x_dtype)]
+ if mean_shape is not None:
+ args.append(5 * rng(mean_shape, mean_dtype))
+ if cov_shape is not None:
+ if cov_shape == ():
+ args.append(0.1 + rng(cov_shape, cov_dtype) ** 2)
+ else:
+ factor_shape = (*cov_shape[:-1], 2 * cov_shape[-1])
+ factor = rng(factor_shape, cov_dtype)
+ args.append(np.matmul(factor, np.swapaxes(factor, -1, -2)))
+ return args
+
+ osp_fun = np.vectorize(osp_stats.multivariate_normal.logpdf,
+ signature="(n),(n),(n,n)->()")
+
+ self._CheckAgainstNumpy(osp_fun, lsp_stats.multivariate_normal.logpdf,
+ args_maker, tol=1e-3)
+ self._CompileAndCheck(lsp_stats.multivariate_normal.logpdf, args_maker,
+ rtol=1e-4, atol=1e-4)
+
+
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_ndim={}_nbatch={}_dtype={}".format(ndim, nbatch, dtype.__name__),
"ndim": ndim, "nbatch": nbatch, "dtype": dtype}
| multivariate_normal.pdf does not work for multi-dimensional input
```python
size = 256
x, y = jnp.mgrid[-1:1:(2/size), -1:1:(2/size)]
pos = jnp.dstack((x, y))
mean = jnp.array([0.5, -0.2])
cov = jnp.array([[2.0, 0.3], [0.3, 0.5]])
gs = jax.scipy.stats.multivariate_normal.pdf(pos, mean=mean, cov=cov)
```
Expected: `gs` holds a tensor of shape `[256, 256]` with pdf values (as in SciPy).
Result:
```
/usr/local/lib/python3.7/dist-packages/jax/_src/lax/linalg.py in triangular_solve_shape_rule(a, b, left_side, **unused_kwargs)
582 msg = ("triangular_solve requires both arguments to have the same number "
583 "of dimensions and equal batch dimensions, got {} and {}.")
--> 584 raise TypeError(msg.format(a.shape, b.shape))
585 common_dim = -2 if left_side else -1
586 if a.shape[-1] != b.shape[common_dim]:
TypeError: triangular_solve requires both arguments to have the same number of dimensions and equal batch dimensions, got (2, 2) and (256, 256, 2).
```
JAX version: 0.2.21
SciPy version: 1.4.1
See also this notebook: https://colab.research.google.com/drive/1YOW5Q2HubHQbYtrdy1B_xnT-_qVeLKOh?usp=sharing
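Until the batched case is supported directly, one possible workaround is to vectorize the single-sample pdf over the leading dimensions (a sketch, reusing `mean`, `cov`, and `pos` from the snippet above):
```python
from functools import partial
import jax.numpy as jnp
from jax.scipy.stats import multivariate_normal

pdf_batched = jnp.vectorize(
    partial(multivariate_normal.pdf, mean=mean, cov=cov),
    signature="(n)->()")
gs = pdf_batched(pos)  # gs.shape == (256, 256)
```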
| Thanks for the report - I'll take a look
It looks like some of these cases were explicitly left out in #2481; we should be able to add them. | 2021-10-19T18:01:09 |
google/jax | 8,305 | google__jax-8305 | [
"8299"
] | 1b80feea6acf758fd9dc3e616e8efcb8db831ce9 | diff --git a/jax/experimental/sparse/bcoo.py b/jax/experimental/sparse/bcoo.py
--- a/jax/experimental/sparse/bcoo.py
+++ b/jax/experimental/sparse/bcoo.py
@@ -197,6 +197,12 @@ def _bcoo_todense_batching_rule(batched_args, batch_dims, *, shape):
bcoo_fromdense_p = core.Primitive('bcoo_fromdense')
bcoo_fromdense_p.multiple_results = True
+_TRACED_NSE_ERROR = """
+The error arose for the nse argument of bcoo_fromdense. In order for BCOO.fromdense()
+to be used in traced/compiled code, you must pass a concrete value to the nse
+(number of specified elements) argument.
+"""
+
def bcoo_fromdense(mat, *, nse=None, n_batch=0, n_dense=0, index_dtype=jnp.int32):
"""Create COO-format sparse matrix from a dense matrix.
@@ -215,7 +221,7 @@ def bcoo_fromdense(mat, *, nse=None, n_batch=0, n_dense=0, index_dtype=jnp.int32
mat = jnp.asarray(mat)
if nse is None:
nse = _bcoo_nse(mat, n_batch, n_dense)
- nse = core.concrete_or_error(operator.index, nse, "nse argument of bcoo_fromdense")
+ nse = core.concrete_or_error(operator.index, nse, _TRACED_NSE_ERROR)
return bcoo_fromdense_p.bind(mat, nse=nse, n_batch=n_batch, n_dense=n_dense,
index_dtype=index_dtype)
| sparse.BCOO.fromdense cannot be called inside a function which is jittable
My use case is the following. Inside a function `f`, after some computation, I obtain a matrix that I know is sparse. I wish to convert this matrix to the BCOO sparse format so that the subsequent computations are efficient. Ideally, I should be able to jit `f`, but that doesn't look possible right now.
Here is a simple example:
```python
from jax.experimental import sparse
jsum = sparse.sparsify(jnp.sum)
def test_bcoo(A):
B = sparse.BCOO.fromdense(A)
return jsum(B)
print(test_bcoo(jnp.eye(4)))
test_bcoo_jit = jax.jit(test_bcoo)
print(test_bcoo_jit(jnp.eye(4)))
```
`test_bcoo` runs perfectly but `test_bcoo_jit` fails with following error:
```
---------------------------------------------------------------------------
ConcretizationTypeError Traceback (most recent call last)
<ipython-input-91-4e719b763a0d> in <module>
9
10 test_bcoo_jit = jax.jit(test_bcoo)
---> 11 print(test_bcoo_jit(jnp.eye(4)))
[... skipping hidden 13 frame]
<ipython-input-91-4e719b763a0d> in test_bcoo(A)
3
4 def test_bcoo(A):
----> 5 B = sparse.BCOO.fromdense(A)
6 return jsum(B)
7
/opt/anaconda3/envs/jax/lib/python3.9/site-packages/jax/experimental/sparse/bcoo.py in fromdense(cls, mat, nse, index_dtype, n_dense, n_batch)
958 def fromdense(cls, mat, *, nse=None, index_dtype=np.int32, n_dense=0, n_batch=0):
959 """Create a BCOO array from a (dense) :class:`DeviceArray`."""
--> 960 return cls(bcoo_fromdense(mat, nse=nse, index_dtype=index_dtype, n_dense=n_dense, n_batch=n_batch), shape=mat.shape)
961
962 @classmethod
/opt/anaconda3/envs/jax/lib/python3.9/site-packages/jax/experimental/sparse/bcoo.py in bcoo_fromdense(mat, nse, n_batch, n_dense, index_dtype)
216 if nse is None:
217 nse = _bcoo_nse(mat, n_batch, n_dense)
--> 218 nse = core.concrete_or_error(operator.index, nse, "nse argument of bcoo_fromdense")
219 return bcoo_fromdense_p.bind(mat, nse=nse, n_batch=n_batch, n_dense=n_dense,
220 index_dtype=index_dtype)
/opt/anaconda3/envs/jax/lib/python3.9/site-packages/jax/core.py in concrete_or_error(force, val, context)
1009 return force(val.aval.val)
1010 else:
-> 1011 raise ConcretizationTypeError(val, context)
1012 else:
1013 return force(val)
ConcretizationTypeError: Abstract tracer value encountered where concrete value is expected: Traced<ShapedArray(int64[])>with<DynamicJaxprTrace(level=0/1)>
nse argument of bcoo_fromdense
While tracing the function test_bcoo at <ipython-input-91-4e719b763a0d>:4 for jit, this concrete value was not available in Python because it depends on the value of the argument 'A'.
See https://jax.readthedocs.io/en/latest/errors.html#jax.errors.ConcretizationTypeError
```
| Thanks for the report - we should improve this error, but it is expected that `sparse.BCOO.fromdense` will fail within JIT unless you pass a static value to `nse` (number of specified elements):
```python
def test_bcoo(A):
B = sparse.BCOO.fromdense(A, nse=4)
return jsum(B)
print(test_bcoo(jnp.eye(4)))
# 4.0
print(jax.jit(test_bcoo)(jnp.eye(4)))
# 4.0
```
This is for the same reasons that simpler functions like `jnp.where`, or `jnp.nonzero`, or `jnp.unique`, will fail if used within JIT: JAX array shapes must be statically known, and not depend on the contents of traced arrays. It is a fundamental limitation of JAX's compilation model.
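For example, a minimal sketch of the same constraint (and the same static-size workaround) using `jnp.nonzero`:
```python
import jax
import jax.numpy as jnp

x = jnp.array([0., 1., 0., 2.])

# Fails under jit, because the output length would depend on the values in x:
# jax.jit(jnp.nonzero)(x)  # -> ConcretizationTypeError

# Works, because a static `size` makes the output shape value-independent:
print(jax.jit(lambda x: jnp.nonzero(x, size=2))(x))  # indices (1, 3)
```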
In general, I'd suggest treating sparse matrix creation as something that happens as a setup step before you call your workhorse function, so e.g. something like this:
```python
B = sparse.BCOO.fromdense(A)
def test_bcoo(B):
return jsum(B)
```
Then the number of specified elements can be determined automatically while the input matrix is still concrete.
Does that make sense?
#8305 improves this error message. It now says:
```
jax._src.errors.ConcretizationTypeError: Abstract tracer value encountered where concrete value is expected: Traced<ShapedArray(int32[])>with<DynamicJaxprTrace(level=0/1)>
The error arose for the nse argument of bcoo_fromdense. In order for BCOO.fromdense()
to be used in traced or compiled code, you must pass a concrete value to the nse
(number of specified elements) argument
``` | 2021-10-20T15:44:14 |
|
google/jax | 8,390 | google__jax-8390 | [
"7861"
] | df13fa2664159b97bf8526a749bc41e0a3117336 | diff --git a/jax/_src/random.py b/jax/_src/random.py
--- a/jax/_src/random.py
+++ b/jax/_src/random.py
@@ -1109,20 +1109,21 @@ def _poisson(key, lam, shape, dtype):
def poisson(key: KeyArray,
lam: RealArray,
- shape: Sequence[int] = (),
+ shape: Optional[Sequence[int]] = None,
dtype: DTypeLikeInt = dtypes.int_) -> jnp.ndarray:
"""Sample Poisson random values with given shape and integer dtype.
Args:
key: a PRNG key used as the random key.
- lam: rate parameter (mean of the distribution), must be >= 0.
+ lam: rate parameter (mean of the distribution), must be >= 0. Must be broadcast-compatible with ``shape``
shape: optional, a tuple of nonnegative integers representing the result
- shape. Default ().
+ shape. Default (None) produces a result shape equal to ``lam.shape``.
dtype: optional, a integer dtype for the returned values (default int64 if
jax_enable_x64 is true, otherwise int32).
Returns:
- A random array with the specified shape and dtype.
+ A random array with the specified dtype and with shape given by ``shape`` if
+ ``shape is not None, or else by ``lam.shape``.
"""
key, _ = _check_prng_key(key)
if key.impl is not prng.threefry_prng_impl:
@@ -1130,9 +1131,11 @@ def poisson(key: KeyArray,
'`poisson` is only implemented for the threefry2x32 RNG, '
f'not {key.impl}')
dtype = dtypes.canonicalize_dtype(dtype)
- shape = core.canonicalize_shape(shape)
- if np.shape(lam) != shape:
- lam = jnp.broadcast_to(lam, shape)
+ if shape is not None:
+ shape = core.canonicalize_shape(shape)
+ else:
+ shape = np.shape(lam)
+ lam = jnp.broadcast_to(lam, shape)
lam = lax.convert_element_type(lam, np.float32)
return _poisson(key, lam, shape, dtype)
| diff --git a/tests/random_test.py b/tests/random_test.py
--- a/tests/random_test.py
+++ b/tests/random_test.py
@@ -730,6 +730,12 @@ def testPoissonBatched(self):
self._CheckChiSquared(samples[:10000], scipy.stats.poisson(2.0).pmf)
self._CheckChiSquared(samples[10000:], scipy.stats.poisson(20.0).pmf)
+ def testPoissonWithoutShape(self):
+ key = self.seed_prng(1)
+ lam = 2 * jnp.ones(10000)
+ samples = random.poisson(key, lam)
+ self._CheckChiSquared(samples, scipy.stats.poisson(2.0).pmf)
+
def testPoissonShape(self):
key = self.seed_prng(0)
x = random.poisson(key, np.array([2.0, 20.0]), shape=(3, 2))
| jax.random.poisson does not broadcast to shape of rate param
jax.random.poisson does not broadcast to the shape of the rate parameter. This is a problem for me--I need to JIT compile a function which samples from a Poisson distribution--but because JAX doesn't broadcast the shape of the rate parameter, I end up passing in the shape of the rate parameter to Poisson, and this gives a `TracerArrayConversionError`.
By contrast, np.random.poisson broadcasts to the shape of the rate parameter.
```python
import jax
import jax.numpy as jnp
key = jax.random.PRNGKey(0)
rate = jnp.ones((5,5,5))
jax.random.poisson(key, lam=rate)
```
Output:
```
~/.conda/envs/jax_env/lib/python3.9/site-packages/jax/_src/random.py in poisson(key, lam, shape, dtype)
1242 shape = core.canonicalize_shape(shape)
1243 if np.shape(lam) != shape:
-> 1244 lam = jnp.broadcast_to(lam, shape)
1245 lam = lax.convert_element_type(lam, np.float32)
1246 return _poisson(key, lam, shape, dtype)
~/.conda/envs/jax_env/lib/python3.9/site-packages/jax/_src/numpy/lax_numpy.py in broadcast_to(arr, shape)
1830 shape_tail = shape[nlead:]
1831 compatible = _all(core.symbolic_equal_one_of_dim(arr_d, [1, shape_d])
-> 1832 for arr_d, shape_d in safe_zip(arr_shape, shape_tail))
1833 if nlead < 0 or not compatible:
1834 msg = "Incompatible shapes for broadcasting: {} and requested shape {}"
~/.conda/envs/jax_env/lib/python3.9/site-packages/jax/_src/util.py in safe_zip(*args)
31 n = len(args[0])
32 for arg in args[1:]:
---> 33 assert len(arg) == n, 'length mismatch: {}'.format(list(map(len, args)))
34 return list(zip(*args))
35
AssertionError: length mismatch: [3, 0]
```
tagging @shoyer since I think he wrote the Poisson sampling code, hope that's not too obnoxious :)
Link to original PR which added Poisson sampler is here: https://github.com/google/jax/pull/2805
| Looking at the source, it seems like it's just a matter of a few tweaks: changing the default shape to `None` and then adding a line like
```python
if shape is None:
shape = lam.shape
```
This seems to be how the other functions in jax.random handle this. I can make a PR if helpful
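For reference, a small sketch of that existing convention using `jax.random.bernoulli`, whose default `shape=None` falls back to the parameter's shape:
```python
import jax
import jax.numpy as jnp

key = jax.random.PRNGKey(0)
p = 0.3 * jnp.ones((5, 5, 5))
# With shape=None (the default), the sample takes the shape of `p`.
print(jax.random.bernoulli(key, p).shape)  # (5, 5, 5)
```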
We always welcome PRs! A PR that fixes the problem together with a testcase that demonstrates it would be very welcome. | 2021-10-28T19:24:47 |
google/jax | 8,421 | google__jax-8421 | [
"8414"
] | 32319e1bc36e17ca270e9ff1a9545e6680f9eb28 | diff --git a/jax/_src/numpy/linalg.py b/jax/_src/numpy/linalg.py
--- a/jax/_src/numpy/linalg.py
+++ b/jax/_src/numpy/linalg.py
@@ -53,9 +53,26 @@ def cholesky(a):
@_wraps(np.linalg.svd)
-@partial(jit, static_argnames=('full_matrices', 'compute_uv'))
-def svd(a, full_matrices=True, compute_uv=True):
+@partial(jit, static_argnames=('full_matrices', 'compute_uv', 'hermitian'))
+def svd(a, full_matrices: bool = True, compute_uv: bool = True,
+ hermitian: bool = False):
a = _promote_arg_dtypes(jnp.asarray(a))
+ if hermitian:
+ w, v = lax_linalg.eigh(a)
+ s = lax.abs(v)
+ if compute_uv:
+ sign = lax.sign(v)
+ idxs = lax.broadcasted_iota(np.int64, s.shape, dimension=s.ndim - 1)
+ s, idxs, sign = lax.sort((s, idxs, sign), dimension=-1, num_keys=1)
+ s = lax.rev(s, dimensions=[s.ndim - 1])
+ idxs = lax.rev(idxs, dimensions=[s.ndim - 1])
+ sign = lax.rev(sign, dimensions=[s.ndim - 1])
+ u = jnp.take_along_axis(w, idxs[..., None, :], axis=-1)
+ vh = _H(u * sign[..., None, :])
+ return u, s, vh
+ else:
+ return lax.rev(lax.sort(s, dimension=-1), dimensions=[s.ndim-1])
+
return lax_linalg.svd(a, full_matrices, compute_uv)
| diff --git a/tests/linalg_test.py b/tests/linalg_test.py
--- a/tests/linalg_test.py
+++ b/tests/linalg_test.py
@@ -527,18 +527,19 @@ def testNorm(self, shape, dtype, ord, axis, keepdims):
self._CompileAndCheck(jnp_fn, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
- {"testcase_name": "_n={}_full_matrices={}_compute_uv={}".format(
+ {"testcase_name": "_n={}_full_matrices={}_compute_uv={}_hermitian={}".format(
jtu.format_shape_dtype_string(b + (m, n), dtype), full_matrices,
- compute_uv),
+ compute_uv, hermitian),
"b": b, "m": m, "n": n, "dtype": dtype, "full_matrices": full_matrices,
- "compute_uv": compute_uv}
+ "compute_uv": compute_uv, "hermitian": hermitian}
for b in [(), (3,), (2, 3)]
for m in [0, 2, 7, 29, 53]
for n in [0, 2, 7, 29, 53]
for dtype in float_types + complex_types
for full_matrices in [False, True]
- for compute_uv in [False, True]))
- def testSVD(self, b, m, n, dtype, full_matrices, compute_uv):
+ for compute_uv in [False, True]
+ for hermitian in ([False, True] if m == n else [False])))
+ def testSVD(self, b, m, n, dtype, full_matrices, compute_uv, hermitian):
if (jnp.issubdtype(dtype, np.complexfloating) and
jtu.device_under_test() == "tpu"):
raise unittest.SkipTest("No complex SVD implementation")
@@ -551,7 +552,10 @@ def norm(x):
return norm / (max(1, m, n) * jnp.finfo(dtype).eps)
a, = args_maker()
- out = jnp.linalg.svd(a, full_matrices=full_matrices, compute_uv=compute_uv)
+ if hermitian:
+ a = a + np.conj(T(a))
+ out = jnp.linalg.svd(a, full_matrices=full_matrices, compute_uv=compute_uv,
+ hermitian=hermitian)
if compute_uv:
# Check the reconstructed matrices
if full_matrices:
| Incorrect documentation for jax.numpy.linalg.svd
The [documentation](https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.linalg.svd.html) shows that `jax.numpy.linalg.svd` accepts the keyword argument `hermitian`, like the numpy.linalg version. But the signature `jax.numpy.linalg.svd(a, full_matrices=True, compute_uv=True)` does not list this, and when one tries to use it an error is thrown.
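A minimal reproduction (a sketch; the exact exception wording may vary by version):
```python
import jax.numpy as jnp

a = jnp.eye(3)
jnp.linalg.svd(a, hermitian=True)
# TypeError: svd() got an unexpected keyword argument 'hermitian'
```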
| Thanks for the report - documentation for numpy wrappers is automatically generated from the numpy docs. You'll see that the type signature at the top of the page shows the correct parameters, and *Original docstring below* indicates that the remainder of the documentation comes from the original numpy function.
It's an imperfect solution, but prevents us from having to copy modified versions of all docstrings in the wrapped functions to the JAX source, which would be prone to becoming outdated and which in terms of open source licensing requirements would be problematic.
What do you think? Is there something we could do to make that more clear?
Implementing the `hermitian=True` case looks pretty easy, I'll have a go at that. | 2021-11-01T13:56:30 |
google/jax | 8,426 | google__jax-8426 | [
"8412"
] | 6dce6c929774a3d71a214d6ce4f6bdf34f277f85 | diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -5996,16 +5996,32 @@ def _eliminate_deprecated_list_indexing(idx):
idx = (idx,)
return idx
+def _is_boolean_index(i):
+ try:
+ abstract_i = core.get_aval(i)
+ except TypeError:
+ abstract_i = None
+ return (isinstance(abstract_i, ShapedArray) and issubdtype(abstract_i.dtype, bool_)
+ or isinstance(i, list) and i and _all(_is_scalar(e)
+ and issubdtype(_dtype(e), np.bool_) for e in i))
+
def _expand_bool_indices(idx, shape):
"""Converts concrete bool indexes into advanced integer indexes."""
out = []
+ total_dims = len(shape)
+ num_ellipsis = _sum(e is Ellipsis for e in idx)
+ if num_ellipsis > 1:
+ raise IndexError("an index can only have a single ellipsis ('...')")
+ elif num_ellipsis == 1:
+ total_dims = _sum(_ndim(e) if _is_boolean_index(e) else 1 for e in idx
+ if e is not None and e is not Ellipsis)
+ ellipsis_offset = 0
for dim_number, i in enumerate(idx):
try:
abstract_i = core.get_aval(i)
except TypeError:
abstract_i = None
- if (isinstance(abstract_i, ShapedArray) and issubdtype(abstract_i.dtype, bool_)
- or isinstance(i, list) and i and _all(_is_scalar(e) and issubdtype(_dtype(e), np.bool_) for e in i)):
+ if _is_boolean_index(i):
if isinstance(i, list):
i = array(i)
abstract_i = core.get_aval(i)
@@ -6017,13 +6033,16 @@ def _expand_bool_indices(idx, shape):
raise TypeError("JAX arrays do not support boolean scalar indices")
else:
i_shape = _shape(i)
- expected_shape = shape[len(out): len(out) + _ndim(i)]
+ start = len(out) + ellipsis_offset
+ expected_shape = shape[start: start + _ndim(i)]
if i_shape != expected_shape:
raise IndexError("boolean index did not match shape of indexed array in index "
f"{dim_number}: got {i_shape}, expected {expected_shape}")
out.extend(np.where(i))
else:
out.append(i)
+ if i is Ellipsis:
+ ellipsis_offset = _max(0, len(shape) - total_dims - 1)
return tuple(out)
| diff --git a/tests/lax_numpy_indexing_test.py b/tests/lax_numpy_indexing_test.py
--- a/tests/lax_numpy_indexing_test.py
+++ b/tests/lax_numpy_indexing_test.py
@@ -749,6 +749,28 @@ def testBooleanIndexingArray2D(self):
expected = x[idx]
self.assertAllClose(ans, expected, check_dtypes=False)
+ def testBoolean1DIndexingWithEllipsis(self):
+ # Regression test for https://github.com/google/jax/issues/8412
+ x = np.arange(24).reshape(4, 3, 2)
+ idx = (..., np.array([True, False]))
+ ans = jnp.array(x)[idx]
+ expected = x[idx]
+ self.assertAllClose(ans, expected, check_dtypes=False)
+
+ def testBoolean2DIndexingWithEllipsis(self):
+ x = np.arange(24).reshape(4, 3, 2)
+ idx = (..., np.array([[True, False], [True, False], [False, False]]))
+ ans = jnp.array(x)[idx]
+ expected = x[idx]
+ self.assertAllClose(ans, expected, check_dtypes=False)
+
+ def testBoolean1DIndexingWithTrailingEllipsis(self):
+ x = np.arange(24).reshape(4, 3, 2)
+ idx = (np.array([True, False, True, False]), ...)
+ ans = jnp.array(x)[idx]
+ expected = x[idx]
+ self.assertAllClose(ans, expected, check_dtypes=False)
+
def testBooleanIndexingDynamicShapeError(self):
x = np.zeros(3)
i = np.array([True, True, False])
@@ -760,6 +782,8 @@ def testScalarBooleanIndexingNotImplemented(self):
jnp.arange(4)[True]
with self.assertRaisesRegex(TypeError, msg):
jnp.arange(4)[False]
+ with self.assertRaisesRegex(TypeError, msg):
+ jnp.arange(4)[..., True]
def testIssue187(self):
x = jnp.ones((5, 5))
| Boolean indexing is not compatible with Ellipsis
It seems that the following snippet does not work anymore.
```python
import jax
import jax.numpy as jnp
import numpy as np
jnp.ones((4, 3, 2))[..., np.array([True, False])] # fail
jnp.ones((4, 3, 2))[:, :, np.array([True, False])] # work
np.ones((4, 3, 2))[..., np.array([True, False])] # work
```
| Thanks for the report. It looks like we canonicalize boolean indices before handling ellipsis, so that's something that will need to change. | 2021-11-01T19:24:32 |
google/jax | 8,488 | google__jax-8488 | [
"8469",
"8649"
] | f1261000d2ac1dddfecfe33f33bfeeaf192b747f | diff --git a/jax/_src/lax/control_flow.py b/jax/_src/lax/control_flow.py
--- a/jax/_src/lax/control_flow.py
+++ b/jax/_src/lax/control_flow.py
@@ -788,7 +788,10 @@ def cond(*args, **kwargs):
except TypeError:
pass
else:
- return _cond_with_per_branch_args(*ba.args)
+ assert not ba.kwargs # no catch-all **kwargs in _cond_with_per_branch
+ _, _, maybe_true_fun, _, maybe_false_fun = ba.args
+ if callable(maybe_true_fun) and callable(maybe_false_fun):
+ return _cond_with_per_branch_args(*ba.args)
return _cond(*args, **kwargs)
| diff --git a/tests/lax_control_flow_test.py b/tests/lax_control_flow_test.py
--- a/tests/lax_control_flow_test.py
+++ b/tests/lax_control_flow_test.py
@@ -599,8 +599,6 @@ def false_fun(x):
def testCondTwoOperands(self):
# see https://github.com/google/jax/issues/8469
- self.skipTest("two-operand cond behavior is ambiguous (#8469)")
-
add, mul = lax.add, lax.mul
def fun(x):
| two-operand cond conflicts with per-branch-operand cond
#8467 introduces multi-operand `lax.cond`. When given two operands, as in:
```python
def f(x):
return lax.cond(x == 0, lax.add, lax.mul, x, y)
```
`cond` detects five total arguments and falls back to behavior that is backwards-compatible with the deprecated per-branch-operand `cond`. The latter accepts, in order: a predicate, true operand, true branch, false operand, and false branch. This results in an error:
```
TypeError: Value <function add ...> with type <class 'function'> is not a valid JAX type
```
One option is to roll back (or disallow) multi-operand cond for now, complete the deprecation of the previous cond, and then roll forward (or re-enable) multi-operand cond.
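One possible interim workaround (a sketch; `y` is assumed to be in scope as in the example above) is to pack both operands into a single tuple operand so the call is unambiguous:
```python
from jax import lax

def f(x):
  return lax.cond(x == 0,
                  lambda operands: lax.add(*operands),
                  lambda operands: lax.mul(*operands),
                  (x, y))
```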
| Having the same issue here! For now, I have to strictly use single operand `cond` calls. | 2021-11-08T22:52:55 |
google/jax | 8,498 | google__jax-8498 | [
"8495"
] | 249cdff2d3084be7f16bc2a1aecb2074fb1a7b56 | diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -3870,41 +3870,23 @@ def _geomspace(start, stop, num=50, endpoint=True, dtype=None, axis: int = 0):
@_wraps(np.meshgrid, lax_description=_ARRAY_VIEW_DOC)
-def meshgrid(*args, **kwargs):
- _check_arraylike("meshgrid", *args)
- indexing = kwargs.get("indexing", "xy")
- sparse = kwargs.get("sparse", False)
- copy = kwargs.get("copy", True)
+def meshgrid(*xi, copy=True, sparse=False, indexing='xy'):
+ _check_arraylike("meshgrid", *xi)
+ args = [asarray(x) for x in xi]
if not copy:
raise ValueError("jax.numpy.meshgrid only supports copy=True")
-
- args = list(args)
- if indexing == "xy":
- if len(args) >= 2:
- args[0], args[1] = args[1], args[0]
- elif indexing != "ij":
- raise ValueError("Valid values for indexing are 'xy' and 'ij', got {}"
- .format(indexing))
-
- shape = []
- for i, a in enumerate(args):
- args[i] = a = asarray(a)
- if len(a.shape) != 1:
- msg = "Arguments to jax.numpy.meshgrid must be 1D, got shape {}"
- raise ValueError(msg.format(a.shape))
- shape.append(1 if sparse else a.shape[0])
-
- output = []
- for i, a in enumerate(args):
- s = shape
- if sparse:
- s = list(s)
- s[i] = _shape(a)[0]
- output.append(lax.broadcast_in_dim(a, s, (i,)))
-
+ if indexing not in ["xy", "ij"]:
+ raise ValueError(f"Valid values for indexing are 'xy' and 'ij', got {indexing}")
+ if _any(a.ndim != 1 for a in args):
+ raise ValueError("Arguments to jax.numpy.meshgrid must be 1D, got shapes "
+ f"{[a.shape for a in args]}")
+ if indexing == "xy" and len(args) >= 2:
+ args[0], args[1] = args[1], args[0]
+ shape = [1 if sparse else a.shape[0] for a in args]
+ _a_shape = lambda i, a: [*shape[:i], a.shape[0], *shape[i + 1:]] if sparse else shape
+ output = [lax.broadcast_in_dim(a, _a_shape(i, a), (i,)) for i, a, in enumerate(args)]
if indexing == "xy" and len(args) >= 2:
output[0], output[1] = output[1], output[0]
-
return output
| jax.numpy.meshgrid arg check for "indexing" fails
Hi,
I just noticed that the `meshgrid` function fails to raise an error when there is a typo in the `indexing` argument; this cost me a lot of time in debugging.
```python
import jax.numpy as jnp
import numpy as np
if __name__ == "__main__":
jm = jnp.arange(2)
jn = jnp.arange(2)
# here is a typo "index" which should be "indexing"
# but there is no error, and the output is just as if
# it was indexing="xy"
jmm, jnn = jnp.meshgrid(jm, jn, index="ij")
print("jax: typo index, but no error,xy ordered output")
print(jmm)
print(jnn)
print("jax: no typo, correct ij ordered output")
# here is the right args
jmm, jnn = jnp.meshgrid(jm, jn, indexing="ij")
print(jmm)
print(jnn)
m = np.arange(2)
n = np.arange(2)
mm, nn = np.meshgrid(m, n, indexing="ij")
print("numpy correct ij ordered output")
print(mm)
print(nn)
# below typo of index for numpy code will error
mm, nn = np.meshgrid(m, n, index="ij")
```
output:
```python
jax: typo index, but no error,xy ordered output
[[0 1]
[0 1]]
[[0 0]
[1 1]]
jax: no typo, correct ij ordered output
[[0 0]
[1 1]]
[[0 1]
[0 1]]
numpy correct ij ordered output
[[0 0]
[1 1]]
[[0 1]
[0 1]]
Traceback (most recent call last):
File "6.py", line 28, in <module>
mm, nn = np.meshgrid(m, n, index="ij")
File "<__array_function__ internals>", line 4, in meshgrid
TypeError: _meshgrid_dispatcher() got an unexpected keyword argument 'index'
```
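For what it's worth, the silent acceptance comes from the old `**kwargs`-based signature, and the keyword-only signature in the patch above rejects the typo up front. A stripped-down sketch (not the real implementations):
```python
# Old style: unknown keywords are silently swallowed by **kwargs.
def meshgrid_old(*args, **kwargs):
    indexing = kwargs.get("indexing", "xy")
    return indexing

print(meshgrid_old([0, 1], [0, 1], index="ij"))  # 'xy' -- the typo is ignored

# New style (as in the patch above): unknown keywords raise immediately.
def meshgrid_new(*xi, copy=True, sparse=False, indexing="xy"):
    return indexing

meshgrid_new([0, 1], [0, 1], index="ij")
# TypeError: meshgrid_new() got an unexpected keyword argument 'index'
```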
| Thanks for the report - we should add better argument validation there. | 2021-11-09T17:07:59 |
|
google/jax | 8,520 | google__jax-8520 | [
"8513"
] | 8f6e077d9aa10b3c99608d03608c8071217514a7 | diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -6287,8 +6287,7 @@ def _quantile(a, q, axis, interpolation, keepdims, squash_nans):
if interpolation not in ["linear", "lower", "higher", "midpoint", "nearest"]:
raise ValueError("interpolation can only be 'linear', 'lower', 'higher', "
"'midpoint', or 'nearest'")
- a = asarray(a, dtype=promote_types(_dtype(a), float32))
- q = asarray(q, dtype=promote_types(_dtype(q), float32))
+ a, q = _promote_dtypes_inexact(a, q)
if axis is None:
a = ravel(a)
axis = 0
@@ -6473,7 +6472,8 @@ def percentile(a, q, axis: Optional[Union[int, Tuple[int, ...]]] = None,
out=None, overwrite_input=False, interpolation="linear",
keepdims=False):
_check_arraylike("percentile", a, q)
- q = true_divide(q, float32(100.0))
+ a, q = _promote_dtypes_inexact(a, q)
+ q = true_divide(q, 100.0)
return quantile(a, q, axis=axis, out=out, overwrite_input=overwrite_input,
interpolation=interpolation, keepdims=keepdims)
| diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py
--- a/tests/lax_numpy_test.py
+++ b/tests/lax_numpy_test.py
@@ -4416,6 +4416,12 @@ def np_fun(*args):
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, rtol=tol)
+ @unittest.skipIf(not config.jax_enable_x64, "test requires X64")
+ @unittest.skipIf(jtu.device_under_test() != 'cpu', "test is for CPU float64 precision")
+ def testPercentilePrecision(self):
+ # Regression test for https://github.com/google/jax/issues/8513
+ x = jnp.float64([1, 2, 3, 4, 7, 10])
+ self.assertEqual(jnp.percentile(x, 50), 3.5)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
| Floating point deviation in `jax.numpy.percentile` with linear interpolation between `v0.2.20` and `v0.2.21`
Hi. There is some (very minor) deviations in the output of `jax.numpy.percentile` between `jax` [`v0.2.20` and `v0.2.21`](https://github.com/google/jax/compare/jax-v0.2.20...jax-v0.2.21) in the case that linear interpolation is used (the default). Interestingly, it is really in `jax.numpy.percentile` and not in `jax.numpy.quantile` as can be shown in the included example (for convenience this Issue also exists as [a GitHub Gist](https://gist.github.com/matthewfeickert/d8364201ddad6653315a62dc9b921318)).
## Minimal failing example
```python
# example.py
import jax
import jax.numpy as jnp
import numpy as np
from jax.config import config
config.update("jax_enable_x64", True)
if __name__ == "__main__":
# percentile interpolation options:
# This optional parameter specifies the interpolation method to use when the desired percentile lies between two data points i < j:
# * βlinearβ: i + (j - i) * fraction, where fraction is the fractional part of the index surrounded by i and j.
# * βlowerβ: i.
# * βhigherβ: j.
# * βnearestβ: i or j, whichever is nearest.
# * βmidpointβ: (i + j) / 2.
input = [[10, 7, 4], [3, 2, 1]]
print(f"input list: {input}")
print(f"input list ravel: {np.asarray(input).ravel()}")
# [10 7 4 3 2 1]
print(f"\nNumPy v{np.__version__}")
print(f"JAX v{jax.__version__}\n")
numpy_array = np.asarray(input)
print(f"{numpy_array=}")
jax_array = jnp.asarray(input, dtype="float")
print(f"{jax_array=}")
print("\n# Checking quantile\n")
assert np.quantile(numpy_array, 0) == 1.0
assert np.quantile(numpy_array, 0.50) == 3.5
assert np.quantile(numpy_array, 1) == 10
assert np.quantile(numpy_array, 0.50, axis=1).tolist() == [7.0, 2.0]
assert np.quantile(numpy_array, 0.50, interpolation="linear") == 3.5
assert np.quantile(numpy_array, 0.50, interpolation="nearest") == 3.0
assert np.quantile(numpy_array, 0.50, interpolation="lower") == 3.0
assert np.quantile(numpy_array, 0.50, interpolation="midpoint") == 3.5
assert np.quantile(numpy_array, 0.50, interpolation="higher") == 4.0
assert jnp.quantile(jax_array, 0) == 1.0
assert jnp.quantile(jax_array, 0.50) == 3.5
assert jnp.quantile(jax_array, 1) == 10
assert jnp.quantile(jax_array, 0.50, axis=1).tolist() == [7.0, 2.0]
assert jnp.quantile(jax_array, 0.50, interpolation="linear") == 3.5
assert jnp.quantile(jax_array, 0.50, interpolation="nearest") == 3.0
assert jnp.quantile(jax_array, 0.50, interpolation="lower") == 3.0
assert jnp.quantile(jax_array, 0.50, interpolation="midpoint") == 3.5
assert jnp.quantile(jax_array, 0.50, interpolation="higher") == 4.0
print("# Checking percentile")
assert np.percentile(numpy_array, 0) == 1.0
assert np.percentile(numpy_array, 50) == 3.5
assert np.percentile(numpy_array, 100) == 10
assert np.percentile(numpy_array, 50, axis=1).tolist() == [7.0, 2.0]
assert np.percentile(numpy_array, 50, interpolation="linear") == 3.5
assert np.percentile(numpy_array, 50, interpolation="nearest") == 3.0
assert np.percentile(numpy_array, 50, interpolation="lower") == 3.0
assert np.percentile(numpy_array, 50, interpolation="midpoint") == 3.5
assert np.percentile(numpy_array, 50, interpolation="higher") == 4.0
# default interpolation method is "linear"
assert jnp.percentile(jax_array, 0) == 1.0
assert jnp.percentile(jax_array, 50) == 3.5 # 3.499999761581421
assert jnp.percentile(jax_array, 100) == 10 # 9.999998092651367
assert jnp.percentile(jax_array, 50, axis=1).tolist() == [7.0, 2.0]
assert jnp.percentile(jax_array, 50, interpolation="linear") == 3.5 # 3.499999761581421
assert jnp.percentile(jax_array, 50, interpolation="nearest") == 3.0
assert jnp.percentile(jax_array, 50, interpolation="lower") == 3.0
assert jnp.percentile(jax_array, 50, interpolation="midpoint") == 3.5
assert jnp.percentile(jax_array, 50, interpolation="higher") == 4.0
```
```console
user@machine:~$ python --version
Python 3.9.6
user@machine:~$ python -m venv /tmp/venv && . /tmp/venv/bin/activate
(venv) user@machine:~$ python -m pip install --upgrade pip setuptools wheel
(venv) user@machine:~$ cat requirements_passing.txt
jax==0.2.20
jaxlib==0.1.69
(venv) user@machine:~$ python -m pip install -r requirements_passing.txt
(venv) user@machine:~$ python example.py
input list: [[10, 7, 4], [3, 2, 1]]
input list ravel: [10 7 4 3 2 1]
NumPy v1.21.4
JAX v0.2.20
numpy_array=array([[10, 7, 4],
[ 3, 2, 1]])
WARNING:absl:No GPU/TPU found, falling back to CPU. (Set TF_CPP_MIN_LOG_LEVEL=0 and rerun for more info.)
jax_array=DeviceArray([[10., 7., 4.],
[ 3., 2., 1.]], dtype=float64)
# Checking quantile
# Checking percentile
(venv) user@machine:~$ cat requirements_failing.txt
jax==0.2.21
jaxlib==0.1.69
(venv) user@machine:~$ python -m pip install -r requirements_failing.txt
(venv) user@machine:~$ python example.py
input list: [[10, 7, 4], [3, 2, 1]]
input list ravel: [10 7 4 3 2 1]
NumPy v1.21.4
JAX v0.2.21
numpy_array=array([[10, 7, 4],
[ 3, 2, 1]])
WARNING:absl:No GPU/TPU found, falling back to CPU. (Set TF_CPP_MIN_LOG_LEVEL=0 and rerun for more info.)
jax_array=DeviceArray([[10., 7., 4.],
[ 3., 2., 1.]], dtype=float64)
# Checking quantile
# Checking percentile
Traceback (most recent call last):
File "/home/feickert/Code/debug/jax-percentile-drift/example.py", line 67, in <module>
assert jnp.percentile(jax_array, 50) == 3.5 # 3.499999761581421
AssertionError
```
## Notes
Comparing the code for [`v0.2.20`](https://github.com/google/jax/blob/jax-v0.2.20/jax/_src/numpy/lax_numpy.py#L5905-L5912)
https://github.com/google/jax/blob/a7b61c0e00d1b535df8a30a82edc0074884d5f4c/jax/_src/numpy/lax_numpy.py#L5905-L5912
and [`v0.2.21`](https://github.com/google/jax/blob/jax-v0.2.21/jax/_src/numpy/lax_numpy.py#L6420-L6429)
https://github.com/google/jax/blob/dbeb97d394740bfd122a46249c967139c10d3f11/jax/_src/numpy/lax_numpy.py#L6420-L6429
It seems (at first glance as I haven't dug into this yet) that the only relevant difference is the removal of `asarray(q)` in the `true_divide` call in PR #7747 (though I would think given the point of that PR that nothing should have changed)
```diff
-q = true_divide(asarray(q), float32(100.0))
+q = true_divide(q, float32(100.0))
```
This effect is quite minor and probably of no real significance in most cases, but it deviates from the behavior described in the docstring. Perhaps the most obvious example is at the extremes: the 100th percentile (quantile of 1) should return the maximum element of the array itself (10 in the example) but instead returns a floating point approximation of it (9.999998092651367).
## Request
Would it be possible to revert to the `v0.2.20` behavior? This would be more consistent with both the docstring and NumPy.
## JAX Issues checklist
Please:
- [x] Check for duplicate issues.
- [x] Provide a complete example of how to reproduce the bug, wrapped in triple backticks
- [x] If applicable, include full error messages/tracebacks.
| I think the operative change here is that `jnp.percentile` is JIT-compiled by default starting in v0.2.21. Try running the following in version 0.2.20:
```python
import jax
import jax.numpy as jnp
import numpy as np
from jax.config import config
config.update("jax_enable_x64", True)
input = [[10, 7, 4], [3, 2, 1]]
numpy_array = np.asarray(input)
jax_array = jnp.asarray(input, dtype="float")
print("jax_version:", jax.__version__)
print("no jit:", jnp.percentile(jax_array, 50))
print("jit:", jax.jit(jnp.percentile)(jax_array, 50))
# jax_version: 0.2.20
# no jit: 3.5
# jit: 3.499999761581421
```
Why does JIT compiling cause this kind of inaccuracy? As part of compilation, XLA is free to re-arrange mathematical operations for efficiency, and sometimes this changes results slightly due to the imprecision inherent to floating point.
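A tiny illustration of the general phenomenon (not the specific rewrite XLA performs here):
```python
import numpy as np

a, b, c = np.float32(1e8), np.float32(-1e8), np.float32(0.5)
print((a + b) + c)  # 0.5
print(a + (b + c))  # 0.0 -- the same sum, re-associated, loses the 0.5
```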
On further exploration, it looks like passing `np.float64(50)` rather than `50` fixes the issue. I think we could probably address this by being a bit more careful about dtype promotion within the `percentile` implementation.
> Why does JIT compiling cause this kind of inaccuracy? As part of compilation, XLA is free to re-arrange mathematical operations for efficiency, and sometimes this changes results slightly due to the imprecision inherent to floating point.
Thanks very much for the example and explanation @jakevdp — this is already quite helpful!
> I think we could probably address this by being a bit more careful about dtype promotion within the percentile implementation.
That would be great if possible in the future. :+1: | 2021-11-11T16:26:46 |
google/jax | 8,624 | google__jax-8624 | [
"8623"
] | f08a5a07a8953b6dabfdff63d883d86e0ce80f3a | diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -6321,6 +6321,8 @@ def _quantile(a, q, axis, interpolation, keepdims, squash_nans):
raise ValueError("interpolation can only be 'linear', 'lower', 'higher', "
"'midpoint', or 'nearest'")
a, q = _promote_dtypes_inexact(a, q)
+ if issubdtype(a.dtype, np.complexfloating):
+ raise ValueError("quantile does not support complex input, as the operation is poorly defined.")
if axis is None:
a = ravel(a)
axis = 0
| diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py
--- a/tests/lax_numpy_test.py
+++ b/tests/lax_numpy_test.py
@@ -1603,6 +1603,8 @@ def testPad(self, shape, dtype, mode, pad_width, constant_values):
if (pad_width != () and stat_length != () and
not (dtype in bool_dtypes and mode == 'mean'))))
def testPadStatValues(self, shape, dtype, mode, pad_width, stat_length):
+ if mode == 'median' and np.issubdtype(dtype, np.complexfloating):
+ self.skipTest("median statistic is not supported for dtype=complex.")
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
| jax.median fails for complex values
```python
>>> import numpy as np
>>> import jax.numpy as jnp
>>> x = np.array([1 + 0j, 0 + 1j])
>>> np.median(x)
(0.5+0.5j)
>>> jnp.median(x)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<...>
TypeError: floor does not accept dtype complex64. Accepted dtypes are subtypes of floating.
```
| See https://github.com/numpy/numpy/issues/12943 for a discussion of the meaning of numpy's median for complex inputs; we may just want to raise an explicit `ValueError` or `NotImplementedError` rather than trying to replicate this. | 2021-11-19T18:55:44 |
google/jax | 8,766 | google__jax-8766 | [
"8573"
] | 404c3c7d2538c4553b68b38d3a7949e477dd3c42 | diff --git a/build/build.py b/build/build.py
--- a/build/build.py
+++ b/build/build.py
@@ -90,40 +90,40 @@ def check_numpy_version(python_bin_path):
# Bazel
-BAZEL_BASE_URI = "https://github.com/bazelbuild/bazel/releases/download/4.1.0/"
+BAZEL_BASE_URI = "https://github.com/bazelbuild/bazel/releases/download/4.2.1/"
BazelPackage = collections.namedtuple("BazelPackage",
["base_uri", "file", "sha256"])
bazel_packages = {
("Linux", "x86_64"):
BazelPackage(
base_uri=None,
- file="bazel-4.1.0-linux-x86_64",
+ file="bazel-4.2.1-linux-x86_64",
sha256=
- "0eb2e378d2782e7810753e2162245ad1179c1bb12f848c692b4a595b4edf779b"),
+ "1a4f3a3ce292307bceeb44f459883859c793436d564b95319aacb8af1f20557c"),
("Linux", "aarch64"):
BazelPackage(
base_uri=None,
- file="bazel-4.1.0-linux-arm64",
+ file="bazel-4.2.1-linux-arm64",
sha256=
- "b3834742166379e52b880319dec4699082cb26fa96cbb783087deedc5fbb5f2b"),
+ "0a849f99d59eab7058212a89b2a0d2b6a17f1ef7ba7fb7a42523a7171bb1c64f"),
("Darwin", "x86_64"):
BazelPackage(
base_uri=None,
- file="bazel-4.1.0-darwin-x86_64",
+ file="bazel-4.2.1-darwin-x86_64",
sha256=
- "2eecc3abb0ff653ed0bffdb9fbfda7b08548c2868f13da4a995f01528db200a9"),
+ "74d93848f0c9d592e341e48341c53c87e3cb304a54a2a1ee9cff3df422f0b23c"),
("Darwin", "arm64"):
BazelPackage(
base_uri=None,
- file="bazel-4.1.0-darwin-arm64",
+ file="bazel-4.2.1-darwin-arm64",
sha256=
- "c372d39ab9dac96f7fdfc2dd649e88b05ee4c94ce3d6cf2313438ef0ca6d5ac1"),
+ "f0375d77afe61a7167cd6d9cda1fde0eb29839e0ca8718002da73ea895b31dc0"),
("Windows", "AMD64"):
BazelPackage(
base_uri=None,
- file="bazel-4.1.0-windows-x86_64.exe",
+ file="bazel-4.2.1-windows-x86_64.exe",
sha256=
- "7b2077af7055b421fe31822f83c3c3c15e36ff39b69560ba2472dde92dd45b46"),
+ "8fa10dfdc8b2f610a8891b68e2a132540e6fac4fea62174cffd9994b9147586e"),
}
@@ -185,7 +185,7 @@ def get_bazel_paths(bazel_path_flag):
def get_bazel_path(bazel_path_flag):
"""Returns the path to a Bazel binary, downloading Bazel if not found. Also,
- checks Bazel's version is at least newer than 4.1.0.
+ checks Bazel's version is at least newer than 4.2.1.
A manual version check is needed only for really old bazel versions.
Newer bazel releases perform their own version check against .bazelversion
@@ -194,11 +194,11 @@ def get_bazel_path(bazel_path_flag):
"""
for path in filter(None, get_bazel_paths(bazel_path_flag)):
version = get_bazel_version(path)
- if version is not None and version >= (4, 1, 0):
+ if version is not None and version >= (4, 2, 1):
return path, ".".join(map(str, version))
print("Cannot find or download a suitable version of bazel."
- "Please install bazel >= 4.1.0.")
+ "Please install bazel >= 4.2.1.")
sys.exit(-1)
| Failure building jaxlib from source on `arm64` with bazel 4.1.0
Please:
- [x] Check for duplicate issues.
- [x] Provide a complete example of how to reproduce the bug, wrapped in triple backticks like this:
```shell
python build/build.py
```
Building `jaxlib` at HEAD on my machine using bazel 4.1.0 fails (macOS 12.0, Macbook Pro 14" M1 Pro 2021). This error seems to come from a bad `xcode-locator` call, possibly because the bazel version that is pulled there (see the traceback below) is configured for the wrong architecture? IIRC, I also do not (yet) have Rosetta installed.
Manually editing the `.bazelversion` file and setting it to `4.2.1` (which enables the use of the latest bazel version provided by `brew`) avoids this issue and builds the wheel successfully (though I have not tested it yet).
- [x] If applicable, include full error messages/tracebacks.
```
β python build/build.py
_ _ __ __
| | / \ \ \/ /
_ | |/ _ \ \ /
| |_| / ___ \/ \
\___/_/ \/_/\_\
b'\x1b[31mERROR: The project you\'re trying to build requires Bazel 4.1.0 (specified in /Users/nicholasjunge/Workspaces/python/jax/.bazelversion), but it wasn\'t found in /opt/homebrew/Cellar/bazel/4.2.1_1/libexec/bin.\x1b[0m\n\nBazel binaries for all official releases can be downloaded from here:\n https://github.com/bazelbuild/bazel/releases\n\nYou can download the required version directly using this command:\n (cd "/opt/homebrew/Cellar/bazel/4.2.1_1/libexec/bin" && curl -fLO https://releases.bazel.build/4.1.0/release/bazel-4.1.0-darwin-arm64 && chmod +x bazel-4.1.0-darwin-arm64)\n'
Bazel binary path: ./bazel-4.1.0-darwin-arm64
Bazel version: 4.1.0
Python binary path: /Users/nicholasjunge/Workspaces/python/jax/venv/bin/python
Python version: 3.9
NumPy version: 1.21.4
MKL-DNN enabled: yes
Target CPU: arm64
Target CPU features: release
CUDA enabled: no
TPU enabled: no
ROCm enabled: no
Building XLA and installing it in the jaxlib source tree...
./bazel-4.1.0-darwin-arm64 run --verbose_failures=true --config=mkl_open_source_only :build_wheel -- --output_path=/Users/nicholasjunge/Workspaces/python/jax/dist --cpu=arm64
INFO: Options provided by the client:
Inherited 'common' options: --isatty=0 --terminal_columns=80
INFO: Reading rc options for 'run' from /Users/nicholasjunge/Workspaces/python/jax/.bazelrc:
Inherited 'common' options: --experimental_repo_remote_exec
INFO: Reading rc options for 'run' from /Users/nicholasjunge/Workspaces/python/jax/.bazelrc:
Inherited 'build' options: --apple_platform_type=macos --macos_minimum_os=10.9 --announce_rc --define open_source_build=true --spawn_strategy=standalone --enable_platform_specific_config --define=no_aws_support=true --define=no_gcp_support=true --define=no_hdfs_support=true --define=no_kafka_support=true --define=no_ignite_support=true --define=grpc_no_ares=true -c opt --config=short_logs --copt=-DMLIR_PYTHON_PACKAGE_PREFIX=jaxlib.mlir.
INFO: Reading rc options for 'run' from /Users/nicholasjunge/Workspaces/python/jax/.jax_configure.bazelrc:
Inherited 'build' options: --strategy=Genrule=standalone --repo_env PYTHON_BIN_PATH=/Users/nicholasjunge/Workspaces/python/jax/venv/bin/python --action_env=PYENV_ROOT --python_path=/Users/nicholasjunge/Workspaces/python/jax/venv/bin/python --distinct_host_configuration=false
INFO: Found applicable config definition build:short_logs in file /Users/nicholasjunge/Workspaces/python/jax/.bazelrc: --output_filter=DONT_MATCH_ANYTHING
INFO: Found applicable config definition build:mkl_open_source_only in file /Users/nicholasjunge/Workspaces/python/jax/.bazelrc: --define=tensorflow_mkldnn_contraction_kernel=1
INFO: Found applicable config definition build:macos in file /Users/nicholasjunge/Workspaces/python/jax/.bazelrc: --config=posix
INFO: Found applicable config definition build:posix in file /Users/nicholasjunge/Workspaces/python/jax/.bazelrc: --copt=-fvisibility=hidden --copt=-Wno-sign-compare --cxxopt=-std=c++14 --host_cxxopt=-std=c++14
Loading:
Loading: 0 packages loaded
WARNING: Download from https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/8909dc5ebe8ad39f1743131eb70df402d796acab.tar.gz failed: class com.google.devtools.build.lib.bazel.repository.downloader.UnrecoverableHttpException GET returned 404 Not Found
WARNING: Download from https://storage.googleapis.com/mirror.tensorflow.org/github.com/tensorflow/runtime/archive/fbdd15997f4495011fed44d051152a2a69436e69.tar.gz failed: class com.google.devtools.build.lib.bazel.repository.downloader.UnrecoverableHttpException GET returned 404 Not Found
Analyzing: target //build:build_wheel (0 packages loaded, 0 targets configured)
DEBUG: Rule 'io_bazel_rules_docker' indicated that a canonical reproducible form can be obtained by modifying arguments shallow_since = "1596824487 -0400"
DEBUG: Repository io_bazel_rules_docker instantiated at:
/Users/nicholasjunge/Workspaces/python/jax/WORKSPACE:37:14: in <toplevel>
/private/var/tmp/_bazel_nicholasjunge/270a4a78734ae0f3124fa7265b8a65ef/external/org_tensorflow/tensorflow/workspace0.bzl:108:34: in workspace
/private/var/tmp/_bazel_nicholasjunge/270a4a78734ae0f3124fa7265b8a65ef/external/bazel_toolchains/repositories/repositories.bzl:35:23: in repositories
Repository rule git_repository defined at:
/private/var/tmp/_bazel_nicholasjunge/270a4a78734ae0f3124fa7265b8a65ef/external/bazel_tools/tools/build_defs/repo/git.bzl:199:33: in <toplevel>
WARNING: Download from https://storage.googleapis.com/mirror.tensorflow.org/github.com/open-source-parsers/jsoncpp/archive/1.9.5.tar.gz failed: class com.google.devtools.build.lib.bazel.repository.downloader.UnrecoverableHttpException GET returned 404 Not Found
INFO: Analyzed target //build:build_wheel (0 packages loaded, 0 targets configured).
INFO: Found 1 target...
[0 / 5] [Prepa] BazelWorkspaceStatusAction stable-status.txt ... (2 actions, 0 running)
ERROR: /private/var/tmp/_bazel_nicholasjunge/270a4a78734ae0f3124fa7265b8a65ef/external/zlib/BUILD.bazel:5:11: Compiling gzclose.c failed: Exec failed due to IOException: com.google.devtools.build.lib.shell.ExecFailedException: java.io.IOException: Cannot run program "/var/tmp/_bazel_nicholasjunge/install/d0b631b3e5a643567f221e133629d965/xcode-locator": error=86, Bad CPU type in executable
Target //build:build_wheel failed to build
INFO: Elapsed time: 0.249s, Critical Path: 0.04s
INFO: 20 processes: 19 internal, 1 local.
FAILED: Build did NOT complete successfully
ERROR: Build failed. Not running target
FAILED: Build did NOT complete successfully
b''
Traceback (most recent call last):
File "/Users/nicholasjunge/Workspaces/python/jax/build/build.py", line 524, in <module>
main()
File "/Users/nicholasjunge/Workspaces/python/jax/build/build.py", line 519, in main
shell(command)
File "/Users/nicholasjunge/Workspaces/python/jax/build/build.py", line 53, in shell
output = subprocess.check_output(cmd)
File "/opt/homebrew/Cellar/[email protected]/3.9.8/Frameworks/Python.framework/Versions/3.9/lib/python3.9/subprocess.py", line 424, in check_output
return run(*popenargs, stdout=PIPE, timeout=timeout, check=True,
File "/opt/homebrew/Cellar/[email protected]/3.9.8/Frameworks/Python.framework/Versions/3.9/lib/python3.9/subprocess.py", line 528, in run
raise CalledProcessError(retcode, process.args,
subprocess.CalledProcessError: Command '['./bazel-4.1.0-darwin-arm64', 'run', '--verbose_failures=true', '--config=mkl_open_source_only', ':build_wheel', '--', '--output_path=/Users/nicholasjunge/Workspaces/python/jax/dist', '--cpu=arm64']' returned non-zero exit status 36.
```
| @hawkinsp since you are building jaxlib for mac today | 2021-12-02T14:00:03 |
|
google/jax | 8,869 | google__jax-8869 | [
"8744"
] | 2e375f04a62b120b7a5a84e3072f36d4417f2b6d | diff --git a/jax/_src/lax/control_flow.py b/jax/_src/lax/control_flow.py
--- a/jax/_src/lax/control_flow.py
+++ b/jax/_src/lax/control_flow.py
@@ -2357,18 +2357,6 @@ def _promote_weak_typed_inputs(in_vals, in_avals, out_avals):
in_vals[i] = lax.convert_element_type(in_vals[i], new_dtype)
return in_vals, True
-def _stop_gradient_fun(f):
- """Create a version of f() that stops all gradients."""
- def wrapper(*args, **kwargs):
- args_flat, in_args_tree = tree_flatten((args, kwargs))
- args_avals = tuple(_map(_abstractify, args_flat))
- g = lambda a, b: f(*a, **b)
- jaxpr, consts, out_tree = _initial_style_jaxpr(g, in_args_tree, args_avals)
- all_args = _map(lax.stop_gradient, (*consts, *args_flat))
- out = core.jaxpr_as_fun(jaxpr)(*all_args)
- return tree_unflatten(out_tree, out)
- return wrapper
-
_RootTuple = collections.namedtuple('_RootTuple', 'f, solve, l_and_s')
@@ -2426,7 +2414,7 @@ def custom_root(f, initial_guess, solve, tangent_solve, has_aux=False):
_check_tree("f", "initial_guess", out_tree, in_tree, False)
solve_jaxpr, solve_consts, solution_tree = _initial_style_jaxpr(
- partial(solve, _stop_gradient_fun(f)), in_args_tree, guess_avals)
+ partial(solve, f), in_args_tree, guess_avals)
_check_tree("solve", "initial_guess", solution_tree, in_tree, has_aux)
def linearize_and_solve(x, b):
| diff --git a/tests/lax_control_flow_test.py b/tests/lax_control_flow_test.py
--- a/tests/lax_control_flow_test.py
+++ b/tests/lax_control_flow_test.py
@@ -95,6 +95,52 @@ def posify(matrix):
jtu.ignore_warning, message=".*jit-of-pmap.*")
+# Simple optimization routine for testing custom_root
+def binary_search(func, x0, low=0.0, high=100.0):
+ del x0 # unused
+
+ def cond(state):
+ low, high = state
+ midpoint = 0.5 * (low + high)
+ return (low < midpoint) & (midpoint < high)
+
+ def body(state):
+ low, high = state
+ midpoint = 0.5 * (low + high)
+ update_upper = func(midpoint) > 0
+ low = jnp.where(update_upper, low, midpoint)
+ high = jnp.where(update_upper, midpoint, high)
+ return (low, high)
+
+ solution, _ = lax.while_loop(cond, body, (low, high))
+ return solution
+
+# Optimization routine for testing custom_root.
+def newton_raphson(func, x0):
+ tol = 1e-16
+ max_it = 20
+
+ fx0, dfx0 = func(x0), jax.jacobian(func)(x0)
+ initial_state = (0, x0, fx0, dfx0) # (iteration, x, f(x), grad(f)(x))
+
+ def cond(state):
+ it, _, fx, _ = state
+ return (jnp.max(jnp.abs(fx)) > tol) & (it < max_it)
+
+ def body(state):
+ it, x, fx, dfx = state
+ step = jnp.linalg.solve(
+ dfx.reshape((-1, fx.size)), fx.ravel()
+ ).reshape(fx.shape)
+ x_next = x - step
+ fx, dfx = func(x_next), jax.jacobian(func)(x_next)
+ return (it + 1, x_next, fx, dfx)
+
+ _, x, _, _ = lax.while_loop(cond, body, initial_state)
+
+ return x
+
+
class LaxControlFlowTest(jtu.JaxTestCase):
def setUp(self):
@@ -2007,33 +2053,19 @@ def fun(carry, _):
jax.grad(lambda x: jit_run_scan(x))(0.) # doesn't crash
- def test_custom_root_scalar(self):
+ @parameterized.named_parameters(
+ {"testcase_name": "binary_search", "solve_method": binary_search},
+ {"testcase_name": "newton_raphson", "solve_method": newton_raphson},
+ )
+ def test_custom_root_scalar(self, solve_method):
def scalar_solve(f, y):
return y / f(1.0)
- def binary_search(func, x0, low=0.0, high=100.0):
- del x0 # unused
-
- def cond(state):
- low, high = state
- midpoint = 0.5 * (low + high)
- return (low < midpoint) & (midpoint < high)
-
- def body(state):
- low, high = state
- midpoint = 0.5 * (low + high)
- update_upper = func(midpoint) > 0
- low = jnp.where(update_upper, low, midpoint)
- high = jnp.where(update_upper, midpoint, high)
- return (low, high)
-
- solution, _ = lax.while_loop(cond, body, (low, high))
- return solution
-
def sqrt_cubed(x, tangent_solve=scalar_solve):
f = lambda y: y ** 2 - x ** 3
- return lax.custom_root(f, 0.0, binary_search, tangent_solve)
+ # Note: Nonzero derivative at x0 required for newton_raphson
+ return lax.custom_root(f, 1.0, solve_method, tangent_solve)
value, grad = jax.value_and_grad(sqrt_cubed)(5.0)
self.assertAllClose(value, 5 ** 1.5, check_dtypes=False, rtol=1e-6)
@@ -2044,7 +2076,11 @@ def sqrt_cubed(x, tangent_solve=scalar_solve):
inputs = jnp.array([4.0, 5.0])
results = jax.vmap(sqrt_cubed)(inputs)
- self.assertAllClose(results, inputs ** 1.5, check_dtypes=False)
+ self.assertAllClose(
+ results, inputs ** 1.5, check_dtypes=False,
+ atol={jnp.float32: 1e-3, jnp.float64: 1e-6},
+ rtol={jnp.float32: 1e-3, jnp.float64: 1e-6},
+ )
results = jax.jit(sqrt_cubed)(5.0)
self.assertAllClose(
@@ -2073,6 +2109,30 @@ def linear_solve(a, b):
expected = jnp.linalg.solve(a, b)
self.assertAllClose(expected, actual)
+ def test_custom_root_vector_nonlinear(self):
+
+ def nonlinear_func(x, y):
+ # func(x, y) == 0 if and only if x == y.
+ return (x - y) * (x**2 + y**2 + 1)
+
+ def tangent_solve(g, y):
+ return jnp.linalg.solve(
+ jax.jacobian(g)(y).reshape(-1, y.size),
+ y.ravel()
+ ).reshape(y.shape)
+
+ def nonlinear_solve(y):
+ f = lambda x: nonlinear_func(x, y)
+ x0 = -jnp.ones_like(y)
+ return lax.custom_root(f, x0, newton_raphson, tangent_solve)
+
+ y = self.rng().randn(3, 1)
+ jtu.check_grads(nonlinear_solve, (y,), order=2,
+ rtol={jnp.float32: 1e-2, jnp.float64: 1e-3})
+
+ actual = jax.jit(nonlinear_solve)(y)
+ self.assertAllClose(y, actual, rtol=1e-5, atol=1e-5)
+
def test_custom_root_with_custom_linear_solve(self):
def linear_solve(a, b):
| `lax.custom_root` fails on root finders that require gradient
The `lax.custom_root` method does not work on methods that require gradient, e.g., the Newton-Raphson method. The core of the problem seems to be overly-aggressive application of `stop_gradient` [here](https://github.com/google/jax/blob/e6950dd63ed3c054067c53bd4806dfbb85e0eef7/jax/_src/lax/control_flow.py#L2310).
While I've included a minimal example below, [this notebook](https://colab.research.google.com/drive/1e5_0gGYc-v9lKKwvUS7rqSUhA0D-OpT3) does a deeper dive into the error and demonstrates that removing `_stop_gradient_fun` applications resolves the problem.
I'd be happy to make a PR to resolve the issue, but my naΓ―ve solution of removing all calls to `stop_gradient` in `custom_root` appears... well, naΓ―ve. I'm just not sure why `stop_gradient` is required at all here, but it seems related to not wanting to trace gradients through control structures like while-loops. I suppose I'm curious why custom JVP'ing a function doesn't automatiically shortcut the gradient computation...
### Related issues
- #1448 discusses a `define_implicit_gradient` function that doesn't use `stop_gradient`. This function was never merged. Regardless, the fact that `custom_root` methods can't use gradients seems like a bug.
- Additional reading of the discussion there suggests that one of the main reasons for `stop_gradient` was the non-differentiability of control structures (`lax.while_loop` in particular). Nevertheless, my tests don't seem to trigger the issue.
## Minimal example
This example follows [the JAX unit test](https://github.com/google/jax/blob/e6950dd63ed3c054067c53bd4806dfbb85e0eef7/tests/lax_control_flow_test.py#L2010) of the `custom_root` method, with the bisection method replaced with Newton's method.
```python
import jax
import numpy as np
def newton(f, x0):
"""Newton's method for root-finding."""
initial_state = (0, x0) # (iteration, x)
def cond(state):
it, x = state
# We fix 10 iterations for simplicity, this is plenty for convergence in our tests.
return (it < 10)
def body(state):
it, x = state
fx, dfx = f(x), jax.grad(f)(x)
step = fx / dfx
new_state = it + 1, x - step
return new_state
return jax.lax.while_loop(
cond,
body,
initial_state,
)[1]
```
### Newton's method without `custom_root`
```python
initial_guess = 2.0
x_test = 5.0
def sqrt_cubed_newton(x):
implicit = lambda y: y ** 2 - x ** 3
return newton(implicit, initial_guess)
print("Without custom_root")
v = sqrt_cubed_newton(x_test)
print(f"Value = {v}")
if abs(v - x_test ** 1.5) < 1e-5:
print("Pass")
else:
print("Fail")
```
```
Without custom_root
Value=11.180339813232422
Pass
```
### With `custom_root`
```python
def sqrt_cubed_newton_custom_root(x):
def scalar_solve(g, y):
return y / g(1.0)
implicit = lambda y: y ** 2 - x ** 3
return jax.lax.custom_root(implicit, initial_guess, newton, scalar_solve)
print("With custom_root")
v = sqrt_cubed_newton_custom_root(x_test)
print(f"Value = {v}")
if abs(v - x_test ** 1.5) < 1e-5:
print("Pass")
else:
print("Fail")
```
```
With custom_root
Value = nan
Fail
```
The notebook referenced above shows how removing the call to `_stop_gradient_fun` fixes the problem.
Please:
- [x] Check for duplicate issues.
- [x] Provide a complete example of how to reproduce the bug, wrapped in triple backticks like this
- [x] If applicable, include full error messages/tracebacks.
| Hi @mbmccoy, thanks for the careful investigation here!
I'm struggling to remember the exact context here, but I think we might actually be able to get away with removing the use of stop_gradient inside custom_root, because while_loop now has a JVP rule defined. This was not the case back when custom_root was written.
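A quick sketch of that point, checking that forward-mode differentiation through `lax.while_loop` works:
```python
import jax
from jax import lax

def f(x):
  return lax.while_loop(lambda v: v < 100.0, lambda v: 2.0 * v, x)

print(jax.jvp(f, (3.0,), (1.0,)))  # primal 192.0, tangent 64.0 (both scaled by 2**6)
```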
We also switched from writing custom_root as a primitive to writing it via custom_jvp. So that also may have obviated the need for this.
Basically, I would suggest going ahead with removing stop_gradient_fun, being sure to add something like your Newton solver as an integration test. | 2021-12-09T00:02:24 |
google/jax | 8,926 | google__jax-8926 | [
"8928"
] | 2d47d0c2e04ac49184b91e5d2d44b897d0aa12f5 | diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -2875,8 +2875,8 @@ def nonzero(a, *, size=None, fill_value=None):
return out
@_wraps(np.flatnonzero, lax_description=_NONZERO_DOC)
-def flatnonzero(a, *, size=None):
- return nonzero(ravel(a), size=size)[0]
+def flatnonzero(a, *, size=None, fill_value=None):
+ return nonzero(ravel(a), size=size, fill_value=fill_value)[0]
def _nan_reduction(a, name, jnp_reduction, init_val, nan_if_all_nan,
@@ -5212,8 +5212,8 @@ def vander(x, N=None, increasing=False):
"""
@_wraps(np.argwhere, lax_description=_ARGWHERE_DOC)
-def argwhere(a, *, size=None):
- result = transpose(vstack(nonzero(a, size=size)))
+def argwhere(a, *, size=None, fill_value=None):
+ result = transpose(vstack(nonzero(a, size=size, fill_value=fill_value)))
if ndim(a) == 0:
return result[:0].reshape(result.shape[0], 0)
return result.reshape(result.shape[0], ndim(a))
| diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py
--- a/tests/lax_numpy_test.py
+++ b/tests/lax_numpy_test.py
@@ -998,6 +998,29 @@ def testFlatNonzero(self, shape, dtype):
jnp_fun = lambda x: jnp.flatnonzero(x, size=np.size(x) // 2)
self._CompileAndCheck(jnp_fun, args_maker)
+ @parameterized.named_parameters(jtu.cases_from_list(
+ {"testcase_name": "_shape={}_size={}_fill_value={}".format(
+ jtu.format_shape_dtype_string(shape, dtype), size, fill_value),
+ "shape": shape, "dtype": dtype, "size": size, "fill_value": fill_value}
+ for shape in nonempty_array_shapes
+ for dtype in all_dtypes
+ for fill_value in [None, -1, 10, (-1,), (10,)]
+ for size in [1, 5, 10]))
+ def testFlatNonzeroSize(self, shape, dtype, size, fill_value):
+ rng = jtu.rand_some_zero(self.rng())
+ args_maker = lambda: [rng(shape, dtype)]
+ @jtu.ignore_warning(category=DeprecationWarning, message="Calling nonzero on 0d arrays.*")
+ def np_fun(x):
+ result = np.flatnonzero(x)
+ if size <= len(result):
+ return result[:size]
+ else:
+ fill_val = fill_value or 0
+ return np.concatenate([result, np.full(size - len(result), fill_val, result.dtype)])
+ jnp_fun = lambda x: jnp.flatnonzero(x, size=size, fill_value=fill_value)
+ self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
+ self._CompileAndCheck(jnp_fun, args_maker)
+
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}".format(
jtu.format_shape_dtype_string(shape, dtype)),
@@ -1017,6 +1040,30 @@ def testArgWhere(self, shape, dtype):
jnp_fun = lambda x: jnp.argwhere(x, size=np.size(x) // 2)
self._CompileAndCheck(jnp_fun, args_maker)
+ @parameterized.named_parameters(jtu.cases_from_list(
+ {"testcase_name": "_shape={}_size={}_fill_value={}".format(
+ jtu.format_shape_dtype_string(shape, dtype), size, fill_value),
+ "shape": shape, "dtype": dtype, "size": size, "fill_value": fill_value}
+ for shape in nonempty_array_shapes
+ for dtype in all_dtypes
+ for fill_value in [None, -1, shape or (1,)]
+ for size in [1, 5, 10]))
+ def testArgWhereSize(self, shape, dtype, size, fill_value):
+ rng = jtu.rand_some_zero(self.rng())
+ args_maker = lambda: [rng(shape, dtype)]
+ @jtu.ignore_warning(category=DeprecationWarning, message="Calling nonzero on 0d arrays.*")
+ def np_fun(x):
+ result = np.argwhere(x)
+ if size <= len(result):
+ return result[:size]
+ else:
+ fillvals = fill_value if np.ndim(fill_value) else result.shape[-1] * [fill_value or 0]
+ return np.empty((size, 0), dtype=int) if np.ndim(x) == 0 else np.stack([np.concatenate([arg, np.full(size - len(arg), fval, arg.dtype)])
+ for fval, arg in safe_zip(fillvals, result.T)]).T
+ jnp_fun = lambda x: jnp.argwhere(x, size=size, fill_value=fill_value)
+ self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
+ self._CompileAndCheck(jnp_fun, args_maker)
+
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "{}_inshape={}_axis={}".format(
rec.test_name.capitalize(),
| Jax options not threaded through to base implementation in jnp
**Bug**
`jnp.flatnonzero` and `jnp.argwhere` mention the `fill_value` option in their documentation, but calling them with `fill_value` leads to an 'unexpected keyword argument' error.
In the below code, the last two lines will raise errors due to unexpected argument `fill_value`
```python
import jax.numpy as jnp
d = jnp.array([True,False,True])
jnp.nonzero(d, size=20, fill_value=100)
jnp.flatnonzero(d, size=20, fill_value=100)
jnp.argwhere(d, size=20, fill_value=100)
```
**Why?**
The implementations of `jnp.flatnonzero` and `jnp.argwhere` link back to `jnp.nonzero`, but the `fill_value` argument wasn't threaded through (only the `size` argument was).
The threading is there in `jnp.where` and other cases; as far as I've noticed, only these two were missed.
**Fix**
Pull request #8926 simply threads the argument through.
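For reference, once the argument is threaded through, padding entries take the `fill_value` (mirroring the new tests above); the results shown as comments follow from those tests rather than from captured output:
```python
import jax.numpy as jnp

d = jnp.array([True, False, True])
# flatnonzero pads the two found indices up to `size` with fill_value:
print(jnp.flatnonzero(d, size=5, fill_value=100))
# [  0   2 100 100 100]
# argwhere does the same per coordinate column:
print(jnp.argwhere(d, size=5, fill_value=100))
# [[  0]
#  [  2]
#  [100]
#  [100]
#  [100]]
```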
| 2021-12-13T15:27:20 |
|
google/jax | 8,940 | google__jax-8940 | [
"8688"
] | 0404dbdd29b4d405427f4e6f2eed0e03a0224782 | diff --git a/jax/_src/lax/parallel.py b/jax/_src/lax/parallel.py
--- a/jax/_src/lax/parallel.py
+++ b/jax/_src/lax/parallel.py
@@ -793,7 +793,8 @@ def _ppermute_batcher(axis_size, frame_name, _, vals_in, dims_in, axis_name, per
raise NotImplementedError("ppermute batcher only supports a single axis")
assert axis_name[0] == frame_name, "ppermute batcher called with a wrong axis!"
assert len(perm) == axis_size, "Permutation doesn't match the axis size!"
- assert d is not batching.not_mapped
+ if d is batching.not_mapped:
+ return v, d
perm_indices = np.zeros(axis_size, dtype=int)
for src, dst in perm:
perm_indices[dst] = src
| diff --git a/tests/batching_test.py b/tests/batching_test.py
--- a/tests/batching_test.py
+++ b/tests/batching_test.py
@@ -1267,6 +1267,19 @@ def f(x):
self.assertEqual(f(jnp.ones(3)).shape, (3,))
self.assertEqual(jax.vmap(f)(jnp.ones((2, 3))).shape, (2, 3))
+ def testPpermuteBatcherTrivial(self):
+ # https://github.com/google/jax/issues/8688
+ def ppermute(input):
+ return jax.lax.ppermute(input, axis_name="i", perm=[[0, 1], [1, 0]])
+
+ grad_fn = jax.grad(ppermute)
+
+ vmapped_gradients_fn = jax.vmap(grad_fn, axis_name="i")
+
+ vector = jax.numpy.array([1., 2.])
+ ans = vmapped_gradients_fn(vector) # doesn't crash
+ self.assertAllClose(ans, jnp.ones(2), check_dtypes=False)
+
Array = Any
ArrayElt = Any
| Unexpected assertion error for gradient of ppermute.
The following code produces an assertion error:
```
import jax
def ppermute(input):
return jax.lax.ppermute(input, axis_name="i", perm=[[0, 1], [1, 0]])
grad_fn = jax.grad(ppermute)
vmapped_gradients_fn = jax.vmap(grad_fn, axis_name="i")
vector = jax.numpy.array([1., 2.])
grads = vmapped_gradients_fn(vector)
```
```
AssertionError Traceback (most recent call last)
/usr/local/lib/python3.7/dist-packages/jax/_src/lax/parallel.py in _ppermute_batcher(axis_size, frame_name, _, vals_in, dims_in, axis_name, perm)
791 assert axis_name[0] == frame_name, "ppermute batcher called with a wrong axis!"
792 assert len(perm) == axis_size, "Permutation doesn't match the axis size!"
--> 793 assert d is not batching.not_mapped
794 perm_indices = np.zeros(axis_size, dtype=int)
795 for src, dst in perm:
```
Interestingly, the assertion error goes away if I just inject a constant vector of ones:
```
import jax
def ppermute(input, one):
return one * jax.lax.ppermute(input, axis_name="i", perm=[[0, 1], [1, 0]])
grad_fn = jax.grad(ppermute)
vmapped_gradients_fn = jax.vmap(grad_fn, axis_name="i")
vector = jax.numpy.array([1., 2.])
ones = jax.numpy.ones([2])
grads = vmapped_gradients_fn(vector, ones)
print(grads) # [1. 1.]
```
This makes me believe the assert triggering above may indicate a bug.
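For reference, a standalone sketch of the one-line fix in the patch above: when the value isn't actually mapped along the vmapped axis, the batching rule passes it through instead of asserting. The names here are plain-Python stand-ins, not the real JAX internals:
```python
NOT_MAPPED = object()  # plays the role of batching.not_mapped

def ppermute_batcher_sketch(value, batch_dim, apply_perm):
    if batch_dim is NOT_MAPPED:      # was: assert batch_dim is not batching.not_mapped
        return value, batch_dim      # unmapped input: nothing to permute over
    return apply_perm(value), batch_dim

# The unmapped case no longer trips an assertion:
print(ppermute_batcher_sketch([1.0, 2.0], NOT_MAPPED, lambda v: v[::-1]))
```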
| 2021-12-14T18:42:39 |
|
google/jax | 8,941 | google__jax-8941 | [
"8536"
] | 0404dbdd29b4d405427f4e6f2eed0e03a0224782 | diff --git a/jax/__init__.py b/jax/__init__.py
--- a/jax/__init__.py
+++ b/jax/__init__.py
@@ -52,6 +52,7 @@
)
from jax._src.api import (
ad, # TODO(phawkins): update users to avoid this.
+ block_until_ready,
checkpoint as checkpoint,
checkpoint_policies as checkpoint_policies,
closure_convert as closure_convert,
diff --git a/jax/_src/api.py b/jax/_src/api.py
--- a/jax/_src/api.py
+++ b/jax/_src/api.py
@@ -3138,3 +3138,22 @@ def invertible(fun: Callable) -> Callable:
fun: The function assumed to be invertible.
"""
return iad.invertible(fun)
+
+
+def block_until_ready(x):
+ """
+ Tries to call a ``block_until_ready`` method on pytree leaves.
+
+ Args:
+ x: a pytree, usually with at least some JAX array instances at its leaves.
+
+ Returns:
+ A pytree with the same structure and values of the input, where the values
+ of all JAX array leaves are ready.
+ """
+ def try_to_block(x):
+ try:
+ return x.block_until_ready()
+ except AttributeError:
+ return x
+ return jax.tree_util.tree_map(try_to_block, x)
| diff --git a/tests/api_test.py b/tests/api_test.py
--- a/tests/api_test.py
+++ b/tests/api_test.py
@@ -1517,6 +1517,13 @@ def test_devicearray_block_until_ready(self):
# Tests mostly that block_until_ready() does not produce an error.
self.assertTrue(y is x)
+ def test_block_until_ready_function(self):
+ # Just tests that we don't error...
+ pytree = (device_put(1.), np.ones(3))
+ pytree = jax.block_until_ready(pytree)
+ self.assertAllClose(pytree[0], jnp.array(1.), check_dtypes=False)
+ self.assertAllClose(pytree[1], np.ones(3), check_dtypes=False)
+
def test_devicearray_weakref_friendly(self):
x = device_put(1.)
y = weakref.ref(x)
| jax.block_until_ready() for blocking until pytrees leaves are ready
It is sometimes useful (e.g., in benchmarking) to disable JAX's asynchronous dispatch: https://jax.readthedocs.io/en/latest/async_dispatch.html
The easy way to wait for a single array is to call `.block_until_ready()`, but this doesn't work on pytrees.
I propose adding a function `jax.block_until_ready()` for mapping block_until_ready() over the leaves of a pytree:
```python
def block_until_ready(pytree):
return tree_util.tree_map(lambda x: x.block_until_ready(), pytree)
```
I've written versions of this helper function quite a few times and it would be nice not to have to do so again.
This version requires that all leaves are JAX arrays. Alternatively, we could skip calling `.block_until_ready()` on non-JAX arrays, which don't have the method defined. This might be more generally useful.
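A sketch of that more lenient variant (this is the try/except approach the patch above ends up using):
```python
import jax

def block_until_ready(pytree):
    def try_to_block(x):
        try:
            return x.block_until_ready()  # JAX arrays: wait for the value
        except AttributeError:
            return x                      # non-JAX leaves: pass through unchanged
    return jax.tree_util.tree_map(try_to_block, pytree)
```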
| @mattjj or perhaps we should have a typeclass for things you can block on? | 2021-12-14T19:02:54 |
google/jax | 8,946 | google__jax-8946 | [
"8171"
] | d6223cd4f843accc4cf9fddf1f5f49aec87c8a2f | diff --git a/jax/interpreters/partial_eval.py b/jax/interpreters/partial_eval.py
--- a/jax/interpreters/partial_eval.py
+++ b/jax/interpreters/partial_eval.py
@@ -1408,8 +1408,9 @@ def process_custom_jvp_call(self, prim, fun, jvp, tracers):
with core.new_sublevel():
fun_jaxpr, out_avals, consts = trace_to_subjaxpr_dynamic(fun, self.main, in_avals)
closed_fun_jaxpr = core.ClosedJaxpr(convert_constvars_jaxpr(fun_jaxpr), ())
+ main_ = ref(self.main)
jvp_jaxpr_thunk = _memoize(
- lambda: trace_to_subjaxpr_dynamic(jvp, self.main, 2 * in_avals)[::2])
+ lambda: trace_to_subjaxpr_dynamic(jvp, main_(), 2 * in_avals)[::2])
out_tracers = [DynamicJaxprTracer(self, a) for a in out_avals]
invars = map(self.getvar, tracers)
constvars = map(self.getvar, map(self.instantiate_const, consts))
@@ -1430,8 +1431,9 @@ def process_custom_vjp_call(self, prim, fun, fwd, bwd, tracers, out_trees):
with core.new_sublevel():
fun_jaxpr, out_avals, consts = trace_to_subjaxpr_dynamic(fun, self.main, in_avals)
closed_fun_jaxpr = core.ClosedJaxpr(convert_constvars_jaxpr(fun_jaxpr), ())
+ main_ = ref(self.main)
fwd_jaxpr_thunk = _memoize(
- lambda: trace_to_subjaxpr_dynamic(fwd, self.main, in_avals)[::2])
+ lambda: trace_to_subjaxpr_dynamic(fwd, main_(), in_avals)[::2])
out_tracers = [DynamicJaxprTracer(self, a) for a in out_avals]
invars = map(self.getvar, tracers)
constvars = map(self.getvar, map(self.instantiate_const, consts))
@@ -1449,6 +1451,9 @@ def post_process_custom_vjp_call(self, out_tracers, params):
assert False # unreachable
def _memoize(thunk):
+ if config.jax_check_tracer_leaks:
+ return thunk
+
cell = []
saved_state = core.thread_local_state.trace_state.copy()
def memoized():
| diff --git a/tests/nn_test.py b/tests/nn_test.py
--- a/tests/nn_test.py
+++ b/tests/nn_test.py
@@ -176,6 +176,22 @@ def testOneHotAxis(self):
def testTanhExists(self):
nn.tanh # doesn't crash
+ def testCustomJVPLeak(self):
+ # https://github.com/google/jax/issues/8171
+ @jax.jit
+ def fwd():
+ a = jnp.array(1.)
+
+ def f(hx, _):
+ hx = jax.nn.sigmoid(hx + a)
+ return hx, None
+
+ hx = jnp.array(0.)
+ jax.lax.scan(f, hx, None, length=2)
+
+ with jax.checking_leaks():
+ fwd() # doesn't crash
+
InitializerRecord = collections.namedtuple(
"InitializerRecord",
["name", "initializer", "shapes", "dtypes"])
| `jax.nn.sigmoid` custom jvp leak
Hello,
I got a tracing leak related to `jax.nn.sigmoid` custom jvp. I think this is related to https://github.com/google/jax/issues/5636.
I'm using JAX 0.2.21 on CPU.
```python
import jax
import jax.numpy as jnp
print(jax.__version__)
# 0.2.21
def _sigmoid(x: jnp.ndarray):
return 1.0 / (1. + jnp.exp(-x))
@jax.jit
def fwd():
a = jnp.array(1.)
def f(hx, _):
hx = jax.nn.sigmoid(hx + a)
# hx = _sigmoid(hx + a)
return hx, None
hx = jnp.array(0.)
jax.lax.scan(f, hx, None, length=2)
with jax.checking_leaks():
fwd()
# [...]
# Exception: Leaked sublevel 1. Leaked tracer(s): [Traced<ShapedArray(float32[], weak_type=True)>with<DynamicJaxprTrace(level=0/1)>].
```
When I replace `jax.nn.sigmoid` by `_sigmoid`, no leak is detected.
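For context, the relevant difference is that `jax.nn.sigmoid` goes through the custom-JVP machinery while `_sigmoid` does not. A rough sketch of a `custom_jvp`-based sigmoid (an illustration of that code path, not the actual library definition):
```python
import jax
import jax.numpy as jnp

@jax.custom_jvp
def sigmoid(x):
    return 1.0 / (1.0 + jnp.exp(-x))

@sigmoid.defjvp
def sigmoid_jvp(primals, tangents):
    x, = primals
    x_dot, = tangents
    y = sigmoid(x)
    # d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x))
    return y, y * (1.0 - y) * x_dot
```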
The full stack trace:
```
---------------------------------------------------------------------------
UnfilteredStackTrace Traceback (most recent call last)
<ipython-input-22-61b72614d9a4> in <module>
16 with jax.checking_leaks():
---> 17 fwd()
[... skipping hidden 1 frame]
~/.local/lib/python3.9/site-packages/jax/_src/api.py in cache_miss(*args, **kwargs)
410 flat_fun, out_tree = flatten_fun(f, in_tree)
--> 411 out_flat = xla.xla_call(
412 flat_fun, *args_flat,
~/.local/lib/python3.9/site-packages/jax/core.py in bind(self, fun, *args, **params)
1617 def bind(self, fun, *args, **params):
-> 1618 return call_bind(self, fun, *args, **params)
1619
~/.local/lib/python3.9/site-packages/jax/core.py in call_bind(primitive, fun, *args, **params)
1608 tracers = map(top_trace.full_raise, args)
-> 1609 outs = primitive.process(top_trace, fun, tracers, params)
1610 return map(full_lower, apply_todos(env_trace_todo(), outs))
~/.local/lib/python3.9/site-packages/jax/core.py in process(self, trace, fun, tracers, params)
1620 def process(self, trace, fun, tracers, params):
-> 1621 return trace.process_call(self, fun, tracers, params)
1622
~/.local/lib/python3.9/site-packages/jax/core.py in process_call(self, primitive, f, tracers, params)
614 def process_call(self, primitive, f, tracers, params):
--> 615 return primitive.impl(f, *tracers, **params)
616 process_map = process_call
~/.local/lib/python3.9/site-packages/jax/interpreters/xla.py in _xla_call_impl(***failed resolving arguments***)
621 del inline # Only used at tracing time
--> 622 compiled_fun = _xla_callable(fun, device, backend, name, donated_invars,
623 *unsafe_map(arg_spec, args))
~/.local/lib/python3.9/site-packages/jax/linear_util.py in memoized_fun(fun, *args)
261 else:
--> 262 ans = call(fun, *args)
263 cache[key] = (ans, fun.stores)
~/.local/lib/python3.9/site-packages/jax/interpreters/xla.py in _xla_callable(fun, device, backend, name, donated_invars, *arg_specs)
693 def _xla_callable(fun: lu.WrappedFun, device, backend, name, donated_invars, *arg_specs):
--> 694 return lower_xla_callable(fun, device, backend, name, donated_invars, *arg_specs).compile().unsafe_call
695
~/.local/lib/python3.9/site-packages/jax/interpreters/xla.py in lower_xla_callable(fun, device, backend, name, donated_invars, *arg_specs)
701 abstract_args, arg_devices = unzip2(arg_specs)
--> 702 jaxpr, out_avals, consts = pe.trace_to_jaxpr_final(
703 fun, abstract_args, pe.debug_info_final(fun, "jit"))
~/.local/lib/python3.9/site-packages/jax/interpreters/partial_eval.py in trace_to_jaxpr_final(fun, in_avals, debug_info)
1521 with core.new_sublevel():
-> 1522 jaxpr, out_avals, consts = trace_to_subjaxpr_dynamic(fun, main, in_avals)
1523 del fun, main
/usr/lib/python3.9/contextlib.py in __exit__(self, type, value, traceback)
123 try:
--> 124 next(self.gen)
125 except StopIteration:
~/.local/lib/python3.9/site-packages/jax/core.py in new_sublevel()
842 if leaked_tracers:
--> 843 raise Exception(f'Leaked sublevel {t()}. Leaked tracer(s): {leaked_tracers}.')
844
UnfilteredStackTrace: Exception: Leaked sublevel 1. Leaked tracer(s): [Traced<ShapedArray(float32[], weak_type=True)>with<DynamicJaxprTrace(level=0/1)>].
The stack trace below excludes JAX-internal frames.
The preceding is the original exception that occurred, unmodified.
--------------------
The above exception was the direct cause of the following exception:
Exception Traceback (most recent call last)
<ipython-input-22-61b72614d9a4> in <module>
15
16 with jax.checking_leaks():
---> 17 fwd()
/usr/lib/python3.9/contextlib.py in __exit__(self, type, value, traceback)
122 if type is None:
123 try:
--> 124 next(self.gen)
125 except StopIteration:
126 return False
Exception: Leaked sublevel 1. Leaked tracer(s): [Traced<ShapedArray(float32[], weak_type=True)>with<DynamicJaxprTrace(level=0/1)>].
```
| 2021-12-14T20:59:24 |
|
google/jax | 8,947 | google__jax-8947 | [
"8910"
] | 17b4aa9ad0a72eabcfbdeb4956d721ab6e19430d | diff --git a/jax/interpreters/partial_eval.py b/jax/interpreters/partial_eval.py
--- a/jax/interpreters/partial_eval.py
+++ b/jax/interpreters/partial_eval.py
@@ -1159,11 +1159,13 @@ def _origin_msg(self):
msts = [" operation "
f"{core.pp_eqn(eqn, core.JaxprPpContext(), print_shapes=True)}\n"
f" from line {source_info_util.summarize(eqn.source_info)}"
- for eqn in progenitor_eqns]
+ for eqn in progenitor_eqns[:5]] # show at most 5
origin = (f"While tracing the function {dbg.func_src_info} "
f"for {dbg.traced_for}, "
"this value became a tracer due to JAX operations on these lines:"
"\n\n" + "\n\n".join(msts))
+ if len(progenitor_eqns) > 5:
+ origin += "\n\n(Additional originating lines are not shown.)"
else:
origin = (f"The error occured while tracing the function {dbg.func_src_info} "
f"for {dbg.traced_for}.")
| diff --git a/tests/api_test.py b/tests/api_test.py
--- a/tests/api_test.py
+++ b/tests/api_test.py
@@ -2765,6 +2765,16 @@ def f():
with self.assertRaisesRegex(core.ConcretizationTypeError, msg):
f()
+ def test_concrete_error_because_const_2(self):
+ @jax.jit
+ def f():
+ result = sum(jnp.add(1, 1) for _ in range(6))
+ assert result > 0
+
+ msg = "Additional originating lines are not shown."
+ with self.assertRaisesRegex(core.ConcretizationTypeError, msg):
+ f()
+
def test_xla_computation_zeros_doesnt_device_put(self):
with jtu.count_device_put() as count:
api.xla_computation(lambda: jnp.zeros(3))()
| set a max on the number of lines of tracer provenance printed
A user reported getting a ton of lines printed under "this value became a tracer due to JAX operations on these lines". That makes sense, since there's no limit on the amount of provenance information we attempt to print! Let's set a limit...
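A standalone sketch of the truncation the patch above adds (the real code formats jaxpr equations rather than plain strings):
```python
def summarize_provenance(lines, max_lines=5):
    """Show at most `max_lines` originating lines, noting when more were dropped."""
    shown = "\n\n".join(lines[:max_lines])
    if len(lines) > max_lines:
        shown += "\n\n(Additional originating lines are not shown.)"
    return shown

print(summarize_provenance([f"operation {i} from line {i}" for i in range(8)]))
```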
| 2021-12-14T21:29:47 |
|
google/jax | 8,948 | google__jax-8948 | [
"8945"
] | c060f4614fe5e2f7dec7cbbd3cf209c80fac50ec | diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -475,25 +475,6 @@ def _jnp_dtype(obj, align=False, copy=False):
64: np.int64,
}
-def _np_array(obj, dtype=None, **kwargs):
- """Return a properly-typed numpy array.
-
- `_np_array(obj, **kwds)` is equivalent to `np.array(obj, **kwds)`, with the
- exception that when obj.dtype is not defined and dtype is not specified, it
- uses Jax's default dtypes.
- """
- arr = np.array(obj, dtype=dtype, **kwargs)
- obj_dtype = getattr(obj, 'dtype', None)
- arr_dtype = np.dtype(arr.dtype).type
- if dtype is None and obj_dtype is None:
- if dtypes.is_python_scalar(obj):
- arr = arr.astype(result_type(obj))
- elif arr_dtype in _DEFAULT_TYPEMAP:
- arr = arr.astype(_DEFAULT_TYPEMAP[arr_dtype])
- return arr
-
-_np_asarray = partial(_np_array, copy=False)
-
def _promote_shapes(fun_name, *args):
"""Prepend implicit leading singleton dimensions for Numpy broadcasting."""
if len(args) < 2:
@@ -3574,7 +3555,6 @@ def atleast_3d(*arys):
https://jax.readthedocs.io/en/latest/faq.html).
"""
-
@_wraps(np.array, lax_description=_ARRAY_DOC)
def array(object, dtype=None, copy=True, order="K", ndmin=0):
if order is not None and order != "K":
@@ -3583,28 +3563,45 @@ def array(object, dtype=None, copy=True, order="K", ndmin=0):
# check if the given dtype is compatible with JAX
lax._check_user_dtype_supported(dtype, "array")
+ # Here we make a judgment call: we only return a weakly-typed array when the
+ # input object itself is weakly typed. That ensures asarray(x) is a no-op whenever
+ # x is weak, but avoids introducing weak types with something like array([1, 2, 3])
weak_type = dtype is None and dtypes.is_weakly_typed(object)
- dtype = dtype and dtypes.canonicalize_dtype(dtype)
- if _can_call_numpy_array(object):
- if dtypes.is_python_scalar(object):
- object = dtypes.coerce_to_array(object, dtype)
- # TODO(jakevdp): falling back to numpy here fails to overflow for lists containing
- # large integers; see discussion in https://github.com/google/jax/pull/6047.
- object = _np_array(object, dtype=dtype, ndmin=ndmin, copy=False)
+ # For Python scalar literals, call coerce_to_array to catch any overflow errors.
+ # We don't use dtypes.is_python_scalar because we don't want this triggering for
+ # traced values. We do this here because it matters whether or not dtype is None.
+ # We don't assign the result because we want the raw object to be used for type
+ # inference below.
+ if isinstance(object, (bool, int, float, complex)):
+ _ = dtypes.coerce_to_array(object, dtype)
+
+ leaves = tree_leaves(object)
+ if dtype is None:
+ # Use lattice_result_type rather than result_type to avoid canonicalization.
+ # Otherwise, weakly-typed inputs would have their dtypes canonicalized.
+ try:
+ dtype = dtypes._lattice_result_type(*leaves)[0] if leaves else dtypes.float_
+ except TypeError:
+ # This happens if, e.g. one of the entries is a memoryview object.
+ # This is rare, so we only handle it if the normal path fails.
+ leaves = [_convert_to_array_if_dtype_fails(leaf) for leaf in leaves]
+ dtype = dtypes._lattice_result_type(*leaves)[0]
- # call _np_array a second time with canonicalized dtype
- dtype = dtypes.canonicalize_dtype(object.dtype)
- object = _np_array(object, dtype=dtype, copy=False)
+ if not weak_type:
+ dtype = dtypes.canonicalize_dtype(dtype)
- assert type(object) not in dtypes.python_scalar_dtypes
+ # We can't use the ndarray class because we need to handle internal buffers
+ # (See https://github.com/google/jax/issues/8950)
+ ndarray_types = (device_array.DeviceArray, core.Tracer)
- if type(object) is np.ndarray:
- _inferred_dtype = object.dtype and dtypes.canonicalize_dtype(object.dtype)
- lax._check_user_dtype_supported(_inferred_dtype, "array")
- out = _np_array(object, copy=copy, dtype=dtype)
- if dtype: assert _dtype(out) == dtype
- elif isinstance(object, (device_array.DeviceArray, core.Tracer)):
+ if not _any(isinstance(leaf, ndarray_types) for leaf in leaves):
+ # TODO(jakevdp): falling back to numpy here fails to overflow for lists containing
+ # large integers; see discussion in https://github.com/google/jax/pull/6047.
+ # More correct would be to call coerce_to_array on each leaf, but this may have
+ # performance implications.
+ out = np.array(object, dtype=dtype, ndmin=ndmin, copy=False)
+ elif isinstance(object, ndarray_types):
if object.aval is None:
# object is a raw buffer; convert to device array on its current device.
aval = ShapedArray(object.xla_shape().dimensions(), object.dtype,
@@ -3615,34 +3612,30 @@ def array(object, dtype=None, copy=True, order="K", ndmin=0):
if object:
out = stack([asarray(elt, dtype=dtype) for elt in object])
else:
- out = _np_array([], dtype=dtype)
+ out = np.array([], dtype=dtype)
else:
try:
view = memoryview(object)
except TypeError:
pass # `object` does not support the buffer interface.
else:
- return array(_np_asarray(view), dtype, copy, ndmin=ndmin)
+ return array(np.asarray(view), dtype, copy, ndmin=ndmin)
raise TypeError("Unexpected input type for array: {}".format(type(object)))
- if weak_type:
- # Here we make a judgment call: we only return a weakly-typed array when obj
- # itself is weakly typed. That ensures array(x) is a no-op whenever x is weak,
- # but avoids introducing weak types with something like array([1, 2, 3])
- out = lax._convert_element_type(out, dtype, weak_type=True)
- else:
- # If dtype is not specified, we use result_type(out). This ensures JIT invariance
- # with, e.g. lists of scalars.
- out = lax._convert_element_type(out, dtype or result_type(out))
-
+ out = lax._convert_element_type(out, dtype, weak_type=weak_type)
if ndmin > ndim(out):
- out = lax.broadcast(out, (1,) * (ndmin - ndim(out)))
+ out = lax.expand_dims(out, range(ndmin - ndim(out)))
return out
-def _can_call_numpy_array(x):
- return _all(not isinstance(l, (core.Tracer, device_array.DeviceArray))
- for l in tree_leaves(x))
+
+def _convert_to_array_if_dtype_fails(x):
+ try:
+ dtypes.dtype(x)
+ except TypeError:
+ return np.asarray(x)
+ else:
+ return x
@_wraps(np.asarray, lax_description=_ARRAY_DOC)
| diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py
--- a/tests/lax_numpy_test.py
+++ b/tests/lax_numpy_test.py
@@ -3667,6 +3667,11 @@ def _check(obj, out_dtype, weak_type):
_check([jnp.float64(1)], np.float64, False)
_check([jnp.complex128(1)], np.complex128, False)
+ # Mixed inputs use JAX-style promotion.
+ # (regression test for https://github.com/google/jax/issues/8945)
+ _check([0, np.int16(1)], np.int16, False)
+ _check([0.0, np.float16(1)], np.float16, False)
+
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": f"_dtype={np.dtype(dtype)}", "dtype": dtype}
for dtype in all_dtypes))
@@ -3753,12 +3758,12 @@ def __array__(self, dtype=None):
def testArrayMethod(self):
class arraylike(object):
- dtype = np.float32
+ dtype = np.dtype('float32')
def __array__(self, dtype=None):
return np.array(3., dtype=dtype)
a = arraylike()
ans = jnp.array(a)
- assert ans == 3.
+ self.assertEqual(ans, 3.)
def testMemoryView(self):
self.assertAllClose(
| jnp.array breaks JIT invariance for some list inputs
Minimal repro:
```python
import numpy as np
import jax.numpy as jnp
from jax import jit
x = [0, np.int16(1)]
print(jnp.array(x).dtype)
# int32
print(jit(jnp.array)(x).dtype)
# int16
```
The reason for this is that, outside JIT, `jnp.array(x)` falls back to `np.array(x)`, presumably for performance reasons, which results in numpy-style type promotion being applied to the list contents in place of JAX-style type promotion: https://github.com/google/jax/blob/2f4bd118839b29bdf9aa84f8c5b351ce6d7bff71/jax/_src/numpy/lax_numpy.py#L3589-L3594
Within JIT, however, the list contents are traced and `_can_call_numpy_array` evaluates to False, meaning that the final dtype is determined by `jnp.stack`, which uses JAX type promotion to decide on the output type: https://github.com/google/jax/blob/2f4bd118839b29bdf9aa84f8c5b351ce6d7bff71/jax/_src/numpy/lax_numpy.py#L3614-L3616
I think the best fix here would be to use JAX-style type promotion in both cases.
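For reference, JAX-style promotion treats the Python scalar as weakly typed, so the `int16` wins; a quick check of what the fix would make both paths agree on:
```python
import numpy as np
import jax.numpy as jnp

# JAX's promotion lattice: a weakly-typed Python int defers to the concrete int16.
print(jnp.result_type(0, np.int16(1)))  # int16
```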
| 2021-12-14T21:35:46 |
|
google/jax | 8,951 | google__jax-8951 | [
"7809"
] | d6223cd4f843accc4cf9fddf1f5f49aec87c8a2f | diff --git a/jax/_src/flatten_util.py b/jax/_src/flatten_util.py
--- a/jax/_src/flatten_util.py
+++ b/jax/_src/flatten_util.py
@@ -57,7 +57,23 @@ def _ravel_list(lst):
sizes, shapes = unzip2((jnp.size(x), jnp.shape(x)) for x in lst)
indices = np.cumsum(sizes)
+ if all(dt == to_dtype for dt in from_dtypes):
+ # Skip any dtype conversion, resulting in a dtype-polymorphic `unravel`.
+ # See https://github.com/google/jax/issues/7809.
+ del from_dtypes, to_dtype
+ def unravel(arr):
+ chunks = jnp.split(arr, indices[:-1])
+ return [chunk.reshape(shape) for chunk, shape in zip(chunks, shapes)]
+ raveled = jnp.concatenate([jnp.ravel(e) for e in lst])
+ return raveled, unravel
+
+ # When there is more than one distinct input dtype, we perform type
+ # conversions and produce a dtype-specific unravel function.
def unravel(arr):
+ arr_dtype = dtypes.dtype(arr)
+ if arr_dtype != to_dtype:
+ raise TypeError(f"unravel function given array of dtype {arr_dtype}, "
+ f"but expected dtype {to_dtype}")
chunks = jnp.split(arr, indices[:-1])
with warnings.catch_warnings():
warnings.simplefilter("ignore") # ignore complex-to-real cast warning
| diff --git a/tests/tree_util_test.py b/tests/tree_util_test.py
--- a/tests/tree_util_test.py
+++ b/tests/tree_util_test.py
@@ -379,6 +379,23 @@ def testEmpty(self):
tree_ = unravel(raveled)
self.assertAllClose(tree, tree_, atol=0., rtol=0.)
+ def testDtypePolymorphicUnravel(self):
+ # https://github.com/google/jax/issues/7809
+ x = jnp.arange(10, dtype=jnp.float32)
+ x_flat, unravel = flatten_util.ravel_pytree(x)
+ y = x_flat < 5.3
+ x_ = unravel(y)
+ self.assertEqual(x_.dtype, y.dtype)
+
+ def testDtypeMonomorphicUnravel(self):
+ # https://github.com/google/jax/issues/7809
+ x1 = jnp.arange(10, dtype=jnp.float32)
+ x2 = jnp.arange(10, dtype=jnp.int32)
+ x_flat, unravel = flatten_util.ravel_pytree((x1, x2))
+ y = x_flat < 5.3
+ with self.assertRaisesRegex(TypeError, 'but expected dtype'):
+ _ = unravel(y)
+
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| Unraveling an array with dtype bool results in pytree arrays with dtype float32
Please:
- [x] Check for duplicate issues.
- [x] Provide a complete example of how to reproduce the bug, wrapped in triple backticks like this:
```python
import jax.numpy as jnp
from jax.flatten_util import ravel_pytree
x = jnp.arange(10, dtype=jnp.float32)
x_flat, unravel = ravel_pytree(x)
y = x_flat < 5.3
print(y.dtype) # => <dtype: 'bool'>
print(unravel(y).dtype) # => <dtype: 'float32'>
```
- [x] If applicable, include full error messages/tracebacks.
n/a
I'm running Python 3.9 with jax 0.2.19 and jaxlib 0.1.70.
| Hi - thanks for the report. I think this may be intended, given the lines here: https://github.com/google/jax/blob/c69eedd6f8bd360f23b8793c074648addd1e23d7/jax/flatten_util.py#L55-L56
The output dtype is explicitly set to the type promotion output of the input dtypes.
Maybe @mattjj has some context on this?
I was worried about that... My hope was that ravel/unravel would only transform array shapes and not dtypes, but perhaps there is a good reason it should coerce dtypes as well?
Sorry @samuela, I let this one slip through the cracks somehow!
The generated `unravel` function just isn't dtype-polymorphic; it produces an output with type (i.e. pytree structure, array shapes, and array dtypes) equal to that of the provided input. Perhaps it should've even raised an error when given a bool-dtype input...
One reason for that is to handle the case when there are mixed dtypes in the input pytree. Since the raveled array needs to have a uniform dtype, we promote the dtypes of the inputs, and when unraveling we cast back to the input's dtypes. But that means the output dtype doesn't determine the input dtypes in general, and hence it's not clear how to make an `unravel` which is polymorphic in the way you expected.
I'm not sure how to reconcile these two behaviors (supporting multiple distinct input dtypes, and producing a dtype-polymorphic `unravel`) in one function, without making the behavior complicated. We could just special case it to be "if all the input dtypes are equal, then produce a polymorphic `unravel`". But that might be making a simple function kind of unpredictable.
Since the function is small, would it make sense just to have your own implementation? (Or, in the intervening time, did you find other solutions?)
Here's an implementation which has the behavior you expected:
```python
import jax
from jax.tree_util import tree_flatten, tree_unflatten
from jax.util import unzip2
import jax.numpy as jnp
import numpy as np
def ravel_pytree(pytree):
leaves, treedef = tree_flatten(pytree)
flat, unravel_list = _ravel_list(leaves)
unravel_pytree = lambda flat: tree_unflatten(treedef, unravel_list(flat))
return flat, unravel_pytree
def _ravel_list(lst):
if not lst: return jnp.array([], jnp.float32), lambda _: []
from_dtypes = [jax.dtypes.result_type(l) for l in lst]
to_dtype = jax.dtypes.result_type(*from_dtypes)
if not all(from_dtype == to_dtype for from_dtype in from_dtypes):
raise Exception
del from_dtypes, to_dtype
sizes, shapes = unzip2((jnp.size(x), jnp.shape(x)) for x in lst)
indices = np.cumsum(sizes)
def unravel(arr):
chunks = jnp.split(arr, indices[:-1])
return [chunk.reshape(shape) for chunk, shape in zip(chunks, shapes)]
raveled = jnp.concatenate([jnp.ravel(e) for e in lst])
return raveled, unravel
x = jnp.arange(10, dtype=jnp.float32)
x_flat, unravel = ravel_pytree(x)
y = x_flat < 5.3
print(y.dtype) # => <dtype: 'bool'>
print(unravel(y).dtype) # => <dtype: 'bool'>
```
Since I'm in an issue-closing mood, and since this is an old one, I'm going to somewhat preemptively close this issue. Let us know if we should reopen and continue the discussion! | 2021-12-14T22:40:52 |
google/jax | 8,963 | google__jax-8963 | [
"8962"
] | 0d71bff7b974bc037dc873beb4bfd5be53350de9 | diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -7052,6 +7052,7 @@ def _set_shaped_array_attributes(shaped_array):
setattr(shaped_array, "split", core.aval_method(split))
setattr(shaped_array, "compress", _compress_method)
setattr(shaped_array, "at", core.aval_property(_IndexUpdateHelper))
+ setattr(shaped_array, "item", core.aval_method(device_array.DeviceArray.item))
_set_shaped_array_attributes(ShapedArray)
| diff --git a/tests/api_test.py b/tests/api_test.py
--- a/tests/api_test.py
+++ b/tests/api_test.py
@@ -825,6 +825,23 @@ def jit(self):
class APITest(jtu.JaxTestCase):
+ def test_grad_item(self):
+ def f(x):
+ if x.astype(bool).item():
+ return x ** 2
+ else:
+ return x
+ out = jax.grad(f)(2.0)
+ self.assertEqual(out, 4)
+
+ def test_jit_item(self):
+ def f(x):
+ return x.item()
+ x = jnp.array(1.0)
+ self.assertEqual(f(x), x)
+ with self.assertRaisesRegex(core.ConcretizationTypeError, "Abstract tracer value"):
+ jax.jit(f)(x)
+
def test_grad_bad_input(self):
def f(x):
return x
| array.item() not defined on ShapedArray (defined on jnp.array)
This also does not work under `jax.grad` and other transformations.
It should be very easy to fix.
```python
import jax
def test(x):
return x.item()
test(jax.numpy.ones(1))
# 1.0
>>> jax.jit(test)(jax.numpy.ones(1))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/filippovicentini/Documents/pythonenvs/netket_env/lib64/python3.8/site-packages/jax/_src/traceback_util.py", line 162, in reraise_with_filtered_traceback
return fun(*args, **kwargs)
File "/home/filippovicentini/Documents/pythonenvs/netket_env/lib64/python3.8/site-packages/jax/_src/api.py", line 424, in cache_miss
out_flat = xla.xla_call(
File "/home/filippovicentini/Documents/pythonenvs/netket_env/lib64/python3.8/site-packages/jax/core.py", line 1661, in bind
return call_bind(self, fun, *args, **params)
File "/home/filippovicentini/Documents/pythonenvs/netket_env/lib64/python3.8/site-packages/jax/core.py", line 1652, in call_bind
outs = primitive.process(top_trace, fun, tracers, params)
File "/home/filippovicentini/Documents/pythonenvs/netket_env/lib64/python3.8/site-packages/jax/core.py", line 1664, in process
return trace.process_call(self, fun, tracers, params)
File "/home/filippovicentini/Documents/pythonenvs/netket_env/lib64/python3.8/site-packages/jax/core.py", line 633, in process_call
return primitive.impl(f, *tracers, **params)
File "/home/filippovicentini/Documents/pythonenvs/netket_env/lib64/python3.8/site-packages/jax/_src/dispatch.py", line 128, in _xla_call_impl
compiled_fun = _xla_callable(fun, device, backend, name, donated_invars,
File "/home/filippovicentini/Documents/pythonenvs/netket_env/lib64/python3.8/site-packages/jax/linear_util.py", line 263, in memoized_fun
ans = call(fun, *args)
File "/home/filippovicentini/Documents/pythonenvs/netket_env/lib64/python3.8/site-packages/jax/_src/dispatch.py", line 155, in _xla_callable_uncached
return lower_xla_callable(fun, device, backend, name, donated_invars,
File "/home/filippovicentini/Documents/pythonenvs/netket_env/lib64/python3.8/site-packages/jax/_src/profiler.py", line 206, in wrapper
return func(*args, **kwargs)
File "/home/filippovicentini/Documents/pythonenvs/netket_env/lib64/python3.8/site-packages/jax/_src/dispatch.py", line 169, in lower_xla_callable
jaxpr, out_avals, consts = pe.trace_to_jaxpr_final(
File "/home/filippovicentini/Documents/pythonenvs/netket_env/lib64/python3.8/site-packages/jax/_src/profiler.py", line 206, in wrapper
return func(*args, **kwargs)
File "/home/filippovicentini/Documents/pythonenvs/netket_env/lib64/python3.8/site-packages/jax/interpreters/partial_eval.py", line 1566, in trace_to_jaxpr_final
jaxpr, out_avals, consts = trace_to_subjaxpr_dynamic(fun, main, in_avals)
File "/home/filippovicentini/Documents/pythonenvs/netket_env/lib64/python3.8/site-packages/jax/interpreters/partial_eval.py", line 1543, in trace_to_subjaxpr_dynamic
ans = fun.call_wrapped(*in_tracers)
File "/home/filippovicentini/Documents/pythonenvs/netket_env/lib64/python3.8/site-packages/jax/linear_util.py", line 166, in call_wrapped
ans = self.f(*args, **dict(self.params, **kwargs))
File "<stdin>", line 2, in test
File "/home/filippovicentini/Documents/pythonenvs/netket_env/lib64/python3.8/site-packages/jax/core.py", line 574, in __getattr__
attr = getattr(self.aval, name)
jax._src.traceback_util.UnfilteredStackTrace: AttributeError: 'ShapedArray' object has no attribute 'item'
The stack trace below excludes JAX-internal frames.
The preceding is the original exception that occurred, unmodified.
--------------------
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "<stdin>", line 2, in test
AttributeError: 'ShapedArray' object has no attribute 'item'
```
| This seems reasonable to me - `item()` returns a Python scalar containing the value in the array. The values within traced arrays are abstract, and thus the value cannot be accessed. Maybe a more explicit error would be clearer?
(Edit: in the case of concrete tracers, perhaps we should return the value)
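A quick illustration of where that line ends up being drawn (mirroring the tests added above): `.item()` works when the traced value is concrete, e.g. under `grad`, and raises a concretization error under `jit`:
```python
import jax
import jax.numpy as jnp

def f(x):
    # .item() needs a concrete value; grad's tracers carry one here.
    return x ** 2 if x.astype(bool).item() else x

print(jax.grad(f)(2.0))  # 4.0

try:
    jax.jit(lambda x: x.item())(jnp.array(1.0))
except jax.errors.ConcretizationTypeError as err:
    print("item() is unavailable under jit:", type(err).__name__)
```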
Ah!
I just wanted to get a scalar `()`-shaped numpy array and this looked like an easy way to do that. I can of course just reshape.
Ok, this is a bug of my brain, not of jax.
Sorry. | 2021-12-15T18:15:14 |
google/jax | 9,061 | google__jax-9061 | [
"9050"
] | ae392e23b7249799d76a4747fc8ba9470cceb31d | diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -6090,7 +6090,7 @@ def _expand_bool_indices(idx, shape):
else:
out.append(i)
if i is Ellipsis:
- ellipsis_offset = _max(0, len(shape) - total_dims - 1)
+ ellipsis_offset = len(shape) - total_dims - 1
return tuple(out)
| diff --git a/tests/lax_numpy_indexing_test.py b/tests/lax_numpy_indexing_test.py
--- a/tests/lax_numpy_indexing_test.py
+++ b/tests/lax_numpy_indexing_test.py
@@ -757,6 +757,21 @@ def testBoolean1DIndexingWithEllipsis(self):
expected = x[idx]
self.assertAllClose(ans, expected, check_dtypes=False)
+ def testBoolean1DIndexingWithEllipsis2(self):
+ # Regression test for https://github.com/google/jax/issues/9050
+ x = np.arange(3)
+ idx = (..., np.array([True, False, True]))
+ ans = jnp.array(x)[idx]
+ expected = x[idx]
+ self.assertAllClose(ans, expected, check_dtypes=False)
+
+ def testBoolean1DIndexingWithEllipsis3(self):
+ x = np.arange(6).reshape(2, 3)
+ idx = (0, ..., np.array([True, False, True]))
+ ans = jnp.array(x)[idx]
+ expected = x[idx]
+ self.assertAllClose(ans, expected, check_dtypes=False)
+
def testBoolean2DIndexingWithEllipsis(self):
x = np.arange(24).reshape(4, 3, 2)
idx = (..., np.array([[True, False], [True, False], [False, False]]))
| Boolean index with ellipsis
Boolean indexing the 'last' axis of a 1D array using the ellipsis notation no longer works in the version of JAX I'm using (jax==0.2.26, jaxlib==0.1.75). This is not the intended behavior, as the ellipsis should generalize to 0 extra leading dimensions as well (check by, for example, swapping `jnp` with `np` in the example below).
Here's a minimal example:
```python
arr = jnp.array([1, 2, 3])
mask = jnp.array([True, False, True])
arr[..., mask]
```
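For reference, the same indexing with NumPy gives the expected result (this is also what the regression test added above checks):
```python
import numpy as np

arr = np.array([1, 2, 3])
mask = np.array([True, False, True])
print(arr[..., mask])  # [1 3] -- the ellipsis expands to zero leading dimensions here
```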
This is the full traceback:
```
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
/tmp/ipykernel_36135/3397629872.py in <module>
1 arr = jnp.array([1, 2, 3])
2 mask = jnp.array([True, False, True])
----> 3 arr[..., mask]
~/.anaconda/envs/sax/lib/python3.9/site-packages/jax/_src/numpy/lax_numpy.py in _rewriting_take(arr, idx, indices_are_sorted, unique_indices, mode, fill_value)
5701 # followed by an optional reverse and broadcast_in_dim.
5702 arr = asarray(arr)
-> 5703 treedef, static_idx, dynamic_idx = _split_index_for_jit(idx, arr.shape)
5704 return _gather(arr, treedef, static_idx, dynamic_idx, indices_are_sorted,
5705 unique_indices, mode, fill_value)
~/.anaconda/envs/sax/lib/python3.9/site-packages/jax/_src/numpy/lax_numpy.py in _split_index_for_jit(idx, shape)
5780 # Expand any (concrete) boolean indices. We can then use advanced integer
5781 # indexing logic to handle them.
-> 5782 idx = _expand_bool_indices(idx, shape)
5783
5784 leaves, treedef = tree_flatten(idx)
~/.anaconda/envs/sax/lib/python3.9/site-packages/jax/_src/numpy/lax_numpy.py in _expand_bool_indices(idx, shape)
6092 expected_shape = shape[start: start + _ndim(i)]
6093 if i_shape != expected_shape:
-> 6094 raise IndexError("boolean index did not match shape of indexed array in index "
6095 f"{dim_number}: got {i_shape}, expected {expected_shape}")
6096 out.extend(np.where(i))
IndexError: boolean index did not match shape of indexed array in index 1: got (3,), expected ()
```
| Thanks for the report - I'll take a look at the issue today. | 2021-12-28T17:53:19 |
google/jax | 9,094 | google__jax-9094 | [
"9083"
] | 6411f8a03388ce63eb365188f2e2880815745125 | diff --git a/jax/_src/lax/slicing.py b/jax/_src/lax/slicing.py
--- a/jax/_src/lax/slicing.py
+++ b/jax/_src/lax/slicing.py
@@ -933,7 +933,7 @@ def _dynamic_update_slice_batching_rule(batched_args, batch_dims):
scatter, (operand, index, update), (operand_bd, index_bdim, update_bd),
update_jaxpr=None, update_consts=None, dimension_numbers=dnums,
indices_are_sorted=True, unique_indices=True,
- mode=GatherScatterMode.PROMISE_IN_BOUNDS)
+ mode=GatherScatterMode.CLIP)
dynamic_update_slice_p = standard_primitive(
| diff --git a/jax/experimental/jax2tf/tests/shape_poly_test.py b/jax/experimental/jax2tf/tests/shape_poly_test.py
--- a/jax/experimental/jax2tf/tests/shape_poly_test.py
+++ b/jax/experimental/jax2tf/tests/shape_poly_test.py
@@ -1701,6 +1701,9 @@ def _add_vmap_primitive_harnesses():
"lu",
"custom_linear_solve",
+ # Broken by https://github.com/google/jax/pull/9094
+ "dynamic_update_slice",
+
# We do *= shapes in the batching rule for conv_general_dilated
"conv_general_dilated",
diff --git a/tests/lax_test.py b/tests/lax_test.py
--- a/tests/lax_test.py
+++ b/tests/lax_test.py
@@ -1552,6 +1552,15 @@ def args_maker():
self._CheckAgainstNumpy(lax_reference.dynamic_update_slice,
lax.dynamic_update_slice, args_maker)
+ def testDynamicUpdateSliceBatched(self):
+ # Regression test for https://github.com/google/jax/issues/9083
+ x = jnp.arange(5)
+ y = jnp.arange(6, 9)
+ ind = jnp.arange(6)
+ expected = jnp.vstack([lax.dynamic_update_slice(x, y, (i,)) for i in ind])
+ actual = jax.vmap(lax.dynamic_update_slice, (None, None, 0))(x, y, (ind,))
+ self.assertAllClose(expected, actual)
+
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_perm={}".format(
jtu.format_shape_dtype_string(shape, dtype), perm),
| dynamic_update_slice start index adjustment fails when vmapped
The documentation for `dynamic_update_slice` explains: "If the update slice is too large to fit in the array, the start index will be adjusted to make it fit". This behavior works as documented in the unbatched case, but fails when I apply vmap: no update occurs at all when the update slice would not fit at the given start index, even though it would still partially overlap the array, as in the example below.
```python
zeros = jnp.zeros(5)
ones = jnp.ones(3)
for i in range(1, 4):
print(jax.lax.dynamic_update_slice(zeros, ones, (jnp.array(i, dtype=int),)))
print(jax.vmap(jax.lax.dynamic_update_slice, (None, None, 0))(zeros, ones, (jnp.arange(1, 4),)))
# [0. 1. 1. 1. 0.]
# [0. 0. 1. 1. 1.]
# [0. 0. 1. 1. 1.]
# [[0. 1. 1. 1. 0.]
# [0. 0. 1. 1. 1.]
# [0. 0. 0. 0. 0.]]
```
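A sketch of the check the regression test above performs: build the expected result by applying `dynamic_update_slice` once per start index, then compare against the vmapped call.
```python
import jax
import jax.numpy as jnp

zeros, ones, starts = jnp.zeros(5), jnp.ones(3), jnp.arange(1, 4)
expected = jnp.vstack([jax.lax.dynamic_update_slice(zeros, ones, (i,)) for i in starts])
actual = jax.vmap(jax.lax.dynamic_update_slice, (None, None, 0))(zeros, ones, (starts,))
# With the mode=CLIP change above, `actual` matches `expected` row for row.
print(expected)
print(actual)
```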
| Thanks for the report; it looks like this is a bug in the `dynamic_update_slice` batching rule: https://github.com/google/jax/blob/04369a35888581b2c93abccffbe61fb3db8c460e/jax/_src/lax/slicing.py#L918-L936
On first look, I think the issue may be that it should use `mode=CLIP` rather than `mode=PROMISE_IN_BOUNDS`, but I'd have to dig deeper to be sure. | 2022-01-04T20:36:43 |
google/jax | 9,114 | google__jax-9114 | [
"9107"
] | 91e81672ebc7a3b1f67bdb93780968dc84085c37 | diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -50,7 +50,7 @@
from jax.interpreters import pxla
from jax import lax
from jax._src import device_array
-from jax._src.lax.lax import _array_copy
+from jax._src.lax.lax import _array_copy, _float_to_int_for_sort
from jax._src.ops import scatter
from jax._src.util import (unzip2, prod as _prod, subvals, safe_zip, ceil_of_ratio,
canonicalize_axis as _canonicalize_axis, maybe_named_axis)
@@ -6456,7 +6456,11 @@ def _searchsorted(a, v, side):
if len(a) == 0:
return 0
op = operator.le if side == 'left' else operator.lt
-
+ # TODO(jakevdp): handle NaNs correctly for complex. This will likely involve
+ # adding lexicographic sorting capabilities to the following.
+ a, v = _promote_dtypes(a, v)
+ if issubdtype(a.dtype, floating):
+ a, v = map(_float_to_int_for_sort, (a, v))
def body_fun(i, state):
low, high = state
mid = (low + high) // 2
| diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py
--- a/tests/lax_numpy_test.py
+++ b/tests/lax_numpy_test.py
@@ -2978,6 +2978,20 @@ def testSearchsorted(self, ashape, vshape, side, dtype):
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
+ @parameterized.named_parameters(jtu.cases_from_list(
+ {"testcase_name": f"_dtype={dtype.__name__}", "dtype": dtype}
+ for dtype in inexact_dtypes))
+ def testSearchsortedNans(self, dtype):
+ if np.issubdtype(dtype, np.complexfloating):
+ raise SkipTest("Known failure for complex inputs; see #9107")
+ sorted = jnp.array([-np.nan, -np.inf, -1, 0, 1, np.inf, np.nan], dtype=dtype)
+ self.assertArraysEqual(
+ jnp.searchsorted(sorted, sorted, side='left'),
+ jnp.arange(len(sorted)))
+ self.assertArraysEqual(
+ jnp.searchsorted(sorted, sorted, side='right'),
+ jnp.arange(1, 1 + len(sorted)))
+
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_x={}_bins={}_right={}_reverse={}".format(
jtu.format_shape_dtype_string(xshape, dtype),
| `jnp.searchsorted` returns different values to `np.searchsorted`
```python
import numpy as np
import jax.numpy as jnp
x = np.arange(10, dtype=float)
x[4:] = jnp.nan
np.searchsorted(x, 0) # 0
jnp.searchsorted(x, 0) # DeviceArray(10, dtype=int32)
```
It doesn't look like `jnp.searchsorted` handles NaNs correctly.
| I think this is expected: for `searchsorted` to work correctly, the array must be sorted in increasing order. But for your input I get this:
```python
>>> x[:-1] < x[1:]
array([ True, True, True, False, False, False, False, False, False])
```
which implies the array is not in a sorted order, which means the output is ill-defined. We have not made any effort to ensure that JAX's choices for ill-defined outputs match numpy's, as the details depend on the implementation, and so it would be quite difficult to do so (side note: even for `sort()`, JAX treats NaNs differently than NumPy does; see #6443)
What do you think? Is this something you can work around?
Thinking about it more... we could consider this defined behavior in that `sort()` is defined for `NaN` (and `inf`) entries. With that in mind, perhaps we should require that the index returned by `searchsorted` is one that would leave the array unchanged under JAX's definition of `sort()`. This could be done fairly easily, by first calling `_float_to_int_for_sort` on the entries, as is done when `sort()` is called on a float array: https://github.com/google/jax/blob/f96761cdc861ed6ecaa3bd1b84ca88b257d1b601/jax/_src/lax/lax.py#L3649
That still would not exactly match NumPy's behavior because of the handling of negative NaNs (cf. #6443) but it would definitely be more sensible than the current behavior. What do you think?
(side-note: this is quite timely because I've been working on `searchsorted` all this afternoon: #9108)
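Concretely, the proposal means that once NaNs are ordered the way `jnp.sort` orders them, searching a sorted-with-NaNs array for its own values is the identity (this mirrors the regression test added above):
```python
import numpy as np
import jax.numpy as jnp

sorted_vals = jnp.array([-np.inf, -1.0, 0.0, 1.0, np.inf, np.nan])
print(jnp.searchsorted(sorted_vals, sorted_vals, side='left'))
# Expected with the fix: [0 1 2 3 4 5]
```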
I think your proposal to handle NaNs as per `jnp.sort` sounds like a good one. Thanks!
This is the choice NumPy has made as well: ["As of NumPy 1.4.0 searchsorted works with real/complex arrays containing nan values. The enhanced sort order is documented in sort."](https://numpy.org/doc/stable/reference/generated/numpy.searchsorted.html) (And it would certainly handle my use case.)
The point about negative NaNs is certainly an oddity - since the concept of a negative NaN isn't technically defined - but agreed that this is still more sensible than the current behaviour. | 2022-01-06T17:21:06 |
google/jax | 9,142 | google__jax-9142 | [
"9034"
] | 0e201425e68f66c929433043c311762f60fef96a | diff --git a/jax/_src/lib/xla_bridge.py b/jax/_src/lib/xla_bridge.py
--- a/jax/_src/lib/xla_bridge.py
+++ b/jax/_src/lib/xla_bridge.py
@@ -57,7 +57,8 @@
'jax_xla_backend', '',
'Deprecated, please use --jax_platforms instead.')
flags.DEFINE_string(
- 'jax_backend_target', '',
+ 'jax_backend_target',
+ os.getenv('JAX_BACKEND_TARGET', '').lower(),
'Either "local" or "rpc:address" to connect to a remote service target.')
# TODO(skye): warn when this is used once we test out --jax_platforms a bit
flags.DEFINE_string(
| Some JAX flags cannot be set by environment variables
Currently, the `JAX_ENABLE_X64` flag can be set by environment variable:
```sh
$ export JAX_ENABLE_X64=True
$ python
>>> from jax.config import config
>>> config.FLAGS.jax_enable_x64
True
```
However, some flags cannot:
```sh
$ export JAX_XLA_BACKEND=tpu_driver
$ export JAX_BACKEND_TARGET="grpc://$COLAB_TPU_ADDR"
$ python
>>> from jax.config import config
>>> config.FLAGS.jax_xla_backend
''
>>> config.FLAGS.jax_backend_target
''
```
| Thanks for raising this!
@skye since you were looking at this stuff recently, could you take a look at this issue?
@mattjj in which module should I look for this?
> in which module should I look for this?
@bharatnishant are you asking about these particular flags, or about configuration options in general?
In general, the 'good' config stuff is in jax/_src/config.py. That config stuff is good because it has a uniform way to set up environment variables, flags, and context managers. Some flags are defined elsewhere, and you can find those just by grepping for them.
The `jax_xla_backend` option is instead [defined in xla_bridge.py](https://github.com/google/jax/blob/68e9e1c26d5d9439d03d09a10b8f9b26e8258383/jax/_src/lib/xla_bridge.py#L57), but as you can see it's deprecated in favor of [`jax_platforms`, which does have an env var](https://github.com/google/jax/blob/68e9e1c26d5d9439d03d09a10b8f9b26e8258383/jax/_src/lib/xla_bridge.py#L69). @ayaka14732 I suggest using `export JAX_PLATFORMS=tpu_driver` instead of using `JAX_XLA_BACKEND`.
As for `JAX_BACKEND_TARGET`, it looks like that [doesn't have an environment variable](https://github.com/google/jax/blob/68e9e1c26d5d9439d03d09a10b8f9b26e8258383/jax/_src/lib/xla_bridge.py#L60). Maybe we should add one there...
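For reference, a standalone sketch of that ad-hoc pattern (essentially what the patch above does in `xla_bridge.py`, written here against `absl.flags` directly):
```python
import os
from absl import flags

# The flag's default is read from the environment, so
# `export JAX_BACKEND_TARGET=...` is honored even without a command-line flag.
flags.DEFINE_string(
    'jax_backend_target',
    os.getenv('JAX_BACKEND_TARGET', '').lower(),
    'Either "local" or "rpc:address" to connect to a remote service target.')
```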
I don't think #9118 touches `JAX_BACKEND_TARGET`, so I'll add an environment variable there now (in an ad-hoc way, rather than using general jax/_src/config.py stuff). @skye lmk if that's mistaken. | 2022-01-08T21:00:36 |
|
google/jax | 9,178 | google__jax-9178 | [
"6443"
] | 0532a6326114b4282b837f97b2870307d5a7494f | diff --git a/jax/_src/lax/lax.py b/jax/_src/lax/lax.py
--- a/jax/_src/lax/lax.py
+++ b/jax/_src/lax/lax.py
@@ -960,8 +960,11 @@ def _reduce_and(operand: Array, axes: Sequence[int]) -> Array:
def sort(operand: Union[Array, Sequence[Array]], dimension: int = -1,
is_stable: bool = True, num_keys: int = 1) -> Union[Array, Tuple[Array, ...]]:
"""Wraps XLA's `Sort
- <https://www.tensorflow.org/xla/operation_semantics#sort>`_
- operator.
+ <https://www.tensorflow.org/xla/operation_semantics#sort>`_ operator.
+
+ For floating point inputs, -0.0 and 0.0 are treated as equivalent, and NaN values
+ are sorted to the end of the array. For complex inputs, the sort order is
+ lexicographic over the real and imaginary parts, with the real part primary.
Args:
operand : Array or sequence of arrays
@@ -3671,8 +3674,10 @@ def _float_to_int_for_sort(x):
# x = bit_cast<int32>(f);
# y = x < 0 ? int32_max - x : x;
# then y is ordered as an int32 such that finite values have the obvious
- # order, -0 is ordered before 0, and -NaN and NaN appear at the beginning
- # and end of the ordering.
+ # order. In this scheme, -0 would be before 0, and -NaN and NaN appear at
+ # the beginning and end of the ordering. This causes issues for stable
+ # sorts, so we avoid this by standardizing the representation of zeros
+ # and NaNs in the output.
# Note that in order to avoid -x to overflow, we calculate
# int32_max - x as unsigned, and then convert back to signed.
if x.dtype == dtypes.bfloat16:
@@ -3683,6 +3688,17 @@ def _float_to_int_for_sort(x):
signed = bitcast_convert_type(x, signed_dtype)
unsigned = bitcast_convert_type(x, unsigned_dtype)
+
+ # We cannot standardize zeros in x because XLA elides this is some cases.
+ # We cannot standardize NaNs in x because it triggers jax.debug_nans
+ # So instead we do these replacements in the signed integer representation.
+
+ # Standardize zeros:
+ signed = select(eq(x, _zero(x)), _zeros(signed), signed)
+ # Standardize nans:
+ signed_nan = x.dtype.type(np.nan).view(signed_dtype)
+ signed = select(_isnan(x), full_like(signed, signed_nan), signed)
+
flipped = bitcast_convert_type(
sub(unsigned_dtype.type(np.iinfo(signed_dtype).max), unsigned), signed_dtype)
return select(lt(signed, _zero(signed)), flipped, signed)
@@ -3690,7 +3706,8 @@ def _float_to_int_for_sort(x):
# Default comparator that sorts the operands lexicographically on the
# first `num_keys` arguments.
# For floating point types, a total order is created where
-# -NaN < -infinity < ... < -0 < 0 < ... < infinity < NaN.
+# -infinity < ... < 0 < ... < infinity < NaN.
+# 0.0 and -0.0 are treated as equivalent, as are all NaN representations.
# For complex types, the (real, imag) pairs are sorted lexicographically
# (following NumPy's semantics).
# This code adds complex-number support and lexicographic ordering to the algorithm from:
@@ -4364,6 +4381,9 @@ def _const(example, val):
dtype: Callable = partial(dtypes.dtype, canonicalize=True)
_dtype: Callable = partial(dtypes.dtype, canonicalize=True)
+def _isnan(x) -> bool:
+ return ne(x, x)
+
def _iscomplex(x) -> bool:
return dtypes.issubdtype(_dtype(x), np.complexfloating)
| diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py
--- a/tests/lax_numpy_test.py
+++ b/tests/lax_numpy_test.py
@@ -2979,18 +2979,20 @@ def testSearchsorted(self, ashape, vshape, side, dtype):
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
- {"testcase_name": f"_dtype={dtype.__name__}", "dtype": dtype}
- for dtype in inexact_dtypes))
- def testSearchsortedNans(self, dtype):
+ {"testcase_name": f"_dtype={dtype.__name__}_side={side}", "dtype": dtype, "side": side}
+ for dtype in inexact_dtypes
+ for side in ['left', 'right']))
+ def testSearchsortedNans(self, dtype, side):
if np.issubdtype(dtype, np.complexfloating):
raise SkipTest("Known failure for complex inputs; see #9107")
- sorted = jnp.array([-np.nan, -np.inf, -1, 0, 1, np.inf, np.nan], dtype=dtype)
- self.assertArraysEqual(
- jnp.searchsorted(sorted, sorted, side='left'),
- jnp.arange(len(sorted)))
- self.assertArraysEqual(
- jnp.searchsorted(sorted, sorted, side='right'),
- jnp.arange(1, 1 + len(sorted)))
+ x = np.array([-np.inf, -1.0, 0.0, -0.0, 1.0, np.inf, np.nan, -np.nan], dtype=dtype)
+ # The sign bit should not matter for 0.0 or NaN, so argsorting the above should be
+ # equivalent to argsorting the following:
+ x_equiv = np.array([0, 1, 2, 2, 3, 4, 5, 5])
+
+ fun = partial(jnp.searchsorted, side=side)
+ self.assertArraysEqual(fun(x, x), fun(x_equiv, x_equiv))
+ self.assertArraysEqual(jax.jit(fun)(x, x), fun(x_equiv, x_equiv))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_x={}_bins={}_right={}_reverse={}".format(
diff --git a/tests/lax_test.py b/tests/lax_test.py
--- a/tests/lax_test.py
+++ b/tests/lax_test.py
@@ -1872,6 +1872,19 @@ def testSort(self, shape, dtype, axis, is_stable):
fun = lambda x: lax.sort(x, dimension=axis, is_stable=is_stable)
self._CompileAndCheck(fun, args_maker)
+ @parameterized.named_parameters(jtu.cases_from_list(
+ {"testcase_name": f"_dtype={dtype.__name__}", "dtype": dtype}
+ for dtype in float_dtypes))
+ def testSortFloatSpecialValues(self, dtype):
+ # Test confirms that
+ # - NaNs are sorted to the end, regardless of representation
+ # - sign bit of 0.0 is ignored
+ x = jnp.array([-np.inf, 0.0, -0.0, np.inf, np.nan, -np.nan], dtype=dtype)
+ index = lax.iota(dtypes.int_, x.size)
+ argsort = lambda x: lax.sort_key_val(x, lax.iota(dtypes.int_, x.size), is_stable=True)[1]
+ self.assertArraysEqual(argsort(x), index)
+ self.assertArraysEqual(jax.jit(argsort)(x), index)
+
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_axis={}_isstable={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis, is_stable),
| jnp.sort and np.sort treat negative NaNs differently
```python
import jax.numpy as jnp
import numpy as np
values = jnp.array([-np.nan, np.nan, -np.inf, np.inf, -1, 1])
print(np.sort(values))
# [-inf -1. 1. inf nan nan]
print(jnp.sort(values))
# [ nan -inf -1. 1. inf nan]
```
This appears to be deliberate behavior on the part of `lax.sort`, as seen in the code comment here: https://github.com/google/jax/blob/3c362ced9090fa790a5347ce47d4b210808415e2/jax/_src/lax/lax.py#L5845-L5849
I'm not sure whether the best fix here is to change the behavior of `jnp.sort`, or to simply document that this is a known semantic difference.
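For reference, the patch above changes `lax.sort` so that 0.0 and -0.0 compare as equivalent and every NaN representation sorts to the end; the new regression test expresses this as a stable argsort acting as the identity on:
```python
import numpy as np
import jax.numpy as jnp

x = jnp.array([-np.inf, 0.0, -0.0, np.inf, np.nan, -np.nan])
print(jnp.argsort(x))
# Expected with the fix: [0 1 2 3 4 5]
```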
| A similar difference between `lax` and `numpy` comes up in the treatment of negative zeros:
```python
>>> x = np.array([0.0, -0.0])
>>> np.argsort(x)
array([0, 1])
>>> jnp.argsort(x)
DeviceArray([1, 0], dtype=int32)
``` | 2022-01-12T19:43:50 |
google/jax | 9,184 | google__jax-9184 | [
"9182"
] | 3b374e7dd9558b70a3c3b4378c7bbf7921c34a5f | diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -5596,6 +5596,11 @@ def replace(tup, val):
@partial(jit, static_argnums=1)
def _unique_sorted_mask(ar, axis):
aux = moveaxis(ar, axis, 0)
+ if issubdtype(aux.dtype, np.complexfloating):
+ # Work around issue in sorting of complex numbers with Nan only in the
+ # imaginary component. This can be removed if sorting in this situation
+ # is fixed to match numpy.
+ aux = where(isnan(aux), lax._const(aux, nan), aux)
size, *out_shape = aux.shape
if _prod(out_shape) == 0:
size = 1
@@ -5604,7 +5609,13 @@ def _unique_sorted_mask(ar, axis):
perm = lexsort(aux.reshape(size, _prod(out_shape)).T[::-1])
aux = aux[perm]
if aux.size:
- mask = ones(size, dtype=bool).at[1:].set(any(aux[1:] != aux[:-1], tuple(range(1, aux.ndim))))
+ if issubdtype(aux.dtype, inexact):
+ # This is appropriate for both float and complex due to the documented behavior of np.unique:
+ # See https://github.com/numpy/numpy/blob/v1.22.0/numpy/lib/arraysetops.py#L212-L220
+ neq = lambda x, y: lax.ne(x, y) & ~(isnan(x) & isnan(y))
+ else:
+ neq = lax.ne
+ mask = ones(size, dtype=bool).at[1:].set(any(neq(aux[1:], aux[:-1]), tuple(range(1, aux.ndim))))
else:
mask = zeros(size, dtype=bool)
return aux, mask, perm
| diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py
--- a/tests/lax_numpy_test.py
+++ b/tests/lax_numpy_test.py
@@ -2479,6 +2479,29 @@ def np_fun(x, fill_value=fill_value):
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
+ @unittest.skipIf(numpy_version < (1, 21), "Numpy < 1.21 does not properly handle NaN values in unique.")
+ @parameterized.named_parameters(jtu.cases_from_list(
+ {"testcase_name": f"_{dtype.__name__}", "dtype": dtype}
+ for dtype in inexact_dtypes))
+ def testUniqueNans(self, dtype):
+ def args_maker():
+ x = [-0.0, 0.0, 1.0, 1.0, np.nan, -np.nan]
+ if np.issubdtype(dtype, np.complexfloating):
+ x = [complex(i, j) for i, j in itertools.product(x, repeat=2)]
+ return [np.array(x, dtype=dtype)]
+
+ kwds = dict(return_index=True, return_inverse=True, return_counts=True)
+ jnp_fun = partial(jnp.unique, **kwds)
+ def np_fun(x):
+ dtype = x.dtype
+ # numpy unique fails for bfloat16 NaNs, so we cast to float64
+ if x.dtype == jnp.bfloat16:
+ x = x.astype('float64')
+ u, *rest = np.unique(x, **kwds)
+ return (u.astype(dtype), *rest)
+ self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
+
+
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_fixed_size={}".format(fixed_size),
"fixed_size": fixed_size}
| jnp.unique does not properly handle NaN values
Example:
```python
import numpy as np
import jax.numpy as jnp
x = np.array([0, 1, np.nan, np.nan])
print(np.unique(x))
# [ 0., 1., nan]
print(jnp.unique(x))
# [ 0., 1., nan, nan]
```
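For reference, a standalone sketch of the comparison rule the patch above uses to collapse repeated NaNs when building the uniqueness mask (the variable names are just for the sketch):
```python
# Sketch: treat two values as "different" only if they compare unequal and are
# not both NaN; applied to adjacent elements of the sorted array this collapses
# duplicate NaNs into one, matching the documented behavior of np.unique.
import jax.numpy as jnp

def neq(x, y):
    return (x != y) & ~(jnp.isnan(x) & jnp.isnan(y))

x = jnp.sort(jnp.array([0.0, 1.0, jnp.nan, jnp.nan]))
mask = jnp.ones(x.shape, dtype=bool).at[1:].set(neq(x[1:], x[:-1]))
print(x[mask])  # expected: [ 0.  1. nan]
```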
| 2022-01-12T23:27:51 |
|
google/jax | 9,193 | google__jax-9193 | [
"9192"
] | d583f8761849d302cc036a3b9a3326bace87e613 | diff --git a/jax/_src/api.py b/jax/_src/api.py
--- a/jax/_src/api.py
+++ b/jax/_src/api.py
@@ -571,15 +571,15 @@ def compiler_ir(self):
representation of the program after such passes, whenever
possible.
"""
- return self._executable.xla_executable().hlo_modules()
+ return self._executable.xla_executable.hlo_modules()
def runtime_executable(self):
- return self._executable.xla_executable()
+ return self._executable.xla_executable
def _xla_executable(self):
# TODO(frostig): finalize API. For now, return the underlying
# executable directly via this method.
- return self._executable.xla_executable()
+ return self._executable.xla_executable
def __call__(self, *args, **kwargs):
if self._no_kwargs:
diff --git a/jax/_src/dispatch.py b/jax/_src/dispatch.py
--- a/jax/_src/dispatch.py
+++ b/jax/_src/dispatch.py
@@ -622,6 +622,7 @@ def from_xla_computation(
def is_trivial(self):
return self._xla_executable == None
+ @property
def xla_executable(self):
if self.is_trivial():
raise ValueError("A trivial compiled computation has no XLA executable")
| diff --git a/tests/api_test.py b/tests/api_test.py
--- a/tests/api_test.py
+++ b/tests/api_test.py
@@ -844,6 +844,20 @@ def err():
"for a particular signature. Detected .*BatchTracer",
err)
+ def test_jit_lower_compiler_ir(self):
+ f = self.jit(lambda x: x + 4).lower(1.)
+ self.assertIsNotNone(f.compiler_ir())
+ self.assertIsNotNone(f.compiler_ir(dialect='hlo'))
+ self.assertIsNotNone(f.compiler_ir(dialect='mhlo'))
+
+ def test_jit_lower_compile_compiler_ir(self):
+ f = self.jit(lambda x: x + 4).lower(1.).compile()
+ self.assertIsNotNone(f.compiler_ir())
+
+ def test_jit_lower_compile_executable(self):
+ f = self.jit(lambda x: x + 4).lower(1.).compile()
+ self.assertIsNotNone(f.runtime_executable())
+
@unittest.skipIf(xla_extension_version < 45, "requires jaxlib >= 0.1.75")
def test_jit_enum_as_dict_keys_fails(self):
class E(enum.Enum):
diff --git a/tests/pjit_test.py b/tests/pjit_test.py
--- a/tests/pjit_test.py
+++ b/tests/pjit_test.py
@@ -726,6 +726,48 @@ def f(x, y):
"called with:\n.*int32.*",
lambda: exe(x_i32, x_i32))
+ @jtu.with_mesh([('x', 2), ('y', 2)])
+ def testLowerCompilerIR(self):
+ @partial(pjit,
+ in_axis_resources=P(('x', 'y'),),
+ out_axis_resources=P(('x', 'y'),))
+ def f(x, y):
+ return x @ y
+
+ shape = (8, 8)
+ x = jnp.arange(np.prod(shape)).reshape(shape)
+ f = f.lower(x, x + 1)
+ self.assertIsNotNone(f.compiler_ir())
+ self.assertIsNotNone(f.compiler_ir(dialect='hlo'))
+ self.assertIsNotNone(f.compiler_ir(dialect='mhlo'))
+
+ @jtu.with_mesh([('x', 2), ('y', 2)])
+ def testLowerCompileCompilerIR(self):
+ @partial(pjit,
+ in_axis_resources=P(('x', 'y'),),
+ out_axis_resources=P(('x', 'y'),))
+ def f(x, y):
+ return x @ y
+
+ shape = (8, 8)
+ x = jnp.arange(np.prod(shape)).reshape(shape)
+ f = f.lower(x, x + 1).compile()
+ self.assertIsNotNone(f.compiler_ir())
+
+ @jtu.with_mesh([('x', 2), ('y', 2)])
+ def testLowerCompileExecutable(self):
+ @partial(pjit,
+ in_axis_resources=P(('x', 'y'),),
+ out_axis_resources=P(('x', 'y'),))
+ def f(x, y):
+ return x @ y
+
+ shape = (8, 8)
+ x = jnp.arange(np.prod(shape)).reshape(shape)
+
+ f = f.lower(x, x + 1).compile()
+ self.assertIsNotNone(f.runtime_executable())
+
@jtu.with_mesh([('x', 2)])
def test_static_argnums(self):
@partial(pjit, in_axis_resources=None, out_axis_resources=None,
diff --git a/tests/pmap_test.py b/tests/pmap_test.py
--- a/tests/pmap_test.py
+++ b/tests/pmap_test.py
@@ -218,6 +218,29 @@ def testLowerCompileTrivialMultiArg(self):
ans = f_exe(x, y)
self.assertAllClose(ans, expected)
+ def testLowerCompilerIR(self):
+ f = self.pmap(lambda x: x - lax.pmean(x, 'i'), axis_name='i')
+ shape = (jax.device_count(), 4)
+ x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
+ f = f.lower(x)
+ self.assertIsNotNone(f.compiler_ir())
+ self.assertIsNotNone(f.compiler_ir(dialect='hlo'))
+ self.assertIsNotNone(f.compiler_ir(dialect='mhlo'))
+
+ def testLowerCompileCompilerIR(self):
+ f = self.pmap(lambda x: x - lax.pmean(x, 'i'), axis_name='i')
+ shape = (jax.device_count(), 4)
+ x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
+ f = f.lower(x).compile()
+ self.assertIsNotNone(f.compiler_ir())
+
+ def testLowerCompileExecutable(self):
+ f = self.pmap(lambda x: x - lax.pmean(x, 'i'), axis_name='i')
+ shape = (jax.device_count(), 4)
+ x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
+ f = f.lower(x).compile()
+ self.assertIsNotNone(f.runtime_executable())
+
def testMean(self):
f = self.pmap(lambda x: x - lax.pmean(x, 'i'), axis_name='i')
diff --git a/tests/xmap_test.py b/tests/xmap_test.py
--- a/tests/xmap_test.py
+++ b/tests/xmap_test.py
@@ -660,6 +660,26 @@ def testLowerCompileArgTypeMismatch(self):
"called with:\n.*int32.*",
lambda: f_exe(x_i32))
+ def testLowerCompilerIR(self):
+ f = xmap(lambda x: x + 4, in_axes=['i', ...], out_axes=['i', ...])
+ x = jnp.arange(4, dtype=jnp.float32).reshape((2, 2))
+ f = f.lower(x)
+ self.assertIsNotNone(f.compiler_ir())
+ self.assertIsNotNone(f.compiler_ir(dialect='hlo'))
+ self.assertIsNotNone(f.compiler_ir(dialect='mhlo'))
+
+ def testLowerCompileCompilerIR(self):
+ f = xmap(lambda x: x + 4, in_axes=['i', ...], out_axes=['i', ...])
+ x = jnp.arange(4, dtype=jnp.float32).reshape((2, 2))
+ f = f.lower(x).compile()
+ self.assertIsNotNone(f.compiler_ir())
+
+ def testLowerCompileExecutable(self):
+ f = xmap(lambda x: x + 4, in_axes=['i', ...], out_axes=['i', ...])
+ x = jnp.arange(4, dtype=jnp.float32).reshape((2, 2))
+ f = f.lower(x).compile()
+ self.assertIsNotNone(f.runtime_executable())
+
def testNewCheckpoint(self):
f = checkpoint(xmap(lambda x: x, in_axes=['i', ...], out_axes=['i', ...]))
self.assertAllClose(jax.grad(lambda x: f(x).sum())(jnp.arange(3.)), jnp.ones(3))
| Inconsistent `Compiled._executable`
There's an inconsistency between jit and parallel executables:
`xla_executable` is a method on `XlaCompiledComputation`, but a slot on the others, so the accesses in `Compiled` fail (`.xla_executable` vs. `.xla_executable()`).
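For context, a sketch of the access pattern the tests in the patch above exercise once `xla_executable` behaves the same way everywhere (the lambda and inputs are placeholders):
```python
# Sketch of the lower/compile/inspect flow the fix is meant to make uniform
# across jit, pmap, pjit and xmap.
import jax

lowered = jax.jit(lambda x: x + 4).lower(1.0)
print(lowered.compiler_ir(dialect="mhlo"))

compiled = lowered.compile()
print(compiled.compiler_ir())          # HLO modules of the built executable
print(compiled.runtime_executable())   # the underlying runtime executable
```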
| cc @froystig @skye | 2022-01-13T23:46:02 |
google/jax | 9,239 | google__jax-9239 | [
"5647"
] | d6ea8b28973c820f2f44ab347d34767d94e10763 | diff --git a/jax/_src/scipy/linalg.py b/jax/_src/scipy/linalg.py
--- a/jax/_src/scipy/linalg.py
+++ b/jax/_src/scipy/linalg.py
@@ -289,28 +289,20 @@ def _calc_P_Q(A):
A_L1 = np_linalg.norm(A,1)
n_squarings = 0
if A.dtype == 'float64' or A.dtype == 'complex128':
- U3, V3 = _pade3(A)
- U5, V5 = _pade5(A)
- U7, V7 = _pade7(A)
- U9, V9 = _pade9(A)
maxnorm = 5.371920351148152
n_squarings = jnp.maximum(0, jnp.floor(jnp.log2(A_L1 / maxnorm)))
A = A / 2**n_squarings
- U13, V13 = _pade13(A)
conds = jnp.array([1.495585217958292e-002, 2.539398330063230e-001,
9.504178996162932e-001, 2.097847961257068e+000])
- U = jnp.select((A_L1 < conds), (U3, U5, U7, U9), U13)
- V = jnp.select((A_L1 < conds), (V3, V5, V7, V9), V13)
+ idx = jnp.digitize(A_L1, conds)
+ U, V = lax.switch(idx, [_pade3, _pade5, _pade7, _pade9, _pade13], A)
elif A.dtype == 'float32' or A.dtype == 'complex64':
- U3,V3 = _pade3(A)
- U5,V5 = _pade5(A)
maxnorm = 3.925724783138660
n_squarings = jnp.maximum(0, jnp.floor(jnp.log2(A_L1 / maxnorm)))
A = A / 2**n_squarings
- U7,V7 = _pade7(A)
conds = jnp.array([4.258730016922831e-001, 1.880152677804762e+000])
- U = jnp.select((A_L1 < conds), (U3, U5), U7)
- V = jnp.select((A_L1 < conds), (V3, V5), V7)
+ idx = jnp.digitize(A_L1, conds)
+ U, V = lax.switch(idx, [_pade3, _pade5, _pade7], A)
else:
raise TypeError("A.dtype={} is not supported.".format(A.dtype))
P = U + V # p_m(A) : numerator
| Redundant computation in expm?
Hi all:
It looks to me like there are redundant calculations in the `expm` function, but I wanted to ask in case I'm missing something (perhaps something clever is happening in compilation).
E.g., for `float64` and `complex128`, the numerator/denominator for the Pade approximant is generated as follows:
```
if A.dtype == 'float64' or A.dtype == 'complex128':
    U3, V3 = _pade3(A)
    U5, V5 = _pade5(A)
    U7, V7 = _pade7(A)
    U9, V9 = _pade9(A)
    maxnorm = 5.371920351148152
    n_squarings = jnp.maximum(0, jnp.floor(jnp.log2(A_L1 / maxnorm)))
    A = A / 2**n_squarings
    U13, V13 = _pade13(A)
    conds = jnp.array([1.495585217958292e-002, 2.539398330063230e-001,
                       9.504178996162932e-001, 2.097847961257068e+000])
    U = jnp.select((A_L1 < conds), (U3, U5, U7, U9), U13)
    V = jnp.select((A_L1 < conds), (V3, V5, V7, V9), V13)
```
The algorithm being used (I believe by Al-Mohy and Higham, the same as in `scipy`) prescribes choosing one of 5 different Pade approximations (of various orders) based on the norm of `A`. In the above, it appears all potential Pade approximations are being computed, and only afterwards is one chosen based on the norm of `A`. Hence, the computations for the others are discarded and ultimately unnecessary.
It seems this function could benefit from changing the above logic to use nested `lax.cond` statements (or perhaps `switch`?) to avoid computing all possible versions. To motivate this: I quickly tried modifying the `expm` function to simply always do the `U13, V13` version of the computation (the most expensive) and not compute any of the lower-order versions, and observed roughly a factor of 2 speed increase in compiled execution. My speed tests weren't rigorous - I didn't test a variety of norms - but I believe this should speed things up in all cases (again, unless I'm missing something about lax behaviour). Edit: This is on CPU.
Thoughts?
P.s. In some of my use cases of jax, the most expensive piece is a ton of matrix exponentials, so I'm very interested in potential speed gains for this function :D.
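For concreteness, a minimal sketch of the `switch`-based structure described above (the `_pade*` helpers stand for the existing internal functions and are only referenced schematically here):
```python
# Schematic only: select a single Pade branch with lax.switch instead of
# evaluating every branch and discarding most of them via jnp.select.
import jax.numpy as jnp
from jax import lax

def choose_pade(A, A_L1, conds, branches):
    # branches might be [_pade3, _pade5, _pade7, _pade9, _pade13]
    idx = jnp.digitize(A_L1, conds)      # which norm interval A_L1 falls into
    return lax.switch(idx, branches, A)  # only the chosen branch is evaluated
```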
| This sounds right to me! Indeed using `jnp.select` is wasting compute, and `cond`/`switch` could save it. I don't remember any context around why `expm` was written this way (@zhangqiaorjc do you remember?) but it may just have been for simplicity. In that case, what you describe would be a welcome upgrade!
I guess that on GPU it _might_ be better to compute everything rather than pass control flow back to the host. But that's really an optimization problem for XLA GPU, not something we should worry about in JAX.
In general, matrix-matrix multiplication (L3 BLAS) seems like the level of computation where we should be using `cond` rather than `where`.
Does `cond` always pass control flow back to the host on GPU? That sounds plausible, though also it seems like at least in some cases XLA:GPU could avoid that. (At the very least, it could decide to optimize a `cond` into a `select` if needed!) On TPU (where control flow can stay on the device) and CPU (where there's no host/device separation) it seems profitable.
Good rule of thumb!
If there are no objections, given how small this is, and my personal interest in seeing it go through asap, I'm willing to implement this change. My plan would be to just change it to use nested `cond` statements. In a PR I can show some local CPU benchmarks.
@DanPuzzuoli Feel free to send in a PR. I don't think anyone from JAX team is currently working on this. Thanks! | 2022-01-19T15:20:10 |
|
google/jax | 9,276 | google__jax-9276 | [
"9274"
] | 31b53084987053e64df31ee2004fde681942d2ec | diff --git a/jax/_src/lax/lax.py b/jax/_src/lax/lax.py
--- a/jax/_src/lax/lax.py
+++ b/jax/_src/lax/lax.py
@@ -4150,9 +4150,17 @@ def _rng_bit_generator_translation_rule(
# sidestep issues with the jax_enable_x64=False configuration. As a result, we
# need to convert u32[4] -> u64[2] here in the translation rule. However, we
# also polymorphically allow a u64[2] for backward compatibility.
+ #
+ # Separately, xops.RngBitGenerator doesn't support generating u8 or
+ # u16, so we request u32 and truncate in that case.
assert ((key_shape == (4,) and key_dtype == np.dtype('uint32')) or
(key_shape == (2,) and key_dtype == np.dtype('uint64'))), (key_shape, key_dtype)
- xla_shape = xc.Shape.array_shape(np.dtype(dtype), shape)
+ dtype = np.dtype(dtype)
+ if dtype == np.dtype('uint32') or dtype == np.dtype('uint64'):
+ rbg_dtype = dtype
+ else:
+ rbg_dtype = np.dtype('uint32')
+ xla_shape = xc.Shape.array_shape(rbg_dtype, shape)
if key_dtype == np.dtype('uint32'):
u64_etype = xla.dtype_to_primitive_type(np.dtype('uint64'))
key = xops.BitcastConvertType(xops.Reshape(key, (2, 2)), u64_etype)
@@ -4161,6 +4169,9 @@ def _rng_bit_generator_translation_rule(
if key_dtype == np.dtype('uint32'):
u32_etype = xla.dtype_to_primitive_type(np.dtype('uint32'))
out_key = xops.Reshape(xops.BitcastConvertType(out_key, u32_etype), (4,))
+ if rbg_dtype != dtype:
+ out_vals = xops.ConvertElementType(
+ out_vals, xla.dtype_to_primitive_type(dtype))
return [out_key, out_vals]
@@ -4199,6 +4210,10 @@ def rng_bit_generator(key, shape, dtype=np.uint32,
friendly API.
"""
shape = jax.core.canonicalize_shape(shape)
+ dtype = dtypes.canonicalize_dtype(dtype)
+ if np.dtype(dtype) not in {np.dtype('uint8'), np.dtype('uint16'),
+ np.dtype('uint32'), np.dtype('uint64')}:
+ raise TypeError(f'rng_bit_generator: unsupported dtype {dtype}')
return tuple(
rng_bit_generator_p.bind(
key, shape=shape, dtype=dtype, algorithm=algorithm))
| diff --git a/tests/lax_test.py b/tests/lax_test.py
--- a/tests/lax_test.py
+++ b/tests/lax_test.py
@@ -2501,6 +2501,18 @@ def fn(k):
self.assertArraysEqual(out[0], out_jit[0])
self.assertArraysEqual(out[1], out_jit[1])
+ def testRngBitGenerator2(self):
+ def f(key):
+ return lax.rng_bit_generator(key, shape=(5, 7))
+
+ key = np.array((1, 2, 3, 4)).astype(np.uint32)
+ out1 = f(key)
+ out2 = jax.jit(f)(key)
+ self.assertEqual(out1[0].shape, (4,))
+ self.assertEqual(out1[1].shape, (5, 7))
+ self.assertArraysEqual(out1[0], out2[0])
+ self.assertArraysEqual(out1[1], out2[1])
+
@jtu.skip_on_devices("tpu")
def testRngBitGeneratorReturnedKey(self):
# This test ensures that the key bit-packing/unpacking operations used in
diff --git a/tests/random_test.py b/tests/random_test.py
--- a/tests/random_test.py
+++ b/tests/random_test.py
@@ -51,6 +51,11 @@ def _prng_key_as_array(key):
return key.unsafe_raw_array() if config.jax_enable_custom_prng else key
+PRNG_IMPLS = [('threefry2x32', prng.threefry_prng_impl),
+ ('rbg', prng.rbg_prng_impl),
+ ('unsafe_rbg', prng.unsafe_rbg_prng_impl)]
+
+
@jtu.with_config(jax_numpy_rank_promotion="raise")
class PrngTest(jtu.JaxTestCase):
@@ -128,6 +133,35 @@ def testRngRandomBits(self):
expected64 = np.array([676898860, 3164047411, 4010691890], dtype=np.uint32)
self.assertArraysEqual(bits64, expected64)
+ @parameterized.named_parameters(jtu.cases_from_list(
+ {"testcase_name": "_" + name, "prng_name": name}
+ for name, _ in PRNG_IMPLS))
+ def testRngRandomBitsShapeDtype(self, prng_name):
+ # Like testRngRandomBits, but only meant to exercise random_bits
+ # on every PRNG implementation. Instead of values, only checks
+ # that shapes/dtypes are as expected.
+
+ with jax.default_prng_impl(prng_name):
+ key = random.PRNGKey(1701)
+
+ bits8 = jax._src.random._random_bits(key, 8, (3,))
+ self.assertEqual(bits8.shape, (3,))
+ self.assertEqual(bits8.dtype, np.dtype('uint8'))
+
+ bits16 = jax._src.random._random_bits(key, 16, (3,))
+ self.assertEqual(bits16.shape, (3,))
+ self.assertEqual(bits16.dtype, np.dtype('uint16'))
+
+ bits32 = jax._src.random._random_bits(key, 32, (3,))
+ self.assertEqual(bits32.shape, (3,))
+ self.assertEqual(bits32.dtype, np.dtype('uint32'))
+
+ with jtu.ignore_warning(category=UserWarning, message="Explicitly requested dtype.*"):
+ bits64 = jax._src.random._random_bits(key, 64, (3,))
+ expected_dtype = np.dtype('uint64' if config.x64_enabled else 'uint32')
+ self.assertEqual(bits64.shape, (3,))
+ self.assertEqual(bits64.dtype, expected_dtype)
+
def testRngRandomBitsViewProperty(self):
# TODO: add 64-bit if it ever supports this property.
# TODO: will this property hold across endian-ness?
@@ -209,9 +243,7 @@ def test_prng_seeds_and_keys(self, seed, type, jit, key):
def test_default_prng_selection(self):
if not config.jax_enable_custom_prng:
self.skipTest("test requires config.jax_enable_custom_prng")
- for name, impl in [('threefry2x32', prng.threefry_prng_impl),
- ('rbg', prng.rbg_prng_impl),
- ('unsafe_rbg', prng.unsafe_rbg_prng_impl)]:
+ for name, impl in PRNG_IMPLS:
with jax.default_prng_impl(name):
self.assertIs(random.default_prng_impl(), impl)
key = random.PRNGKey(42)
@@ -223,9 +255,7 @@ def test_default_prng_selection(self):
def test_default_prng_selection_without_custom_prng_mode(self):
if config.jax_enable_custom_prng:
self.skipTest("test requires that config.jax_enable_custom_prng is False")
- for name, impl in [('threefry2x32', prng.threefry_prng_impl),
- ('rbg', prng.rbg_prng_impl),
- ('unsafe_rbg', prng.unsafe_rbg_prng_impl)]:
+ for name, impl in PRNG_IMPLS:
with jax.default_prng_impl(name):
self.assertIs(random.default_prng_impl(), impl)
key = random.PRNGKey(42)
@@ -1334,15 +1364,6 @@ def test_ravel(self):
keys = jnp.ravel(keys)
self.assertEqual(keys.shape, (4,))
-def _sampler_unimplemented_with_rbg(*args, **kwargs):
- # TODO(mattjj): enable these tests if/when RngBitGenerator supports them
- raise SkipTest('8- and 16-bit types not supported with RBG PRNG')
-
-for attr in dir(LaxRandomWithRBGPRNGTest):
- if 'int8' in attr or 'int16' in attr or 'float16' in attr:
- setattr(LaxRandomWithRBGPRNGTest, attr, _sampler_unimplemented_with_rbg)
- setattr(LaxRandomWithUnsafeRBGPRNGTest, attr, _sampler_unimplemented_with_rbg)
-
def _sampler_unimplemented_with_custom_prng(*args, **kwargs):
raise SkipTest('sampler only implemented for default RNG')
| RBG-based PRNG doesn't work with bf16
bf16 is the norm for TPU training, and an RBG-based PRNG is useful for speeding up random number generation for large models. It seems that our RBG-based PRNG doesn't yet support bf16. It would be nice to enable this.
See the repro below:
```python
import jax
import jax.numpy as jnp
print('-- default software PRNG')
print(jax.random.uniform(jax.random.PRNGKey(1), (2, 2), jnp.bfloat16))
print('-- RBG-based PRNG')
from jax import prng
jax.config.update('jax_enable_custom_prng', True)
def rbg_key(seed: int):
return prng.seed_with_impl(prng.rbg_prng_impl, seed)
jax.random.PRNGKey = rbg_key
print(jax.random.uniform(jax.random.PRNGKey(1), (2, 2), jnp.bfloat16))
```
Default PRNG works
```
-- default software PRNG
WARNING:absl:No GPU/TPU found, falling back to CPU. (Set TF_CPP_MIN_LOG_LEVEL=0 and rerun for more info.)
[[0.53125 0.453125]
[0.5 0.984375]]
```
RBG-based says unsupported
```
-- RBG-based PRNG
Traceback (most recent call last):
File "/usr/local/google/home/zhangqiaorjc/Downloads/t.py", line 16, in <module>
print(jax.random.uniform(jax.random.PRNGKey(1), (2, 2), jnp.bfloat16))
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/_src/random.py", line 239, in uniform
return _uniform(key, shape, dtype, minval, maxval) # type: ignore
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/_src/random.py", line 258, in _uniform
bits = _random_bits(key, nbits, shape)
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/_src/random.py", line 88, in _random_bits
return key._random_bits(bit_width, shape)
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/_src/prng.py", line 184, in _random_bits
return self.impl.random_bits(self._keys, bit_width, shape)
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/_src/prng.py", line 540, in _rbg_random_bits
_, bits = lax.rng_bit_generator(key, shape, dtype=UINT_DTYPES[bit_width])
jax._src.source_info_util.JaxStackTraceBeforeTransformation: RuntimeError: INVALID_ARGUMENT: Unsupported shape for RngBitGenerator: U16
The preceding stack trace is the source of the JAX operation that, once transformed by JAX, triggered the following exception.
--------------------
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/google/home/zhangqiaorjc/Downloads/t.py", line 16, in <module>
print(jax.random.uniform(jax.random.PRNGKey(1), (2, 2), jnp.bfloat16))
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/_src/random.py", line 239, in uniform
return _uniform(key, shape, dtype, minval, maxval) # type: ignore
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/_src/traceback_util.py", line 165, in reraise_with_filtered_traceback
return fun(*args, **kwargs)
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/_src/api.py", line 429, in cache_miss
donated_invars=donated_invars, inline=inline)
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/core.py", line 1671, in bind
return call_bind(self, fun, *args, **params)
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/core.py", line 1683, in call_bind
outs = top_trace.process_call(primitive, fun, tracers, params)
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/core.py", line 596, in process_call
return primitive.impl(f, *tracers, **params)
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/_src/dispatch.py", line 143, in _xla_call_impl
*unsafe_map(arg_spec, args))
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/linear_util.py", line 272, in memoized_fun
ans = call(fun, *args)
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/_src/dispatch.py", line 170, in _xla_callable_uncached
*arg_specs).compile().unsafe_call
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/_src/profiler.py", line 206, in wrapper
return func(*args, **kwargs)
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/_src/dispatch.py", line 265, in lower_xla_callable
arg_partitions=None, out_partitions=None)
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/_src/profiler.py", line 206, in wrapper
return func(*args, **kwargs)
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/interpreters/xla.py", line 772, in lower_jaxpr_to_xla_module
out_nodes = jaxpr_subcomp(ctx, jaxpr.jaxpr, xla_consts, *xla_args)
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/interpreters/xla.py", line 600, in jaxpr_subcomp
*in_nodes, **eqn.params)
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/_src/lax/lax.py", line 4160, in _rng_bit_generator_translation_rule
c, xops.RngBitGenerator(algorithm, key, xla_shape))
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/interpreters/xla.py", line 611, in xla_destructure
num_elements = len(c.get_shape(ans).tuple_shapes())
jax._src.traceback_util.UnfilteredStackTrace: RuntimeError: INVALID_ARGUMENT: Unsupported shape for RngBitGenerator: U16
The stack trace below excludes JAX-internal frames.
The preceding is the original exception that occurred, unmodified.
--------------------
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/google/home/zhangqiaorjc/Downloads/t.py", line 16, in <module>
print(jax.random.uniform(jax.random.PRNGKey(1), (2, 2), jnp.bfloat16))
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/_src/random.py", line 239, in uniform
return _uniform(key, shape, dtype, minval, maxval) # type: ignore
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/interpreters/xla.py", line 611, in xla_destructure
num_elements = len(c.get_shape(ans).tuple_shapes())
RuntimeError: INVALID_ARGUMENT: Unsupported shape for RngBitGenerator: U16
```
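A minimal sketch of the direction a fix can take (and roughly what the patch above does inside the lowering rule): draw `uint32` bits and narrow them afterwards. Illustrative only:
```python
# Sketch: RngBitGenerator cannot emit u16/u8 directly, so request u32 bits and
# truncate; bf16 sampling only needs 16 random bits per value.
import numpy as np
import jax.numpy as jnp
from jax import lax

key = jnp.array([1, 2, 3, 4], dtype=jnp.uint32)
new_key, bits32 = lax.rng_bit_generator(key, shape=(2, 2), dtype=np.uint32)
bits16 = bits32.astype(np.uint16)  # keep the low 16 bits of each draw
```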
| 2022-01-21T06:58:51 |
|
google/jax | 9,281 | google__jax-9281 | [
"9090"
] | e11cd44f829fb9620f7e18529c6016c30127f16d | diff --git a/jax/_src/numpy/util.py b/jax/_src/numpy/util.py
--- a/jax/_src/numpy/util.py
+++ b/jax/_src/numpy/util.py
@@ -121,14 +121,15 @@ def wrap(op):
try:
parsed = _parse_numpydoc(docstr)
- if update_doc and hasattr(op, '__code__') and 'Parameters' in parsed.sections:
+ if update_doc and 'Parameters' in parsed.sections:
+ code = getattr(getattr(op, "__wrapped__", op), "__code__", None)
# Remove unrecognized parameter descriptions.
parameters = _parse_parameters(parsed.sections['Parameters'])
parsed.sections['Parameters'] = (
"Parameters\n"
"----------\n" +
"\n".join(_versionadded.split(desc)[0].rstrip() for p, desc in parameters.items()
- if p in op.__code__.co_varnames and p not in skip_params)
+ if (code is None or p in code.co_varnames) and p not in skip_params)
)
docstr = parsed.summary.strip() + "\n" if parsed.summary else ""
| diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py
--- a/tests/lax_numpy_test.py
+++ b/tests/lax_numpy_test.py
@@ -44,7 +44,7 @@
from jax import tree_util
from jax.test_util import check_grads
from jax._src.util import prod, safe_zip
-from jax._src.numpy.util import _parse_numpydoc, ParsedDoc
+from jax._src.numpy.util import _parse_numpydoc, ParsedDoc, _wraps
from jax._src.numpy.lax_numpy import _promote_dtypes, _promote_dtypes_inexact
from jax.config import config
@@ -6207,6 +6207,46 @@ def test_lax_numpy_docstrings(self):
if obj.__doc__ and "*Original docstring below.*" not in obj.__doc__:
raise Exception(f"jnp.{name} does not have a wrapped docstring.")
+ @parameterized.named_parameters(
+ {"testcase_name": "_jit" if jit else "", "jit": jit} for jit in [True, False])
+ def test_wrapped_function_parameters(self, jit):
+ def orig(x):
+ """Example Docstring
+
+ Parameters
+ ----------
+ x : array_like
+ Input Data
+
+ .. versionadded:: 1.8.0
+ out : array_like, optional
+ Output to overwrite
+ other_arg : Any
+ not used
+
+ Returns
+ -------
+ x : input
+ """
+ return x
+
+ def wrapped(x, out=None):
+ return x
+
+ if jit:
+ wrapped = jax.jit(wrapped)
+
+ wrapped = _wraps(orig, skip_params=['out'])(wrapped)
+ doc = wrapped.__doc__
+
+ self.assertStartsWith(doc, "Example Docstring")
+ self.assertIn("Original docstring below", doc)
+ self.assertIn("Parameters", doc)
+ self.assertIn("Returns", doc)
+ self.assertNotIn('out', doc)
+ self.assertNotIn('other_arg', doc)
+ self.assertNotIn('versionadded', doc)
+
def test_parse_numpydoc(self):
# Unit test ensuring that _parse_numpydoc correctly parses docstrings for all
| docstring processing no longer fully working
e.g.
```python
>>> print('versionadded' in jnp.nansum.__doc__)
True
```
In the past this was removed here: https://github.com/google/jax/blob/97a5719fcb40af7231b5f803f965063538282f8e/jax/_src/numpy/util.py#L127-L132
But something has changed and that is no longer happening.
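A small sketch of what appears to be going on (that the jit wrapper exposes the original callable via `__wrapped__` is an assumption here, though the test added in the patch above relies on it):
```python
# Sketch: the decorated implementation may be a jit object without __code__,
# but the original Python callable is still reachable via __wrapped__, which
# is what the fix falls back to when filtering parameter descriptions.
import jax

def impl(x, out=None):
    return x

jitted = jax.jit(impl)
code = getattr(getattr(jitted, "__wrapped__", jitted), "__code__", None)
print(code is not None and "out" in code.co_varnames)  # expected: True
```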
| I think the `jit` decorators removed `__code__` (https://github.com/google/jax/blob/04f322e065a2cdbb988bf73f26b6ae4968bf1ea5/jax/_src/numpy/util.py#L124) | 2022-01-21T20:14:13 |
google/jax | 9,290 | google__jax-9290 | [
"8416"
] | f6d329b2d9b5f83c6a59e5739aa1ca8d4d1ffa1c | diff --git a/jax/core.py b/jax/core.py
--- a/jax/core.py
+++ b/jax/core.py
@@ -1643,7 +1643,7 @@ def __eq__(self, other):
return (self.__positional, self.__named) == (other.__positional, other.__named)
if isinstance(other, tuple):
return not self.__named and self.__positional == other
- raise TypeError(f"NamedShape doesn't support comparisons with {type(other)}")
+ return False
def __hash__(self):
named = frozenset(self.__named.items())
| diff --git a/tests/core_test.py b/tests/core_test.py
--- a/tests/core_test.py
+++ b/tests/core_test.py
@@ -515,6 +515,13 @@ def test_typecompat_named_shape(self):
aval3 = core.ShapedArray((2, 3), np.float32, False, {'i': 5})
self.assertFalse(core.typecompat(aval1, aval3))
+ def test_named_shape_comparision(self):
+ self.assertTrue(core.NamedShape(2, 3) == (2, 3))
+ self.assertFalse(core.NamedShape(2, i=3) == (2,))
+ self.assertFalse(core.NamedShape(2, i=3) == (2, 3))
+ self.assertFalse(core.NamedShape(2, i=3) == None)
+ self.assertFalse(core.NamedShape() == [])
+
class DynamicShapesTest(jtu.JaxTestCase):
| NamedShape cannot be compared with None
Occasionally, when running `jax.random.bernoulli(rng_key)` on CI, I'm seeing the following error:
```
test/test_distributions.py:657: in gen_values_outside_bounds
sign = random.bernoulli(key1)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
key = array([2986633504, 617412143], dtype=uint32)
p = DeviceArray(0.5, dtype=float32), shape = None
def bernoulli(key: jnp.ndarray,
p: RealArray = np.float32(0.5),
shape: Optional[Union[Sequence[int], NamedShape]] = None) -> jnp.ndarray:
"""Sample Bernoulli random values with given shape and mean.
Args:
key: a PRNGKey used as the random key.
p: optional, a float or array of floats for the mean of the random
variables. Must be broadcast-compatible with ``shape``. Default 0.5.
shape: optional, a tuple of nonnegative integers representing the result
shape. Must be broadcast-compatible with ``p.shape``. The default (None)
produces a result shape equal to ``p.shape``.
Returns:
A random array with boolean dtype and shape given by ``shape`` if ``shape``
is not None, or else ``p.shape``.
"""
dtype = dtypes.canonicalize_dtype(lax.dtype(p))
if shape is not None:
shape = core.as_named_shape(shape)
if not jnp.issubdtype(dtype, np.floating):
msg = "bernoulli probability `p` must have a floating dtype, got {}."
raise TypeError(msg.format(dtype))
p = lax.convert_element_type(p, dtype)
> return _bernoulli(key, p, shape) # type: ignore
E ValueError: static arguments should be comparable using __eq__.The following error was raised when comparing two objects of types <class 'jax.core.NamedShape'> and <class 'NoneType'>. The error was:
E TypeError: NamedShape doesn't support comparisons with <class 'NoneType'>
```
Looking deeper into the JAX API, I think that this is triggered when using `cpp_jit`. But it seems that under `cpp_jit`, a static argument can accept both a non-eq argument and None, as illustrated in the following code:
```python
import jax
class NoEq: # mimic jax/tests/api_test.py
  def __hash__(self):
    return 1
  def __eq__(self, other):
    raise NotImplementedError("Cannot compare!")
f = lambda x, y: x + 3
jf = jax.jit(f, static_argnums=(1,))
jf(1, NoEq())
jf(1, None) # pass
jf(1, 1) # fail
```
This confuses me. I'm not sure why such a comparison happens with `random.bernoulli`. It happens [occasionally on CI](https://github.com/pyro-ppl/numpyro/runs/4059917016?check_suite_focus=true), and to resolve it I can rerun all the CI jobs. I can't replicate the issue locally, so my assumption is that such a comparison only happens on some specific systems. I guess a solution is to allow comparing NamedShape with None, but this seems to be an issue with `cpp_jit` rather than with NamedShape.
| 2022-01-22T17:40:28 |
|
google/jax | 9,357 | google__jax-9357 | [
"9349"
] | 0b3f497c850153b9398b873fd26bc6d984839960 | diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -2147,6 +2147,7 @@ def where(condition, x=None, y=None, *, size=None, fill_value=None):
_check_arraylike("where", condition)
return nonzero(condition, size=size, fill_value=fill_value)
else:
+ _check_arraylike("where", condition, x, y)
if size is not None or fill_value is not None:
raise ValueError("size and fill_value arguments cannot be used in three-term where function.")
return _where(condition, x, y)
| Improve jnp.where error message about not handling pytree
jax.numpy.where doesn't seem to handle pytree?
```python
import jax.numpy as jnp
jnp.where(True, {'a':1}, {'b':2})
```
gives an uninformative error message
```bash
>>> jnp.where(True, {'a':1}, {'b':2})
Traceback (most recent call last):
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/core.py", line 1248, in _len
return self.shape[0]
IndexError: tuple index out of range
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/_src/numpy/lax_numpy.py", line 2152, in where
return _where(condition, x, y)
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/_src/traceback_util.py", line 165, in reraise_with_filtered_traceback
return fun(*args, **kwargs)
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/_src/api.py", line 429, in cache_miss
out_flat = xla.xla_call(
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/core.py", line 1675, in bind
return call_bind(self, fun, *args, **params)
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/core.py", line 1687, in call_bind
outs = top_trace.process_call(primitive, fun, tracers, params)
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/core.py", line 596, in process_call
return primitive.impl(f, *tracers, **params)
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/_src/dispatch.py", line 142, in _xla_call_impl
compiled_fun = _xla_callable(fun, device, backend, name, donated_invars,
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/linear_util.py", line 272, in memoized_fun
ans = call(fun, *args)
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/_src/dispatch.py", line 169, in _xla_callable_uncached
return lower_xla_callable(fun, device, backend, name, donated_invars,
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/_src/profiler.py", line 206, in wrapper
return func(*args, **kwargs)
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/_src/dispatch.py", line 197, in lower_xla_callable
jaxpr, out_avals, consts = pe.trace_to_jaxpr_final(
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/_src/profiler.py", line 206, in wrapper
return func(*args, **kwargs)
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/interpreters/partial_eval.py", line 1643, in trace_to_jaxpr_final
jaxpr, out_avals, consts = trace_to_subjaxpr_dynamic(fun, main, in_avals)
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/interpreters/partial_eval.py", line 1614, in trace_to_subjaxpr_dynamic
ans = fun.call_wrapped(*in_tracers_)
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/linear_util.py", line 166, in call_wrapped
ans = self.f(*args, **dict(self.params, **kwargs))
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/_src/numpy/lax_numpy.py", line 2129, in _where
x, y = _promote_dtypes(x, y)
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/_src/numpy/lax_numpy.py", line 514, in _promote_dtypes
to_dtype, weak_type = dtypes._lattice_result_type(*args)
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/_src/dtypes.py", line 362, in _lattice_result_type
dtypes, weak_types = zip(*(_dtype_and_weaktype(arg) for arg in args))
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/_src/dtypes.py", line 362, in <genexpr>
dtypes, weak_types = zip(*(_dtype_and_weaktype(arg) for arg in args))
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/_src/dtypes.py", line 251, in _dtype_and_weaktype
return dtype(value), any(value is typ for typ in _weak_types) or is_weakly_typed(value)
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/_src/dtypes.py", line 355, in dtype
dt = np.result_type(x)
File "<__array_function__ internals>", line 5, in result_type
File "/usr/local/google/home/zhangqiaorjc/.pyenv/versions/3.9.4/lib/python3.9/site-packages/numpy/core/_internal.py", line 61, in _usefields
names, formats, offsets, titles = _makenames_list(adict, align)
File "/usr/local/google/home/zhangqiaorjc/.pyenv/versions/3.9.4/lib/python3.9/site-packages/numpy/core/_internal.py", line 29, in _makenames_list
n = len(obj)
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/core.py", line 464, in __len__
return self.aval._len(self)
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/core.py", line 1250, in _len
raise TypeError("len() of unsized object") from err # same as numpy error
jax._src.traceback_util.UnfilteredStackTrace: TypeError: len() of unsized object
The stack trace below excludes JAX-internal frames.
The preceding is the original exception that occurred, unmodified.
--------------------
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/_src/numpy/lax_numpy.py", line 2152, in where
return _where(condition, x, y)
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/_src/numpy/lax_numpy.py", line 2129, in _where
x, y = _promote_dtypes(x, y)
File "/usr/local/google/home/zhangqiaorjc/repos/jax/jax/_src/numpy/lax_numpy.py", line 514, in _promote_dtypes
to_dtype, weak_type = dtypes._lattice_result_type(*args)
File "<__array_function__ internals>", line 5, in result_type
File "/usr/local/google/home/zhangqiaorjc/.pyenv/versions/3.9.4/lib/python3.9/site-packages/numpy/core/_internal.py", line 61, in _usefields
names, formats, offsets, titles = _makenames_list(adict, align)
File "/usr/local/google/home/zhangqiaorjc/.pyenv/versions/3.9.4/lib/python3.9/site-packages/numpy/core/_internal.py", line 29, in _makenames_list
n = len(obj)
TypeError: len() of unsized object
```
| @mattjj this should be the repro of what I saw earlier today
It's not clear to me that `jnp.where` should be able to handle pytrees - after all, its API is modeled after `np.where`, which does not support non-array inputs, and in general no other `jax.numpy` function that I'm aware of is written to support pytree inputs. This looks to me like a specific case of the idea discussed in #1012 and #8504, so perhaps the best approach would be to do this with the package that came out of those discussions: https://github.com/google/tree-math/
We could certainly improve the error message, though, probably by calling `_check_arraylike` on the inputs to `jnp.where`.
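For what it's worth, a minimal sketch of the `jax.tree_map` route (the dict contents here are made up; both trees must share the same structure):
```python
# Sketch: apply the three-term where leaf by leaf over matching pytrees.
import jax
import jax.numpy as jnp

x = {'a': jnp.ones(3), 'b': jnp.zeros(2)}
y = {'a': jnp.zeros(3), 'b': jnp.ones(2)}
cond = True

out = jax.tree_map(lambda xi, yi: jnp.where(cond, xi, yi), x, y)
```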
Thanks Jake! Yeah, I ended up using jax.tree_map. The error message could perhaps be clearer about jnp.where not handling pytrees, as you suggested. | 2022-01-27T19:22:47 |
|
google/jax | 9,391 | google__jax-9391 | [
"9380"
] | 0382a6a04eddd7506a4ef6bb0c93f0f660ee3df6 | diff --git a/jax/interpreters/mlir.py b/jax/interpreters/mlir.py
--- a/jax/interpreters/mlir.py
+++ b/jax/interpreters/mlir.py
@@ -241,9 +241,10 @@ def _ndarray_constant_handler(val: np.ndarray, canonicalize_types
register_constant_handler(np.ndarray, _ndarray_constant_handler)
for _scalar_type in [np.int8, np.int16, np.int32, np.int64,
- np.uint8, np.uint16, np.uint32, np.uint64,
- np.float16, np.float32, np.float64,
- np.bool_, np.longlong, dtypes.bfloat16]:
+ np.uint8, np.uint16, np.uint32, np.uint64,
+ np.float16, np.float32, np.float64,
+ np.complex64, np.complex128,
+ np.bool_, np.longlong, dtypes.bfloat16]:
register_constant_handler(_scalar_type, _ndarray_constant_handler)
def _python_scalar_handler(dtype, val, canonicalize_dtypes):
| diff --git a/tests/api_test.py b/tests/api_test.py
--- a/tests/api_test.py
+++ b/tests/api_test.py
@@ -3120,6 +3120,16 @@ def f(_):
expected = jnp.arange(1) + 1
self.assertAllClose(ans, expected)
+ @parameterized.named_parameters([
+ {"testcase_name": f"{dtype.__name__}", "dtype": dtype}
+ for dtype in jtu.dtypes.all])
+ def test_constant_handlers(self, dtype):
+ # https://github.com/google/jax/issues/9380
+ @jax.jit
+ def f():
+ return jnp.exp(dtype(0))
+ f() # doesn't error
+
def test_large_python_ints(self):
with self.assertRaises(OverflowError):
jnp.multiply(2 ** 100, 3.)
| Type specification/inference now fails in @jax.jit
Please:
- [x] Check for duplicate issues.
- [x] Provide a complete example of how to reproduce the bug, wrapped in triple backticks like this:
The following [test case](https://github.com/ska-sa/codex-africanus/blob/master/africanus/rime/jax/tests/test_jax_phase_delay.py) has worked for a while, but has recently started failing. It seems that there isn't a constant handler for `complex64` anymore? The following reproducer demonstrates the issue, but replacing `out_dtype.type(1j)` with `1j` fixes the problem.
This works on jax 0.2.26 and jaxlib 0.1.75, but fails on jax 0.2.27 and 0.1.76.
```python
import numpy as np
import jax
import jax.numpy as jnp
@jax.jit
def phase_delay(lm, uvw, frequency):
    out_dtype = jnp.result_type(lm, uvw, frequency, np.complex64)
    one = lm.dtype.type(1.0)
    neg_two_pi_over_c = lm.dtype.type(-2*np.pi/3e8)
    l = lm[:, 0, None, None]  # noqa
    m = lm[:, 1, None, None]
    u = uvw[None, :, 0, None]
    v = uvw[None, :, 1, None]
    w = uvw[None, :, 2, None]
    n = jnp.sqrt(one - l**2 - m**2) - one
    real_phase = (neg_two_pi_over_c *
                  (l * u + m * v + n * w) *
                  frequency[None, None, :])
    # replacing out_dtype.type(1j) with 1j fixes this problem
    return jnp.exp(out_dtype.type(1j)*real_phase)
if __name__ == "__main__":
    uvw = np.random.random(size=(100, 3)).astype(np.float32)
    lm = np.random.random(size=(10, 2)).astype(np.float32)*0.001
    frequency = np.linspace(.856e9, .856e9*2, 64).astype(np.float32)
    complex_phase = phase_delay(lm, uvw, frequency)
```
- [x] #9390
```
$ python test_complex_constant_fail.py
WARNING:absl:No GPU/TPU found, falling back to CPU. (Set TF_CPP_MIN_LOG_LEVEL=0 and rerun for more info.)
Traceback (most recent call last):
File "test_complex_constant_fail.py", line 32, in <module>
complex_phase = phase_delay(lm, uvw, frequency)
File "/home/sperkins/venv/afr/lib/python3.7/site-packages/jax/_src/traceback_util.py", line 165, in reraise_with_filtered_traceback
return fun(*args, **kwargs)
File "/home/sperkins/venv/afr/lib/python3.7/site-packages/jax/_src/api.py", line 429, in cache_miss
donated_invars=donated_invars, inline=inline)
File "/home/sperkins/venv/afr/lib/python3.7/site-packages/jax/core.py", line 1671, in bind
return call_bind(self, fun, *args, **params)
File "/home/sperkins/venv/afr/lib/python3.7/site-packages/jax/core.py", line 1683, in call_bind
outs = top_trace.process_call(primitive, fun, tracers, params)
File "/home/sperkins/venv/afr/lib/python3.7/site-packages/jax/core.py", line 596, in process_call
return primitive.impl(f, *tracers, **params)
File "/home/sperkins/venv/afr/lib/python3.7/site-packages/jax/_src/dispatch.py", line 143, in _xla_call_impl
*unsafe_map(arg_spec, args))
File "/home/sperkins/venv/afr/lib/python3.7/site-packages/jax/linear_util.py", line 272, in memoized_fun
ans = call(fun, *args)
File "/home/sperkins/venv/afr/lib/python3.7/site-packages/jax/_src/dispatch.py", line 170, in _xla_callable_uncached
*arg_specs).compile().unsafe_call
File "/home/sperkins/venv/afr/lib/python3.7/site-packages/jax/_src/profiler.py", line 206, in wrapper
return func(*args, **kwargs)
File "/home/sperkins/venv/afr/lib/python3.7/site-packages/jax/_src/dispatch.py", line 260, in lower_xla_callable
donated_invars)
File "/home/sperkins/venv/afr/lib/python3.7/site-packages/jax/interpreters/mlir.py", line 403, in lower_jaxpr_to_module
input_output_aliases=input_output_aliases)
File "/home/sperkins/venv/afr/lib/python3.7/site-packages/jax/interpreters/mlir.py", line 541, in lower_jaxpr_to_fun
*args)
File "/home/sperkins/venv/afr/lib/python3.7/site-packages/jax/interpreters/mlir.py", line 606, in jaxpr_subcomp
in_nodes = map(read, eqn.invars)
File "/home/sperkins/venv/afr/lib/python3.7/site-packages/jax/_src/util.py", line 44, in safe_map
return list(map(f, *args))
File "/home/sperkins/venv/afr/lib/python3.7/site-packages/jax/interpreters/mlir.py", line 583, in read
return ir_constants(v.val, canonicalize_types=True)
File "/home/sperkins/venv/afr/lib/python3.7/site-packages/jax/interpreters/mlir.py", line 171, in ir_constants
raise TypeError("No constant handler for type: {}".format(type(val)))
jax._src.traceback_util.UnfilteredStackTrace: TypeError: No constant handler for type: <class 'numpy.complex64'>
The stack trace below excludes JAX-internal frames.
The preceding is the original exception that occurred, unmodified.
--------------------
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "test_complex_constant_fail.py", line 32, in <module>
complex_phase = phase_delay(lm, uvw, frequency)
File "/home/sperkins/venv/afr/lib/python3.7/site-packages/jax/interpreters/mlir.py", line 171, in ir_constants
raise TypeError("No constant handler for type: {}".format(type(val)))
TypeError: No constant handler for type: <class 'numpy.complex64'>
```
| Thanks for the report. I can reproduce with jaxlib 0.1.76; the jax version doesn't appear to matter.
Shorter repro:
```python
import numpy as np
import jax
import jax.numpy as jnp
@jax.jit
def f():
return jnp.exp(np.complex64(1j))
f()
```
I think the issue is that complex dtypes were left out of the list here: https://github.com/google/jax/blob/0382a6a04eddd7506a4ef6bb0c93f0f660ee3df6/jax/interpreters/mlir.py#L243-L247 | 2022-01-31T18:57:21 |
google/jax | 9,430 | google__jax-9430 | [
"9429"
] | 4432f473133f4bb150bf11e2ce5b8302549bdb0d | diff --git a/jax/_src/tree_util.py b/jax/_src/tree_util.py
--- a/jax/_src/tree_util.py
+++ b/jax/_src/tree_util.py
@@ -267,6 +267,24 @@ def tree_all(tree):
lambda s, values: collections.defaultdict(s[0], safe_zip(s[1], values))) # type: ignore[index]
+
+class _HashableCallableShim:
+ """Object that delegates __call__, __hash__, and __eq__ to another object."""
+ def __init__(self, fun):
+ self.fun = fun
+
+ def __call__(self, *args, **kw):
+ return self.fun(*args, **kw)
+
+ def __hash__(self):
+ return hash(self.fun)
+
+ def __eq__(self, other):
+ if isinstance(other, _HashableCallableShim):
+ return self.fun == other.fun
+ return self.fun == other
+
+
class Partial(functools.partial):
"""A version of functools.partial that works in pytrees.
@@ -318,16 +336,19 @@ class Partial(functools.partial):
def __new__(klass, func, *args, **kw):
# In Python 3.10+, if func is itself a functools.partial instance,
# functools.partial.__new__ would merge the arguments of this Partial
- # instance with the arguments of the func. We box func in another lambda to
- # avoid this optimization since it would change which arguments are
- # considered part of the pytree.
+ # instance with the arguments of the func. We box func in a class that does
+ # not (yet) have a `func` attribute to defeat this optimization, since we
+ # care exactly which arguments are considered part of the pytree.
if isinstance(func, functools.partial):
original_func = func
- func = lambda *args, **kw: original_func(*args, **kw)
+ func = _HashableCallableShim(original_func)
+ out = super(Partial, klass).__new__(klass, func, *args, **kw)
func.func = original_func.func
func.args = original_func.args
func.keywords = original_func.keywords
- return super(Partial, klass).__new__(klass, func, *args, **kw)
+ return out
+ else:
+ return super(Partial, klass).__new__(klass, func, *args, **kw)
register_pytree_node(
| diff --git a/tests/tree_util_test.py b/tests/tree_util_test.py
--- a/tests/tree_util_test.py
+++ b/tests/tree_util_test.py
@@ -200,6 +200,16 @@ def f(a, b, c): pass
h = tree_util.Partial(g, 3)
self.assertEqual(h.args, (3,))
+ def testPartialFuncAttributeHasStableHash(self):
+ # https://github.com/google/jax/issues/9429
+ fun = functools.partial(print, 1)
+ p1 = tree_util.Partial(fun, 2)
+ p2 = tree_util.Partial(fun, 2)
+ self.assertEqual(fun, p1.func)
+ self.assertEqual(p1.func, fun)
+ self.assertEqual(p1.func, p2.func)
+ self.assertEqual(hash(p1.func), hash(p2.func))
+
@parameterized.parameters(*(TREES + LEAVES))
def testRoundtripViaBuild(self, inputs):
xs, tree = _process_pytree(tuple, inputs)
| jax.tree_util.Partial is not hash-stable in jax>=0.2.22
Following #8101, `jax.tree_util.Partial.func` is no longer hash-stable:
jax==0.2.21
```python
>>> import jax
>>> from jax.tree_util import Partial
>>> from functools import partial
>>> fun = partial(print, 1)
>>> hash(fun)
278539187
>>> hash(Partial(fun, 2).func)
278539187
>>> hash(Partial(fun, 2).func)
278539187
```
latest release
```python
>>> from jax.tree_util import Partial
>>> from functools import partial
>>> fun = partial(print, 1)
>>> hash(fun)
8749850128694
>>> hash(Partial(fun, 2).func)
8750211514691
>>> hash(Partial(fun, 2).func)
8750211514664
# instead, if the function wrapped is just a standard function, everything works fine
>>> hash(Partial(print, 1).func)
56249262
>>> hash(Partial(print, 1).func)
56249262
```
This is annoying because in some code in NetKet we were wrapping user-passed functions in a `tree_util.Partial` to simplify things, but now this triggers recompilation depending on whether or not the user-supplied function is wrapped in a partial.
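To illustrate the recompilation concern (the function names here are hypothetical): `Partial.func` becomes part of the pytree structure that `jit` uses in its cache key, so an unstable hash/equality on it shows up as retracing.
```python
# Hypothetical sketch: if Partial(fun).func is a fresh wrapper with a new hash
# on every construction, each call presents a "new" pytree structure to jit.
import functools
import jax
import jax.numpy as jnp
from jax.tree_util import Partial

user_fun = functools.partial(jnp.add, 1.0)  # user-supplied partial

@jax.jit
def apply(p, x):
    return p(x)

apply(Partial(user_fun), 2.0)  # traces
apply(Partial(user_fun), 2.0)  # should reuse the cache once .func is stable
```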
cc @hawkinsp who committed the changes
| 2022-02-03T15:35:35 |
|
google/jax | 9,432 | google__jax-9432 | [
"9431"
] | d04dce3fa2ae85ce5ff43710f0756202ad5fb945 | diff --git a/jax/experimental/sparse/bcoo.py b/jax/experimental/sparse/bcoo.py
--- a/jax/experimental/sparse/bcoo.py
+++ b/jax/experimental/sparse/bcoo.py
@@ -426,10 +426,14 @@ def _bcoo_extract_batching_rule(batched_args, batch_dims):
bdim = batch_dims[1]
indices = lax.expand_dims(indices, (bdim,))
elif batch_dims[1] is None:
+ # TODO(jakevdp) can we handle this case without explicit broadcasting?
bdim = batch_dims[0]
- mat = lax.expand_dims(mat, (bdim,))
+ result_shape = list(mat.shape)
+ result_shape.insert(bdim, indices.shape[bdim])
+ mat = lax.broadcast_in_dim(mat, result_shape, (bdim,))
else:
- assert batch_dims[0] == batch_dims[1]
+ if batch_dims[0] != batch_dims[1]:
+ raise NotImplementedError("bcoo_extract with unequal batch dimensions.")
bdim = batch_dims[0]
n_batch = indices.ndim - 2
if bdim >= n_batch:
| diff --git a/tests/sparse_test.py b/tests/sparse_test.py
--- a/tests/sparse_test.py
+++ b/tests/sparse_test.py
@@ -693,6 +693,26 @@ def test_bcoo_extract(self, shape, dtype, n_batch, n_dense):
data3 = jit(sparse.bcoo_extract)(indices, M)
self.assertArraysEqual(data, data3)
+ def test_bcoo_extract_batching(self):
+ # https://github.com/google/jax/issues/9431
+ indices = jnp.zeros((4, 1, 1), dtype=int)
+ mat = jnp.arange(4.).reshape((4, 1))
+
+ # in_axes = (0, None)
+ expected = jnp.vstack([sparse.bcoo_extract(i, mat[0]) for i in indices])
+ actual = vmap(sparse.bcoo_extract, in_axes=(0, None))(indices, mat[0])
+ self.assertArraysEqual(expected, actual)
+
+ # in_axes = (None, 0)
+ expected = jnp.vstack([sparse.bcoo_extract(indices[0], m) for m in mat])
+ actual = vmap(sparse.bcoo_extract, in_axes=(None, 0))(indices[0], mat)
+ self.assertArraysEqual(expected, actual)
+
+ # in_axes = (0, 0)
+ expected = jnp.vstack([sparse.bcoo_extract(i, m) for i, m in zip(indices, mat)])
+ actual = vmap(sparse.bcoo_extract, in_axes=0)(indices, mat)
+ self.assertArraysEqual(expected, actual)
+
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_nbatch={}_ndense={}".format(
jtu.format_shape_dtype_string(shape, dtype), n_batch, n_dense),
| [sparse] bug in bcoo_extract batching rule
```python
from jax.experimental import sparse
from jax import vmap
import jax.numpy as jnp
indices = jnp.zeros((4, 1, 1), dtype=int)
v = jnp.ones(1)
# Manual mapping
print(jnp.concatenate([sparse.bcoo_extract(ind, v) for ind in indices]))
# [1. 1. 1. 1.]
# vmap
vmap(sparse.bcoo_extract, in_axes=(0, None))(indices, v)
# ValueError: indices batch dimensions not compatible for indices.shape=(4, 1, 1), shape=(1, 1)
```
| 2022-02-03T17:37:33 |
|
google/jax | 9,447 | google__jax-9447 | [
"9445"
] | 248572c3e8a296e835a7d759cbbc906f0033053b | diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -2260,6 +2260,8 @@ def broadcast_to(arr, shape):
def _split(op, ary, indices_or_sections, axis=0):
+ _check_arraylike(op, ary)
+ ary = asarray(ary)
axis = core.concrete_or_error(int, axis, f"in jax.numpy.{op} argument `axis`")
size = ary.shape[axis]
if isinstance(indices_or_sections, (tuple, list)):
@@ -2298,15 +2300,15 @@ def _split(op, ary, indices_or_sections, axis=0):
def split(ary, indices_or_sections, axis: int = 0):
return _split("split", ary, indices_or_sections, axis=axis)
-def _split_on_axis(np_fun, axis):
- @_wraps(np_fun, update_doc=False)
+def _split_on_axis(op, axis):
+ @_wraps(getattr(np, op), update_doc=False)
def f(ary, indices_or_sections):
- return split(ary, indices_or_sections, axis=axis)
+ return _split(op, ary, indices_or_sections, axis=axis)
return f
-vsplit = _split_on_axis(np.vsplit, axis=0)
-hsplit = _split_on_axis(np.hsplit, axis=1)
-dsplit = _split_on_axis(np.dsplit, axis=2)
+vsplit = _split_on_axis("vsplit", axis=0)
+hsplit = _split_on_axis("hsplit", axis=1)
+dsplit = _split_on_axis("dsplit", axis=2)
@_wraps(np.array_split)
def array_split(ary, indices_or_sections, axis: int = 0):
| Jax numpy array_split is slow compared to numpy
I was writing JAX code, substituting jax.numpy in all of the places where I would normally use NumPy.
When using jax.numpy.array_split with a large input, I found that the program was incredibly slow (for both the jax+numpy and pure jax cases below, it didn't finish within 5 minutes for me).
While it was running there was no error, but if I hit ctrl+c to terminate it, the following error was thrown:
`F external/org_tensorflow/tensorflow/compiler/xla/service/gpu/nvptx_compiler.cc:446] ptxas returned an error during compilation of ptx to sass: 'INTERNAL: ptxas exited with non-zero error code 2, output: ' If the error message indicates that a file could not be written, please verify that sufficient filesystem space is provided.
Aborted (core dumped)`
It seems that JAX is trying to jit/compile the array input, which is slow due to its size. I just wanted to check whether this is expected or not.
If it is expected, could some documentation be added to the relevant functions to note this problem?
Example code
```
from tqdm import tqdm
import numpy as onp
import jax.numpy as jnp
print('numpy')
a = [batch for batch in tqdm(onp.array_split(onp.arange(10_000), 1_000))]
b = [batch for batch in tqdm(onp.array_split(onp.arange(100_000), 10_000))]
print('jax + numpy')
c = [batch for batch in tqdm(jnp.array_split(onp.arange(100_000), 10_000))]
d = [batch for batch in tqdm(jnp.array_split(onp.arange(100_000), 10_000))]
print('jax')
e = [batch for batch in tqdm(jnp.array_split(jnp.arange(100_000), 10_000))]
f = [batch for batch in tqdm(jnp.array_split(jnp.arange(100_000), 10_000))]
```
Thanks for any help
| Thanks for the report - I think the issue is that `jnp.array_split` fails to push the numpy input to device before processing it, meaning that the splitting is effectively done on client side and then each output is pushed to device individually, incurring that overhead thousands of times instead of one time.
I'll work on a fix.
@jakevdp It's also possible that that code would be faster if it used `dynamic_slice` rather than `slice`, especially when not under a `jit`. Otherwise we need to compile one "slice" operator for each index. In a `jit` it will make no difference.
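A rough sketch of the distinction being drawn (array contents are arbitrary): with `lax.slice_in_dim` the start index is baked into the op, while `lax.dynamic_slice_in_dim` takes it as a runtime operand, so op-by-op execution can reuse a single compiled slice.
```python
# Sketch: static-start vs dynamic-start slicing.
import jax.numpy as jnp
from jax import lax

x = jnp.arange(100_000)
a = lax.slice_in_dim(x, 10, 20)           # start/stop are compile-time constants
b = lax.dynamic_slice_in_dim(x, 10, 10)   # start is passed as data; size is 10
```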
Good point - my first PR gives an order of magnitude improvement by calling device_put, so I'll get that in first and then take a look at `dynamic_slice`. | 2022-02-04T16:30:35 |
|
google/jax | 9,461 | google__jax-9461 | [
"9277"
] | fbda1a650f7b53ea424ee02527d9943389d5a3ae | diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -6406,13 +6406,31 @@ def _quantile(a, q, axis, interpolation, keepdims, squash_nans):
raise ValueError("interpolation can only be 'linear', 'lower', 'higher', "
"'midpoint', or 'nearest'")
a, q = _promote_dtypes_inexact(a, q)
+ keepdim = []
if issubdtype(a.dtype, np.complexfloating):
raise ValueError("quantile does not support complex input, as the operation is poorly defined.")
if axis is None:
a = ravel(a)
axis = 0
elif isinstance(axis, tuple):
- raise NotImplementedError("Tuple values for axis are not implemented")
+ keepdim = list(shape(a))
+ nd = ndim(a)
+ axis = tuple([_canonicalize_axis(ax, nd) for ax in axis])
+ if len(set(axis)) != len(axis):
+ raise ValueError('repeated axis')
+ for ax in axis:
+ keepdim[ax] = 1
+
+ keep = set(range(nd)) - set(axis)
+ # prepare permutation
+ dimensions = list(range(nd))
+ for i, s in enumerate(sorted(keep)):
+ dimensions[i], dimensions[s] = dimensions[s], dimensions[i]
+ do_not_touch_shape = tuple(x for idx,x in enumerate(shape(a)) if idx not in axis)
+ touch_shape = tuple(x for idx,x in enumerate(shape(a)) if idx in axis)
+ a = lax.reshape(a, do_not_touch_shape + (int(np.prod(touch_shape)),), dimensions)
+ keepdim = tuple(keepdim)
+ axis = _canonicalize_axis(-1, ndim(a))
else:
axis = _canonicalize_axis(axis, ndim(a))
@@ -6499,7 +6517,10 @@ def _quantile(a, q, axis, interpolation, keepdims, squash_nans):
result = lax.mul(lax.add(low_value, high_value), _constant_like(low_value, 0.5))
else:
raise ValueError(f"interpolation={interpolation!r} not recognized")
-
+ if keepdims and keepdim:
+ if q_ndim > 0:
+ keepdim = (shape(q)[0],) + keepdim
+ result = reshape(result, keepdim)
return lax.convert_element_type(result, a.dtype)
| diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py
--- a/tests/lax_numpy_test.py
+++ b/tests/lax_numpy_test.py
@@ -4595,10 +4595,14 @@ def args_maker(): return []
for a_shape, axis in (
((7,), None),
((47, 7), 0),
+ ((47, 7), ()),
((4, 101), 1),
+ ((4, 47, 7), (1, 2)),
+ ((4, 47, 7), (0, 2)),
+ ((4, 47, 7), (1, 0, 2)),
)
for q_dtype in [np.float32]
- for q_shape in scalar_shapes + [(4,)]
+ for q_shape in scalar_shapes + [(1,), (4,)]
for keepdims in [False, True]
for method in ['linear', 'lower', 'higher', 'nearest', 'midpoint']))
def testQuantile(self, op, a_rng, q_rng, a_shape, a_dtype, q_shape, q_dtype,
| jax.numpy.nanpercentile with axis as tuple
currently, the following code results in a NotImplementedError:
```
import jax.numpy as jnp
jnp.nanpercentile(jnp.ones((3, 3, 12, 24)), 99.9, axis=(0, 1, 2))
```
```
File "/home/clemens/.local/lib/python3.8/site-packages/jax/_src/numpy/lax_numpy.py", line 6359, in _quantile
raise NotImplementedError("Tuple values for axis are not implemented")
```
However the docstring actually explicitly mentions tuple as a valid option for axis: https://jax.readthedocs.io/en/latest/_autosummary/jax.numpy.nanpercentile.html
same for jax.numpy.percentile.html
Thanks for the report - this feature hasn't been implemented yet. Regarding the docstring, the bulk of the content is copied directly from the corresponding numpy function (it's easy to miss, but at the top you'll see something like "original docstring below"). Because of that, the details are not always accurate, particularly when corner cases are explicitly unimplemented.
We can leave this issue open to track implementing this feature.
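In the meantime, a workaround sketch for the tuple-axis case (the helper name here is made up; it just collapses the reduced axes into one trailing axis, which is roughly the shape manipulation a fix would need):
```python
import jax.numpy as jnp

def nanpercentile_multi_axis(a, q, axes):
    # Normalize the axes, move them to the end, and merge them into one axis.
    axes = tuple(ax % a.ndim for ax in axes)
    keep = tuple(ax for ax in range(a.ndim) if ax not in axes)
    a = jnp.transpose(a, keep + axes)
    a = a.reshape(a.shape[:len(keep)] + (-1,))
    return jnp.nanpercentile(a, q, axis=-1)

out = nanpercentile_multi_axis(jnp.ones((3, 3, 12, 24)), 99.9, (0, 1, 2))  # shape (24,)
```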
I'd like to give this one a try. Might take some time though as I'm quite new to Jax. If that's no problem, feel free to assign it to me! | 2022-02-06T16:50:52 |
google/jax | 9,471 | google__jax-9471 | [
"9462"
] | 287c476eec9e74cfde5d420ba738031c8f42f487 | diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -424,6 +424,7 @@ def _make_scalar_type(np_scalar_type):
float_ = float32 if dtypes.float_ == np.float32 else float64
complex_ = complex64 if dtypes.complex_ == np.complex64 else complex128
+generic = np.generic
number = np.number
inexact = np.inexact
complexfloating = np.complexfloating
diff --git a/jax/numpy/__init__.py b/jax/numpy/__init__.py
--- a/jax/numpy/__init__.py
+++ b/jax/numpy/__init__.py
@@ -160,6 +160,7 @@
full as full,
full_like as full_like,
gcd as gcd,
+ generic as generic,
geomspace as geomspace,
get_printoptions as get_printoptions,
gradient as gradient,
| Various type inconsistencies
Please consider adding `jax.numpy.generic` as an alias for `numpy.generic` (it seems to be missing). Also,
```
In [5]: jnp.issubdtype(jnp.bfloat16, jnp.inexact)
Out[5]: True
In [6]: issubclass(jnp.bfloat16, jnp.inexact)
Out[6]: False
In [7]: issubclass(np.float32, np.inexact)
Out[7]: True
```
Line 6 should probably return true, which means somehow registering the types like numpy does.
| I'm not sure that would be correct; for example:
```python
import numpy as np
print(type(np.float16(0)))
# numpy.float16
import jax.numpy as jnp
type(jnp.float16(0))
# jaxlib.xla_extension.DeviceArray
```
JAX does not return an instance of a `float16` scalar in this case, but rather returns a zero-dimensional JAX array. This choice to unify arrays and scalars in JAX is a deliberate one: we have specifically decided to *not* honor the numpy Python type hierarchy in the case of scalar-like instantiations, so enforcing that `jnp.float16` is a subclass of `jnp.inexact` would be misleading.
What do you think?
That's a great point. How am I supposed to say that `T` is one of the inexact types? In Numpy, I say: `T: type[np.inexact]`. The corresponding thing doesn't work in Jax.
I think you're looking for `jnp.issubdtype(t, jnp.inexact)`. This should handle `bfloat16` as well as the standard numpy types.
Yes, I know how to check it programmatically. I'm asking how I should annotate it.
Sorry, I just noticed that your question is more a question of type annotation than in-program type checking (that wasn't clear in your initial question).
Setting JAX aside, I don't think using `T: type[np.inexact]` is the best approach, because (for example) `np.dtype('float64')` will fail this.
Instead, you should probably use the new [`DtypeLike`](https://numpy.org/devdocs/reference/typing.html#dtypelike) typedef in newer numpy versions. Unfortunately, this doesn't give any mechanism to limit inputs to inexact types, but I think that Python type checking simply doesn't have the granularity to do that correctly.
It's one of the reasons that JAX internally aliases so many types to `Any`: Python type declaration is just generally not expressive enough to work properly with array computing code, except at a very superficial level.
> Python type declaration is just generally not expressive enough to work properly with array computing code, except at a very superficial level.
I agree with this. I'm just trying to do the best I can.
Anyway, for this issue, you've convinced me. I guess all that's left is adding `jnp.generic`?
> Anyway, for this issue, you've convinced me. I guess all that's left is adding jnp.generic?
Sure - I'll take care of that
https://github.com/google/jax/pull/9471 | 2022-02-07T17:26:27 |
|
google/jax | 9,493 | google__jax-9493 | [
"8996"
] | 82d8261308920af09c7de6d0c8cd0c8161140ac0 | diff --git a/jax/_src/tree_util.py b/jax/_src/tree_util.py
--- a/jax/_src/tree_util.py
+++ b/jax/_src/tree_util.py
@@ -13,11 +13,13 @@
# limitations under the License.
import collections
+import difflib
import functools
from functools import partial
import operator as op
from typing import (Any, Callable, Hashable, Iterable, Optional, Tuple, List,
Dict, Type, TypeVar, overload, TYPE_CHECKING, NamedTuple)
+import textwrap
from jax._src.lib import pytree
@@ -380,9 +382,10 @@ def flatten_one_level(pytree: Any) -> Tuple[List[Any], Hashable]:
else:
raise ValueError(f"can't tree-flatten type: {type(pytree)}")
-def prefix_errors(prefix_tree: Any, full_tree: Any
+def prefix_errors(prefix_tree: Any, full_tree: Any,
+ is_leaf: Optional[Callable[[Any], bool]] = None,
) -> List[Callable[[str], ValueError]]:
- return list(_prefix_error(KeyPath(()), prefix_tree, full_tree))
+ return list(_prefix_error(KeyPath(()), prefix_tree, full_tree, is_leaf))
class KeyPathEntry(NamedTuple):
key: Any
@@ -437,38 +440,59 @@ def register_keypaths(ty: Type, handler: Callable[[Any], List[KeyPathEntry]]
register_keypaths(dict,
lambda dct: [GetitemKeyPathEntry(k) for k in sorted(dct)])
-def _prefix_error(key_path: KeyPath, prefix_tree: Any, full_tree: Any
+def _prefix_error(key_path: KeyPath, prefix_tree: Any, full_tree: Any,
+ is_leaf: Optional[Callable[[Any], bool]] = None,
) -> Iterable[Callable[[str], ValueError]]:
# A leaf is a valid prefix of any tree:
- if treedef_is_leaf(tree_structure(prefix_tree)): return
+ if treedef_is_leaf(tree_structure(prefix_tree, is_leaf=is_leaf)): return
# The subtrees may disagree because their roots are of different types:
if type(prefix_tree) != type(full_tree):
yield lambda name: ValueError(
- "pytree structure error: different types "
- f"at {{name}}{key_path.pprint()}: "
- f"prefix pytree {{name}} has type {type(prefix_tree)} "
- f"where full pytree has type {type(full_tree)}.".format(name=name))
+ "pytree structure error: different types at key path\n"
+ f" {{name}}{key_path.pprint()}\n"
+ f"At that key path, the prefix pytree {{name}} has a subtree of type\n"
+ f" {type(prefix_tree)}\n"
+ f"but at the same key path the full pytree has a subtree of different type\n"
+ f" {type(full_tree)}.".format(name=name))
return # don't look for more errors in this subtree
- # Or they may disagree if their roots have different numbers of children:
+ # Or they may disagree if their roots have different numbers of children (note
+ # that because both prefix_tree and full_tree have the same type at this
+ # point, and because prefix_tree is not a leaf, each can be flattened once):
prefix_tree_children, prefix_tree_meta = flatten_one_level(prefix_tree)
full_tree_children, full_tree_meta = flatten_one_level(full_tree)
if len(prefix_tree_children) != len(full_tree_children):
yield lambda name: ValueError(
- "pytree structure error: different numbers of pytree children "
- f"at {{name}}{key_path.pprint()}: "
- f"prefix pytree {{name}} has {len(prefix_tree_children)} children where "
- f"full pytree has {len(full_tree_children)} children.".format(name=name))
+ "pytree structure error: different numbers of pytree children at key path\n"
+ f" {{name}}{key_path.pprint()}\n"
+ f"At that key path, the prefix pytree {{name}} has a subtree of type\n"
+ f" {type(prefix_tree)}\n"
+ f"with {len(prefix_tree_children)} children, "
+ f"but at the same key path the full pytree has a subtree of the same "
+ f"type but with {len(full_tree_children)} children.".format(name=name))
return # don't look for more errors in this subtree
# Or they may disagree if their roots have different pytree metadata:
if prefix_tree_meta != full_tree_meta:
+ prefix_tree_meta_str = str(prefix_tree_meta)
+ full_tree_meta_str = str(full_tree_meta)
+ metadata_diff = textwrap.indent(
+ '\n'.join(difflib.ndiff(prefix_tree_meta_str.splitlines(),
+ full_tree_meta_str.splitlines())),
+ prefix=" ")
yield lambda name: ValueError(
- "pytree structure error: different pytree metadata "
- f"at {{name}}{key_path.pprint()}: "
- f"prefix pytree {{name}} has metadata {prefix_tree_meta} where "
- f"full pytree has metadata {full_tree_meta}.".format(name=name))
+ "pytree structure error: different pytree metadata at key path\n"
+ f" {{name}}{key_path.pprint()}\n"
+ f"At that key path, the prefix pytree {{name}} has a subtree of type\n"
+ f" {type(prefix_tree)}\n"
+ f"with metadata\n"
+ f" {prefix_tree_meta_str}\n"
+ f"but at the same key path the full pytree has a subtree of the same "
+ f"type but with metadata\n"
+ f" {full_tree_meta_str}\n"
+ f"so the diff in the metadata at these pytree nodes is\n"
+ f"{metadata_diff}".format(name=name))
return # don't look for more errors in this subtree
# If the root types and numbers of children agree, there must be an error
diff --git a/jax/experimental/pjit.py b/jax/experimental/pjit.py
--- a/jax/experimental/pjit.py
+++ b/jax/experimental/pjit.py
@@ -40,7 +40,9 @@
from jax.interpreters import partial_eval as pe
from jax.interpreters.sharded_jit import PartitionSpec
from jax._src.lib import xla_client as xc
-from jax.tree_util import tree_map, tree_flatten, tree_unflatten
+from jax.tree_util import (tree_map, tree_flatten, tree_unflatten,
+ treedef_is_leaf, tree_structure)
+from jax._src.tree_util import prefix_errors
from jax._src.util import (extend_name_stack, HashableFunction, safe_zip,
wrap_name, wraps, distributed_debug_log,
split_list, cache, tuple_insert)
@@ -293,11 +295,60 @@ def flatten_axis_resources(what, tree, axis_resources, tupled_args):
try:
return tuple(flatten_axes(what, tree, axis_resources, tupled_args=tupled_args))
except ValueError:
- pass
+ pass # Raise a tree prefix error below
+
+ # Tree leaves are always valid prefixes, so if there was a prefix error as
+ # assumed here, axis_resources must not be a leaf.
+ assert not treedef_is_leaf(tree_structure(axis_resources))
+
+ # Check the type directly rather than using isinstance because of namedtuples.
+ if tupled_args and (type(axis_resources) is not tuple or
+ len(axis_resources) != len(tree.children())):
+ # We know axis_resources is meant to be a tuple corresponding to the args
+ # tuple, but while it is a non-leaf pytree, either it wasn't a tuple or it
+ # wasn't the right length.
+ msg = (f"{what} specification must be a tree prefix of the positional "
+ f"arguments tuple passed to the `pjit`-decorated function. In "
+ f"particular, {what} must either be a None, a PartitionSpec, or "
+ f"a tuple of length equal to the number of positional arguments.")
+ # If `tree` represents an args tuple, then `axis_resources` must be a tuple.
+ # TODO(mattjj,apaszke): disable implicit list casts, remove 'or list' below
+ if type(axis_resources) is not tuple:
+ msg += f" But {what} is not a tuple: got {type(axis_resources)} instead."
+ elif len(axis_resources) != len(tree.children()):
+ msg += (f" But {what} is the wrong length: got a tuple or list of length "
+ f"{len(axis_resources)} for an args tuple of length "
+ f"{len(tree.children())}.")
+
+ # As an extra hint, let's check if the user just forgot to wrap
+ # in_axis_resources in a singleton tuple.
+ if len(tree.children()) == 1:
+ try: flatten_axes(what, tree, (axis_resources,))
+ except ValueError: pass # That's not the issue.
+ else:
+ msg += (f" Given the corresponding argument being "
+ f"passed, it looks like {what} might need to be wrapped in "
+ f"a singleton tuple.")
+
+ raise ValueError(msg)
+
# Replace axis_resources with unparsed versions to avoid revealing internal details
- flatten_axes(what, tree, tree_map(lambda parsed: parsed.user_spec, axis_resources),
- tupled_args=tupled_args)
- raise AssertionError("Please open a bug request!") # This should be unreachable
+ axis_tree = tree_map(lambda parsed: parsed.user_spec, axis_resources)
+
+ # Because ecause we only have the `tree` treedef and not the full pytree here,
+ # we construct a dummy tree to compare against. Revise this in callers?
+ dummy_tree = tree_unflatten(tree, [PytreeLeaf()] * tree.num_leaves)
+ errors = prefix_errors(axis_tree, dummy_tree)
+ if errors:
+ e = errors[0] # Only show information about the first disagreement found.
+ raise e(what)
+
+ # At this point we've failed to find a tree prefix error.
+ assert False, "Please open a bug report!" # This should be unreachable.
+
+class PytreeLeaf:
+ def __repr__(self): return "pytree leaf"
+
@lu.cache
def _pjit_jaxpr(fun, mesh, local_in_avals,
@@ -472,8 +523,7 @@ def __repr__(self):
def _prepare_axis_resources(axis_resources,
arg_name,
allow_unconstrained_dims=False):
- # PyTrees don't treat None values as leaves, so we explicitly need
- # to explicitly declare them as such
+ # PyTrees don't treat None values as leaves, so we use an is_leaf function.
entries, treedef = tree_flatten(axis_resources, is_leaf=lambda x: x is None)
what = f"{arg_name} leaf specifications"
entries = [
diff --git a/jax/tree_util.py b/jax/tree_util.py
--- a/jax/tree_util.py
+++ b/jax/tree_util.py
@@ -55,4 +55,7 @@
treedef_children as treedef_children,
treedef_is_leaf as treedef_is_leaf,
treedef_tuple as treedef_tuple,
+ register_keypaths as register_keypaths,
+ AttributeKeyPathEntry as AttributeKeyPathEntry,
+ GetitemKeyPathEntry as GetitemKeyPathEntry,
)
| diff --git a/tests/pjit_test.py b/tests/pjit_test.py
--- a/tests/pjit_test.py
+++ b/tests/pjit_test.py
@@ -1138,15 +1138,32 @@ def testEmptyMesh(self):
def testAxisResourcesMismatch(self):
x = jnp.ones([])
p = [None, None, None]
+
pjit(lambda x: x, (p,), p)([x, x, x]) # OK
+
error = re.escape(
- r"pjit in_axis_resources specification must be a tree prefix of the "
- r"corresponding value, got specification (None, None, None) for value "
- r"tree PyTreeDef((*, *)). Note that pjit in_axis_resources that are "
- r"non-trivial pytrees should always be wrapped in a tuple representing "
- r"the argument list.")
+ "pjit in_axis_resources specification must be a tree prefix of the "
+ "positional arguments tuple passed to the `pjit`-decorated function. "
+ "In particular, pjit in_axis_resources must either be a None, a "
+ "PartitionSpec, or a tuple of length equal to the number of positional "
+ "arguments. But pjit in_axis_resources is the wrong length: got a "
+ "tuple or list of length 3 for an args tuple of length 2.")
with self.assertRaisesRegex(ValueError, error):
- pjit(lambda x, y: x, p, p)(x, x) # Error, but make sure we hint at tupling
+ pjit(lambda x, y: x, p, p)(x, x)
+
+ Foo = namedtuple('Foo', ['x'])
+ error = "in_axis_resources is not a tuple.*might need to be wrapped"
+ with self.assertRaisesRegex(ValueError, error):
+ pjit(lambda x: x, Foo(None), Foo(None))(Foo(x))
+
+ pjit(lambda x: x, (Foo(None),), Foo(None))(Foo(x)) # OK w/ singleton tuple
+
+ # TODO(apaszke,mattjj): Disable implicit list casts and enable this
+ # error = ("it looks like pjit in_axis_resources might need to be wrapped in "
+ # "a singleton tuple.")
+ # with self.assertRaisesRegex(ValueError, error):
+ # pjit(lambda x, y: x, p, p)([x, x, x])
+
# TODO(apaszke): Disable implicit list casts and enable this
# error = re.escape(
# r"pjit in_axis_resources specification must be a tree prefix of the "
@@ -1158,10 +1175,16 @@ def testAxisResourcesMismatch(self):
# r"singleton tuple.")
# with self.assertRaisesRegex(ValueError, error):
# pjit(lambda x: x, p, p)([x, x, x]) # Error, but make sure we hint at singleton tuple
+
error = re.escape(
- r"pjit out_axis_resources specification must be a tree prefix of the "
- r"corresponding value, got specification [[None, None, None], None] for "
- r"value tree PyTreeDef([*, *, *]).")
+ "pytree structure error: different numbers of pytree children at "
+ "key path\n"
+ " pjit out_axis_resources tree root\n"
+ "At that key path, the prefix pytree pjit out_axis_resources has a "
+ "subtree of type\n"
+ " <class 'list'>\n"
+ "with 2 children, but at the same key path the full pytree has a "
+ "subtree of the same type but with 3 children.")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: x, (p,), [p, None])([x, x, x]) # Error, we raise a generic tree mismatch message
diff --git a/tests/tree_util_test.py b/tests/tree_util_test.py
--- a/tests/tree_util_test.py
+++ b/tests/tree_util_test.py
@@ -439,79 +439,91 @@ class TreePrefixErrorsTest(jtu.JaxTestCase):
def test_different_types(self):
e, = prefix_errors((1, 2), [1, 2])
- expected = "pytree structure error: different types at in_axes tree root"
+ expected = ("pytree structure error: different types at key path\n"
+ " in_axes tree root")
with self.assertRaisesRegex(ValueError, expected):
raise e('in_axes')
def test_different_types_nested(self):
e, = prefix_errors(((1,), (2,)), ([3], (4,)))
- expected = r"pytree structure error: different types at in_axes\[0\]"
+ expected = ("pytree structure error: different types at key path\n"
+ r" in_axes\[0\]")
with self.assertRaisesRegex(ValueError, expected):
raise e('in_axes')
def test_different_types_multiple(self):
e1, e2 = prefix_errors(((1,), (2,)), ([3], [4]))
- expected = r"pytree structure error: different types at in_axes\[0\]"
+ expected = ("pytree structure error: different types at key path\n"
+ r" in_axes\[0\]")
with self.assertRaisesRegex(ValueError, expected):
raise e1('in_axes')
- expected = r"pytree structure error: different types at in_axes\[1\]"
+ expected = ("pytree structure error: different types at key path\n"
+ r" in_axes\[1\]")
with self.assertRaisesRegex(ValueError, expected):
raise e2('in_axes')
def test_different_num_children(self):
e, = prefix_errors((1,), (2, 3))
expected = ("pytree structure error: different numbers of pytree children "
- "at in_axes tree root")
+ "at key path\n"
+ " in_axes tree root")
with self.assertRaisesRegex(ValueError, expected):
raise e('in_axes')
def test_different_num_children_nested(self):
e, = prefix_errors([[1]], [[2, 3]])
expected = ("pytree structure error: different numbers of pytree children "
- r"at in_axes\[0\]")
+ "at key path\n"
+ r" in_axes\[0\]")
with self.assertRaisesRegex(ValueError, expected):
raise e('in_axes')
def test_different_num_children_multiple(self):
e1, e2 = prefix_errors([[1], [2]], [[3, 4], [5, 6]])
expected = ("pytree structure error: different numbers of pytree children "
- r"at in_axes\[0\]")
+ "at key path\n"
+ r" in_axes\[0\]")
with self.assertRaisesRegex(ValueError, expected):
raise e1('in_axes')
expected = ("pytree structure error: different numbers of pytree children "
- r"at in_axes\[1\]")
+ "at key path\n"
+ r" in_axes\[1\]")
with self.assertRaisesRegex(ValueError, expected):
raise e2('in_axes')
def test_different_metadata(self):
e, = prefix_errors({1: 2}, {3: 4})
expected = ("pytree structure error: different pytree metadata "
- "at in_axes tree root")
+ "at key path\n"
+ " in_axes tree root")
with self.assertRaisesRegex(ValueError, expected):
raise e('in_axes')
def test_different_metadata_nested(self):
e, = prefix_errors([{1: 2}], [{3: 4}])
expected = ("pytree structure error: different pytree metadata "
- r"at in_axes\[0\]")
+ "at key path\n"
+ r" in_axes\[0\]")
with self.assertRaisesRegex(ValueError, expected):
raise e('in_axes')
def test_different_metadata_multiple(self):
e1, e2 = prefix_errors([{1: 2}, {3: 4}], [{3: 4}, {5: 6}])
expected = ("pytree structure error: different pytree metadata "
- r"at in_axes\[0\]")
+ "at key path\n"
+ r" in_axes\[0\]")
with self.assertRaisesRegex(ValueError, expected):
raise e1('in_axes')
expected = ("pytree structure error: different pytree metadata "
- r"at in_axes\[1\]")
+ "at key path\n"
+ r" in_axes\[1\]")
with self.assertRaisesRegex(ValueError, expected):
raise e2('in_axes')
def test_fallback_keypath(self):
e, = prefix_errors(Special(1, [2]), Special(3, 4))
- expected = ("pytree structure error: different types at "
- r"in_axes\[<flat index 1>\]")
+ expected = ("pytree structure error: different types at key path\n"
+ r" in_axes\[<flat index 1>\]")
with self.assertRaisesRegex(ValueError, expected):
raise e('in_axes')
| Better `pjit in_axis_resources specification must be a tree prefix of the corresponding value` error
When using pjit with large, complicated pytrees of arguments and partition specification, the runtime error `ValueError: pjit in_axis_resources specification must be a tree prefix of the corresponding value, got specification` is hard to work with, because it outputs the pspec pytree, and the value pytree (with asterisks) as huge single-line blobs.
An easy first step towards making these errors easier to work with would be to print out the pytrees like JSONs, with linebreaks and multiple indentation levels.
A more involved improvement would be to point out the difference between the two trees to the user in an easy-to-work-with way. For example, the smallest differing sub-pytrees could be printed, together with their locations in the main pytree (as the list of keys/indices `(k_i)` that one needs to select, in order, to traverse from the root down to the differing sub-pytree: `sub_pytree == root[k_1][...][k_n]`).
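For what it's worth, the key-path machinery for this already exists in a private helper, so a diagnostic along these lines is possible today (the import path is internal and may change):
```python
from jax._src.tree_util import prefix_errors  # private helper; treat as an assumption

spec = {'w': 0, 'b': 0}   # hypothetical axis-resources prefix
value = {'w': (1, 2)}     # hypothetical argument pytree (no 'b' entry)

errors = prefix_errors(spec, value)
if errors:
    raise errors[0]('in_axis_resources')  # ValueError naming the first mismatching key path
```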
| 2022-02-08T20:54:46 |
|
google/jax | 9,539 | google__jax-9539 | [
"9538"
] | 2ae10ea7b8ccc1fdfcd29858d6bd8b4fea1f0bd6 | diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -6774,6 +6774,8 @@ def wrapped(*args, **kwargs):
def _defer_to_unrecognized_arg(binary_op):
# Ensure that other array types have the chance to override arithmetic.
def deferring_binary_op(self, other):
+ if hasattr(other, '__jax_array__'):
+ other = other.__jax_array__()
if not isinstance(other, _accepted_binop_types):
return NotImplemented
return binary_op(self, other)
| diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py
--- a/tests/lax_numpy_test.py
+++ b/tests/lax_numpy_test.py
@@ -3852,6 +3852,12 @@ def __array__(self, dtype=None):
ans = jnp.array(a)
self.assertEqual(ans, 3.)
+ def testJaxArrayOps(self):
+ class arraylike:
+ def __jax_array__(self):
+ return jnp.array(3.)
+ self.assertArraysEqual(arraylike() * jnp.arange(10), jnp.array(3.) * jnp.arange(10))
+
def testMemoryView(self):
self.assertAllClose(
jnp.array(bytearray(b'\x2a')),
| `DeviceArray.__mul__` etc. don't respect `__jax_array__`
```python
import jax.numpy as jnp
class M:
def __jax_array__(self):
return jnp.array([1, 1])
M() * jnp.array([1, 1])
```
produces `TypeError: unsupported operand type(s) for *: 'M' and 'DeviceArray'`
In contrast `jnp.multiply(M(), jnp.array([1, 1]))` handles this just fine. No error, works as expected.
Also, NumPy handles this fine:
```python
import numpy as np
class M:
def __array__(self):
return np.array([1, 1])
M() * np.array([1, 1]) # no error!
```
| Thanks for the report. I think the issue is we need to check for `__jax_array__` here: https://github.com/google/jax/blob/f02c6fcd728878f8a239de895eb67836fb2f9b46/jax/_src/numpy/lax_numpy.py#L6777
I can work on a fix later this afternoon if nobody else gets to it first. | 2022-02-11T20:45:16 |
google/jax | 9,564 | google__jax-9564 | [
"9548"
] | f229a703e7a23fcba9445510c336e8e602f18462 | diff --git a/jax/_src/random.py b/jax/_src/random.py
--- a/jax/_src/random.py
+++ b/jax/_src/random.py
@@ -435,6 +435,11 @@ def choice(key: KeyArray,
axis: int = 0) -> jnp.ndarray:
"""Generates a random sample from a given array.
+ .. warning::
+ If ``p`` has fewer non-zero elements than the requested number of samples,
+ as specified in ``shape``, and ``replace=False``, the output of this
+ function is ill-defined. Please make sure to use appropriate inputs.
+
Args:
key: a PRNG key used as the random key.
a : array or int. If an ndarray, a random sample is generated from
| jax.random.choice has undefined behavior when sampling without replacement and using probabilities
Summary: if enough weights/probabilities are zero such that sampling w/o replacement leads to too few possibilities to sample, jax should raise an error.
Simple case:
```python
import jax
import jax.numpy as jnp
prob = jnp.zeros(10)
prob = prob.at[9].set(1)
jax.random.choice(jax.random.PRNGKey(5), jnp.arange(10), shape=(5,), replace=False, p=prob)
# DeviceArray([9, 0, 1, 2, 3], dtype=int32)
```
Numpy raises a
```
ValueError: Fewer non-zero entries in p than size
```
with equivalent code.
Fix:
https://github.com/google/jax/blob/main/jax/_src/random.py#L493
Here, do a check that the number of nonzero entries is >= desired number of entries.
Sounds indeed like Jax should mimic the numpy behaviour; I can take a look.
Thanks for the report - the reason for this behavior is that JAX cannot raise warnings or errors that depend on the values within arrays. This is due to the nature of JAX's tracing and compilation. It's the same reason that, for example, we cannot raise errors for out-of-bound indexing.
Our best option here is to probably add a warning to the docstring, informing users that if there are not enough nonzero probabilities, the output is ill-defined. Are you interested in opening a pull request for that?
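Callers who want a hard failure can still guard eagerly with concrete values outside of `jit`, e.g.:
```python
import numpy as np
import jax.numpy as jnp

prob = jnp.zeros(10).at[9].set(1.0)   # same weights as in the repro above
if np.count_nonzero(prob) < 5:        # 5 == number of samples requested
    raise ValueError("Fewer non-zero entries in p than size")
```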
Interesting, sure I'll open new PR with a warning in the docstring. OK if I just close the other one?
Interesting - sure, I'll open a new PR with a warning in the docstring. OK if I just close the other one?
Sure, sounds good.
So I've found that it is (mostly) possible to raise value-dependent errors under jit, by using `jax.experimental.host_callback`. For example see
https://github.com/patrick-kidger/diffrax/blob/2b4e4d863c15abc7143919bac7825090bbfe50be/diffrax/misc/errors.py#L15
for an implementation, and the comment on
#9457
for a way to get it (mostly) working on the GPU as well.
I find that this is really useful functionality. Maybe with some tidying-up this could find its way into `jax.experimental` somewhere?
Thanks for pointing that out - there's also work on value-dependent checks here: https://github.com/google/jax/blob/main/jax/experimental/checkify.py
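To make that concrete, a rough sketch of what a value-dependent guard could look like with checkify (the module is experimental, so treat the exact API and primitive coverage here as assumptions):
```python
import jax
import jax.numpy as jnp
from jax.experimental import checkify

def checked_choice(key, a, p):
    # User-level check that is discharged into a returned error value.
    checkify.check(jnp.count_nonzero(p) >= 5, "Fewer non-zero entries in p than size")
    return jax.random.choice(key, a, shape=(5,), replace=False, p=p)

prob = jnp.zeros(10).at[9].set(1.0)
err, samples = checkify.checkify(checked_choice)(jax.random.PRNGKey(5), jnp.arange(10), prob)
err.throw()  # raises here, since prob has only one non-zero entry
```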
Ah nice -- that's where #8468 ended up! Thanks for the link. Once the details of that have been worked out (being able to stage `checkify.check` + merging `checkify.checkify` into `jax.jit` (?)) then that looks like it'll be a much more elegant replacement. | 2022-02-14T20:09:21 |
|
google/jax | 9,658 | google__jax-9658 | [
"9657"
] | a65841f5dbd2bede960543cf773f175b4d77590b | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -70,6 +70,7 @@
'sphinx_autodoc_typehints',
'myst_nb',
"sphinx_remove_toctrees",
+ 'sphinx_copybutton',
'jax_extensions',
]
| [QoL] Add copy button in docs code snippets
Since I'm a bit lazy, I'd like to have a "copy to clipboard" button in jax docs to copy over code snippets instead of drag-select-copying it. Like this:

Duplicate Checks:
Nothing relevant comes up when searching for "copy button", "docs copy button" or even "button" for that matter.
| After a bit of digging, I added a "sphinx-copybutton" to the requirements and `config.py`, built anew (without executing notebooks). And it seems to work. | 2022-02-22T09:57:24 |
|
google/jax | 9,702 | google__jax-9702 | [
"9700"
] | d5a1c64d135ae8519c61e15a2f32a75d8de36ab3 | diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -648,7 +648,10 @@ def load(*args, **kwargs):
# numpy does not recognize bfloat16, so arrays are serialized as void16
if out.dtype == 'V2':
out = out.view(bfloat16)
- out = asarray(out)
+ try:
+ out = asarray(out)
+ except TypeError: # Unsupported dtype
+ pass
return out
### implementations of numpy functions in terms of lax
| diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py
--- a/tests/lax_numpy_test.py
+++ b/tests/lax_numpy_test.py
@@ -538,15 +538,19 @@ def testNotImplemented(self):
func()
@parameterized.named_parameters(jtu.cases_from_list(
- {"testcase_name": "_{}".format(dtype), "dtype": dtype}
- for dtype in float_dtypes))
- def testLoad(self, dtype):
+ {"testcase_name": "_{}_allow_picke={}".format(dtype, allow_pickle),
+ "dtype": dtype, "allow_pickle": allow_pickle}
+ for dtype in float_dtypes + [object]
+ for allow_pickle in [True, False]))
+ def testLoad(self, dtype, allow_pickle):
+ if dtype == object and not allow_pickle:
+ self.skipTest("dtype=object requires allow_pickle=True")
rng = jtu.rand_default(self.rng())
arr = rng((10), dtype)
with io.BytesIO() as f:
jnp.save(f, arr)
f.seek(0)
- arr_out = jnp.load(f)
+ arr_out = jnp.load(f, allow_pickle=allow_pickle)
self.assertArraysEqual(arr, arr_out)
@parameterized.named_parameters(itertools.chain.from_iterable(
| jnp.load() crashes if save file contains object that is not array
Minimum repro:
```python
import jax.numpy as jnp
class TestClass:
pass
jnp.save("object.npy", TestClass())
print(jnp.load("object.npy", allow_pickle=True))
```
Output:
```
Traceback (most recent call last):
File ".../lib/python3.9/site-packages/jax/_src/numpy/lax_numpy.py", line 3632, in array
dtype = dtypes._lattice_result_type(*leaves)[0] if leaves else dtypes.float_
File ".../lib/python3.9/site-packages/jax/_src/dtypes.py", line 362, in _lattice_result_type
dtypes, weak_types = zip(*(_dtype_and_weaktype(arg) for arg in args))
File ".../lib/python3.9/site-packages/jax/_src/dtypes.py", line 362, in <genexpr>
dtypes, weak_types = zip(*(_dtype_and_weaktype(arg) for arg in args))
File ".../lib/python3.9/site-packages/jax/_src/dtypes.py", line 251, in _dtype_and_weaktype
return dtype(value), any(value is typ for typ in _weak_types) or is_weakly_typed(value)
File ".../lib/python3.9/site-packages/jax/_src/dtypes.py", line 357, in dtype
raise TypeError(f"Value '{x}' with dtype {dt} is not a valid JAX array "
TypeError: Value '<__main__.TestClass object at 0x7f8528b71160>' with dtype object is not a valid JAX array type. Only arrays of numeric types are supported by JAX.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "repro.py", line 9, in <module>
print(jnp.load("object.npy", allow_pickle=True))
File ".../lib/python3.9/site-packages/jax/_src/numpy/lax_numpy.py", line 652, in load
out = asarray(out)
File ".../lib/python3.9/site-packages/jax/_src/numpy/lax_numpy.py", line 3693, in asarray
return array(a, dtype=dtype, copy=False, order=order)
File ".../lib/python3.9/site-packages/jax/_src/numpy/lax_numpy.py", line 3637, in array
dtype = dtypes._lattice_result_type(*leaves)[0]
File ".../lib/python3.9/site-packages/jax/_src/dtypes.py", line 362, in _lattice_result_type
dtypes, weak_types = zip(*(_dtype_and_weaktype(arg) for arg in args))
File ".../lib/python3.9/site-packages/jax/_src/dtypes.py", line 362, in <genexpr>
dtypes, weak_types = zip(*(_dtype_and_weaktype(arg) for arg in args))
File ".../lib/python3.9/site-packages/jax/_src/dtypes.py", line 251, in _dtype_and_weaktype
return dtype(value), any(value is typ for typ in _weak_types) or is_weakly_typed(value)
File ".../lib/python3.9/site-packages/jax/_src/dtypes.py", line 357, in dtype
raise TypeError(f"Value '{x}' with dtype {dt} is not a valid JAX array "
TypeError: Value '<__main__.TestClass object at 0x7f8528b71160>' with dtype object is not a valid JAX array type. Only arrays of numeric types are supported by JAX.
```
jax: 0.3.1
jaxlib: 0.3.0
numpy: 1.21.2
Python: 3.9.7
Maybe I am doing something wrong, but in previous jax versions (e.g. jax-0.2.25) this used to work. `jnp.load` would return an array containing a single element, the Python object, which is what the numpy version does.
Potentially this commit is what changed the behaviour? 1137aa1
Let me know if you need any more information!
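For completeness, numpy's own loader still round-trips the pickled object on the affected versions, since JAX arrays only hold numeric types anyway:
```python
import numpy as np
obj = np.load("object.npy", allow_pickle=True).item()  # the original TestClass instance
```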
| 2022-02-25T17:28:04 |
|
google/jax | 9,721 | google__jax-9721 | [
"9719"
] | 92cb865b3cc464af9a0ba5b8c81dcb8adef38b46 | diff --git a/jax/_src/random.py b/jax/_src/random.py
--- a/jax/_src/random.py
+++ b/jax/_src/random.py
@@ -53,6 +53,9 @@
### utilities
+def _isnan(x):
+ return lax.ne(x, x)
+
def _check_prng_key(key):
# TODO(frostig): remove once we always enable_custom_prng
if type(key) is prng.PRNGKeyArray:
@@ -1096,7 +1099,7 @@ def _poisson(key, lam, shape, dtype):
# https://github.com/numpy/numpy/blob/v1.18.3/numpy/random/src/distributions/distributions.c#L574
# For lambda < 10, we use the Knuth algorithm; otherwise, we use transformed
# rejection sampling.
- use_knuth = lam < 10
+ use_knuth = _isnan(lam) | (lam < 10)
lam_knuth = lax.select(use_knuth, lam, lax.full_like(lam, 0.0))
# The acceptance probability for rejection sampling maxes out at 89% as
# Ξ» -> β, so pick some arbitrary large value.
| diff --git a/tests/random_test.py b/tests/random_test.py
--- a/tests/random_test.py
+++ b/tests/random_test.py
@@ -791,6 +791,12 @@ def testPoissonZeros(self):
samples = random.poisson(key, lam, shape=(2, 20))
self.assertArraysEqual(samples[:, :10], jnp.zeros_like(samples[:, :10]))
+ def testPoissonCornerCases(self):
+ key = self.seed_prng(0)
+ lam = jnp.array([-1, 0, jnp.nan])
+ samples = random.poisson(key, lam, shape=(3,))
+ self.assertArraysEqual(samples, jnp.array([-1, 0, -1]))
+
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_dtype={}".format(np.dtype(dtype).name), "dtype": dtype}
for dtype in jtu.dtypes.floating))
| jax.random.poisson hangs if lam is NaN
The following causes Python to hang:
```python
import jax
import numpy as np
key = jax.random.PRNGKey(0)
jax.random.poisson(key=key, lam=jnp.array([np.nan]), shape=(1,))
# hangs
```
My expectation is that when lam=np.nan, the output will be np.nan.
| Thanks for the report - it looks like this corner case leads to a very large (but not technically infinite) loop: https://github.com/google/jax/blob/main/jax/_src/random.py#L1104
We should be able to fix this pretty quickly.
The fix is in #9721. Note that we can't return NaN in this case, because the dtype of the returned array is integer. But the fix returns `-1`, similar to other invalid values of `lam`.
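For reference, with the fix in place the corner cases behave like the new regression test: invalid rates (negative or NaN) map to `-1` instead of hanging:
```python
import jax
import jax.numpy as jnp

key = jax.random.PRNGKey(0)
lam = jnp.array([-1.0, 0.0, jnp.nan])
jax.random.poisson(key=key, lam=lam, shape=(3,))
# -> [-1, 0, -1] (int32)
```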
Wow, that was fast. Thanks! | 2022-02-28T19:40:02 |
google/jax | 9,853 | google__jax-9853 | [
"9837"
] | ee6749608a1588f1f458e0e8ad8c9ecc8942aa83 | diff --git a/jax/_src/lax/windowed_reductions.py b/jax/_src/lax/windowed_reductions.py
--- a/jax/_src/lax/windowed_reductions.py
+++ b/jax/_src/lax/windowed_reductions.py
@@ -278,8 +278,6 @@ def _generic_reduce_window_batch_rule(
operands, init_values = util.split_list(batched_args, [num_operands])
operand_bdims, init_value_bdims = util.split_list(batch_dims, [num_operands])
- operand, init = batched_args
- bdim, init_bdim = batch_dims
if any(init_bdim is not None for init_bdim in init_value_bdims):
raise NotImplementedError("reduce_window batching is not implemented for "
"initial values")
| diff --git a/tests/lax_vmap_test.py b/tests/lax_vmap_test.py
--- a/tests/lax_vmap_test.py
+++ b/tests/lax_vmap_test.py
@@ -24,6 +24,7 @@
import numpy as np
import jax
+import jax.numpy as jnp
from jax import dtypes
from jax import lax
@@ -790,6 +791,37 @@ def testSort(self, shape, dimension, arity, bdims, is_stable):
# TODO Collapse
# TODO Scatter
+ # TODO(b/183233858): variadic reduce-window is not implemented on XLA:GPU
+ @jtu.skip_on_devices("gpu")
+ def test_variadic_reduce_window(self):
+ # https://github.com/google/jax/discussions/9818 and
+ # https://github.com/google/jax/issues/9837
+ def normpool(x):
+ norms = jnp.linalg.norm(x, axis=-1)
+ idxs = jnp.arange(x.shape[0])
+
+ def g(a, b):
+ an, ai = a
+ bn, bi = b
+ which = an >= bn
+ return (jnp.where(which, an, bn), jnp.where(which, ai, bi))
+
+ _, idxs = lax.reduce_window((norms, idxs), (-np.inf, -1), g,
+ window_dimensions=(2,), window_strides=(2,),
+ padding=((0, 0),))
+ return x[idxs]
+
+
+ inpt = jnp.array([
+ [1.0, 0.0, 1.0],
+ [2.0, 2.0, 0.0],
+ [3.0, 0.0, 1.0],
+ [0.0, 1.0, 1.0],
+ ])
+ output = jax.vmap(normpool)(inpt[None, ...]) # doesn't crash
+ expected = jnp.array([[[2.0, 2.0, 0.0], [3.0, 0.0, 1.0]]])
+ self.assertAllClose(output, expected, check_dtypes=False)
+
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
| Cannot vmap variadic lax.reduce_window
### Discussed in https://github.com/google/jax/discussions/9818
@mattjj @hawkinsp @YouJiacheng
Minimal code to reproduce
```python
import jax
import jax.numpy as jnp
from jax import lax
def index_maxpool(x):
def g(a, b):
an, ai = a
bn, bi = b
which = an >= bn
return (jnp.where(which, an, bn), jnp.where(which, ai, bi))
_, idxs = lax.reduce_window(
(x, jnp.arange(x.shape[0])),
(-jnp.inf, -1),
g,
window_dimensions=(2,),
window_strides=(2,),
padding=((0, 0),),
)
return idxs
index_maxpool(jnp.ones((10,)))
jax.vmap(index_maxpool)(jnp.ones((10, 10)))
```
Error message
```
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/tmp/ipykernel_26237/2064479805.py in <module>
22
23 index_maxpool(jnp.ones((10,)))
---> 24 jax.vmap(index_maxpool)(jnp.ones((10, 10)))
[... skipping hidden 3 frame]
/tmp/ipykernel_26237/2064479805.py in index_maxpool(x)
11 return (jnp.where(which, an, bn), jnp.where(which, ai, bi))
12
---> 13 _, idxs = lax.reduce_window(
14 (x, jnp.arange(x.shape[0])),
15 (-jnp.inf, -1),
[... skipping hidden 4 frame]
~/base/lib/python3.9/site-packages/jax/_src/lax/windowed_reductions.py in _generic_reduce_window_batch_rule(batched_args, batch_dims, jaxpr, consts, window_dimensions, window_strides, padding, base_dilation, window_dilation)
279 operand_bdims, init_value_bdims = util.split_list(batch_dims, [num_operands])
280
--> 281 operand, init = batched_args
282 bdim, init_bdim = batch_dims
283 if any(init_bdim is not None for init_bdim in init_value_bdims):
ValueError: too many values to unpack (expected 2)
```
Note: there is also an issue when running this code on GPU (see @YouJiacheng's comment in the Discussion thread)
| This might be an easy one! I think Peter basically already solved this in his original commit, except [these two lines were accidentally left in](https://github.com/google/jax/commit/5415306257faede98f6c45f0ce67aeb1cd26b8ff#diff-9abc3334d4a9d008767f88a5df1b9d3fa2b176d5c168c6e47a7d3bcd2c844480R279-R280). That plus not adding tests for this case :P | 2022-03-11T19:38:00 |
google/jax | 9,889 | google__jax-9889 | [
"9888"
] | 4fba0e787f464cc3e68358154f88b1ac13453667 | diff --git a/jax/_src/ops/scatter.py b/jax/_src/ops/scatter.py
--- a/jax/_src/ops/scatter.py
+++ b/jax/_src/ops/scatter.py
@@ -115,7 +115,6 @@ def _scatter_impl(x, y, scatter_op, treedef, static_idx, dynamic_idx,
-
def _get_identity(op, dtype):
"""Get an appropriate identity for a given operation in a given dtype."""
if op is lax.scatter_add:
@@ -123,11 +122,15 @@ def _get_identity(op, dtype):
elif op is lax.scatter_mul:
return 1
elif op is lax.scatter_min:
- if jnp.issubdtype(dtype, jnp.integer):
+ if dtype == dtypes.bool_:
+ return True
+ elif jnp.issubdtype(dtype, jnp.integer):
return jnp.iinfo(dtype).max
return float('inf')
elif op is lax.scatter_max:
- if jnp.issubdtype(dtype, jnp.integer):
+ if dtype == dtypes.bool_:
+ return False
+ elif jnp.issubdtype(dtype, jnp.integer):
return jnp.iinfo(dtype).min
return -float('inf')
else:
| diff --git a/tests/lax_numpy_indexing_test.py b/tests/lax_numpy_indexing_test.py
--- a/tests/lax_numpy_indexing_test.py
+++ b/tests/lax_numpy_indexing_test.py
@@ -1229,6 +1229,49 @@ def fn(data, segment_ids):
self.assertAllClose(grad, np.array([0., 0.], np.float32))
+ @parameterized.named_parameters(itertools.chain.from_iterable(
+ jtu.cases_from_list({
+ "testcase_name": "_{}_{}_num_segments={}_bucket_size={}".format(
+ jtu.format_shape_dtype_string(shape, dtype),
+ reducer.__name__, num_segments, bucket_size),
+ "dtype": dtype, "shape": shape,
+ "reducer": reducer, "op": op, "identity": identity,
+ "num_segments": num_segments, "bucket_size": bucket_size}
+ for dtype in [np.bool_]
+ for shape in [(8,), (7, 4), (6, 4, 2)]
+ for bucket_size in [None, 2]
+ for num_segments in [None, 1, 3])
+ for reducer, op, identity in [
+ (ops.segment_min, np.minimum, True),
+ (ops.segment_max, np.maximum, False),
+ ]))
+ def testSegmentReduceBoolean(self, shape, dtype, reducer, op, identity, num_segments, bucket_size):
+ rng = jtu.rand_default(self.rng())
+ idx_rng = jtu.rand_int(self.rng(), low=-2, high=3)
+ args_maker = lambda: [rng(shape, dtype), idx_rng(shape[:1], jnp.int32)]
+
+ if np.issubdtype(dtype, np.integer):
+ if np.isposinf(identity):
+ identity = np.iinfo(dtype).max
+ elif np.isneginf(identity):
+ identity = np.iinfo(dtype).min
+
+ jnp_fun = lambda data, segment_ids: reducer(
+ data, segment_ids, num_segments=num_segments, bucket_size=bucket_size)
+
+ def np_fun(data, segment_ids):
+ size = num_segments if num_segments is not None else (segment_ids.max() + 1)
+ out = np.full((size,) + shape[1:], identity, dtype)
+ for i, val in zip(segment_ids, data):
+ if 0 <= i < size:
+ out[i] = op(out[i], val).astype(dtype)
+ return out
+
+ self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
+ if num_segments is not None:
+ self._CompileAndCheck(jnp_fun, args_maker)
+
+
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list({
"testcase_name": "_{}_{}_num_segments={}_bucket_size={}".format(
| Unexpected behavior of `segment_max` with boolean arrays
## Problem
`segment_max` applied to boolean arrays seems to generally return `True` for all segments. This is in contrast to the behavior of `segment_min` as well as `min` and `max`.
## Example
```python
import jax.numpy as jnp
from jax.ops import segment_max, segment_min
# max over boolean array
jnp.array([False, False]).max()
# DeviceArray(False, dtype=bool); As expected
# min over boolean array
jnp.array([True, False]).min()
# DeviceArray(False, dtype=bool); As expected
# segment_max over boolean array
segment_max(
jnp.array([True, False, False, False]),
jnp.array([0, 0, 1, 1])
)
# DeviceArray([ True, True], dtype=bool); Expected result was [True, False]
# segment_min over boolean array
segment_min(
jnp.array([True, True, False, False]),
jnp.array([0, 0, 1, 1])
)
# DeviceArray([ True, False], dtype=bool); As expected
```
## Package versions
jax: 0.3.0
jaxlib: 0.3.0
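For reference, a workaround sketch that behaves as expected on the affected versions is to reduce over integers and cast back (this assumes every segment receives at least one element; empty segments would come back as `True` after the cast):
```python
import jax.numpy as jnp
from jax.ops import segment_max

x = jnp.array([True, False, False, False])
ids = jnp.array([0, 0, 1, 1])
segment_max(x.astype(jnp.int32), ids).astype(bool)
# -> [ True, False]
```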
| Thanks for the report - I think it's an issue with the identity for this computation. I'll work on a fix. | 2022-03-15T16:10:54 |
google/jax | 9,890 | google__jax-9890 | [
"9885"
] | 98ad01679460ee886fbc1dd9f184d283343ce234 | diff --git a/jax/_src/lax/linalg.py b/jax/_src/lax/linalg.py
--- a/jax/_src/lax/linalg.py
+++ b/jax/_src/lax/linalg.py
@@ -1461,7 +1461,7 @@ def _tridiagonal_solve_jax(dl, d, du, b, **kw):
def tridiagonal_solve(dl, d, du, b):
r"""Computes the solution of a tridiagonal linear system.
- This function computes the solution of a tridiagonal linear system::
+ This function computes the solution of a tridiagonal linear system:
.. math::
A . X = B
| Add jax.scipy.linalg.eigh_tridiagonal to document
Just letting users know that this algorithm has been officially implemented.
I hope others can avoid wasting time the way I did:
> I checked the documentation and did not find `eigh_tridiagonal`, so I migrated TensorFlow's implementation myself. When I started to compare my code against `jax.scipy.linalg.eigh`, I suddenly found that there is an `eigh_tridiagonal`! (Thanks to VS Code's auto-complete.)
| 2022-03-15T16:38:42 |
||
google/jax | 9,906 | google__jax-9906 | [
"9896"
] | 1ffa285bd641a12385bf609b21bf72df1b13782e | diff --git a/jax/_src/random.py b/jax/_src/random.py
--- a/jax/_src/random.py
+++ b/jax/_src/random.py
@@ -749,6 +749,7 @@ def beta(key: KeyArray,
shape = core.canonicalize_shape(shape)
return _beta(key, a, b, shape, dtype)
+
def _beta(key, a, b, shape, dtype):
if shape is None:
shape = lax.broadcast_shapes(np.shape(a), np.shape(b))
@@ -760,9 +761,13 @@ def _beta(key, a, b, shape, dtype):
key_a, key_b = _split(key)
a = jnp.broadcast_to(a, shape)
b = jnp.broadcast_to(b, shape)
- gamma_a = gamma(key_a, a, shape, dtype)
- gamma_b = gamma(key_b, b, shape, dtype)
- return gamma_a / (gamma_a + gamma_b)
+ log_gamma_a = loggamma(key_a, a, shape, dtype)
+ log_gamma_b = loggamma(key_b, b, shape, dtype)
+ # Compute gamma_a / (gamma_a + gamma_b) without losing precision.
+ log_max = lax.max(log_gamma_a, log_gamma_b)
+ gamma_a_scaled = jnp.exp(log_gamma_a - log_max)
+ gamma_b_scaled = jnp.exp(log_gamma_b - log_max)
+ return gamma_a_scaled / (gamma_a_scaled + gamma_b_scaled)
def cauchy(key: KeyArray,
@@ -840,8 +845,19 @@ def _dirichlet(key, alpha, shape, dtype):
_check_shape("dirichlet", shape, np.shape(alpha)[:-1])
alpha = lax.convert_element_type(alpha, dtype)
- gamma_samples = gamma(key, alpha, shape + np.shape(alpha)[-1:], dtype)
- return gamma_samples / jnp.sum(gamma_samples, axis=-1, keepdims=True)
+
+ # Compute gamma in log space, otherwise small alpha can lead to poor behavior.
+ log_gamma_samples = loggamma(key, alpha, shape + np.shape(alpha)[-1:], dtype)
+ return _softmax(log_gamma_samples, -1)
+
+
+def _softmax(x, axis):
+ """Utility to compute the softmax of x along a given axis."""
+ if not dtypes.issubdtype(x.dtype, np.floating):
+ raise TypeError(f"_softmax only accepts floating dtypes, got {x.dtype}")
+ x_max = jnp.max(x, axis, keepdims=True)
+ unnormalized = jnp.exp(x - lax.stop_gradient(x_max))
+ return unnormalized / unnormalized.sum(axis, keepdims=True)
def exponential(key: KeyArray,
@@ -875,7 +891,7 @@ def _exponential(key, shape, dtype):
return lax.neg(lax.log1p(lax.neg(u)))
-def _gamma_one(key: KeyArray, alpha):
+def _gamma_one(key: KeyArray, alpha, log_space):
# Ref: A simple method for generating gamma variables, George Marsaglia and Wai Wan Tsang
# The algorithm can also be founded in:
# https://en.wikipedia.org/wiki/Gamma_distribution#Generating_gamma-distributed_random_variables
@@ -887,13 +903,20 @@ def _gamma_one(key: KeyArray, alpha):
squeeze_const = _lax_const(alpha, 0.0331)
dtype = lax.dtype(alpha)
- key, subkey = _split(key)
# for alpha < 1, we boost alpha to alpha + 1 and get a sample according to
- # Gamma(alpha) ~ Gamma(alpha+1) * Uniform()^(1 / alpha)
- boost = lax.select(lax.ge(alpha, one),
- one,
- lax.pow(uniform(subkey, (), dtype=dtype), lax.div(one, alpha)))
- alpha = lax.select(lax.ge(alpha, one), alpha, lax.add(alpha, one))
+ # Gamma(alpha) ~ Gamma(alpha+1) * Uniform()^(1 / alpha)
+ # When alpha is very small, this boost can be problematic because it may result
+ # in floating point underflow; for this reason we compute it in log space if
+ # specified by the `log_space` argument:
+ # log[Gamma(alpha)] ~ log[Gamma(alpha + 1)] + log[Uniform()] / alpha
+ # Note that log[Uniform()] ~ Exponential(), but the exponential() function is
+ # computed via log[1 - Uniform()] to avoid taking log(0). We want the generated
+ # sequence to match between log_space=True and log_space=False, so we avoid this
+ # for now to maintain backward compatibility with the original implementation.
+ # TODO(jakevdp) should we change the convention to avoid -inf in log-space?
+ boost_mask = lax.ge(alpha, one)
+ alpha_orig = alpha
+ alpha = lax.select(boost_mask, alpha, lax.add(alpha, one))
d = lax.sub(alpha, one_over_three)
c = lax.div(one_over_three, lax.sqrt(d))
@@ -926,21 +949,42 @@ def _next_kxv(kxv):
return key, X, V, U
# initial state is chosen such that _cond_fn will return True
+ key, subkey = _split(key)
+ u_boost = uniform(subkey, (), dtype=dtype)
_, _, V, _ = lax.while_loop(_cond_fn, _body_fn, (key, zero, one, _lax_const(alpha, 2)))
- z = lax.mul(lax.mul(d, V), boost)
- return lax.select(lax.eq(z, zero), jnp.finfo(z.dtype).tiny, z)
+ if log_space:
+ # TODO(jakevdp): there are negative infinities here due to issues mentioned above. How should
+ # we handle those?
+ log_boost = lax.select(boost_mask, zero, lax.mul(lax.log(u_boost), lax.div(one, alpha_orig)))
+ return lax.add(lax.add(lax.log(d), lax.log(V)), log_boost)
+ else:
+ boost = lax.select(boost_mask, one, lax.pow(u_boost, lax.div(one, alpha_orig)))
+ z = lax.mul(lax.mul(d, V), boost)
+ return lax.select(lax.eq(z, zero), jnp.finfo(z.dtype).tiny, z)
-def _gamma_grad(sample, a):
+def _gamma_grad(sample, a, *, prng_impl, log_space):
+ del prng_impl # unused
samples = jnp.reshape(sample, -1)
alphas = jnp.reshape(a, -1)
+ if log_space:
+ # d[log(sample)] = d[sample] / sample
+ # This requires computing exp(log_sample), which may be zero due to float roundoff.
+ # In this case, we use the same zero-correction used in gamma() above.
+ samples = lax.exp(samples)
+ zero = lax_internal._const(sample, 0)
+ tiny = lax.full_like(samples, jnp.finfo(samples.dtype).tiny)
+ samples = lax.select(lax.eq(samples, zero), tiny, samples)
+ gamma_grad = lambda alpha, sample: lax.random_gamma_grad(alpha, sample) / sample
+ else:
+ gamma_grad = lax.random_gamma_grad
if xla_bridge.get_backend().platform == 'cpu':
- grads = lax.map(lambda args: lax.random_gamma_grad(*args), (alphas, samples))
+ grads = lax.map(lambda args: gamma_grad(*args), (alphas, samples))
else:
- grads = vmap(lax.random_gamma_grad)(alphas, samples)
+ grads = vmap(gamma_grad)(alphas, samples)
return grads.reshape(np.shape(a))
-def _gamma_impl(raw_key, a, *, prng_impl, use_vmap=False):
+def _gamma_impl(raw_key, a, *, prng_impl, log_space, use_vmap=False):
a_shape = jnp.shape(a)
# split key to match the shape of a
key_ndim = len(raw_key.shape) - len(prng_impl.key_shape)
@@ -950,24 +994,24 @@ def _gamma_impl(raw_key, a, *, prng_impl, use_vmap=False):
keys = prng.PRNGKeyArray(prng_impl, keys)
alphas = jnp.reshape(a, -1)
if use_vmap:
- samples = vmap(_gamma_one)(keys, alphas)
+ samples = vmap(partial(_gamma_one, log_space=log_space))(keys, alphas)
else:
- samples = lax.map(lambda args: _gamma_one(*args), (keys, alphas))
+ samples = lax.map(lambda args: _gamma_one(*args, log_space=log_space), (keys, alphas))
return jnp.reshape(samples, a_shape)
-def _gamma_batching_rule(batched_args, batch_dims, *, prng_impl):
+def _gamma_batching_rule(batched_args, batch_dims, *, prng_impl, log_space):
k, a = batched_args
bk, ba = batch_dims
size = next(t.shape[i] for t, i in zip(batched_args, batch_dims) if i is not None)
k = batching.bdim_at_front(k, bk, size)
a = batching.bdim_at_front(a, ba, size)
- return random_gamma_p.bind(k, a, prng_impl=prng_impl), 0
+ return random_gamma_p.bind(k, a, prng_impl=prng_impl, log_space=log_space), 0
random_gamma_p = core.Primitive('random_gamma')
random_gamma_p.def_impl(_gamma_impl)
random_gamma_p.def_abstract_eval(lambda key, a, **_: core.raise_to_shaped(a))
-ad.defjvp2(random_gamma_p, None, lambda tangent, ans, key, a, **_: tangent * _gamma_grad(ans, a))
+ad.defjvp2(random_gamma_p, None, lambda tangent, ans, key, a, **kwds: tangent * _gamma_grad(ans, a, **kwds))
xla.register_translation(random_gamma_p, xla.lower_fun(
partial(_gamma_impl, use_vmap=True),
multiple_results=False, new_style=True))
@@ -995,6 +1039,10 @@ def gamma(key: KeyArray,
Returns:
A random array with the specified dtype and with shape given by ``shape`` if
``shape`` is not None, or else by ``a.shape``.
+
+ See Also:
+ loggamma : sample gamma values in log-space, which can provide improved
+ accuracy for small values of ``a``.
"""
key, _ = _check_prng_key(key)
if not dtypes.issubdtype(dtype, np.floating):
@@ -1003,10 +1051,52 @@ def gamma(key: KeyArray,
dtype = dtypes.canonicalize_dtype(dtype)
if shape is not None:
shape = core.canonicalize_shape(shape)
- return _gamma(key, a, shape, dtype)
+ return _gamma(key, a, shape=shape, dtype=dtype)
-@partial(jit, static_argnums=(2, 3), inline=True)
-def _gamma(key, a, shape, dtype):
+
+def loggamma(key: KeyArray,
+ a: RealArray,
+ shape: Optional[Sequence[int]] = None,
+ dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:
+ """Sample log-gamma random values with given shape and float dtype.
+
+ This function is implemented such that the following will hold for a
+ dtype-appropriate tolerance::
+
+ np.testing.assert_allclose(jnp.exp(loggamma(*args)), gamma(*args), rtol=rtol)
+
+ The benefit of log-gamma is that for samples very close to zero (which occur frequently
+ when `a << 1`) sampling in log space provides better precision.
+
+ Args:
+ key: a PRNG key used as the random key.
+ a: a float or array of floats broadcast-compatible with ``shape``
+ representing the parameter of the distribution.
+ shape: optional, a tuple of nonnegative integers specifying the result
+ shape. Must be broadcast-compatible with ``a``. The default (None)
+ produces a result shape equal to ``a.shape``.
+ dtype: optional, a float dtype for the returned values (default float64 if
+ jax_enable_x64 is true, otherwise float32).
+
+ Returns:
+ A random array with the specified dtype and with shape given by ``shape`` if
+ ``shape`` is not None, or else by ``a.shape``.
+
+ See Also:
+ gamma : standard gamma sampler.
+ """
+ key, _ = _check_prng_key(key)
+ if not dtypes.issubdtype(dtype, np.floating):
+ raise ValueError(f"dtype argument to `gamma` must be a float "
+ f"dtype, got {dtype}")
+ dtype = dtypes.canonicalize_dtype(dtype)
+ if shape is not None:
+ shape = core.canonicalize_shape(shape)
+ return _gamma(key, a, shape=shape, dtype=dtype, log_space=True)
+
+
+@partial(jit, static_argnames=('shape', 'dtype', 'log_space'), inline=True)
+def _gamma(key, a, shape, dtype, log_space=False):
if shape is None:
shape = np.shape(a)
else:
@@ -1015,7 +1105,7 @@ def _gamma(key, a, shape, dtype):
a = lax.convert_element_type(a, dtype)
if np.shape(a) != shape:
a = jnp.broadcast_to(a, shape)
- return random_gamma_p.bind(key.unsafe_raw_array(), a, prng_impl=key.impl)
+ return random_gamma_p.bind(key.unsafe_raw_array(), a, prng_impl=key.impl, log_space=log_space)
@partial(jit, static_argnums=(2, 3, 4), inline=True)
diff --git a/jax/random.py b/jax/random.py
--- a/jax/random.py
+++ b/jax/random.py
@@ -98,6 +98,7 @@
gumbel as gumbel,
laplace as laplace,
logistic as logistic,
+ loggamma as loggamma,
maxwell as maxwell,
multivariate_normal as multivariate_normal,
normal as normal,
| diff --git a/tests/random_test.py b/tests/random_test.py
--- a/tests/random_test.py
+++ b/tests/random_test.py
@@ -648,6 +648,19 @@ def testBeta(self, a, b, dtype):
for samples in [uncompiled_samples, compiled_samples]:
self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.beta(a, b).cdf)
+ def testBetaSmallParameters(self, dtype=np.float32):
+ # Regression test for beta version of https://github.com/google/jax/issues/9896
+ key = self.seed_prng(0)
+ a, b = 0.0001, 0.0002
+ samples = random.beta(key, a, b, shape=(100,), dtype=dtype)
+
+ # With such small parameters, all samples should be exactly zero or one.
+ zeros = samples[samples < 0.5]
+ self.assertAllClose(zeros, jnp.zeros_like(zeros))
+
+ ones = samples[samples >= 0.5]
+ self.assertAllClose(ones, jnp.ones_like(ones))
+
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_dtype={}".format(np.dtype(dtype).name), "dtype": dtype}
for dtype in float_dtypes))
@@ -684,6 +697,21 @@ def testDirichlet(self, alpha, dtype):
for i, a in enumerate(alpha):
self._CheckKolmogorovSmirnovCDF(samples[..., i], scipy.stats.beta(a, alpha_sum - a).cdf)
+ def testDirichletSmallAlpha(self, dtype=np.float32):
+ # Regression test for https://github.com/google/jax/issues/9896
+ key = self.seed_prng(0)
+ alpha = 0.0001 * jnp.ones(3)
+ samples = random.dirichlet(key, alpha, shape=(100,), dtype=dtype)
+
+ # Check that results lie on the simplex.
+ self.assertAllClose(samples.sum(1), jnp.ones(samples.shape[0]),
+ check_dtypes=False, rtol=1E-5)
+
+ # Check that results contain 1 in one of the dimensions:
+ # this is highly likely to be true when alpha is small.
+ self.assertAllClose(samples.max(1), jnp.ones(samples.shape[0]),
+ check_dtypes=False, rtol=1E-5)
+
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_dtype={}".format(np.dtype(dtype).name), "dtype": dtype}
for dtype in float_dtypes))
@@ -698,6 +726,22 @@ def testExponential(self, dtype):
for samples in [uncompiled_samples, compiled_samples]:
self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.expon().cdf)
+ @parameterized.named_parameters(jtu.cases_from_list(
+ {"testcase_name": "_a={}_dtype={}_prng={}".format(a, np.dtype(dtype).name,
+ prng_name),
+ "a": a, "dtype": dtype, "prng_impl": prng_impl}
+ for prng_name, prng_impl in PRNG_IMPLS
+ for a in [0.1, 1., 10.]
+ for dtype in jtu.dtypes.floating))
+ def testGammaVsLogGamma(self, prng_impl, a, dtype):
+ key = prng.seed_with_impl(prng_impl, 0)
+ rand_gamma = lambda key, a: random.gamma(key, a, (10000,), dtype)
+ rand_loggamma = lambda key, a: random.loggamma(key, a, (10000,), dtype)
+ crand_loggamma = jax.jit(rand_loggamma)
+
+ self.assertAllClose(rand_gamma(key, a), jnp.exp(rand_loggamma(key, a)))
+ self.assertAllClose(rand_gamma(key, a), jnp.exp(crand_loggamma(key, a)))
+
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_a={}_dtype={}_prng={}".format(a, np.dtype(dtype).name,
prng_name),
@@ -722,15 +766,22 @@ def testGammaShape(self):
assert x.shape == (3, 2)
@parameterized.named_parameters(jtu.cases_from_list(
- {"testcase_name": "_a={}_prng={}".format(alpha, prng_name),
- "alpha": alpha, "prng_impl": prng_impl}
+ {"testcase_name": "_a={}_prng={}_logspace={}".format(alpha, prng_name, log_space),
+ "alpha": alpha, "log_space": log_space, "prng_impl": prng_impl}
for prng_name, prng_impl in PRNG_IMPLS
+ for log_space in [True, False]
for alpha in [1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3, 1e4]))
- def testGammaGrad(self, prng_impl, alpha):
+ def testGammaGrad(self, log_space, prng_impl, alpha):
rng = prng.seed_with_impl(prng_impl, 0)
alphas = np.full((100,), alpha)
z = random.gamma(rng, alphas)
- actual_grad = jax.grad(lambda x: random.gamma(rng, x).sum())(alphas)
+ if log_space:
+ actual_grad = jax.grad(lambda x: lax.exp(random.loggamma(rng, x)).sum())(alphas)
+ # TODO(jakevdp): this NaN correction is required because we generate negative infinities
+ # in the log-space computation; see related TODO in the source of random._gamma_one().
+ actual_grad = jnp.where(jnp.isnan(actual_grad), 0.0, actual_grad)
+ else:
+ actual_grad = jax.grad(lambda x: random.gamma(rng, x).sum())(alphas)
eps = 0.01 * alpha / (1.0 + np.sqrt(alpha))
cdf_dot = (scipy.stats.gamma.cdf(z, alpha + eps)
| Distribution of jax.random.dirichlet samples is incorrect for small values of the concentration parameters
`jax.random.dirichlet` seems to produce incorrect distributions for small values of the concentration parameters (i.e. where most of the probability mass should be on the vertices of the simplex).
I've only tested this for 3D input (because this can easily be visualised in 2D), so I don't know if it happens for other dimensionalities.
I'm producing samples as follows:
```
samples = jax.random.dirichlet(key, 0.01 * np.array([1.0, 1.0, 1.0]), (100000,))
```
On CPU and TPU, there seems to be an additional vertex added to the simplex (i.e. it's actually 4D, projected into 2D), which lives at the barycenter of the three existing vertices.
On GPU, the problem is even worse, with additional extra vertices at the midpoints between each pair of vertices (so 4 spurious vertices in total).
Visualisations and more info at http://go/jax-dirichlet-simplex-issue
Thanks for the report - I believe this is a floating point precision issue. In both numpy and JAX, the dirichlet distribution is implemented via normalization of an N-dimensional gamma distribution. JAX generates these numbers in float32, while numpy generates them in float64. Additionally, it looks like JAX never rounds a generated value to zero, while numpy does.
Observe the following for JAX:
```python
from jax import random
key = random.PRNGKey(0)
out = random.gamma(key, 0.01, (10000,))
print(out.dtype)
# float32
print(out.min())
# 1.1754944e-38
print((out == out.min()).sum())
# 4160
```
and for NumPy:
```python
import numpy as np
out = np.random.gamma(0.01, size=10000)
print(out.dtype)
# float64
print(out.min())
# 0.0
print((out == out.min()).sum())
# 7
```
The `NaN` values in numpy occur when all three dimensions hit the minimum value of zero (`0 / (3 * 0)` is `NaN`); the added vertex in the JAX version comes from the equivalent situation, where all three dimensions hit the nonzero minimum value generated by `jax.random.gamma` (`a / (3 * a)` is `0.3333`).
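Concretely, here is a minimal sketch of that failure mode in plain NumPy (the numbers are only meant to illustrate the arithmetic):
```python
import numpy as np

tiny = np.finfo(np.float32).tiny        # smallest positive normal float32, ~1.18e-38
g = np.full(3, tiny, dtype=np.float32)  # all three gamma draws hit the floor
print(g / g.sum())                      # [0.33333334 0.33333334 0.33333334] -> the spurious barycenter vertex
```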
The way to fix this would be to allow for infinitely small values to be generated by the gamma distribution, but look at this:
```python
print(np.finfo(np.float32).tiny)
# 1.1754944e-38
```
JAX's gamma sampler is frequently generating the smallest possible nonzero float32 value when α is this small.
So what could we do? We need the sampler to not get "stuck" at the smallest float32 value. ~The easiest thing to do would be to allow `gamma` to generate zeros, in which case JAX would behave like numpy and return NaNs~ (rethinking this: we'd still have issues with quantization near zero leading to spoke-like artifacts in the output distribution). Better would be to find a way to generate log-gamma rather than gamma, in which case precision at the small end would not be limited by the precision of float32. I'm not sure how easy that would be, but it's worth looking into.
What do you think?
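As a rough sketch of what sampling in log space buys us, here is the idea using the `loggamma` sampler from the patch above; normalizing via `logsumexp` is just an illustration of the principle, not necessarily how `dirichlet` itself would be implemented:
```python
import jax.numpy as jnp
from jax import random
from jax.scipy.special import logsumexp

key = random.PRNGKey(0)
alpha = 0.01 * jnp.ones(3)

# Gamma variates in log space: values near zero become large negative logs
# instead of being clamped to float32's smallest normal number.
log_g = random.loggamma(key, alpha, shape=(100000, 3))

# exp(log_g - logsumexp(log_g)) is mathematically g / g.sum(), but the
# normalization happens before the underflowing values are ever materialized.
samples = jnp.exp(log_g - logsumexp(log_g, axis=-1, keepdims=True))

print(samples.max(axis=1).mean())  # close to 1: the mass concentrates on the true vertices
```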
It looks like Numpy uses a different generating approach for small alpha to alleviate a similar issue; see https://github.com/numpy/numpy/blob/v1.22.3/numpy/random/_generator.pyx#L4256-L4281
Unfortunately, it's an iterative algorithm that would not be very performant on accelerators. | 2022-03-16T00:19:08 |
google/jax | 9,909 | google__jax-9909 | [
"9865"
] | c35a3ca0feb0cae5fca5ce3407ce38326c96d932 | diff --git a/docs/autodidax.py b/docs/autodidax.py
--- a/docs/autodidax.py
+++ b/docs/autodidax.py
@@ -92,11 +92,16 @@ def mul(x, y): return bind1(mul_p, x, y)
def neg(x): return bind1(neg_p, x)
def sin(x): return bind1(sin_p, x)
def cos(x): return bind1(cos_p, x)
-def reduce_sum(x, axis=None): return bind1(reduce_sum_p, x, axis=axis)
def greater(x, y): return bind1(greater_p, x, y)
def less(x, y): return bind1(less_p, x, y)
def transpose(x, perm): return bind1(transpose_p, x, perm=perm)
def broadcast(x, shape, axes): return bind1(broadcast_p, x, shape=shape, axes=axes)
+def reduce_sum(x, axis=None):
+ if axis is None:
+ axis = tuple(range(np.ndim(x)))
+ if type(axis) is int:
+ axis = (axis,)
+ return bind1(reduce_sum_p, x, axis=axis)
def bind1(prim, *args, **params):
out, = bind(prim, *args, **params)
@@ -873,8 +878,8 @@ def vectorized_unop_batching_rule(op, axis_size, vals_in, dims_in):
def reduce_sum_batching_rule(axis_size, vals_in, dims_in, *, axis):
(x,), (x_bdim,) = vals_in, dims_in
- new_axis = axis + (x_bdim <= axis)
- out_bdim = x_bdim - (new_axis < x_bdim)
+ new_axis = tuple(ax + (x_bdim <= ax) for ax in axis)
+ out_bdim = x_bdim - sum(ax < x_bdim for ax in axis)
return [reduce_sum(x, new_axis)], [out_bdim]
vmap_rules[reduce_sum_p] = reduce_sum_batching_rule
@@ -1271,8 +1276,10 @@ def vectorized_unop_abstract_eval(x: ShapedArray) -> List[ShapedArray]:
abstract_eval_rules[cos_p] = vectorized_unop_abstract_eval
abstract_eval_rules[neg_p] = vectorized_unop_abstract_eval
-def reduce_sum_abstract_eval(x: ShapedArray, *, axis: int) -> List[ShapedArray]:
- new_shape = [d for i, d in enumerate(x.shape) if i != axis]
+def reduce_sum_abstract_eval(x: ShapedArray, *, axis: Tuple[int, ...]
+ ) -> List[ShapedArray]:
+ axis_ = set(axis)
+ new_shape = [d for i, d in enumerate(x.shape) if i not in axis_]
return [ShapedArray(tuple(new_shape), x.dtype)]
abstract_eval_rules[reduce_sum_p] = reduce_sum_abstract_eval
@@ -1643,7 +1650,7 @@ def reduce_sum_translation(c, in_avals, in_vals, *, axis):
subc = xc.XlaBuilder('add')
shape = _xla_shape(ShapedArray((), x_aval.dtype))
xops.Add(xops.Parameter(subc, 0, shape), xops.Parameter(subc, 1, shape))
- return [xops.Reduce(c, [x], [zero], subc.build(), [axis])]
+ return [xops.Reduce(c, [x], [zero], subc.build(), axis)]
xla_translations[reduce_sum_p] = reduce_sum_translation
def broadcast_translation(c, in_avals, in_vals, *, shape, axes):
@@ -2201,8 +2208,9 @@ def tracers_to_jaxpr(tracers_in: List[PartialEvalTracer],
var = constid_to_var.get(id(val))
if var is None:
aval = raise_to_shaped(get_aval(val))
- var = tracer_to_var[id(t)] = constid_to_var[id(val)] = Var(aval)
+ var = constid_to_var[id(val)] = Var(aval)
constvar_to_val[var] = val
+ tracer_to_var[id(t)] = var
elif isinstance(t.recipe, JaxprEqnRecipe):
if id(t.recipe) not in processed_eqns:
eqns.append(recipe_to_eqn(tracer_to_var, t.recipe))
@@ -2556,6 +2564,11 @@ def add_transpose_rule(cts, x, y):
return [z_bar, z_bar]
transpose_rules[add_p] = add_transpose_rule
+def reduce_sum_transpose_rule(cts, x, *, axis):
+ y_bar, = cts
+ return [broadcast(y_bar, x.aval.shape, axis)]
+transpose_rules[reduce_sum_p] = reduce_sum_transpose_rule
+
def xla_call_transpose_rule(cts, *invals, jaxpr, num_consts):
del num_consts # Unused
undef_primals = [type(x) is UndefPrimal for x in invals]
| Autodidax has some bugs involving reduce_sum
This works in JAX:
```python
import jax
from jax import grad, vmap
import jax.numpy as np
def simple(a):
b = a
t = a + b
return t * b
def f(a):
L0 = np.sum(simple(a))
return L0
def g(a):
dL0_da = vmap(grad(f), in_axes=0)(a)
L1 = np.sum(dL0_da * dL0_da)
return L1
key = jax.random.PRNGKey(0)
print(grad(g)(jax.random.normal(key,(2,4))))
```
However, the same code does not work in autodidax:
```
def simple(a):
b = a
t = a + b
return t * b
def f(a):
L0 = reduce_sum(simple(a))
return L0
def g(a):
dL0_da = vmap(grad(f), in_axes=0)(a)
L1 = reduce_sum(dL0_da * dL0_da)
return L1
print(grad(g)(np.random.rand(2,4)))
```
gives
```
[<ipython-input-27-03f392c346bd>](https://localhost:8080/#) in batched_f(*args)
17 args_flat, in_tree = tree_flatten(args)
18 in_axes_flat, in_tree2 = tree_flatten(in_axes)
---> 19 if in_tree != in_tree2: raise TypeError
20 f_flat, out_tree = flatten_fun(f, in_tree)
21 outs_flat = vmap_flat(f_flat, in_axes_flat, *args_flat)
TypeError:
```
Actually, I was thinking that it would fail because there doesn't appear to be any transpose rule for reduce_sum, but it doesn't seem that Autodidax got that far. What I was actually trying to do was see how Autodidax formulated the transpose rule for sum, because if you support vmap + arbitrary dims to reduce over, it gets pretty complicated.
| Thanks for finding this! (And I'm glad you're looking at Autodidax!) I'll fix it.
But in the meantime maybe take a look at [JAX's transpose rule for `reduce_sum`](https://github.com/google/jax/blob/main/jax/_src/lax/lax.py#L3560-L3566) if you haven't already. It's basically just a call to broadcast. What complexity do you have in mind?
Oh this is great! I think BroadcastInDim answers the question; in PyTorch we don't natively have this operator so we have to manually implement it with unsqueezes and expands.
On the off chance it's useful, `broadcast_in_dim` can be implemented in [a few lines of NumPy like this](https://github.com/google/jax/blob/c35a3ca0feb0cae5fca5ce3407ce38326c96d932/jax/_src/lax_reference.py#L242). We handled `np.sum` directly in the original Autograd [this way](https://github.com/HIPS/autograd/blob/01eacff7a4f12e6f7aebde7c4cb4c1c2633f217d/autograd/numpy/numpy_vjps.py#L297-L300) with a [helper](https://github.com/HIPS/autograd/blob/01eacff7a4f12e6f7aebde7c4cb4c1c2633f217d/autograd/numpy/numpy_vjps.py#L274-L285). | 2022-03-16T03:07:48 |
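For reference, the reshape-then-broadcast trick behind both of those links fits in a few lines of NumPy. The function names and the choice of passing the *kept* dimensions as `broadcast_dimensions` follow the lax convention; treat this as a sketch rather than the exact library code:
```python
import numpy as np

def broadcast_in_dim(operand, shape, broadcast_dimensions):
    # Put each operand dimension at its target position, size 1 everywhere else,
    # then let NumPy broadcasting expand the result to the full output shape.
    in_reshape = np.ones(len(shape), dtype=np.int64)
    for i, bd in enumerate(broadcast_dimensions):
        in_reshape[bd] = operand.shape[i]
    return np.broadcast_to(np.reshape(operand, in_reshape), shape)

def reduce_sum_transpose(cotangent, in_shape, axis):
    # Transpose of a sum over `axis`: every input position along the summed
    # axes receives the same cotangent, i.e. a broadcast back to in_shape.
    kept = [d for d in range(len(in_shape)) if d not in set(axis)]
    return broadcast_in_dim(np.asarray(cotangent), in_shape, kept)

ct = np.arange(2.0)  # cotangent for a sum over axis 1 of a (2, 4) input
print(reduce_sum_transpose(ct, (2, 4), axis=(1,)))
# [[0. 0. 0. 0.]
#  [1. 1. 1. 1.]]
```
In an eager framework without a native `broadcast_in_dim`, the same thing is the unsqueeze-at-the-reduced-axes-then-expand pattern mentioned above.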