| repo | pull_number | instance_id | issue_numbers | base_commit | patch | test_patch | problem_statement | hints_text | created_at |
|---|---|---|---|---|---|---|---|---|---|
google/jax
| 19,086 |
google__jax-19086
|
[
"19076"
] |
14fe47c5b738918a074dbb450a3fff8ed8227f69
|
diff --git a/jax/_src/lax/linalg.py b/jax/_src/lax/linalg.py
--- a/jax/_src/lax/linalg.py
+++ b/jax/_src/lax/linalg.py
@@ -1159,13 +1159,13 @@ def body(k, state):
else:
magnitude = ufuncs.abs(a[:, k])
i = jnp.argmax(jnp.where(m_idx >= k, magnitude, -jnp.inf))
- pivot = pivot.at[k].set(i)
+ pivot = pivot.at[k].set(i.astype(pivot.dtype))
a = a.at[[k, i],].set(a[[i, k],])
perm = perm.at[[i, k],].set(perm[[k, i],])
# a[k+1:, k] /= a[k, k], adapted for loop-invariant shapes
x = a[k, k]
- a = a.at[:, k].set(jnp.where(m_idx > k, a[:, k] / x, a[:, k]))
+ a = a.at[:, k].set(jnp.where((m_idx > k) & (x != 0), a[:, k] / x, a[:, k]))
# a[k+1:, k+1:] -= jnp.outer(a[k+1:, k], a[k, k+1:])
a = a - jnp.where((m_idx[:, None] > k) & (n_idx[None, :] > k),
|
diff --git a/tests/linalg_test.py b/tests/linalg_test.py
--- a/tests/linalg_test.py
+++ b/tests/linalg_test.py
@@ -23,7 +23,7 @@
import scipy.linalg
import scipy as osp
-from absl.testing import absltest
+from absl.testing import absltest, parameterized
import jax
from jax import jit, grad, jvp, vmap
@@ -32,6 +32,7 @@
from jax import scipy as jsp
from jax._src.lib import version as jaxlib_version
from jax._src import config
+from jax._src.lax import linalg as lax_linalg
from jax._src import test_util as jtu
from jax._src import xla_bridge
from jax._src.numpy.util import promote_dtypes_inexact
@@ -109,7 +110,6 @@ def testDetGrad(self, shape, dtype):
jtu.check_grads(jnp.linalg.det, (a,), 2, atol=1e-1, rtol=1e-1)
# make sure there are no NaNs when a matrix is zero
if len(shape) == 2:
- pass
jtu.check_grads(
jnp.linalg.det, (jnp.zeros_like(a),), 1, atol=1e-1, rtol=1e-1)
else:
@@ -1279,6 +1279,13 @@ def testLuOfSingularMatrix(self):
p, l, u = jsp.linalg.lu(x)
self.assertAllClose(x, np.matmul(p, np.matmul(l, u)))
+ @parameterized.parameters(lax_linalg.lu, lax_linalg._lu_python)
+ def testLuOnZeroMatrix(self, lu):
+ # Regression test for https://github.com/google/jax/issues/19076
+ x = jnp.zeros((2, 2), dtype=np.float32)
+ x_lu, _, _ = lu(x)
+ self.assertArraysEqual(x_lu, x)
+
@jtu.sample_product(
shape=[(1, 1), (4, 5), (10, 5), (10, 10), (6, 7, 7)],
dtype=float_types + complex_types,
|
Default lowering pass for lu_p has an accuracy issue.
### Description
The background is that I am developing another device backend for JAX, and I found that the default lowering pass for lu_p has an accuracy issue. It can be reproduced on the NVIDIA platform.
By default, running `python tests/linalg_test.py NumpyLinalgTest.testDetGrad1` works well.
But if I comment out the CUDA lowering pass [here](https://github.com/google/jax/blob/jaxlib-v0.4.20/jax/_src/lax/linalg.py#L1387-L1390), it falls back to the default pass [here](https://github.com/google/jax/blob/jaxlib-v0.4.20/jax/_src/lax/linalg.py#L1378), and running the UT then raises an error:
```
AssertionError:
Not equal to tolerance rtol=0.1, atol=0.1
JVP primal
x and y nan location mismatch:
x: array(nan, dtype=float32)
y: array(0., dtype=float32)
```
### What jax/jaxlib version are you using?
jax 0.4.20, jaxlib 0.4.20+cuda11.cudnn86
### Which accelerator(s) are you using?
GPU
### Additional system info?
python 3.9
### NVIDIA GPU info
_No response_
|
Hi - thanks for the report. It looks like this `_lu_python` implementation is dead code: it is never invoked, because there are platform-specific lowering rules for `cpu`, `cuda`, `rocm`, and `tpu`, which are all the platforms JAX supports.
With that in mind, I think we could probably delete this code, as it is unused. Would that resolve your issue?
Ah, actually it is not dead code, because it is used in the `jax2tf` lowering path here: https://github.com/google/jax/blob/42ae8432185bf03f61ddd2e7bc279d3abb5247fd/jax/experimental/jax2tf/jax2tf.py#L3299
The issue is that the `_lu_python` implementation does not handle the gradient of the determinant for zero-valued inputs. Here's a self-contained reproduction of the issue (run on CPU):
```python
import jax.numpy as jnp
from jax._src.interpreters import mlir
from jax.lax import linalg
import jax.test_util as jtu
x = jnp.zeros((3, 3))
assert x.device().platform == 'cpu'
del mlir._platform_specific_lowerings['cpu'][linalg.lu_p] # this ensures that the default lowering is used.
jtu.check_grads(jnp.linalg.det, (x,), order=1)
# Not equal to tolerance rtol=0.002, atol=0.002
# JVP primal
# x and y nan location mismatch:
# x: array(nan, dtype=float32)
# y: array(0., dtype=float32)
```
Even more minimal:
```python
import jax.numpy as jnp
from jax._src.lax import linalg
x = jnp.zeros((2, 2))
print(linalg.lu(x)[0])
# [[0. 0.]
# [0. 0.]]
print(linalg._lu_python(x)[0])
# [[ 0. 0.]
# [nan nan]]
```
The root cause is the division operation here: https://github.com/google/jax/blob/b07d176419552dd7147da8002aa80fe4b42bc917/jax/_src/lax/linalg.py#L1166-L1168
When `a` is all zeros, this is `0 / 0`, which becomes `NaN`.
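The fix in the patch above guards that division; a minimal toy sketch of the failure and of the guard, using a single column rather than the real `_lu_python` loop:
```python
import jax.numpy as jnp

a_col = jnp.zeros(3)   # a column of an all-zero matrix
x = a_col[0]           # the pivot value, here 0.0

naive = a_col / x                               # 0 / 0 -> NaN
guarded = jnp.where(x != 0, a_col / x, a_col)   # leave the column unchanged when the pivot is 0

print(naive)    # [nan nan nan]
print(guarded)  # [0. 0. 0.]
```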
| 2023-12-21T19:16:31 |
google/jax
| 19,166 |
google__jax-19166
|
[
"19150"
] |
697f17adf12d67f69cc1afeba7a385ee843c25df
|
diff --git a/jax/_src/scipy/stats/binom.py b/jax/_src/scipy/stats/binom.py
--- a/jax/_src/scipy/stats/binom.py
+++ b/jax/_src/scipy/stats/binom.py
@@ -33,7 +33,7 @@ def logpmf(k: ArrayLike, n: ArrayLike, p: ArrayLike, loc: ArrayLike = 0) -> Arra
)
log_linear_term = lax.add(xlogy(y, p), xlog1py(lax.sub(n, y), lax.neg(p)))
log_probs = lax.add(comb_term, log_linear_term)
- return jnp.where(lax.lt(k, loc), -jnp.inf, log_probs)
+ return jnp.where(lax.ge(k, loc) & lax.lt(k, loc + n + 1), log_probs, -jnp.inf)
@_wraps(osp_stats.nbinom.pmf, update_doc=False)
|
diff --git a/tests/scipy_stats_test.py b/tests/scipy_stats_test.py
--- a/tests/scipy_stats_test.py
+++ b/tests/scipy_stats_test.py
@@ -1159,6 +1159,10 @@ def args_maker():
tol=5e-4)
self._CompileAndCheck(lax_fun, args_maker, rtol=tol, atol=tol)
+ def testBinomPmfOutOfRange(self):
+ # Regression test for https://github.com/google/jax/issues/19150
+ self.assertEqual(lsp_stats.binom.pmf(k=6.5, n=5, p=0.8), 0.0)
+
def testIssue972(self):
self.assertAllClose(
np.ones((4,), np.float32),
|
Unexpected behavior of `jax.scipy.stats.binom.pmf`
### Description
The PMF of a random variable should be zero outside of its support. While plotting the graph for `jax.scipy.stats.binom.pmf`, I noticed that for $n>5$ and $p>0.5$ there are some oscillations in the values of the PMF which should not be there. For evidence, I am attaching a plot too.
```python
import jax
from jax import numpy as jnp
from matplotlib import pyplot as plt
x = jnp.linspace(-1, 10, 1000)
xxf = jax.scipy.stats.binom.pmf(k=x, n=5, p=0.8)
plt.plot(x, xxf)
plt.tight_layout()
plt.show()
```

The side to the left of zero is as expected.
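For reference, the regression test added in the patch above captures the expected behavior: once the log-PMF is masked to the support, an out-of-range `k` yields probability zero. A minimal check mirroring that test (it passes only with the fix applied):
```python
import jax.scipy.stats as jstats

# k = 6.5 is outside the support of Binomial(n=5, p=0.8), so the PMF should be exactly 0.
assert jstats.binom.pmf(k=6.5, n=5, p=0.8) == 0.0
```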
### What jax/jaxlib version are you using?
jax v0.4.23
### Which accelerator(s) are you using?
CPU
| 2024-01-02T18:54:10 |
|
google/jax
| 19,233 |
google__jax-19233
|
[
"1100"
] |
9461b12a3f20e8b113b43af36acd2e6f2ce1c6de
|
diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -53,13 +53,14 @@
from jax._src.custom_derivatives import custom_jvp
from jax._src import dispatch
from jax._src import dtypes
+from jax._src import xla_bridge
from jax._src.api_util import _ensure_index_tuple
from jax._src.array import ArrayImpl
from jax._src.core import ShapedArray, ConcreteArray
from jax._src.lax.lax import (_array_copy, _sort_lt_comparator,
_sort_le_comparator, PrecisionLike)
from jax._src.lax import lax as lax_internal
-from jax._src.lib import xla_client as xc
+from jax._src.lib import xla_client as xc, xla_extension_version
from jax._src.numpy import reductions
from jax._src.numpy import ufuncs
from jax._src.numpy import util
@@ -2121,9 +2122,25 @@ def array(object: Any, dtype: DTypeLike | None = None, copy: bool = True,
# to be used for type inference below.
if isinstance(object, (bool, int, float, complex)):
_ = dtypes.coerce_to_array(object, dtype)
+ elif not isinstance(object, Array):
+ # Check if object supports any of the data exchange protocols
+ # (except dlpack, see data-apis/array-api#301). If it does,
+ # consume the object as jax array and continue (but not return) so
+ # that other array() arguments get processed against the input
+ # object.
+ #
+ # Notice that data exchange protocols define dtype in the
+ # corresponding data structures and it may not be available as
+ # object.dtype. So, we'll resolve the protocols here before
+ # evaluating object.dtype.
+ if hasattr(object, '__jax_array__'):
+ object = object.__jax_array__()
+ elif hasattr(object, '__cuda_array_interface__'):
+ if xla_extension_version >= 237:
+ cai = object.__cuda_array_interface__
+ backend = xla_bridge.get_backend("cuda")
+ object = xc._xla.cuda_array_interface_to_buffer(cai, backend)
- if hasattr(object, '__jax_array__'):
- object = object.__jax_array__()
object = tree_map(lambda leaf: leaf.__jax_array__()
if hasattr(leaf, "__jax_array__") else leaf, object)
leaves = tree_leaves(object, is_leaf=lambda x: x is None)
|
diff --git a/tests/array_interoperability_test.py b/tests/array_interoperability_test.py
--- a/tests/array_interoperability_test.py
+++ b/tests/array_interoperability_test.py
@@ -274,6 +274,87 @@ def testJaxToCuPy(self, shape, dtype):
z.__cuda_array_interface__["data"][0])
self.assertAllClose(x, cupy.asnumpy(z))
+ @unittest.skipIf(xla_extension_version < 237, "Requires newer jaxlib")
+ @jtu.sample_product(
+ shape=all_shapes,
+ dtype=jtu.dtypes.supported(cuda_array_interface_dtypes),
+ )
+ @unittest.skipIf(not cupy, "Test requires CuPy")
+ @jtu.run_on_devices("cuda")
+ def testCuPyToJax(self, shape, dtype):
+ rng = jtu.rand_default(self.rng())
+ x = rng(shape, dtype)
+ y = cupy.asarray(x)
+ z = jnp.array(y, copy=False) # this conversion uses dlpack protocol
+ self.assertEqual(z.dtype, dtype)
+ self.assertEqual(y.__cuda_array_interface__["data"][0],
+ z.__cuda_array_interface__["data"][0])
+ self.assertAllClose(np.asarray(z), cupy.asnumpy(y))
+
+ @unittest.skipIf(xla_extension_version < 237, "Requires newer jaxlib")
+ @jtu.sample_product(
+ shape=all_shapes,
+ dtype=jtu.dtypes.supported(cuda_array_interface_dtypes),
+ )
+ @jtu.run_on_devices("cuda")
+ def testCaiToJax(self, shape, dtype):
+ if xb.using_pjrt_c_api():
+ self.skipTest("CUDA Array Interface support is incomplete in the PJRT C API") # TODO(jakevdp)
+ rng = jtu.rand_default(self.rng())
+ x = rng(shape, dtype)
+
+ # using device with highest device_id for testing the correctness
+ # of detecting the device id from a pointer value
+ device = jax.devices('cuda')[-1]
+ with jax.default_device(device):
+ y = jnp.array(x, dtype=dtype)
+ self.assertEqual(y.dtype, dtype)
+
+ # Using a jax array CAI provider support to construct an object
+ # that implements the CUDA Array Interface, versions 2 and 3.
+ cai = y.__cuda_array_interface__
+ stream = tuple(y.devices())[0].get_stream_for_external_ready_events()
+
+ class CAIWithoutStridesV2:
+ __cuda_array_interface__ = cai.copy()
+ __cuda_array_interface__["version"] = 2
+ # CAI version 2 may not define strides and does not define stream
+ __cuda_array_interface__.pop("strides", None)
+ __cuda_array_interface__.pop("stream", None)
+
+ class CAIWithoutStrides:
+ __cuda_array_interface__ = cai.copy()
+ __cuda_array_interface__["version"] = 3
+ __cuda_array_interface__["strides"] = None
+ __cuda_array_interface__["stream"] = None # default stream
+
+ class CAIWithStrides:
+ __cuda_array_interface__ = cai.copy()
+ __cuda_array_interface__["version"] = 3
+ strides = (dtype.dtype.itemsize,) if shape else ()
+ for s in reversed(shape[1:]):
+ strides = (strides[0] * s, *strides)
+ __cuda_array_interface__['strides'] = strides
+ __cuda_array_interface__["stream"] = stream
+
+ for CAIObject in [CAIWithoutStridesV2, CAIWithoutStrides,
+ CAIWithStrides]:
+ z = jnp.array(CAIObject(), copy=False)
+ self.assertEqual(y.__cuda_array_interface__["data"][0],
+ z.__cuda_array_interface__["data"][0])
+ self.assertAllClose(x, z)
+ if 0 in shape:
+ # the device id detection from a zero pointer value is not
+ # possible
+ pass
+ else:
+ self.assertEqual(y.devices(), z.devices())
+
+ z = jnp.array(CAIObject(), copy=True)
+ if 0 not in shape:
+ self.assertNotEqual(y.__cuda_array_interface__["data"][0],
+ z.__cuda_array_interface__["data"][0])
+ self.assertAllClose(x, z)
class Bfloat16Test(jtu.JaxTestCase):
|
Support __cuda_array_interface__ on GPU
https://numba.pydata.org/numba-doc/dev/cuda/cuda_array_interface.html
It would not be hard to make `DeviceArray` implement this interface on GPU.
It would be slightly harder to support wrapping a `DeviceArray` around an existing CUDA array, but not that hard.
|
I think even just supporting one of the directions (i.e. making `DeviceArray` implement this interface on GPU) would already be a great addition.
I would be happy to help, but I am not sure where to find the pointer to GPU memory / what else to pay attention to.
TensorFlow now supports dlpack: https://github.com/VoVAllen/tf-dlpack/issues/3
PR #2133 added `__cuda_array_interface__` export. You'll need a jaxlib built from GitHub head or you'll need to wait for us to make another jaxlib wheel release.
Because of https://github.com/pytorch/pytorch/issues/32868 you can't directly import the resulting arrays to PyTorch. But because of https://github.com/cupy/cupy/issues/2616 you can "launder" the array via CuPy and into PyTorch if you want.
(Another option for interoperability is DLPack, which JAX supports at Github head, in both directions.)
Could this be reopened until import support is added as well?
I don't follow. We support both directions, I believe?
Edit: I am wrong, apparently we don't support imports.
> Edit: I am wrong, apparently we don't support imports.
Yeah this need came up again recently ( cc @leofang @quasiben ).
Although note that DLPack imports should work, so that's an option if the exporter supports DLPack.
Thanks John! Yeah we just finished a GPU Hackathon, and a few of our teams evaluating JAX asked us why JAX can't work with other libraries like CuPy and PyTorch _bidirectionally_. It'd be very useful, say, to do autograd in JAX, postprocess in CuPy, then bring it back to JAX.
Also: I haven't tried this, but since CuPy supports both `__cuda_array_interface__` and DLPack, you can most likely "launder" an array via CuPy into JAX:
* export the array via `__cuda_array_interface__` to CuPy.
* export the array via DLPack from CuPy.
* import the DLPack into JAX.
(Obviously this isn't ideal, but it might unblock you.)
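A rough sketch of that laundering path, assuming CuPy and Numba are installed alongside a CUDA build of JAX (exact DLPack entry points vary a bit across versions):
```python
import numba.cuda
import numpy as np
import cupy
import jax.dlpack

# Source buffer from a library that only exports __cuda_array_interface__ (Numba here).
src = numba.cuda.to_device(np.arange(4, dtype=np.float32))

# 1. CuPy consumes the CUDA Array Interface.
cp_arr = cupy.asarray(src)

# 2. Export from CuPy via DLPack and import into JAX.
jax_arr = jax.dlpack.from_dlpack(cp_arr)  # older JAX may need jax.dlpack.from_dlpack(cp_arr.toDlpack())
print(jax_arr)  # a jax.Array backed by the same GPU buffer where zero-copy is supported
```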
Hi @hawkinsp, I recently pm'd @apaszke on an occasion where this support was mentioned. It'd be nice if JAX could prioritize the bi-directional support for the CUDA Array Interface (and update to [the latest v3](https://numba.readthedocs.io/en/latest/cuda/cuda_array_interface.html) protocol, in which the synchronization semantics are specified).
As you pointed out in a DLPack issue (https://github.com/dmlc/dlpack/issues/50), DLPack lacks support for complex numbers and this is unlikely to be resolved in the foreseeable future. For array libraries this is simply not an acceptable workaround, and it is actually a blocker for several applications that I am aware of.
Thanks, and happy new year!
> Could this be reopened until import support is added as well?
Hi!
I was wondering if there is any update on this. Thanks!
Miguel
I'm very interested in this too
@hawkinsp this is seeing internal traction; given how some of the JAX internals have evolved (i.e. arrays, shmaps, etc., in the context of MGMN), is there work being done here?
I would be interested in this.
Just FYI, a new Python package [pydlpack](https://github.com/pearu/pydlpack) has been released that supports bidirectional data exchange of many array providers and consumers.
Here follows an example of zero-copy data exchange between jax and torch:
```python
>>> from dlpack import asdlpack
>>> a1 = jax.numpy.array([[1, 2], [3, 4]])
>>> t1 = torch.from_dlpack(asdlpack(a1))
>>> t1
tensor([[1, 2],
[3, 4]], device='cuda:0', dtype=torch.int32)
>>> t2 = torch.tensor([[5, 6], [7, 8]]).cuda()
>>> a2 = jax.numpy.from_dlpack(asdlpack(t2))
>>> a2
Array([[5, 6],
[7, 8]], dtype=int32)
```
Another example is exchanging CUDA buffers between jax and numba:
```python
>>> from dlpack import asdlpack
>>> import numba.cuda, numpy, jax
>>> a = numba.cuda.to_device(numpy.array([[1, 2], [3, 4]]))
>>> arr = jax.numpy.from_dlpack(asdlpack(a))
>>> arr
Array([[1, 2],
[3, 4]], dtype=int32)
```
| 2024-01-07T17:33:06 |
google/jax
| 19,379 |
google__jax-19379
|
[
"19376"
] |
3e8067060e3e851c68d930bc7f5117f926453b85
|
diff --git a/jax/_src/random.py b/jax/_src/random.py
--- a/jax/_src/random.py
+++ b/jax/_src/random.py
@@ -69,12 +69,18 @@ def _isnan(x: ArrayLike) -> Array:
return lax.ne(x, x)
-def _check_prng_key(key: KeyArrayLike) -> tuple[KeyArray, bool]:
+# TODO(jakevdp) Finalize batched input deprecation by setting error_on_batched=True.
+# FutureWarning Added 2024-01-17
+def _check_prng_key(name: str, key: KeyArrayLike, *,
+ allow_batched: bool = False,
+ error_on_batched: bool = False) -> tuple[KeyArray, bool]:
if isinstance(key, Array) and dtypes.issubdtype(key.dtype, dtypes.prng_key):
- return key, False
+ wrapped_key = key
+ wrapped = False
elif _arraylike(key):
# Call random_wrap here to surface errors for invalid keys.
wrapped_key = prng.random_wrap(key, impl=default_prng_impl())
+ wrapped = True
if config.legacy_prng_key.value == 'error':
raise ValueError(
'Legacy uint32 key array passed as key to jax.random function. '
@@ -91,10 +97,20 @@ def _check_prng_key(key: KeyArrayLike) -> tuple[KeyArray, bool]:
'Raw arrays as random keys to jax.random functions are deprecated. '
'Assuming valid threefry2x32 key for now.',
FutureWarning)
- return wrapped_key, True
else:
raise TypeError(f'unexpected PRNG key type {type(key)}')
+ if (not allow_batched) and wrapped_key.ndim:
+ msg = (f"{name} accepts a single key, but was given a key array of "
+ f"shape {np.shape(key)} != (). Use jax.vmap for batching.")
+ if error_on_batched:
+ raise ValueError(msg)
+ else:
+ warnings.warn(msg + " In a future JAX version, this will be an error.",
+ FutureWarning, stacklevel=3)
+
+ return wrapped_key, wrapped
+
def _return_prng_keys(was_wrapped, key):
# TODO(frostig): remove once we always enable_custom_prng
@@ -245,10 +261,7 @@ def fold_in(key: KeyArrayLike, data: IntegerArray) -> KeyArray:
A new PRNG key that is a deterministic function of the inputs and is
statistically safe for producing a stream of new pseudo-random values.
"""
- key, wrapped = _check_prng_key(key)
- if np.ndim(key):
- raise TypeError("fold_in accepts a single key, but was given a key array of"
- f"shape {np.shape(key)} != (). Use jax.vmap for batching.")
+ key, wrapped = _check_prng_key("fold_in", key, error_on_batched=True)
if np.ndim(data):
raise TypeError("fold_in accepts a scalar, but was given an array of"
f"shape {np.shape(data)} != (). Use jax.vmap for batching.")
@@ -262,7 +275,7 @@ def _split(key: KeyArray, num: int | tuple[int, ...] = 2) -> KeyArray:
# to always enable_custom_prng
assert jnp.issubdtype(key.dtype, dtypes.prng_key)
if key.ndim:
- raise TypeError("split accepts a single key, but was given a key array of"
+ raise TypeError("split accepts a single key, but was given a key array of "
f"shape {key.shape} != (). Use jax.vmap for batching.")
shape = tuple(num) if isinstance(num, Sequence) else (num,)
return prng.random_split(key, shape=shape)
@@ -278,7 +291,7 @@ def split(key: KeyArrayLike, num: int | tuple[int, ...] = 2) -> KeyArray:
Returns:
An array-like object of `num` new PRNG keys.
"""
- typed_key, wrapped = _check_prng_key(key)
+ typed_key, wrapped = _check_prng_key("split", key, error_on_batched=True)
return _return_prng_keys(wrapped, _split(typed_key, num))
@@ -288,7 +301,7 @@ def _key_impl(keys: KeyArray) -> PRNGImpl:
return keys_dtype._impl
def key_impl(keys: KeyArrayLike) -> Hashable:
- typed_keys, _ = _check_prng_key(keys)
+ typed_keys, _ = _check_prng_key("key_impl", keys, allow_batched=True)
return PRNGSpec(_key_impl(typed_keys))
@@ -298,7 +311,7 @@ def _key_data(keys: KeyArray) -> Array:
def key_data(keys: KeyArrayLike) -> Array:
"""Recover the bits of key data underlying a PRNG key array."""
- keys, _ = _check_prng_key(keys)
+ keys, _ = _check_prng_key("key_data", keys, allow_batched=True)
return _key_data(keys)
@@ -350,7 +363,7 @@ def bits(key: KeyArrayLike,
Returns:
A random array with the specified shape and dtype.
"""
- key, _ = _check_prng_key(key)
+ key, _ = _check_prng_key("bits", key)
if dtype is None:
dtype = dtypes.canonicalize_dtype(jnp.uint)
else:
@@ -383,7 +396,7 @@ def uniform(key: KeyArrayLike,
Returns:
A random array with the specified shape and dtype.
"""
- key, _ = _check_prng_key(key)
+ key, _ = _check_prng_key("uniform", key)
dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.floating):
@@ -452,7 +465,7 @@ def randint(key: KeyArrayLike,
Returns:
A random array with the specified shape and dtype.
"""
- key, _ = _check_prng_key(key)
+ key, _ = _check_prng_key("randint", key)
dtypes.check_user_dtype_supported(dtype)
dtype = dtypes.canonicalize_dtype(dtype)
shape = core.canonicalize_shape(shape)
@@ -535,7 +548,7 @@ def shuffle(key: KeyArrayLike, x: ArrayLike, axis: int = 0) -> Array:
msg = ("jax.random.shuffle is deprecated and will be removed in a future release. "
"Use jax.random.permutation with independent=True.")
warnings.warn(msg, FutureWarning)
- key, _ = _check_prng_key(key)
+ key, _ = _check_prng_key("shuffle", key)
return _shuffle(key, x, axis) # type: ignore
@@ -556,7 +569,7 @@ def permutation(key: KeyArrayLike,
Returns:
A shuffled version of x or array range
"""
- key, _ = _check_prng_key(key)
+ key, _ = _check_prng_key("permutation", key)
check_arraylike("permutation", x)
axis = canonicalize_axis(axis, np.ndim(x) or 1)
if not np.ndim(x):
@@ -630,7 +643,7 @@ def choice(key: KeyArrayLike,
Returns:
An array of shape `shape` containing samples from `a`.
"""
- key, _ = _check_prng_key(key)
+ key, _ = _check_prng_key("choice", key)
if not isinstance(shape, Sequence):
raise TypeError("shape argument of jax.random.choice must be a sequence, "
f"got {shape}")
@@ -697,7 +710,7 @@ def normal(key: KeyArrayLike,
Returns:
A random array with the specified shape and dtype.
"""
- key, _ = _check_prng_key(key)
+ key, _ = _check_prng_key("normal", key)
dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.inexact):
raise ValueError(f"dtype argument to `normal` must be a float or complex dtype, "
@@ -764,7 +777,7 @@ def multivariate_normal(key: KeyArrayLike,
``shape + mean.shape[-1:]`` if ``shape`` is not None, or else
``broadcast_shapes(mean.shape[:-1], cov.shape[:-2]) + mean.shape[-1:]``.
"""
- key, _ = _check_prng_key(key)
+ key, _ = _check_prng_key("multivariate_normal", key)
dtypes.check_user_dtype_supported(dtype)
mean, cov = promote_dtypes_inexact(mean, cov)
if method not in {'svd', 'eigh', 'cholesky'}:
@@ -843,7 +856,7 @@ def truncated_normal(key: KeyArrayLike,
``shape`` is not None, or else by broadcasting ``lower`` and ``upper``.
Returns values in the open interval ``(lower, upper)``.
"""
- key, _ = _check_prng_key(key)
+ key, _ = _check_prng_key("truncated_normal", key)
dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `truncated_normal` must be a float "
@@ -901,7 +914,7 @@ def bernoulli(key: KeyArrayLike,
A random array with boolean dtype and shape given by ``shape`` if ``shape``
is not None, or else ``p.shape``.
"""
- key, _ = _check_prng_key(key)
+ key, _ = _check_prng_key("bernoulli", key)
dtype = dtypes.canonicalize_dtype(lax.dtype(p))
if shape is not None:
shape = core.as_named_shape(shape)
@@ -952,7 +965,7 @@ def beta(key: KeyArrayLike,
A random array with the specified dtype and shape given by ``shape`` if
``shape`` is not None, or else by broadcasting ``a`` and ``b``.
"""
- key, _ = _check_prng_key(key)
+ key, _ = _check_prng_key("beta", key)
dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `beta` must be a float "
@@ -1005,7 +1018,7 @@ def cauchy(key: KeyArrayLike,
Returns:
A random array with the specified shape and dtype.
"""
- key, _ = _check_prng_key(key)
+ key, _ = _check_prng_key("cauchy", key)
dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `cauchy` must be a float "
@@ -1057,7 +1070,7 @@ def dirichlet(key: KeyArrayLike,
``shape + (alpha.shape[-1],)`` if ``shape`` is not None, or else
``alpha.shape``.
"""
- key, _ = _check_prng_key(key)
+ key, _ = _check_prng_key("dirichlet", key)
dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `dirichlet` must be a float "
@@ -1116,7 +1129,7 @@ def exponential(key: KeyArrayLike,
Returns:
A random array with the specified shape and dtype.
"""
- key, _ = _check_prng_key(key)
+ key, _ = _check_prng_key("exponential", key)
dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `exponential` must be a float "
@@ -1297,7 +1310,7 @@ def gamma(key: KeyArrayLike,
loggamma : sample gamma values in log-space, which can provide improved
accuracy for small values of ``a``.
"""
- key, _ = _check_prng_key(key)
+ key, _ = _check_prng_key("gamma", key)
dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `gamma` must be a float "
@@ -1339,7 +1352,7 @@ def loggamma(key: KeyArrayLike,
See Also:
gamma : standard gamma sampler.
"""
- key, _ = _check_prng_key(key)
+ key, _ = _check_prng_key("loggamma", key)
dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `gamma` must be a float "
@@ -1475,7 +1488,7 @@ def poisson(key: KeyArrayLike,
A random array with the specified dtype and with shape given by ``shape`` if
``shape is not None, or else by ``lam.shape``.
"""
- key, _ = _check_prng_key(key)
+ key, _ = _check_prng_key("poisson", key)
dtypes.check_user_dtype_supported(dtype)
# TODO(frostig): generalize underlying poisson implementation and
# remove this check
@@ -1515,7 +1528,7 @@ def gumbel(key: KeyArrayLike,
Returns:
A random array with the specified shape and dtype.
"""
- key, _ = _check_prng_key(key)
+ key, _ = _check_prng_key("gumbel", key)
dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `gumbel` must be a float "
@@ -1550,7 +1563,7 @@ def categorical(key: KeyArrayLike,
A random array with int dtype and shape given by ``shape`` if ``shape``
is not None, or else ``np.delete(logits.shape, axis)``.
"""
- key, _ = _check_prng_key(key)
+ key, _ = _check_prng_key("categorical", key)
check_arraylike("categorical", logits)
logits_arr = jnp.asarray(logits)
@@ -1593,7 +1606,7 @@ def laplace(key: KeyArrayLike,
Returns:
A random array with the specified shape and dtype.
"""
- key, _ = _check_prng_key(key)
+ key, _ = _check_prng_key("laplace", key)
dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `laplace` must be a float "
@@ -1630,7 +1643,7 @@ def logistic(key: KeyArrayLike,
Returns:
A random array with the specified shape and dtype.
"""
- key, _ = _check_prng_key(key)
+ key, _ = _check_prng_key("logistic", key)
dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `logistic` must be a float "
@@ -1673,7 +1686,7 @@ def pareto(key: KeyArrayLike,
A random array with the specified dtype and with shape given by ``shape`` if
``shape`` is not None, or else by ``b.shape``.
"""
- key, _ = _check_prng_key(key)
+ key, _ = _check_prng_key("pareto", key)
dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `pareto` must be a float "
@@ -1722,7 +1735,7 @@ def t(key: KeyArrayLike,
A random array with the specified dtype and with shape given by ``shape`` if
``shape`` is not None, or else by ``df.shape``.
"""
- key, _ = _check_prng_key(key)
+ key, _ = _check_prng_key("t", key)
dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `t` must be a float "
@@ -1775,7 +1788,7 @@ def chisquare(key: KeyArrayLike,
A random array with the specified dtype and with shape given by ``shape`` if
``shape`` is not None, or else by ``df.shape``.
"""
- key, _ = _check_prng_key(key)
+ key, _ = _check_prng_key("chisquare", key)
dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError("dtype argument to `chisquare` must be a float "
@@ -1833,7 +1846,7 @@ def f(key: KeyArrayLike,
A random array with the specified dtype and with shape given by ``shape`` if
``shape`` is not None, or else by ``df.shape``.
"""
- key, _ = _check_prng_key(key)
+ key, _ = _check_prng_key("f", key)
dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError("dtype argument to `f` must be a float "
@@ -1885,7 +1898,7 @@ def rademacher(key: KeyArrayLike,
a 50% change of being 1 or -1.
"""
- key, _ = _check_prng_key(key)
+ key, _ = _check_prng_key("rademacher", key)
dtypes.check_user_dtype_supported(dtype)
dtype = dtypes.canonicalize_dtype(dtype)
shape = core.canonicalize_shape(shape)
@@ -1921,7 +1934,7 @@ def maxwell(key: KeyArrayLike,
"""
# Generate samples using:
# sqrt(X^2 + Y^2 + Z^2), X,Y,Z ~N(0,1)
- key, _ = _check_prng_key(key)
+ key, _ = _check_prng_key("maxwell", key)
dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `maxwell` must be a float "
@@ -1964,7 +1977,7 @@ def double_sided_maxwell(key: KeyArrayLike,
A jnp.array of samples.
"""
- key, _ = _check_prng_key(key)
+ key, _ = _check_prng_key("double_sided_maxwell", key)
dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `double_sided_maxwell` must be a float"
@@ -2016,7 +2029,7 @@ def weibull_min(key: KeyArrayLike,
A jnp.array of samples.
"""
- key, _ = _check_prng_key(key)
+ key, _ = _check_prng_key("weibull_min", key)
dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError(f"dtype argument to `weibull_min` must be a float "
@@ -2055,7 +2068,7 @@ def orthogonal(
Returns:
A random array of shape `(*shape, n, n)` and specified dtype.
"""
- key, _ = _check_prng_key(key)
+ key, _ = _check_prng_key("orthogonal", key)
dtypes.check_user_dtype_supported(dtype)
_check_shape("orthogonal", shape)
n = core.concrete_or_error(index, n, "The error occurred in jax.random.orthogonal()")
@@ -2090,7 +2103,7 @@ def generalized_normal(
Returns:
A random array with the specified shape and dtype.
"""
- key, _ = _check_prng_key(key)
+ key, _ = _check_prng_key("generalized_normal", key)
dtypes.check_user_dtype_supported(dtype)
_check_shape("generalized_normal", shape)
keys = split(key)
@@ -2120,7 +2133,7 @@ def ball(
Returns:
A random array of shape `(*shape, d)` and specified dtype.
"""
- key, _ = _check_prng_key(key)
+ key, _ = _check_prng_key("ball", key)
dtypes.check_user_dtype_supported(dtype)
_check_shape("ball", shape)
d = core.concrete_or_error(index, d, "The error occurred in jax.random.ball()")
@@ -2158,7 +2171,7 @@ def rayleigh(key: KeyArrayLike,
A random array with the specified dtype and with shape given by ``shape`` if
``shape`` is not None, or else by ``scale.shape``.
"""
- key, _ = _check_prng_key(key)
+ key, _ = _check_prng_key("rayleigh", key)
dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError("dtype argument to `rayleigh` must be a float "
@@ -2212,7 +2225,7 @@ def wald(key: KeyArrayLike,
A random array with the specified dtype and with shape given by ``shape`` if
``shape`` is not None, or else by ``mean.shape``.
"""
- key, _ = _check_prng_key(key)
+ key, _ = _check_prng_key("wald", key)
dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError("dtype argument to `wald` must be a float "
@@ -2268,7 +2281,7 @@ def geometric(key: KeyArrayLike,
A random array with the specified dtype and with shape given by ``shape`` if
``shape`` is not None, or else by ``p.shape``.
"""
- key, _ = _check_prng_key(key)
+ key, _ = _check_prng_key("geometric", key)
dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.integer):
raise ValueError("dtype argument to `geometric` must be an int "
@@ -2330,7 +2343,7 @@ def triangular(key: KeyArrayLike,
A random array with the specified dtype and with shape given by ``shape`` if
``shape`` is not None, or else by ``left.shape``, ``mode.shape`` and ``right.shape``.
"""
- key, _ = _check_prng_key(key)
+ key, _ = _check_prng_key("triangular", key)
dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.floating):
raise ValueError("dtype argument to `triangular` must be a float "
@@ -2384,7 +2397,7 @@ def lognormal(key: KeyArrayLike,
Returns:
A random array with the specified dtype and with shape given by ``shape``.
"""
- key, _ = _check_prng_key(key)
+ key, _ = _check_prng_key("lognormal", key)
dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.inexact):
raise ValueError(f"dtype argument to `lognormal` must be a float or complex dtype, "
@@ -2597,7 +2610,7 @@ def binomial(
A random array with the specified dtype and with shape given by
``np.broadcast(n, p).shape``.
"""
- key, _ = _check_prng_key(key)
+ key, _ = _check_prng_key("binomial", key)
check_arraylike("binomial", n, p)
dtypes.check_user_dtype_supported(dtype)
if not dtypes.issubdtype(dtype, np.floating):
|
diff --git a/jax/experimental/jax2tf/tests/shape_poly_test.py b/jax/experimental/jax2tf/tests/shape_poly_test.py
--- a/jax/experimental/jax2tf/tests/shape_poly_test.py
+++ b/jax/experimental/jax2tf/tests/shape_poly_test.py
@@ -2108,7 +2108,7 @@ def f2(z, w): # z: f32[a, 5] w: f32[a + b, 5] -> f32[2*a + b, 10]
# non-partitionable), and unsafe_rbg.
[
PolyHarness("random_gamma", f"{flags_name}",
- lambda key, a: jax.random.gamma(key, a),
+ lambda key, a: jax.vmap(jax.random.gamma)(key, a),
arg_descriptors=[RandArg((3, key_size), np.uint32), RandArg((3, 4, 5), _f32)],
polymorphic_shapes=["b, ...", "b, w, ..."], tol=1E-5,
override_jax_config_flags=override_jax_config_flags), # type: ignore
diff --git a/tests/key_reuse_test.py b/tests/key_reuse_test.py
--- a/tests/key_reuse_test.py
+++ b/tests/key_reuse_test.py
@@ -616,7 +616,7 @@ def test_reuse_after_broadcast(self):
def f():
key = jax.random.key(0)
key2 = key[None]
- return jax.random.bits(key) + jax.random.bits(key2)
+ return jax.random.bits(key) + jax.vmap(jax.random.bits)(key2)
with self.assertRaisesRegex(KeyReuseError, self.random_bits_error):
self.check_key_reuse(f)
diff --git a/tests/random_lax_test.py b/tests/random_lax_test.py
--- a/tests/random_lax_test.py
+++ b/tests/random_lax_test.py
@@ -1247,6 +1247,34 @@ def testBinomialCornerCases(self):
self.assertArraysAllClose(samples2, jnp.array([jnp.nan, 0., jnp.nan, jnp.nan]), check_dtypes=False)
self.assertArraysAllClose(samples3, jnp.array([jnp.nan, jnp.nan, jnp.nan]), check_dtypes=False)
+ def test_batched_key_warnings(self):
+ keys = jax.random.split(self.make_key(0))
+ msg = "{} accepts a single key, but was given a key array of shape.*"
+
+ # Check a handful of functions that are expected to warn.
+ with self.assertWarnsRegex(FutureWarning, msg.format('bits')):
+ jax.random.bits(keys, shape=(2,))
+ with self.assertWarnsRegex(FutureWarning, msg.format('chisquare')):
+ jax.random.chisquare(keys, 1.0, shape=(2,))
+ with self.assertWarnsRegex(FutureWarning, msg.format('dirichlet')):
+ jax.random.dirichlet(keys, jnp.arange(2.0), shape=(2,))
+ with self.assertWarnsRegex(FutureWarning, msg.format('gamma')):
+ jax.random.gamma(keys, 1.0, shape=(2,))
+ with self.assertWarnsRegex(FutureWarning, msg.format('loggamma')):
+ jax.random.loggamma(keys, 1.0, shape=(2,))
+
+ # Other functions should error; test a few cases.
+ with self.assertRaisesRegex(ValueError, msg.format('fold_in')):
+ jax.random.fold_in(keys, 0)
+ with self.assertRaisesRegex(ValueError, msg.format('split')):
+ jax.random.split(keys)
+
+ # Some shouldn't error or warn
+ with self.assertNoWarnings():
+ jax.random.key_data(keys)
+ jax.random.key_impl(keys)
+
+
threefry_seed = prng_internal.threefry_seed
threefry_split = prng_internal.threefry_split
threefry_random_bits = prng_internal.threefry_random_bits
diff --git a/tests/shape_poly_test.py b/tests/shape_poly_test.py
--- a/tests/shape_poly_test.py
+++ b/tests/shape_poly_test.py
@@ -1917,7 +1917,7 @@ def test_vmap_error(self):
# non-partitionable), and unsafe_rbg.
[
PolyHarness("random_gamma", f"{flags_name}",
- lambda key, a: jax.random.gamma(
+ lambda key, a: jax.vmap(jax.random.gamma)(
jax.random.wrap_key_data(key), a),
arg_descriptors=[RandArg((3, key_size), np.uint32),
RandArg((3, 4, 5), _f32)],
|
[better errors] passing batched key to `jax.random` leads to inscrutable error
```python
import jax
batch_size = 8
sample_shape = (batch_size,)
rng = jax.random.PRNGKey(0)
rng = jax.random.split(rng, batch_size)
jax.random.normal(rng, shape=sample_shape)
```
```pytb
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
Cell In[16], line 6
3 rng = jax.random.PRNGKey(0)
4 rng = jax.random.split(rng, batch_size)
----> 6 jax.random.normal(rng, shape=sample_shape)
File ~/github/google/jax/jax/_src/random.py:707, in normal(key, shape, dtype)
705 dtype = dtypes.canonicalize_dtype(dtype)
706 shape = core.as_named_shape(shape)
--> 707 return _normal(key, shape, dtype)
[... skipping hidden 12 frame]
File ~/github/google/jax/jax/_src/random.py:720, in _normal(key, shape, dtype)
718 return (_re + 1j * _im) / sqrt2
719 else:
--> 720 return _normal_real(key, shape, dtype)
[... skipping hidden 12 frame]
File ~/github/google/jax/jax/_src/random.py:727, in _normal_real(key, shape, dtype)
725 lo = np.nextafter(np.array(-1., dtype), np.array(0., dtype), dtype=dtype)
726 hi = np.array(1., dtype)
--> 727 u = uniform(key, shape, dtype, lo, hi) # type: ignore[arg-type]
728 return lax.mul(np.array(np.sqrt(2), dtype), lax.erf_inv(u))
File ~/github/google/jax/jax/_src/random.py:394, in uniform(key, shape, dtype, minval, maxval)
392 dtype = dtypes.canonicalize_dtype(dtype)
393 shape = core.as_named_shape(shape)
--> 394 return _uniform(key, shape, dtype, minval, maxval)
[... skipping hidden 12 frame]
File ~/github/google/jax/jax/_src/random.py:432, in _uniform(key, shape, dtype, minval, maxval)
425 float_bits = lax.bitwise_or(
426 lax.shift_right_logical(bits, np.array(rng_bits - nmant, uint_dtype)),
427 np.array(1.0, dtype).view(uint_dtype),
428 )
429 floats = lax.bitcast_convert_type(float_bits, dtype) - np.array(1., dtype)
430 return lax.max(
431 minval,
--> 432 lax.reshape(floats * (maxval - minval) + minval, shape.positional))
[... skipping hidden 8 frame]
File ~/github/google/jax/jax/_src/lax/lax.py:3386, in _reshape_shape_rule(operand, new_sizes, dimensions)
3383 if (not config.dynamic_shapes.value and
3384 not math.prod(np.shape(operand)) == math.prod(new_sizes)):
3385 msg = 'reshape total size must be unchanged, got new_sizes {} for shape {}.'
-> 3386 raise TypeError(msg.format(new_sizes, np.shape(operand)))
3387 if dimensions is not None:
3388 if set(dimensions) != set(range(np.ndim(operand))):
TypeError: reshape total size must be unchanged, got new_sizes (8,) for shape (8, 8).
```
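For reference, the intended pattern (and the one the new warnings in the patch point to) is to map per-example keys with `jax.vmap` rather than passing a batched key directly; a minimal sketch:
```python
import jax

batch_size = 8
rng = jax.random.PRNGKey(0)
keys = jax.random.split(rng, batch_size)  # a batch of 8 keys

# One scalar sample per key, batched over the leading key axis.
samples = jax.vmap(lambda k: jax.random.normal(k, shape=()))(keys)
print(samples.shape)  # (8,)
```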
| 2024-01-16T16:19:38 |
|
google/jax
| 19,381 |
google__jax-19381
|
[
"19362"
] |
620fe815a475c27c3f6e3767deff494f59479a37
|
diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -670,7 +670,7 @@ def diff(a: ArrayLike, n: int = 1, axis: int = -1,
combined: list[Array] = []
if prepend is not None:
util.check_arraylike("diff", prepend)
- if isscalar(prepend):
+ if not ndim(prepend):
shape = list(arr.shape)
shape[axis] = 1
prepend = broadcast_to(prepend, tuple(shape))
@@ -680,7 +680,7 @@ def diff(a: ArrayLike, n: int = 1, axis: int = -1,
if append is not None:
util.check_arraylike("diff", append)
- if isscalar(append):
+ if not ndim(append):
shape = list(arr.shape)
shape[axis] = 1
append = broadcast_to(append, tuple(shape))
|
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py
--- a/tests/lax_numpy_test.py
+++ b/tests/lax_numpy_test.py
@@ -2744,6 +2744,16 @@ def np_fun(x, n=n, axis=axis, prepend=prepend, append=append):
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
self._CompileAndCheck(jnp_fun, args_maker)
+ def testDiffPrepoendScalar(self):
+ # Regression test for https://github.com/google/jax/issues/19362
+ x = jnp.arange(10)
+ result_jax = jnp.diff(x, prepend=x[0], append=x[-1])
+
+ x = np.array(x)
+ result_numpy = np.diff(x, prepend=x[0], append=x[-1])
+
+ self.assertArraysEqual(result_jax, result_numpy)
+
@jtu.sample_product(
op=["zeros", "ones"],
shape=[2, (), (2,), (3, 0), np.array((4, 5, 6), dtype=np.int32),
|
Concatenation error in `jax.numpy.diff`
### Description
Consider the following example:
```python
import jax.numpy as jnp
x = jnp.arange(10)
z = jnp.diff(x, prepend=x[0])
```
The snippet above will throw an error:
```python
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/.../miniconda3/lib/python3.10/site-packages/jax/_src/numpy/lax_numpy.py", line 694, in diff
arr = concatenate(combined, axis)
File "/home/.../miniconda3/lib/python3.10/site-packages/jax/_src/numpy/lax_numpy.py", line 1841, in concatenate
raise ValueError("Zero-dimensional arrays cannot be concatenated.")
ValueError: Zero-dimensional arrays cannot be concatenated.
```
Such an error does not appear for `numpy`. That is, `z = np.diff(x, prepend=x[0])` works in NumPy. Hence, I believe this is a design bug in jax.numpy: the behaviour does not match the API description.
### What jax/jaxlib version are you using?
0.4.23
### Which accelerator(s) are you using?
CPU and GPU
### Additional system info?
Ubuntu
### NVIDIA GPU info
_No response_
|
Thanks for the report! This seems like a case we missed in the implementation.
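The case that was missed: `x[0]` is a zero-dimensional `jax.Array` rather than a Python scalar, so the old `isscalar(prepend)` check skipped the broadcasting branch and `concatenate` was handed a 0-d operand; testing `not ndim(prepend)` (as in the patch above) covers Python scalars and 0-d arrays alike. A small illustration:
```python
import jax.numpy as jnp

x = jnp.arange(10)
elt = x[0]
print(type(elt), elt.ndim)   # a jax.Array with ndim == 0, not a Python int
print(jnp.ndim(elt) == 0)    # True -> safe to broadcast to shape (..., 1) before concatenating
print(jnp.ndim(3) == 0)      # True for plain Python scalars as well
```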
| 2024-01-16T16:47:09 |
google/jax
| 19,441 |
google__jax-19441
|
[
"19440"
] |
a7023b18d53c979d4880eaa2e7beae3fd8a445aa
|
diff --git a/jax/_src/lax/lax.py b/jax/_src/lax/lax.py
--- a/jax/_src/lax/lax.py
+++ b/jax/_src/lax/lax.py
@@ -62,6 +62,7 @@
standard_primitive)
from jax._src import xla_bridge
from jax._src.lib import xla_client
+from jax._src.lib import xla_extension_version
from jax._src.lib.mlir import ir
from jax._src.lib.mlir.dialects import chlo
from jax._src.lib.mlir.dialects import hlo
@@ -1375,7 +1376,14 @@ def full_like(x: ArrayLike | DuckTypedArray,
# TODO(yashkatariya): Use shard_like in tracing mode too i.e. remove the
# ArrayImpl check.
if shape is None and isinstance(x, array.ArrayImpl):
- return shard_alike.shard_alike(x, val)[1]
+ if xla_extension_version < 227:
+ sharding = x.sharding # type: ignore[union-attr]
+ if (not dispatch.is_single_device_sharding(sharding) and
+ not isinstance(sharding, PmapSharding)):
+ return array.make_array_from_callback(
+ type_cast(array.Shape, fill_shape), sharding, lambda idx: val[idx])
+ else:
+ return shard_alike.shard_alike(x, val)[1]
return val
diff --git a/jax/_src/shard_alike.py b/jax/_src/shard_alike.py
--- a/jax/_src/shard_alike.py
+++ b/jax/_src/shard_alike.py
@@ -23,6 +23,7 @@
from jax._src.interpreters import batching
from jax._src.util import safe_zip
from jax._src.lib import xla_client as xc
+from jax._src.lib import xla_extension_version
from jax._src.api_util import shaped_abstractify
from jax._src.lib.mlir import ir
@@ -30,6 +31,8 @@
def shard_alike(x, y):
"""Shards x and y alike."""
+ if xla_extension_version < 227:
+ raise ValueError("shard_alike requires jaxlib v0.4.24 or newer.")
x_flat, x_tree = tree_flatten(x)
y_flat, y_tree = tree_flatten(y)
|
diff --git a/tests/shard_alike_test.py b/tests/shard_alike_test.py
--- a/tests/shard_alike_test.py
+++ b/tests/shard_alike_test.py
@@ -50,6 +50,16 @@ def tearDownModule():
xla_bridge.get_backend.cache_clear()
+class ShardAlikeDownstreamTest(jtu.JaxTestCase):
+
+ def test_full_like(self):
+ x = jnp.arange(16, dtype='float32').reshape(8, 2)
+ mesh = jtu.create_global_mesh((8,), ("i",))
+ x = jax.device_put(x, NamedSharding(mesh, P('i', None)))
+ y = jnp.full_like(x, 1)
+ self.assertEqual(x.sharding, y.sharding)
+
+
class ShardAlikeTest(jtu.JaxTestCase):
def setUp(self):
|
BUG: `shard_alike` requires newer jaxlib
This failure occurs when running jax at HEAD with jaxlib 0.4.23
```python
import os
os.environ["XLA_FLAGS"] = " --xla_force_host_platform_device_count=8"
import jax
import jaxlib
import jax.numpy as jnp
from jax.sharding import Mesh, NamedSharding, PartitionSpec as P
print(f"{jax.__version__=}")
print(f"{jaxlib.__version__=}")
x = jnp.arange(16, dtype='float32').reshape(8, 2)
mesh = Mesh(jax.devices(), axis_names=("i"))
x = jax.device_put(x, NamedSharding(mesh, P('i', None)))
print(x.sharding)
y = jax.numpy.zeros_like(x)
print(y.sharding)
```
```pytb
jax.__version__='0.4.24.dev20240117'
jaxlib.__version__='0.4.23'
NamedSharding(mesh=Mesh('i': 8), spec=PartitionSpec('i', None))
---------------------------------------------------------------------------
XlaRuntimeError Traceback (most recent call last)
[<ipython-input-1-c390646586f4>](https://localhost:8080/#) in <cell line: 18>()
16 print(x.sharding)
17
---> 18 y = jax.numpy.zeros_like(x)
19 print(y.sharding)
8 frames
[... skipping hidden 10 frame]
[/usr/local/lib/python3.10/dist-packages/jax/_src/interpreters/pxla.py](https://localhost:8080/#) in __call__(self, *args)
1154 self._handle_token_bufs(result_token_bufs, sharded_runtime_token)
1155 else:
-> 1156 results = self.xla_executable.execute_sharded(input_bufs)
1157 if dispatch.needs_check_special():
1158 out_arrays = results.disassemble_into_single_device_arrays()
XlaRuntimeError: INVALID_ARGUMENT: Executable expected parameter 1 of size 8 but got buffer with incompatible size 64: while running replica 0 and partition 0 of a replicated computation (other replicas may have failed as well).
```
It can be reproduced more directly by replacing the last line with this:
```python
jax.experimental.shard_alike.shard_alike(jnp.zeros(x.shape), x)
```
It looks like this bug was introduced to `full_like` in #19115.
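For reference, the patch above gates the new `shard_alike` path on the installed jaxlib; a schematic sketch of that version-guard pattern (the fallback name here is hypothetical, standing in for the pre-#19115 sharding logic):
```python
from jax._src.lib import xla_extension_version

def _full_like_sharded(x, val):
    # shard_alike needs jaxlib v0.4.24+ (xla_extension_version >= 227);
    # older runtimes fall back to the previous sharding logic.
    if xla_extension_version >= 227:
        from jax._src import shard_alike
        return shard_alike.shard_alike(x, val)[1]
    return _legacy_resharding(x, val)  # hypothetical stand-in for the old make_array_from_callback path
```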
| 2024-01-19T21:03:03 |
|
google/jax
| 19,511 |
google__jax-19511
|
[
"19334"
] |
1ae054b003088d873902fa62cfa8099260471e16
|
diff --git a/jax/_src/lax/lax.py b/jax/_src/lax/lax.py
--- a/jax/_src/lax/lax.py
+++ b/jax/_src/lax/lax.py
@@ -132,9 +132,8 @@ def asarray(x: ArrayLike) -> Array:
"""Lightweight conversion of ArrayLike input to Array output."""
if isinstance(x, Array):
return x
- if isinstance(x, np.ndarray) or np.isscalar(x):
- # Call device_put_impl directly to avoid binding the primitive.
- return dispatch._device_put_impl(x)
+ if isinstance(x, (np.ndarray, np.generic, bool, int, float, builtins.complex)):
+ return _convert_element_type(x, weak_type=dtypes.is_weakly_typed(x))
else:
raise TypeError(f"asarray: expected ArrayLike, got {x} of type {type(x)}.")
|
diff --git a/tests/lax_test.py b/tests/lax_test.py
--- a/tests/lax_test.py
+++ b/tests/lax_test.py
@@ -2726,6 +2726,28 @@ def _step(carry, arg):
a, b = jax.lax.scan(_step, 0, jnp.arange(4, dtype=jnp.complex64))
+ @parameterized.parameters([float, np.array, np.float32, jnp.float32])
+ def testAsarray(self, typ):
+ x = typ(1.0)
+ x_arr = lax_internal.asarray(x)
+ self.assertArraysEqual(x, x_arr)
+ self.assertIsInstance(x_arr, jax.Array)
+
+ # jaxpr should not bind any primitives, whether called directly or
+ # as a closure:
+ jaxpr = jax.make_jaxpr(lax_internal.asarray)(x)
+ self.assertLen(jaxpr.eqns, 0)
+
+ asarray_closure = lambda: lax_internal.asarray(x)
+ jaxpr = jax.make_jaxpr(asarray_closure)()
+ self.assertLen(jaxpr.eqns, 0)
+
+ # Regression test for https://github.com/google/jax/issues/19334
+ # lax.asarray as a closure should not trigger transfer guard.
+ with jax.transfer_guard('disallow'):
+ jax.jit(asarray_closure)()
+
+
class LazyConstantTest(jtu.JaxTestCase):
def _Check(self, make_const, expected):
|
`broadcast_to` raise Disallowed host-to-device transfer when used inside jax.jit
### Description
Reproduction:
```python
@jax.jit
def test():
return jnp.broadcast_to(0.1, (4,))
with jax.transfer_guard('disallow'):
test()
```
Fail with:
```
XlaRuntimeError: INVALID_ARGUMENT: Disallowed host-to-device transfer: aval=ShapedArray(float32[]), dst_sharding=SingleDeviceSharding(device=TpuDevice(id=0, process_index=0, coords=(0,0,0), core_on_chip=0))
```
The `0.1` is a constant in the XLA graph, so this should not trigger any host-to-device transfer.
It's possible to silence the issue with `jnp.broadcast_to(jnp.asarray(0.1), (4,))`, but the original issue is a bug.
### What jax/jaxlib version are you using?
HEAD
### Which accelerator(s) are you using?
TPU
|
Thanks for the report. It looks like the issue comes from the implementation of `lax.asarray` (used by `broadcast_to`), which uses `device_put` for non-jax-array inputs: https://github.com/google/jax/blob/31efc2cc6ae6f7f20b78c742def23afcd6507625/jax/_src/lax/lax.py#L129-L137
I think we need to use a different strategy for converting scalars to arrays if we want to avoid triggering the transfer guard here.
Here's the more direct repro:
```python
import jax
with jax.transfer_guard('disallow'):
jax.jit(lambda: jax.numpy.asarray(0.1))() # no error
jax.jit(lambda: jax._src.lax.lax.asarray(0.1))() # error
```
| 2024-01-24T23:37:57 |
google/jax
| 19,518 |
google__jax-19518
|
[
"19490"
] |
1ae054b003088d873902fa62cfa8099260471e16
|
diff --git a/jax/_src/nn/functions.py b/jax/_src/nn/functions.py
--- a/jax/_src/nn/functions.py
+++ b/jax/_src/nn/functions.py
@@ -432,7 +432,8 @@ def log_softmax(x: ArrayLike,
numpy_util.check_arraylike("log_softmax", x)
x_arr = jnp.asarray(x)
x_max = jnp.max(x_arr, axis, where=where, initial=initial, keepdims=True)
- shifted = x_arr - lax.stop_gradient(x_max)
+ x_safe = x_arr if where is None else jnp.where(where, x_arr, initial)
+ shifted = x_safe - lax.stop_gradient(x_max)
shifted_logsumexp = jnp.log(
jnp.sum(jnp.exp(shifted), axis, where=where, keepdims=True))
result = shifted - shifted_logsumexp
@@ -486,7 +487,8 @@ def _softmax(
where: ArrayLike | None = None,
initial: ArrayLike | None = None) -> Array:
x_max = jnp.max(x, axis, where=where, initial=initial, keepdims=True)
- unnormalized = jnp.exp(x - x_max)
+ x_safe = x if where is None else jnp.where(where, x, initial)
+ unnormalized = jnp.exp(x_safe - x_max)
result = unnormalized / jnp.sum(unnormalized, axis, where=where, keepdims=True)
if where is not None:
result = jnp.where(where, result, 0)
@@ -504,7 +506,8 @@ def _softmax_deprecated(
where: ArrayLike | None = None,
initial: ArrayLike | None = None) -> Array:
x_max = jnp.max(x, axis, where=where, initial=initial, keepdims=True)
- unnormalized = jnp.exp(x - lax.stop_gradient(x_max))
+ x_safe = x if where is None else jnp.where(where, x, initial)
+ unnormalized = jnp.exp(x_safe - lax.stop_gradient(x_max))
result = unnormalized / jnp.sum(unnormalized, axis, where=where, keepdims=True)
if where is not None:
result = jnp.where(where, result, 0)
|
diff --git a/tests/nn_test.py b/tests/nn_test.py
--- a/tests/nn_test.py
+++ b/tests/nn_test.py
@@ -172,6 +172,16 @@ def testSoftmaxWhereMask(self, fn):
jnp.array([0, 2, 3]))
jtu.check_grads(g_fun, (x,), order=2)
+ @parameterized.parameters([nn.softmax, nn.log_softmax])
+ def testSoftmaxWhereGrad(self, fn):
+ # regression test for https://github.com/google/jax/issues/19490
+ x = jnp.array([36., 10000.])
+ mask = x < 1000
+
+ f = lambda x, mask: fn(x, where=mask, initial=x.min())[0]
+
+ self.assertAllClose(jax.grad(f)(x, mask), jnp.zeros_like(x))
+
def testSoftmaxGrad(self):
x = jnp.array([5.5, 1.3, -4.2, 0.9])
jtu.check_grads(nn.softmax, (x,), order=2, atol=5e-3)
|
`log_softmax` and `softmax` are not numerically stable with `where` argument inside `vmap`
### Description
In the following code, large masked-out values seem to make the gradient infeasible. Uncommenting `x = jnp.where(mask, x, x.min())` fixes the issue. Apologies if I'm not using log_softmax right.
```python
import os
import jax
import jax.numpy as jnp
import jax.random as jr
from jax import vmap, jit, nn, value_and_grad
key = jr.PRNGKey(12322351)
@jit
def f(x, mask, i):
# x = jnp.where(mask, x, x.min())
return nn.log_softmax(x, where=mask, initial=x.max())[i]
key, sub = jr.split(key)
x = jr.normal(sub, (1000, 12)) * 100
key, sub = jr.split(key)
mask = jr.bernoulli(sub, 0.5, (1000, 12))
idx = jnp.zeros(1000, dtype=jnp.int32)
for i in range(1000):
while not jnp.any(mask[i]):
key, sub = jr.split(key)
mask = mask.at[i].set(jr.bernoulli(sub, 0.5, (12,)))
j, = jnp.where(mask[i])
key, sub = jr.split(key)
idx = idx.at[i].set(jr.choice(sub, j))
v, g = value_and_grad(lambda x: vmap(f)(x, mask, idx).sum())(x)
v.item(), g.min().item(), g.max().item()
```
returns
```
(inf, nan, nan)
```
### What jax/jaxlib version are you using?
jax==0.4.23
jaxlib==0.4.23+cuda12.cudnn89
### Which accelerator(s) are you using?
GPU
### Additional system info?
1.26.2 3.11.6 | packaged by conda-forge | (main, Oct 3 2023, 10:40:35) [GCC 12.3.0] uname_result(system='Linux', node='xxxx', release='5.14.0-284.30.1.el9_2.x86_64', version='#1 SMP PREEMPT_DYNAMIC Tue Sep 12 09:28:32 EDT 2023', machine='x86_64')
### NVIDIA GPU info
```
+---------------------------------------------------------------------------------------+
| NVIDIA-SMI 545.23.06 Driver Version: 545.23.06 CUDA Version: 12.3 |
|-----------------------------------------+----------------------+----------------------+
| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|=========================================+======================+======================|
| 0 NVIDIA A100-SXM4-80GB On | 00000000:07:00.0 Off | 0 |
| N/A 31C P0 70W / 400W | 971MiB / 81920MiB | 0% Default |
| | | Disabled |
+-----------------------------------------+----------------------+----------------------+
```
|
Thanks for the report – this looks like an instance of the issue discussed here: https://jax.readthedocs.io/en/latest/faq.html#gradients-contain-nan-where-using-where
It seems like you arrived at the correct fix, which is applying `where` twice to avoid passing problematic values to the differentiated function. My only question here is whether `softmax` and other functions providing the `where` argument should do this double-where by default... I'm not sure. What do you think?
I personally did not think my NaNs were coming from the softmax, so I'd lean toward doing the double-where in softmax. Perhaps it could be disabled with a flag for the performance-conscious people?
Here's a much more compact repro:
```python
import jax
import jax.numpy as jnp
def f(x, mask):
# x = jnp.where(mask, x, x.min())
return jax.nn.log_softmax(x, where=mask, initial=x.max())[0]
x = jnp.array([36., 200.])
mask = jnp.array([True, False])
print(*jax.value_and_grad(f)(x, mask))
# inf [nan 0.]
```
Oh, I see the issue now – it's a problem with the `initial` value. You're setting `initial=x.max()`, when you should set `initial=x.min()`.
The gradient will still contain `NaN` value because the output of `logsoftmax` contains infinities where `mask` is False. This is the expected output, because `softmax(x) = 0`, so `logsoftmax(x) = -inf`, and gradients of infinity are `NaN` because no other value makes sense.
If you don't want `NaN` gradients, you need to take the derivative of a function that does not have infinite outputs.
Does that make sense?
I don't think that's expected, though. I'm only looking at indices where softmax is **not** zero. The same issue happens with softmax instead of logsoftmax, where the gradient should be zero because the output is independent of the input. This only happens with the vmap. This also happens when `initial` is `x.min()` or `x.max()`.
I'm not sure I follow – could you put together a minimal example that demonstrates these issues? i.e. one that uses a constant array with a few values like my repro, rather than an expensive-to-construct array with thousands of entries as in your original repro.
Will do!
You were right that this does not seem to have anything to do with `vmap`, and I might be misunderstanding what you are saying, but please look at the following piece of code.
```python
def f(x, mask):
# x = jnp.where(mask, x, x.min())
return jax.nn.log_softmax(x, where=mask, initial=x[jnp.argmax(mask)])[0]
mask = jnp.array([True, False])
print(*jax.value_and_grad(f)(jnp.array([36., 36.]), mask))
print(*jax.value_and_grad(f)(jnp.array([36., 10000.]), mask))
# 0.0 [0. 0.]
# 0.0 [ 0. nan]
```
I think that the behavior should be consistent, either always nan or always zero.
> The gradient will still contain NaN values because the output of logsoftmax contains infinities where mask is False.
I don't think that this statement is true. In the first case the gradient has no NaNs even though there is a mask. The output does not depend on the -infs, so the gradient should still be defined. If a value is masked out, then its gradient should just be zero, as it is here when both values are close.
Thanks - that's very clear. I was confused by the claim that this was related to `vmap`, but this makes more sense.
Side-note: as noted above, you should set `initial` to the minimum value in the array (or negative infinity in case the mask is all `False`). That won't fix the `nan` issue, but it is what the function expects.
I think I was correct when I speculated above that the `nan` here is coming from the issue mentioned in the linked FAQ entry. The way to address it is to use the double-where construct, which can be accomplished by uncommenting your commented line. Perhaps `softmax` should do that automatically, but that's less clear to me.
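Concretely, for the compact repro above, the double-where version looks something like this (just a sketch; the `where` inside `log_softmax` acts as the second `where`):
```python
import jax
import jax.numpy as jnp

def f(x, mask):
    # first where: replace masked-out entries with a benign in-range value so
    # they never reach the differentiated computation
    safe_x = jnp.where(mask, x, x.min())
    # the masked log_softmax provides the second where
    return jax.nn.log_softmax(safe_x, where=mask, initial=safe_x.min())[0]

x = jnp.array([36., 200.])
mask = jnp.array([True, False])
print(*jax.value_and_grad(f)(x, mask))  # finite value, no nan in the gradient
```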
The `nan` in the output is coming from the `sum` in the implementation; here's a minimization of the implementation issue:
```python
def f(x, mask):
return jnp.sum(jnp.exp(x), where=mask)
mask = jnp.array([True, False])
print(*jax.value_and_grad(f)(jnp.array([0., 10000.]), mask))
# 1.0 [ 1. nan]
```
So perhaps `sum` should include the double-where construct to avoid potentially generating NaNs in gradients? If we fixed it in sum, it would be fixed in softmax as well.
That sounds reasonable to me
This should be fixed in #19518. I've confirmed that it fixes the initial report so long as you use `initial=x.min()` instead of `initial=x.max()`.
| 2024-01-25T18:00:03 |
google/jax
| 19,601 |
google__jax-19601
|
[
"19663"
] |
9a098e922aff62a3b49bd673b9518d97ee599248
|
diff --git a/jax/experimental/shard_map.py b/jax/experimental/shard_map.py
--- a/jax/experimental/shard_map.py
+++ b/jax/experimental/shard_map.py
@@ -923,13 +923,14 @@ def _standard_collective_check(prim, mesh, x_rep, *, axis_name, **params):
def _standard_collective_rewrite(prim, mesh, in_rep, x, axis_name, **params):
# The standard collective rewrite may insert a pbroadcast on the input.
- if type(axis_name) is tuple: raise NotImplementedError # TODO
if params.get('axis_index_groups') is not None: raise NotImplementedError
+ axis_name = (axis_name,) if not isinstance(axis_name, tuple) else axis_name
x_rep, = in_rep
- if axis_name in in_rep:
- x = pbroadcast(x, (axis_name,))
+ axis_name_set = set(axis_name)
+ if pbroadcast_axis_name := axis_name_set & x_rep:
+ x = pbroadcast(x, tuple(pbroadcast_axis_name))
out_val = prim.bind(x, axis_name=axis_name, **params)
- return [out_val], [x_rep - {axis_name}]
+ return [out_val], [x_rep - axis_name_set]
for o in it.chain(lax.__dict__.values(), slicing.__dict__.values(),
|
diff --git a/tests/shard_map_test.py b/tests/shard_map_test.py
--- a/tests/shard_map_test.py
+++ b/tests/shard_map_test.py
@@ -125,10 +125,17 @@ def test_all_gather(self):
@partial(shard_map, mesh=mesh,
in_specs=(P('z', ('x', 'y')),), out_specs=P('z', ('x', 'y')))
def fwd(a):
- return lax.all_gather(a, 'z', axis=0, tiled=True)
-
- c = fwd(a)
+ return (
+ lax.all_gather(a, 'z', axis=0, tiled=True),
+ lax.all_gather(a, ('x', 'y'), axis=-1, tiled=True),
+ )
+ c, d = fwd(a)
self.assertEqual(c.addressable_data(0).shape, (8, 2))
+ for i, a_shard in enumerate(np.split(a, 4, axis=1)):
+ self.assertAllClose(c.addressable_data(2 * i), a_shard)
+ self.assertEqual(d.addressable_data(0).shape, (4, 8))
+ for i, a_shard in enumerate(np.split(a, 2, axis=0)):
+ self.assertAllClose(d.addressable_data(i), a_shard)
def test_matmul_partial(self):
raise unittest.SkipTest("invalid replication asserted by out_spec?")
@@ -156,10 +163,17 @@ def test_matmul_reduce_scatter(self):
out_specs=P(('z', 'y'), None))
def fwd(a, b):
c = jnp.matmul(a, b) # [B.z, F] {y.unreduced}
- return lax.psum_scatter(c, 'y', scatter_dimension=0, tiled=True)
+ return (
+ lax.psum_scatter(c, 'y', scatter_dimension=0, tiled=True),
+ lax.psum_scatter(c, ('z', 'y'), scatter_dimension=0, tiled=True),
+ )
- c = fwd(a, b)
+ expected = jnp.matmul(a, b)
+ c, d = fwd(a, b)
self.assertEqual(c.addressable_data(0).shape, (2, 8))
+ self.assertAllClose(expected, c)
+ self.assertEqual(d.addressable_data(0).shape, (1, 8))
+ self.assertAllClose(expected[:4] + expected[4:], d)
def test_collective_permute(self):
devices = np.array(jax.devices()[:8]) # Take up to 8 devices
@@ -169,8 +183,9 @@ def test_collective_permute(self):
jax.sharding.NamedSharding(mesh, P('x', None)))
@jax.jit
- @partial(shard_map, mesh=mesh, in_specs=(P('x', None),),
- out_specs=P('x', None))
+ @partial(
+ shard_map, mesh=mesh, in_specs=(P('x', None),), out_specs=P('x', None)
+ )
def fwd(a):
axis_size = lax.psum(1, 'x')
perm = [(j, (j + 1) % axis_size) for j in range(axis_size)]
@@ -179,18 +194,74 @@ def fwd(a):
c = fwd(a)
self.assertAllClose(c[1, :], a[0, :])
- def test_all_to_all(self):
- devices = np.array(jax.devices()[:8]) # Take up to 8 devices
- mesh = Mesh(devices, axis_names=('x'))
+ def test_collective_permute_with_multiple_axis_names(self):
+ mesh = Mesh(
+ np.array(jax.devices()[:8]).reshape((2, 2, 2)),
+ axis_names=('x', 'y', 'z'),
+ )
+ a = jax.device_put(
+ jnp.arange(8 * 8).reshape((4, 16)),
+ jax.sharding.NamedSharding(mesh, P('x', ('y', 'z'))),
+ )
+
+ @jax.jit
+ @partial(
+ shard_map,
+ mesh=mesh,
+ in_specs=(P('x', ('y', 'z')),),
+ out_specs=P('x', ('y', 'z')),
+ )
+ def fwd(a):
+ xy_axis_size = lax.psum(1, ('x', 'y'))
+ yz_axis_size = lax.psum(1, ('y', 'z'))
+ xy_perm = [(j, (j + 1) % xy_axis_size) for j in range(xy_axis_size)]
+ yz_perm = [(j, (j + 1) % yz_axis_size) for j in range(yz_axis_size)]
+ return (
+ lax.ppermute(a, ('x', 'y'), perm=xy_perm),
+ lax.ppermute(a, ('y', 'z'), perm=yz_perm),
+ )
+
+ c, d = fwd(a)
+ for i in range(8):
+ self.assertAllClose(
+ a.addressable_data(i), c.addressable_data((i + 2) % 8)
+ )
+ self.assertAllClose(
+ a.addressable_data(i), d.addressable_data(4 * (i // 4) + (i + 1) % 4)
+ )
+
+ @parameterized.named_parameters(
+ dict(
+ testcase_name='_single_axis_name', axis_name='x', mesh_axes=dict(x=8)
+ ),
+ dict(
+ testcase_name='_multiple_axis_names',
+ axis_name=('x', 'y'),
+ mesh_axes=dict(x=4, y=2),
+ ),
+ )
+ def test_all_to_all(self, axis_name, mesh_axes):
+ devices = np.array(jax.devices()[: np.prod(tuple(mesh_axes.values()))])
+ mesh = Mesh(
+ devices.reshape(tuple(mesh_axes.values())),
+ axis_names=tuple(mesh_axes.keys()),
+ )
a = jax.device_put(
jnp.arange(8 * 8).reshape((8, 8)),
- jax.sharding.NamedSharding(mesh, P('x', None)))
+ jax.sharding.NamedSharding(mesh, P(axis_name, None)),
+ )
@jax.jit
- @partial(shard_map, mesh=mesh,
- in_specs=(P('x', None),), out_specs=P(None, 'x'))
+ @partial(
+ shard_map,
+ mesh=mesh,
+ in_specs=(P(axis_name, None),),
+ out_specs=P(None, axis_name),
+ )
def fwd(a):
- return lax.all_to_all(a, 'x', split_axis=1, concat_axis=1, tiled=True)
+ return lax.all_to_all(
+ a, axis_name, split_axis=1, concat_axis=1, tiled=True
+ )
c = fwd(a)
assert (c == jnp.reshape(a.T, (1, 64))).all()
@@ -860,7 +931,9 @@ def test_dce(self):
def f(x, y, z):
@partial(shard_map, mesh=mesh, in_specs=(P('i', 'j'), P(None, 'i')),
out_specs=(P(None, None), P(None, 'i'), P('i', 'j')))
- def g(y, z): return jnp.sin(x), jnp.cos(z), jnp.tan(y)
+ def g(y, z):
+ return jnp.sin(x), jnp.cos(z), jnp.tan(y)
+
return g(y, z)
x = jnp.zeros((4, 4))
|
[shmap] shard_map doesn't support multiple axes
### Description
```
import functools
import jax
from jax import numpy as jnp
from jax import sharding
from jax.experimental import mesh_utils
from jax.experimental import shard_map
mesh = sharding.Mesh(
mesh_utils.create_device_mesh((1, 1), jax.devices()[:1]), ('x', 'y')
)
@functools.partial(
shard_map.shard_map,
mesh=mesh,
in_specs=(sharding.PartitionSpec(('x', 'y')),),
out_specs=sharding.PartitionSpec(('x', 'y')),
)
def shmap(x):
return jax.lax.all_to_all(
x, ('x', 'y'), split_axis=1, concat_axis=1, tiled=True
)
shmap(jnp.arange(64).reshape((8, 8)))
```
throws
```
NotImplementedError Traceback (most recent call last)
[<ipython-input-15-054b2c13eea0>](https://colab.corp.google.com/drive/1BrxzONkVzmempZ6vhNoQfMm29kmKZUqE?resourcekey=0-8USnR6nL2HfsuYRxxqzFbw#) in <module>()
24
25
---> 26 shmap(jnp.arange(64).reshape((8, 8)))
[<ipython-input-15-054b2c13eea0>](https://colab.corp.google.com/drive/1BrxzONkVzmempZ6vhNoQfMm29kmKZUqE?resourcekey=0-8USnR6nL2HfsuYRxxqzFbw#) in shmap(x)
19 )
20 def shmap(x):
---> 21 return jax.lax.all_to_all(
22 x, ('x', 'y'), split_axis=1, concat_axis=1, tiled=True
23 )
NotImplementedError:
```
from
https://github.com/google/jax/blob/5c5e8f032a83ea88333ef53d076e3b21dad60059/jax/experimental/shard_map.py#L926
Proposed a fix in https://github.com/google/jax/pull/19601.
### What jax/jaxlib version are you using?
0.4.24
### Which accelerator(s) are you using?
CPU/TPU
### Additional system info?
1.26.3 3.11.7 (stable, redacted, redacted) [Clang google3-trunk (0784b1eefa36d4acbb0dacd2d18796e26313b6c5)] uname_result(system='Linux', node='258826a665cf1304-84aaaed3d9.borgtask.google.com', release='5.10.0-smp-1100.465.0.0', version='#1 [v5.10.0-1100.465.0.0] SMP @1704273398', machine='x86_64')
### NVIDIA GPU info
_No response_
| 2024-01-31T17:14:46 |
|
google/jax
| 19,622 |
google__jax-19622
|
[
"19616"
] |
bb5c90bdfad370c018d221828b0bf6016de283b0
|
diff --git a/jax/_src/lax/lax.py b/jax/_src/lax/lax.py
--- a/jax/_src/lax/lax.py
+++ b/jax/_src/lax/lax.py
@@ -4437,7 +4437,9 @@ def _outfeed_lowering(ctx, token, *xs, partitions):
def rng_uniform(a, b, shape):
"""Stateful PRNG generator. Experimental and its use is discouraged.
- Returns uniformly distributed random numbers in the range [a, b)
+ Returns uniformly distributed random numbers in the range [a, b). If
+ b <= a, then the result is undefined, and different implementations may
+ return different results.
You should use jax.random for most purposes; this function exists only for
niche use cases with special performance requirements.
|
jax.lax.rng_uniform(k, k) has different behaviour on CPU (RANDOM int32) / TPU (ALWAYS k)
### Description
rng_uniform claims "Returns uniformly distributed random numbers in the range [a, b)".
When called with a == b, it behaves differently on CPU and TPU:
- CPU: it seems to use the full int32 range.
- TPU: it ALWAYS returns the same!!!
Example:
```
import jax.tools.colab_tpu
# jax.tools.colab_tpu.setup_tpu()
import jax
from jax.lib import xla_bridge
print(xla_bridge.get_backend().platform)
print(jax.devices())
print(jax.lax.rng_uniform(3, 3, (10,)))
```
CPU:
```
cpu
[CpuDevice(id=0)]
[-1752798562 1744106001 -688905896 -376700610 28523420 -600052118
1113020891 874027959 762190079 -467109515]
```
GPU:
```
gpu
[cuda(id=0)]
[-1752798562 1744106001 -688905896 -376700610 28523420 -600052118
1113020891 874027959 762190079 -467109515]
```
On TPU:
```
tpu
[TpuDevice(id=0, process_index=0, coords=(0,0,0), core_on_chip=0), TpuDevice(id=1, process_index=0, coords=(0,0,0), core_on_chip=1), TpuDevice(id=2, process_index=0, coords=(1,0,0), core_on_chip=0), TpuDevice(id=3, process_index=0, coords=(1,0,0), core_on_chip=1), TpuDevice(id=4, process_index=0, coords=(0,1,0), core_on_chip=0), TpuDevice(id=5, process_index=0, coords=(0,1,0), core_on_chip=1), TpuDevice(id=6, process_index=0, coords=(1,1,0), core_on_chip=0), TpuDevice(id=7, process_index=0, coords=(1,1,0), core_on_chip=1)]
[3 3 3 3 3 3 3 3 3 3]
```
### What jax/jaxlib version are you using?
0.4.23
### Which accelerator(s) are you using?
CPU/GPU/TPU
### Additional system info?
https://colab.research.google.com/
### NVIDIA GPU info
_No response_
|
Thanks for the report! I think this is working as expected. From the [XLA:RngUniform docs](https://www.tensorflow.org/xla/operation_semantics#rnguniform):
> if *b <= a* the result is implementation-defined.
I think we could make that more clear in the JAX documentation for `jax.lax.rng_uniform`.
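In the meantime, the practical guidance from the docstring applies here: prefer the stateless `jax.random` API, which is well-defined across backends. A quick sketch:
```python
import jax

key = jax.random.PRNGKey(0)
# draw integers uniformly from [0, 100) with an explicit key; the default
# threefry PRNG produces the same stream on CPU, GPU and TPU
x = jax.random.randint(key, (10,), minval=0, maxval=100)
```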
| 2024-02-01T18:20:29 |
|
google/jax
| 19,710 |
google__jax-19710
|
[
"19709"
] |
b9824d7de3cb30f1df738cc42e486db3e9d915ff
|
diff --git a/jax/experimental/shard_map.py b/jax/experimental/shard_map.py
--- a/jax/experimental/shard_map.py
+++ b/jax/experimental/shard_map.py
@@ -923,7 +923,6 @@ def _standard_collective_check(prim, mesh, x_rep, *, axis_name, **params):
def _standard_collective_rewrite(prim, mesh, in_rep, x, axis_name, **params):
# The standard collective rewrite may insert a pbroadcast on the input.
- if params.get('axis_index_groups') is not None: raise NotImplementedError
axis_name = (axis_name,) if not isinstance(axis_name, tuple) else axis_name
x_rep, = in_rep
axis_name_set = set(axis_name)
|
diff --git a/tests/shard_map_test.py b/tests/shard_map_test.py
--- a/tests/shard_map_test.py
+++ b/tests/shard_map_test.py
@@ -137,6 +137,28 @@ def fwd(a):
for i, a_shard in enumerate(np.split(a, 2, axis=0)):
self.assertAllClose(d.addressable_data(i), a_shard)
+ def test_all_gather_with_axis_index_groups(self):
+ mesh, a, _ = create_inputs(P('x', ('y', 'z')), P(None, None))
+
+ @jax.jit
+ @partial(
+ shard_map,
+ mesh=mesh,
+ in_specs=(P('x', ('y', 'z')),),
+ out_specs=P('x', ('y', 'z')),
+ )
+ def fwd(a):
+ return lax.all_gather(
+ a, ('y', 'z'), axis_index_groups=((0, 1), (2, 3)), axis=-1, tiled=True
+ )
+
+ c = fwd(a)
+ self.assertEqual(c.addressable_data(0).shape, (4, 4))
+ for i, row_block in enumerate(np.split(a, 2, axis=0)):
+ for j, block in enumerate(np.split(row_block, 2, axis=-1)):
+ self.assertAllClose(c.addressable_data(4 * i + 2 * j), block)
+ self.assertAllClose(c.addressable_data(4 * i + 2 * j + 1), block)
+
def test_matmul_partial(self):
raise unittest.SkipTest("invalid replication asserted by out_spec?")
@@ -175,6 +197,39 @@ def fwd(a, b):
self.assertEqual(d.addressable_data(0).shape, (1, 8))
self.assertAllClose(expected[:4] + expected[4:], d)
+ def test_reduce_scatter_with_axis_index_groups(self):
+ axis_index_groups = ((0, 2, 4, 6), (1, 3, 5, 7))
+ mesh, a, _ = create_inputs(P(None, ('x', 'y', 'z')), P(None, None))
+ assert a.addressable_data(0).shape == (8, 1)
+
+ @jax.jit
+ @partial(
+ shard_map,
+ mesh=mesh,
+ in_specs=(P(None, ('x', 'y', 'z')),),
+ out_specs=P(None, ('x', 'y', 'z')),
+ )
+ def fwd(a):
+ return lax.psum_scatter(
+ a,
+ ('x', 'y', 'z'),
+ scatter_dimension=0,
+ axis_index_groups=axis_index_groups,
+ tiled=True,
+ )
+
+ c = fwd(a)
+
+ self.assertEqual(c.addressable_data(0).shape, (2, 1))
+
+ sum_of_even_columns = np.sum(a[..., axis_index_groups[0]], -1)
+ for i, sums in enumerate(np.split(sum_of_even_columns, 4, 0)):
+ self.assertAllClose(np.squeeze(c.addressable_data(2 * i), -1), sums)
+
+ sum_of_odd_columns = np.sum(a[..., axis_index_groups[1]], -1)
+ for i, sums in enumerate(np.split(sum_of_odd_columns, 4, 0)):
+ self.assertAllClose(np.squeeze(c.addressable_data(2 * i + 1), -1), sums)
+
def test_collective_permute(self):
devices = np.array(jax.devices()[:8]) # Take up to 8 devices
mesh = Mesh(devices, axis_names=('x'))
@@ -266,6 +321,44 @@ def fwd(a):
c = fwd(a)
assert (c == jnp.reshape(a.T, (1, 64))).all()
+ def test_all_to_all_with_axis_index_groups(self):
+ mesh_axes = dict(x=4)
+ devices = np.array(jax.devices()[: np.prod(tuple(mesh_axes.values()))])
+ mesh = Mesh(
+ devices.reshape(tuple(mesh_axes.values())),
+ axis_names=tuple(mesh_axes.keys()),
+ )
+ a = jax.device_put(
+ jnp.arange(4 * 4).reshape((4, 4)),
+ jax.sharding.NamedSharding(mesh, P('x', None)),
+ )
+ self.assertEqual(a.addressable_data(0).shape, (1, 4))
+
+ @jax.jit
+ @partial(
+ shard_map,
+ mesh=mesh,
+ in_specs=(P('x', None),),
+ out_specs=P(None, 'x'),
+ )
+ def fwd(a):
+ return lax.all_to_all(
+ a,
+ 'x',
+ split_axis=1,
+ concat_axis=0,
+ axis_index_groups=((0, 1), (2, 3)),
+ tiled=True,
+ )
+
+ c = fwd(a)
+
+ # Each shard corresponds to a quadrant rather than a row.
+ self.assertEqual(c.addressable_data(0).shape, (2, 2))
+ for i, row_block in enumerate(np.split(a, 2, axis=0)):
+ for j, block in enumerate(np.split(row_block, 2, axis=-1)):
+ self.assertAllClose(block, c.addressable_data(2 * i + j))
+
def test_eager_repr(self):
mesh = Mesh(np.array(jax.devices()[:4]).reshape(2, 2), ('x', 'y'))
s = None
|
[shmap] shard_map collectives don't support axis_index_groups
I am primarily interested in collective matrix multiplication algorithms where `all_to_all` and `all_gather` are useful for overlapping reduce scatter permutes and matrix multiplication.
Collectives like `psum` may be hard to support since the replication rule doesn't make sense, but collectives like `all_gather`, `all_to_all`, and `psum_scatter` should be easy since they are per-device and the output is unreplicated.
Example of code that doesn't work:
```python
from functools import partial
import jax
from jax import numpy as jnp
from jax import sharding
from jax.experimental.shard_map import shard_map
mesh = sharding.Mesh(jax.devices(), ('x',))
x = jax.device_put(
jnp.arange(64).reshape(8, 8),
sharding.NamedSharding(mesh, sharding.PartitionSpec(None, 'x')),
)
assert x.addressable_data(0).shape == (8, 1)
@partial(
shard_map,
mesh=mesh,
in_specs=(sharding.PartitionSpec(None, 'x'),),
out_specs=sharding.PartitionSpec(None, 'x'),
)
def fwd(x):
return jax.lax.psum_scatter(
x,
'x',
scatter_dimension=0,
axis_index_groups=((0, 2, 4, 6), (1, 3, 5, 7)),
tiled=True,
)
y = fwd(x)
# Expected:
# [[ 12 16 76 80 140 144 204 208]
# [ 44 48 108 112 172 176 236 240]]
```
See https://github.com/google/jax/blob/bbeddbdcffd7fcd8e52ea9b708c01fdfd4c52bb0/jax/experimental/shard_map.py#L926
cc @mattjj
| 2024-02-08T06:04:09 |
|
google/jax
| 19,854 |
google__jax-19854
|
[
"15982"
] |
40038d65c29cfb2834c39b53a3a54bec69207f58
|
diff --git a/jax/_src/lax/parallel.py b/jax/_src/lax/parallel.py
--- a/jax/_src/lax/parallel.py
+++ b/jax/_src/lax/parallel.py
@@ -396,7 +396,8 @@ def bind(x, split_axis=split_axis, concat_axis=concat_axis):
split_axis += 1 # we have a new axis before split_axis now
result = all_to_all_p.bind(x, split_axis=split_axis, concat_axis=concat_axis,
axis_name=axis_name,
- axis_index_groups=axis_index_groups)
+ axis_index_groups=axis_index_groups,
+ tiled=tiled)
if not tiled and split_axis != concat_axis:
result = lax.squeeze(result, (split_axis,))
return result
@@ -954,8 +955,10 @@ def _index_in_group(axis_name, axis_index_groups):
slicing.dynamic_slice_in_dim(device_id_to_idx, cur_device_id, 1), [0])
-def _all_to_all_lowering(ctx, x, *,
- split_axis, concat_axis, axis_name, axis_index_groups):
+def _all_to_all_lowering(
+ ctx, x, *, split_axis, concat_axis, axis_name, axis_index_groups, tiled
+):
+ del tiled # expand_dims and squeeze is done in `all_to_all` if `True`
# Workaround for AllToAll not being implemented on CPU.
replica_groups = _replica_groups(ctx.module_context.axis_env, axis_name,
axis_index_groups)
@@ -985,15 +988,19 @@ def _all_to_all_lowering(ctx, x, *,
replica_groups=_replica_groups_hlo(replica_groups),
**other_args).results
-def _all_to_all_transpose_rule(cts, x, axis_name, split_axis, concat_axis, axis_index_groups):
+def _all_to_all_transpose_rule(
+ cts, x, axis_name, split_axis, concat_axis, axis_index_groups, tiled
+):
return (all_to_all(
cts,
axis_name=axis_name,
split_axis=concat_axis,
concat_axis=split_axis,
- axis_index_groups=axis_index_groups),)
+ axis_index_groups=axis_index_groups,
+ tiled=tiled),)
-def _all_to_all_batcher(vals_in, dims_in, *, axis_name, split_axis, concat_axis, axis_index_groups):
+def _all_to_all_batcher(vals_in, dims_in, *, axis_name, split_axis, concat_axis, axis_index_groups,
+ tiled):
x, = vals_in
d, = dims_in
result = all_to_all_p.bind(
@@ -1001,12 +1008,14 @@ def _all_to_all_batcher(vals_in, dims_in, *, axis_name, split_axis, concat_axis,
axis_name=axis_name,
split_axis=split_axis + (d <= split_axis),
concat_axis=concat_axis + (d <= concat_axis),
- axis_index_groups=axis_index_groups)
+ axis_index_groups=axis_index_groups,
+ tiled=tiled,
+ )
return result, d
def _all_to_all_batched_collective(axis_size, frame_name, _, vals_in, dims_in,
axis_name, split_axis, concat_axis,
- axis_index_groups):
+ axis_index_groups, tiled):
if axis_index_groups is not None:
raise NotImplementedError("Please open a feature request!")
x, = vals_in
@@ -1041,7 +1050,8 @@ def _all_to_all_batched_collective(axis_size, frame_name, _, vals_in, dims_in,
if major_axes:
x = all_to_all_p.bind(x, axis_name=major_axes,
split_axis=split_axis, concat_axis=0,
- axis_index_groups=axis_index_groups)
+ axis_index_groups=axis_index_groups,
+ tiled=tiled)
# Split out the local part into axis new_d (NOTE: d is already in axis 1)
x = _splitaxis(split_axis, axis_size, x)
new_d = split_axis
@@ -1050,7 +1060,8 @@ def _all_to_all_batched_collective(axis_size, frame_name, _, vals_in, dims_in,
if minor_axes:
x = all_to_all_p.bind(x, axis_name=minor_axes,
split_axis=split_axis, concat_axis=2,
- axis_index_groups=axis_index_groups)
+ axis_index_groups=axis_index_groups,
+ tiled=tiled)
# Fold the chunk axes into a single one
x = _foldaxis(0, _foldaxis(0, x))
@@ -1062,8 +1073,9 @@ def _all_to_all_batched_collective(axis_size, frame_name, _, vals_in, dims_in,
def _all_to_all_effectful_abstract_eval(
- x, axis_name, split_axis, concat_axis, axis_index_groups
+ x, axis_name, split_axis, concat_axis, axis_index_groups, tiled
):
+ del tiled # expand_dims and squeeze is done in `all_to_all` if `True`
if not isinstance(axis_name, (list, tuple)):
axis_name = (axis_name,)
input_aval = raise_to_shaped(x)
|
diff --git a/tests/shard_map_test.py b/tests/shard_map_test.py
--- a/tests/shard_map_test.py
+++ b/tests/shard_map_test.py
@@ -359,6 +359,40 @@ def fwd(a):
for j, block in enumerate(np.split(row_block, 2, axis=-1)):
self.assertAllClose(block, c.addressable_data(2 * i + j))
+ def test_all_to_all_grad(self):
+ mesh_axes = dict(x=4)
+ devices = np.array(jax.devices()[: np.prod(tuple(mesh_axes.values()))])
+ mesh = Mesh(
+ devices.reshape(tuple(mesh_axes.values())),
+ axis_names=tuple(mesh_axes.keys()),
+ )
+ a = jax.device_put(
+ jnp.arange(8 * 8, dtype=jnp.float32).reshape((8, 8)),
+ jax.sharding.NamedSharding(mesh, P('x', None)),
+ )
+ self.assertEqual(a.addressable_data(0).shape, (2, 8))
+
+ @jax.jit
+ @partial(
+ shard_map, mesh=mesh, in_specs=(P('x', None),), out_specs=P(None, 'x')
+ )
+ def fwd(x):
+ return lax.all_to_all(x, 'x', split_axis=1, concat_axis=0, tiled=True)
+
+ c = fwd(a)
+ self.assertEqual(c.addressable_data(0).shape, (8, 2))
+ self.assertAllClose(a, c)
+
+ @jax.jit
+ @partial(jax.grad, has_aux=True)
+ def loss_and_grad(x):
+ loss = fwd(x).sum() * 2
+ return loss, loss
+
+ grad, loss = loss_and_grad(a)
+ self.assertEqual(loss, 2 * sum(range(64)))
+ self.assertAllClose(grad, 2 * np.ones_like(a))
+
def test_eager_repr(self):
mesh = Mesh(np.array(jax.devices()[:4]).reshape(2, 2), ('x', 'y'))
s = None
|
grad of shard_map of lax.all_to_all crashes
### Description
The forward transformation works, but not the backwards pass:
```python
import functools
import chex
import jax
from jax.experimental import shard_map
from jax import lax
import jax.numpy as jnp
import numpy as np
chex.set_n_cpu_devices(4)
P = jax.sharding.PartitionSpec
shmap = shard_map.shard_map
mesh = jax.sharding.Mesh(jax.devices(), axis_names=['x'])
@functools.partial(
shmap, mesh=mesh, in_specs=(P('x', None),), out_specs=P(None, 'x')
)
def reshard(x):
return lax.all_to_all(x, 'x', split_axis=1, concat_axis=0, tiled=True)
def loss(x):
return reshard(x).sum()
in_sharding = jax.sharding.NamedSharding(mesh, P('x', None))
x = jax.device_put(np.arange(64).reshape(8, 8), in_sharding)
print(x.sharding) # NamedSharding(mesh={'x': 4}, spec=PartitionSpec('x', None))
y = reshard(x)
print(y.sharding) # NamedSharding(mesh={'x': 4}, spec=PartitionSpec(None, 'x'))
np.testing.assert_array_equal(y, x)
print(jax.grad(loss)(1.0 * x))
```
`ValueError: all_to_all requires the size of the mapped axis axis_name to equal x.shape[split_axis], but they are 4 and 8 respectively.`
<details>
```
ValueError Traceback (most recent call last)
<ipython-input-72-29f919795aa1> in <module>()
32 np.testing.assert_array_equal(y, x)
33
---> 34 print(jax.grad(loss)(1.0 * x)) # errors
third_party/py/jax/_src/traceback_util.py in reraise_with_filtered_traceback(*args, **kwargs)
164 __tracebackhide__ = True
165 try:
--> 166 return fun(*args, **kwargs)
167 except Exception as e:
168 mode = _filtering_mode()
third_party/py/jax/_src/api.py in grad_f(*args, **kwargs)
644 @api_boundary
645 def grad_f(*args, **kwargs):
--> 646 _, g = value_and_grad_f(*args, **kwargs)
647 return g
648
third_party/py/jax/_src/traceback_util.py in reraise_with_filtered_traceback(*args, **kwargs)
164 __tracebackhide__ = True
165 try:
--> 166 return fun(*args, **kwargs)
167 except Exception as e:
168 mode = _filtering_mode()
third_party/py/jax/_src/api.py in value_and_grad_f(*args, **kwargs)
726 _check_scalar(ans)
727 tree_map(partial(_check_output_dtype_grad, holomorphic), ans)
--> 728 g = vjp_py(lax_internal._one(ans))
729 g = g[0] if isinstance(argnums, int) else g
730 if not has_aux:
third_party/py/jax/_src/tree_util.py in __call__(self, *args, **kw)
301
302 def __call__(self, *args, **kw):
--> 303 return self.fun(*args, **kw)
304
305 def __hash__(self):
third_party/py/jax/_src/api.py in _vjp_pullback_wrapper(name, cotangent_dtypes, cotangent_shapes, io_tree, fun, *py_args_)
2102 "must be the same as the shape of corresponding primal input "
2103 f"{ct_shape}.")
-> 2104 ans = fun(*args)
2105 return tree_unflatten(out_tree, ans)
2106
third_party/py/jax/_src/tree_util.py in __call__(self, *args, **kw)
301
302 def __call__(self, *args, **kw):
--> 303 return self.fun(*args, **kw)
304
305 def __hash__(self):
third_party/py/jax/_src/interpreters/ad.py in unbound_vjp(pvals, jaxpr, consts, *cts)
144 cts = tuple(ct for ct, pval in zip(cts, pvals) if not pval.is_known())
145 dummy_args = [UndefinedPrimal(v.aval) for v in jaxpr.invars]
--> 146 arg_cts = backward_pass(jaxpr, reduce_axes, True, consts, dummy_args, cts)
147 return map(instantiate_zeros, arg_cts)
148
third_party/py/jax/_src/interpreters/ad.py in backward_pass(jaxpr, reduce_axes, transform_stack, consts, primals_in, cotangents_in)
251 reduce_axes, cts_in, *invals, **eqn.params)
252 else:
--> 253 cts_out = get_primitive_transpose(eqn.primitive)(
254 cts_in, *invals, **eqn.params)
255 cts_out = [Zero(v.aval) for v in eqn.invars] if cts_out is Zero else cts_out
third_party/py/jax/experimental/shard_map.py in _shard_map_transpose(out_cts, jaxpr, mesh, in_names, out_names, check_rep, *args)
1061 return tuple(names for names, nz in zip(in_names, nz_arg_cts()) if nz)
1062
-> 1063 out_flat = shard_map_p.bind(
1064 fun_trans_flat, *all_args, mesh=mesh, in_names=tuple(new_in_names),
1065 out_names_thunk=new_out_names_thunk, check_rep=check_rep)
third_party/py/jax/experimental/shard_map.py in bind(self, fun, mesh, in_names, out_names_thunk, check_rep, *args)
345
346 tracers = map(top_trace.full_raise, args)
--> 347 outs = top_trace.process_shard_map( # pytype: disable=attribute-error
348 shard_map_p, fun, tracers, mesh=mesh, in_names=in_names,
349 out_names_thunk=new_out_names_thunk, check_rep=check_rep)
third_party/py/jax/experimental/shard_map.py in _shard_map_impl(***failed resolving arguments***)
552 t = main.with_cur_sublevel()
553 in_tracers = map(partial(ShardMapTracer, t), in_rep, args)
--> 554 ans = fun.call_wrapped(*in_tracers)
555 out_tracers = map(t.full_raise, ans)
556 outs_, out_rep = unzip2((t.val, t.rep) for t in out_tracers)
third_party/py/jax/_src/linear_util.py in call_wrapped(self, *args, **kwargs)
186
187 try:
--> 188 ans = self.f(*args, **dict(self.params, **kwargs))
189 except:
190 # Some transformations yield from inside context managers, so we have to
third_party/py/jax/experimental/shard_map.py in fun_trans(out_cts, args)
1044 pe.close_jaxpr(jaxpr), map(ad.is_undefined_primal, args), False)
1045 res_reshaped = core.jaxpr_as_fun(jaxpr_known)(*res)
-> 1046 out = ad.backward_pass(
1047 jaxpr_unknown.jaxpr, (), False, (), (*res_reshaped, *undefs), out_cts
1048 )
third_party/py/jax/_src/interpreters/ad.py in backward_pass(jaxpr, reduce_axes, transform_stack, consts, primals_in, cotangents_in)
251 reduce_axes, cts_in, *invals, **eqn.params)
252 else:
--> 253 cts_out = get_primitive_transpose(eqn.primitive)(
254 cts_in, *invals, **eqn.params)
255 cts_out = [Zero(v.aval) for v in eqn.invars] if cts_out is Zero else cts_out
third_party/py/jax/_src/interpreters/ad.py in linear_transpose2(transpose_rule, cotangent, *args, **kwargs)
518
519 def linear_transpose2(transpose_rule, cotangent, *args, **kwargs):
--> 520 return Zero if type(cotangent) is Zero else transpose_rule(cotangent, *args, **kwargs)
521
522
third_party/py/jax/_src/lax/parallel.py in _all_to_all_transpose_rule(cts, x, axis_name, split_axis, concat_axis, axis_index_groups)
993
994 def _all_to_all_transpose_rule(cts, x, axis_name, split_axis, concat_axis, axis_index_groups):
--> 995 return (all_to_all(
996 cts,
997 axis_name=axis_name,
third_party/py/jax/_src/lax/parallel.py in all_to_all(x, axis_name, split_axis, concat_axis, axis_index_groups, tiled)
403 return result
404
--> 405 return tree_util.tree_map(bind, x)
406
407 def axis_index(axis_name):
third_party/py/jax/_src/tree_util.py in tree_map(f, tree, is_leaf, *rest)
208 leaves, treedef = tree_flatten(tree, is_leaf)
209 all_leaves = [leaves] + [treedef.flatten_up_to(r) for r in rest]
--> 210 return treedef.unflatten(f(*xs) for xs in zip(*all_leaves))
211
212 def build_tree(treedef: PyTreeDef, xs: Any) -> Any:
third_party/py/jax/_src/tree_util.py in <genexpr>(.0)
208 leaves, treedef = tree_flatten(tree, is_leaf)
209 all_leaves = [leaves] + [treedef.flatten_up_to(r) for r in rest]
--> 210 return treedef.unflatten(f(*xs) for xs in zip(*all_leaves))
211
212 def build_tree(treedef: PyTreeDef, xs: Any) -> Any:
third_party/py/jax/_src/lax/parallel.py in bind(x, split_axis, concat_axis)
387 msg = ("all_to_all requires the size of the mapped axis axis_name to "
388 "equal x.shape[split_axis], but they are {} and {} respectively.")
--> 389 raise ValueError(msg.format(group_size, x.shape[split_axis]))
390 if split_axis < concat_axis:
391 concat_axis += 1 # concat_axis gives a position _after_ split_axis is removed
ValueError: all_to_all requires the size of the mapped axis axis_name to equal x.shape[split_axis], but they are 4 and 8 respectively.
```
</details>
### What jax/jaxlib version are you using?
_No response_
### Which accelerator(s) are you using?
_No response_
### Additional system info
_No response_
### NVIDIA GPU info
_No response_
| 2024-02-16T21:14:47 |
|
google/jax
| 19,909 |
google__jax-19909
|
[
"8356"
] |
5f19f7712b485493ac141c44eea3b3eb1ffdfb59
|
diff --git a/jax/_src/custom_derivatives.py b/jax/_src/custom_derivatives.py
--- a/jax/_src/custom_derivatives.py
+++ b/jax/_src/custom_derivatives.py
@@ -734,12 +734,17 @@ def _flatten_bwd(in_tree, in_avals, out_trees, *args):
zero = object() # non-pytree sentinel to replace Nones in py_cts_in
dummy = tree_unflatten(in_tree, [object()] * in_tree.num_leaves)
cts_in_flat = []
- append = lambda x, d: cts_in_flat.extend([x] * len(tree_flatten(d)[0])) or x
+ def append(x, d):
+ num_leaves = len(tree_flatten(d)[0])
+ if x is None and d is not None:
+ cts_in_flat.extend([zero] * num_leaves)
+ elif x is not None:
+ cts_in_flat.extend([x] * num_leaves)
+ return x
try:
if not isinstance(py_cts_in, tuple):
raise ValueError
- tree_map(append,
- tuple(zero if ct is None else ct for ct in py_cts_in), dummy)
+ tree_map(append, py_cts_in, dummy, is_leaf=lambda x: x is None)
except ValueError:
_, in_tree2 = tree_flatten(py_cts_in)
msg = ("Custom VJP rule must produce an output with the same container "
|
diff --git a/tests/api_test.py b/tests/api_test.py
--- a/tests/api_test.py
+++ b/tests/api_test.py
@@ -9212,6 +9212,38 @@ def g(x):
g(1.) # doesn't crash
+ def test_nones_representing_zeros_in_subtrees_returned_by_bwd(self):
+ # https://github.com/google/jax/issues/8356
+ @jax.custom_vjp
+ def f(x):
+ return x[0]
+
+ def f_fwd(x):
+ return f(x), None
+
+ def f_bwd(_, z_bar):
+ return (z_bar, (None, None)),
+
+ f.defvjp(f_fwd, f_bwd)
+
+ jax.grad(f)((1.0, (2.0, 3.0))) # don't crash
+
+ def test_pytree_nones_returned_by_bwd(self):
+ @jax.custom_vjp
+ def f(x):
+ return x[0]
+
+ def f_fwd(x):
+ return f(x), None
+
+ def f_bwd(_, z_bar):
+ return (z_bar, (None, None)),
+
+ f.defvjp(f_fwd, f_bwd)
+
+ jax.grad(f)((1.0, (2.0, None))) # don't crash
+
+
def transpose_unary(f, x_example):
def transposed(y):
|
Improve error message in custom_vjp when returning the wrong grad structure
```
import jax
from jax import numpy as jnp
def splat_core(data, warp, output_dims):
output = jnp.zeros((output_dims[0], output_dims[1], data.shape[-1]),
dtype=data.dtype)
output = output.at[:data.shape[0], :data.shape[1]].set(data * warp[..., :1] +
warp[..., 1:2])
return output
@jax.custom_vjp
def splat_2d(data, warp, output_dims):
"""Functions.
Args:
data: An [H, W, C] array.
warp: An [H, W, 2] array.
output_dims: A 2-tuple containing the output height and width.
Returns:
An array with shape [output_dims[0], output_dims[1], data.shape[2]]
"""
return splat_core(data, warp, output_dims=output_dims)
def splat_2d_fwd(data, warp, output_dims):
return splat_2d(data, warp, output_dims), (data, warp)
def splat_2d_grad_impl(res, g):
return res[0] * 2.0 * jax.numpy.sum(g), res[1] * 2.0 * jax.numpy.sum(g)
def splat_2d_bwd(res, g):
actual_grad = res[0] * 2.0 * jax.numpy.sum(g), res[1] * 2.0 * jax.numpy.sum(g)
# Need to return a tuple of None's here since output_dims is interpreted as
# a pytree.
return actual_grad + ((None, None),)
splat_2d.defvjp(splat_2d_fwd, splat_2d_bwd)
def test():
output_dims = (6, 7)
warp = jnp.ones((5, 5, 2))
data = jnp.ones((5, 5, 3))
grad_output = jnp.ones((output_dims[0], output_dims[1], 3))
# Is passing the output_dims here right?
actual_grad = jax.vjp(splat_2d, data, warp, output_dims)[1](grad_output)
test()
```
Note that `splat_2d_bwd` should return `actual_grad + (None,)`, i.e. a single `None` for the `output_dims` argument.
As written, it gives this confusing error message:
```
The above exception was the direct cause of the following exception:
AssertionError Traceback (most recent call last)
<ipython-input-5-e07748477af3> in <module>()
50
51
---> 52 test()
<ipython-input-5-e07748477af3> in test()
47 data = jnp.ones((5, 5, 3))
48 grad_output = jnp.ones((output_dims[0], output_dims[1], 3))
---> 49 actual_grad = jax.vjp(splat_2d, data, warp, output_dims)[1](grad_output)
50
51
google3/third_party/py/jax/_src/tree_util.py in <lambda>(*args, **kw)
324 if isinstance(func, functools.partial):
325 original_func = func
--> 326 func = lambda *args, **kw: original_func(*args, **kw)
327 func.func = original_func.func
328 func.args = original_func.args
google3/third_party/py/jax/_src/api.py in _vjp_pullback_wrapper(cotangent_dtypes, cotangent_shapes, io_tree, fun, py_args)
2217 "must be the same as the shape of corresponding primal input "
2218 f"{ct_shape}.")
-> 2219 ans = fun(*args)
2220 return tree_unflatten(out_tree, ans)
2221
google3/third_party/py/jax/_src/tree_util.py in <lambda>(*args, **kw)
324 if isinstance(func, functools.partial):
325 original_func = func
--> 326 func = lambda *args, **kw: original_func(*args, **kw)
327 func.func = original_func.func
328 func.args = original_func.args
google3/third_party/py/jax/interpreters/ad.py in unbound_vjp(pvals, jaxpr, consts, *cts)
121 cts = tuple(map(ignore_consts, cts, pvals))
122 dummy_args = [UndefinedPrimal(v.aval) for v in jaxpr.invars]
--> 123 arg_cts = backward_pass(jaxpr, reduce_axes, consts, dummy_args, cts)
124 return map(instantiate_zeros, arg_cts)
125
google3/third_party/py/jax/interpreters/ad.py in backward_pass(jaxpr, reduce_axes, consts, primals_in, cotangents_in)
227 else:
228 cts_out = get_primitive_transpose(eqn.primitive)(cts_in, *invals,
--> 229 **eqn.params)
230 cts_out = [Zero(v.aval) for v in eqn.invars] if cts_out is Zero else cts_out
231 # FIXME: Some invars correspond to primals!
google3/third_party/py/jax/interpreters/ad.py in _custom_lin_transpose(cts_out, num_res, bwd, out_avals, *invals)
688 res, _ = split_list(invals, [num_res])
689 cts_out = map(instantiate_zeros_aval, out_avals, cts_out)
--> 690 cts_in = bwd.call_wrapped(*res, *cts_out)
691 return [None] * num_res + list(cts_in)
692 primitive_transposes[custom_lin_p] = _custom_lin_transpose
google3/third_party/py/jax/linear_util.py in call_wrapped(self, *args, **kwargs)
177 while stack:
178 gen, out_store = stack.pop()
--> 179 ans = gen.send(ans)
180 if out_store is not None:
181 ans, side = ans
google3/third_party/py/jax/_src/custom_derivatives.py in _flatten_bwd(in_tree, in_avals, out_trees, *args)
592 # TODO(mattjj): change this to check if tangent type represents 0dim vspace
593 yield [Zero(a.at_least_vspace()) if ct is zero or a != a.at_least_vspace()
--> 594 else ct for a, ct in zip(in_avals, cts_in_flat)]
595
596
google3/third_party/py/jax/_src/util.py in safe_zip(*args)
33 n = len(args[0])
34 for arg in args[1:]:
---> 35 assert len(arg) == n, 'length mismatch: {}'.format(list(map(len, args)))
36 return list(zip(*args))
37
AssertionError: length mismatch: [4, 2]
```
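For reference, here is the corrected backward rule from the note above (a sketch, reusing the definitions from the snippet at the top; it returns a single `None` for the non-differentiable `output_dims` argument instead of a nested `(None, None)`):
```python
def splat_2d_bwd(res, g):
  actual_grad = (res[0] * 2.0 * jax.numpy.sum(g), res[1] * 2.0 * jax.numpy.sum(g))
  # One cotangent per primal argument: data, warp, and None for output_dims.
  return actual_grad + (None,)
```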
|
Hi @johnpjf
I executed the mentioned code with the latest JAX version 0.4.23 on Google Colab. Now the code raises a `ValueError` indicating that `safe_zip() argument 2 is shorter than argument 1`.
```python
JaxStackTraceBeforeTransformation: ValueError: safe_zip() argument 2 is shorter than argument 1
The preceding stack trace is the source of the JAX operation that, once transformed by JAX, triggered the following exception.
--------------------
The above exception was the direct cause of the following exception:
ValueError Traceback (most recent call last)
/usr/local/lib/python3.10/dist-packages/jax/_src/custom_derivatives.py in _flatten_bwd(in_tree, in_avals, out_trees, *args)
753 # TODO(mattjj): change this to check if tangent type represents 0dim vspace
754 yield [Zero(a.at_least_vspace()) if ct is zero or a != a.at_least_vspace()
--> 755 else ct for a, ct in zip(in_avals, cts_in_flat)]
756
757
ValueError: safe_zip() argument 2 is shorter than argument 1
```
Kindly find the [gist](https://colab.research.google.com/gist/rajasekharporeddy/8361c21b0722ab7570627f20e3ff251a/-8356.ipynb) for reference
Thank you
Thanks for checking @rajasekharporeddy. I'm surprised the error message is still so bad! I thought we had improved it.
| 2024-02-21T08:21:13 |
google/jax
| 19,936 |
google__jax-19936
|
[
"19935"
] |
ac19d0f3b246c7ea9754fd42ba6fd88c80776bfc
|
diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -940,39 +940,41 @@ def isclose(a: ArrayLike, b: ArrayLike, rtol: ArrayLike = 1e-05, atol: ArrayLike
equal_nan: bool = False) -> Array:
a, b = util.promote_args("isclose", a, b)
dtype = _dtype(a)
- if issubdtype(dtype, inexact):
- if issubdtype(dtype, complexfloating):
- dtype = util._complex_elem_type(dtype)
- rtol = lax.convert_element_type(rtol, dtype)
- atol = lax.convert_element_type(atol, dtype)
- out = lax.le(
- lax.abs(lax.sub(a, b)),
- lax.add(atol, lax.mul(rtol, lax.abs(b))))
- # This corrects the comparisons for infinite and nan values
- a_inf = ufuncs.isinf(a)
- b_inf = ufuncs.isinf(b)
- any_inf = ufuncs.logical_or(a_inf, b_inf)
- both_inf = ufuncs.logical_and(a_inf, b_inf)
- # Make all elements where either a or b are infinite to False
- out = ufuncs.logical_and(out, ufuncs.logical_not(any_inf))
- # Make all elements where both a or b are the same inf to True
- same_value = lax.eq(a, b)
- same_inf = ufuncs.logical_and(both_inf, same_value)
- out = ufuncs.logical_or(out, same_inf)
-
- # Make all elements where either a or b is NaN to False
- a_nan = ufuncs.isnan(a)
- b_nan = ufuncs.isnan(b)
- any_nan = ufuncs.logical_or(a_nan, b_nan)
- out = ufuncs.logical_and(out, ufuncs.logical_not(any_nan))
- if equal_nan:
- # Make all elements where both a and b is NaN to True
- both_nan = ufuncs.logical_and(a_nan, b_nan)
- out = ufuncs.logical_or(out, both_nan)
- return out
- else:
+ if dtypes.issubdtype(dtype, dtypes.extended):
return lax.eq(a, b)
+ a, b = util.promote_args_inexact("isclose", a, b)
+ dtype = _dtype(a)
+ if issubdtype(dtype, complexfloating):
+ dtype = util._complex_elem_type(dtype)
+ rtol = lax.convert_element_type(rtol, dtype)
+ atol = lax.convert_element_type(atol, dtype)
+ out = lax.le(
+ lax.abs(lax.sub(a, b)),
+ lax.add(atol, lax.mul(rtol, lax.abs(b))))
+ # This corrects the comparisons for infinite and nan values
+ a_inf = ufuncs.isinf(a)
+ b_inf = ufuncs.isinf(b)
+ any_inf = ufuncs.logical_or(a_inf, b_inf)
+ both_inf = ufuncs.logical_and(a_inf, b_inf)
+ # Make all elements where either a or b are infinite to False
+ out = ufuncs.logical_and(out, ufuncs.logical_not(any_inf))
+ # Make all elements where both a or b are the same inf to True
+ same_value = lax.eq(a, b)
+ same_inf = ufuncs.logical_and(both_inf, same_value)
+ out = ufuncs.logical_or(out, same_inf)
+
+ # Make all elements where either a or b is NaN to False
+ a_nan = ufuncs.isnan(a)
+ b_nan = ufuncs.isnan(b)
+ any_nan = ufuncs.logical_or(a_nan, b_nan)
+ out = ufuncs.logical_and(out, ufuncs.logical_not(any_nan))
+ if equal_nan:
+ # Make all elements where both a and b is NaN to True
+ both_nan = ufuncs.logical_and(a_nan, b_nan)
+ out = ufuncs.logical_or(out, both_nan)
+ return out
+
def _interp(x: ArrayLike, xp: ArrayLike, fp: ArrayLike,
left: ArrayLike | str | None = None,
|
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py
--- a/tests/lax_numpy_test.py
+++ b/tests/lax_numpy_test.py
@@ -3532,6 +3532,10 @@ def testIsClose(self):
self.assertTrue(jnp.all(jnp.equal(result_np, result_jax)))
self.assertTrue(jnp.all(jnp.equal(result_np, result_jit)))
+ self.assertEqual(np.isclose(6, 10, rtol=0.5), jnp.isclose(6, 10, rtol=0.5))
+ key = jax.random.key(0)
+ self.assertTrue(jnp.isclose(key, key))
+
@jtu.sample_product(
x=[1, [1], [1, 1 + 1E-4], [1, np.nan]],
y=[1, [1], [1, 1 + 1E-4], [1, np.nan]],
|
`jax.numpy.isclose` differs from `numpy.isclose`
### Description
The results of `jax.numpy.isclose` and `numpy.isclose` differ because the former treats integers differently than floats. Specifically, for "exact" dtypes, jax demands exact equality rather than being "close". The relevant line is
https://github.com/google/jax/blob/8d6bb0197b65f7aa0439087d4fc4e34ccb88c509/jax/_src/numpy/lax_numpy.py#L974
```python
>>> import numpy as np
>>> from jax import numpy as jnp
>>> np.isclose(6, 10, rtol=0.5)
True
>>> jnp.isclose(6, 10, rtol=0.5)
Array(False, dtype=bool, weak_type=True)
```
Casting to a float first gives the expected result.
```python
>>> jnp.isclose(6, 10., rtol=0.5)
Array(True, dtype=bool)
```
### System info (python version, jaxlib version, accelerator, etc.)
```
jax: 0.4.24
jaxlib: 0.4.24
numpy: 1.26.3
python: 3.10.10 (main, Mar 3 2023, 16:31:35) [GCC 9.4.0]
jax.devices (1 total, 1 local): [CpuDevice(id=0)]
process_count: 1
```
| 2024-02-22T22:29:16 |
|
google/jax
| 19,986 |
google__jax-19986
|
[
"19978"
] |
d29acbab2c46121ab7bc7e9b8a3cc108b9a23507
|
diff --git a/jax/_src/callback.py b/jax/_src/callback.py
--- a/jax/_src/callback.py
+++ b/jax/_src/callback.py
@@ -97,7 +97,7 @@ def pure_callback_batching_rule(
vectorized: bool,
result_avals: Sequence[core.ShapedArray],
):
- axis_size = next(a.shape[0] for a, d in zip(args, dims)
+ axis_size = next(a.shape[d] for a, d in zip(args, dims)
if d is not batching.not_mapped)
new_args = [arg if dim is batching.not_mapped else
batching.moveaxis(arg, dim, 0) for arg, dim in zip(args, dims)]
|
diff --git a/tests/python_callback_test.py b/tests/python_callback_test.py
--- a/tests/python_callback_test.py
+++ b/tests/python_callback_test.py
@@ -566,6 +566,16 @@ def h(x, y):
self.assertArraysAllClose(out, np.sin(np.arange(4.)) + np.arange(10., 14.),
rtol=1E-7, check_dtypes=False)
+ @jax.jit
+ @functools.partial(jax.vmap, in_axes=1, out_axes=1)
+ def h(x, y):
+ out_shape = jax.ShapeDtypeStruct(x.shape, np.result_type(x.dtype, y.dtype))
+ return jax.pure_callback(lambda x, y: np.sin(x) + y, out_shape, x, y)
+ out = h(jnp.arange(4.)[None], jnp.arange(10., 14.)[None])
+ self.assertArraysAllClose(out, np.sin(np.arange(4.)) + np.arange(10.,
+ 14.)[None],
+ rtol=1E-7, check_dtypes=False)
+
def test_vmap_vectorized_callback(self):
def cb(x):
@@ -598,6 +608,15 @@ def h(x, y):
out = h(jnp.arange(4.), 4.)
np.testing.assert_allclose(out, np.sin(np.arange(4.)) + 4.)
+ @jax.jit
+ @functools.partial(jax.vmap, in_axes=(1, None), out_axes=1)
+ def h(x, y):
+ return jax.pure_callback(lambda x, y: np.sin(x) + y, x, x, y,
+ vectorized=True)
+ out = h(jnp.arange(4.)[None], 4.)
+ np.testing.assert_allclose(out, np.sin(np.arange(4.)[None]) + 4.)
+
+
def test_vmap_vectorized_callback_errors_if_returns_wrong_shape(self):
def cb(x):
|
`jax.vmap(jax.pure_callback(...), in_axes=1)` is broken
### Description
When `vmap`ping a `jax.pure_callback` over the non-0 axis, the logic that checks the callback output shape seems wrong.
MWE:
```python
from functools import partial
import jax
import jax.numpy as jnp
import numpy as np
def my_fun(x):
print(f"provided callback with {x.shape}")
return x
@jax.jit
def my_jax_fun(x):
res = jax.pure_callback(my_fun,
jax.ShapeDtypeStruct(x.shape, x.dtype),
x,
vectorized=True)
return res
s = jnp.ones((2,3,4))
vmap_fun = jax.vmap(my_jax_fun, in_axes=1, out_axes=1)
vmap_fun(s)
```
error:
```python
provided callback with (3, 2, 4)
---------------------------------------------------------------------------
XlaRuntimeError Traceback (most recent call last)
Cell In[13], line 23
19 s = jnp.ones((2,3,4))
21 vmap_fun = jax.vmap(my_jax_fun, in_axes=1, out_axes=1)
---> 23 vmap_fun(s)
[... skipping hidden 17 frame]
File ~/Documents/pythonenvs/netket/python-3.11.2/lib/python3.11/site-packages/jax/_src/interpreters/pxla.py:1201, in ExecuteReplicated.__call__(self, *args)
1198 if (self.ordered_effects or self.has_unordered_effects
1199 or self.has_host_callbacks):
1200 input_bufs = self._add_tokens_to_inputs(input_bufs)
-> 1201 results = self.xla_executable.execute_sharded(
1202 input_bufs, with_tokens=True
1203 )
1204 result_token_bufs = results.disassemble_prefix_into_single_device_arrays(
1205 len(self.ordered_effects))
1206 sharded_runtime_token = results.consume_token()
XlaRuntimeError: INTERNAL: Generated function failed: CpuCallback error: RuntimeError: Incorrect output shape for return value 0: Expected: (2, 2, 4), Actual: (3, 2, 4)
```
### System info (python version, jaxlib version, accelerator, etc.)
```
jax: 0.4.24
jaxlib: 0.4.24
numpy: 1.24.3
python: 3.11.2 (main, Apr 7 2023, 16:35:55) [Clang 14.0.3 (clang-1403.0.22.14.1)]
jax.devices (1 total, 1 local): [CpuDevice(id=0)]
process_count: 1
```
cc @Adrien-Kahn
| 2024-02-26T23:13:17 |
|
google/jax
| 20,083 |
google__jax-20083
|
[
"20082"
] |
67e3542d326d2bb6e4cc958e134ace4baaa152ec
|
diff --git a/jax/experimental/attrs.py b/jax/experimental/attrs.py
--- a/jax/experimental/attrs.py
+++ b/jax/experimental/attrs.py
@@ -72,6 +72,7 @@ def _ensure_tracked(trace: pe.DynamicJaxprTrace, obj: Any, attr: str):
frame.attrs_tracked.append((obj, attr))
frame.attrs_inits.append(init_val)
frame.attrs_vars.append(var)
+ frame.tracers.append(tracer)
pe.DynamicJaxprTrace._ensure_tracked = _ensure_tracked
def _getattr_staging(trace, *, obj, attr):
|
diff --git a/tests/attrs_test.py b/tests/attrs_test.py
--- a/tests/attrs_test.py
+++ b/tests/attrs_test.py
@@ -45,6 +45,7 @@ class Thing:
class AttrsTest(jtu.JaxTestCase):
+
@parameterized.parameters([True, False])
def test_jit_basic(self, jit: bool):
thing = Thing(1.0)
@@ -228,6 +229,28 @@ def f(obj, x):
self.assertAllClose(thing.x, 3.0, check_dtypes=False)
self.assertEqual(count, 1)
+ def test_tracer_lifetime_bug(self):
+ # regression test for https://github.com/google/jax/issues/20082
+ class StatefulRNG:
+ key: jax.Array
+
+ def __init__(self, key: jax.Array):
+ self.key = key
+
+ def split(self) -> jax.Array:
+ key = jax_getattr(self, "key")
+ new_key, returned_key = jax.random.split(key)
+ jax_setattr(self, "key", new_key)
+ return returned_key
+
+ rng = StatefulRNG(jax.random.key(0))
+
+ def jitted():
+ rng.split()
+ rng.split()
+
+ jax.jit(jitted)() # don't crash
+
class AttrsJVPTest(jtu.JaxTestCase):
@@ -533,5 +556,6 @@ def f_ref(x, y, z, w):
self.assertAllClose(attr_cotangents[(thing2, 'x')], attr_cotangents_ref[1],
check_dtypes=False)
+
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
|
`jax.experimental.attrs`: Spurious "AssertionError: a jaxpr variable must be created only once per tracer"
### Description
The following code results in an erroneous jax internal assertion
> `AssertionError: a jaxpr variable must be created only once per tracer`
The issue is a use-after-free due to `experimental.attrs` creating a Tracer but not adequately ensuring its lifetime.
```python
import jax
from jax.experimental.attrs import jax_getattr, jax_setattr
class StatefulRNG:
key: jax.Array
def __init__(self, key: jax.Array):
self.key = key
def split(self) -> jax.Array:
key = jax_getattr(self, "key")
new_key, returned_key = jax.random.split(key)
jax_setattr(self, "key", new_key)
return returned_key
rng = StatefulRNG(jax.random.key(0))
def jitted():
rng.split()
rng.split()
jax.jit(jitted)()
```
(n.b. this is not code or a style I intend to use; I was just trying to experiment with `experimental.attrs` to wrap my head around it)
Output on my laptop (`JAX_TRACEBACK_FILTERING=off` [version attached](https://github.com/google/jax/files/14500467/attrs_bug.log.txt))
```
❯ JAX_PLATFORMS=cpu python attrs_bug.py
Traceback (most recent call last):
File "/Users/nelhage/attrs_bug.py", line 27, in <module>
jax.jit(jitted)()
File "/Users/nelhage/attrs_bug.py", line 24, in jitted
rng.split()
File "/Users/nelhage/attrs_bug.py", line 14, in split
new_key, returned_key = jax.random.split(key)
^^^^^^^^^^^^^^^^^^^^^
File "/opt/homebrew/Caskroom/miniforge/base/envs/py311/lib/python3.11/site-packages/jax/_src/random.py", line 295, in split
return _return_prng_keys(wrapped, _split(typed_key, num))
^^^^^^^^^^^^^^^^^^^^^^
File "/opt/homebrew/Caskroom/miniforge/base/envs/py311/lib/python3.11/site-packages/jax/_src/random.py", line 281, in _split
return prng.random_split(key, shape=shape)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/homebrew/Caskroom/miniforge/base/envs/py311/lib/python3.11/site-packages/jax/_src/prng.py", line 631, in random_split
return random_split_p.bind(keys, shape=shape)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
AssertionError: a jaxpr variable must be created only once per tracer
--------------------
For simplicity, JAX has removed its internal frames from the traceback of the following exception. Set JAX_TRACEBACK_FILTERING=off to include these.
```
The problem arises from these lines in `attrs.py`:
https://github.com/google/jax/blob/f9e20d58754283de87b2ed35cc9df58bcdff2073/jax/experimental/attrs.py#L70-L71
The `tracer` we create is referenced only by way of the `setattr` on the following line. If a future mutation to the tracked object drops that reference, the tracer will be freed, and its address (`id`) may be reused by a future object. If a new `Tracer` then happens to get allocated at the same address, the `tracer_to_var` map will still contain the old mapping and result in the observed problem.
We can hackily test that theory by making sure to manually retain all the tracers inside of our reproducer; this version works reliably for me:
```python
import jax
from jax.experimental.attrs import jax_getattr, jax_setattr
class StatefulRNG:
key: jax.Array
def __init__(self, key: jax.Array):
self.key = key
def split(self, l) -> jax.Array:
key = jax_getattr(self, "key")
l.append(key)
new_key, returned_key = jax.random.split(key)
l.append(new_key)
l.append(returned_key)
jax_setattr(self, "key", new_key)
return returned_key
rng = StatefulRNG(jax.random.key(0))
def jitted():
l = []
rng.split(l)
rng.split(l)
l.clear()
jax.jit(jitted)()
```
### System info (python version, jaxlib version, accelerator, etc.)
```
❯ JAX_PLATFORMS='cpu' python -c 'import jax; jax.print_environment_info()'
jax: 0.4.25
jaxlib: 0.4.25
numpy: 1.24.4
python: 3.11.6 | packaged by conda-forge | (main, Oct 3 2023, 10:37:07) [Clang 15.0.7 ]
jax.devices (1 total, 1 local): [CpuDevice(id=0)]
process_count: 1
platform: uname_result(system='Darwin', node='Nelson-Elhage-MacBook', release='23.3.0', version='Darwin Kernel Version 23.3.0: Wed Dec 20 21:30:27 PST 2023; root:xnu-10002.81.5~7/RELEASE_ARM64_T8103', machine='arm64')
```
|
Brilliant, thank you for the clear diagnosis!
I think we should probably persist all the Tracers we create during jaxpr tracing. That's usually what we do with [this attribute on the builder](https://github.com/google/jax/blob/67e3542d326d2bb6e4cc958e134ace4baaa152ec/jax/_src/interpreters/partial_eval.py#L1760), and we [append to it when we make new tracers in the usual path](https://github.com/google/jax/blob/67e3542d326d2bb6e4cc958e134ace4baaa152ec/jax/_src/interpreters/partial_eval.py#L2001), but as you point out we neglected to do it on the attrs path.
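A minimal sketch of that change in `jax/experimental/attrs.py` (it mirrors the patch at the top of this entry):
```python
# at the end of the monkey-patched _ensure_tracked, after the new tracer is built:
frame.attrs_tracked.append((obj, attr))
frame.attrs_inits.append(init_val)
frame.attrs_vars.append(var)
frame.tracers.append(tracer)  # keep the tracer alive so its id cannot be reused
```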
| 2024-03-05T20:09:23 |
google/jax
| 20,087 |
google__jax-20087
|
[
"20086"
] |
20090dd1766bb2e7a086b654dd42f8e50bf35d95
|
diff --git a/jax/_src/scipy/fft.py b/jax/_src/scipy/fft.py
--- a/jax/_src/scipy/fft.py
+++ b/jax/_src/scipy/fft.py
@@ -109,7 +109,7 @@ def dctn(x: Array, type: int = 2,
return x
-@implements(osp_fft.dct)
+@implements(osp_fft.idct)
def idct(x: Array, type: int = 2, n: int | None = None,
axis: int = -1, norm: str | None = None) -> Array:
if type != 2:
|
Documentation Errors in jax.scipy module
I've found the following errors in the documentation for the [`jax.scipy`](https://jax.readthedocs.io/en/latest/jax.scipy.html) module:
https://jax.readthedocs.io/en/latest/jax.scipy.html
1. Duplicate `sqrtm` function:
- [ ] The `sqrtm` function is listed twice under `jax.scipy.linalg`.
2. Typo in idctn description:
- [ ] The description for `idctn` incorrectly states "Discrete Cosine Transform." It should be "Inverse Discrete Cosine Transform" to match the function's behavior.
| 2024-03-05T23:23:16 |
||
google/jax
| 20,094 |
google__jax-20094
|
[
"19085"
] |
59e9ee368bc6f322ac86ea49fa736fe65b7646f3
|
diff --git a/jax/_src/lax/control_flow/loops.py b/jax/_src/lax/control_flow/loops.py
--- a/jax/_src/lax/control_flow/loops.py
+++ b/jax/_src/lax/control_flow/loops.py
@@ -2046,16 +2046,18 @@ def map(f, xs):
return ys
def _rng_bit_generator_batching_rule(batched_args, batch_dims, *, shape, dtype, algorithm):
- """Calls RBG in a loop and stacks the results."""
- key, = batched_args
+ keys, = batched_args
bd, = batch_dims
if bd is batching.not_mapped:
- return lax.rng_bit_generator_p.bind(key, shape=shape, dtype=dtype,
+ return lax.rng_bit_generator_p.bind(keys, shape=shape, dtype=dtype,
algorithm=algorithm), (None, None)
- key = batching.moveaxis(key, bd, 0)
- map_body = lambda k: lax.rng_bit_generator_p.bind(k, shape=shape, dtype=dtype, algorithm=algorithm)
- stacked_keys, stacked_bits = map(map_body, key)
- return (stacked_keys, stacked_bits), (0, 0)
+ keys = batching.moveaxis(keys, bd, 0)
+ batch_size = keys.shape[0]
+ key = keys[0]
+ new_key, bits = lax.rng_bit_generator_p.bind(key, shape=(batch_size, *shape),
+ dtype=dtype, algorithm=algorithm)
+ new_keys = jax.lax.dynamic_update_index_in_dim(keys, new_key, 0, axis=0)
+ return (new_keys, bits), (0, 0)
batching.primitive_batchers[lax.rng_bit_generator_p] = _rng_bit_generator_batching_rule # type: ignore
diff --git a/jax/_src/random.py b/jax/_src/random.py
--- a/jax/_src/random.py
+++ b/jax/_src/random.py
@@ -1233,7 +1233,7 @@ def _gamma_impl(key, a, *, log_space, use_vmap=False):
keys = keys.flatten()
alphas = a.flatten()
- if use_vmap:
+ if use_vmap and _key_impl(key) is prng.threefry_prng_impl:
samples = vmap(partial(_gamma_one, log_space=log_space))(keys, alphas)
else:
samples = lax.map(
|
diff --git a/tests/BUILD b/tests/BUILD
--- a/tests/BUILD
+++ b/tests/BUILD
@@ -784,6 +784,9 @@ jax_test(
"notsan", # Times out
],
},
+ backend_variant_args = {
+ "gpu": ["--jax_num_generated_cases=40"],
+ },
shard_count = {
"cpu": 40,
"gpu": 30,
diff --git a/tests/lax_test.py b/tests/lax_test.py
--- a/tests/lax_test.py
+++ b/tests/lax_test.py
@@ -2652,6 +2652,24 @@ def testRngBitGeneratorReturnedKey(self):
new_key, _ = lax.rng_bit_generator(key, (0,))
self.assertAllClose(key, new_key)
+ def test_rng_bit_generator_vmap(self):
+ def f(key):
+ return lax.rng_bit_generator(key, shape=(5, 7))
+
+ keys = np.arange(3 * 4).reshape((3, 4)).astype(np.uint32)
+ out_keys, bits = jax.vmap(f)(keys)
+ self.assertEqual(out_keys.shape, (3, 4))
+ self.assertEqual(bits.shape, (3, 5, 7))
+
+ def test_rng_bit_generator_vmap_vmap(self):
+ def f(key):
+ return lax.rng_bit_generator(key, shape=(5, 7))
+
+ keys = np.arange(2 * 3 * 4).reshape((2, 3, 4)).astype(np.uint32)
+ out_keys, bits = jax.vmap(jax.vmap(f))(keys)
+ self.assertEqual(out_keys.shape, (2, 3, 4))
+ self.assertEqual(bits.shape, (2, 3, 5, 7))
+
@jtu.sample_product(
dtype=lax_test_util.all_dtypes + lax_test_util.python_scalar_types,
weak_type=[True, False],
diff --git a/tests/random_lax_test.py b/tests/random_lax_test.py
--- a/tests/random_lax_test.py
+++ b/tests/random_lax_test.py
@@ -1348,6 +1348,7 @@ def test_vmap_fold_in_shape(self):
out = vmap(vmap(random.fold_in), in_axes=(1, 0))(keys(), msgs.T)
self.assertEqual(out.shape, (3, 2))
+ @jax.enable_key_reuse_checks(False)
def test_vmap_split_mapped_key(self):
key = self.make_key(73)
mapped_keys = random.split(key, num=3)
@@ -1408,24 +1409,57 @@ def test_vmap_split_not_mapped_key(self):
self.assertArraysEqual(random.key_data(vk),
random.key_data(single_split_key))
- def test_vmap_split_mapped_key(self):
+ @jax.enable_key_reuse_checks(False)
+ def test_vmap_split_mapped_key_shape(self):
key = self.make_key(73)
mapped_keys = random.split(key, num=3)
- forloop_keys = [random.split(k) for k in mapped_keys]
vmapped_keys = vmap(random.split)(mapped_keys)
self.assertEqual(vmapped_keys.shape, (3, 2, *key.shape))
- for fk, vk in zip(forloop_keys, vmapped_keys):
- self.assertArraysEqual(random.key_data(fk),
+
+ @jax.enable_key_reuse_checks(False)
+ def test_vmap_split_mapped_key_values(self):
+ key = self.make_key(73)
+ mapped_keys = random.split(key, num=3)
+ vmapped_keys = vmap(random.split)(mapped_keys)
+ ref_keys = [random.split(k) for k in mapped_keys]
+ for rk, vk in zip(ref_keys, vmapped_keys):
+ self.assertArraysEqual(random.key_data(rk),
random.key_data(vk))
- def test_vmap_random_bits(self):
- rand_fun = lambda key: random.randint(key, (), 0, 100)
+ @jax.enable_key_reuse_checks(False)
+ def test_vmap_random_bits_shape(self):
+ rand_fun = lambda key, shape=(): random.randint(key, shape, 0, 100)
key = self.make_key(73)
mapped_keys = random.split(key, num=3)
- forloop_rand_nums = [rand_fun(k) for k in mapped_keys]
rand_nums = vmap(rand_fun)(mapped_keys)
self.assertEqual(rand_nums.shape, (3,))
- self.assertArraysEqual(rand_nums, jnp.array(forloop_rand_nums))
+
+ @jtu.skip_on_devices("tpu")
+ @jax.enable_key_reuse_checks(False)
+ def test_vmap_random_bits_value(self):
+ rand_fun = lambda key, shape=(): random.randint(key, shape, 0, 100)
+ key = self.make_key(73)
+ mapped_keys = random.split(key, num=3)
+ rand_nums = vmap(rand_fun)(mapped_keys)
+ ref_nums = rand_fun(mapped_keys[0], shape=(3,))
+ self.assertArraysEqual(rand_nums, ref_nums)
+
+ def test_vmap_random_bits_distribution(self):
+ dtype = jnp.float32
+ keys = lambda: jax.random.split(self.make_key(0), 10)
+
+ def rand(key):
+ nums = jax.vmap(lambda key: random.uniform(key, (1000,), dtype))(key)
+ return nums.flatten()
+
+ crand = jax.jit(rand)
+
+ uncompiled_samples = rand(keys())
+ compiled_samples = crand(keys())
+
+ for samples in [uncompiled_samples, compiled_samples]:
+ self._CheckCollisions(samples, jnp.finfo(dtype).nmant)
+ self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.uniform().cdf)
def test_cannot_add(self):
key = self.make_key(73)
@@ -1455,6 +1489,15 @@ class LaxRandomWithUnsafeRBGPRNGTest(LaxRandomWithRBGPRNGTest):
def make_key(self, seed):
return random.PRNGKey(seed, impl="unsafe_rbg")
+ @jtu.skip_on_devices("tpu")
+ @jax.enable_key_reuse_checks(False)
+ def test_vmap_split_mapped_key_values(self):
+ key = self.make_key(73)
+ mapped_keys = random.split(key, num=3)
+ vmapped_keys = vmap(random.split)(mapped_keys)
+ ref_keys = random.split(mapped_keys[0], (3, 2))
+ self.assertArraysEqual(random.key_data(vmapped_keys),
+ random.key_data(ref_keys))
def _sampler_unimplemented_with_custom_prng(*args, **kwargs):
raise SkipTest('sampler only implemented for default RNG')
|
efficient untrue batching of `random_bit_generator`
The batching rule for the `random_bit_generator` primitive, over a batch of keys, emits a loop (via `lax.map`):
https://github.com/google/jax/blob/42ae8432185bf03f61ddd2e7bc279d3abb5247fd/jax/_src/lax/control_flow/loops.py#L2012-L2024
This is a workaround to the corresponding `RandomBitGenerator` HLO not being batchable. But looping violates the operational expectations of `vmap`, namely that everything is vectorized. And downstream, the surprise performance hit when switching RNG implementations isn't great.
We could consider a few options:
1. Emit an unrolled loop. Drawbacks: grows the program size with the batch size.
2. Generate a batch of random numbers from a _single_ key in the batch, dropping the remaining keys in the batch. Drawbacks: this violates `vmap` semantics considering the random values generated, although the output is "statistically" the same in a sense.
Let's try number 2.
The RBG operation is already non-portable across platforms and XLA flags. In some cases the random generation is affected by sharding. So arguably, callers opting into RBG RNGs already expect unusual semantics. By contrast, it's uncommon that anyone expects the performance hit.
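A minimal sketch of option 2 written as a batching rule (this mirrors the patch above; it assumes the `jax`, `lax`, and `batching` modules already imported in `loops.py`, and is a sketch rather than a drop-in implementation):
```python
def _rng_bit_generator_batching_rule(batched_args, batch_dims, *, shape, dtype, algorithm):
  keys, = batched_args
  bd, = batch_dims
  if bd is batching.not_mapped:
    return lax.rng_bit_generator_p.bind(
        keys, shape=shape, dtype=dtype, algorithm=algorithm), (None, None)
  keys = batching.moveaxis(keys, bd, 0)
  batch_size = keys.shape[0]
  # Option 2: draw the whole batch of bits from the first key only.
  new_key, bits = lax.rng_bit_generator_p.bind(
      keys[0], shape=(batch_size, *shape), dtype=dtype, algorithm=algorithm)
  # Keep the output key batch the same shape as the input key batch.
  new_keys = jax.lax.dynamic_update_index_in_dim(keys, new_key, 0, axis=0)
  return (new_keys, bits), (0, 0)
```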
cc @mattjj, @dlwh
|
This ought to address #16792
| 2024-03-06T04:16:25 |
google/jax
| 20,273 |
google__jax-20273
|
[
"20267"
] |
cdafb8fd145f7726d82e824e72efc51fe2a80fc9
|
diff --git a/jax/_src/pjit.py b/jax/_src/pjit.py
--- a/jax/_src/pjit.py
+++ b/jax/_src/pjit.py
@@ -76,7 +76,7 @@
from jax._src.util import (
HashableFunction, safe_map, safe_zip, wraps,
distributed_debug_log, split_list, weakref_lru_cache,
- merge_lists, flatten, unflatten, subs_list2)
+ merge_lists, flatten, unflatten)
map, unsafe_map = safe_map, map
zip, unsafe_zip = safe_zip, zip
@@ -1798,8 +1798,8 @@ def _pjit_partial_eval(trace, *in_tracers,
known_ins = tuple(pv.is_known() for pv in in_pvals)
unknown_ins = tuple(not k for k in known_ins)
- known_jaxpr, unknown_jaxpr, unknown_outs, res_avals = pe.partial_eval_jaxpr_nounits(
- jaxpr, unknown_ins, instantiate=False)
+ known_jaxpr, unknown_jaxpr, unknown_outs, res_avals = \
+ pe.partial_eval_jaxpr_nounits(jaxpr, unknown_ins, instantiate=False)
unknown_outs = tuple(unknown_outs)
known_outs = tuple(not uk for uk in unknown_outs)
num_residuals = len(res_avals)
@@ -1808,28 +1808,37 @@ def _pjit_partial_eval(trace, *in_tracers,
def keep_where(l, should_keep):
return tuple(x for x, keep in zip(l, should_keep) if keep)
- # Compute which outputs are just forwarded inputs.
- num_out_primals = len(known_jaxpr.out_avals) - num_residuals
- in_fwd = pe._jaxpr_forwarding(known_jaxpr.jaxpr)
-
- # Only forward primal outputs when corresponding out_sharding is UNSPECIFIED.
- in_fwd_primal, in_fwd_res = split_list(in_fwd, [num_out_primals])
- in_fwd = [fwd if is_unspecified(os) else None for os, fwd in
- zip(keep_where(out_shardings, known_outs), in_fwd_primal)
- ] + in_fwd_res
- del in_fwd_primal, in_fwd_res
-
- # Compute which residuals are just primal outputs.
- out_vars, res_vars = split_list(known_jaxpr.jaxpr.outvars, [num_out_primals])
- idx_map = {id(v): i for i, v in enumerate(out_vars)}
- out_fwd = [None] * num_out_primals + [idx_map.get(id(v)) for v in res_vars]
-
- # Prune jaxpr outputs and out_shardings by removing forwards.
- keep = [f1 is None and f2 is None for f1, f2 in zip(in_fwd, out_fwd)]
- known_jaxpr = pe.prune_closed_jaxpr_outputs(known_jaxpr, keep)
known_out_shardings = keep_where(out_shardings, known_outs) + res_shardings
- known_out_shardings = keep_where(known_out_shardings, keep)
- del keep, num_out_primals
+
+ # TODO(mattjj): un-disable this optimization after we have more tests
+ # # Input-to-output forwarding: compute which outputs are just forwarded inputs.
+ # num_out_primals = len(known_jaxpr.out_avals) - num_residuals
+ # in_fwd: list[int | None] = pe._jaxpr_forwarding(known_jaxpr.jaxpr)
+ # # Only forward primal outputs when corresponding out_sharding is UNSPECIFIED.
+ # in_fwd_primal, in_fwd_res = split_list(in_fwd, [num_out_primals])
+ # in_fwd = [fwd if is_unspecified(os) else None for os, fwd in
+ # zip(keep_where(out_shardings, known_outs), in_fwd_primal)
+ # ] + in_fwd_res
+ # del in_fwd_primal, in_fwd_res
+ # # Prune jaxpr outputs and out_shardings by removing the input-forwards.
+ # keep = [f is None for f in in_fwd]
+ # known_jaxpr = pe.prune_closed_jaxpr_outputs(known_jaxpr, keep)
+ # known_out_shardings = keep_where(known_out_shardings, keep)
+ # # Update num_out_primals to reflect pruning.
+ # kept_primals, kept_res = split_list(keep, [num_out_primals])
+ # num_out_primals = sum(f is None for f in kept_primals)
+ # del keep, kept_primals, kept_res
+
+ # TODO(mattjj): un-disable this optimization after we have more tests
+ # # Output-to-output forwarding: compute which residuals are just primal outputs
+ # out_vars, res_vars = split_list(known_jaxpr.jaxpr.outvars, [num_out_primals])
+ # idx_map = {id(v): i for i, v in enumerate(out_vars)}
+ # out_fwd = [None] * num_out_primals + [idx_map.get(id(v)) for v in res_vars]
+ # # Prune jaxpr outputs and out_shardings by removing forwarded residuals.
+ # keep = [f is None for f in out_fwd]
+ # known_jaxpr = pe.prune_closed_jaxpr_outputs(known_jaxpr, keep)
+ # known_out_shardings = keep_where(known_out_shardings, keep)
+ # del keep
known_params = dict(
jaxpr=known_jaxpr, in_shardings=keep_where(in_shardings, known_ins),
@@ -1841,16 +1850,19 @@ def keep_where(l, should_keep):
# Bind known things to pjit_p.
known_inputs = [pv.get_known() for pv in in_pvals if pv.is_known()]
all_known_outs = pjit_p.bind(*known_inputs, **known_params)
- all_known_outs = subs_list2(in_fwd, out_fwd, known_inputs, all_known_outs,
- all_known_outs)
+ # TODO(mattjj): un-disable this optimization after we have more tests
+ # # Add back in the output fwds.
+ # all_known_outs = subs_list(out_fwd, all_known_outs, all_known_outs)
+ # # Add back in the input fwds.
+ # all_known_outs = subs_list(in_fwd, known_inputs, all_known_outs)
known_out_vals, residual_vals = \
split_list(all_known_outs, [len(all_known_outs) - num_residuals])
residual_tracers = map(trace.new_instantiated_const, residual_vals)
- # The convention of partial_eval_jaxpr_nounits is to place residual binders
- # at the front of the jaxpr produced, so we move them to the back since both
- # the jaxpr equation built below and the pjit transpose rule assume a
+ # The convention of partial_eval_jaxpr_nounits is to place residual binders at
+ # the front of the jaxpr produced, so we move them to the back since both the
+ # jaxpr equation built below and the pjit transpose rule assume a
# residual-inputs-last convention.
unknown_jaxpr = pe.move_binders_to_back(
unknown_jaxpr, [True] * num_residuals + [False] * sum(unknown_ins))
|
diff --git a/tests/api_test.py b/tests/api_test.py
--- a/tests/api_test.py
+++ b/tests/api_test.py
@@ -4565,6 +4565,15 @@ def foo(self):
gc.collect()
assert a() is None
+ def test_forwarding_bug(self):
+ # Test for issue #20267.
+ def f(x):
+ @jax.jit
+ def inner(a, x):
+ return a, jnp.exp(x)
+ return inner(0., x)[0]
+ jax.grad(f)(1.) # don't crash
+
class RematTest(jtu.JaxTestCase):
diff --git a/tests/core_test.py b/tests/core_test.py
--- a/tests/core_test.py
+++ b/tests/core_test.py
@@ -368,6 +368,8 @@ def body(c, _):
dropvar, b = jaxpr.eqns[0].outvars
self.assertEqual(dropvar.aval, aval)
+ # TODO(mattjj): un-skip
+ @unittest.skip('temporarily skipping until we can add more tests')
def test_input_residual_forwarding(self):
# https://github.com/google/jax/pull/11151
x = jnp.arange(3 * 4.).reshape(3, 4)
|
`grad(jit(f))` can fail when an output which is a forwarded input precedes an output which is also a residual
### Description
Example:
```python
import jax
import jax.numpy as jnp
def f(x):
@jax.jit
def inner(a, x):
return a, jnp.exp(x)
return inner(0., x)[0]
jax.grad(f)(1.)
# IndexError: list index out of range
```
The error occurs in the call to `subs_list2` [here](https://github.com/google/jax/blob/a5d32c41c336368d8ec01bda17072d34189f7faa/jax/_src/pjit.py#L1844). It looks like the problem is that `idx_map` is built using indices into `out_vars`, so initially the `exp(x)` value is marked as being in output index 1. When the forwarded inputs and duplicated residuals are pruned, this index doesn't get updated, and can end up being out of bounds.
### System info (python version, jaxlib version, accelerator, etc.)
```
jax: 0.4.25
jaxlib: 0.4.25
numpy: 1.25.2
python: 3.11.6 (main, Nov 14 2023, 09:36:21) [GCC 13.2.1 20230801]
jax.devices (2 total, 2 local): [cuda(id=0) cuda(id=1)]
process_count: 1
```
|
Oof, what a bad bug! Thanks for reporting this!
| 2024-03-15T17:01:10 |
google/jax
| 20,524 |
google__jax-20524
|
[
"20410"
] |
011ced4431d0f58837f9c48bda854df78664c0af
|
diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -2877,6 +2877,27 @@ def repeat(a: ArrayLike, repeats: ArrayLike, axis: int | None = None, *,
return take(a, gather_indices, axis=axis)
[email protected](getattr(np, "trapezoid", getattr(np, "trapz", None)))
+@partial(jit, static_argnames=('axis',))
+def trapezoid(y: ArrayLike, x: ArrayLike | None = None, dx: ArrayLike = 1.0,
+ axis: int = -1) -> Array:
+ # TODO(phawkins): remove this annotation after fixing jnp types.
+ dx_array: Array
+ if x is None:
+ util.check_arraylike('trapezoid', y)
+ y_arr, = util.promote_dtypes_inexact(y)
+ dx_array = asarray(dx)
+ else:
+ util.check_arraylike('trapezoid', y, x)
+ y_arr, x_arr = util.promote_dtypes_inexact(y, x)
+ if x_arr.ndim == 1:
+ dx_array = diff(x_arr)
+ else:
+ dx_array = moveaxis(diff(x_arr, axis=axis), axis, -1)
+ y_arr = moveaxis(y_arr, axis, -1)
+ return 0.5 * (dx_array * (y_arr[..., 1:] + y_arr[..., :-1])).sum(-1)
+
+
@util.implements(np.tri)
def tri(N: int, M: int | None = None, k: int = 0, dtype: DTypeLike | None = None) -> Array:
dtypes.check_user_dtype_supported(dtype, "tri")
diff --git a/jax/_src/scipy/integrate.py b/jax/_src/scipy/integrate.py
--- a/jax/_src/scipy/integrate.py
+++ b/jax/_src/scipy/integrate.py
@@ -27,18 +27,4 @@
@partial(jit, static_argnames=('axis',))
def trapezoid(y: ArrayLike, x: ArrayLike | None = None, dx: ArrayLike = 1.0,
axis: int = -1) -> Array:
- # TODO(phawkins): remove this annotation after fixing jnp types.
- dx_array: Array
- if x is None:
- util.check_arraylike('trapezoid', y)
- y_arr, = util.promote_dtypes_inexact(y)
- dx_array = jnp.asarray(dx)
- else:
- util.check_arraylike('trapezoid', y, x)
- y_arr, x_arr = util.promote_dtypes_inexact(y, x)
- if x_arr.ndim == 1:
- dx_array = jnp.diff(x_arr)
- else:
- dx_array = jnp.moveaxis(jnp.diff(x_arr, axis=axis), axis, -1)
- y_arr = jnp.moveaxis(y_arr, axis, -1)
- return 0.5 * (dx_array * (y_arr[..., 1:] + y_arr[..., :-1])).sum(-1)
+ return jnp.trapezoid(y, x, dx, axis)
diff --git a/jax/numpy/__init__.py b/jax/numpy/__init__.py
--- a/jax/numpy/__init__.py
+++ b/jax/numpy/__init__.py
@@ -233,6 +233,7 @@
tensordot as tensordot,
tile as tile,
trace as trace,
+ trapezoid as trapezoid,
transpose as transpose,
tri as tri,
tril as tril,
@@ -447,7 +448,15 @@
register_jax_array_methods()
del register_jax_array_methods
-try:
- from numpy import issubsctype as _deprecated_issubsctype
-except ImportError:
- _deprecated_issubsctype = None
+
+_deprecations = {
+ # Deprecated 18 Sept 2023 and removed 06 Feb 2024
+ "trapz": (
+ "jnp.trapz is deprecated; use jnp.trapezoid instead.",
+ None
+ ),
+}
+
+from jax._src.deprecations import deprecation_getattr as _deprecation_getattr
+__getattr__ = _deprecation_getattr(__name__, _deprecations)
+del _deprecation_getattr
|
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py
--- a/tests/lax_numpy_test.py
+++ b/tests/lax_numpy_test.py
@@ -5571,6 +5571,37 @@ def test_isdtype(self, dtype, kind):
numpy_result = np.isdtype(dtype, kind)
self.assertEqual(jax_result, numpy_result)
+ @jtu.sample_product(
+ [dict(yshape=yshape, xshape=xshape, dx=dx, axis=axis)
+ for yshape, xshape, dx, axis in [
+ ((10,), None, 1.0, -1),
+ ((3, 10), None, 2.0, -1),
+ ((3, 10), None, 3.0, -0),
+ ((10, 3), (10,), 1.0, -2),
+ ((3, 10), (10,), 1.0, -1),
+ ((3, 10), (3, 10), 1.0, -1),
+ ((2, 3, 10), (3, 10), 1.0, -2),
+ ]
+ ],
+ dtype=float_dtypes + int_dtypes,
+ )
+ @jtu.skip_on_devices("tpu") # TODO(jakevdp): fix and reenable this test.
+ @jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.
+ def test_trapezoid(self, yshape, xshape, dtype, dx, axis):
+ rng = jtu.rand_default(self.rng())
+ args_maker = lambda: [rng(yshape, dtype), rng(xshape, dtype) if xshape is not None else None]
+ if jtu.numpy_version() >= (2, 0, 0):
+ np_fun = partial(np.trapezoid, dx=dx, axis=axis)
+ else:
+ np_fun = partial(np.trapz, dx=dx, axis=axis)
+ jnp_fun = partial(jnp.trapezoid, dx=dx, axis=axis)
+ tol = jtu.tolerance(dtype, {np.float16: 2e-3, np.float64: 1e-12,
+ jax.dtypes.bfloat16: 4e-2})
+ self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, tol=tol,
+ check_dtypes=False)
+ self._CompileAndCheck(jnp_fun, args_maker, atol=tol, rtol=tol,
+ check_dtypes=False)
+
# Most grad tests are at the lax level (see lax_test.py), but we add some here
# as needed for e.g. particular compound ops of interest.
|
Implementation of np.trapz
I would like to see the implementation of np.trapz in jax.numpy; it seems like a useful addition.
|
I believe this is already essentially available with `jax.scipy.integrate.trapezoid`: https://jax.readthedocs.io/en/latest/_autosummary/jax.scipy.integrate.trapezoid.html
`jax.numpy.trapz` was [deprecated in JAX v0.4.16](https://jax.readthedocs.io/en/latest/changelog.html#jax-0-4-16-sept-18-2023) following NumPy's deprecation of `numpy.trapz`. After the deprecation cycle, it was [removed in JAX v0.4.24](https://jax.readthedocs.io/en/latest/changelog.html#jax-0-4-24-feb-6-2024). `jax.scipy.integrate.trapezoid` is a drop-in replacement.
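For anyone landing here, a quick usage sketch of the drop-in replacement (values illustrative):
```python
import jax.numpy as jnp
from jax.scipy.integrate import trapezoid

x = jnp.linspace(0.0, jnp.pi, 1001)
trapezoid(jnp.sin(x), x)  # ≈ 2.0; same call pattern as np.trapz(y, x)
```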
It seems that `numpy.trapz` is no longer being deprecated but simply renamed to `numpy.trapezoid`: https://github.com/numpy/numpy/issues/25586. I understand removing it in JAX to follow NumPy; however, I still feel that having to rely on jax.scipy.integrate is more cumbersome than it should be for a simple function like jax.numpy.trapz
Thanks, I hadn't seen that. We can re-add this as `jax.numpy.trapezoid` for consistency with the NumPy API.
`jnp.trapezoid` added in #20524 – thanks!
| 2024-04-01T19:56:28 |
google/jax
| 20,528 |
google__jax-20528
|
[
"20430"
] |
dcd45c8d208cc92afb34002cfb5f9be43ae41201
|
diff --git a/jax/_src/scipy/special.py b/jax/_src/scipy/special.py
--- a/jax/_src/scipy/special.py
+++ b/jax/_src/scipy/special.py
@@ -540,7 +540,7 @@ def _create_polynomial(var, coeffs):
# later on. The result from the computation when p == 0 is not used so any
# number that doesn't result in NaNs is fine.
sanitized_mcp = jnp.where(
- maybe_complement_p <= dtype(0.),
+ maybe_complement_p == dtype(0.),
jnp.full(shape, dtype(0.5)),
maybe_complement_p)
@@ -571,9 +571,9 @@ def _create_polynomial(var, coeffs):
x = jnp.where(p > dtype(1. - np.exp(-2.)), x, -x)
infinity = jnp.full(shape, dtype(np.inf))
- x_nan_replaced = jnp.where(
- p <= dtype(0.0), -infinity, jnp.where(p >= dtype(1.0), infinity, x))
- return x_nan_replaced
+ x_fix_boundaries = jnp.where(
+ p == dtype(0.0), -infinity, jnp.where(p == dtype(1.0), infinity, x))
+ return x_fix_boundaries
@partial(custom_derivatives.custom_jvp, nondiff_argnums=(1,))
|
diff --git a/tests/lax_scipy_special_functions_test.py b/tests/lax_scipy_special_functions_test.py
--- a/tests/lax_scipy_special_functions_test.py
+++ b/tests/lax_scipy_special_functions_test.py
@@ -114,7 +114,7 @@ def op_record(name, nargs, dtypes, rng_factory, test_grad, nondiff_argnums=(), t
),
op_record(
"ndtri", 1, float_dtypes,
- functools.partial(jtu.rand_uniform, low=0.05, high=0.95), True,
+ functools.partial(jtu.rand_uniform, low=0.0, high=1.0), True,
),
op_record(
"ndtr", 1, float_dtypes, jtu.rand_default, True
@@ -218,6 +218,13 @@ def testGammaSign(self):
self._CheckAgainstNumpy(osp_special.gamma, lsp_special.gamma, args_maker, rtol=rtol)
self._CompileAndCheck(lsp_special.gamma, args_maker, rtol=rtol)
+ def testNdtriExtremeValues(self):
+ # Testing at the extreme values (bounds (0. and 1.) and outside the bounds).
+ dtype = jax.numpy.zeros(0).dtype # default float dtype.
+ args_maker = lambda: [np.arange(-10, 10).astype(dtype)]
+ rtol = 1E-3 if jtu.test_device_matches(["tpu"]) else 1e-5
+ self._CheckAgainstNumpy(osp_special.ndtri, lsp_special.ndtri, args_maker, rtol=rtol)
+ self._CompileAndCheck(lsp_special.ndtri, args_maker, rtol=rtol)
if __name__ == "__main__":
|
`jax.scipy.special.ndtri` edge case behavior
### Description
`jax.scipy.special.ndtri` returns positive infinity when the argument exceeds `1.0` and negative infinity when the argument is less than `0.0`.
```python3
from jax import numpy as jnp
from jax.scipy.special import ndtri
ndtri(jnp.asarray(-1.)) # Array(-inf, dtype=float32)
ndtri(jnp.asarray(2.)) # Array(inf, dtype=float32)
```
It seems that it would be more appropriate to return NaN in these cases (as `scipy.special.ndtri`, `torch.special.ndtri`, etc. do).
This came up when adding support for JAX as an array API backend in SciPy. For more information, please see scipy/scipy#20085.
### System info (python version, jaxlib version, accelerator, etc.)
```
jax: 0.4.23
jaxlib: 0.4.23
numpy: 1.25.2
python: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]
jax.devices (1 total, 1 local): [cuda(id=0)]
process_count: 1
$ nvidia-smi
Tue Mar 26 04:33:34 2024
+---------------------------------------------------------------------------------------+
| NVIDIA-SMI 535.104.05 Driver Version: 535.104.05 CUDA Version: 12.2 |
|-----------------------------------------+----------------------+----------------------+
| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|=========================================+======================+======================|
| 0 Tesla T4 Off | 00000000:00:04.0 Off | 0 |
| N/A 70C P0 32W / 70W | 11447MiB / 15360MiB | 0% Default |
| | | N/A |
+-----------------------------------------+----------------------+----------------------+
+---------------------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=======================================================================================|
+---------------------------------------------------------------------------------------+
```
| 2024-04-02T06:26:45 |
|
google/jax
| 20,536 |
google__jax-20536
|
[
"20533"
] |
2ee4c0f6449398fc41e6657009b76ab567aca7aa
|
diff --git a/jax/_src/numpy/util.py b/jax/_src/numpy/util.py
--- a/jax/_src/numpy/util.py
+++ b/jax/_src/numpy/util.py
@@ -418,6 +418,8 @@ def _broadcast_to(arr: ArrayLike, shape: DimSize | Shape) -> Array:
arr_shape = np.shape(arr)
if core.definitely_equal_shape(arr_shape, shape):
return arr
+ elif len(shape) < len(arr_shape):
+ raise ValueError(f"Cannot broadcast to shape with fewer dimensions: {arr_shape=} {shape=}")
else:
nlead = len(shape) - len(arr_shape)
shape_tail = shape[nlead:]
|
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py
--- a/tests/lax_numpy_test.py
+++ b/tests/lax_numpy_test.py
@@ -5177,6 +5177,13 @@ def testBroadcastTo(self, from_shape, to_shape):
self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
self._CompileAndCheck(jnp_op, args_maker)
+ def testBroadcastToInvalidShape(self):
+ # Regression test for https://github.com/google/jax/issues/20533
+ x = jnp.zeros((3, 4, 5))
+ with self.assertRaisesRegex(
+ ValueError, "Cannot broadcast to shape with fewer dimensions"):
+ jnp.broadcast_to(x, (4, 5))
+
@jtu.sample_product(
[dict(shapes=shapes, broadcasted_shape=broadcasted_shape)
for shapes, broadcasted_shape in [
|
Uncaught error in `jnp.broadcast_to`
### Description
If `shape` is shorter than `array.ndim`, an exception from `safe_zip` escapes uncaught, which is a bit confusing.
Example:
```python
import jax.numpy as jnp
jnp.broadcast_to(jnp.zeros((3, 4, 5)), (4, 5))
```
JAX gives
```
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "jax/_src/numpy/lax_numpy.py", line 1222, in broadcast_to
return util._broadcast_to(array, shape)
File "jax/_src/numpy/util.py", line 425, in _broadcast_to
for arr_d, shape_d in safe_zip(arr_shape, shape_tail))
ValueError: safe_zip() argument 2 is shorter than argument 1
```
While numpy gives the message
```
ValueError: input operand has more dimensions than allowed by the axis remapping
```
### System info (python version, jaxlib version, accelerator, etc.)
```
jax: 0.4.25
jaxlib: 0.4.25
numpy: 1.24.3
python: 3.10.13 (main, Sep 11 2023, 13:44:35) [GCC 11.2.0]
jax.devices (1 total, 1 local): [CpuDevice(id=0)]
process_count: 1
platform: uname_result(system='Linux', node='<HIDDEN>', release='5.15.0-78-generic', version='#85-Ubuntu SMP Fri Jul 7 15:25:09 UTC 2023', machine='x86_64')
```
| 2024-04-02T15:40:02 |
|
google/jax
| 20,558 |
google__jax-20558
|
[
"4523"
] |
026f309dcb79f4ea3d790d88ffea8c6d4428defd
|
diff --git a/jax/_src/nn/functions.py b/jax/_src/nn/functions.py
--- a/jax/_src/nn/functions.py
+++ b/jax/_src/nn/functions.py
@@ -199,6 +199,29 @@ def silu(x: ArrayLike) -> Array:
swish = silu
[email protected]
+def mish(x: ArrayLike) -> Array:
+ r"""Mish activation function.
+
+ Computes the element-wise function:
+
+ .. math::
+ \mathrm{mish}(x) = x \cdot \mathrm{tanh}(\mathrm{softplus}(x))
+
+ For more information, see
+ `Mish: A Self Regularized Non-Monotonic Activation Function
+ <https://arxiv.org/abs/1908.08681>`_.
+
+ Args:
+ x : input array
+
+ Returns:
+ An array.
+ """
+ numpy_util.check_arraylike("mish", x)
+ x_arr = jnp.asarray(x)
+ return x_arr * jnp.tanh(softplus(x_arr))
+
@jax.jit
def log_sigmoid(x: ArrayLike) -> Array:
r"""Log-sigmoid activation function.
@@ -314,7 +337,7 @@ def celu(x: ArrayLike, alpha: ArrayLike = 1.0) -> Array:
For more information, see
`Continuously Differentiable Exponential Linear Units
- <https://arxiv.org/pdf/1704.07483.pdf>`_.
+ <https://arxiv.org/abs/1704.07483>`_.
Args:
x : input array
@@ -342,7 +365,7 @@ def selu(x: ArrayLike) -> Array:
For more information, see
`Self-Normalizing Neural Networks
- <https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf>`_.
+ <https://arxiv.org/abs/1706.02515>`_.
Args:
x : input array
diff --git a/jax/nn/__init__.py b/jax/nn/__init__.py
--- a/jax/nn/__init__.py
+++ b/jax/nn/__init__.py
@@ -45,6 +45,7 @@
silu as silu,
swish as swish,
squareplus as squareplus,
+ mish as mish,
)
# Deprecations
|
diff --git a/tests/nn_test.py b/tests/nn_test.py
--- a/tests/nn_test.py
+++ b/tests/nn_test.py
@@ -91,6 +91,26 @@ def testSquareplusGradNan(self):
def testSquareplusZero(self, dtype):
self.assertEqual(dtype(1), nn.squareplus(dtype(0), dtype(4)))
+ def testMishGrad(self):
+ check_grads(nn.mish, (1e-8,), order=4,
+ rtol=1e-2 if jtu.test_device_matches(["tpu"]) else None)
+
+ def testMishGradZero(self):
+ check_grads(nn.mish, (0.,), order=1,
+ rtol=1e-2 if jtu.test_device_matches(["tpu"]) else None)
+
+ def testMishGradNegInf(self):
+ check_grads(nn.mish, (-float('inf'),), order=1,
+ rtol=1e-2 if jtu.test_device_matches(["tpu"]) else None)
+
+ def testMishGradNan(self):
+ check_grads(nn.mish, (float('nan'),), order=1,
+ rtol=1e-2 if jtu.test_device_matches(["tpu"]) else None)
+
+ @parameterized.parameters([float] + jtu.dtypes.floating)
+ def testMishZero(self, dtype):
+ self.assertEqual(dtype(0), nn.mish(dtype(0)))
+
def testReluGrad(self):
rtol = 1e-2 if jtu.test_device_matches(["tpu"]) else None
check_grads(nn.relu, (1.,), order=3, rtol=rtol)
@@ -117,6 +137,10 @@ def testSquareplusValue(self):
val = nn.squareplus(1e3)
self.assertAllClose(val, 1e3, check_dtypes=False, atol=1e-3)
+ def testMishValue(self):
+ val = nn.mish(1e3)
+ self.assertAllClose(val, 1e3, check_dtypes=False, atol=1e-3)
+
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def testEluGrad(self):
check_grads(nn.elu, (1e4,), order=4, eps=1.)
@@ -149,7 +173,7 @@ def gelu_reference(x):
(jnp.float32, jnp.bfloat16, jnp.float16),
(partial(nn.gelu, approximate=False),
partial(nn.gelu, approximate=True),
- nn.relu, nn.softplus, nn.sparse_plus, nn.sigmoid, nn.squareplus)))
+ nn.relu, nn.softplus, nn.sparse_plus, nn.sigmoid, nn.squareplus, nn.mish)))
def testDtypeMatchesInput(self, dtype, fn):
x = jnp.zeros((), dtype=dtype)
out = fn(x)
|
[Feature Request] mish activation function
Hi, @mattjj @froystig I would like to contribute by adding the ```mish``` activation function to JAX. It's a Self Regularized Non-Monotonic Neural Activation Function.
```mish``` = ```x * tanh(softplus(x))```
paper: https://arxiv.org/abs/1908.08681
|
This has come up multiple times before, see #1676 and #2842 .
I think the only thing that would change our minds is if `mish` is now more popular. Can you provide evidence that it is widely used? Otherwise it makes sense to me to keep it out of tree, given it is a 1-liner anyway.
Hi, @hawkinsp thanks. ```mish``` has official implementations in various ML libraries. Kindly see the attached pics.


This activation function is quite new (2019) but has gained a lot of popularity. The paper has ```82``` citations, and you can find some more stats about ```mish``` here: https://github.com/digantamisra98/Mish.
Thanks.
`mish` seems to have little usage in the research community, so I'm reluctant to add it to the standard library. It also wouldn't help JAX users much to add it; if anyone wants `mish`, the complete implementation is:
```
def mish(x): return x * jnp.tanh(jax.nn.softplus(x))
```
and that's something that is easy to copy and paste into a model.
We can reevaluate if `mish` has significant research impact and becomes widely used. So I would say: not yet.
@hawkinsp It would be nice to re-evaluate this request now that Mish has been added to standard PyTorch ([PR](https://github.com/pytorch/pytorch/pull/58648), [Feature Request Issue](https://github.com/pytorch/pytorch/issues/58375#issuecomment-842654130)) based on similar evaluation criteria.
Additionally the paper has received nearly 270 citations as per [Scholar](https://scholar.google.com/citations?user=LwiJwNYAAAAJ&hl=en).
The paper now has 1,663 citations according to [Google Scholar](https://scholar.google.com/scholar?q=mish+activation+function).
Some places where mish has been requested:
- https://github.com/google/flax/issues/584
- https://github.com/google/flax/discussions/3391
- https://github.com/google/objax/issues/225
- https://github.com/google/jax/issues/2842
- https://github.com/google/flax/discussions/3483
- https://github.com/google/jax/pull/1676
- https://github.com/google/jax/issues/4523
Looks like there's enough demand and citations for it.
The following links might be of interest:
- [Benchmarking PyTorch’s Native Mish](https://benjaminwarner.dev/2021/07/19/benchmarking-pytorch-native-mish)
- [mish-cuda: Mish Activation Function for PyTorch](https://github.com/thomasbrandon/mish-cuda)
So there might be value in an optimized implementation.
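For reference, a short usage sketch of the `jax.nn.mish` added in the patch above; the second line is just the definition written out by hand:
```python
import jax.numpy as jnp
from jax import nn

x = jnp.linspace(-3.0, 3.0, 7)
nn.mish(x)                      # mish(x) = x * tanh(softplus(x))
x * jnp.tanh(nn.softplus(x))    # same values
```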
| 2024-04-03T05:55:11 |
google/jax
| 20,589 |
google__jax-20589
|
[
"20587"
] |
66427f8800919059fd30971926762f60aa93df8c
|
diff --git a/jax/_src/scipy/special.py b/jax/_src/scipy/special.py
--- a/jax/_src/scipy/special.py
+++ b/jax/_src/scipy/special.py
@@ -44,14 +44,16 @@ def gammaln(x: ArrayLike) -> Array:
return lax.lgamma(x)
+def _gamma_sign(x: Array) -> Array:
+ floor_x = lax.floor(x)
+ return jnp.where((x > 0) | (x == floor_x) | (floor_x % 2 == 0), 1.0, -1.0)
+
+
@implements(osp_special.gamma, module='scipy.special', lax_description="""\
The JAX version only accepts real-valued inputs.""")
def gamma(x: ArrayLike) -> Array:
x, = promote_args_inexact("gamma", x)
- # Compute the sign for negative x, matching the semantics of scipy.special.gamma
- floor_x = lax.floor(x)
- sign = jnp.where((x > 0) | (x == floor_x), 1.0, (-1.0) ** floor_x)
- return sign * lax.exp(lax.lgamma(x))
+ return _gamma_sign(x) * lax.exp(lax.lgamma(x))
betaln = implements(
osp_special.betaln,
@@ -71,7 +73,8 @@ def factorial(n: ArrayLike, exact: bool = False) -> Array:
@implements(osp_special.beta, module='scipy.special')
def beta(x: ArrayLike, y: ArrayLike) -> Array:
x, y = promote_args_inexact("beta", x, y)
- return lax.exp(betaln(x, y))
+ sign = _gamma_sign(x) * _gamma_sign(y) * _gamma_sign(x + y)
+ return sign * lax.exp(betaln(x, y))
@implements(osp_special.betainc, module='scipy.special')
|
diff --git a/tests/lax_scipy_special_functions_test.py b/tests/lax_scipy_special_functions_test.py
--- a/tests/lax_scipy_special_functions_test.py
+++ b/tests/lax_scipy_special_functions_test.py
@@ -53,10 +53,10 @@ def op_record(name, nargs, dtypes, rng_factory, test_grad, nondiff_argnums=(), t
JAX_SPECIAL_FUNCTION_RECORDS = [
op_record(
- "beta", 2, float_dtypes, jtu.rand_positive, False
+ "beta", 2, float_dtypes, jtu.rand_default, False
),
op_record(
- "betaln", 2, float_dtypes, jtu.rand_positive, False
+ "betaln", 2, float_dtypes, jtu.rand_default, False
),
op_record(
"betainc", 3, float_dtypes, jtu.rand_positive, False
|
jax.scipy.special.beta results are different from scipy.special.beta for certain input
### Description
When I use `jax.scipy.special.beta`, I get different results from the scipy version for some inputs, as shown below:
```
b = 14.25
n = b - 1
k = jnp.arange(40)
plt.plot(k, scipy.special.beta(1 + n - k, 1 + k), label="scipy", ls=":")
plt.plot(k, jax.scipy.special.beta(1 + n - k, 1 + k), label="jax")
plt.plot(k, np.array([cephes4py.beta(1 + n - k, 1 + k) for k in np.arange(40)]), label="cephes", ls="-.")
plt.legend();
plt.show();
```
Here is the result:

### System info (python version, jaxlib version, accelerator, etc.)
```
jax: 0.4.26
jaxlib: 0.4.25
numpy: 1.26.4
python: 3.10.13
```
|
Thanks for the report! It looks like we weren't properly setting the sign of `beta` for negative inputs. #20589 should fix this.
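For context, the fix computes the sign from the standard identity `B(x, y) = Γ(x)Γ(y) / Γ(x + y)`, so the sign of `beta` is the product of the three gamma signs. A quick illustrative check against SciPy (values chosen away from the poles):
```python
import numpy as np
from scipy.special import beta, gamma

x, y = -0.75, 2.5
np.isclose(beta(x, y), gamma(x) * gamma(y) / gamma(x + y))  # True
```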
| 2024-04-04T15:43:08 |
google/jax
| 20,637 |
google__jax-20637
|
[
"20624"
] |
1d26365e601528d4cb63ca9d39213248eb320adb
|
diff --git a/jax/experimental/array_api/_data_type_functions.py b/jax/experimental/array_api/_data_type_functions.py
--- a/jax/experimental/array_api/_data_type_functions.py
+++ b/jax/experimental/array_api/_data_type_functions.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
+import builtins
import functools
from typing import NamedTuple
import jax
@@ -183,6 +183,8 @@ def isdtype(dtype, kind):
def result_type(*arrays_and_dtypes):
dtypes = []
for val in arrays_and_dtypes:
+ if isinstance(val, (builtins.bool, int, float, complex)):
+ val = jax.numpy.array(val)
if isinstance(val, jax.Array):
val = val.dtype
if _is_valid_dtype(val):
diff --git a/jax/experimental/array_api/_elementwise_functions.py b/jax/experimental/array_api/_elementwise_functions.py
--- a/jax/experimental/array_api/_elementwise_functions.py
+++ b/jax/experimental/array_api/_elementwise_functions.py
@@ -22,10 +22,11 @@
def _promote_dtypes(name, *args):
assert isinstance(name, str)
- if not all(isinstance(arg, jax.Array) for arg in args):
+ if not all(isinstance(arg, (bool, int, float, complex, jax.Array))
+ for arg in args):
raise ValueError(f"{name}: inputs must be arrays; got types {[type(arg) for arg in args]}")
dtype = _result_type(*args)
- return [arg.astype(dtype) for arg in args]
+ return [jax.numpy.asarray(arg).astype(dtype) for arg in args]
def abs(x, /):
|
Tracer's imag method returns float; crashes with the Array API
### Description
The following:
```python
import jax.numpy as jnp
from jax.lax import while_loop
from array_api_compat import get_namespace
def body(x):
assert False
def cond(x):
# x is a tracer, x.real is a tracer, x.imag is a float!
xp = get_namespace(x)
xp.square(x.imag) # No problem with x.real!
while_loop(cond, body, jnp.asarray(0.0))
```
crashes with:
```
ValueError: square: inputs must be arrays; got types [<class 'float'>]
```
Also broken:
```python
import jax.numpy as jnp
from jax import jit
@jit
def f(x):
x.imag.shape # Crash!
f(jnp.asarray(0.0))
```
According to the Array API spec, `x.imag` must be a _Jax array_ (not a NumPy array, nor a float).
### System info (python version, jaxlib version, accelerator, etc.)
An NVIDIA GPU may be present on this machine, but a CUDA-enabled jaxlib is not installed. Falling back to cpu.
jax: 0.4.26
jaxlib: 0.4.26
numpy: 1.26.4
python: 3.11.8 (main, Feb 22 2024, 17:25:49) [GCC 11.4.0]
|
Thanks - it seems that the validation for array API inputs is too strict: we should probably allow Python scalars here: https://github.com/google/jax/blob/1d26365e601528d4cb63ca9d39213248eb320adb/jax/experimental/array_api/_elementwise_functions.py#L25-L26
| 2024-04-08T17:11:59 |
|
google/jax
| 20,716 |
google__jax-20716
|
[
"20704"
] |
2c8051eb52c741e1cd50a58269e3890653eb85a8
|
diff --git a/jax/_src/pjit.py b/jax/_src/pjit.py
--- a/jax/_src/pjit.py
+++ b/jax/_src/pjit.py
@@ -223,7 +223,7 @@ def _get_states(attrs_tracked):
def _get_fastpath_data(
executable, out_tree, args_flat, out_flat, attrs_tracked, effects,
- abstracted_axes
+ consts, abstracted_axes,
) -> Optional[pxla.MeshExecutableFastpathData]:
out_reflattened, out_tree = pxla.reflatten_outputs_for_dispatch(out_tree, out_flat)
@@ -244,7 +244,7 @@ def _get_fastpath_data(
# no prng reuse checking
and not (config.debug_key_reuse.value and any(
hasattr(arg, 'dtype') and dtypes.issubdtype(arg.dtype, dtypes.prng_key)
- for arg in (*args_flat, *out_flat)))
+ for arg in (*args_flat, *out_flat, *consts)))
)
if use_fastpath:
@@ -306,7 +306,7 @@ def cache_miss(*args, **kwargs):
executable = _read_most_recent_pjit_call_executable(jaxpr)
maybe_fastpath_data = _get_fastpath_data(
executable, out_tree, args_flat, out_flat, attrs_tracked, jaxpr.effects,
- jit_info.abstracted_axes)
+ jaxpr.consts, jit_info.abstracted_axes)
return outs, maybe_fastpath_data
fun = jit_info.fun
@@ -1557,7 +1557,7 @@ def call_impl_cache_miss(*args_, **kwargs_):
inline=inline)
fastpath_data = _get_fastpath_data(
compiled, tree_structure(out_flat), args, out_flat, [], jaxpr.effects,
- None)
+ jaxpr.consts, None)
return out_flat, fastpath_data
f = _get_jaxpr_as_fun(
diff --git a/jax/experimental/key_reuse/_core.py b/jax/experimental/key_reuse/_core.py
--- a/jax/experimental/key_reuse/_core.py
+++ b/jax/experimental/key_reuse/_core.py
@@ -356,14 +356,15 @@ def is_consumed(var: core.Atom):
raise KeyReuseError(f"In {eqn.primitive}, source {src.idx} out of range [0, {len(eqn.outvars)}]")
source(eqn.outvars[src.idx])
+ all_inputs = [*jaxpr.invars, *jaxpr.constvars]
return KeyReuseSignature(
- *(Sink(i, consumed[v]) for i, v in enumerate(jaxpr.invars)
+ *(Sink(i, consumed[v]) for i, v in enumerate(all_inputs)
if is_key(v) and np.any(consumed.get(v, False))),
*(Source(i) for i, v in enumerate(jaxpr.outvars)
- if is_key(v) and resolve_forwards(v) not in jaxpr.invars and not consumed.get(v, False)),
- *(Forward(jaxpr.invars.index(resolve_forwards(outvar)), idx_out) # type: ignore[arg-type]
+ if is_key(v) and resolve_forwards(v) not in all_inputs and not consumed.get(v, False)),
+ *(Forward(all_inputs.index(resolve_forwards(outvar)), idx_out) # type: ignore[arg-type]
for idx_out, outvar in enumerate(jaxpr.outvars)
- if is_key(outvar) and resolve_forwards(outvar) in jaxpr.invars)
+ if is_key(outvar) and resolve_forwards(outvar) in all_inputs)
)
@@ -531,23 +532,24 @@ def key_reuse_impl_rule(prim, original_rule):
@wraps(original_rule)
def key_reuse_impl(*args, **kwargs):
if config.debug_key_reuse.value:
+ funcname = str(prim)
+ jaxpr = None
+ consts = []
if prim == pjit.pjit_p:
funcname = "jit-compiled function"
jaxpr = kwargs['jaxpr'].jaxpr
+ consts = kwargs['jaxpr'].consts
signature = jaxpr_type_signature(jaxpr)
elif prim in key_reuse_signatures:
- funcname = str(prim)
- jaxpr = None
signature = key_reuse_signatures[prim]
elif prim in key_reuse_signatures_dynamic:
- funcname = str(prim)
jaxpr = jax.make_jaxpr(partial(prim.bind, **kwargs))(*args).jaxpr
signature = jaxpr_type_signature(jaxpr)
else:
raise RuntimeError(f"Internal: no key reuse rule for primitive {prim}")
- signature.check_signature(*args, funcname=funcname)
+ signature.check_signature(*args, *consts, funcname=funcname)
result = original_rule(*args, **kwargs)
- signature.update_consumption(args, result if prim.multiple_results else [result])
+ signature.update_consumption([*args, *consts], result if prim.multiple_results else [result])
return result
else:
return original_rule(*args, **kwargs)
|
diff --git a/tests/key_reuse_test.py b/tests/key_reuse_test.py
--- a/tests/key_reuse_test.py
+++ b/tests/key_reuse_test.py
@@ -623,17 +623,26 @@ def test_clone_eager(self):
def test_simple_reuse_nojit(self):
key = jax.random.key(0)
- _ = jax.random.bits(key)
with jax.disable_jit():
+ _ = jax.random.bits(key)
with self.assertRaisesRegex(KeyReuseError, self.eager_bits_msg):
_ = jax.random.bits(key)
def test_simple_key_reuse_jit(self):
key = jax.random.key(0)
- _ = jax.random.bits(key)
+ _ = jax.jit(jax.random.bits)(key)
with self.assertRaisesRegex(KeyReuseError, self.jit_msg):
_ = jax.jit(jax.random.bits)(key)
+ def test_closed_over_key_reuse_jit(self):
+ key = jax.random.key(0)
+ @jax.jit
+ def f():
+ return jax.random.uniform(key)
+ _ = f()
+ with self.assertRaisesRegex(KeyReuseError, self.jit_msg):
+ _ = f()
+
def test_key_reuse_within_jit(self):
@jax.jit
def f():
|
key reuse detection fails for closed-over keys in JIT
```python
import jax
jax.config.update('jax_debug_key_reuse', True)
key = jax.random.key(0)
@jax.jit
def f():
return jax.random.bits(key)
f()
f() # Should error but doesn't.
```
| 2024-04-11T19:09:37 |
|
google/jax
| 20,809 |
google__jax-20809
|
[
"20627"
] |
32922f61e9beedb2cd83ffae1a8352ca4dfbb165
|
diff --git a/jax/_src/debugging.py b/jax/_src/debugging.py
--- a/jax/_src/debugging.py
+++ b/jax/_src/debugging.py
@@ -18,6 +18,7 @@
import importlib.util
from collections.abc import Sequence
import functools
+import logging
import string
import sys
from typing import Any, Callable, Union
@@ -25,6 +26,7 @@
import numpy as np
+import jax
import jax.numpy as jnp
from jax import lax
@@ -45,6 +47,8 @@
from jax._src.sharding import Sharding
from jax._src.sharding_impls import NamedSharding, parse_flatten_op_sharding
+logger = logging.getLogger(__name__)
+
class DebugEffect(effects.Effect):
__str__ = lambda self: "Debug"
debug_effect = DebugEffect()
@@ -73,7 +77,14 @@ class OrderedDebugEffect(effects.Effect):
def debug_callback_impl(*args, callback: Callable[..., Any],
effect: DebugEffect):
del effect
- callback(*args)
+ cpu_device, *_ = jax.local_devices(backend="cpu")
+ args = jax.device_put(args, cpu_device)
+ with jax.default_device(cpu_device):
+ try:
+ callback(*args)
+ except BaseException:
+ logger.exception("jax.debug_callback failed")
+ raise
return ()
@debug_callback_p.def_effectful_abstract_eval
|
diff --git a/tests/debugger_test.py b/tests/debugger_test.py
--- a/tests/debugger_test.py
+++ b/tests/debugger_test.py
@@ -110,7 +110,7 @@ def f(x):
return y
expected = _format_multiline(r"""
Entering jdb:
- (jdb) array(2., dtype=float32)
+ (jdb) Array(2., dtype=float32)
(jdb) """)
f(jnp.array(2., jnp.float32))
jax.effects_barrier()
@@ -126,7 +126,7 @@ def f(x):
return y
expected = _format_multiline(r"""
Entering jdb:
- (jdb) (array(2., dtype=float32), array(3., dtype=float32))
+ (jdb) (Array(2., dtype=float32), Array(3., dtype=float32))
(jdb) """)
f(jnp.array(2., jnp.float32))
jax.effects_barrier()
@@ -196,7 +196,7 @@ def g\(x\):
-> y = f\(x\)
return jnp\.exp\(y\)
.*
- \(jdb\) array\(2\., dtype=float32\)
+ \(jdb\) Array\(2\., dtype=float32\)
\(jdb\) > .*debugger_test\.py\([0-9]+\)
def f\(x\):
y = jnp\.sin\(x\)
@@ -225,9 +225,9 @@ def g(x):
return jnp.exp(y)
expected = _format_multiline(r"""
Entering jdb:
- (jdb) array(3., dtype=float32)
+ (jdb) Array(3., dtype=float32)
(jdb) Entering jdb:
- (jdb) array(6., dtype=float32)
+ (jdb) Array(6., dtype=float32)
(jdb) """)
g(jnp.array(2., jnp.float32))
jax.effects_barrier()
@@ -249,9 +249,9 @@ def g(x):
return jnp.exp(y)
expected = _format_multiline(r"""
Entering jdb:
- (jdb) array(1., dtype=float32)
+ (jdb) Array(1., dtype=float32)
(jdb) Entering jdb:
- (jdb) array(2., dtype=float32)
+ (jdb) Array(2., dtype=float32)
(jdb) """)
g(jnp.arange(2., dtype=jnp.float32))
jax.effects_barrier()
@@ -274,9 +274,9 @@ def g(x):
return jnp.exp(y)
expected = _format_multiline(r"""
Entering jdb:
- \(jdb\) array\(.*, dtype=float32\)
+ \(jdb\) Array\(.*, dtype=float32\)
\(jdb\) Entering jdb:
- \(jdb\) array\(.*, dtype=float32\)
+ \(jdb\) Array\(.*, dtype=float32\)
\(jdb\) """)
g(jnp.arange(2., dtype=jnp.float32))
jax.effects_barrier()
@@ -302,7 +302,7 @@ def g(x):
out_shardings=jax.sharding.PartitionSpec("dev"),
)
with jax.sharding.Mesh(np.array(jax.devices()), ["dev"]):
- arr = (1 + np.arange(8)).astype(np.int32)
+ arr = (1 + jnp.arange(8)).astype(np.int32)
expected = _format_multiline(r"""
Entering jdb:
\(jdb\) {}
diff --git a/tests/debugging_primitives_test.py b/tests/debugging_primitives_test.py
--- a/tests/debugging_primitives_test.py
+++ b/tests/debugging_primitives_test.py
@@ -170,7 +170,7 @@ def f(x):
with jtu.capture_stdout() as output:
f(np.array(2, np.int32))
jax.effects_barrier()
- self.assertEqual(output(), f"x: {str(dict(foo=np.array(2, np.int32)))}\n")
+ self.assertEqual(output(), f"x: {str(dict(foo=jnp.array(2, np.int32)))}\n")
def test_debug_print_should_use_default_layout(self):
data = np.array(
|
jax.debug.callback changes array type
### Description
```python
import jax.numpy as jnp
from jax import Array, debug, jit
def callback(x) -> None:
assert isinstance(x, Array) # Fail!
@jit
def f(x):
assert isinstance(x, Array) # Good.
debug.callback(callback, x)
f(jnp.ones(1))
```
For some reason, `debug.callback` rematerializes the array as a numpy array instead of `jaxlib.xla_extension.ArrayImpl` (the thing created by `jnp.asarray`). This can cause problems if the array in the callback is mixed with jax arrays when passed to libraries that implement the Array API.
### System info (python version, jaxlib version, accelerator, etc.)
An NVIDIA GPU may be present on this machine, but a CUDA-enabled jaxlib is not installed. Falling back to cpu.
jax: 0.4.26
jaxlib: 0.4.26
numpy: 1.26.4
python: 3.11.8 (main, Feb 22 2024, 17:25:49) [GCC 11.4.0]
|
Thanks for the report! This is working as intended: callback functions will convert JAX arrays to NumPy arrays; however, this will change soon (see #20325)
@jakevdp Great to hear! So, in an upcoming release then? Feel free to close this if so :smile:
| 2024-04-18T09:34:23 |
google/jax
| 20,862 |
google__jax-20862
|
[
"20854"
] |
83aff78d1220f64d44349018b5852830d96bc269
|
diff --git a/jax/_src/numpy/lax_numpy.py b/jax/_src/numpy/lax_numpy.py
--- a/jax/_src/numpy/lax_numpy.py
+++ b/jax/_src/numpy/lax_numpy.py
@@ -1162,12 +1162,12 @@ def select(
raise ValueError(msg.format(len(condlist), len(choicelist)))
if len(condlist) == 0:
raise ValueError("condlist must be non-empty")
- choices = util.promote_dtypes(default, *choicelist)
- choicelist = choices[1:]
- output = choices[0]
- for cond, choice in zip(condlist[::-1], choicelist[::-1]):
- output = where(cond, choice, output)
- return output
+ # Put the default at front with condition False because
+ # argmax returns zero for an array of False values.
+ choicelist = util.promote_dtypes(default, *choicelist)
+ conditions = stack(broadcast_arrays(False, *condlist))
+ idx = argmax(conditions.astype(bool), axis=0)
+ return lax.select_n(*broadcast_arrays(idx, *choicelist))
@util.implements(np.bincount, lax_description="""\
|
diff --git a/tests/lax_numpy_test.py b/tests/lax_numpy_test.py
--- a/tests/lax_numpy_test.py
+++ b/tests/lax_numpy_test.py
@@ -4532,6 +4532,7 @@ def testWhereScalarPromotion(self):
# maximal set of dtypes.
dtypes=itertools.combinations_with_replacement(all_dtypes, 3),
)
+ @jax.numpy_rank_promotion('allow')
def testSelect(self, n, shapes, dtypes):
dtypes = dtypes[:n+1]
rng = jtu.rand_default(self.rng())
|
jnp.select should use jax.lax.select_n
### Description
jnp.select is a function that's like jnp.where, but selects between n different arrays. jnp.where is a wrapper around jax.lax.select, which makes it more flexible in terms of input shapes and dtypes.
jax.lax.select_n (added in #9482) is a generalization of jax.lax.select that picks from n arrays rather than 2; at lowering time it translates into a binary tree of jax.lax.select's, which is helpful for efficiency when there are many branches. jnp.select doesn't use this: it linearly chains jnp.where's across the different arrays. It would likely help performance if it did, particularly when the condlist entries have lower ndim than the choicelist.
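For readers unfamiliar with it, a minimal illustration of `jax.lax.select_n` semantics (values illustrative):
```python
import jax.numpy as jnp
from jax import lax

which = jnp.array([0, 2, 1])      # per-element case index
a = jnp.array([10, 10, 10])
b = jnp.array([20, 20, 20])
c = jnp.array([30, 30, 30])
lax.select_n(which, a, b, c)      # -> [10, 30, 20]
```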
### System info (python version, jaxlib version, accelerator, etc.)
jax: 0.4.27
jaxlib: 0.4.27
numpy: 1.26.3
python: 3.11.8 (stable, redacted, redacted) [Clang google3-trunk (3b5e7c83a6e226d5bd7ed2e9b67449b64812074c)]
jax.devices (16 total, 16 local): [TpuDevice(id=0, process_index=0, coords=(0,0,0), core_on_chip=0) TpuDevice(id=1, process_index=0, coords=(0,0,0), core_on_chip=1) ... TpuDevice(id=14, process_index=0, coords=(3,1,0), core_on_chip=0) TpuDevice(id=15, process_index=0, coords=(3,1,0), core_on_chip=1)]
process_count: 1
platform: uname_result(system='Linux', node='ebe6e776479e64ea-65d7388ae36.borgtask.google.com', release='5.10.0-smp-1101.33.0.0', version='#1 [v5.10.0-1101.33.0.0] SMP @1708585970', machine='x86_64')
| 2024-04-22T12:40:54 |
|
google/jax
| 20,937 |
google__jax-20937
|
[
"20925"
] |
ded9272a5b653296571040df4a213934114b2900
|
diff --git a/jax/__init__.py b/jax/__init__.py
--- a/jax/__init__.py
+++ b/jax/__init__.py
@@ -118,7 +118,7 @@
from jax._src.api import pmap as pmap
from jax._src.xla_bridge import process_count as process_count
from jax._src.xla_bridge import process_index as process_index
-from jax._src.callback import pure_callback_api as pure_callback
+from jax._src.callback import pure_callback as pure_callback
from jax._src.ad_checkpoint import checkpoint_wrapper as remat
from jax._src.api import ShapeDtypeStruct as ShapeDtypeStruct
from jax._src.api import value_and_grad as value_and_grad
diff --git a/jax/_src/callback.py b/jax/_src/callback.py
--- a/jax/_src/callback.py
+++ b/jax/_src/callback.py
@@ -257,14 +257,40 @@ def pure_callback(
vectorized: bool = False,
**kwargs: Any,
):
- """Calls a pure Python callback.
+ """Calls a pure Python callback. Works under :func:`jit`/:func:`~vmap`/etc.
For more explanation, see `External Callbacks`_.
+ ``pure_callback`` enables calling a Python function in JIT-ed JAX functions.
+ The input ``callback`` will be passed NumPy arrays in place of JAX arrays and
+ should also return NumPy arrays. Execution takes place on CPU, like any
+ Python+NumPy function.
+
+ The callback is treated as functionally pure, meaning it has no side-effects
+ and its output value depends only on its argument values. As a consequence, it
+ is safe to be called multiple times (e.g. when transformed by :func:`~vmap` or
+ :func:`~pmap`), or not to be called at all when e.g. the output of a
+ `jit`-decorated function has no data dependence on its value. Pure callbacks
+ may also be reordered if data-dependence allows.
+
+ When `vmap`-ed the behavior will depend on the value of the
+ ``vectorized`` keyword argument. When ``vectorized`` is ``True``, the callback
+ is assumed to obey
+ ``jax.vmap(callback)(xs) == callback(xs) == jnp.stack([callback(x) for x in xs])``.
+ Therefore, the callback will be called directly on batched inputs (where the
+ batch axes are the leading dimensions). Additionally, the callbacks should
+ return outputs that have corresponding leading batch axes. If not vectorized
+ ``callback`` will be mapped sequentially across the batched axis.
+ For example, if ``callback = lambda x, y: np.matmul(x, y)``, then we are free
+ to set ``vectorized=True`` because the ``np.matmul`` function handles
+ arbitrary leading batch dimensions.
+
Args:
callback: function to execute on the host. The callback is assumed to be a pure
function (i.e. one without side-effects): if an impure function is passed, it
- may behave in unexpected ways, particularly under transformation.
+ may behave in unexpected ways, particularly under transformation. The callable
+ will be passed PyTrees of arrays as arguments, and should return a PyTree of
+ arrays that matches ``result_shape_dtypes``.
result_shape_dtypes: pytree whose leaves have ``shape`` and ``dtype`` attributes,
whose structure matches the expected output of the callback function at runtime.
:class:`jax.ShapeDtypeStruct` is often used to define leaf values.
@@ -301,75 +327,6 @@ def pure_callback(
return tree_util.tree_unflatten(out_tree, out_flat)
-def pure_callback_api(
- callback: Callable[..., Any],
- result_shape_dtypes: Any,
- *args: Any,
- sharding: SingleDeviceSharding | None = None,
- vectorized: bool = False,
- **kwargs: Any,
-):
- """Applies a functionally pure Python callable. Works under :func:`jit`/:func:`~pmap`/etc.
-
- ``pure_callback`` enables calling a Python function in JIT-ed JAX functions.
- The input ``callback`` will be passed NumPy arrays in place of JAX arrays and
- should also return NumPy arrays. Execution takes place on CPU, like any
- Python+NumPy function.
-
- The callback is treated as functionally pure, meaning it has no side-effects
- and its output value depends only on its argument values. As a consequence, it
- is safe to be called multiple times (e.g. when transformed by :func:`~vmap` or
- :func:`~pmap`), or not to be called at all when e.g. the output of a
- `jit`-decorated function has no data dependence on its value. Pure callbacks
- may also be reordered if data-dependence allows.
-
- When :func:`~pmap`-ed, the pure callback will be called several times (one on each
- axis of the map). When `vmap`-ed the behavior will depend on the value of the
- ``vectorized`` keyword argument. When ``vectorized`` is ``True``, the callback
- is assumed to obey
- ``jax.vmap(callback)(xs) == callback(xs) == jnp.stack([callback(x) for x in xs])``.
- Therefore, the callback will be called directly on batched inputs (where the
- batch axes are the leading dimensions). Additionally, the callbacks should
- return outputs that have corresponding leading batch axes. If not vectorized
- ``callback`` will be mapped sequentially across the batched axis.
- For example, if ``callback = lambda x, y: np.matmul(x, y)``, then we are free
- to set ``vectorized=True`` because the ``np.matmul`` function handles
- arbitrary leading batch dimensions.
-
- Args:
- callback: A Python callable. The callable will be passed PyTrees of NumPy
- arrays as arguments, and should return a PyTree of NumPy arrays that
- matches ``result_shape_dtypes``.
- result_shape_dtypes: A PyTree with leaves that are objects with ``shape``
- and ``dtype`` attributes which represent to the shapes and dtypes of the
- value of ``callback`` applied to ``args`` and ``kwargs``.
- *args: The positional arguments to the callback. Must be PyTrees of JAX
- types.
- sharding: optional sharding that specifies the device from which the
- callback should be invoked.
- vectorized: A boolean that indicates whether or not ``callback`` is
- vectorized, meaning it can handle arrays with additional leading
- dimensions. If ``vectorized`` is `True`, when the callback is mapped
- via `jax.vmap`, it will be called directly on inputs with leading batch
- dimensions instead of executing ``callback`` on each mapped input
- individually. The callback should also return outputs batched across the
- leading axis. By default, ``vectorized`` is ``False``.
- **kwargs: The keyword arguments to the callback. Must be PyTrees of JAX
- types.
-
- Returns:
- The value of ``callback(*args, **kwargs)``.
- """
- return pure_callback(
- callback,
- result_shape_dtypes,
- *args,
- sharding=sharding,
- vectorized=vectorized,
- **kwargs,
- )
-
-
# IO Callback
io_callback_p = core.Primitive("io_callback")
|
"pure_callback" is not exported from module "jax"
### Description
```shell
$ echo "import jax; jax.pure_callback" >> test.py
$ pyright test.py
/Users/carlos/Downloads/test.py
/Users/carlos/Downloads/test.py:2:17 - error: "pure_callback" is not exported from module "jax" (reportPrivateImportUsage)
1 error, 0 warnings, 0 informations
```
See [this line](https://github.com/google/jax/blob/main/jax/__init__.py#L121). Unlike the other exports, `import pure_callback_api as pure_callback` doesn't follow the `import X as X` redundancy pattern that tells type checkers that this is a re-export, as described in [PEP 484](https://peps.python.org/pep-0484/):
> Modules and variables imported into the stub are not considered exported from the stub unless the import uses the `import ... as ...` form or the equivalent `from ... import ... as ...` form. (UPDATE: To clarify, the intention here is that only names imported using the form `X as X` will be exported, i.e. the name before and after as must be the same.)
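To make the distinction concrete, a small sketch using the names from this issue (illustrative only, not a proposed diff):
```python
# Flagged by pyright: the imported name differs from the exported name,
# so the import is treated as private rather than as a re-export.
from jax._src.callback import pure_callback_api as pure_callback

# The `X as X` convention: the name before and after `as` is the same,
# which type checkers treat as an explicit re-export.
from jax._src.callback import pure_callback as pure_callback
```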
### System info (python version, jaxlib version, accelerator, etc.)
```
jax: 0.4.26
jaxlib: 0.4.26
numpy: 1.26.4
python: 3.12.2 (main, Feb 6 2024, 20:19:44) [Clang 15.0.0 (clang-1500.1.0.2.5)]
jax.devices (1 total, 1 local): [CpuDevice(id=0)]
process_count: 1
platform: uname_result(system='Darwin', node='Carloss-MacBook-Pro-2.local', release='23.2.0', version='Darwin Kernel Version 23.2.0: Wed Nov 15 21:54:05 PST 2023; root:xnu-10002.61.3~2/RELEASE_ARM64_T6031', machine='arm64')
```
```shell
$ pyright --version
pyright 1.1.360
```
|
Thanks - I really wish there were a way to export a name while also renaming it, without having to use `__all__`. I think the only possibility here is to rename `pure_callback_api` to `pure_callback` in `jax._src.callback`, so that it can be imported by its exported name, which frankly is pretty silly.
Also, the context of this statement in PEP 484 is in a discussion of stub files, so it's not clear to me that this was ever intended to apply to `*.py` files. I've never found a good answer to that, but in any case `import X as X` within `.py` files seems to be the export convention that at least some type checkers have landed on.
Python typing is such a mess.
Maybe this is the right fix:
```python
from jax._src.callback import pure_callback_api
jax._src.callback.pure_callback = pure_callback_api
del pure_callback_api
from jax._src.callback import pure_callback as pure_callback
del jax._src.callback.pure_callback
```
It certainly seems like the only way to export the API without renaming it at its source.
Actually, that won't work because `jax._src.callback.pure_callback` already exists. `pure_callback_api` is a shim around it that is basically identical except for the docstring (added in https://github.com/google/jax/commit/c1f65fc8b21bd8a238af2b7c9ff4c90196277be3). I'm not sure what the intent of introducing that was, but we could probably just delete `pure_callback_api` and import `pure_callback as pure_callback` directly.
Nevertheless, my snark about export conventions still stands.
| 2024-04-25T17:21:10 |
|
google/jax
| 21,216 |
google__jax-21216
|
[
"20392"
] |
e735a00cdce11e7a376573d4e171a54443130cdf
|
diff --git a/jax/_src/interpreters/pxla.py b/jax/_src/interpreters/pxla.py
--- a/jax/_src/interpreters/pxla.py
+++ b/jax/_src/interpreters/pxla.py
@@ -1319,6 +1319,8 @@ def _hlo_shard(aval, axis_env, xs, in_axis):
if aval is core.abstract_token:
return xs
elif isinstance(aval, core.ShapedArray):
+ if dtypes.issubdtype(aval.dtype, dtypes.extended):
+ aval = aval.dtype._rules.physical_element_aval(aval.dtype)
x, = xs
dims = list(aval.shape)
zero = mlir.ir_constant(np.zeros((), dtype=np.uint32))
|
diff --git a/tests/pmap_test.py b/tests/pmap_test.py
--- a/tests/pmap_test.py
+++ b/tests/pmap_test.py
@@ -2207,6 +2207,15 @@ def test_pmap_stack_size(self):
y = jax.pmap(jax.scipy.linalg.expm)(jnp.array([x, x]))
y.block_until_ready() # doesn't crash
+ def test_pmap_of_prng_key(self):
+ # Regression test for https://github.com/google/jax/issues/20392
+ keys = jax.random.split(jax.random.key(0), jax.device_count())
+ result1 = jax.pmap(jax.random.bits)(keys)
+ with jtu.ignore_warning(
+ category=UserWarning, message="The jitted function foo includes a pmap"):
+ result2 = jax.jit(jax.pmap(jax.random.bits))(keys)
+ self.assertArraysEqual(result1, result2)
+
@jtu.pytest_mark_if_available('multiaccelerator')
class CppPmapTest(PythonPmapTest):
|
`jit(pmap)` on key arrays raises an error in `pmap` lowering
### Description
While not really a bug, I was surprised to find a rogue variable `jax.random.key` that seems to be similar to PRNGKey but fails for the case `jit(pmap)`. Copilot dropped this surprise in as an autocomplete suggestion that I didn't notice until later, during debugging.
```python
import os
num_devices = 8
os.environ['XLA_FLAGS'] = f'--xla_force_host_platform_device_count={num_devices}'
import jax
def test():
def single_fn(key):
return jax.random.normal(key, ())
def pmap_batched_fn(key):
batch_size = num_devices
keys = jax.random.split(key, batch_size)
pmap_fn = jax.pmap(single_fn)
return pmap_fn(keys)
key = jax.random.key(0) # Some rogue jax.random.key
# key = jax.random.PRNGKey(0) # Works of course
# Passes pmap(...)
result_pmap_no_jit = pmap_batched_fn(key)
# Fails jit(pmap(...))
result_pmap_jit = jax.jit(pmap_batched_fn)(key)
```
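For context on why `jax.random.key` behaves differently from `jax.random.PRNGKey` here: it returns a typed key array with an extended key dtype, which is the case the fix above special-cases in the `pmap` lowering. A minimal sketch of the difference (using only public APIs):
```python
import jax

legacy = jax.random.PRNGKey(0)  # raw uint32[2] array
typed = jax.random.key(0)       # typed key array with an extended dtype

print(legacy.dtype, legacy.shape)   # uint32 (2,)
print(typed.dtype, typed.shape)     # key<fry> ()
print(jax.random.key_data(typed))   # the underlying uint32[2] buffer
```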
### System info (python version, jaxlib version, accelerator, etc.)
Present at least in JAX versions 0.4.20 to 0.4.25
|
`jax.random.key` is far from rogue. See:
* https://jax.readthedocs.io/en/latest/jax.random.html
* https://jax.readthedocs.io/en/latest/jep/9263-typed-keys.html
* #9263
The function was introduced quietly a few versions ago. We only recently switched the module documentation to mention it (in #17741), so it isn't in the documentation for a release yet.
So, this looks like a bug! Thanks for filing.
Also, since we changed the docs in the current release, we should remember to mention it in the changelog.
fyi @jakevdp
Then I'm glad I used the correct issue template :)
| 2024-05-14T02:04:50 |
google/jax
| 21,362 |
google__jax-21362
|
[
"21330"
] |
be7939a20e4755ea9f0223def6cd20c57fadbfea
|
diff --git a/jax/_src/numpy/reductions.py b/jax/_src/numpy/reductions.py
--- a/jax/_src/numpy/reductions.py
+++ b/jax/_src/numpy/reductions.py
@@ -467,7 +467,7 @@ def _var(a: ArrayLike, axis: Axis = None, dtype: DTypeLike | None = None,
dtype=computation_dtype, keepdims=keepdims)
normalizer = lax.sub(normalizer, lax.convert_element_type(ddof, computation_dtype))
result = sum(centered, axis, dtype=computation_dtype, keepdims=keepdims, where=where)
- return lax.div(result, normalizer).astype(dtype)
+ return _where(normalizer > 0, lax.div(result, normalizer).astype(dtype), np.nan)
def _var_promote_types(a_dtype: DTypeLike, dtype: DTypeLike | None) -> tuple[DType, DType]:
|
diff --git a/tests/lax_numpy_reducers_test.py b/tests/lax_numpy_reducers_test.py
--- a/tests/lax_numpy_reducers_test.py
+++ b/tests/lax_numpy_reducers_test.py
@@ -571,6 +571,17 @@ def np_fun(x):
self._CompileAndCheck(jnp_fun, args_maker, rtol=tol,
atol=tol)
+ @jtu.sample_product(
+ jnp_fn=[jnp.var, jnp.std],
+ size=[0, 1, 2]
+ )
+ def testStdOrVarLargeDdofReturnsNan(self, jnp_fn, size):
+ # test for https://github.com/google/jax/issues/21330
+ x = jnp.arange(size)
+ self.assertTrue(np.isnan(jnp_fn(x, ddof=size)))
+ self.assertTrue(np.isnan(jnp_fn(x, ddof=size + 1)))
+ self.assertTrue(np.isnan(jnp_fn(x, ddof=size + 2)))
+
@jtu.sample_product(
shape=[(5,), (10, 5)],
dtype=all_dtypes,
|
Behavior of `jax.experimental.array_api.var` w/ `correction > n`
### Description
`jax.experimental.array_api.var` does not follow the special case specified in the [array API documentation of `var`](https://data-apis.org/array-api/latest/API_specification/generated/array_api.var.html) for `correction > N`.
> Let `N` equal the number of elements over which to compute the variance.
>
> - If `N - correction` is less than or equal to 0, the variance is NaN.
```python3
import jax.experimental.array_api as xp
xp.var(xp.asarray([1.]), correction=1) # Array(nan, dtype=float32)
xp.var(xp.asarray([]), correction=1) # Array(-0., dtype=float32)
```
In most cases, `-0` would be the result of following the usual formula. Whether it is applicable for `correction > N` is debatable, but at least in the case above, the mean is undefined, so it might be desirable to update `jax.numpy`, too.
```python3
import jax.numpy as jnp
jnp.mean(jnp.asarray([])) # Array(nan, dtype=float32)
jnp.var(jnp.asarray([]), ddof=1) # Array(-0., dtype=float32)
```
For context, I ran into this when adding array API support to `scipy.stats.bartlett` (scipy/scipy#20751).
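A minimal sketch of the requested special case (a hypothetical helper, not the actual `jax.numpy` implementation), assuming a floating-point input:
```python
import jax.numpy as jnp

def corrected_var(x, correction=0):
    n = x.size
    if n - correction <= 0:
        # Array API special case: N - correction <= 0 -> NaN,
        # instead of the -0.0 the naive formula can produce.
        return jnp.asarray(jnp.nan, dtype=x.dtype)
    centered = x - jnp.mean(x)
    return jnp.sum(centered * centered) / (n - correction)
```
With this guard, `corrected_var(jnp.asarray([1.0]), correction=1)` returns `nan`, matching the standard's wording above.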
### System info (python version, jaxlib version, accelerator, etc.)
```
jax: 0.4.28
jaxlib: 0.4.28
numpy: 1.25.2
python: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]
jax.devices (1 total, 1 local): [CpuDevice(id=0)]
process_count: 1
platform: uname_result(system='Linux', node='2e6238110dfb', release='6.1.85+', version='#1 SMP PREEMPT_DYNAMIC Sun Apr 28 14:29:16 UTC 2024', machine='x86_64')
```
| 2024-05-22T15:33:20 |
|
cleanlab/cleanlab
| 169 |
cleanlab__cleanlab-169
|
[
"168"
] |
080c7a841cf84275eeb29dccc72a801951ff60b1
|
diff --git a/.ci/nblint.py b/.ci/nblint.py
new file mode 100755
--- /dev/null
+++ b/.ci/nblint.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+
+"""
+Lint Jupyter notebooks being checked in to this repo.
+
+Currently, this "linter" only checks one property, that the notebook's output
+cells are empty, to avoid bloating the repository size.
+"""
+
+
+import argparse
+import json
+import os
+import sys
+
+
+def main():
+ opts = get_opts()
+ notebooks = find_notebooks(opts.dir)
+ for notebook in notebooks:
+ check(notebook)
+
+
+def get_opts():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("dir", help="Directories to search for notebooks", type=str, nargs="+")
+ return parser.parse_args()
+
+
+def find_notebooks(dirs):
+ notebooks = set()
+ for d in dirs:
+ for dirname, _, filenames in os.walk(d):
+ for filename in filenames:
+ if not filename.endswith(".ipynb"):
+ continue
+ full_path = os.path.join(dirname, filename)
+ notebooks.add(full_path)
+ return notebooks
+
+
+def check(notebook):
+ with open(notebook) as f:
+ contents = json.load(f)
+ check_outputs_empty(notebook, contents)
+
+
+def check_outputs_empty(path, contents):
+ for i, cell in enumerate(contents["cells"]):
+ if "outputs" in cell and cell["outputs"] != []:
+ fail(path, "output is not empty", i)
+
+
+def fail(path, message, cell=None):
+ cell_msg = f" [cell {cell}]" if cell is not None else ""
+ print(f"{path}{cell_msg}: {message}")
+ sys.exit(1)
+
+
+if __name__ == "__main__":
+ main()
|
Add CI to ensure that Jupyter notebooks don't include output
Preserving the output cells in Jupyter notebooks that are checked in to this repo bloats the repo size, and keeping the output is not necessary because the notebooks are run as part of the CI for producing docs.
For example, in #165, the `audio.ipynb` is 112K (and now part of the repo history forever), while clearing the output cells reduces that size down to 24K. I suspect that Git's delta compression will also work better without the output cells there as we make changes to the notebook. Also, the diffs will be more readable.
We should add CI that enforces that Jupyter notebooks checked in to the repo don't have any output cells. In the meantime, let's be careful and do this manually. CC @weijinglok @JohnsonKuan @jwmueller
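Alongside the CI check, a small local helper (hypothetical, not part of this repo) could strip outputs before committing; a sketch:
```python
#!/usr/bin/env python
"""Clear output cells and execution counts from a notebook, in place."""
import json
import sys

def clear_outputs(path):
    with open(path) as f:
        nb = json.load(f)
    for cell in nb["cells"]:
        if "outputs" in cell:
            cell["outputs"] = []
        if "execution_count" in cell:
            cell["execution_count"] = None
    with open(path, "w") as f:
        json.dump(nb, f, indent=1)
        f.write("\n")

if __name__ == "__main__":
    clear_outputs(sys.argv[1])
```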
| 2022-04-05T12:18:22 |
||
cleanlab/cleanlab
| 322 |
cleanlab__cleanlab-322
|
[
"281",
"281"
] |
5973a21958f47fd8e7246ccf36c8304ff4f22e3b
|
diff --git a/cleanlab/classification.py b/cleanlab/classification.py
--- a/cleanlab/classification.py
+++ b/cleanlab/classification.py
@@ -263,7 +263,7 @@ def __init__(
def fit(
self,
X,
- labels,
+ labels=None,
*,
pred_probs=None,
thresholds=None,
@@ -274,6 +274,7 @@ def fit(
clf_kwargs={},
clf_final_kwargs={},
validation_func=None,
+ y=None,
):
"""
Train the model `clf` with error-prone, noisy labels as if
@@ -388,6 +389,9 @@ class 0, 1, ..., K-1. `pred_probs` should have been computed using 3 (or
on the cleaned data subset, you should explicitly pass in that data yourself
(eg. via `clf_final_kwargs` or `clf_kwargs`).
+ y: np.array or pd.Series, optional
+ Alternative argument that can be specified instead of `labels`. Specifying `y` has the same effect as specifying `labels`, and is offered as an alternative for compatibility with sklearn.
+
Returns
-------
CleanLearning
@@ -416,6 +420,13 @@ class 0, 1, ..., K-1. `pred_probs` should have been computed using 3 (or
not just the subset of cleaned data left after pruning out the label issues.
"""
+ if labels is not None and y is not None:
+ raise ValueError("You must specify either `labels` or `y`, but not both.")
+ if y is not None:
+ labels = y
+ if labels is None:
+ raise ValueError("You must specify `labels`.")
+
self.clf_final_kwargs = {**clf_kwargs, **clf_final_kwargs}
if "sample_weight" in clf_kwargs:
|
diff --git a/tests/test_classification.py b/tests/test_classification.py
--- a/tests/test_classification.py
+++ b/tests/test_classification.py
@@ -491,6 +491,29 @@ def test_clf_fit_nm_inm(format):
assert score < score_nm_inm + 1e-4
[email protected]("format", list(DATA_FORMATS.keys()))
+def test_clf_fit_y_alias(format):
+ data = DATA_FORMATS[format]
+ cl = CleanLearning(seed=SEED)
+
+ # Valid signature
+ cl.fit(data["X_train"], data["labels"])
+
+ # Valid signature for labels/y alias
+ cl.fit(data["X_train"], labels=data["labels"])
+ cl.fit(data["X_train"], y=data["labels"])
+ cl.fit(X=data["X_train"], labels=data["labels"])
+ cl.fit(X=data["X_train"], y=data["labels"])
+
+ # Invalid signatures
+ with pytest.raises(ValueError):
+ cl.fit(data["X_train"])
+ with pytest.raises(ValueError):
+ cl.fit(data["X_train"], data["labels"], y=data["labels"])
+ with pytest.raises(ValueError):
+ cl.fit(X=data["X_train"], labels=data["labels"], y=data["labels"])
+
+
@pytest.mark.parametrize("format", list(DATA_FORMATS.keys()))
def test_pred_and_pred_proba(format):
data = DATA_FORMATS[format]
|
Change labels arg -> y in CleanLearning.fit()
`CleanLearning.fit()` argument names should match those of sklearn fit()
| 2022-07-24T23:15:46 |
|
cleanlab/cleanlab
| 397 |
cleanlab__cleanlab-397
|
[
"388",
"288"
] |
6ec5b173dd81fb604398ab9f90843676ca24b3cb
|
diff --git a/cleanlab/internal/token_classification_utils.py b/cleanlab/internal/token_classification_utils.py
--- a/cleanlab/internal/token_classification_utils.py
+++ b/cleanlab/internal/token_classification_utils.py
@@ -160,18 +160,22 @@ def color_sentence(sentence: str, word: str) -> str:
Parameters
----------
- sentence: str
+ sentence:
a sentence where the word is searched
- word: str
- keyword to find in `sentence`. Assumes the word exists in token
-
+ word:
+ keyword to find in `sentence`. Assumes the word exists in the sentence.
Returns
---------
- colored_sentence: str
- `sentence` where the first occurance of the word is colored red, using `termcolor.colored`
+ colored_sentence:
+ `sentence` where the every occurance of the word is colored red, using `termcolor.colored`
"""
- start_idx = sentence.index(word)
- before, after = sentence[:start_idx], sentence[start_idx + len(word) :]
- return "%s%s%s" % (before, colored(word, "red"), after)
+ colored_word = colored(word, "red")
+ colored_sentence, number_of_substitions = re.subn(
+ r"\b{}\b".format(word), colored_word, sentence
+ )
+ if number_of_substitions == 0:
+ # Use basic string manipulation if regex fails
+ colored_sentence = sentence.replace(word, colored_word)
+ return colored_sentence
|
diff --git a/tests/test_token_classification.py b/tests/test_token_classification.py
--- a/tests/test_token_classification.py
+++ b/tests/test_token_classification.py
@@ -121,9 +121,36 @@ def test_merge_probs_with_normalization():
assert np.allclose(expected, merged_probs)
-def test_color_sentence():
- colored = color_sentence(sentences[0], words[0][1])
- assert colored == "Hello \x1b[31mWorld\x1b[0m"
+# Color boundaries
+C_L, C_R = "\x1b[31m", "\x1b[0m"
+
+
[email protected](
+ "sentence,word,expected",
+ [
+ ("Hello World", "World", f"Hello {C_L}World{C_R}"),
+ ("If you and I were to meet", "I", f"If you and {C_L}I{C_R} were to meet"),
+ ("If you and I were to meet", "If you and I", f"{C_L}If you and I{C_R} were to meet"),
+ ("If you and I were to meet", "If you and I w", f"{C_L}If you and I w{C_R}ere to meet"),
+ ("I think I know this", "I", f"{C_L}I{C_R} think {C_L}I{C_R} know this"),
+ ("A good reason for a test", "a", f"A good reason for {C_L}a{C_R} test"),
+ ("ab ab a b ab", "ab a", f"ab {C_L}ab a{C_R} b ab"),
+ ("ab ab ab ab", "ab a", f"{C_L}ab a{C_R}b {C_L}ab a{C_R}b"),
+ ],
+ ids=[
+ "single_word",
+ "ignore_subwords",
+ "multi-token_match",
+ "substring_replacement",
+ "multiple_matches",
+ "case_sensitive",
+ "only_word_boundary",
+ "non_overlapping_substrings",
+ ],
+)
+def test_color_sentence(sentence, word, expected):
+ colored = color_sentence(sentence, word)
+ assert colored == expected
issues = find_label_issues(labels, pred_probs)
|
color_sentence matches subtokens
`color_sentence` technically does subtoken matching instead of checking for equality between the tokenized sentence and the given token.
Is this intended?
# Stack trace
```bash
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AssertionError: ('\x1b[31mI\x1b[0mf you and I were to meet', 'If you and \x1b[31mI\x1b[0m were to meet')
```
# Steps to reproduce
Here's a MWE
```python
from cleanlab.internal.token_classification_utils import color_sentence
sentence = "If you and I were to meet"
word = "I"
output = color_sentence(sentence, word)
expected_output = "If you and \x1b[31mI\x1b[0m were to meet"
assert output == expected_output, (output, expected_output)
```
# Additional information
- **Cleanlab version**: 31d43707014cb5c217ee1e9b014a206d554f2fd3
- **Operating system**: Linux 4.19.128-microsoft-standard
- **Python version**: Python 3.9.12
## Definition of `color_sentence`
https://github.com/cleanlab/cleanlab/blob/31d43707014cb5c217ee1e9b014a206d554f2fd3/cleanlab/internal/token_classification_utils.py#L144-L164
Give better error messages when a class has no examples in `find_label_issues`
Currently, we produce various internal errors when a class is not represented. We should either support missing classes, or we should produce something like a `ValueError("find_label_issues requires that each class has at least one example")`.
Right now, we produce a variety of internal errors (data-dependent).
This can be seen as a dupe of #41 and #89, though it suggests at least fixing the confusing error messages as a stop-gap solution until we decide whether or not to support classes with no examples. Potentially related to #266.
# Steps to reproduce
## Example 1
```python
import cleanlab
import numpy as np
labels = np.array([0, 0, 1, 1])
pred_probs = np.array([[0.1, 0.7, 0.2], [0.1, 0.8, 0.1], [0.7, 0.2, 0.1], [0.8, 0.1, 0.1]])
issues = cleanlab.filter.find_label_issues(labels, pred_probs, n_jobs=1)
```
Produces the result:
```
Traceback (most recent call last):
File ".../error.py", line 6, in <module>
issues = cleanlab.filter.find_label_issues(labels, pred_probs)
File ".../src/cleanlab/env/lib/python3.9/site-packages/cleanlab/filter.py", line 457, in find_label_issues
confident_joint, cl_error_indices = compute_confident_joint(
File ".../src/cleanlab/env/lib/python3.9/site-packages/cleanlab/count.py", line 435, in compute_confident_joint
pred_probs_bool = pred_probs >= thresholds - 1e-6
ValueError: operands could not be broadcast together with shapes (4,3) (2,)
```
## Example 2
```python
import cleanlab
import numpy as np
labels = np.array([0, 0])
pred_probs = np.array([[0.3, 0.7], [0.2, 0.8]])
issues = cleanlab.filter.find_label_issues(labels, pred_probs, n_jobs=1)
```
Produces the result:
```
Traceback (most recent call last):
File ".../error.py", line 6, in <module>
issues = cleanlab.filter.find_label_issues(labels, pred_probs, n_jobs=1)
File ".../src/cleanlab/env/lib/python3.9/site-packages/cleanlab/filter.py", line 561, in find_label_issues
label_issues_masks_per_class = [_prune_by_count(k, args) for k in range(K)]
File ".../src/cleanlab/env/lib/python3.9/site-packages/cleanlab/filter.py", line 561, in <listcomp>
label_issues_masks_per_class = [_prune_by_count(k, args) for k in range(K)]
File ".../src/cleanlab/env/lib/python3.9/site-packages/cleanlab/filter.py", line 204, in _prune_by_count
if label_counts[k] <= min_examples_per_class: # No prune if not at least min_examples_per_class
IndexError: index 1 is out of bounds for axis 0 with size 1
```
# Additional information
- **Cleanlab version**: 2.0.0 (and also reproduced on master = 5e0b62d79790028c6c69a76c6af28c813a27ace5, though the stack traces and exceptions are slightly different there)
- **Operating system**: macOS 12.1, shouldn't be relevant here
- **Python version**: Python 3.9.12
|
Any thoughts on this @ericwang1997?
The function currently searches for the first occurrence of the substring in the sentence, so it will be problematic if we're searching for tokens as generic as "I". There are two ways to get around this:
1. Add an optional argument to take in the given tokens. Using your example, the user will call:
```
tokens = ["If", "you", "and", "I", "were", "to", "meet"]
color_sentence(sentence, word, tokens=tokens)
```
or simply generate the sentence internally using `get_sentence` and call:
```
color_sentence(word, tokens)
```
2. Tokenize the sentence and search for the keyword token. Or simply search for `" " + word + " "`. The first option is more ideal imo.
Note that it's also possible that the keyword is not a given token (for example, if `word == "If you and I"`). The code below covers that case:
```
from termcolor import colored
from cleanlab.internal.token_classification_utils import get_sentence
def color_sentence(word, tokens):
sentence = get_sentence(tokens)
idx = -1
for i, token in enumerate(tokens):
if token == word:
idx = i
break
if idx == -1: # not found
start_idx = sentence.index(word)
before, after = sentence[:start_idx], sentence[start_idx + len(word) :]
else:
before, after = get_sentence(tokens[:idx]), get_sentence(tokens[idx+1:])
return "%s%s%s" % (before, colored(word, "red"), after)
```
Thanks for those suggestions.
I agree with passing the tokenized text to the function instead, as it's only called by `display_issues`, which has the tokenized data at hand.
> The code below covers that case
Thanks! I think it's best I redefine `color_sentence` this way as you suggest, add the MWE as a test case, and update the function call in `display_issues` accordingly!
^ Sounds like a good plan to me! Also, if the word occurs multiple times in the token list, should `color_sentence()` color all instances of the word? Or why is coloring only the first instance more desirable?
Good call. The keyword typically appears only once in the sentence as long as it's specific enough, but highlighting all occurrences makes more sense (it might be easier to extend this functionality using the new version of `color_sentence()` above).
@anishathalye do we just need to raise an error like `ValueError("find_label_issues requires that each class has at least one example")` for these two cases?
I am not sure what you mean by "these two cases."
We already raise warnings in two places [[1](https://github.com/cleanlab/cleanlab/blob/master/cleanlab/filter.py#L165), [2](https://github.com/cleanlab/cleanlab/blob/master/cleanlab/filter.py#L206)] when a class has too few examples (under min examples per class). We could raise an exception there if a class has zero examples. Or maybe it's cleaner to do it in another spot; that needs to be investigated. Also need to make sure that the exception propagates properly under both `n_jobs=1` (no multiprocessing) and `n_jobs>1` (multiprocessing).
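To make the intent concrete, the kind of guard being discussed could look roughly like this (a sketch only; the exact location, message, and behavior are still to be decided):
```python
import numpy as np

def _check_all_classes_present(labels, pred_probs):
    # pred_probs has one column per class; every class should appear in labels.
    num_classes = pred_probs.shape[1]
    if len(np.unique(labels)) < num_classes:
        raise ValueError(
            "find_label_issues requires that each class has at least one example"
        )
```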
Hey, thanks for explaining the requirement. I have tried to debug the code: as far as I can see, we use [label_counts](https://github.com/cleanlab/cleanlab/blob/master/cleanlab/filter.py#L451), which is the number of examples in each class of `labels`, [K](https://github.com/cleanlab/cleanlab/blob/master/cleanlab/filter.py#L453), which is the number of classes in `labels`, and `min_examples_per_class`, which is 1 by default.
`label_counts` is a value count, which for the first example is [2, 2]. On [this line](https://github.com/cleanlab/cleanlab/blob/master/cleanlab/filter.py#L565) we pass `k` over `range(K)`, i.e. [0,1,2,3], but `label_counts` has no element at index 2 or 3, so the indexing [label_counts[k]](https://github.com/cleanlab/cleanlab/blob/master/cleanlab/filter.py#L204) will throw an exception.
Now we can add a condition like `if len(label_counts) < k: raise exception` in `find_label_issues`.
I am not sure that it is correct, but that would throw a warning for the given example.
@anishathalye once you confirm, I can push a PR for this.
Thanks for volunteering to contribute @sidshrivastav
we could definitely use your help improving this library!!
For this specific issue though, I'm already handling it as part of another PR as it is very simple.
@jwmueller thanks for updating
Addressed in: https://github.com/cleanlab/cleanlab/pull/308
| 2022-09-03T01:22:03 |
cleanlab/cleanlab
| 404 |
cleanlab__cleanlab-404
|
[
"403"
] |
1a239922fe195d2a6104d6dc3552d53da16380ce
|
diff --git a/cleanlab/internal/token_classification_utils.py b/cleanlab/internal/token_classification_utils.py
--- a/cleanlab/internal/token_classification_utils.py
+++ b/cleanlab/internal/token_classification_utils.py
@@ -173,7 +173,7 @@ def color_sentence(sentence: str, word: str) -> str:
"""
colored_word = colored(word, "red")
colored_sentence, number_of_substitions = re.subn(
- r"\b{}\b".format(word), colored_word, sentence
+ r"\b{}\b".format(re.escape(word)), colored_word, sentence
)
if number_of_substitions == 0:
# Use basic string manipulation if regex fails
|
diff --git a/tests/test_token_classification.py b/tests/test_token_classification.py
--- a/tests/test_token_classification.py
+++ b/tests/test_token_classification.py
@@ -141,6 +141,11 @@ def test_merge_probs_with_normalization():
("A good reason for a test", "a", f"A good reason for {C_L}a{C_R} test"),
("ab ab a b ab", "ab a", f"ab {C_L}ab a{C_R} b ab"),
("ab ab ab ab", "ab a", f"{C_L}ab a{C_R}b {C_L}ab a{C_R}b"),
+ (
+ "Alan John Percivale (A.j.p.) Taylor died",
+ "(",
+ f"Alan John Percivale {C_L}({C_R}A.j.p.) Taylor died",
+ ),
],
ids=[
"single_word",
@@ -151,6 +156,7 @@ def test_merge_probs_with_normalization():
"case_sensitive",
"only_word_boundary",
"non_overlapping_substrings",
+ "issue_403-escape_special_regex_characters",
],
)
def test_color_sentence(sentence, word, expected):
|
color_sentence fails in tutorial notebook
<!-- Briefly summarize the issue. -->
In the notebook, `display_issues` highlights all token issues with a call to `color_sentence`:
https://github.com/cleanlab/cleanlab/blob/1a239922fe195d2a6104d6dc3552d53da16380ce/docs/source/tutorials/token_classification.ipynb?short_path=2ebceca#L369-L379
One of the examples trips everything up with the following error:
```
missing ), unterminated subpattern at position 2
```
# Stack trace
From [failed CI job](https://github.com/cleanlab/cleanlab/actions/runs/2996555945):
<details><summary> Click to toggle stack trace</summary>
```bash
---------------------------------------------------------------------------
error Traceback (most recent call last)
Input In [12], in <module>
----> 1 display_issues(issues,given_words,pred_probs=pred_probs,given_labels=labels,
2 exclude=[(0,1),(1,0)],class_names=merged_entities)
File ~/work/cleanlab/cleanlab/cleanlab/token_classification/summary.py:81, in display_issues(issues, given_words, pred_probs, given_labels, exclude, class_names, top)
78 given = class_names[given]
80 shown += 1
---> 81 print("Sentence %d, token %d: \n%s" % (i, j, color_sentence(sentence,word)))
82 if given_labels and not pred_probs:
83 print("Given label: %s\n" % str(given))
File ~/work/cleanlab/cleanlab/cleanlab/internal/token_classification_utils.py:175, in color_sentence(sentence, word)
158 """
159 Searches for a given token in the sentence and returns the sentence where the given token is colored red
160
(...)
172
173 """
174 colored_word = colored(word, "red")
--> 175 colored_sentence, number_of_substitions = re.subn(
176 r"\b{}\b".format(word),colored_word,sentence
177 )
178 if number_of_substitions == 0:
179 # Use basic string manipulation if regex fails
180 colored_sentence = sentence.replace(word, colored_word)
File /opt/hostedtoolcache/Python/3.8.13/x64/lib/python3.8/re.py:221, in subn(pattern, repl, string, count, flags)
212 def subn(pattern, repl, string, count=0, flags=0):
213 """Return a 2-tuple containing (new_string, number).
214 new_string is the string obtained by replacing the leftmost
215 non-overlapping occurrences of the pattern in the source
(...)
219 If it is a callable, it's passed the Match object and must
220 return a replacement string to be used."""
--> 221 return _compile(pattern,flags).subn(repl, string, count)
File /opt/hostedtoolcache/Python/3.8.13/x64/lib/python3.8/re.py:304, in _compile(pattern, flags)
302 if not sre_compile.isstring(pattern):
303 raise TypeError("first argument must be string or compiled pattern")
--> 304 p = sre_compile.compile(pattern,flags)
305 if not (flags & DEBUG):
306 if len(_cache) >= _MAXCACHE:
307 # Drop the oldest item
File /opt/hostedtoolcache/Python/3.8.13/x64/lib/python3.8/sre_compile.py:764, in compile(p, flags)
762 if isstring(p):
763 pattern = p
--> 764 p = sre_parse.parse(p,flags)
765 else:
766 pattern = None
File /opt/hostedtoolcache/Python/3.8.13/x64/lib/python3.8/sre_parse.py:948, in parse(str, flags, state)
945 state.str = str
947 try:
--> 948 p = _parse_sub(source,state,flags&SRE_FLAG_VERBOSE,0)
949 except Verbose:
950 # the VERBOSE flag was switched on inside the pattern. to be
951 # on the safe side, we'll parse the whole thing again...
952 state = State()
File /opt/hostedtoolcache/Python/3.8.13/x64/lib/python3.8/sre_parse.py:443, in _parse_sub(source, state, verbose, nested)
441 start = source.tell()
442 while True:
--> 443 itemsappend(_parse(source,state,verbose,nested+1,
444 notnestedandnotitems))
445 if not sourcematch("|"):
446 break
File /opt/hostedtoolcache/Python/3.8.13/x64/lib/python3.8/sre_parse.py:836, in _parse(source, state, verbose, nested, first)
834 p = _parse_sub(source, state, sub_verbose, nested + 1)
835 if not source.match(")"):
--> 836 raise source.error("missing ), unterminated subpattern",
837 source.tell() - start)
838 if group is not None:
839 state.closegroup(group, p)
error: missing ), unterminated subpattern at position 2
```
</details>
|
This is the example that returns an error in `color_sentence`.
```
# Sentence
1990 - British historian Alan John Percivale (A.j.p.) Taylor died.
# Token
(
```
Looks like `"("` (the `word`) isn't escaped properly in the call to `re.subn`:
https://github.com/cleanlab/cleanlab/blob/1a239922fe195d2a6104d6dc3552d53da16380ce/cleanlab/internal/token_classification_utils.py#L175-L177
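A quick demonstration of the failure and of how `re.escape` avoids it (note the escaped pattern still finds no match for a bare parenthesis, which is why the `str.replace` fallback in the code above matters):
```python
import re

sentence = "Alan John Percivale (A.j.p.) Taylor died"
word = "("

try:
    re.subn(r"\b{}\b".format(word), "X", sentence)
except re.error as exc:
    print(exc)  # missing ), unterminated subpattern at position 2

# Escaping lets the pattern compile; "(" has no word boundaries, so there are
# zero regex matches here and the plain string replacement handles it instead.
pattern = r"\b{}\b".format(re.escape(word))
print(re.subn(pattern, "X", sentence))  # (unchanged sentence, 0)
```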
| 2022-09-06T11:06:44 |
cleanlab/cleanlab
| 412 |
cleanlab__cleanlab-412
|
[
"398"
] |
b9ab4ba57b6661a3e19543dae509d864ed76e422
|
diff --git a/docs/source/conf.py b/docs/source/conf.py
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -39,6 +39,7 @@
"sphinx_multiversion",
"sphinx_copybutton",
"sphinxcontrib.katex",
+ "sphinx_autodoc_typehints",
]
numpy_show_class_members = True
@@ -78,7 +79,7 @@
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
-napoleon_use_rtype = True
+napoleon_use_rtype = False
napoleon_preprocess_types = True
napoleon_type_aliases = None
napoleon_attr_annotations = True
|
CI: Suppress complex mypy typing in docs.cleanlab.ai
If docs.cleanlab.ai shows the mypy type annotations, the current signatures look a bit intimidating, e.g. for `cleanlab.internal.validation`:
https://docs.cleanlab.ai/master/cleanlab/internal/validation.html
The plan is to use https://github.com/tox-dev/sphinx-autodoc-typehints to auto-tag parameters in docstrings based on the function signatures.
Consider the docstring for `cleanlab.internal.validation.labels_to_array`:
https://docs.cleanlab.ai/master/cleanlab/internal/validation.html#cleanlab.internal.validation.labels_to_array
The docstring would not change as the tags have been hard-coded. But the signature at the top would read:
```
cleanlab.internal.validation.labels_to_array(y)
```
instead of
```
cleanlab.internal.validation.labels_to_array(y: Union[list, ndarray, Series, DataFrame, generic]) → ndarray
```
This plan requires ironing out details with unrolling/expanding custom types like `LabelLike`.
It will take some effort to review all hard-coded types/tags in docstrings, because they won't be replaced with the auto-tag.
|
Related to this, we might want to show `LabelLike` instead of expanding the type alias. See [this StackOverflow thread](https://stackoverflow.com/questions/60028577/keeping-alias-types-simple-in-python-documentation) for some approaches.
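One option along those lines (a hypothetical sketch; the exact module path of `LabelLike` is assumed): Sphinx's `autodoc_type_aliases` can keep the alias name in rendered signatures, provided the modules that use the alias enable `from __future__ import annotations`.
```python
# docs/source/conf.py (sketch)
autodoc_type_aliases = {
    "LabelLike": "cleanlab.typing.LabelLike",
}
```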
| 2022-09-08T13:57:08 |
|
cleanlab/cleanlab
| 423 |
cleanlab__cleanlab-423
|
[
"418"
] |
78e84963630013b5fd86c31a6f70a1c0a38f3a66
|
diff --git a/cleanlab/token_classification/summary.py b/cleanlab/token_classification/summary.py
--- a/cleanlab/token_classification/summary.py
+++ b/cleanlab/token_classification/summary.py
@@ -9,7 +9,7 @@ def display_issues(
given_words: List[List[str]],
*,
pred_probs: Optional[list] = None,
- given_labels: Optional[list] = None,
+ labels: Optional[list] = None,
exclude: List[Tuple[int, int]] = [],
class_names: Optional[List[str]] = None,
top: int = 20
@@ -33,8 +33,8 @@ def display_issues(
sentence, and `K` is the number of classes predicted by the model. If provided, also displays the predicted
label of the token.
- given_labels:
- list of given labels, such that `given_labels[i]` is a list containing the given labels of the tokens in the
+ labels:
+ list of given labels, such that `labels[i]` is a list containing the given labels of the tokens in the
i'th sentence, and has length equal to the number of given tokens of the i'th sentence. If provided, also
displays the given label of the token.
@@ -66,24 +66,24 @@ class 0 and 1 are not displayed.
if pred_probs:
prediction = pred_probs[i][j].argmax()
- if given_labels:
- given = given_labels[i][j]
- if pred_probs and given_labels:
+ if labels:
+ given = labels[i][j]
+ if pred_probs and labels:
if (given, prediction) in exclude:
continue
if pred_probs and class_names:
prediction = class_names[prediction]
- if given_labels and class_names:
+ if labels and class_names:
given = class_names[given]
shown += 1
print("Sentence %d, token %d: \n%s" % (i, j, color_sentence(sentence, word)))
- if given_labels and not pred_probs:
+ if labels and not pred_probs:
print("Given label: %s\n" % str(given))
- elif not given_labels and pred_probs:
+ elif not labels and pred_probs:
print("Predicted label according to provided pred_probs: %s\n" % str(prediction))
- elif given_labels and pred_probs:
+ elif labels and pred_probs:
print(
"Given label: %s, predicted label according to provided pred_probs: %s\n"
% (str(given), str(prediction))
|
diff --git a/tests/test_token_classification.py b/tests/test_token_classification.py
--- a/tests/test_token_classification.py
+++ b/tests/test_token_classification.py
@@ -231,18 +231,16 @@ def test_issues_from_scores(label_quality_scores):
def test_display_issues():
display_issues(issues, words)
- display_issues(issues, words, given_labels=labels)
+ display_issues(issues, words, labels=labels)
display_issues(issues, words, pred_probs=pred_probs)
- display_issues(issues, words, pred_probs=pred_probs, given_labels=labels)
- display_issues(
- issues, words, pred_probs=pred_probs, given_labels=labels, class_names=class_names
- )
+ display_issues(issues, words, pred_probs=pred_probs, labels=labels)
+ display_issues(issues, words, pred_probs=pred_probs, labels=labels, class_names=class_names)
exclude = [(1, 2)] # Occurs in first token of second sentence "#I"
- display_issues(issues, words, pred_probs=pred_probs, given_labels=labels, exclude=exclude)
+ display_issues(issues, words, pred_probs=pred_probs, labels=labels, exclude=exclude)
top = 1
- display_issues(issues, words, pred_probs=pred_probs, given_labels=labels, top=top)
+ display_issues(issues, words, pred_probs=pred_probs, labels=labels, top=top)
issues_sentence_only = [i for i, _ in issues]
display_issues(issues_sentence_only, words)
|
Token Classification: given_labels -> labels
Rename the argument `given_labels` to just `labels` throughout the module to be more consistent with the rest of the package.
Example: https://github.com/cleanlab/cleanlab/blob/master/cleanlab/token_classification/summary.py#L33
The tutorial notebook needs to be updated correspondingly as well; possibly the example notebook too, but I don't think so.
| 2022-09-09T16:37:29 |
|
cleanlab/cleanlab
| 477 |
cleanlab__cleanlab-477
|
[
"474"
] |
fff06759b7741cc1abf5827a347b1032c7a24d22
|
diff --git a/cleanlab/token_classification/rank.py b/cleanlab/token_classification/rank.py
--- a/cleanlab/token_classification/rank.py
+++ b/cleanlab/token_classification/rank.py
@@ -33,7 +33,7 @@ def get_label_quality_scores(
token_score_method: str = "self_confidence",
sentence_score_method: str = "min",
sentence_score_kwargs: dict = {},
-) -> Union[np.ndarray, Tuple[np.ndarray, list]]:
+) -> Tuple[np.ndarray, list]:
"""
Returns overall quality scores for the labels in each sentence, as well as for the individual tokens' labels in a token classification dataset.
|
fix return type of get_label_quality_scores in token classification
The function only returns a `Tuple[np.ndarray, list]`, but it is annotated with:
https://github.com/cleanlab/cleanlab/blob/fad4eb266dee8b9e2925d3f0d74fe4a81939eb8a/cleanlab/token_classification/rank.py#L36
| 2022-09-20T05:35:00 |
||
cleanlab/cleanlab
| 514 |
cleanlab__cleanlab-514
|
[
"513"
] |
b12d76b6d1dd61e0c536bf5b130304278577fdd7
|
diff --git a/cleanlab/internal/token_classification_utils.py b/cleanlab/internal/token_classification_utils.py
--- a/cleanlab/internal/token_classification_utils.py
+++ b/cleanlab/internal/token_classification_utils.py
@@ -249,10 +249,35 @@ def color_sentence(sentence: str, word: str) -> str:
'This is a \x1b[31msentence\x1b[0m. This is another \x1b[31msentence\x1b[0m.'
"""
colored_word = colored(word, "red")
- colored_sentence, number_of_substitions = re.subn(
- r"\b{}\b".format(re.escape(word)), colored_word, sentence
+ return _replace_sentence(sentence=sentence, word=word, new_word=colored_word)
+
+
+def _replace_sentence(sentence: str, word: str, new_word: str) -> str:
+ """
+ Searches for a given token in the sentence and returns the sentence where the given token has been replaced by
+ `new_word`.
+
+ Parameters
+ ----------
+ sentence:
+ a sentence where the word is searched
+
+ word:
+ keyword to find in `sentence`. Assumes the word exists in the sentence.
+
+ new_word:
+ the word to replace the keyword with
+
+ Returns
+ ---------
+ new_sentence:
+ `sentence` where the every occurrence of the word is replaced by `colored_word`
+ """
+
+ new_sentence, number_of_substitions = re.subn(
+ r"\b{}\b".format(re.escape(word)), new_word, sentence
)
if number_of_substitions == 0:
# Use basic string manipulation if regex fails
- colored_sentence = sentence.replace(word, colored_word)
- return colored_sentence
+ new_sentence = sentence.replace(word, new_word)
+ return new_sentence
|
diff --git a/tests/test_token_classification.py b/tests/test_token_classification.py
--- a/tests/test_token_classification.py
+++ b/tests/test_token_classification.py
@@ -5,6 +5,7 @@
mapping,
merge_probs,
color_sentence,
+ _replace_sentence,
)
from cleanlab.token_classification.filter import find_label_issues
from cleanlab.token_classification.rank import (
@@ -134,6 +135,7 @@ def test_merge_probs_with_normalization():
"sentence,word,expected",
[
("Hello World", "World", f"Hello {C_L}World{C_R}"),
+ ("Hello World", "help", "Hello World"),
("If you and I were to meet", "I", f"If you and {C_L}I{C_R} were to meet"),
("If you and I were to meet", "If you and I", f"{C_L}If you and I{C_R} were to meet"),
("If you and I were to meet", "If you and I w", f"{C_L}If you and I w{C_R}ere to meet"),
@@ -149,6 +151,7 @@ def test_merge_probs_with_normalization():
],
ids=[
"single_word",
+ "no_match",
"ignore_subwords",
"multi-token_match",
"substring_replacement",
@@ -159,11 +162,49 @@ def test_merge_probs_with_normalization():
"issue_403-escape_special_regex_characters",
],
)
-def test_color_sentence(sentence, word, expected):
+def test_color_sentence(monkeypatch: pytest.MonkeyPatch, sentence, word, expected):
+ monkeypatch.setattr("sys.stdout.isatty", lambda: True)
+
colored = color_sentence(sentence, word)
assert colored == expected
[email protected](
+ "sentence,word,expected",
+ [
+ ("Hello World", "World", "Hello [EXPECTED]"),
+ ("Hello World", "help", "Hello World"),
+ ("If you and I were to meet", "I", "If you and [EXPECTED] were to meet"),
+ ("If you and I were to meet", "If you and I", "[EXPECTED] were to meet"),
+ ("If you and I were to meet", "If you and I w", "[EXPECTED]ere to meet"),
+ ("I think I know this", "I", "[EXPECTED] think [EXPECTED] know this"),
+ ("A good reason for a test", "a", "A good reason for [EXPECTED] test"),
+ ("ab ab a b ab", "ab a", "ab [EXPECTED] b ab"),
+ ("ab ab ab ab", "ab a", "[EXPECTED]b [EXPECTED]b"),
+ (
+ "Alan John Percivale (A.j.p.) Taylor died",
+ "(",
+ "Alan John Percivale [EXPECTED]A.j.p.) Taylor died",
+ ),
+ ],
+ ids=[
+ "single_word",
+ "no_match",
+ "ignore_subwords",
+ "multi-token_match",
+ "substring_replacement",
+ "multiple_matches",
+ "case_sensitive",
+ "only_word_boundary",
+ "non_overlapping_substrings",
+ "issue_403-escape_special_regex_characters",
+ ],
+)
+def test_replace_sentence(sentence, word, expected):
+ new_sentence = _replace_sentence(sentence, word, "[EXPECTED]")
+ assert new_sentence == expected
+
+
issues = find_label_issues(labels, pred_probs)
|
Ensure unit tests work with termcolor v2.1.0
tests/test_token_classification.py currently fails after termcolor was upgraded to v2.1.0; specifically, `test_color_sentence` fails.
- [ ] update the unit test code so the tests pass with termcolor v2.1.0
- [ ] ensure the new unit test code also works with older versions of termcolor pre v2.1.0 (suboptimal but ok if the unit tests only work with versions post v2.0.0, as long as the package works with all termcolor versions currently supported).
- [ ] remove the version upper bound on termcolor if one has been added to the package in the meantime.
https://pypi.org/project/termcolor/
https://github.com/termcolor/termcolor/pull/25/files
https://github.com/cleanlab/cleanlab/actions/runs/3357515340/jobs/5563372689
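For reference, termcolor v2.1.0 only emits ANSI codes when the output stream looks like a real terminal (which appears to be what the linked PR is about), so one way to keep the existing assertions is to force that condition inside the test; a sketch:
```python
import pytest

def test_color_sentence_emits_ansi(monkeypatch: pytest.MonkeyPatch):
    # termcolor >= 2.1 checks sys.stdout.isatty() before coloring,
    # so pretend the test is attached to a terminal.
    monkeypatch.setattr("sys.stdout.isatty", lambda: True)

    from cleanlab.internal.token_classification_utils import color_sentence

    assert color_sentence("Hello World", "World") == "Hello \x1b[31mWorld\x1b[0m"
```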
| 2022-10-31T11:02:20 |
|
cleanlab/cleanlab
| 563 |
cleanlab__cleanlab-563
|
[
"556"
] |
0e4e38a9ef49d8dfcbc60a6f64419f0c1d78bc6b
|
diff --git a/cleanlab/internal/multilabel_scorer.py b/cleanlab/internal/multilabel_scorer.py
--- a/cleanlab/internal/multilabel_scorer.py
+++ b/cleanlab/internal/multilabel_scorer.py
@@ -165,11 +165,11 @@ def exponential_moving_average(
.. math::
- \\text{EMA}_t = \\alpha \cdot s_t + (1 - \\alpha) \cdot \\text{EMA}_{t-1}, \\qquad 0 \\leq \\alpha \\leq 1
+ \text{EMA}_t = \alpha \cdot s_t + (1 - \alpha) \cdot \text{EMA}_{t-1}, \qquad 0 \leq \alpha \leq 1
- We set :math:`\\text{EMA}_1 = s_1` as the largest score in the sorted vector s.
+ We set :math:`\text{EMA}_1 = s_1` as the largest score in the sorted vector s.
- :math:`\\alpha` is the "forgetting factor" that gives more weight to the
+ :math:`\alpha` is the "forgetting factor" that gives more weight to the
most recent scores, and successively less weight to the previous scores.
Parameters
|
Docs: Fix math in EMA docstring
I may have broken the math display format in the EMA label-score docstring by adding extra slashes, but the docs build was also broken the way it was before, with a single slash (there was some complaint about escaping this character that I don't precisely recall).

After this issue is fixed, we will want to replace the relevant section of the HTML file of the v2.2 docs in the cleanlab-docs repo to update the stable documentation as well. I would be very cautious about replacing the entire HTML file and would rather manually replace just the relevant section of that file.
## Code
https://github.com/cleanlab/cleanlab/blob/a14d810b9bf9f4f0ed7aae38a397b57e630f73be/cleanlab/internal/multilabel_scorer.py#L168
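For reference, the formula the docstring is meant to render (matching the corrected source in the patch above):
```latex
\text{EMA}_t = \alpha \cdot s_t + (1 - \alpha) \cdot \text{EMA}_{t-1},
\qquad 0 \leq \alpha \leq 1, \qquad \text{EMA}_1 = s_1
```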
| 2022-12-12T14:04:19 |
||
cleanlab/cleanlab
| 760 |
cleanlab__cleanlab-760
|
[
"735"
] |
afdb667501381ebc3c3977a4cbcfc9adcd556dea
|
diff --git a/cleanlab/datalab/issue_finder.py b/cleanlab/datalab/issue_finder.py
--- a/cleanlab/datalab/issue_finder.py
+++ b/cleanlab/datalab/issue_finder.py
@@ -209,13 +209,16 @@ def _resolve_required_args(self, pred_probs, features, knn_graph):
features :
Name of column containing precomputed embeddings.
+ knn_graph :
+ Sparse matrix representing distances between examples in the dataset in a k nearest neighbor graph.
+
Returns
-------
args_dict :
Dictionary of required arguments for each issue type, if available.
"""
args_dict = {
- "label": {"pred_probs": pred_probs},
+ "label": {"pred_probs": pred_probs, "features": features},
"outlier": {"pred_probs": pred_probs, "features": features, "knn_graph": knn_graph},
"near_duplicate": {"features": features, "knn_graph": knn_graph},
"non_iid": {"features": features, "knn_graph": knn_graph},
diff --git a/cleanlab/datalab/issue_manager/label.py b/cleanlab/datalab/issue_manager/label.py
--- a/cleanlab/datalab/issue_manager/label.py
+++ b/cleanlab/datalab/issue_manager/label.py
@@ -17,6 +17,9 @@
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional
+from sklearn.neighbors import KNeighborsClassifier
+from sklearn.preprocessing import OneHotEncoder
+
import numpy as np
from cleanlab.classification import CleanLearning
@@ -25,7 +28,7 @@
if TYPE_CHECKING: # pragma: no cover
import pandas as pd
-
+ import numpy.typing as npt
from cleanlab.datalab.datalab import Datalab
@@ -37,6 +40,10 @@ class LabelIssueManager(IssueManager):
datalab :
A Datalab instance.
+ k :
+ The number of nearest neighbors to consider when computing pred_probs from features.
+ Only applicable if features are provided and pred_probs are not.
+
clean_learning_kwargs :
Keyword arguments to pass to the :py:meth:`CleanLearning <cleanlab.classification.CleanLearning>` constructor.
@@ -61,12 +68,14 @@ class LabelIssueManager(IssueManager):
def __init__(
self,
datalab: Datalab,
+ k: int = 10,
clean_learning_kwargs: Optional[Dict[str, Any]] = None,
health_summary_parameters: Optional[Dict[str, Any]] = None,
**_,
):
super().__init__(datalab)
self.cl = CleanLearning(**(clean_learning_kwargs or {}))
+ self.k = k
self.health_summary_parameters: Dict[str, Any] = (
health_summary_parameters.copy() if health_summary_parameters else {}
)
@@ -118,9 +127,39 @@ def _reset(self) -> None:
def find_issues(
self,
- pred_probs: np.ndarray,
+ pred_probs: Optional[npt.NDArray] = None,
+ features: Optional[npt.NDArray] = None,
**kwargs,
) -> None:
+ """Find label issues in the datalab.
+
+ Parameters
+ ----------
+ pred_probs :
+ The predicted probabilities for each example.
+
+ features :
+ The features for each example.
+ """
+ if pred_probs is None:
+ if features is None:
+ raise ValueError(
+ "Either pred_probs or features must be provided to find label issues."
+ )
+ # produce out-of-sample pred_probs from features
+ knn = KNeighborsClassifier(n_neighbors=self.k + 1)
+ knn.fit(features, self.datalab.labels)
+ pred_probs = knn.predict_proba(features)
+
+ encoder = OneHotEncoder()
+ label_transform = self.datalab.labels.reshape(-1, 1)
+ one_hot_label = encoder.fit_transform(label_transform)
+
+ # adjust pred_probs so it is out-of-sample
+ pred_probs = np.asarray(
+ (pred_probs - 1 / (self.k + 1) * one_hot_label) * (self.k + 1) / self.k
+ )
+
self.health_summary_parameters.update({"pred_probs": pred_probs})
# Find examples with label issues
self.issues = self.cl.find_label_issues(
|
diff --git a/tests/datalab/test_datalab.py b/tests/datalab/test_datalab.py
--- a/tests/datalab/test_datalab.py
+++ b/tests/datalab/test_datalab.py
@@ -809,8 +809,8 @@ def test_incremental_search(self, pred_probs, random_embeddings):
lab = Datalab(data=data, label_name="labels")
lab.find_issues(features=random_embeddings)
summary = lab.get_issue_summary()
- assert len(summary) == 3
- assert "label" not in summary["issue_type"].values
+ assert len(summary) == 4
+ assert "label" in summary["issue_type"].values
lab.find_issues(pred_probs=pred_probs, issue_types={"label": {}})
summary = lab.get_issue_summary()
assert len(summary) == 4
@@ -818,6 +818,38 @@ def test_incremental_search(self, pred_probs, random_embeddings):
label_summary = lab.get_issue_summary("label")
assert label_summary["num_issues"].values[0] > 0
+ def test_build_pred_probs_from_features(self, random_embeddings):
+ data = {"labels": np.random.randint(0, 2, 100)}
+ lab = Datalab(data=data, label_name="labels")
+ lab.find_issues(features=random_embeddings, issue_types={"label": {}})
+ summary = lab.get_issue_summary()
+ assert len(summary) == 1
+ assert "label" in summary["issue_type"].values
+ lab.find_issues(features=random_embeddings, issue_types={"label": {"k": 5}})
+ summary = lab.get_issue_summary()
+ assert len(summary) == 1
+ assert "label" in summary["issue_type"].values
+
+ def test_pred_probs_precedence(self, pred_probs, random_embeddings):
+ data = {"labels": np.random.randint(0, 2, 100)}
+ lab = Datalab(data=data, label_name="labels")
+ lab.find_issues(pred_probs=pred_probs, issue_types={"label": {}})
+ summary = lab.get_issue_summary()
+ assert "label" in summary["issue_type"].values
+ label_summary_pred_probs = lab.get_issue_summary("label")
+ assert label_summary_pred_probs["num_issues"].values[0] > 0
+ lab = Datalab(data=data, label_name="labels")
+ lab.find_issues(
+ features=random_embeddings, pred_probs=pred_probs, issue_types={"label": {}}
+ )
+ summary = lab.get_issue_summary()
+ assert "label" in summary["issue_type"].values
+ label_summary_both = lab.get_issue_summary("label")
+ assert (
+ label_summary_both["num_issues"].values[0]
+ == label_summary_pred_probs["num_issues"].values[0]
+ )
+
class TestDatalabFindOutlierIssues:
@pytest.fixture
@@ -930,5 +962,6 @@ def test_find_issues_features_works_with_and_without_labels(self, features, labe
issues_with_labels = lab_with_labels.issues
issues_without_label_name = lab_without_label_name.issues
- pd.testing.assert_frame_equal(issues_without_labels, issues_with_labels)
+ # issues_with_labels should have two additional columns about label issues
+ assert len(issues_without_labels.columns) + 2 == len(issues_with_labels.columns)
pd.testing.assert_frame_equal(issues_without_labels, issues_without_label_name)
|
Extend label issue detection in Datalab to work even without pred_probs input
Goal: extend the label issue check in Datalab to work even if the user only provides `features` and `labels` to `Datalab.find_issues()`.
There are multiple ways this can be achieved:
Option 1 (easiest): Use sklearn's `KNeighborsClassifier` (or `LogisticRegression`) applied to `X=features, y=labels` in order to produce out-of-sample `pred_probs`, and then continue as usual.
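A rough sketch of Option 1 (a hypothetical helper, not the final implementation; it ignores the correction for each example voting for itself as a neighbor):
```python
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from cleanlab.filter import find_label_issues

def label_issues_from_features(features: np.ndarray, labels: np.ndarray, k: int = 10):
    # Fit a simple K-NN classifier on the provided features/labels and use its
    # predicted class probabilities as pred_probs for the usual label check.
    knn = KNeighborsClassifier(n_neighbors=k)
    pred_probs = knn.fit(features, labels).predict_proba(features)
    return find_label_issues(labels=labels, pred_probs=pred_probs)
```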
Option 2: Use methods from other papers like these (requires benchmarking them first):
- [SelfClean: A Self-Supervised Data Cleaning Strategy](https://arxiv.org/abs/2305.17048)
- [Detecting Corrupted Labels Without Training a Model to Predict](https://arxiv.org/abs/2110.06283)
| 2023-07-04T02:43:48 |
|
cleanlab/cleanlab
| 856 |
cleanlab__cleanlab-856
|
[
"810"
] |
8b8bf780aa219bd358c453b8429b59eb03df0c6e
|
diff --git a/cleanlab/datalab/internal/issue_manager/null.py b/cleanlab/datalab/internal/issue_manager/null.py
new file mode 100644
--- /dev/null
+++ b/cleanlab/datalab/internal/issue_manager/null.py
@@ -0,0 +1,148 @@
+from __future__ import annotations
+
+from collections import Counter
+from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, List
+
+import numpy as np
+import pandas as pd
+from numpy import ndarray
+
+from cleanlab.datalab.internal.issue_manager import IssueManager
+
+if TYPE_CHECKING: # pragma: no cover
+ import numpy.typing as npt
+
+
+class NullIssueManager(IssueManager):
+ """Manages issues related to null/missing values in the rows of features.
+
+ Parameters
+ ----------
+ datalab :
+ The Datalab instance that this issue manager searches for issues in.
+ """
+
+ description: ClassVar[
+ str
+ ] = """Whether the dataset has any missing/null values
+ """
+ issue_name: ClassVar[str] = "null"
+ verbosity_levels = {
+ 0: ["average_null_score"],
+ 1: ["most_common_issue"],
+ 2: [],
+ }
+
+ @staticmethod
+ def _calculate_null_issues(features: npt.NDArray) -> tuple[ndarray, ndarray, Any]:
+ """Tracks the number of null values in each row of a feature array,
+ computes quality scores based on the fraction of null values in each row,
+ and returns a boolean array indicating whether each row only has null values."""
+ rows = features.shape[0]
+ cols = features.shape[1]
+ scores = np.ones(rows).astype(np.float32)
+ is_null_issue = np.full(rows, False)
+ null_tracker = np.isnan(features)
+ if null_tracker.any():
+ for row in range(rows):
+ if null_tracker[row].any():
+ non_null_col_count = np.count_nonzero(~null_tracker[row])
+ scores[row] = non_null_col_count / cols
+ if scores[row] == 0.00:
+ is_null_issue[row] = True
+ return is_null_issue, scores, null_tracker
+
+ def find_issues(
+ self,
+ features: Optional[npt.NDArray] = None,
+ **kwargs,
+ ) -> None:
+ if features is None:
+ raise ValueError("features must be provided to check for null values.")
+ is_null_issue, scores, null_tracker = self._calculate_null_issues(features=features)
+
+ self.issues = pd.DataFrame(
+ {
+ f"is_{self.issue_name}_issue": is_null_issue,
+ self.issue_score_key: scores,
+ },
+ )
+
+ self.summary = self.make_summary(score=scores.mean())
+ self.info = self.collect_info(null_tracker)
+
+ @staticmethod
+ def _most_common_issue(
+ null_tracker: np.ndarray,
+ ) -> dict[str, dict[str, str | int | list[int] | list[int | None]]]:
+ """
+ Identify and return the most common null value pattern across all rows
+ and count the number of rows with this pattern.
+
+ Parameters
+ ------------
+ null_tracker : np.ndarray
+ A boolean array of the same shape as features, where True indicates null/missing entries.
+
+ Returns
+ --------
+ Dict[str, Any]
+ A dictionary containing the most common issue pattern and the count of rows with this pattern.
+ """
+ # Convert the boolean null_tracker matrix into a list of strings.
+ most_frequent_pattern = "no_null"
+ rows_affected: List[int] = []
+ occurrence_of_most_frequent_pattern = 0
+ if null_tracker.any():
+ null_patterns_as_strings = [
+ "".join(map(str, row.astype(int).tolist())) for row in null_tracker if row.any()
+ ]
+
+ # Use Counter to efficiently count occurrences and find the most common pattern.
+ pattern_counter = Counter(null_patterns_as_strings)
+ (
+ most_frequent_pattern,
+ occurrence_of_most_frequent_pattern,
+ ) = pattern_counter.most_common(1)[0]
+ rows_affected = []
+ for idx, row in enumerate(null_patterns_as_strings):
+ if row == most_frequent_pattern:
+ rows_affected.append(idx)
+ return {
+ "most_common_issue": {
+ "pattern": most_frequent_pattern,
+ "rows_affected": rows_affected,
+ "count": occurrence_of_most_frequent_pattern,
+ }
+ }
+
+ @staticmethod
+ def _column_impact(null_tracker: np.ndarray) -> Dict[str, List[float]]:
+ """
+ Calculate and return the impact of null values per column, represented as the proportion
+ of rows having null values in each column.
+
+ Parameters
+ ----------
+ null_tracker : np.ndarray
+ A boolean array of the same shape as features, where True indicates null/missing entries.
+
+ Returns
+ -------
+ Dict[str, List[float]]
+ A dictionary containing the impact per column, with values being a list
+ where each element is the percentage of rows having null values in the corresponding column.
+ """
+ # Calculate proportion of nulls in each column
+ proportion_of_nulls_per_column = null_tracker.mean(axis=0)
+
+ # Return result as a dictionary containing a list of proportions
+ return {"column_impact": proportion_of_nulls_per_column.tolist()}
+
+ def collect_info(self, null_tracker: np.ndarray) -> dict:
+ most_common_issue = self._most_common_issue(null_tracker=null_tracker)
+ column_impact = self._column_impact(null_tracker=null_tracker)
+ average_null_score = {"average_null_score": self.issues[self.issue_score_key].mean()}
+ issues_dict = {**average_null_score, **most_common_issue, **column_impact}
+ info_dict: Dict[str, Any] = {**issues_dict}
+ return info_dict
|
diff --git a/tests/datalab/issue_manager/test_null.py b/tests/datalab/issue_manager/test_null.py
new file mode 100644
--- /dev/null
+++ b/tests/datalab/issue_manager/test_null.py
@@ -0,0 +1,178 @@
+import numpy as np
+import pytest
+from hypothesis.extra.numpy import arrays, array_shapes
+from hypothesis.strategies import floats, just
+from hypothesis import HealthCheck, given, settings
+
+from cleanlab.datalab.internal.issue_manager.null import NullIssueManager
+
+SEED = 42
+
+
+class TestNullIssueManager:
+ @pytest.fixture
+ def embeddings(self):
+ np.random.seed(SEED)
+ embeddings_array = np.random.random((4, 3))
+ return embeddings_array
+
+ @pytest.fixture
+ def embeddings_with_null(self):
+ np.random.seed(SEED)
+ embeddings_array = np.random.random((4, 3))
+ embeddings_array[0][0] = np.NaN
+ embeddings_array[1] = np.NaN
+ return embeddings_array
+
+ @pytest.fixture
+ def issue_manager(self, lab):
+ return NullIssueManager(datalab=lab)
+
+ def test_init(self, lab, issue_manager):
+ assert issue_manager.datalab == lab
+
+ def test_find_issues(self, issue_manager, embeddings):
+ np.random.seed(SEED)
+ issue_manager.find_issues(features=embeddings)
+ issues_sort, summary_sort, info_sort = (
+ issue_manager.issues,
+ issue_manager.summary,
+ issue_manager.info,
+ )
+ expected_sorted_issue_mask = np.array([False, False, False, False])
+ assert np.all(
+ issues_sort["is_null_issue"] == expected_sorted_issue_mask
+ ), "Issue mask should be correct"
+ assert summary_sort["issue_type"][0] == "null"
+ assert summary_sort["score"][0] == pytest.approx(expected=1.0, abs=1e-7)
+ assert (
+ info_sort.get("average_null_score", None) is not None
+ ), "Should have average null score"
+ assert summary_sort["score"][0] == pytest.approx(
+ expected=info_sort["average_null_score"], abs=1e-7
+ )
+
+ def test_find_issues_with_null(self, issue_manager, embeddings_with_null):
+ np.random.seed(SEED)
+ issue_manager.find_issues(features=embeddings_with_null)
+ issues_sort, summary_sort, info_sort = (
+ issue_manager.issues,
+ issue_manager.summary,
+ issue_manager.info,
+ )
+ expected_sorted_issue_mask = np.array([False, True, False, False])
+ assert np.all(
+ issues_sort["is_null_issue"] == expected_sorted_issue_mask
+ ), "Issue mask should be correct"
+ assert summary_sort["issue_type"][0] == "null"
+ assert summary_sort["score"][0] == pytest.approx(expected=8 / 12, abs=1e-7)
+ assert (
+ info_sort.get("average_null_score", None) is not None
+ ), "Should have average null score"
+ assert summary_sort["score"][0] == pytest.approx(
+ expected=info_sort["average_null_score"], abs=1e-7
+ )
+
+ def test_report(self, issue_manager, embeddings):
+ np.random.seed(SEED)
+ issue_manager.find_issues(features=embeddings)
+ report = issue_manager.report(
+ issues=issue_manager.issues,
+ summary=issue_manager.summary,
+ info=issue_manager.info,
+ )
+
+ assert isinstance(report, str)
+ assert (
+ "----------------------- null issues ------------------------\n\n"
+ "Number of examples with this issue:"
+ ) in report
+
+ report = issue_manager.report(
+ issues=issue_manager.issues,
+ summary=issue_manager.summary,
+ info=issue_manager.info,
+ verbosity=3,
+ )
+ assert "Additional Information: " in report
+
+ def test_report_with_null(self, issue_manager, embeddings_with_null):
+ np.random.seed(SEED)
+ issue_manager.find_issues(features=embeddings_with_null)
+ report = issue_manager.report(
+ issues=issue_manager.issues,
+ summary=issue_manager.summary,
+ info=issue_manager.info,
+ )
+
+ assert isinstance(report, str)
+ assert (
+ "----------------------- null issues ------------------------\n\n"
+ "Number of examples with this issue:"
+ ) in report
+
+ report = issue_manager.report(
+ issues=issue_manager.issues,
+ summary=issue_manager.summary,
+ info=issue_manager.info,
+ verbosity=3,
+ )
+ assert "Additional Information: " in report
+
+ def test_collect_info(self, issue_manager, embeddings):
+ """Test some values in the info dict."""
+ issue_manager.find_issues(features=embeddings)
+ info = issue_manager.info
+ assert info["average_null_score"] == 1.0
+ assert info["most_common_issue"]["pattern"] == "no_null"
+ assert info["most_common_issue"]["count"] == 0
+ assert info["most_common_issue"]["rows_affected"] == []
+ assert info["column_impact"] == [0, 0, 0]
+
+ def test_collect_info_with_nulls(self, issue_manager, embeddings_with_null):
+ """Test some values in the info dict."""
+ issue_manager.find_issues(features=embeddings_with_null)
+ info = issue_manager.info
+ assert info["average_null_score"] == pytest.approx(expected=8 / 12, abs=1e-7)
+ assert info["most_common_issue"]["pattern"] == "100"
+ assert info["most_common_issue"]["count"] == 1
+ assert info["most_common_issue"]["rows_affected"] == [0]
+ assert info["column_impact"] == [0.5, 0.25, 0.25]
+
+ # Strategy for generating NaN values
+ nan_strategy = just(np.nan)
+
+ # Strategy for generating regular float values, including NaNs
+ float_with_nan = floats(allow_nan=True)
+
+ # Strategy for generating NumPy arrays with some NaN values
+ features_with_nan_strategy = arrays(
+ dtype=np.float64,
+ shape=array_shapes(min_dims=2, max_dims=2, min_side=1, max_side=5),
+ elements=float_with_nan,
+ fill=nan_strategy,
+ )
+
+ @settings(
+ suppress_health_check=[HealthCheck.function_scoped_fixture]
+ ) # No need to reset state of issue_manager fixture
+ @given(embeddings=features_with_nan_strategy)
+ def test_quality_scores_and_full_null_row_identification(self, issue_manager, embeddings):
+ # Run the find_issues method
+ issue_manager.find_issues(features=embeddings)
+ issues_sort, _, _ = (
+ issue_manager.issues,
+ issue_manager.summary,
+ issue_manager.info,
+ )
+
+ # Check for the two main properties:
+
+ # 1. The quality score for each row should be the fraction of features which are not null in that row.
+ non_null_fractions = [np.count_nonzero(~np.isnan(row)) / len(row) for row in embeddings]
+ scores = issues_sort[issue_manager.issue_score_key]
+ assert np.allclose(scores, non_null_fractions, atol=1e-7)
+
+ # 2. The rows that are marked as is_null_issue should ONLY be those rows which are 100% null values.
+ all_rows_are_null = np.all(np.isnan(embeddings), axis=1)
+ assert np.all(issues_sort["is_null_issue"] == all_rows_are_null)
|
Datalab issue type for null/missing feature values
[New Datalab issue type](https://docs.cleanlab.ai/master/cleanlab/datalab/guide/custom_issue_manager.html) called something like `null` that checks `features` for rows that are entirely missing / null values (across all columns).
Those rows should get flagged as `is_null_issue`.
The quality score for each row can be the fraction of `features` which are missing in that row.
Make sure this issue check does not waste compute time if it is irrelevant, i.e. first check whether any missing values exist in the `features` at all before proceeding further.
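For illustration, here is a minimal hedged sketch of the requested check (this is not the NullIssueManager merged in the patch above; the function name is made up, and the score follows the accompanying tests, where it is the fraction of non-null entries so that 0.0 marks a fully-null row):
```
# Hedged sketch: flag rows whose feature values are all missing, scoring each row by its
# fraction of non-null entries (lower scores mean more missing values in that row).
import numpy as np
import pandas as pd

def null_issue_sketch(features: np.ndarray) -> pd.DataFrame:
    null_mask = pd.isna(features)          # True wherever a value is missing
    n = features.shape[0]
    if not null_mask.any():                # cheap early exit: no missing values at all
        return pd.DataFrame({"is_null_issue": np.zeros(n, dtype=bool),
                             "null_score": np.ones(n)})
    non_null_fraction = 1.0 - null_mask.mean(axis=1)
    return pd.DataFrame({"is_null_issue": non_null_fraction == 0.0,   # entirely-null rows
                         "null_score": non_null_fraction})
```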
|
@jwmueller can you assign this issue to me?
| 2023-10-03T18:32:26 |
cleanlab/cleanlab
| 857 |
cleanlab__cleanlab-857
|
[
"808"
] |
8b8bf780aa219bd358c453b8429b59eb03df0c6e
|
diff --git a/cleanlab/datalab/internal/issue_finder.py b/cleanlab/datalab/internal/issue_finder.py
--- a/cleanlab/datalab/internal/issue_finder.py
+++ b/cleanlab/datalab/internal/issue_finder.py
@@ -199,7 +199,7 @@ def _resolve_required_args(self, pred_probs, features, knn_graph):
"label": {"pred_probs": pred_probs, "features": features},
"outlier": {"pred_probs": pred_probs, "features": features, "knn_graph": knn_graph},
"near_duplicate": {"features": features, "knn_graph": knn_graph},
- "non_iid": {"features": features, "knn_graph": knn_graph},
+ "non_iid": {"pred_probs": pred_probs, "features": features, "knn_graph": knn_graph},
}
args_dict = {
diff --git a/cleanlab/datalab/internal/issue_manager/noniid.py b/cleanlab/datalab/internal/issue_manager/noniid.py
--- a/cleanlab/datalab/internal/issue_manager/noniid.py
+++ b/cleanlab/datalab/internal/issue_manager/noniid.py
@@ -1,6 +1,6 @@
from __future__ import annotations
-from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union, cast
+from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union, cast, Tuple
import warnings
import itertools
@@ -124,38 +124,103 @@ def __init__(
self.seed = seed
self.significance_threshold = significance_threshold
- def find_issues(self, features: Optional[npt.NDArray] = None, **kwargs) -> None:
+ # TODO: Temporary flag introduced to decide on storing knn graphs based on pred_probs.
+ # Revisit and finalize the implementation.
+ self._skip_storing_knn_graph_for_pred_probs: bool = False
+
+ @staticmethod
+ def _determine_features(
+ features: Optional[npt.NDArray],
+ pred_probs: Optional[np.ndarray],
+ ) -> npt.NDArray:
+ """
+ Determines the feature array to be used for the non-IID check. Prioritizing the original features array over pred_probs.
+
+ Parameters
+ ----------
+ features :
+ Original feature array or None.
+
+ pred_probs :
+ Predicted probabilities array or None.
+
+ Returns
+ -------
+ features_to_use :
+ Either the original feature array or the predicted probabilities array,
+ intended to be used for the non-IID check.
+
+ Raises
+ ------
+ ValueError :
+ If both `features` and `pred_probs` are None.
+ """
+ if features is not None:
+ return features
+
+ if pred_probs is not None:
+ return pred_probs
+
+ raise ValueError(
+ "If a knn_graph is not provided, either 'features' or 'pred_probs' must be provided to fit a new knn."
+ )
+
+ def _select_features_and_setup_knn(
+ self,
+ features: Optional[npt.NDArray],
+ pred_probs: Optional[np.ndarray],
+ knn_graph: Optional[csr_matrix],
+ metric_changes: bool,
+ ) -> Tuple[Optional[NearestNeighbors], npt.NDArray]:
+ """
+ Selects features (or pred_probs if features are None) and sets up a NearestNeighbors object if needed.
+
+ # Add type-hints and document the arguments.
+ """
+ if features is None and pred_probs is not None:
+ self._skip_storing_knn_graph_for_pred_probs = True
+ features_to_use = self._determine_features(features, pred_probs)
+
+ if self.metric is None:
+ self.metric = "cosine" if features_to_use.shape[1] > 3 else "euclidean"
+
+ if knn_graph is not None and not metric_changes:
+ return None, features_to_use
+
+ knn = NearestNeighbors(n_neighbors=self.k, metric=self.metric)
+
+ if self.metric != knn.metric:
+ warnings.warn(
+ f"Metric {self.metric} does not match metric {knn.metric} used to fit knn. "
+ "Most likely an existing NearestNeighbors object was passed in, but a different "
+ "metric was specified."
+ )
+ self.metric = knn.metric
+
+ try:
+ check_is_fitted(knn)
+ except NotFittedError:
+ knn.fit(features_to_use)
+
+ return knn, features_to_use
+
+ def find_issues(
+ self,
+ features: Optional[npt.NDArray] = None,
+ pred_probs: Optional[np.ndarray] = None,
+ **kwargs,
+ ) -> None:
knn_graph = self._process_knn_graph_from_inputs(kwargs)
old_knn_metric = self.datalab.get_info("statistics").get("knn_metric")
metric_changes = self.metric and self.metric != old_knn_metric
-
- knn = None # Won't be used if knn_graph is not None
+ knn, features_used = self._select_features_and_setup_knn(
+ features, pred_probs, knn_graph, metric_changes
+ )
if knn_graph is None or metric_changes:
- if features is None:
- raise ValueError(
- "If a knn_graph is not provided, features must be provided to fit a new knn."
- )
-
- if self.metric is None:
- self.metric = "cosine" if features.shape[1] > 3 else "euclidean"
- knn = NearestNeighbors(n_neighbors=self.k, metric=self.metric)
-
- if self.metric and self.metric != knn.metric:
- warnings.warn(
- f"Metric {self.metric} does not match metric {knn.metric} used to fit knn. "
- "Most likely an existing NearestNeighbors object was passed in, but a different "
- "metric was specified."
- )
- self.metric = knn.metric
-
- try:
- check_is_fitted(knn)
- except NotFittedError:
- knn.fit(features)
-
self.neighbor_index_choices = self._get_neighbors(knn=knn)
else:
+ self._skip_storing_knn_graph_for_pred_probs = False
self.neighbor_index_choices = self._get_neighbors(knn_graph=knn_graph)
self.num_neighbors = self.k
@@ -234,6 +299,8 @@ def collect_info(
def _build_statistics_dictionary(self, knn_graph: csr_matrix) -> Dict[str, Dict[str, Any]]:
statistics_dict: Dict[str, Dict[str, Any]] = {"statistics": {}}
+ if self._skip_storing_knn_graph_for_pred_probs:
+ return statistics_dict
# Add the knn graph as a statistic if necessary
graph_key = "weighted_knn_graph"
old_knn_graph = self.datalab.get_info("statistics").get(graph_key, None)
|
diff --git a/tests/datalab/issue_manager/test_noniid.py b/tests/datalab/issue_manager/test_noniid.py
--- a/tests/datalab/issue_manager/test_noniid.py
+++ b/tests/datalab/issue_manager/test_noniid.py
@@ -60,6 +60,13 @@ def embeddings(self, lab):
embeddings_array = np.arange(lab.get_info("statistics")["num_examples"] * 10).reshape(-1, 1)
return embeddings_array
+ @pytest.fixture
+ def pred_probs(self, lab):
+ pred_probs_array = (
+ np.arange(lab.get_info("statistics")["num_examples"] * 10).reshape(-1, 1)
+ ) / len(np.arange(lab.get_info("statistics")["num_examples"] * 10).reshape(-1, 1))
+ return pred_probs_array
+
@pytest.fixture
def issue_manager(self, lab):
return NonIIDIssueManager(
@@ -116,7 +123,49 @@ def test_find_issues(self, issue_manager, embeddings):
issues_perm["is_non_iid_issue"] == expected_permuted_issue_mask
), "Issue mask should be correct"
assert summary_perm["issue_type"][0] == "non_iid"
- # ensure score is large, cannot easily ensure precise value because random seed has different effects on different OS:
+ # ensure score is large, cannot easily ensure precise value because random seed has different effects on
+ # different OS:
+ assert summary_perm["score"][0] > 0.05
+ assert info_perm.get("p-value", None) is not None, "Should have p-value"
+ assert summary_perm["score"][0] == pytest.approx(expected=info_perm["p-value"], abs=1e-7)
+
+ def test_find_issues_using_pred_probs(self, issue_manager, pred_probs):
+ np.random.seed(SEED)
+ issue_manager.find_issues(pred_probs=pred_probs)
+ issues_sort, summary_sort, info_sort = (
+ issue_manager.issues,
+ issue_manager.summary,
+ issue_manager.info,
+ )
+ expected_sorted_issue_mask = np.array([False] * 46 + [True] + [False] * 3)
+ assert np.all(
+ issues_sort["is_non_iid_issue"] == expected_sorted_issue_mask
+ ), "Issue mask should be correct"
+ assert summary_sort["issue_type"][0] == "non_iid"
+ assert summary_sort["score"][0] == pytest.approx(expected=0.0, abs=1e-7)
+ assert info_sort.get("p-value", None) is not None, "Should have p-value"
+ assert summary_sort["score"][0] == pytest.approx(expected=info_sort["p-value"], abs=1e-7)
+
+ permutation = np.random.permutation(len(pred_probs))
+ new_issue_manager = NonIIDIssueManager(
+ datalab=issue_manager.datalab,
+ metric="euclidean",
+ k=10,
+ )
+
+ new_issue_manager.find_issues(pred_probs=pred_probs[permutation])
+ issues_perm, summary_perm, info_perm = (
+ new_issue_manager.issues,
+ new_issue_manager.summary,
+ new_issue_manager.info,
+ )
+ expected_permuted_issue_mask = np.array([False] * len(pred_probs))
+ assert np.all(
+ issues_perm["is_non_iid_issue"] == expected_permuted_issue_mask
+ ), "Issue mask should be correct"
+ assert summary_perm["issue_type"][0] == "non_iid"
+ # ensure score is large, cannot easily ensure precise value because random seed has different effects on
+ # different OS:
assert summary_perm["score"][0] > 0.05
assert info_perm.get("p-value", None) is not None, "Should have p-value"
assert summary_perm["score"][0] == pytest.approx(expected=info_perm["p-value"], abs=1e-7)
@@ -136,12 +185,38 @@ def test_report(self, issue_manager, embeddings):
"Number of examples with this issue:"
) in report
+ issue_manager.find_issues(features=embeddings)
report = issue_manager.report(
issues=issue_manager.issues,
summary=issue_manager.summary,
info=issue_manager.info,
verbosity=3,
)
+
+ assert "Additional Information: " in report
+
+ def test_report_using_pred_probs(self, issue_manager, pred_probs):
+ np.random.seed(SEED)
+ issue_manager.find_issues(pred_probs=pred_probs)
+ report = issue_manager.report(
+ issues=issue_manager.issues,
+ summary=issue_manager.summary,
+ info=issue_manager.info,
+ )
+
+ assert (
+ "---------------------- non_iid issues ----------------------\n\n"
+ "Number of examples with this issue:"
+ ) in report
+
+ issue_manager.find_issues(pred_probs=pred_probs)
+ report = issue_manager.report(
+ issues=issue_manager.issues,
+ summary=issue_manager.summary,
+ info=issue_manager.info,
+ verbosity=3,
+ )
+
assert "Additional Information: " in report
def test_collect_info(self, issue_manager, embeddings):
@@ -157,6 +232,18 @@ def test_collect_info(self, issue_manager, embeddings):
assert info["metric"] == "euclidean"
assert info["k"] == 10
+ def test_collect_info_using_pred_probs(self, issue_manager, pred_probs):
+ """Test some values in the info dict.
+
+ Mainly focused on the nearest neighbor info.
+ """
+ issue_manager.find_issues(pred_probs=pred_probs)
+ info = issue_manager.info
+
+ assert info["p-value"] == 0
+ assert info["metric"] == "euclidean"
+ assert info["k"] == 10
+
@pytest.mark.parametrize(
"seed",
[
@@ -221,3 +308,32 @@ def generate_data_iid():
assert p_value == p_value2
else:
assert p_value != p_value2
+
+ # using pred_probs
+ # normalizing pred_probs (0 to 1)
+ pred_probs = embeddings / (np.max(embeddings) - np.min(embeddings))
+ if seed == "default":
+ issue_manager = NonIIDIssueManager(
+ datalab=lab,
+ metric="euclidean",
+ k=10,
+ )
+ else:
+ issue_manager = NonIIDIssueManager(
+ datalab=lab,
+ metric="euclidean",
+ k=10,
+ seed=seed,
+ )
+ issue_manager.find_issues(pred_probs=pred_probs)
+ p_value = issue_manager.info["p-value"]
+
+ # Run again with the same seed
+ issue_manager.find_issues(pred_probs=pred_probs)
+ p_value2 = issue_manager.info["p-value"]
+
+ assert p_value > 0.0
+ if seed is not None or seed == "default":
+ assert p_value == p_value2
+ else:
+ assert p_value != p_value2
diff --git a/tests/datalab/test_datalab.py b/tests/datalab/test_datalab.py
--- a/tests/datalab/test_datalab.py
+++ b/tests/datalab/test_datalab.py
@@ -776,6 +776,16 @@ def test_find_non_iid_issues(self, random_embeddings):
assert summary["score"].values[0] > 0.05
assert lab.get_issues()["is_non_iid_issue"].sum() == 0
+ def test_find_non_iid_issues_using_pred_probs(self, random_embeddings):
+ data = {"labels": [0, 1, 0]}
+ lab = Datalab(data=data, label_name="labels")
+ pred_probs = random_embeddings / random_embeddings.sum(axis=1, keepdims=True)
+ lab.find_issues(pred_probs=pred_probs, issue_types={"non_iid": {}})
+ summary = lab.get_issue_summary()
+ assert ["non_iid"] == summary["issue_type"].values
+ assert summary["score"].values[0] > 0.05
+ assert lab.get_issues()["is_non_iid_issue"].sum() == 0
+
def test_find_non_iid_issues_sorted(self, sorted_embeddings):
data = {"labels": [0, 1, 0]}
lab = Datalab(data=data, label_name="labels")
@@ -785,6 +795,16 @@ def test_find_non_iid_issues_sorted(self, sorted_embeddings):
assert summary["score"].values[0] == 0
assert lab.get_issues()["is_non_iid_issue"].sum() == 1
+ def test_find_non_iid_issues_sorted_using_pred_probs(self, sorted_embeddings):
+ data = {"labels": [0, 1, 0]}
+ lab = Datalab(data=data, label_name="labels")
+ pred_probs = sorted_embeddings / sorted_embeddings.sum(axis=1, keepdims=True)
+ lab.find_issues(pred_probs=pred_probs, issue_types={"non_iid": {}})
+ summary = lab.get_issue_summary()
+ assert ["non_iid"] == summary["issue_type"].values
+ assert summary["score"].values[0] == 0
+ assert lab.get_issues()["is_non_iid_issue"].sum() == 1
+
def test_incremental_search(self, sorted_embeddings):
data = {"labels": [0, 1, 0]}
lab = Datalab(data=data, label_name="labels")
@@ -799,6 +819,21 @@ def test_incremental_search(self, sorted_embeddings):
assert non_iid_summary["score"].values[0] == 0
assert non_iid_summary["num_issues"].values[0] == 1
+ def test_incremental_search_using_pred_probs(self, sorted_embeddings):
+ data = {"labels": [0, 1, 0]}
+ lab = Datalab(data=data, label_name="labels")
+ pred_probs = sorted_embeddings / sorted_embeddings.sum(axis=1, keepdims=True)
+ lab.find_issues(pred_probs=pred_probs, issue_types={"non_iid": {}})
+ summary = lab.get_issue_summary()
+ assert len(summary) == 1
+ lab.find_issues(pred_probs=pred_probs, issue_types={"non_iid": {}})
+ summary = lab.get_issue_summary()
+ assert len(summary) == 1
+ assert "non_iid" in summary["issue_type"].values
+ non_iid_summary = lab.get_issue_summary("non_iid")
+ assert non_iid_summary["score"].values[0] == 0
+ assert non_iid_summary["num_issues"].values[0] == 1
+
class TestDatalabFindLabelIssues:
@pytest.fixture
@@ -1042,7 +1077,7 @@ def test_init(self, lab, features):
def test_find_issues(self, lab, features, pred_probs):
lab = Datalab(data={"X": features})
lab.find_issues(pred_probs=pred_probs)
- assert lab.issues.empty
+ assert set(lab.issues.columns) == {"is_non_iid_issue", "non_iid_score"}
lab = Datalab(data={"X": features})
lab.find_issues(features=features)
|
extend non-iid issue check in Datalab
Currently Datalab's non-iid issue type is only detected based on `features`:
https://docs.cleanlab.ai/master/cleanlab/datalab/guide/issue_type_description.html#non-iid-issue
If the user does not input `features`, but does provide `pred_probs`, run this same check based on the `pred_probs` instead (just treat them as features).
Note that if the user did provide `features` or there was already a KNN graph constructed in Datalab, the results should be returned as they currently are, not using the `pred_probs` at all!
Reference: https://github.com/cleanlab/cleanlab/blob/master/cleanlab/datalab/internal/issue_manager/noniid.py
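A hedged sketch of the requested fallback order (argument names are illustrative; the merged patch above realizes this inside `NonIIDIssueManager._determine_features`):
```
# Hedged sketch: reuse an existing knn_graph if present, otherwise prefer features,
# and only fall back to pred_probs (treated as features) when neither is available.
def select_noniid_inputs(features=None, pred_probs=None, knn_graph=None):
    if knn_graph is not None:
        return "knn_graph", knn_graph      # existing graph wins; pred_probs are ignored
    if features is not None:
        return "features", features        # original behavior, unchanged
    if pred_probs is not None:
        return "pred_probs", pred_probs    # new fallback requested in this issue
    raise ValueError("Provide a knn_graph, features, or pred_probs for the non-IID check.")
```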
|
@jwmueller can you assign this issue to me?
| 2023-10-03T18:46:03 |
cleanlab/cleanlab
| 912 |
cleanlab__cleanlab-912
|
[
"836"
] |
e90dea60c750cb18a50342899f8d8e376726d3c3
|
diff --git a/cleanlab/datalab/internal/issue_finder.py b/cleanlab/datalab/internal/issue_finder.py
--- a/cleanlab/datalab/internal/issue_finder.py
+++ b/cleanlab/datalab/internal/issue_finder.py
@@ -55,6 +55,7 @@
"non_iid": ["pred_probs", "features", "knn_graph"],
"underperforming_group": ["pred_probs", "features", "knn_graph", "cluster_ids"],
"data_valuation": ["knn_graph"],
+ "class_imbalance": [],
}
_REGRESSION_ARGS_DICT = {
"label": ["features", "predictions"],
@@ -69,9 +70,15 @@ def _resolve_required_args_for_classification(**kwargs):
for issue_type in initial_args_dict
}
+ # Some issue types (like class-imbalance) have no required args.
+ # This conditional lambda is used to include them in args dict.
+ keep_empty_argument = lambda k: not len(_CLASSIFICATION_ARGS_DICT[k])
+
# Remove None values from argument list, rely on default values in IssueManager
args_dict = {
- k: {k2: v2 for k2, v2 in v.items() if v2 is not None} for k, v in args_dict.items() if v
+ k: {k2: v2 for k2, v2 in v.items() if v2 is not None}
+ for k, v in args_dict.items()
+ if (v or keep_empty_argument(k))
}
# Prefer `knn_graph` over `features` if both are provided.
@@ -91,7 +98,8 @@ def _resolve_required_args_for_classification(**kwargs):
)
# Only keep issue types that have at least one argument
- args_dict = {k: v for k, v in args_dict.items() if v}
+ # or those that require no arguments.
+ args_dict = {k: v for k, v in args_dict.items() if (v or keep_empty_argument(k))}
return args_dict
@@ -103,12 +111,15 @@ def _resolve_required_args_for_regression(**kwargs):
issue_type: {arg: kwargs.get(arg, None) for arg in initial_args_dict[issue_type]}
for issue_type in initial_args_dict
}
+ # Some issue types have no required args.
+ # This conditional lambda is used to include them in args dict.
+ keep_empty_argument = lambda k: not len(_REGRESSION_ARGS_DICT[k])
# Remove None values from argument list, rely on default values in IssueManager
args_dict = {
k: {k2: v2 for k2, v2 in v.items() if v2 is not None}
for k, v in args_dict.items()
- if v or k == "label" # Allow label issues to require no arguments
+ if v or k == "label" or keep_empty_argument(k) # Allow label issues to require no arguments
}
return args_dict
diff --git a/cleanlab/datalab/internal/issue_manager_factory.py b/cleanlab/datalab/internal/issue_manager_factory.py
--- a/cleanlab/datalab/internal/issue_manager_factory.py
+++ b/cleanlab/datalab/internal/issue_manager_factory.py
@@ -203,10 +203,5 @@ def list_default_issue_types(task: str) -> List[str]:
if task == "regression":
default_issue_types = ["label"]
else:
- default_issue_types = [
- "label",
- "outlier",
- "near_duplicate",
- "non_iid",
- ]
+ default_issue_types = ["label", "outlier", "near_duplicate", "non_iid", "class_imbalance"]
return default_issue_types
|
diff --git a/tests/datalab/test_cleanvision_integration.py b/tests/datalab/test_cleanvision_integration.py
--- a/tests/datalab/test_cleanvision_integration.py
+++ b/tests/datalab/test_cleanvision_integration.py
@@ -32,7 +32,7 @@ def num_imagelab_issues(self):
@pytest.fixture
def num_datalab_issues(self):
- return 3
+ return 4
@pytest.fixture
def pred_probs(self, image_dataset):
@@ -67,6 +67,7 @@ def test_imagelab_issues_checked(
"label",
"outlier",
"near_duplicate",
+ "class_imbalance"
# "non_iid",
]
@@ -90,8 +91,9 @@ def test_imagelab_issues_checked(
"label",
"outlier",
"near_duplicate",
+ "class_imbalance",
],
- "num_issues": [1, 1, 0, 1, 1, 1, 1, 0, 0, 0],
+ "num_issues": [1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0],
}
)
expected_count = df.sort_values(by="issue_type")["num_issues"].tolist()
@@ -143,12 +145,7 @@ def test_imagelab_issues_not_checked(
assert len(datalab.issues.columns) == num_datalab_issues * 2
assert len(datalab.issue_summary) == num_datalab_issues
- all_keys = [
- "statistics",
- "label",
- "outlier",
- "near_duplicate",
- ]
+ all_keys = ["statistics", "label", "outlier", "near_duplicate", "class_imbalance"]
assert set(all_keys) == set(datalab.info.keys())
datalab.report()
diff --git a/tests/datalab/test_datalab.py b/tests/datalab/test_datalab.py
--- a/tests/datalab/test_datalab.py
+++ b/tests/datalab/test_datalab.py
@@ -87,6 +87,7 @@ def test_list_default_issue_types(self):
"outlier",
"near_duplicate",
"non_iid",
+ "class_imbalance",
]
def tmp_path(self):
@@ -585,13 +586,14 @@ def test_features_and_knn_graph(self, data_tuple):
assert lab.get_info("statistics").get("knn_metric") == "cosine"
def test_without_features_or_knn_graph(self, data_tuple):
- """Test that the `knn_graph` argument to `find_issues` is used instead of computing a new
- one from the `features` argument."""
+ """Test that only the class_imbalance issue is run
+ when no features, knn_graph or pred_probs are passed."""
lab, _, _ = data_tuple
# Test that a warning is raised
lab.find_issues()
- assert lab.issues.empty # No columns should be added to the issues dataframe
+ # Only class_imbalance issue columns should be present
+ assert list(lab.issues.columns) == ["is_class_imbalance_issue", "class_imbalance_score"]
def test_data_valuation_issue_with_knn_graph(self, data_tuple):
lab, knn_graph, features = data_tuple
@@ -856,10 +858,10 @@ def test_find_non_iid_issues_sorted_using_pred_probs(self, lab, sorted_embedding
def test_incremental_search(self, lab, sorted_embeddings):
lab.find_issues(features=sorted_embeddings)
summary = lab.get_issue_summary()
- assert len(summary) == 3
+ assert len(summary) == 4
lab.find_issues(features=sorted_embeddings, issue_types={"non_iid": {}})
summary = lab.get_issue_summary()
- assert len(summary) == 3
+ assert len(summary) == 4
assert "non_iid" in summary["issue_type"].values
non_iid_summary = lab.get_issue_summary("non_iid")
assert non_iid_summary["score"].values[0] == 0
@@ -929,11 +931,11 @@ def test_incremental_search(self, pred_probs, random_embeddings):
lab = Datalab(data=data, label_name="labels")
lab.find_issues(features=random_embeddings)
summary = lab.get_issue_summary()
- assert len(summary) == 4
+ assert len(summary) == 5
assert "label" in summary["issue_type"].values
lab.find_issues(pred_probs=pred_probs, issue_types={"label": {}})
summary = lab.get_issue_summary()
- assert len(summary) == 4
+ assert len(summary) == 5
assert "label" in summary["issue_type"].values
label_summary = lab.get_issue_summary("label")
assert label_summary["num_issues"].values[0] > 0
@@ -1314,8 +1316,9 @@ def test_find_issues_features_works_with_and_without_labels(self, features, labe
issues_with_labels = lab_with_labels.issues
issues_without_label_name = lab_without_label_name.issues
- # issues_with_labels should have two additional columns about label issues
- assert len(issues_without_labels.columns) + 2 == len(issues_with_labels.columns)
+ # issues_with_labels should have four additional columns, which include label issues
+ # and class_imbalance issues
+ assert len(issues_without_labels.columns) + 4 == len(issues_with_labels.columns)
pd.testing.assert_frame_equal(issues_without_labels, issues_without_label_name)
diff --git a/tests/datalab/test_issue_finder.py b/tests/datalab/test_issue_finder.py
--- a/tests/datalab/test_issue_finder.py
+++ b/tests/datalab/test_issue_finder.py
@@ -25,13 +25,13 @@ def test_init(self, issue_finder):
assert issue_finder.verbosity == 1
def test_get_available_issue_types(self, issue_finder):
- expected_issue_types = {}
+ expected_issue_types = {"class_imbalance": {}}
# Test with no kwargs, no issue type expected to be returned
for key in ["pred_probs", "features", "knn_graph"]:
issue_types = issue_finder.get_available_issue_types(**{key: None})
assert (
issue_types == expected_issue_types
- ), "Every issue type for classification requires some kwargs, expected empty dict"
+ ), "Only class_imbalance issue type for classification requires no kwargs"
# Test with only issue_types, input should be
issue_types_dicts = [
|
add class_imbalance issue type among the Datalab defaults
I'd test it on a couple of different types of datasets (including with extreme imbalance -- only 1 example of the minority class) before adding it to the defaults.
Make sure to also comment on it in the issue guide and tutorial once this is added to defaults.
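As one such sanity check, a hedged example of running only this check on an extremely imbalanced, made-up dataset (API usage mirrors the tests in this patch):
```
# Hedged example: exercise the class_imbalance check on a dataset whose minority class
# has a single example; no features or pred_probs are needed for this issue type.
import numpy as np
from cleanlab import Datalab

labels = ["a"] * 99 + ["b"]                            # only one example of class "b"
data = {"X": np.random.rand(100, 3), "y": labels}
lab = Datalab(data=data, label_name="y")
lab.find_issues(issue_types={"class_imbalance": {}})   # uses only the labels
print(lab.get_issue_summary("class_imbalance"))
```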
| 2023-12-07T14:21:49 |
|
cleanlab/cleanlab
| 940 |
cleanlab__cleanlab-940
|
[
"921"
] |
b806cba4aa551ec4a0db54a5612bf8e96a25245b
|
diff --git a/cleanlab/datalab/internal/data_issues.py b/cleanlab/datalab/internal/data_issues.py
--- a/cleanlab/datalab/internal/data_issues.py
+++ b/cleanlab/datalab/internal/data_issues.py
@@ -110,7 +110,7 @@ def get_info(
) -> Dict[str, Any]:
info_extracted = _InfoStrategy._get_info_helper(info=info, issue_name=issue_name)
info = info_extracted if info_extracted is not None else info
- if issue_name == "label":
+ if issue_name in ["label", "class_imbalance"]:
if data.labels.is_available is False:
raise ValueError(
"The labels are not available. "
@@ -236,9 +236,7 @@ def get_issues(self, issue_name: Optional[str] = None) -> pd.DataFrame:
specific_issues = specific_issues.assign(**column_dict)
if issue_name == "class_imbalance":
- specific_issues = specific_issues.assign(
- class_imbalance_class_name=info["Rarest Class"]
- )
+ specific_issues = specific_issues.assign(given_label=info["given_label"])
return specific_issues
def get_issue_summary(self, issue_name: Optional[str] = None) -> pd.DataFrame:
diff --git a/cleanlab/datalab/internal/issue_manager/imbalance.py b/cleanlab/datalab/internal/issue_manager/imbalance.py
--- a/cleanlab/datalab/internal/issue_manager/imbalance.py
+++ b/cleanlab/datalab/internal/issue_manager/imbalance.py
@@ -75,9 +75,13 @@ def find_issues(
},
)
self.summary = self.make_summary(score=class_probs[rarest_class_idx])
- self.info = self.collect_info(class_name=rarest_class_name)
+ self.info = self.collect_info(class_name=rarest_class_name, labels=labels)
- def collect_info(self, class_name: str) -> dict:
- params_dict = {"threshold": self.threshold, "Rarest Class": class_name}
+ def collect_info(self, class_name: str, labels: np.ndarray) -> dict:
+ params_dict = {
+ "threshold": self.threshold,
+ "Rarest Class": class_name,
+ "given_label": labels,
+ }
info_dict = {**params_dict}
return info_dict
|
diff --git a/tests/datalab/test_datalab.py b/tests/datalab/test_datalab.py
--- a/tests/datalab/test_datalab.py
+++ b/tests/datalab/test_datalab.py
@@ -177,7 +177,7 @@ def test_get_issues(self, lab, monkeypatch):
"distance_to_nearest_neighbor": mock_distance_to_nearest_neighbor,
},
"class_imbalance": {
- "Rarest Class": "class_0",
+ "given_label": lab.labels,
},
}
)
@@ -217,7 +217,7 @@ def test_get_issues(self, lab, monkeypatch):
key: mock_issues[key]
for key in ["is_class_imbalance_issue", "class_imbalance_score"]
},
- "class_imbalance_class_name": ["class_0"] * 5,
+ "given_label": [4, 4, 5, 3, 5],
},
)
pd.testing.assert_frame_equal(
|
update datalab.get_issues("class_imbalance") to include the label of each example
This dataframe here should indicate the class label of each example.
[screenshot of the current get_issues("class_imbalance") dataframe omitted]
Also `datalab.report()` should be updated so that its output (screenshot omitted) contains an extra line:
> About this issue:
> Examples belonging to the most under-represented class in the dataset (class: <name_of_class>)
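A hedged sketch of how the requested output could look once the label column is included (the column names follow the updated tests in this patch; the tiny dataset is made up):
```
# Hedged sketch: after this change, get_issues("class_imbalance") should also carry each
# example's given label next to the issue flag and score.
from cleanlab import Datalab

labels = [0] * 99 + [1]                                # label 1 is the rare class
lab = Datalab(data={"y": labels}, label_name="y")
lab.find_issues(issue_types={"class_imbalance": {}})
issues = lab.get_issues("class_imbalance")
print(issues.columns.tolist())
# expected: ['is_class_imbalance_issue', 'class_imbalance_score', 'given_label']
```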
| 2024-01-03T09:44:52 |
|
cleanlab/cleanlab
| 965 |
cleanlab__cleanlab-965
|
[
"962"
] |
7504a21cf72e3f15699b7c8f82261100fdad4175
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -102,7 +102,7 @@ def run(self):
# https://packaging.python.org/en/latest/discussions/install-requires-vs-requirements/
install_requires=[
"numpy>=1.20.0",
- "scikit-learn>=1.0,<1.4.0",
+ "scikit-learn>=1.0",
"tqdm>=4.53.0",
"pandas>=1.1.5",
"termcolor>=2.0.0,<2.4.0",
|
Revert #961 before release
Tensorflow version temporarily has an upper bound (`tensorflow<2.16.0`) in requirements-dev.txt.
scikit-learn version temporarily has an upper bound (`scikit-learn>=1.0,<1.4.0`) in setup.py.
Both temporary bounds need to be reverted before releasing v2.6.0.
_Originally posted by @elisno in https://github.com/cleanlab/cleanlab/issues/961#issuecomment-1898968097_
| 2024-01-22T12:34:45 |
||
cleanlab/cleanlab
| 970 |
cleanlab__cleanlab-970
|
[
"922",
"922"
] |
070b6e5b4271d4065e936ddc58e111f81031daf3
|
diff --git a/cleanlab/datalab/datalab.py b/cleanlab/datalab/datalab.py
--- a/cleanlab/datalab/datalab.py
+++ b/cleanlab/datalab/datalab.py
@@ -377,6 +377,7 @@ def report(
verbosity: Optional[int] = None,
include_description: bool = True,
show_summary_score: bool = False,
+ show_all_issues: bool = False,
) -> None:
"""Prints informative summary of all issues.
@@ -393,6 +394,13 @@ def report(
Whether or not to include a description of each issue type in the report.
Consider setting this to ``False`` once you're familiar with how each issue type is defined.
+ show_summary_score :
+ Whether or not to include the overall severity of each issue type in the report.
+
+ show_all_issues :
+ Whether or not to show all issues in the report, or only the issues for which examples were found in the dataset
+ With this set to ``True``, the report may include more types of issues that were not detected in the dataset.
+
See Also
--------
For advanced usage, see documentation for the
@@ -410,6 +418,7 @@ def report(
verbosity=verbosity,
include_description=include_description,
show_summary_score=show_summary_score,
+ show_all_issues=show_all_issues,
imagelab=self._imagelab,
)
reporter.report(num_examples=num_examples)
diff --git a/cleanlab/datalab/internal/adapter/imagelab.py b/cleanlab/datalab/internal/adapter/imagelab.py
--- a/cleanlab/datalab/internal/adapter/imagelab.py
+++ b/cleanlab/datalab/internal/adapter/imagelab.py
@@ -149,6 +149,7 @@ def __init__(
verbosity: int = 1,
include_description: bool = True,
show_summary_score: bool = False,
+ show_all_issues: bool = False,
):
super().__init__(
data_issues=data_issues,
@@ -156,6 +157,7 @@ def __init__(
verbosity=verbosity,
include_description=include_description,
show_summary_score=show_summary_score,
+ show_all_issues=show_all_issues,
)
self.imagelab = imagelab
diff --git a/cleanlab/datalab/internal/report.py b/cleanlab/datalab/internal/report.py
--- a/cleanlab/datalab/internal/report.py
+++ b/cleanlab/datalab/internal/report.py
@@ -63,6 +63,7 @@ def __init__(
verbosity: int = 1,
include_description: bool = True,
show_summary_score: bool = False,
+ show_all_issues: bool = False,
**kwargs,
):
self.data_issues = data_issues
@@ -70,6 +71,20 @@ def __init__(
self.verbosity = verbosity
self.include_description = include_description
self.show_summary_score = show_summary_score
+ self.show_all_issues = show_all_issues
+
+ def _get_empty_report(self) -> str:
+ """This method is used to return a report when there are
+ no issues found in the data with Datalab.find_issues().
+ """
+ report_str = "No issues found in the data. Good job!"
+ if not self.show_summary_score:
+ recommendation_msg = (
+ "Try re-running Datalab.report() with "
+ "`show_summary_score = True` and `show_all_issues = True`."
+ )
+ report_str += f"\n\n{recommendation_msg}"
+ return report_str
def report(self, num_examples: int) -> None:
"""Prints a report about identified issues in the data.
@@ -104,11 +119,28 @@ def get_report(self, num_examples: int) -> str:
"""
report_str = ""
issue_summary = self.data_issues.issue_summary
+ should_return_empty_report = not (
+ self.show_all_issues or issue_summary.empty or issue_summary["num_issues"].sum() > 0
+ )
+
+ if should_return_empty_report:
+ return self._get_empty_report()
issue_summary_sorted = issue_summary.sort_values(by="num_issues", ascending=False)
report_str += self._write_summary(summary=issue_summary_sorted)
issue_types = self._get_issue_types(issue_summary_sorted)
+ def add_issue_to_report(issue_name: str) -> bool:
+ """Returns True if the issue should be added to the report.
+ It is excluded if show_all_issues is False and there are no issues of that type
+ found in the data.
+ """
+ if self.show_all_issues:
+ return True
+ summary = self.data_issues.get_issue_summary(issue_name=issue_name)
+ has_issues = summary["num_issues"][0] > 0
+ return has_issues
+
issue_reports = [
_IssueManagerFactory.from_str(issue_type=key, task=self.task).report(
issues=self.data_issues.get_issues(issue_name=key),
@@ -154,7 +186,10 @@ def _write_summary(self, summary: pd.DataFrame) -> str:
def _get_issue_types(self, issue_summary: pd.DataFrame) -> List[str]:
issue_types = [
issue_type
- for issue_type in issue_summary["issue_type"].tolist()
+ for issue_type, num_issues in zip(
+ issue_summary["issue_type"].tolist(), issue_summary["num_issues"].tolist()
+ )
if issue_type not in DEFAULT_CLEANVISION_ISSUES
+ and (self.show_all_issues or num_issues > 0)
]
return issue_types
|
diff --git a/tests/datalab/test_datalab.py b/tests/datalab/test_datalab.py
--- a/tests/datalab/test_datalab.py
+++ b/tests/datalab/test_datalab.py
@@ -1786,3 +1786,51 @@ def test_underperforming_group_reuses_knn_graph(self, features, pred_probs, labe
assert (
time_underperforming_after_outlier < time_only_underperforming_group
), "KNN graph reuse should make this run of find_issues faster."
+
+
+class TestDatalabDefaultReporting:
+ """This test class focuses on testing the default behavior of the reporting functionality.
+
+ If there are no issues found, the report should contain a message for no issues found.
+
+ If there are issues found, the report should start with a summary of the issues found.
+
+ Other test classes focus on testing the reporting functionality with different issue types.
+ """
+
+ @pytest.fixture
+ def data(self):
+ np.random.seed(SEED)
+ X = np.random.rand(100, 10)
+ y = np.random.randint(0, 2, 100)
+
+ X[y == 1] += 1.5
+ return {"X": X, "y": y}
+
+ def test_report(self, data):
+ lab = Datalab(data=data, label_name="y")
+ lab.find_issues(features=data["X"], issue_types={"label": {}})
+ with contextlib.redirect_stdout(io.StringIO()) as f:
+ lab.report()
+ report = f.getvalue()
+ assert (
+ "No issues found in the data." in report
+ ), "Report should contain a message for no issues found"
+
+ def test_report_with_one_label_issue(self, data):
+ # Flip the label of one example
+ y = data["y"]
+ y[-1] = 1 - y[-1]
+
+ lab = Datalab(data={"X": data["X"], "y": y}, label_name="y")
+ lab.find_issues(features=data["X"], issue_types={"label": {}})
+ with contextlib.redirect_stdout(io.StringIO()) as f:
+ lab.report()
+ report = f.getvalue()
+ expected_header = (
+ "Here is a summary of the different kinds of issues found in the data:"
+ "\n\nissue_type num_issues\n label 1\n\n"
+ )
+ assert report.startswith(
+ expected_header
+ ), "Report should contain a message for one issue found"
diff --git a/tests/datalab/test_report.py b/tests/datalab/test_report.py
--- a/tests/datalab/test_report.py
+++ b/tests/datalab/test_report.py
@@ -100,3 +100,123 @@ def from_str(*args, **kwargs):
report = reporter.get_report(num_examples=3)
expected_report = "\n\n".join(["Here is a lab summary", "foo report"])
assert report == expected_report
+
+ @pytest.mark.parametrize(
+ "show_all_issues, expected_report",
+ [
+ (True, "Here is a lab summary\n\nfoo report\n\n\nbar report"),
+ (False, "Here is a lab summary\n\nfoo report"),
+ ],
+ )
+ def test_show_all_issues(
+ self, reporter, data_issues, monkeypatch, show_all_issues, expected_report
+ ):
+ """Test that the report method works. Assuming we have two issue managers, each should add
+ their section to the report."""
+
+ mock_issue_manager_foo = Mock()
+ mock_issue_manager_foo.issue_name = "foo"
+ mock_issue_manager_foo.report.return_value = "foo report"
+
+ mock_issue_manager_bar = Mock()
+ mock_issue_manager_bar.issue_name = "bar"
+ mock_issue_manager_bar.report.return_value = "bar report"
+
+ class MockIssueManagerFactory:
+ @staticmethod
+ def from_str(*args, **kwargs):
+ name = kwargs["issue_type"]
+ issue_managers = {
+ "foo": mock_issue_manager_foo,
+ "bar": mock_issue_manager_bar,
+ }
+ issue_manager = issue_managers.get(name)
+ if issue_manager is None:
+ raise ValueError(f"Unknown issue manager name: {name}")
+ return issue_manager
+
+ monkeypatch.setattr(
+ "cleanlab.datalab.internal.report._IssueManagerFactory", MockIssueManagerFactory
+ )
+ mock_issues = pd.DataFrame(
+ {
+ "is_foo_issue": [False, True, False, False, False],
+ "foo_score": [0.6, 0.2, 0.7, 0.7, 0.8],
+ "is_bar_issue": [False, False, False, False, False],
+ "bar_score": [0.7, 0.9, 0.8, 0.8, 0.8],
+ }
+ )
+ monkeypatch.setattr(data_issues, "issues", mock_issues)
+
+ # "bar" issue may be omitted in report, unless show_all_issues is True
+ mock_issue_summary = pd.DataFrame(
+ {
+ "issue_type": ["foo", "bar"],
+ "score": [0.6, 0.8],
+ "num_issues": [1, 0],
+ }
+ )
+
+ mock_info = {
+ "foo": {"foobar": "baz"},
+ "bar": {"barfoo": "bazbar"},
+ }
+
+ monkeypatch.setattr(data_issues, "issue_summary", mock_issue_summary)
+
+ reporter = Reporter(
+ data_issues=data_issues,
+ task="classification",
+ verbosity=0,
+ include_description=False,
+ show_all_issues=show_all_issues,
+ )
+ monkeypatch.setattr(data_issues, "issues", mock_issues, raising=False)
+ monkeypatch.setattr(data_issues, "info", mock_info, raising=False)
+
+ monkeypatch.setattr(
+ reporter, "_write_summary", lambda *args, **kwargs: "Here is a lab summary\n\n"
+ )
+ report = reporter.get_report(num_examples=3)
+ assert report == expected_report
+
+ summary = pd.DataFrame(
+ {
+ "issue_type": ["foo", "bar"],
+ "score": [0.6, 0.8],
+ "num_issues": [1, 0],
+ }
+ )
+
+ def test_summary_with_score(self, reporter, data_issues, monkeypatch):
+ """Test that the _write_summary method returns the expected output when show_summary_score is True.
+
+ It should include the score column in the summary and a note about what the score means.
+ """
+ mock_statistics = {"num_examples": 100, "num_classes": 5}
+ monkeypatch.setattr(data_issues, "get_info", lambda *args, **kwargs: mock_statistics)
+
+ expected_output = (
+ "Here is a summary of the different kinds of issues found in the data:\n\n"
+ + self.summary.to_string(index=False)
+ + "\n\n"
+ + "(Note: A lower score indicates a more severe issue across all examples in the dataset.)\n\n"
+ + "Dataset Information: num_examples: 100, num_classes: 5\n\n\n"
+ )
+
+ reporter.show_summary_score = True
+ assert reporter._write_summary(self.summary) == expected_output
+
+ def test_summary_without_score(self, reporter, data_issues, monkeypatch):
+ mock_statistics = {"num_examples": 100, "num_classes": 5}
+ monkeypatch.setattr(data_issues, "get_info", lambda *args, **kwargs: mock_statistics)
+
+ expected_output = (
+ "Here is a summary of the different kinds of issues found in the data:\n\n"
+ + self.summary.drop(columns=["score"]).to_string(index=False)
+ + "\n\n"
+ + "Dataset Information: num_examples: 100, num_classes: 5\n\n\n"
+ )
+
+ reporter.show_summary_score = False
+ assert reporter._write_summary(self.summary) == expected_output
|
add `show_all_issues` optional argument to: datalab.report()
This arg has default = False.
If True, the report includes issue types with `num_issues = 0` in the Datalab report; otherwise we suppress such issue types from the report.
This is critical to ensure the report doesn't get overwhelming as more issue types are supported.
Also: `datalab.report()` docstring is missing documentation for the argument:
`show_summary_score`
Add it while adding this other argument.
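A hedged usage sketch of the two report flags (argument names match the signature added in the diff above; the toy dataset here is made up):
```
# Hedged usage sketch: the default report hides issue types with zero detected issues,
# while show_all_issues=True includes them and show_summary_score=True adds severity scores.
import numpy as np
from cleanlab import Datalab

data = {"X": np.random.rand(50, 4), "y": np.random.randint(0, 2, 50)}
lab = Datalab(data=data, label_name="y")
lab.find_issues(features=data["X"])

lab.report()                                               # default: only issue types with issues found
lab.report(show_all_issues=True, show_summary_score=True)  # include everything, with per-type scores
```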
|
Hi @jwmueller I am working on this ✌️
| 2024-01-26T20:43:01 |
cleanlab/cleanlab
| 980 |
cleanlab__cleanlab-980
|
[
"906"
] |
59d6e3fd654e97b2c2fd9bf52c9d56e2685ba9fb
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -70,10 +70,10 @@ def run(self):
"Natural Language :: English",
# We believe this package works will these versions, but we do not guarantee it!
"Programming Language :: Python :: 3",
- "Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
+ "Programming Language :: Python :: 3.11",
"Programming Language :: Python",
"Topic :: Software Development",
"Topic :: Scientific/Engineering",
@@ -82,7 +82,7 @@ def run(self):
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
],
- python_requires=">=3.7",
+ python_requires=">=3.8",
# What does your project relate to?
keywords="machine_learning data_cleaning confident_learning classification weak_supervision "
"learning_with_noisy_labels unsupervised_learning datacentric_ai, datacentric",
@@ -101,11 +101,11 @@ def run(self):
# requirements files see:
# https://packaging.python.org/en/latest/discussions/install-requires-vs-requirements/
install_requires=[
- "numpy>=1.20.0",
- "scikit-learn>=1.0",
+ "numpy>=1.22.0",
+ "scikit-learn>=1.1",
"tqdm>=4.53.0",
- "pandas>=1.1.5",
- "termcolor>=2.0.0,<2.4.0",
+ "pandas>=1.4.0",
+ "termcolor>=2.4.0",
],
extras_require=EXTRAS_REQUIRE,
)
|
diff --git a/tests/test_token_classification.py b/tests/test_token_classification.py
--- a/tests/test_token_classification.py
+++ b/tests/test_token_classification.py
@@ -163,7 +163,11 @@ def test_merge_probs_with_normalization():
],
)
def test_color_sentence(monkeypatch: pytest.MonkeyPatch, sentence, word, expected):
+ import os
+
+ monkeypatch.setattr(os, "isatty", lambda fd: True)
monkeypatch.setattr("sys.stdout.isatty", lambda: True)
+ monkeypatch.setattr("sys.stdout.fileno", lambda: 1)
colored = color_sentence(sentence, word)
assert colored == expected
|
Revert version upper bound of termcolor dependency
## Background
In #905, an upper bound on the termcolor version was added. This was a temporary fix to pass CI on existing PRs.
https://github.com/cleanlab/cleanlab/blob/4ac43c0a2654179732cafc704e807c3c5c9ec1f1/setup.py#L108
Without the upper bound, the following tests fail on Python 3.8+
https://github.com/cleanlab/cleanlab/blob/4ac43c0a2654179732cafc704e807c3c5c9ec1f1/tests/test_token_classification.py#L134-L169
Here's a screenshot of a CI run with these tests failing:
<img width="1142" alt="image" src="https://github.com/cleanlab/cleanlab/assets/18127060/2d371cf4-0d73-49e7-8392-df3384c46a71">
It looks like the `colored` variable no longer has the expected ANSI color codes applied when stdout is not a TTY (hence the tests above monkeypatching `isatty`/`fileno` to force colored output).
## Task
The version limits need to be reverted to
```python
"termcolor>=2.0.0",
```
but the affected unit tests need to pass as well when the new release of termcolor ([version 2.4.0](https://github.com/termcolor/termcolor/releases/tag/2.4.0)) is installed.
| 2024-02-07T20:47:45 |
|
cleanlab/cleanlab
| 990 |
cleanlab__cleanlab-990
|
[
"910"
] |
16c5866a8b1b16ae3bc83a4f730d0c2a568a41cd
|
diff --git a/cleanlab/datalab/internal/issue_manager_factory.py b/cleanlab/datalab/internal/issue_manager_factory.py
--- a/cleanlab/datalab/internal/issue_manager_factory.py
+++ b/cleanlab/datalab/internal/issue_manager_factory.py
@@ -223,6 +223,7 @@ def list_default_issue_types(task: str) -> List[str]:
"near_duplicate",
"non_iid",
"class_imbalance",
+ "underperforming_group",
],
"regression": [
"null",
|
diff --git a/tests/datalab/datalab/test_datalab.py b/tests/datalab/datalab/test_datalab.py
--- a/tests/datalab/datalab/test_datalab.py
+++ b/tests/datalab/datalab/test_datalab.py
@@ -89,6 +89,7 @@ def test_list_default_issue_types(self):
"near_duplicate",
"non_iid",
"class_imbalance",
+ "underperforming_group",
]
def tmp_path(self):
diff --git a/tests/datalab/test_cleanvision_integration.py b/tests/datalab/test_cleanvision_integration.py
--- a/tests/datalab/test_cleanvision_integration.py
+++ b/tests/datalab/test_cleanvision_integration.py
@@ -32,7 +32,7 @@ def num_imagelab_issues(self):
@pytest.fixture
def num_datalab_issues(self):
- return 5
+ return 6
@pytest.fixture
def pred_probs(self, image_dataset):
@@ -69,6 +69,7 @@ def test_imagelab_issues_checked(
"near_duplicate",
"class_imbalance",
"null",
+ "underperforming_group",
# "non_iid",
]
@@ -94,12 +95,14 @@ def test_imagelab_issues_checked(
"near_duplicate",
"class_imbalance",
"null",
+ "underperforming_group",
],
- "num_issues": [1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0],
+ "num_issues": [1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
}
)
expected_count = df.sort_values(by="issue_type")["num_issues"].tolist()
count = datalab.issue_summary.sort_values(by="issue_type")["num_issues"].tolist()
+ assert set(datalab.issue_summary["issue_type"].tolist()) == set(df["issue_type"].tolist())
assert count == expected_count
assert datalab.issue_summary["num_issues"].sum() == df["num_issues"].sum()
@@ -147,7 +150,15 @@ def test_imagelab_issues_not_checked(
assert len(datalab.issues.columns) == num_datalab_issues * 2
assert len(datalab.issue_summary) == num_datalab_issues
- all_keys = ["statistics", "label", "outlier", "near_duplicate", "class_imbalance", "null"]
+ all_keys = [
+ "statistics",
+ "label",
+ "outlier",
+ "near_duplicate",
+ "class_imbalance",
+ "null",
+ "underperforming_group",
+ ]
assert set(all_keys) == set(datalab.info.keys())
datalab.report()
|
Add underperforming_group issue type among the Datalab defaults
Test the issue manager with different datasets (image, tabular, etc.) to make sure that the underperforming group in the dataset is extracted successfully. List any failure cases that might need to be addressed before adding this issue type to the defaults.
|
First add an extra step so that `is_issue = True` only if the cluster in question contains at least M datapoints, for say M = 10.
(Maybe this is already done though; the responsibility for this should really lie with the clustering algorithm, which should never produce small clusters in the first place.)
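A hedged sketch of that guard (M, the function name, and the cluster representation are illustrative, not the shipped implementation):
```
# Hedged sketch: only flag an underperforming cluster if it contains at least M datapoints.
import numpy as np

def flag_underperforming(cluster_ids: np.ndarray, worst_cluster: int, M: int = 10) -> np.ndarray:
    in_worst_cluster = cluster_ids == worst_cluster
    if in_worst_cluster.sum() < M:        # too few points to call it an underperforming group
        return np.zeros_like(in_worst_cluster, dtype=bool)
    return in_worst_cluster
```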
Don't forget to update the Datalab documentation listing the default issue types.
| 2024-02-09T19:49:37 |
cleanlab/cleanlab
| 1,000 |
cleanlab__cleanlab-1000
|
[
"988"
] |
b25d54f3802a5ca6f34c12f616928f1f7cde206d
|
diff --git a/cleanlab/datalab/internal/issue_finder.py b/cleanlab/datalab/internal/issue_finder.py
--- a/cleanlab/datalab/internal/issue_finder.py
+++ b/cleanlab/datalab/internal/issue_finder.py
@@ -472,4 +472,12 @@ def get_available_issue_types(self, **kwargs):
)
issue_types_copy.pop("outlier")
+ drop_class_imbalance_check = (
+ "class_imbalance" in issue_types_copy
+ and not self.datalab.has_labels
+ and self.task == Task.CLASSIFICATION
+ )
+ if drop_class_imbalance_check:
+ issue_types_copy.pop("class_imbalance")
+
return issue_types_copy
|
Class Imbalance issue checker should not run if labels are not provided in Datalab
```
from cleanlab import Datalab
lab = Datalab(data=df_without_labels)
lab.find_issues()
```
It should not run the ClassImbalanceIssueManager, but it tries to anyway.
Just add a check that the Datalab had labels specified; only then should it run the ClassImbalanceIssueManager in find_issues.
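A hedged sketch of that guard (the merged patch above places the equivalent check inside `IssueFinder.get_available_issue_types`; the names here are illustrative):
```
# Hedged sketch: drop the class_imbalance check when the Datalab was built without labels.
from typing import Any, Dict

def drop_checks_requiring_labels(issue_types: Dict[str, Any], has_labels: bool) -> Dict[str, Any]:
    if not has_labels:
        return {name: args for name, args in issue_types.items() if name != "class_imbalance"}
    return issue_types
```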
| 2024-02-13T00:04:21 |
||
cleanlab/cleanlab
| 1,005 |
cleanlab__cleanlab-1005
|
[
"989"
] |
1ed51aa35af86c70d2f0f30ffc087f4972a8cdf8
|
diff --git a/cleanlab/datalab/datalab.py b/cleanlab/datalab/datalab.py
--- a/cleanlab/datalab/datalab.py
+++ b/cleanlab/datalab/datalab.py
@@ -136,10 +136,10 @@ def __init__(
# todo: check displayer methods
def __repr__(self) -> str:
- return _Displayer(data_issues=self.data_issues).__repr__()
+ return _Displayer(data_issues=self.data_issues, task=self.task).__repr__()
def __str__(self) -> str:
- return _Displayer(data_issues=self.data_issues).__str__()
+ return _Displayer(data_issues=self.data_issues, task=self.task).__str__()
@property
def labels(self) -> Union[np.ndarray, List[List[int]]]:
diff --git a/cleanlab/datalab/internal/display.py b/cleanlab/datalab/internal/display.py
--- a/cleanlab/datalab/internal/display.py
+++ b/cleanlab/datalab/internal/display.py
@@ -17,45 +17,152 @@
Module that handles the string representation of Datalab objects.
"""
-from typing import TYPE_CHECKING
+from abc import ABC, abstractmethod
+from typing import TYPE_CHECKING, List, Optional
+
+from cleanlab.datalab.internal.task import Task
if TYPE_CHECKING: # pragma: no cover
from cleanlab.datalab.internal.data_issues import DataIssues
+class RepresentationStrategy(ABC):
+ def __init__(self, data_issues: "DataIssues"):
+ self.data_issues = data_issues
+
+ @property
+ def checks_run(self) -> bool:
+ return not self.data_issues.issues.empty
+
+ @property
+ def num_examples(self) -> Optional[int]:
+ return self.data_issues.get_info("statistics").get("num_examples")
+
+ @property
+ def num_classes(self) -> Optional[int]:
+ return self.data_issues.get_info("statistics").get("num_classes")
+
+ @property
+ def issues_identified(self) -> str:
+ return (
+ self.data_issues.issue_summary["num_issues"].sum() if self.checks_run else "Not checked"
+ )
+
+ def show_task(self, task: "Task") -> str:
+ return f"task={str(task).capitalize()}"
+
+ def show_checks_run(self) -> str:
+ return f"checks_run={self.checks_run}"
+
+ def show_num_examples(self) -> str:
+ return f"num_examples={self.num_examples}" if self.num_examples is not None else ""
+
+ def show_num_classes(self) -> str:
+ return f"num_classes={self.num_classes}" if self.num_classes is not None else ""
+
+ def show_issues_identified(self) -> str:
+ return f"issues_identified={self.issues_identified}"
+
+ @abstractmethod
+ def represent(self) -> str:
+ pass
+
+
+class ClassificationRepresentation(RepresentationStrategy):
+ def represent(self) -> str:
+ display_strings: List[str] = [
+ self.show_task(Task.CLASSIFICATION),
+ self.show_checks_run(),
+ self.show_num_examples(),
+ self.show_num_classes(),
+ self.show_issues_identified(),
+ ]
+ # Drop empty strings
+ display_strings = [s for s in display_strings if bool(s)]
+ display_str = ", ".join(display_strings)
+ return f"Datalab({display_str})"
+
+
+class RegressionRepresentation(RepresentationStrategy):
+ def represent(self) -> str:
+ display_strings: List[str] = [
+ self.show_task(Task.REGRESSION),
+ self.show_checks_run(),
+ self.show_num_examples(),
+ self.show_issues_identified(),
+ ]
+ # Drop empty strings
+ display_strings = [s for s in display_strings if bool(s)]
+ display_str = ", ".join(display_strings)
+ return f"Datalab({display_str})"
+
+
+class MultilabelRepresentation(RepresentationStrategy):
+ def represent(self) -> str:
+ display_strings: List[str] = [
+ self.show_task(Task.MULTILABEL),
+ self.show_checks_run(),
+ self.show_num_examples(),
+ self.show_num_classes(),
+ self.show_issues_identified(),
+ ]
+ # Drop empty strings
+ display_strings = [s for s in display_strings if bool(s)]
+ display_str = ", ".join(display_strings)
+ return f"Datalab({display_str})"
+
+
class _Displayer:
- def __init__(self, data_issues: "DataIssues") -> None:
+ def __init__(self, data_issues: "DataIssues", task: "Task") -> None:
self.data_issues = data_issues
+ self.task = task
+ self.representation_strategy = self._get_representation_strategy()
+
+ def _get_representation_strategy(self) -> RepresentationStrategy:
+ strategies = {
+ "classification": ClassificationRepresentation,
+ "regression": RegressionRepresentation,
+ "multilabel": MultilabelRepresentation,
+ }
+ strategy_class = strategies.get(self.task.value)
+ if not strategy_class:
+ raise ValueError(f"Unsupported task type: {self.task}")
+ return strategy_class(self.data_issues)
def __repr__(self) -> str:
"""What is displayed in console if user executes: >>> datalab"""
- checks_run = not self.data_issues.issues.empty
- display_str = f"checks_run={checks_run}"
- num_examples = self.data_issues.get_info("statistics")["num_examples"]
- if num_examples is not None:
- display_str += f", num_examples={num_examples}"
- num_classes = self.data_issues.get_info("statistics")["num_classes"]
- if num_classes is not None:
- display_str += f", num_classes={num_classes}"
- if checks_run:
- issues_identified = self.data_issues.issue_summary["num_issues"].sum()
- display_str += f", issues_identified={issues_identified}"
- return f"Datalab({display_str})"
+ return self.representation_strategy.represent()
- def __str__(self) -> str:
- """What is displayed if user executes: print(datalab)"""
- checks_run = not self.data_issues.issues.empty
- num_examples = self.data_issues.get_info("statistics").get("num_examples")
- num_classes = self.data_issues.get_info("statistics").get("num_classes")
+ @property
+ def checks_run(self) -> bool:
+ """Whether checks have been run on the data."""
+ return not self.data_issues.issues.empty
+
+ @property
+ def num_examples(self) -> Optional[int]:
+ """Number of examples in the dataset."""
+ return self.data_issues.get_info("statistics").get("num_examples")
- issues_identified = (
- self.data_issues.issue_summary["num_issues"].sum() if checks_run else "Not checked"
+ @property
+ def num_classes(self) -> Optional[int]:
+ """Number of classes in the dataset."""
+ return self.data_issues.get_info("statistics").get("num_classes")
+
+ @property
+ def issues_identified(self) -> str:
+ """Number of issues identified in the dataset."""
+ return (
+ self.data_issues.issue_summary["num_issues"].sum() if self.checks_run else "Not checked"
)
+
+ def __str__(self) -> str:
+ """What is displayed if user executes: print(datalab)"""
info_list = [
- f"Checks run: {'Yes' if checks_run else 'No'}",
- f"Number of examples: {num_examples if num_examples is not None else 'Unknown'}",
- f"Number of classes: {num_classes if num_classes is not None else 'Unknown'}",
- f"Issues identified: {issues_identified}",
+ f"Task: {str(self.task).capitalize()}",
+ f"Checks run: {'Yes' if self.checks_run else 'No'}",
+ f"Number of examples: {self.num_examples if self.num_examples is not None else 'Unknown'}",
+ f"Number of classes: {self.num_classes if self.num_classes is not None else 'Unknown'}",
+ f"Issues identified: {self.issues_identified}",
]
return "Datalab:\n" + "\n".join(info_list)
|
diff --git a/tests/datalab/datalab/test_datalab.py b/tests/datalab/datalab/test_datalab.py
--- a/tests/datalab/datalab/test_datalab.py
+++ b/tests/datalab/datalab/test_datalab.py
@@ -65,6 +65,7 @@ def test_print(self, lab, capsys):
captured = capsys.readouterr()
expected_output = (
"Datalab:\n"
+ "Task: Classification\n"
"Checks run: No\n"
"Number of examples: 5\n"
"Number of classes: 3\n"
|
`Datalab.__repr__()` breaks down with a KeyError: num_classes
`Datalab.__repr__()` breaks down with a KeyError: num_classes on this [line](https://github.com/cleanlab/cleanlab/blob/master/cleanlab/datalab/internal/display.py#L34)
# Steps to reproduce
Initialize a dataset with no label name
```
lab = Datalab(df)
```
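A minimal sketch of the defensive lookup the patch above moves to — `.get()` yields `None` for a missing `"num_classes"` key instead of raising a `KeyError` (`data_issues` here stands in for Datalab's internal `DataIssues` object):
```python
def safe_repr(data_issues) -> str:
    # data_issues stands in for Datalab's internal DataIssues object.
    stats = data_issues.get_info("statistics")
    parts = [f"checks_run={not data_issues.issues.empty}"]
    num_classes = stats.get("num_classes")  # None when no label column was given
    if num_classes is not None:
        parts.append(f"num_classes={num_classes}")
    return f"Datalab({', '.join(parts)})"
```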
| 2024-02-13T03:01:15 |
|
cleanlab/cleanlab
| 1,023 |
cleanlab__cleanlab-1023
|
[
"928"
] |
01ef893a740229aa7c02c286250596eab7e46009
|
diff --git a/cleanlab/datalab/internal/issue_finder.py b/cleanlab/datalab/internal/issue_finder.py
--- a/cleanlab/datalab/internal/issue_finder.py
+++ b/cleanlab/datalab/internal/issue_finder.py
@@ -59,7 +59,7 @@
"near_duplicate": ["features", "knn_graph"],
"non_iid": ["pred_probs", "features", "knn_graph"],
"underperforming_group": ["pred_probs", "features", "knn_graph", "cluster_ids"],
- "data_valuation": ["knn_graph"],
+ "data_valuation": ["features", "knn_graph"],
"class_imbalance": [],
"null": ["features"],
}
@@ -68,6 +68,7 @@
"outlier": ["features", "knn_graph"],
"near_duplicate": ["features", "knn_graph"],
"non_iid": ["features", "knn_graph"],
+ "data_valuation": ["features", "knn_graph"],
"null": ["features"],
}
@@ -76,6 +77,7 @@
"outlier": ["features", "knn_graph"],
"near_duplicate": ["features", "knn_graph"],
"non_iid": ["features", "knn_graph"],
+ "data_valuation": ["features", "knn_graph"],
"null": ["features"],
}
diff --git a/cleanlab/datalab/internal/issue_manager/data_valuation.py b/cleanlab/datalab/internal/issue_manager/data_valuation.py
--- a/cleanlab/datalab/internal/issue_manager/data_valuation.py
+++ b/cleanlab/datalab/internal/issue_manager/data_valuation.py
@@ -24,14 +24,20 @@
Optional,
Union,
)
+import warnings
+
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix
+from sklearn.exceptions import NotFittedError
+from sklearn.neighbors import NearestNeighbors
+from sklearn.utils.validation import check_is_fitted
from cleanlab.datalab.internal.issue_manager import IssueManager
if TYPE_CHECKING: # pragma: no cover
+ import numpy.typing as npt
import pandas as pd
from cleanlab.datalab.datalab import Datalab
@@ -94,16 +100,19 @@ class DataValuationIssueManager(IssueManager):
def __init__(
self,
datalab: Datalab,
+ metric: Optional[str] = None,
threshold: Optional[float] = None,
k: int = 10,
**kwargs,
):
super().__init__(datalab)
+ self.metric = metric
self.k = k
self.threshold = threshold if threshold is not None else self.DEFAULT_THRESHOLD
def find_issues(
self,
+ features: Optional[npt.NDArray] = None,
**kwargs,
) -> None:
"""Calculate the data valuation score with a provided or existing knn graph.
@@ -117,6 +126,8 @@ def find_issues(
"""
self.k = kwargs.get("k", self.k)
knn_graph = self._process_knn_graph_from_inputs(kwargs)
+ old_knn_metric = self.datalab.get_info("statistics").get("knn_metric")
+ metric_changes = self.metric and self.metric != old_knn_metric
labels = self.datalab.labels
if not isinstance(labels, np.ndarray):
error_msg = (
@@ -124,12 +135,29 @@ def find_issues(
f"but got {type(labels)} instead."
)
raise TypeError(error_msg)
- if knn_graph is None:
- raise ValueError(
- "knn_graph must be provided in kwargs or already stored in the Datalab instance\n"
- "It should be calculated by other issue managers if it is not provided via "
- "`Datalab.find_issues(knn_graph=knn_graph, ...)`"
- )
+ if knn_graph is None or metric_changes:
+ if features is None:
+ raise ValueError(
+ "If a knn_graph is not provided, features must be provided to fit a new knn."
+ )
+ if self.metric is None:
+ self.metric = "cosine" if features.shape[1] > 3 else "euclidean"
+ knn = NearestNeighbors(n_neighbors=self.k, metric=self.metric).fit(features)
+
+ if self.metric and self.metric != knn.metric:
+ warnings.warn(
+ f"Metric {self.metric} does not match metric {knn.metric} used to fit knn. "
+ "Most likely an existing NearestNeighbors object was passed in, but a different "
+ "metric was specified."
+ )
+ self.metric = knn.metric
+
+ try:
+ check_is_fitted(knn)
+ except NotFittedError:
+ knn.fit(features)
+
+ knn_graph = knn.kneighbors_graph(mode="distance")
if labels is None:
raise ValueError("labels must be provided to run data valuation")
@@ -143,7 +171,7 @@ def find_issues(
)
self.summary = self.make_summary(score=scores.mean())
- self.info = self.collect_info(self.issues)
+ self.info = self.collect_info(issues=self.issues, knn_graph=knn_graph)
def _process_knn_graph_from_inputs(self, kwargs: Dict[str, Any]) -> Union[csr_matrix, None]:
"""Determine if a knn_graph is provided in the kwargs or if one is already stored in the associated Datalab instance."""
@@ -163,18 +191,47 @@ def _process_knn_graph_from_inputs(self, kwargs: Dict[str, Any]) -> Union[csr_ma
)
return knn_graph
- def collect_info(self, issues: pd.DataFrame) -> dict:
+ def collect_info(self, issues: pd.DataFrame, knn_graph: csr_matrix) -> dict:
issues_info = {
"num_low_valuation_issues": sum(issues[f"is_{self.issue_name}_issue"]),
"average_data_valuation": issues[self.issue_score_key].mean(),
}
+ params_dict = {
+ "metric": self.metric,
+ "k": self.k,
+ "threshold": self.threshold,
+ }
+
+ statistics_dict = self._build_statistics_dictionary(knn_graph=knn_graph)
+
info_dict = {
**issues_info,
+ **params_dict,
+ **statistics_dict,
}
return info_dict
+ def _build_statistics_dictionary(self, knn_graph: csr_matrix) -> Dict[str, Dict[str, Any]]:
+ statistics_dict: Dict[str, Dict[str, Any]] = {"statistics": {}}
+
+ # Add the knn graph as a statistic if necessary
+ graph_key = "weighted_knn_graph"
+ old_knn_graph = self.datalab.get_info("statistics").get(graph_key, None)
+ old_graph_exists = old_knn_graph is not None
+ prefer_new_graph = (
+ not old_graph_exists
+ or knn_graph.nnz > old_knn_graph.nnz
+ or self.metric != self.datalab.get_info("statistics").get("knn_metric", None)
+ )
+ if prefer_new_graph:
+ statistics_dict["statistics"][graph_key] = knn_graph
+ if self.metric is not None:
+ statistics_dict["statistics"]["knn_metric"] = self.metric
+
+ return statistics_dict
+
def _knn_shapley_score(knn_graph: csr_matrix, labels: np.ndarray, k: int) -> np.ndarray:
"""Compute the Shapley values of data points based on a knn graph."""
diff --git a/cleanlab/datalab/internal/issue_manager_factory.py b/cleanlab/datalab/internal/issue_manager_factory.py
--- a/cleanlab/datalab/internal/issue_manager_factory.py
+++ b/cleanlab/datalab/internal/issue_manager_factory.py
@@ -74,6 +74,7 @@
"outlier": OutlierIssueManager,
"near_duplicate": NearDuplicateIssueManager,
"non_iid": NonIIDIssueManager,
+ "data_valuation": DataValuationIssueManager,
"null": NullIssueManager,
},
Task.MULTILABEL: {
@@ -81,6 +82,7 @@
"outlier": OutlierIssueManager,
"near_duplicate": NearDuplicateIssueManager,
"non_iid": NonIIDIssueManager,
+ "data_valuation": DataValuationIssueManager,
"null": NullIssueManager,
},
}
|
diff --git a/tests/datalab/datalab/test_datalab.py b/tests/datalab/datalab/test_datalab.py
--- a/tests/datalab/datalab/test_datalab.py
+++ b/tests/datalab/datalab/test_datalab.py
@@ -1495,6 +1495,122 @@ def test_report(self, embeddings_with_null):
), "Report should not contain a tip to address partial null examples"
+class TestDatalabDataValuation:
+ label_name: str = "y"
+
+ @pytest.fixture
+ def dataset(self):
+ from sklearn.datasets import make_classification
+
+ np.random.seed(SEED)
+
+ # Generate a 10D dataset with 2 classes
+ X, y = make_classification(
+ n_samples=100,
+ n_features=10,
+ n_informative=2,
+ n_redundant=2,
+ n_repeated=0,
+ n_classes=2,
+ n_clusters_per_class=2,
+ weights=None,
+ flip_y=0.1,
+ class_sep=1.0,
+ hypercube=True,
+ shift=0.0,
+ scale=0.1,
+ shuffle=True,
+ random_state=SEED,
+ )
+
+ return {"X": X, self.label_name: y}
+
+ @pytest.fixture
+ def knn_graph(self, dataset):
+ return (
+ NearestNeighbors(n_neighbors=10, metric="cosine")
+ .fit(dataset["X"])
+ .kneighbors_graph(mode="distance")
+ )
+
+ def test_find_issues(self, dataset, knn_graph):
+ """Test that a fresh Datalab instance can check for data_valuation issues with
+ either `features` or a `knn_graph`.
+ """
+
+ datalabs, summaries, scores_list = [], [], []
+ find_issues_input_dicts = [
+ {"features": dataset["X"]},
+ {"knn_graph": knn_graph},
+ ]
+
+ # Make sure that the results work for both input type
+ for kwargs in find_issues_input_dicts:
+ lab = Datalab(data=dataset, label_name=self.label_name)
+ assert lab.issue_summary.empty
+ lab.find_issues(**kwargs, issue_types={"data_valuation": {}})
+ summary = lab.get_issue_summary()
+ assert len(summary) == 1
+ assert "data_valuation" in summary["issue_type"].values
+ scores = lab.get_issues("data_valuation").get(["data_valuation_score"])
+ assert all((scores >= 0) & (scores <= 1))
+
+ datalabs.append(lab)
+ summaries.append(summary)
+ scores_list.append(scores)
+
+ # Check that the results are the same for both input types
+ base_lab = datalabs[0]
+ base_summary = summaries[0]
+ base_scores = scores_list[0]
+ for lab, summary, scores in zip(datalabs, summaries, scores_list):
+ # The knn-graph is either provided or computed from the features, then stored
+ assert np.allclose(
+ knn_graph.toarray(), lab.get_info("statistics")["weighted_knn_graph"].toarray()
+ )
+ # The summary and scores should be the same
+ assert base_summary.equals(summary)
+ assert np.allclose(base_scores, scores)
+
+ def test_find_issues_with_different_metrics(self, dataset, knn_graph):
+ """Test that a fresh Datalab instance can check for data_valuation issues with
+ different metrics.
+ """
+ knn_graph_euclidean = (
+ NearestNeighbors(n_neighbors=10, metric="euclidean")
+ .fit(dataset["X"])
+ .kneighbors_graph(mode="distance")
+ )
+
+ lab = Datalab(data=dataset, label_name=self.label_name)
+ lab.find_issues(features=dataset["X"], issue_types={"data_valuation": {}})
+
+ # The default metric should be "cosine" for "high-dimensional" features
+ assert lab.get_info("statistics")["knn_metric"] == "cosine"
+ assert np.allclose(
+ knn_graph.toarray(), lab.get_info("statistics")["weighted_knn_graph"].toarray()
+ )
+
+ # Test different scenarios of how the metric affects the knn graph
+ scenarios = [
+ {"metric": "cosine", "expected_knn_graph": knn_graph},
+ {"metric": "euclidean", "expected_knn_graph": knn_graph_euclidean},
+ ]
+
+ # Test what happens to the knn graph when the metric is changed
+ for scenario in scenarios:
+ metric = scenario["metric"]
+ expected_knn_graph = scenario["expected_knn_graph"]
+ lab.find_issues(
+ features=dataset["X"], issue_types={"data_valuation": {"metric": metric}}
+ )
+ assert metric == lab.get_info("statistics")["knn_metric"]
+ assert np.allclose(
+ expected_knn_graph.toarray(),
+ lab.get_info("statistics")["weighted_knn_graph"].toarray(),
+ )
+
+
class TestIssueManagersReuseKnnGraph:
"""
`outlier`, `underperforming_group` and `near_duplicate` issue managers require
diff --git a/tests/datalab/datalab/test_multilabel.py b/tests/datalab/datalab/test_multilabel.py
--- a/tests/datalab/datalab/test_multilabel.py
+++ b/tests/datalab/datalab/test_multilabel.py
@@ -119,7 +119,7 @@ def test_available_issue_types(self, lab):
["label", "near_duplicate", "non_iid", "outlier", "null"]
)
assert set(lab.list_possible_issue_types()) == set(
- ["label", "near_duplicate", "non_iid", "outlier", "null"]
+ ["label", "near_duplicate", "non_iid", "outlier", "null", "data_valuation"]
)
@pytest.mark.parametrize(
diff --git a/tests/datalab/datalab/test_regression.py b/tests/datalab/datalab/test_regression.py
--- a/tests/datalab/datalab/test_regression.py
+++ b/tests/datalab/datalab/test_regression.py
@@ -154,7 +154,7 @@ def test_available_issue_types(self, lab):
["label", "outlier", "near_duplicate", "non_iid", "null"]
)
assert set(lab.list_possible_issue_types()) == set(
- ["label", "outlier", "near_duplicate", "non_iid", "null"]
+ ["label", "outlier", "near_duplicate", "non_iid", "null", "data_valuation"]
)
def test_regression_with_features_finds_label_issues(self, lab, regression_data):
|
allow Data Valuation Issue Manager to operate on provided features
It currently only operates on `knn_graph`. Should simply construct the KNN graph based on the features if one does not already exist in the Datalab object. This is what the Outlier Issue Manager does.
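A minimal sketch, using sklearn's `NearestNeighbors`, of how the manager could build the knn graph from `features` when none is stored — mirroring the approach in the patch above (the metric heuristic and `k=10` default come from that patch, not from a guaranteed public API):
```python
import numpy as np
from scipy.sparse import csr_matrix
from sklearn.neighbors import NearestNeighbors

def knn_graph_from_features(features: np.ndarray, k: int = 10) -> csr_matrix:
    # Heuristic metric choice: cosine for higher-dimensional features.
    metric = "cosine" if features.shape[1] > 3 else "euclidean"
    knn = NearestNeighbors(n_neighbors=k, metric=metric).fit(features)
    # Sparse CSR matrix of weighted distances, the format the manager consumes.
    return knn.kneighbors_graph(mode="distance")
```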
| 2024-02-21T15:58:06 |
|
cleanlab/cleanlab
| 1,024 |
cleanlab__cleanlab-1024
|
[
"1019"
] |
01ef893a740229aa7c02c286250596eab7e46009
|
diff --git a/docs/source/conf.py b/docs/source/conf.py
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -54,6 +54,7 @@
"sphinx_copybutton",
"sphinxcontrib.katex",
"sphinxcontrib.gtagjs",
+ "sphinx_jinja",
"sphinx_autodoc_typehints",
"sphinx.ext.doctest",
"sphinxext.opengraph",
|
exact issue name should be listed for each issue type in the Datalab Issue Type Guide
Otherwise it's hard to know how to run an audit for that issue type (e.g. data valuation).
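For example, once the exact name is documented, a targeted audit can be requested by that name (a sketch assuming a `Datalab` instance `lab` and a `features` array are already in hand):
```python
lab.find_issues(features=features, issue_types={"data_valuation": {}})
print(lab.get_issue_summary("data_valuation"))
```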
| 2024-02-22T19:06:15 |
||
cleanlab/cleanlab
| 1,025 |
cleanlab__cleanlab-1025
|
[
"986"
] |
231ab6314dab63554bc2c8b2a85a78a17b154dfa
|
diff --git a/cleanlab/datalab/datalab.py b/cleanlab/datalab/datalab.py
--- a/cleanlab/datalab/datalab.py
+++ b/cleanlab/datalab/datalab.py
@@ -513,6 +513,15 @@ def get_issues(self, issue_name: Optional[str] = None) -> pd.DataFrame:
Additional columns may be present in the DataFrame depending on the type of issue specified.
"""
+
+ # Validate issue_name
+ if issue_name is not None and issue_name not in self.list_possible_issue_types():
+ raise ValueError(
+ f"""Invalid issue_name: {issue_name}. Please specify a valid issue_name from the list of possible issue types.
+ Either, specify one of the following: {self.list_possible_issue_types()}
+ or set issue_name as None to get all issue types.
+ """
+ )
return self.data_issues.get_issues(issue_name=issue_name)
def get_issue_summary(self, issue_name: Optional[str] = None) -> pd.DataFrame:
diff --git a/cleanlab/datalab/internal/data_issues.py b/cleanlab/datalab/internal/data_issues.py
--- a/cleanlab/datalab/internal/data_issues.py
+++ b/cleanlab/datalab/internal/data_issues.py
@@ -242,12 +242,31 @@ def get_issues(self, issue_name: Optional[str] = None) -> pd.DataFrame:
Additional columns may be present in the DataFrame depending on the type of issue specified.
"""
+ if self.issues.empty:
+ raise ValueError(
+ """No issues available for retrieval. Please check the following before using `get_issues`:
+ 1. Ensure `find_issues` was executed. If not, please run it with the necessary parameters.
+ 2. If `find_issues` was run but you're seeing this message,
+ it may have encountered limitations preventing full analysis.
+ However, partial checks can still provide valuable insights.
+ Review `find_issues` output carefully for any specific actions needed
+ to facilitate a more comprehensive analysis before calling `get_issues`.
+ """
+ )
if issue_name is None:
return self.issues
columns = [col for col in self.issues.columns if issue_name in col]
if not columns:
- raise ValueError(f"No columns found for issue type '{issue_name}'.")
+ raise ValueError(
+ f"""No columns found for issue type '{issue_name}'. Ensure the following:
+ 1. `find_issues` has been executed. If it hasn't, please run it.
+ 2. Check `find_issues` output to verify that the issue type '{issue_name}' was included in the checks to
+ ensure it was not excluded accidentally before the audit.
+ 3. Review `find_issues` output for any errors or warnings that might indicate the check for '{issue_name}' issues failed to complete.
+ This can provide better insights into what adjustments may be necessary.
+ """
+ )
specific_issues = self.issues[columns]
info = self.get_info(issue_name=issue_name)
|
diff --git a/tests/datalab/datalab/test_datalab.py b/tests/datalab/datalab/test_datalab.py
--- a/tests/datalab/datalab/test_datalab.py
+++ b/tests/datalab/datalab/test_datalab.py
@@ -1708,3 +1708,51 @@ def test_report_with_one_label_issue(self, data):
assert report.startswith(
expected_header
), "Report should contain a message for one issue found"
+
+
[email protected]
+class TestDatalabGetIssuesMethod:
+
+ @pytest.fixture
+ def lab(self):
+ """Testing label issues in regression task."""
+ dataset = {"X": [[1, 2], [3, 4], [5, 6]], "y": [0, 0.5, 1.0]}
+ return Datalab(data=dataset, label_name="y", task="regression")
+
+ def test_get_issues_with_unsuccessful_find_issues(self, lab):
+ with patch("builtins.print") as mock_print:
+ # Running 5-fold CV on 3 samples shouldn't work for detecting label issues.
+ lab.find_issues(features=np.array(lab.data["X"]), issue_types={"label": {}})
+ mock_print.assert_any_call(
+ "Error in label: There are too few examples to conduct 5-fold cross validation. "
+ "You can either reduce cv_n_folds for cross validation, or decrease k to exclude less data."
+ )
+ mock_print.assert_any_call(
+ "Failed to check for these issue types: [RegressionLabelIssueManager]"
+ )
+
+ # The issues should be empty,
+ assert lab.issues.empty
+
+ # so the getter method should raise an error.
+ with pytest.raises(
+ ValueError, match="No issues available for retrieval. Please check the following"
+ ):
+ lab.get_issues("label")
+
+ # Run issue check that only needs features
+ lab.find_issues(features=np.array(lab.data["X"]), issue_types={"null": {}})
+
+ # The label issues we searched for should not be present
+ assert not lab.issues.empty
+ with pytest.raises(ValueError, match="No columns found for issue type 'label'."):
+ lab.get_issues("label")
+
+ def test_invalid_issue_name(self, lab):
+ lab.find_issues(features=np.array(lab.data["X"]), issue_types={"null": {}})
+
+ assert not lab.issues.empty
+
+ invalid_issue_name = "nul"
+ with pytest.raises(ValueError, match=f"Invalid issue_name: {invalid_issue_name}."):
+ lab.get_issues(issue_name=invalid_issue_name)
|
Calling Datalab.get_issues() when there are no issues throws an error
When finding issues using the code below (on a dataset where there are no issues), the following attempts (1, 2) throw an error:
```python
lab = cleanlab.Datalab(data=train[numerical], label_name="label", task="regression")
lab.find_issues(pred_probs=y_train_pred)
```
1. `label_quality_scores = lab.get_issues('label')['label_issues']`
2. `label_quality_scores = lab.get_issues('label')`
Instead it should print something like "There are no issues".
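A sketch of the friendlier behavior the report asks for — checking for an empty issues table before retrieval (the merged fix above instead raises a descriptive `ValueError`):
```python
if lab.issues.empty:
    print("There are no issues to retrieve; run find_issues() first and "
          "check its output for any checks that failed.")
else:
    label_issues = lab.get_issues("label")
```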
| 2024-02-24T00:09:54 |
|
cleanlab/cleanlab
| 1,028 |
cleanlab__cleanlab-1028
|
[
"1027"
] |
a2dec09d20dc1114fd0ba74661b8c63b17c9d45f
|
diff --git a/cleanlab/datalab/datalab.py b/cleanlab/datalab/datalab.py
--- a/cleanlab/datalab/datalab.py
+++ b/cleanlab/datalab/datalab.py
@@ -28,6 +28,7 @@
import pandas as pd
import cleanlab
+from cleanlab.datalab.internal.adapter.constants import DEFAULT_CLEANVISION_ISSUES
from cleanlab.datalab.internal.adapter.imagelab import create_imagelab
from cleanlab.datalab.internal.data import Data
from cleanlab.datalab.internal.display import _Displayer
@@ -571,7 +572,10 @@ def list_possible_issue_types(self) -> List[str]:
--------
:py:class:`REGISTRY <cleanlab.datalab.internal.issue_manager_factory.REGISTRY>` : All available issue types and their corresponding issue managers can be found here.
"""
- return _list_possible_issue_types(task=self.task)
+ possible_issue_types = _list_possible_issue_types(task=self.task)
+ if self._imagelab is not None:
+ possible_issue_types.extend(DEFAULT_CLEANVISION_ISSUES.keys())
+ return possible_issue_types
def list_default_issue_types(self) -> List[str]:
"""Returns a list of the issue types that are run by default
@@ -581,7 +585,10 @@ def list_default_issue_types(self) -> List[str]:
--------
:py:class:`REGISTRY <cleanlab.datalab.internal.issue_manager_factory.REGISTRY>` : All available issue types and their corresponding issue managers can be found here.
"""
- return _list_default_issue_types(task=self.task)
+ default_issue_types = _list_default_issue_types(task=self.task)
+ if self._imagelab is not None:
+ default_issue_types.extend(DEFAULT_CLEANVISION_ISSUES.keys())
+ return default_issue_types
def save(self, path: str, force: bool = False) -> None:
"""Saves this Datalab object to file (all files are in folder at `path/`).
|
diff --git a/tests/datalab/test_cleanvision_integration.py b/tests/datalab/test_cleanvision_integration.py
--- a/tests/datalab/test_cleanvision_integration.py
+++ b/tests/datalab/test_cleanvision_integration.py
@@ -250,3 +250,53 @@ def test_labels_not_required_for_imagelab_issues(
for issue_type in IMAGELAB_ISSUE_TYPES:
assert issue_type in captured.out
+
+ @pytest.fixture
+ def lab(self, image_dataset):
+ lab = Datalab(data=image_dataset, label_name=LABEL_NAME, image_key=IMAGE_NAME)
+ lab.find_issues()
+ return lab
+
+ def test_get_summary(self, lab):
+ summary = lab.get_issue_summary("dark")
+ assert len(summary) == 1
+ num_issues = summary["num_issues"].values[0]
+ assert num_issues == 1
+
+ @pytest.mark.parametrize(
+ "list_method", ["list_possible_issue_types", "list_default_issue_types"]
+ )
+ def test_list_issue_type_method(self, image_dataset, lab, list_method):
+ method = getattr(lab, list_method)
+ issue_types = method()
+
+ # Check that Datalab without Imagelab injected has just a subset of possible/default issue types
+ minimal_lab = Datalab(data=image_dataset)
+ minimal_method = getattr(minimal_lab, list_method)
+ datalab_issue_types = minimal_method()
+ assert set(datalab_issue_types).issubset(set(issue_types))
+
+ # The additional issue types found by method should be the same as IMAGELAB_ISSUE_TYPES
+ assert set(issue_types).difference(datalab_issue_types) == set(IMAGELAB_ISSUE_TYPES)
+
+ @pytest.mark.issue1027
+ def test_get_issues(self, lab):
+ """
+ Test the `get_issues` method of the `lab` object.
+
+ This method checks if the columns returned by the `get_issues` method
+ match the expected columns for each issue type defined in `IMAGELAB_ISSUE_TYPES`.
+
+ Raises:
+ AssertionError: If the columns returned by `get_issues` do not match the expected columns.
+
+ """
+ test_condition = lambda s: set(lab.get_issues(s).columns) == set(
+ [f"{s}_score", f"is_{s}_issue"]
+ )
+ failed_assertions = [
+ issue_type for issue_type in IMAGELAB_ISSUE_TYPES if not test_condition(issue_type)
+ ]
+ assert (
+ len(failed_assertions) == 0
+ ), f"Tests for `get_issues` with these `issue_types` failed: {failed_assertions}"
|
Datalab.get_issues() for cleanvision issue types fails
`Datalab.get_issues()` validates against the list of possible issue types as of #1025, but that list does not cover the case where an image column name is specified, which brings in Imagelab and its image-related issue types.
This led to an error in the CI (see [stack trace](#user-content-stack-trace)), where the image tutorial notebook cannot call `lab.get_issues("dark")` for an image dataset, while "dark" is a valid issue type in the cleanvision package. Datalab can already find "dark" issues, but the validation step in the `get_issues` method considers cleanvision-specific issue types "invalid".
This issue is resolved when:
- `Datalab` can list the issue types it supports in Imagelab when calling `find_issues` (there are a few that are not currently supported).
- A test is added where `lab.get_issues("dark")` should return the expected type of DataFrame (or rather that the expected score and issue columns are there); see the sketch below.
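A minimal sketch of the column assertion such a test could make, following Datalab's usual `is_<type>_issue` / `<type>_score` column convention:
```python
dark_issues = lab.get_issues("dark")
assert {"is_dark_issue", "dark_score"} <= set(dark_issues.columns)
```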
<h1 id="stack-trace">Stack trace</h1>
```shell
Notebook error:
CellExecutionError in tutorials/image.ipynb:
------------------
dark_issues = lab.get_issues("dark")
dark_issues_df = dark_issues.query("is_dark_issue").sort_values("dark_score")
dark_issues_df.head()
------------------
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
Input In [26], in <module>
----> 1 dark_issues = lab.get_issues("dark")
2 dark_issues_df = dark_issues.query("is_dark_issue").sort_values("dark_score")
3 dark_issues_df.head()
File ~/work/cleanlab/cleanlab/cleanlab/datalab/datalab.py:519, in Datalab.get_issues(self, issue_name)
517 # Validate issue_name
518 if issue_name is not None and issue_name not in self.list_possible_issue_types():
--> 519 raise ValueError(
520 f"""Invalid issue_name: {issue_name}. Please specify a valid issue_name from the list of possible issue types.
521 Either, specify one of the following: {self.list_possible_issue_types()}
522 or set issue_name as None to get all issue types.
523 """
524 )
525 return self.data_issues.get_issues(issue_name=issue_name)
ValueError: Invalid issue_name: dark. Please specify a valid issue_name from the list of possible issue types.
Either, specify one of the following: ['outlier', 'label', 'near_duplicate', 'non_iid', 'class_imbalance', 'underperforming_group', 'data_valuation', 'null']
or set issue_name as None to get all issue types.
You can ignore this error by setting the following in conf.py:
nbsphinx_allow_errors = True
Traceback (most recent call last):
File "/opt/hostedtoolcache/Python/3.11.8/x64/bin/sphinx-multiversion", line 8, in <module>
sys.exit(main())
^^^^^^
File "/opt/hostedtoolcache/Python/3.11.8/x64/lib/python3.11/site-packages/sphinx_multiversion/main.py", line [338](https://github.com/cleanlab/cleanlab/actions/runs/8028055736/job/21932780898#step:18:339), in main
subprocess.check_call(cmd, cwd=current_cwd)
File "/opt/hostedtoolcache/Python/3.11.8/x64/lib/python3.11/subprocess.py", line 413, in check_call
raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command '('/opt/hostedtoolcache/Python/3.11.8/x64/bin/python3', '-R', '-m', 'sphinx', '-D', 'smv_metadata_path=/tmp/tmpnljvr_tm/versions.json', '-D', 'smv_branch_whitelist=master', '-D', 'smv_tag_whitelist=None', '-D', 'smv_current_version=master', '-c', '/home/runner/work/cleanlab/cleanlab/docs/source', '/tmp/tmpnljvr_tm/22aa91c29e81ea930cff74d93df083548fcf1f8a/docs/source', '/home/runner/work/cleanlab/cleanlab/cleanlab-docs/master')' returned non-zero exit status 2.
reading sources... [ 88%] tutorials/image
Error: Process completed with exit code 1.
```
| 2024-02-26T10:46:28 |
|
cleanlab/cleanlab
| 1,079 |
cleanlab__cleanlab-1079
|
[
"1074"
] |
0c5d8cbb0b1ff17a3d3e1a1e5c732b0ec1f4c61a
|
diff --git a/cleanlab/internal/outlier.py b/cleanlab/internal/outlier.py
--- a/cleanlab/internal/outlier.py
+++ b/cleanlab/internal/outlier.py
@@ -18,6 +18,8 @@
Helper functions used internally for outlier detection tasks.
"""
+from typing import Union
+
import numpy as np
@@ -67,9 +69,56 @@ def transform_distances_to_scores(
array([0.88988177, 0.80519832])
"""
# Map ood_features_scores to range 0-1 with 0 = most concerning
- ood_features_scores: np.ndarray = np.exp(-1 * avg_distances / scaling_factor * t)
+ return np.exp(-1 * avg_distances / scaling_factor * t)
+
+
+def correct_precision_errors(
+ scores: np.ndarray,
+ avg_distances: np.ndarray,
+ metric: str,
+ C: int = 100,
+ p: Union[int, None] = None,
+):
+ """
+ Ensure that scores where avg_distances are below the tolerance threshold get a score of one.
+
+ Parameters
+ ----------
+ scores : np.ndarray
+ An array of scores of shape ``(N)``, where N is the number of examples.
+ Each entry represents a score between 0 and 1.
+
+ avg_distances : np.ndarray
+ An array of distances of shape ``(N)``, where N is the number of examples.
+ Each entry represents an example's average distance to its k nearest neighbors.
+
+ metric : str
+ The metric used by the knn algorithm to calculate the distances.
+ It must be 'cosine', 'euclidean' or 'minkowski', otherwise this function does nothing.
+
+ C : int, default=100
+ Multiplier used to increase the tolerance of the acceptable precision differences.
+
+ p : int, default=None
+ This value is only used when metric is 'minkowski'.
+ A ValueError will be raised if metric is 'minkowski' and 'p' was not provided.
+
+ Returns
+ -------
+ fixed_scores : np.ndarray
+ An array of scores of shape ``(N,)`` for N examples with scores between 0 and 1.
+ """
+ if metric == "cosine":
+ tolerance = C * np.finfo(np.float_).epsneg
+ elif metric == "euclidean":
+ tolerance = np.sqrt(C * np.finfo(np.float_).eps)
+ elif metric == "minkowski":
+ if p is None:
+ raise ValueError("When metric is 'minkowski' you must specify the 'p' parameter")
+ tolerance = (C * np.finfo(np.float_).eps) ** (1 / p)
+ else:
+ return scores
- # Set scores to 1 if the average distance is close to 0
- inds = np.isclose(avg_distances, 0)
- ood_features_scores[inds] = 1.0
- return ood_features_scores
+ candidates_mask = avg_distances < tolerance
+ scores[candidates_mask] = 1
+ return scores
diff --git a/cleanlab/outlier.py b/cleanlab/outlier.py
--- a/cleanlab/outlier.py
+++ b/cleanlab/outlier.py
@@ -21,17 +21,19 @@
"""
import warnings
+from typing import Dict, Optional, Tuple, Union
+
import numpy as np
-from cleanlab.count import get_confident_thresholds
-from sklearn.neighbors import NearestNeighbors
from sklearn.exceptions import NotFittedError
-from typing import Optional, Union, Tuple, Dict
+from sklearn.neighbors import NearestNeighbors
+
+from cleanlab.count import get_confident_thresholds
from cleanlab.internal.label_quality_utils import (
_subtract_confident_thresholds,
get_normalized_entropy,
)
from cleanlab.internal.numerics import softmax
-from cleanlab.internal.outlier import transform_distances_to_scores
+from cleanlab.internal.outlier import correct_precision_errors, transform_distances_to_scores
from cleanlab.internal.validation import assert_valid_inputs, labels_to_array
from cleanlab.typing import LabelLike
@@ -470,6 +472,13 @@ def _get_ood_features_scores(
ood_features_scores = transform_distances_to_scores(
avg_knn_distances, t, scaling_factor=scaling_factor
)
+ distance_metric = knn.metric
+ p = None
+ if distance_metric == "minkowski":
+ p = knn.p
+ ood_features_scores = correct_precision_errors(
+ ood_features_scores, avg_knn_distances, knn.metric, p=p
+ )
return (ood_features_scores, knn)
|
diff --git a/tests/test_outlier.py b/tests/test_outlier.py
--- a/tests/test_outlier.py
+++ b/tests/test_outlier.py
@@ -14,19 +14,22 @@
# You should have received a copy of the GNU Affero General Public License
# along with cleanlab. If not, see <https://www.gnu.org/licenses/>.
-from hypothesis import example, settings, strategies as st
-from hypothesis import given
import numpy as np
import pandas as pd
import pytest
-from cleanlab.benchmarking.noise_generation import generate_noise_matrix_from_trace
-from cleanlab.benchmarking.noise_generation import generate_noisy_labels
+from hypothesis import example, given, settings
+from hypothesis import strategies as st
+from sklearn.linear_model import LogisticRegression as LogReg
+from sklearn.neighbors import NearestNeighbors
+
from cleanlab import count, outlier
+from cleanlab.benchmarking.noise_generation import (
+ generate_noise_matrix_from_trace,
+ generate_noisy_labels,
+)
from cleanlab.count import get_confident_thresholds
-from cleanlab.outlier import OutOfDistribution
from cleanlab.internal.label_quality_utils import get_normalized_entropy
-from sklearn.neighbors import NearestNeighbors
-from sklearn.linear_model import LogisticRegression as LogReg
+from cleanlab.outlier import OutOfDistribution
def make_data(
@@ -660,7 +663,7 @@ def test_wrong_info_get_ood_predictions_scores():
@given(
fill_value=st.floats(
min_value=5 * float(np.finfo(np.float_).eps),
- max_value=10,
+ max_value=5,
exclude_min=False,
allow_subnormal=False,
allow_infinity=False,
@@ -674,8 +677,42 @@ def test_scores_for_identical_examples(fill_value, K):
N = 20
features = np.full((N, K), fill_value=fill_value)
- scores = OutOfDistribution().fit_score(features=features)
+ ood = OutOfDistribution()
+ scores = ood.fit_score(features=features, verbose=False)
+
+ # Dataset with only
+ expected_score = np.full(N, 1.0)
+ np.testing.assert_array_equal(
+ scores,
+ expected_score,
+ err_msg=f"The calculated distances were {ood.params['knn'].kneighbors()}",
+ )
+
+
+@given(K=st.integers(min_value=2, max_value=100))
+@settings(deadline=None)
+def test_scores_for_identical_examples_across_rows(K):
+ N = 20
+ fill_value = np.random.random(K)
+ features = np.full((N, K), fill_value=fill_value)
+ ood = OutOfDistribution()
+ scores = ood.fit_score(features=features, verbose=False)
# Dataset with only
expected_score = np.full(N, 1.0)
- np.testing.assert_array_equal(scores, expected_score)
+ np.testing.assert_array_equal(
+ scores,
+ expected_score,
+ err_msg=f"The calculated distances were {ood.params['knn'].kneighbors()}",
+ )
+
+ if K < 4:
+ # This little changes should not affect euclidean calculation
+ features += np.random.random(features.shape) * np.sqrt(np.finfo(np.float_).eps)
+ ood = OutOfDistribution()
+ scores = ood.fit_score(features=features, verbose=False)
+ np.testing.assert_array_equal(
+ scores,
+ expected_score,
+ err_msg=f"The calculated distances were {ood.params['knn'].kneighbors()}",
+ )
|
test_scores_for_identical_examples unit test fails
see logs from here: https://github.com/cleanlab/cleanlab/actions/runs/8455871879/job/23167543959

| 2024-04-01T11:25:23 |
|
cleanlab/cleanlab
| 1,099 |
cleanlab__cleanlab-1099
|
[
"1065"
] |
7b4b0690aec3f23448ea3fade9afe2b3fe8434d2
|
diff --git a/cleanlab/datalab/internal/issue_finder.py b/cleanlab/datalab/internal/issue_finder.py
--- a/cleanlab/datalab/internal/issue_finder.py
+++ b/cleanlab/datalab/internal/issue_finder.py
@@ -41,15 +41,16 @@
)
from cleanlab.datalab.internal.model_outputs import (
MultiClassPredProbs,
- RegressionPredictions,
MultiLabelPredProbs,
+ RegressionPredictions,
)
from cleanlab.datalab.internal.task import Task
if TYPE_CHECKING: # pragma: no cover
- import numpy.typing as npt
from typing import Callable
+ import numpy.typing as npt
+
from cleanlab.datalab.datalab import Datalab
@@ -58,6 +59,7 @@
"outlier": ["pred_probs", "features", "knn_graph"],
"near_duplicate": ["features", "knn_graph"],
"non_iid": ["pred_probs", "features", "knn_graph"],
+ # The underperforming_group issue type requires a pair of inputs: (pred_probs, <any_of_the_other_three>)
"underperforming_group": ["pred_probs", "features", "knn_graph", "cluster_ids"],
"data_valuation": ["features", "knn_graph"],
"class_imbalance": [],
@@ -482,4 +484,16 @@ def get_available_issue_types(self, **kwargs):
if drop_class_imbalance_check:
issue_types_copy.pop("class_imbalance")
+ required_pairs_for_underperforming_group = [
+ ("pred_probs", "features"),
+ ("pred_probs", "knn_graph"),
+ ("pred_probs", "cluster_ids"),
+ ]
+ drop_underperforming_group_check = "underperforming_group" in issue_types_copy and not any(
+ all(key in kwargs and kwargs.get(key) is not None for key in pair)
+ for pair in required_pairs_for_underperforming_group
+ )
+ if drop_underperforming_group_check:
+ issue_types_copy.pop("underperforming_group")
+
return issue_types_copy
|
diff --git a/tests/datalab/test_issue_finder.py b/tests/datalab/test_issue_finder.py
--- a/tests/datalab/test_issue_finder.py
+++ b/tests/datalab/test_issue_finder.py
@@ -1,9 +1,8 @@
-import pytest
import numpy as np
-
-from cleanlab.datalab.internal.issue_finder import IssueFinder
+import pytest
from cleanlab import Datalab
+from cleanlab.datalab.internal.issue_finder import IssueFinder
from cleanlab.datalab.internal.task import Task
@@ -25,6 +24,101 @@ def issue_finder(self, lab):
def test_init(self, issue_finder):
assert issue_finder.verbosity == 1
+ @pytest.mark.parametrize("key", ["pred_probs", "features", "knn_graph"])
+ def test_get_available_issue_types_no_kwargs(self, issue_finder, key):
+ expected_issue_types = {"class_imbalance": {}}
+ issue_types = issue_finder.get_available_issue_types(**{key: None})
+ assert (
+ issue_types == expected_issue_types
+ ), "Only class_imbalance issue type for classification requires no kwargs"
+
+ @pytest.mark.parametrize(
+ "issue_types",
+ [
+ {"label": {}},
+ {"label": {"some_arg": "some_value"}},
+ {"label": {"some_arg": "some_value"}, "outlier": {}},
+ {"label": {}, "outlier": {}, "some_issue_type": {"some_arg": "some_value"}},
+ {},
+ ],
+ )
+ def test_get_available_issue_types_with_issue_types(self, issue_finder, issue_types):
+ available_issue_types = issue_finder.get_available_issue_types(issue_types=issue_types)
+ assert (
+ available_issue_types == issue_types
+ ), f"Failed to get available issue types with issue_types={issue_types}"
+
+ @pytest.mark.parametrize(
+ "keys, should_contain_underperforming_group",
+ [
+ # Test cases where 'pred_probs' is not provided, should all give False
+ (["features"], False),
+ (["knn_graph"], False),
+ (["cluster_ids"], False),
+ (["features", "knn_graph"], False),
+ (["features", "cluster_ids"], False),
+ (["knn_graph", "cluster_ids"], False),
+ (["features", "knn_graph", "cluster_ids"], False),
+ # Test cases where 'pred_probs' is provided should all give True
+ (["pred_probs", "features"], True),
+ (["pred_probs", "knn_graph"], True),
+ (["pred_probs", "cluster_ids"], True),
+ (["pred_probs", "features", "knn_graph"], True),
+ (["pred_probs", "features", "cluster_ids"], True),
+ (["pred_probs", "knn_graph", "cluster_ids"], True),
+ (["pred_probs", "features", "knn_graph", "cluster_ids"], True),
+ # only if other required keys are provided
+ (["pred_probs"], False),
+ ],
+ ids=lambda v: (
+ f"keys={v} "
+ if isinstance(v, list)
+ else ("> available" if v is True else "> unavailable")
+ ),
+ )
+ # Some warnings about preferring cluster_ids over knn_graph, or knn_graph over features can be ignored
+ @pytest.mark.filterwarnings(r"ignore:.*will (likely )?prefer.*:UserWarning")
+ # No other warnings should be allowed
+ @pytest.mark.filterwarnings("error")
+ def test_underperforming_group_availability_issue_1065(
+ self, issue_finder, keys, should_contain_underperforming_group
+ ):
+ """
+ Tests the availability of the 'underperforming_group' issue type based on the presence of 'pred_probs' and other required keys in the supplied arguments.
+
+ This test addresses issue #1065, where the mapping that decides which issue types to run based on the supplied arguments is incorrect.
+ Specifically, the 'underperforming_group' check should only be executed if 'pred_probs' and another required key are included in the supplied arguments.
+ See: https://github.com/cleanlab/cleanlab/issues/1065.
+
+ Parameters
+ ----------
+ keys : list
+ A list of keys to be included in the kwargs.
+ should_contain_underperforming_group : bool
+ A flag indicating whether the 'underperforming_group' issue type should be present in the available issue types.
+
+ Scenarios
+ ---------
+ Various combinations of 'features', 'pred_probs', 'knn_graph', and 'cluster_ids' are tested.
+
+ Asserts
+ -------
+ Ensures 'underperforming_group' is in the available issue types if 'pred_probs' and another required key are provided.
+ Ensures 'underperforming_group' is not in the available issue types if the required conditions are not met.
+ """
+ mock_value = object() # Mock value to simulate presence of the required keys
+ kwargs = {key: mock_value for key in keys}
+
+ available_issue_types = issue_finder.get_available_issue_types(**kwargs)
+ if should_contain_underperforming_group:
+ assert (
+ "underperforming_group" in available_issue_types
+ ), "underperforming_group should be available if 'pred_probs' and another required key are provided"
+ else:
+ assert (
+ "underperforming_group" not in available_issue_types
+ ), "underperforming_group should not be available if the required conditions are not met"
+
def test_get_available_issue_types(self, issue_finder):
expected_issue_types = {"class_imbalance": {}}
# Test with no kwargs, no issue type expected to be returned
@@ -47,6 +141,21 @@ def test_get_available_issue_types(self, issue_finder):
fail_msg = f"Failed to get available issue types with issue_types={issue_types}"
assert available_issue_types == issue_types, fail_msg
+ ## Test availability of underperforming_group issue type
+ only_features_available = {"features": np.random.random((10, 2))}
+ available_issue_types = issue_finder.get_available_issue_types(**only_features_available)
+ fail_msg = "underperforming_group should not be available if 'pred_probs' is not provided"
+ assert "underperforming_group" not in available_issue_types, fail_msg
+ features_and_pred_probs_available = {
+ **only_features_available,
+ "pred_probs": np.random.random((10, 2)),
+ }
+ available_issue_types = issue_finder.get_available_issue_types(
+ **features_and_pred_probs_available
+ )
+ fail_msg = "underperforming_group should be available if 'pred_probs' is provided"
+ assert "underperforming_group" in available_issue_types, fail_msg
+
def test_find_issues(self, issue_finder, lab):
N = len(lab.data)
K = lab.get_info("statistics")["num_classes"]
|
lab.find_issues(features=features) outputs error for underperforming issue
Output of `lab.find_issues(features=features)`:
```
/Users/sanjana/cleanlab_home/fork_cleanlab/cleanlab/datalab/internal/issue_finder.py:457: UserWarning: No labels were provided. The 'label' issue type will not be run.
warnings.warn("No labels were provided. " "The 'label' issue type will not be run.")
Finding null issues ...
Finding outlier issues ...
Fitting OOD estimator based on provided features ...
Finding near_duplicate issues ...
Finding non_iid issues ...
Finding underperforming_group issues ...
Error in underperforming_group: UnderperformingGroupIssueManager.find_issues() missing 1 required positional argument: 'pred_probs'
Failed to check for these issue types: [UnderperformingGroupIssueManager]
Audit complete. 984 issues found in the dataset.
```
Dataset: https://www.kaggle.com/datasets/laotse/credit-risk-dataset/data
Code
```python
import pandas as pd
from cleanlab import Datalab
from sklearn.preprocessing import StandardScaler
import numpy as np
df = pd.read_csv("./credit_risk_dataset.csv")
df = df[~df.isnull().any(axis=1)].copy()
feature_columns = df.columns.to_list()
feature_columns.remove("loan_status")
X_raw = df[feature_columns]
labels = df["loan_status"]
cat_features = [
"person_home_ownership",
"loan_intent",
"loan_grade",
"cb_person_default_on_file",
]
numeric_features = [
"person_age",
"person_income",
"person_emp_length",
"loan_amnt",
"loan_int_rate",
"loan_percent_income",
"cb_person_cred_hist_length",
]
X_encoded = pd.get_dummies(X_raw, columns=cat_features, drop_first=True, dtype='float')
scaler = StandardScaler()
X_processed = X_encoded.copy()
X_processed[numeric_features] = scaler.fit_transform(X_encoded[numeric_features])
lab = Datalab({"X": X_processed.to_numpy(), "y": labels})
lab.find_issues(features=X_processed.to_numpy())
```
|
@elisno seems like the mapping that decides which issue types to run based on the supplied args is off. The underperforming_group check should only run if `pred_probs` is included in the supplied args.
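A minimal sketch of the gating the hint describes, mirroring the required-pairs check in the patch above (the function and argument names here are illustrative):
```python
def prune_underperforming_group(issue_types: dict, **kwargs) -> dict:
    required_pairs = [
        ("pred_probs", "features"),
        ("pred_probs", "knn_graph"),
        ("pred_probs", "cluster_ids"),
    ]
    has_required_pair = any(
        all(kwargs.get(key) is not None for key in pair) for pair in required_pairs
    )
    if "underperforming_group" in issue_types and not has_required_pair:
        issue_types.pop("underperforming_group")
    return issue_types
```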
| 2024-04-14T19:19:27 |
djangopackages/djangopackages
| 154 |
djangopackages__djangopackages-154
|
[
"152"
] |
8e6d27a09e637beee360476de55320160a247917
|
diff --git a/apiv1/resources.py b/apiv1/resources.py
--- a/apiv1/resources.py
+++ b/apiv1/resources.py
@@ -10,7 +10,7 @@
from tastypie import fields
from tastypie.bundle import Bundle
from tastypie.exceptions import NotFound
-from tastypie.resources import ModelResource
+from tastypie.resources import ModelResource, ALL_WITH_RELATIONS
from grid.models import Grid
from homepage.models import Dpotw, Gotw
@@ -209,3 +209,6 @@ class Meta:
allowed_methods = ['get']
include_absolute_url = True
lookup_field = 'slug'
+ filtering = {
+ "category": ALL_WITH_RELATIONS
+ }
diff --git a/urls.py b/urls.py
--- a/urls.py
+++ b/urls.py
@@ -43,12 +43,10 @@
from apiv1.resources import (
GotwResource, DpotwResource,
PackageResource, CategoryResource,
- GridResource, PackageResourceBase,
- UserResource
+ GridResource, UserResource
)
v1_api = Api()
-v1_api.register(PackageResourceBase())
v1_api.register(PackageResource())
v1_api.register(CategoryResource())
v1_api.register(GridResource())
|
diff --git a/apiv1/tests/test_package.py b/apiv1/tests/test_package.py
--- a/apiv1/tests/test_package.py
+++ b/apiv1/tests/test_package.py
@@ -4,6 +4,7 @@
from grid.models import Grid, GridPackage
from package.models import Package, Category
import json
+import urllib
class PackageV1Tests(TestCase):
@@ -12,10 +13,14 @@ def setUp(self):
Set up initial data, done through Python because fixtures break way too
quickly with migrations and are terribly hard to maintain.
"""
- app = Category.objects.create(
+ self.app = Category.objects.create(
title='App',
slug='app',
)
+ self.framework = Category.objects.create(
+ title='Framework',
+ slug='framework',
+ )
self.grid = Grid.objects.create(
title='A Grid',
slug='grid',
@@ -23,15 +28,21 @@ def setUp(self):
self.pkg1 = Package.objects.create(
title='Package1',
slug='package1',
- category=app,
+ category=self.app,
repo_url='https://github.com/pydanny/django-uni-form'
)
self.pkg2 = Package.objects.create(
title='Package2',
slug='package2',
- category=app,
+ category=self.app,
repo_url='https://github.com/cartwheelweb/packaginator'
)
+ self.pkg3 = Package.objects.create(
+ title='Package3',
+ slug='package3',
+ category=self.framework,
+ repo_url='https://github.com/divio/django-cms'
+ )
GridPackage.objects.create(package=self.pkg1, grid=self.grid)
GridPackage.objects.create(package=self.pkg2, grid=self.grid)
user = User.objects.create_user('user', '[email protected]', 'user')
@@ -67,4 +78,27 @@ def test_01_packages_usage(self):
raw_json_pkg2 = response_pkg2.content
pkg_2 = json.loads(raw_json_pkg2)
usage_count_pkg2 = int(pkg_2['usage_count'])
- self.assertEqual(usage_count_pkg2, self.pkg2.usage.count())
\ No newline at end of file
+ self.assertEqual(usage_count_pkg2, self.pkg2.usage.count())
+
+ def test_02_category_packages(self):
+ urlkwargs_pkg_list = {
+ 'api_name': 'v1',
+ 'resource_name': 'package',
+ }
+ querystring_filter_app = {
+ 'category__slug': self.app.slug
+ }
+ url_app_pkg = "%s?%s" % (reverse('api_dispatch_list',
+ kwargs=urlkwargs_pkg_list), urllib.urlencode(querystring_filter_app))
+ response_app_pkg = self.client.get(url_app_pkg)
+ # check that the request was successful
+ self.assertEqual(response_app_pkg.status_code, 200)
+ # check that we have correct number of packages in filter
+ raw_json_app_pkg = response_app_pkg.content
+ app_pkg = json.loads(raw_json_app_pkg)
+ app_pkg_count = int(app_pkg['meta']['total_count'])
+ self.assertEqual(app_pkg_count, self.app.package_set.count())
+ # Check that we have filter applied correclty
+ app_package_slug_list = self.app.package_set.values_list('slug', flat=True)
+ self.assertIn(self.pkg1.slug, app_package_slug_list)
+ self.assertIn(self.pkg2.slug, app_package_slug_list)
diff --git a/apiv1/tests/test_resources.py b/apiv1/tests/test_resources.py
--- a/apiv1/tests/test_resources.py
+++ b/apiv1/tests/test_resources.py
@@ -21,6 +21,9 @@ def test_01_category(self):
self.assertTrue(cat_url in response.content)
response = self.client.get(cat_url)
self.assertEqual(response.status_code, 200)
+ query_filter = "?category__slug=apps"
+ cat_filter_url = "%s%s" % (list_url, query_filter)
+ self.assertEqual(response.status_code, 200)
def test_02_grid(self):
kwargs = {'resource_name': 'grid'}
@@ -34,3 +37,16 @@ def test_02_grid(self):
self.assertTrue(grid_url in response.content)
response = self.client.get(grid_url)
self.assertEqual(response.status_code, 200)
+
+ def test_03_package(self):
+ kwargs = {'resource_name': 'package'}
+ kwargs.update(self.base_kwargs)
+ # check 200's
+ list_url = reverse('api_dispatch_list', kwargs=kwargs)
+ response = self.client.get(list_url)
+ self.assertEqual(response.status_code, 200)
+ kwargs['pk'] = 'testability'
+ package_url = reverse('api_dispatch_detail', kwargs=kwargs)
+ self.assertTrue(package_url in response.content)
+ response = self.client.get(package_url)
+ self.assertEqual(response.status_code, 200)
|
Packages API should support filtering
Right now there is no way to get the list of packages in a specific category. The _Category_ resource does not contain a list of package resource_urls the way the _Grid_ resource does, and there is no way to apply a filter on the _Package_ resource.
Ideally the _Category_ resource should contain a list of packages like the _Grid_ resource does:
```
packages: [
"/api/v1/package/django-cms/"
"/api/v1/package/django-page-cms/"
"/api/v1/package/django-lfc/"
"/api/v1/package/merengue/"
"/api/v1/package/mezzanine/"
"/api/v1/package/philo/"
"/api/v1/package/pylucid/"
"/api/v1/package/django-gitcms/"
"/api/v1/package/django-simplepages/"
"/api/v1/package/djpcms/"
"/api/v1/package/feincms/"
]
```
Furthermore, there should be a filter on the _Package_ resource like this:
`/package/?category=apps`
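With django-tastypie, this kind of relational filter is typically enabled by whitelisting the related field in the resource's Meta — a sketch along the lines of the patch above, with the query spelled as `category__slug` the way the accompanying test exercises it:
```python
from tastypie.resources import ModelResource, ALL_WITH_RELATIONS
from package.models import Package

class PackageResource(ModelResource):
    class Meta:
        queryset = Package.objects.all()
        resource_name = "package"
        # Whitelists the relation so clients can call
        # /api/v1/package/?category__slug=apps
        filtering = {"category": ALL_WITH_RELATIONS}
```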
|
I am attempting to fix this myself.
| 2012-04-10T12:55:07 |
djangopackages/djangopackages
| 160 |
djangopackages__djangopackages-160
|
[
"152"
] |
9370a53405046b7ea48e9012f9c3aee450551230
|
diff --git a/settings/base.py b/settings/base.py
--- a/settings/base.py
+++ b/settings/base.py
@@ -200,18 +200,6 @@
'django.template.loaders.app_directories.Loader',
)
-#TEST_RUNNER = 'testrunner.OurTestRunner'
-TEST_RUNNER = 'testrunner.OurCoverageRunner'
-
-COVERAGE_MODULE_EXCLUDES = [
- 'tests$', 'settings$', 'urls$', 'locale$',
- 'migrations', 'fixtures', 'big_email_send$',
- 'load_dev_data$', 'fix_grid_element$',
- 'package_updater$', 'searchv2_build$'
-]
-COVERAGE_MODULE_EXCLUDES += PREREQ_APPS + ["djkombu", ]
-COVERAGE_REPORT_HTML_OUTPUT_DIR = "coverage"
-
PACKAGINATOR_HELP_TEXT = {
"REPO_URL": "Enter your project repo hosting URL here.<br />Example: https://github.com/opencomparison/opencomparison",
"PYPI_URL": "<strong>Leave this blank if this package does not have a PyPI release.</strong><br />What PyPI uses to index your package. <br />Example: django-uni-form",
diff --git a/settings/travis.py b/settings/travis.py
deleted file mode 100644
--- a/settings/travis.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# -*- coding: utf-8 -*-
-"""TravisCI settings which allow us to run our test suite on the TravisCI
-continuous integration service.
-"""
-
-
-from settings.base import *
-
-
-DATABASES = {
- "default": {
- "ENGINE": "django.db.backends.sqlite3",
- "NAME": ":memory:",
- "USER": "",
- "PASSWORD": "",
- "HOST": "",
- "PORT": "",
- },
-}
|
diff --git a/docs/testing_instructions.rst b/docs/testing_instructions.rst
--- a/docs/testing_instructions.rst
+++ b/docs/testing_instructions.rst
@@ -8,8 +8,8 @@ Running the test suite
To run all of the Packaginator tests::
- python manage.py test --settings.base
+ python manage.py test --settings.test
To run tests for a particular Packaginator app, for example the feeds app::
- python manage.py test feeds --settings.base
\ No newline at end of file
+ python manage.py test feeds --settings.test
diff --git a/settings/test.py b/settings/test.py
new file mode 100644
--- /dev/null
+++ b/settings/test.py
@@ -0,0 +1,39 @@
+# -*- coding: utf-8 -*-
+"""Local test settings and globals which allows us to run our test suite
+locally.
+"""
+
+
+from settings.base import *
+
+
+########## DEBUG
+DEBUG = True
+TEMPLATE_DEBUG = DEBUG
+SERVE_MEDIA = DEBUG
+
+
+########## TEST
+TEST_RUNNER = 'testrunner.OurCoverageRunner'
+
+COVERAGE_MODULE_EXCLUDES = [
+ 'tests$', 'settings$', 'urls$', 'locale$',
+ 'migrations', 'fixtures', 'big_email_send$',
+ 'load_dev_data$', 'fix_grid_element$',
+ 'package_updater$', 'searchv2_build$'
+]
+COVERAGE_MODULE_EXCLUDES += PREREQ_APPS + ["djkombu", ]
+COVERAGE_REPORT_HTML_OUTPUT_DIR = "coverage"
+
+
+########## DATABASES
+DATABASES = {
+ "default": {
+ "ENGINE": "django.db.backends.sqlite3",
+ "NAME": ":memory:",
+ "USER": "",
+ "PASSWORD": "",
+ "HOST": "",
+ "PORT": "",
+ },
+}
|
Packages API should support filtering
Right now there is no way to get the list of packages in a specific category. The _Category_ resource does not contain a list of package resource_urls the way the _Grid_ resource does, and there is no way to apply a filter on the _Package_ resource.
Ideally the _Category_ resource should contain a list of packages like the _Grid_ resource does:
```
packages: [
"/api/v1/package/django-cms/"
"/api/v1/package/django-page-cms/"
"/api/v1/package/django-lfc/"
"/api/v1/package/merengue/"
"/api/v1/package/mezzanine/"
"/api/v1/package/philo/"
"/api/v1/package/pylucid/"
"/api/v1/package/django-gitcms/"
"/api/v1/package/django-simplepages/"
"/api/v1/package/djpcms/"
"/api/v1/package/feincms/"
]
```
Furthermore, there should be a filter on the _Package_ resource like this:
`/package/?category=apps`
|
I am attempting to fix this myself.
| 2012-05-13T00:25:02 |
djangopackages/djangopackages
| 267 |
djangopackages__djangopackages-267
|
[
"242"
] |
3cdd075f3b79e41b866d7b17c993833b43e3e96b
|
diff --git a/settings/heroku.py b/settings/heroku.py
--- a/settings/heroku.py
+++ b/settings/heroku.py
@@ -168,3 +168,14 @@
)
########## end templates
+
+#-------------------
+# appenlight-client
+#------------------
+
+import appenlight_client.client as e_client
+APPENLIGHT = e_client.get_config({'appenlight.api_key': os.environ.get('APPENLIGHT_KEY', '')})
+
+MIDDLEWARE_CLASSES += (
+ 'appenlight_client.django_middleware.AppenlightMiddleware',
+)
\ No newline at end of file
|
Add errormator.com
|
https://errormator.com/page/python/django-exception-logging
| 2014-02-07T17:59:38 |
|
djangopackages/djangopackages
| 269 |
djangopackages__djangopackages-269
|
[
"268"
] |
3cdd075f3b79e41b866d7b17c993833b43e3e96b
|
diff --git a/profiles/forms.py b/profiles/forms.py
--- a/profiles/forms.py
+++ b/profiles/forms.py
@@ -2,16 +2,33 @@
from profiles.models import Profile
+from crispy_forms.helper import FormHelper
+from crispy_forms.layout import Layout, Fieldset, ButtonHolder, Submit, HTML
class ProfileForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(ProfileForm, self).__init__(*args, **kwargs)
- class Meta:
+ self.helper = FormHelper()
+ self.helper.form_action = 'profile_edit'
+ self.helper.layout = Layout(
+ Fieldset(
+ '',
+ HTML("""
+ <p>Github account, <strong>{{ profile.github_account }}</strong></p>
+ """),
+ 'bitbucket_url',
+ 'google_code_url',
+ ),
+ ButtonHolder(
+ Submit('edit', 'Edit', css_class='btn btn-default')
+ )
+ )
+ class Meta:
fields = (
- 'bitbucket_url',
- 'google_code_url',
- )
+ 'bitbucket_url',
+ 'google_code_url',
+ )
model = Profile
diff --git a/profiles/urls.py b/profiles/urls.py
--- a/profiles/urls.py
+++ b/profiles/urls.py
@@ -3,7 +3,11 @@
from profiles import views
urlpatterns = patterns("",
- url(r"^edit/$", views.profile_edit, name="profile_edit"),
+ url(
+ regex=r"^edit/$",
+ view=views.ProfileEditUpdateView.as_view(),
+ name="profile_edit"
+ ),
url(r"^$", views.profile_list, name="profile_list"),
url(r"^(?P<github_account>[-\w]+)/$", views.profile_detail, name="profile_detail"),
)
diff --git a/profiles/views.py b/profiles/views.py
--- a/profiles/views.py
+++ b/profiles/views.py
@@ -1,12 +1,11 @@
-from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
+from django.views.generic.edit import UpdateView
-from crispy_forms.helper import FormHelper
-from crispy_forms.layout import Layout, Fieldset, ButtonHolder, Submit, HTML
+from braces.views import LoginRequiredMixin
from social_auth.signals import pre_update
from social_auth.backends.contrib.github import GithubBackend
@@ -36,45 +35,18 @@ def profile_list(request, template_name="profiles/profiles.html"):
})
-@login_required
-def profile_edit(request, template_name="profiles/profile_edit.html"):
+class ProfileEditUpdateView(LoginRequiredMixin, UpdateView):
+ model = Profile
+ form_class = ProfileForm
+ template_name = "profiles/profile_edit.html"
- profile = request.user.get_profile()
- form = ProfileForm(request.POST or None, instance=profile)
+ def get_object(self):
+ return self.request.user.get_profile()
- if form.is_valid():
+ def form_valid(self, form):
form.save()
- msg = 'Profile edited'
- messages.add_message(request, messages.INFO, msg)
- return HttpResponseRedirect(reverse("profile_detail", kwargs={"github_account": profile.github_account}))
-
- # TODO - move this to a template
- github_account = """
- <div
- id="div_id_github_account"
- class="ctrlHolder"><label for="id_github_account" >Github account</label><strong>{0}</strong></div>
- """.format(profile.github_account)
-
- helper = FormHelper()
- helper.form_class = "profile-edit-form"
- helper.layout = Layout(
- Fieldset(
- '',
- HTML(github_account),
- 'bitbucket_url',
- 'google_code_url',
- ),
- ButtonHolder(
- Submit('edit', 'Edit', css_class="btn btn-default"),
- )
- )
-
- return render(request, template_name,
- {
- "profile": profile,
- "form": form,
- "helper": helper,
- })
+ messages.add_message(self.request, messages.INFO, "Profile Saved")
+ return HttpResponseRedirect(reverse("profile_detail", kwargs={"github_account": self.get_object()}))
def github_user_update(sender, user, response, details, **kwargs):
|
Cannot save profile edit form
On https://www.djangopackages.com/profiles/edit/ clicking the Edit button does nothing. Tested on Linux with Firefox and Chromium.
| 2014-02-12T18:48:51 |
||
djangopackages/djangopackages
| 302 |
djangopackages__djangopackages-302
|
[
"295"
] |
588dbc458d636ea614f54f9f7c4bb7fe0be499d0
|
diff --git a/grid/views.py b/grid/views.py
--- a/grid/views.py
+++ b/grid/views.py
@@ -68,7 +68,7 @@ def grid_detail_landscape(request, slug, template_name="grid/grid_detail2.html")
('pypi_version', 'Version'),
('repo', 'Repo'),
('commits_over_52', 'Commits'),
- ('repo_watchers', 'Repo watchers'),
+ ('repo_watchers', 'Stars'),
('repo_forks', 'Forks'),
('participant_list', 'Participants'),
('license_latest', 'License')
@@ -350,7 +350,7 @@ def grid_detail(request, slug, template_name="grid/grid_detail.html"):
('pypi_version', 'Version'),
('repo', 'Repo'),
('commits_over_52', 'Commits'),
- ('repo_watchers', 'Repo watchers'),
+ ('repo_watchers', 'Stars'),
('repo_forks', 'Forks'),
('participant_list', 'Participants'),
('license_latest', 'License')
diff --git a/package/models.py b/package/models.py
--- a/package/models.py
+++ b/package/models.py
@@ -54,7 +54,7 @@ class Package(BaseModel):
category = models.ForeignKey(Category, verbose_name="Installation")
repo_description = models.TextField(_("Repo Description"), blank=True)
repo_url = models.URLField(_("repo URL"), help_text=repo_url_help_text, blank=True, unique=True, verify_exists=True)
- repo_watchers = models.IntegerField(_("repo watchers"), default=0)
+ repo_watchers = models.IntegerField(_("Stars"), default=0)
repo_forks = models.IntegerField(_("repo forks"), default=0)
pypi_url = models.URLField(_("PyPI slug"), help_text=pypi_url_help_text, blank=True, default='', verify_exists=True)
pypi_downloads = models.IntegerField(_("Pypi downloads"), default=0)
diff --git a/searchv2/models.py b/searchv2/models.py
--- a/searchv2/models.py
+++ b/searchv2/models.py
@@ -21,7 +21,7 @@ class SearchV2(BaseModel):
grids
pacakges
categories
- number of watchers
+ stars
number of forks
last repo commit
last release on PyPI
@@ -37,7 +37,7 @@ class SearchV2(BaseModel):
description = models.TextField(_("Repo Description"), blank=True)
category = models.CharField(_("Category"), blank=True, max_length=50)
absolute_url = models.CharField(_("Absolute URL"), max_length="255")
- repo_watchers = models.IntegerField(_("repo watchers"), default=0)
+ repo_watchers = models.IntegerField(_("Stars"), default=0)
repo_forks = models.IntegerField(_("repo forks"), default=0)
pypi_downloads = models.IntegerField(_("Pypi downloads"), default=0)
usage = models.IntegerField(_("Number of users"), default=0)
|
Change watchers to stars
e.g. "Repo Watchers"
|
Just on the templates and translation files?
| 2014-11-12T00:13:35 |
|
djangopackages/djangopackages
| 560 |
djangopackages__djangopackages-560
|
[
"534"
] |
5b4924daa85087c2669f73e923a3c6da7bbb85fd
|
diff --git a/core/management/commands/load_dev_data.py b/core/management/commands/load_dev_data.py
--- a/core/management/commands/load_dev_data.py
+++ b/core/management/commands/load_dev_data.py
@@ -1,16 +1,16 @@
from sys import stdout
+from importlib import import_module
from django.conf import settings
-from django.core.management.base import CommandError, NoArgsCommand
-from django.utils.importlib import import_module
+from django.core.management.base import BaseCommand
from django.utils.module_loading import module_has_submodule
-class Command(NoArgsCommand):
-
+class Command(BaseCommand):
+
help = "Import development data for local dev"
-
- def handle(self, *args, **options):
+
+ def handle(self, *args, **options):
print("Commencing dev data import", file=stdout)
for app in settings.INSTALLED_APPS:
|
ImportError: cannot import name 'NoArgsCommand'
When I run the `load_dev_data` django mgmt command, the following error occurs:
```
$ docker-compose -f dev.yml run django python manage.py load_dev_data
Starting djangopackages_postgres_1 ... done
Starting djangopackages_redis_1 ... done
Postgres is up - continuing...
/src/django-floppyforms/floppyforms/__init__.py:21: UserWarning: Unable to import floppyforms.gis, geometry widgets not available
"Unable to import floppyforms.gis, geometry widgets not available")
Traceback (most recent call last):
File "manage.py", line 10, in <module>
execute_from_command_line(sys.argv)
File "/usr/local/lib/python3.6/site-packages/django/core/management/__init__.py", line 364, in execute_from_command_line
utility.execute()
File "/usr/local/lib/python3.6/site-packages/django/core/management/__init__.py", line 356, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/usr/local/lib/python3.6/site-packages/django/core/management/__init__.py", line 206, in fetch_command
klass = load_command_class(app_name, subcommand)
File "/usr/local/lib/python3.6/site-packages/django/core/management/__init__.py", line 40, in load_command_class
module = import_module('%s.management.commands.%s' % (app_name, name))
File "/usr/local/lib/python3.6/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 994, in _gcd_import
File "<frozen importlib._bootstrap>", line 971, in _find_and_load
File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 678, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/app/core/management/commands/load_dev_data.py", line 4, in <module>
from django.core.management.base import CommandError, NoArgsCommand
ImportError: cannot import name 'NoArgsCommand'
```
| 2019-10-30T07:22:28 |
||
djangopackages/djangopackages
| 620 |
djangopackages__djangopackages-620
|
[
"278"
] |
e21156beab59fbfb902364d71d1ef827ac082b77
|
diff --git a/package/repos/gitlab.py b/package/repos/gitlab.py
new file mode 100644
--- /dev/null
+++ b/package/repos/gitlab.py
@@ -0,0 +1,64 @@
+import re
+
+from django.conf import settings
+from gitlab import Gitlab
+
+from .base_handler import BaseHandler
+
+
+class GitlabHandler(BaseHandler):
+ title = "Github"
+ url_regex = "(http|https|git)://gitlab.com/"
+ url = "https://gitlab.com"
+ repo_regex = r"(?:http|https|git)://gitlab.com/[^/]*/([^/]*)"
+ slug_regex = repo_regex
+ gitlab: Gitlab
+
+ def __init__(self):
+ if settings.GITLAB_TOKEN:
+ self.gitlab = Gitlab(self.url, private_token=settings.GITLAB_TOKEN)
+ else:
+ self.gitlab = Gitlab(self.url)
+
+ def _get_repo(self, repo_url):
+ path = repo_url.replace(f"{self.url}/", "")
+ return self.gitlab.projects.get(path)
+
+ def fetch_metadata(self, package):
+ repo = self._get_repo(package.repo_url)
+ if repo is None:
+ return package
+
+ package.repo_watchers = repo.star_count
+ package.repo_forks = repo.forks_count
+ package.repo_description = repo.description
+
+ return package
+
+ def fetch_commits(self, package):
+
+ repo = self._get_repo(package.repo_url)
+ if repo is None:
+ return package
+
+ from package.models import Commit # Added here to avoid circular imports
+
+ for commit in repo.commits.list(as_list=False):
+ try:
+ commit_record, created = Commit.objects.get_or_create(
+ package=package,
+ commit_date=commit.committed_date,
+ commit_hash=commit.id,
+ )
+ if not created:
+ break
+ except Commit.MultipleObjectsReturned:
+ continue
+ # If the commit record already exists, it means we are at the end of the
+ # list we want to import
+
+ package.save()
+ return package
+
+
+repo_handler = GitlabHandler()
diff --git a/settings/base.py b/settings/base.py
--- a/settings/base.py
+++ b/settings/base.py
@@ -234,7 +234,7 @@
if LOCAL_INSTALLED_APPS:
INSTALLED_APPS.extend(LOCAL_INSTALLED_APPS)
-SUPPORTED_REPO.extend(["bitbucket", "github"])
+SUPPORTED_REPO.extend(["bitbucket", "github", "gitlab"])
AUTHENTICATION_BACKENDS = (
@@ -408,6 +408,8 @@
GITHUB_APP_ID = environ.get("GITHUB_APP_ID")
GITHUB_TOKEN = environ.get("GITHUB_TOKEN")
+GITLAB_TOKEN = environ.get('GITLAB_TOKEN')
+
########### SEKURITY
ALLOWED_HOSTS = ["*"]
|
diff --git a/package/tests/test_repos.py b/package/tests/test_repos.py
--- a/package/tests/test_repos.py
+++ b/package/tests/test_repos.py
@@ -233,6 +233,16 @@ def setUp(self):
# self.assertEqual(self.package.repo_watchers, 0)
# self.package.fetch_commits()
+class TestGitlabRepo(TestBaseHandler):
+ def setUp(self):
+ super(TestGitlabRepo, self).setUp()
+ self.package = Package.objects.create(
+ title="Django",
+ slug="django",
+ repo_url="https://gitlab.com/delta10/kees",
+ category=self.category
+ )
+
class TestRepos(BaseBase):
def test_repo_registry(self):
|
Add gitlab as a repo option
See https://pypi.python.org/pypi/pyapi-gitlab/6.2.3
| 2021-07-27T15:32:22 |
|
djangopackages/djangopackages
| 639 |
djangopackages__djangopackages-639
|
[
"638"
] |
ea080a17560bfefa73fb43bc577f63b4aeb2299b
|
diff --git a/apiv4/serializers.py b/apiv4/serializers.py
--- a/apiv4/serializers.py
+++ b/apiv4/serializers.py
@@ -11,12 +11,24 @@
from package.models import Package, Category
from searchv2.models import SearchV2
+
class GridSerializer(serializers.ModelSerializer):
packages = serializers.HyperlinkedRelatedField(many=True, view_name='apiv4:package-detail', read_only=True)
class Meta:
+ fields = [
+ "title",
+ "slug",
+ "description",
+ "is_locked",
+ "packages",
+ "header",
+ "created",
+ "modified",
+ ]
model = Grid
+
class PackageSerializer(serializers.HyperlinkedModelSerializer):
# 'Source' is attached to the model attribute
participants = serializers.ListField(source='participant_list')
@@ -124,4 +136,13 @@ class Meta:
class CategorySerializer(serializers.ModelSerializer):
class Meta:
+ fields = [
+ "title",
+ "slug",
+ "description",
+ "title_plural",
+ "show_pypi",
+ "created",
+ "modified",
+ ]
model = Category
|
🐛 AssertionError /api/v4/grids/
This is coming from sentry:
```
("Creating a ModelSerializer without either the 'fields' attribute or the 'exclude' attribute has been deprecated since 3.3.0, and is now disallowed. Add an explicit fields = '__all__' to the GridSerializer serializer.",)
```
| 2021-08-06T13:54:07 |
||
djangopackages/djangopackages
| 640 |
djangopackages__djangopackages-640
|
[
"637"
] |
1e2b8c0f1293e4d22fbd617f19247237926d8968
|
diff --git a/settings/base.py b/settings/base.py
--- a/settings/base.py
+++ b/settings/base.py
@@ -78,6 +78,7 @@
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"dj_pagination.middleware.PaginationMiddleware",
+ "waffle.middleware.WaffleMiddleware",
]
TEMPLATES = [
@@ -149,12 +150,12 @@
"crispy_forms",
"dj_pagination",
"django_extensions",
- "reversion",
- "webstack_django_sorting",
- # "django_modeler",
- "social_django",
"floppyforms",
"rest_framework",
+ "reversion",
+ "social_django",
+ "waffle",
+ "webstack_django_sorting",
]
INSTALLED_APPS = PREREQ_APPS + PROJECT_APPS
|
🔌 Add a feature flipper
Judging from our grids, https://github.com/django-waffle/django-waffle is probably not a bad way to go.
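For reference, a minimal sketch of how a waffle flag check might be used once the app and middleware are installed; the view and flag name are made up for illustration:
```python
# Hypothetical usage; "new_grid_ui" is an invented flag name.
import waffle
from django.shortcuts import render


def some_view(request):
    if waffle.flag_is_active(request, "new_grid_ui"):
        template_name = "grid/grid_detail_new.html"
    else:
        template_name = "grid/grid_detail.html"
    return render(request, template_name, {})
```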
| 2021-08-06T15:12:26 |
||
djangopackages/djangopackages
| 674 |
djangopackages__djangopackages-674
|
[
"641"
] |
612f746421910812a64b9d2f440eafeec7b31933
|
diff --git a/core/__init__.py b/core/__init__.py
--- a/core/__init__.py
+++ b/core/__init__.py
@@ -0,0 +1 @@
+__version__ = "2021.8.1"
diff --git a/urls.py b/urls.py
--- a/urls.py
+++ b/urls.py
@@ -5,12 +5,16 @@
from django.urls import path, re_path
from django.views.generic.base import TemplateView
+from core import __version__
from core.apiv1 import apiv1_gone
from homepage.views import homepage, error_404_view, error_500_view, health_check_view, SitemapView
from package.views import category, python3_list
from profiles.views import LogoutView
-admin.autodiscover()
+admin_header = f"Django Packages v{__version__}"
+# admin.site.enable_nav_sidebar = False # disabled until Django 3.x
+admin.site.site_header = admin_header
+admin.site.site_title = admin_header
urlpatterns = [
|
🔢 Add some type of SemVer/CalVer versioning scheme to the project
It's easier to troubleshoot if we can track a version through from testing through production.
My goto these days is CalVer, but I'm open to suggestions/changing this later.
|
I think CalVer is the right choice. Maybe `YYYY.MM.DD.01` (or `YYYY.MM.DD.001`) where `01` (or `001`) is today's deployment counter.
I don't know if there is any library to manage the version that supports this nor how difficult it is to implement it.
The last time I checked, [bumpver](https://pypi.org/project/bumpver/) ticked the feature boxes fairly well.
`YYYY.MM.INC1` is kind of appealing because it's PEP440 compatible (even though this grid is wrong https://github.com/mbarkhau/bumpver#pattern-examples)
You can talk me into going with the day, but seeing that we deployed 35 times in August, a two "." pattern is appealing too.
`YYYY.MM.INC1` also works well
| 2021-08-08T03:57:41 |
|
djangopackages/djangopackages
| 708 |
djangopackages__djangopackages-708
|
[
"643"
] |
cefbb1acc2e90ba9fd800cf2251eb7bb3be683cf
|
diff --git a/urls.py b/urls.py
--- a/urls.py
+++ b/urls.py
@@ -45,7 +45,8 @@
path('open/', OpenView.as_view(), name="open"),
path('syndication/', TemplateView.as_view(template_name='pages/syndication.html'), name="syndication"),
path('help/', TemplateView.as_view(template_name='pages/help.html'), name="help"),
- re_path(r"^sitemap\.xml$", SitemapView.as_view(), name="sitemap"),
+ path("funding/", TemplateView.as_view(template_name='pages/funding.html'), name="funding"),
+ path("sitemap.xml", SitemapView.as_view(), name="sitemap"),
# new apps
path('search/', include("searchv2.urls")),
|
📄 Create Funding page to outline where funding goes and options
I think having a static page linked from the footer would make a lot of sense to provide links for funding past and present maintainers.
| 2021-09-14T04:36:00 |
||
djangopackages/djangopackages
| 724 |
djangopackages__djangopackages-724
|
[
"723"
] |
7eb267db616fc9cbed979c64c80eb798f8b3300f
|
diff --git a/package/models.py b/package/models.py
--- a/package/models.py
+++ b/package/models.py
@@ -185,7 +185,7 @@ def fetch_pypi_data(self, *args, **kwargs):
print((self, response.status_code))
if response.status_code == 404:
if settings.DEBUG:
- print("BOOM!")
+ print("BOOM! this package probably does not exist on pypi")
print((self, response.status_code))
return False
release = json.loads(response.content)
@@ -205,18 +205,19 @@ def fetch_pypi_data(self, *args, **kwargs):
self.supports_python3 = True
# add to versions
- licenses = list(info['license'])
- for index, license in enumerate(licenses):
- if license or "UNKNOWN" == license.upper():
- for classifier in info['classifiers']:
- if classifier.startswith("License"):
- licenses[index] = classifier.strip().replace('License ::', '')
- licenses[index] = licenses[index].replace('OSI Approved :: ', '')
- break
-
- version.licenses = licenses
-
- #version stuff
+ if 'license' in info and info['license']:
+ licenses = list(info['license'])
+ for index, license in enumerate(licenses):
+ if license or "UNKNOWN" == license.upper():
+ for classifier in info['classifiers']:
+ if classifier.startswith("License"):
+ licenses[index] = classifier.strip().replace('License ::', '')
+ licenses[index] = licenses[index].replace('OSI Approved :: ', '')
+ break
+
+ version.licenses = licenses
+
+ # version stuff
try:
url_data = release['urls'][0]
version.downloads = url_data['downloads']
|
🐛 Issue in pypi_updater
Tonight, while testing I started seeing:
```
Traceback (most recent call last):
File "manage.py", line 10, in <module>
execute_from_command_line(sys.argv)
File "/usr/local/lib/python3.8/site-packages/django/core/management/__init__.py", line 419, in execute_from_command_line
utility.execute()
File "/usr/local/lib/python3.8/site-packages/django/core/management/__init__.py", line 413, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/usr/local/lib/python3.8/site-packages/djclick/adapter.py", line 68, in run_from_argv
exit_code = self.main(
File "/usr/local/lib/python3.8/site-packages/click/core.py", line 1062, in main
rv = self.invoke(ctx)
File "/usr/local/lib/python3.8/site-packages/djclick/adapter.py", line 50, in invoke
return super(DjangoCommandMixin, self).invoke(ctx)
File "/usr/local/lib/python3.8/site-packages/click/core.py", line 1404, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/local/lib/python3.8/site-packages/click/core.py", line 763, in invoke
return __callback(*args, **kwargs)
File "/workspace/package/management/commands/pypi_updater.py", line 18, in command
updated = package.fetch_pypi_data()
File "/workspace/package/models.py", line 208, in fetch_pypi_data
licenses = list(info['license'])
TypeError: 'NoneType' object is not iterable
```
|
@ShreehariVaasishta I'll take a quick look, but this came up tonight after merging and testing.
| 2021-09-30T04:36:02 |
|
djangopackages/djangopackages
| 725 |
djangopackages__djangopackages-725
|
[
"721"
] |
47e6baacc134f2516453bf9cf279b120378509f9
|
diff --git a/package/models.py b/package/models.py
--- a/package/models.py
+++ b/package/models.py
@@ -97,7 +97,14 @@ def pypi_name(self):
if not self.pypi_url.strip():
return ""
- name = self.pypi_url.replace("http://pypi.python.org/pypi/", "")
+ name = self.pypi_url
+
+ if "http://pypi.python.org/pypi/" in name:
+ name = name.replace("http://pypi.python.org/pypi/", "")
+
+ if "https://pypi.python.org/pypi/" in name:
+ name = name.replace("https://pypi.python.org/pypi/", "")
+
if "/" in name:
return name[:name.index("/")]
return name
|
Incorrect PyPI URLs on some packages
**Describe the bug**
Some packages have a PyPI URL in the form of `http://pypi.python.org/pypi/https://pypi.org/project/...` for example:
https://djangopackages.org/packages/p/wagtail-grapple/
https://djangopackages.org/packages/p/wagtail-2fa/
**Additional context**
We use the API to pull in Wagtail packages and display on https://wagtail.io/packages/
Originally reported on https://github.com/wagtail/wagtail.io/issues/118
| 2021-09-30T04:58:47 |
||
djangopackages/djangopackages
| 738 |
djangopackages__djangopackages-738
|
[
"721"
] |
5651ea95152bb0f880dd5cc995bab1137a5a6a0f
|
diff --git a/package/management/commands/pypi_find_missing.py b/package/management/commands/pypi_find_missing.py
new file mode 100644
--- /dev/null
+++ b/package/management/commands/pypi_find_missing.py
@@ -0,0 +1,24 @@
+import djclick as click
+import logging
+
+from django.db.models import Q
+
+from package.models import Package
+
+logger = logging.getLogger(__name__)
+
+
[email protected]()
+def command():
+ total_empty_packages = Package.objects.filter(
+ ~Q(pypi_url__startswith="http")
+ ).count()
+ total_packages = Package.objects.all().count()
+ packages = Package.objects.filter(~Q(pypi_url__startswith="http") & ~Q(pypi_url=""))
+
+ click.echo(f"total_packages: {total_packages}")
+ click.echo(f"total_empty_packages: {total_empty_packages}")
+ click.echo(f"packages needing fixed: {packages.count()}")
+
+ for package in packages:
+ click.echo(f"- {package.pypi_url}")
|
Incorrect PyPI URLs on some packages
**Describe the bug**
Some packages have a PyPI URL in the form of `http://pypi.python.org/pypi/https://pypi.org/project/...` for example:
https://djangopackages.org/packages/p/wagtail-grapple/
https://djangopackages.org/packages/p/wagtail-2fa/
**Additional context**
We use the API to pull in Wagtail packages and display on https://wagtail.io/packages/
Originally reported on https://github.com/wagtail/wagtail.io/issues/118
|
@zerolab thank you for the report. This should be patched now both in #725 and in 06bf99a59ed325ab2dcf39fd39115b21949a92f1
I suspect I'll have to do a cleanup script or something to fix these. I'll double-check tomorrow. We need to refactor more of the pypi logic to add some validation.
It's also pretty neat to see Wagtail is using this data. I have looked at it a few dozen times and I never realized it before :)
I'm also noticing the pypi history isn't being fetched. More for tomorrow.
Ah, thank you so much for the swift fix and response 🌟 ❤️
I'm still seeing issues in a few categories, but I didn't have the headspace to poke around at the database. I will have a fix Friday or this weekend though with a better default for pypi too since I noticed we are hitting a redirect.
| 2021-10-05T03:43:15 |
|
djangopackages/djangopackages
| 751 |
djangopackages__djangopackages-751
|
[
"750"
] |
16684156c49135d6e08992a6cec8de7228a6cee2
|
diff --git a/apiv4/serializers.py b/apiv4/serializers.py
--- a/apiv4/serializers.py
+++ b/apiv4/serializers.py
@@ -17,6 +17,7 @@ class GridSerializer(serializers.ModelSerializer):
class Meta:
fields = [
+ "id",
"title",
"slug",
"description",
@@ -138,6 +139,7 @@ class Meta:
class CategorySerializer(serializers.ModelSerializer):
class Meta:
fields = [
+ "id",
"title",
"slug",
"description",
|
Re-add grid id in the API output
Before the rounds of updates and #639, the API was returning the Grid id.
We are using the id for creating or updating a Grid entry on wagtail.io -- https://github.com/wagtail/wagtail.io/blob/master/wagtailio/packages/views.py#L16, but that is now failing with an integrity error since `id` is None (https://github.com/wagtail/wagtail.io/issues/119)
Happy to submit a PR
| 2021-10-27T13:00:20 |
||
djangopackages/djangopackages
| 752 |
djangopackages__djangopackages-752
|
[
"747"
] |
98f4d652ada5f455eec1355b386ae7dbe39baa84
|
diff --git a/package/models.py b/package/models.py
--- a/package/models.py
+++ b/package/models.py
@@ -274,14 +274,10 @@ def fetch_pypi_data(self, *args, **kwargs):
if license or "UNKNOWN" == license.upper():
for classifier in info["classifiers"]:
if classifier.startswith("License"):
- licenses[index] = classifier.strip().replace(
- "License ::", ""
- )
- licenses[index] = licenses[index].replace(
- "OSI Approved :: ", ""
- )
+ licenses[index] = classifier.split("::")[-1].strip()
break
+ version.license = licenses[0]
version.licenses = licenses
# version stuff
|
Incorrect `license` on package pages
**Describe the bug**
The `license` shown on package pages has started to show as `UNKNOWN` even where a licence was previously available. See [here](https://djangopackages.org/packages/p/django-axes/) and [here](https://djangopackages.org/packages/p/django_coverage_plugin/)
**To Reproduce**
Steps to reproduce the behavior:
1. Go to https://djangopackages.org/packages/p/django-axes/
2. Scroll down to Releases
3. See license in table is `UNKNOWN`
**Expected behavior**
Show the correct license
**Additional context**
I am sorry I've not had time to do a deep dive at this stage, but thought it best to log this now before I forget! However, I noticed that there were recent commits in this area (e.g. https://github.com/djangopackages/djangopackages/commit/8bad61f4729818a923d903c40e8b49b75f854148), so I suspect that is probably related.
|
Good catch. I'll write up a test later and work on a fix when I get time.
It looks like we have `licenses = [ MIT License, MIT License, MIT License]` in the new field that we want to move over to, but it looks like there is some leakage there too.
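For anyone following along, the "leakage" comes from calling `list()` on the license string; a tiny illustration (the `info` dict only mimics the shape of PyPI's JSON):
```python
# list() over a string splits it into characters, and each character is later
# overwritten with the matching classifier's license name, hence the repeats.
info = {"license": "MIT", "classifiers": ["License :: OSI Approved :: MIT License"]}

as_characters = list(info["license"])  # -> ["M", "I", "T"]  (one slot per character)
as_single_item = [info["license"]]     # -> ["MIT"]          (one slot per license)
```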
| 2021-10-29T03:08:00 |
|
djangopackages/djangopackages
| 754 |
djangopackages__djangopackages-754
|
[
"747"
] |
2eccb8d91c0285739b08e8a7d8b4bf3b65db37ce
|
diff --git a/package/models.py b/package/models.py
--- a/package/models.py
+++ b/package/models.py
@@ -269,7 +269,7 @@ def fetch_pypi_data(self, *args, **kwargs):
# add to versions
if "license" in info and info["license"]:
- licenses = list(info["license"])
+ licenses = [info["license"]]
for index, license in enumerate(licenses):
if license or "UNKNOWN" == license.upper():
for classifier in info["classifiers"]:
@@ -277,8 +277,8 @@ def fetch_pypi_data(self, *args, **kwargs):
licenses[index] = classifier.split("::")[-1].strip()
break
- version.license = licenses[0]
version.licenses = licenses
+ version.license = licenses[0]
# version stuff
try:
|
Incorrect `license` on package pages
**Describe the bug**
The `license` shown on package pages has started to show as `UNKNOWN` even where a licence was previously available. See [here](https://djangopackages.org/packages/p/django-axes/) and [here](https://djangopackages.org/packages/p/django_coverage_plugin/)
**To Reproduce**
Steps to reproduce the behavior:
1. Go to https://djangopackages.org/packages/p/django-axes/
2. Scroll down to Releases
3. See license in table is `UNKNOWN`
**Expected behavior**
Show the correct license
**Additional context**
I am sorry I've not had time to do a deep dive at this stage, but thought it best to log this now before I forget! However, I noticed that there were recent commits in this area (e.g. https://github.com/djangopackages/djangopackages/commit/8bad61f4729818a923d903c40e8b49b75f854148), so I suspect that is probably related.
|
Good catch. I'll write up a test later and work on a fix when I get time.
It looks like we have `licenses = [ MIT License, MIT License, MIT License]` in the new field that we want to move over to, but it looks like there is some leakage there too.
I'm going to leave this open, but I have a partial fix based on our license refactor. It needs a little more work (hopefully not a whole weeknight) to see it through, though.
<img width="881" alt="Screen Shot 2021-10-28 at 10 17 08 PM" src="https://user-images.githubusercontent.com/50527/139370034-7820fcc2-c74c-4ae0-b60c-827f567e4f19.png">
| 2021-10-29T04:12:26 |
|
djangopackages/djangopackages
| 770 |
djangopackages__djangopackages-770
|
[
"675"
] |
e20a9dc327a3b9d063dfc9ee3dc98312f3275da3
|
diff --git a/settings/celery.py b/settings/celery.py
new file mode 100644
--- /dev/null
+++ b/settings/celery.py
@@ -0,0 +1,23 @@
+import os
+
+from celery import Celery
+
+
+# set the default Django settings module for the 'celery' program.
+os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings.docker")
+
+app = Celery("config")
+
+# Using a string here means the worker doesn't have to serialize
+# the configuration object to child processes.
+# - namespace='CELERY' means all celery-related configuration keys
+# should have a `CELERY_` prefix.
+app.config_from_object("django.conf:settings", namespace="CELERY")
+
+# Load task modules from all registered Django app configs.
+app.autodiscover_tasks()
+
+
[email protected](bind=True)
+def debug_task(self):
+ print(f"Request: {self.request!r}")
diff --git a/settings/docker.py b/settings/docker.py
--- a/settings/docker.py
+++ b/settings/docker.py
@@ -193,3 +193,14 @@
PACKAGE_HEALTHCHECK_URL = env.str("PACKAGE_HEALTHCHECK_URL", "")
PYPI_HEALTHCHECK_URL = env.str("PYPI_HEALTHCHECK_URL", "")
SEARCHV2_HEALTHCHECK_URL = env.str("SEARCHV2_HEALTHCHECK_URL", "")
+
+# Configure Redis
+REDIS_HOST = env("REDIS_HOST", default="redis")
+
+# Configure Celery
+CELERY_BROKER_URL = f"redis://{REDIS_HOST}:6379"
+CELERY_RESULT_BACKEND = f"redis://{REDIS_HOST}:6379"
+CELERY_ACCEPT_CONTENT = ["application/json"]
+CELERY_TASK_SERIALIZER = "json"
+CELERY_RESULT_SERIALIZER = "json"
+CELERY_TIMEZONE = "UTC"
|
🚜 Migrate to a task queue (Add Celery)
I noticed that the package_updater was running three copies and seemed to be stuck, and that there was a 5-second sleep between every package in our database.
This works as a way to work around rate limit issues, but we can move to a task queue and avoid blocking via sleep() completely. We should get roughly 5k requests an hour (newer APIs might even go as high as 15k an hour) and be able to manage this from both the frontend and the backend.
I'd like to see the /post-data/ endpoint move the fetching logic to a queued task to cut down on IO too.
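A minimal sketch of what one of those queued tasks could look like, assuming the Celery app wiring from this PR; the task itself (name, rate limit) is illustrative and not part of the actual change:
```python
# Hypothetical task; fetch_metadata() and fetch_commits() are the existing model methods.
from celery import shared_task

from package.models import Package


@shared_task(rate_limit="1/s")  # throttle via Celery instead of a blocking sleep(5)
def update_package(package_pk):
    package = Package.objects.get(pk=package_pk)
    package.fetch_metadata(fetch_pypi=False)
    package.fetch_commits()
```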
|
Hi @jefftriplett! I want to work on this ticket :)
I'm thinking that we can use the same celery docker config as defined by Cookiecutter Django. What do you think?
We are currently running `package_updater` via cron schedule.
```bash
crontab -l
12 23 * * * /usr/local/bin/docker-compose -f /code/djangopackages/docker-compose.yml run --rm django-a python manage.py searchv2_build
36 17 * * * /usr/local/bin/docker-compose -f /code/djangopackages/docker-compose.yml run --rm django-a python manage.py package_updater
48 21 * * * /usr/local/bin/docker-compose -f /code/djangopackages/docker-compose.yml run --rm django-a python manage.py pypi_updater
```
@luzfcb thanks for the cron details. How do you feel about offloading some of the heavy lifting to Celery or some other task queue? I think it could clean a few things up, and we wouldn't have as many blocking processes running on the web side or tasks piling up on the backend.
> How do you feel about offloading some of the heavy lifting to Celery or some other task queue? I think it could clean a few things up, and we wouldn't have as many blocking processes running on the web side or tasks piling up on the backend.
@jefftriplett No problem for me. I think it's the right thing to do.
> I'm thinking that we can use the same celery docker config as defined by Cookiecutter Django. What do you think?
@nicysneiros this seems like a good starting point IMO. Let us know if you have any question and thanks 🙏
| 2022-01-06T20:50:08 |
|
djangopackages/djangopackages
| 773 |
djangopackages__djangopackages-773
|
[
"772"
] |
6598a6b024f5b971d5549efb95e630d7ea8cd2e7
|
diff --git a/package/management/commands/cleanup_github_projects.py b/package/management/commands/cleanup_github_projects.py
new file mode 100644
--- /dev/null
+++ b/package/management/commands/cleanup_github_projects.py
@@ -0,0 +1,55 @@
+import djclick as click
+import requests
+
+from package.models import Package
+
+
[email protected]()
[email protected]("--limit", default=None)
+def command(limit):
+ # fix non-https links
+ packages = Package.objects.filter(repo_url__startswith="http://github.com")
+ if packages.exists():
+ click.secho(
+ f"Found {packages.count()} GitHub Packages that need to be migrated",
+ fg="yellow",
+ )
+ for package in packages:
+ click.echo(f" migrating {package.repo_url}")
+ package.repo_url = package.repo_url.replace(
+ "http://github.com", "https://github.com"
+ )
+ try:
+ package.save()
+ except Exception as e:
+ click.secho(f"{e}", fg="red")
+ # TODO: write migration code...
+
+ packages = Package.objects.filter(repo_url__startswith="https://github.com")
+
+ if limit:
+ packages = packages[0:limit]
+
+ if packages.exists():
+ click.secho(
+ f"Found {packages.count()} GitHub Packages to be scanned",
+ fg="yellow",
+ )
+ for package in packages:
+ response = requests.get(package.repo_url)
+ history = response.history
+ if len(history):
+ new_packages = Package.objects.filter(repo_url__startswith=response.url)
+ if new_packages.exists():
+ found_pks = new_packages.values_list("pk", flat=True)
+ click.echo(f" found {found_pks}")
+
+ else:
+ click.echo(f" migrating {package.repo_url} => {response.url}")
+ package.repo_url = response.url
+ # TODO: Make a note about migrating the account...
+ package.save()
+
+ # package.date_deprecated
+ # package.deprecated_by
+ # package.deprecates_package
|
Clean up migrated projects
I did some manual cleanup Friday night (mostly jazzband projects) to get a better understanding of how to fix GitHub projects which have been renamed or migrated.
Some of this is needed for #761 and to start addressing failed projects in our logs.
| 2022-01-22T18:06:35 |
||
djangopackages/djangopackages
| 787 |
djangopackages__djangopackages-787
|
[
"786"
] |
0f680eaae7c59014d7b29ccf10dc74bcae6e1b96
|
diff --git a/package/models.py b/package/models.py
--- a/package/models.py
+++ b/package/models.py
@@ -267,8 +267,8 @@ def fetch_pypi_data(self, *args, **kwargs):
):
self.supports_python3 = True
- # add to versions
- if "license" in info and info["license"]:
+ # do we have a license set?
+ if "license" in info and len(info["license"]):
licenses = [info["license"]]
for index, license in enumerate(licenses):
if license or "UNKNOWN" == license.upper():
@@ -280,6 +280,16 @@ def fetch_pypi_data(self, *args, **kwargs):
version.licenses = licenses
version.license = licenses[0]
+ # do we have a license set in our classifier?
+ elif "classifiers" in info and len(info["classifiers"]):
+ licenses = []
+ for classifier in info["classifiers"]:
+ if classifier.startswith("License"):
+ licenses.append(classifier.split("::")[-1].strip())
+
+ version.licenses = licenses
+ version.license = licenses[0]
+
# version stuff
try:
url_data = release["urls"][0]
|
🐛 License shows "UNKNOWN"
The license field shows "UNKNOWN" if the PyPI "license" field is blank, even if it contains a valid license in the classifiers field.
| 2022-02-19T01:09:10 |
||
djangopackages/djangopackages
| 793 |
djangopackages__djangopackages-793
|
[
"462"
] |
8872319c999ffc1d193f6d523a571c3836c1332a
|
diff --git a/package/repos/bitbucket.py b/package/repos/bitbucket.py
--- a/package/repos/bitbucket.py
+++ b/package/repos/bitbucket.py
@@ -8,7 +8,7 @@
import requests
-API_TARGET = "https://api.bitbucket.org/1.0/repositories"
+API_TARGET = "https://api.bitbucket.org/2.0/repositories"
descendants_re = re.compile(r"Forks/Queues \((?P<descendants>\d+)\)", re.IGNORECASE)
@@ -24,7 +24,8 @@ def _get_bitbucket_commits(self, package):
repo_name = package.repo_name()
if repo_name.endswith("/"):
repo_name = repo_name[0:-1]
- target = f"{API_TARGET}/{repo_name}/changesets/?limit=50"
+ # not sure if the limit parameter does anything in api 2.0
+ target = f"{API_TARGET}/{repo_name}/commits/?limit=50"
try:
data = self.get_json(target)
except requests.exceptions.HTTPError:
@@ -32,7 +33,7 @@ def _get_bitbucket_commits(self, package):
if data is None:
return [] # todo: log this?
- return data.get("changesets", [])
+ return data.get("values", [])
def fetch_commits(self, package):
from package.models import (
@@ -40,11 +41,11 @@ def fetch_commits(self, package):
) # Import placed here to avoid circular dependencies
for commit in self._get_bitbucket_commits(package):
- timestamp = commit["timestamp"].split("+")
+ timestamp = commit["date"].split("+")
if len(timestamp) > 1:
timestamp = timestamp[0]
else:
- timestamp = commit["timestamp"]
+ timestamp = commit["date"]
commit, created = Commit.objects.get_or_create(
package=package, commit_date=timestamp
)
@@ -93,15 +94,15 @@ def fetch_metadata(self, package):
data = self.get_json(url)
except requests.exceptions.HTTPError:
return package
- package.repo_forks = len(data["forks"])
+ package.repo_forks = len(data["values"])
# get the followers of a repo
- url = f"{target}followers/"
+ url = f"{target}watchers/"
try:
data = self.get_json(url)
except requests.exceptions.HTTPError:
return package
- package.repo_watchers = data["count"]
+ package.repo_watchers = len(data.get("values", []))
# Getting participants
try:
|
diff --git a/package/tests/get_bitbucket_repos.py b/package/tests/get_bitbucket_repos.py
new file mode 100644
--- /dev/null
+++ b/package/tests/get_bitbucket_repos.py
@@ -0,0 +1,85 @@
+"""
+A script to find Bitbucket repos that (a) still exist and (b) have forks.
+
+Usage: python get_bitbucket_repos.py
+
+Outputs a list of repos and the number of forks each has.
+
+When testing repo handlers, the tests call the Bitbucket repo handler to
+fetch repo metadata. However, many Bitbucket repos are no longer active,
+have disappeared, or have no forks. This script was created to find a good
+repo to test against, and, may be needed in the future if that particular
+repo goes away. It may take a few minutes to run, due to only being able
+to hit the APIs so fast.
+"""
+
+import requests, json, re, time
+
+DJPACK_API_URL = "https://djangopackages.org/api/v3/packages/"
+DJPACK_API_URL_BASE = "https://djangopackages.org"
+
+
+def bitbucket_urls():
+ next_url = DJPACK_API_URL
+ while next_url:
+ response = requests.get(next_url)
+ parsed = json.loads(response.content)
+ next_path = parsed["meta"]["next"]
+ next_url = f"{DJPACK_API_URL_BASE}{next_path}" if next_path else None
+ for repo in parsed["objects"]:
+ if "bitbucket.org" in repo["repo_url"]:
+ yield repo["repo_url"]
+ time.sleep(.1)
+
+
+def non404urls(urls):
+ for url in urls:
+ url = url.strip()
+ response = requests.get(url)
+ # if response.status_code == 200:
+ # print(url)
+ if response.status_code != 404:
+ yield response.status_code, url
+ time.sleep(1)
+ if response.status_code == 429: # too many requests:
+ time.sleep(10)
+
+
+def bitbucket_repos_with_forks(urls, include_unforked=False):
+ for url in urls:
+ urlparts = url.split("/")
+ if len(urlparts) < 5:
+ continue
+ _, _, _, user, repo, *_ = urlparts
+ api_url = f"https://api.bitbucket.org/2.0/repositories/{user}/{repo}/forks/"
+ response = requests.get(api_url)
+ if response.status_code != 200:
+ continue
+ parsed = json.loads(response.content)
+ num_forks = len(parsed["values"])
+ if num_forks or include_unforked:
+ yield num_forks, url
+
+ time.sleep(1)
+ if response.status_code == 429: # too many requests:
+ time.sleep(10)
+
+
+def main():
+ print("Getting bitbucket repos from Django Packages API...")
+ urls = list(bitbucket_urls())
+ print(f"Found {len(urls)}.")
+
+ # We might not actually need do do this before calling the BB API
+ print("Checking for non-404'd repos...")
+ urls = [url for status, url in non404urls(urls) if status == 200]
+ print(f"Found {len(urls)}.")
+
+ print("Searching Bitbucket Cloud API for repos with forks...")
+ results = list(bitbucket_repos_with_forks(urls))
+ print(f"Found {len(results)}. Showing repos and number of forks:")
+ for num_forks, url in results:
+ print(num_forks, url)
+
+if __name__ == '__main__':
+ main()
diff --git a/package/tests/test_repos.py b/package/tests/test_repos.py
--- a/package/tests/test_repos.py
+++ b/package/tests/test_repos.py
@@ -1,21 +1,26 @@
import pytest
+from django.test import TestCase
+
from package.repos import get_repo, get_repo_for_repo_url, supported_repos
from package.repos.base_handler import BaseHandler
from package.repos.unsupported import UnsupportedHandler
+from package.repos.bitbucket import BitbucketHandler
+from package.repos.github import GitHubHandler
+from package.models import Package, Category, Commit
-# class TestBaseHandler(TestCase):
-# def setUp(self):
-# super().setUp()
-# self.category = Category.objects.create(title="dummy", slug="dummy")
-# self.category.save()
-# self.package = Package.objects.create(
-# title="Django Piston",
-# slug="django-piston",
-# repo_url="https://bitbucket.org/jespern/django-piston",
-# category=self.category,
-# )
+class TestBaseHandler(TestCase):
+ def setUp(self):
+ super().setUp()
+ self.category = Category.objects.create(title="dummy", slug="dummy")
+ self.category.save()
+ self.package = Package.objects.create(
+ title="Django Piston",
+ slug="django-piston",
+ repo_url="https://bitbucket.org/jespern/django-piston",
+ category=self.category,
+ )
def test_base_handler_not_implemented(package):
@@ -179,70 +184,89 @@ def test_get_repo_registry(package):
# TODO: Convert all of these to pytest tests and re-write them since
# they were already commented out.
-"""
class TestBitbucketRepo(TestBaseHandler):
def setUp(self):
super(TestBitbucketRepo, self).setUp()
self.package = Package.objects.create(
- title="django",
- slug="django",
- repo_url="https://bitbucket.org/django/django",
- category=self.category
+ category=self.category,
+ title="django-mssql",
+ slug="django-mssql",
+ repo_url="https://bitbucket.org/Manfre/django-mssql/"
)
+ self.bitbucket_handler = BitbucketHandler()
def test_fetch_commits(self):
self.assertEqual(Commit.objects.count(), 0)
- bitbucket_handler.fetch_commits(self.package)
+ self.bitbucket_handler.fetch_commits(self.package)
self.assertNotEqual(Commit.objects.count(), 0)
def test_fetch_metadata(self):
- package = bitbucket_handler.fetch_metadata(self.package)
+ package = self.bitbucket_handler.fetch_metadata(self.package)
self.assertTrue(
- package.repo_description.startswith("Official clone of the Subversion repo")
+ package.repo_description.startswith("Microsoft SQL server backend for Django running on windows")
)
self.assertTrue(package.repo_watchers > 0)
self.assertTrue(package.repo_forks > 0)
- self.assertEquals(package.participants, "django")
-"""
-
-
-# class TestGithubRepo(TestBaseHandler):
-# def setUp(self):
-# super().setUp()
-# self.package = Package.objects.create(
-# title="Django",
-# slug="django",
-# repo_url="https://github.com/django/django",
-# category=self.category,
-# )
-
-# # def test_fetch_commits(self):
-# # import time
-# # time.sleep(10)
-# # self.assertEqual(Commit.objects.count(), 0)
-# # github_handler.fetch_commits(self.package)
-# # self.assertTrue(Commit.objects.count() > 0)
-
-# # def test_fetch_metadata(self):
-# # # Currently a live tests that access github
-# # package = github_handler.fetch_metadata(self.package)
-# # self.assertEqual(package.repo_description, "The Web framework for perfectionists with deadlines.")
-# # self.assertTrue(package.repo_watchers > 100)
-
-# # # test what happens when setting up an unsupported repo
-# # self.package.repo_url = "https://example.com"
-# # self.package.fetch_metadata()
-# # self.assertEqual(self.package.repo_description, "")
-# # self.assertEqual(self.package.repo_watchers, 0)
-# # self.package.fetch_commits()
-
-
-# class TestGitlabRepo(TestBaseHandler):
-# def setUp(self):
-# super().setUp()
-# self.package = Package.objects.create(
-# title="Django",
-# slug="django",
-# repo_url="https://gitlab.com/delta10/kees",
-# category=self.category,
-# )
+ self.assertEquals(package.participants, "Manfre")
+
+
+class TestGithubRepo(TestBaseHandler):
+ def setUp(self):
+ super().setUp()
+ self.package = Package.objects.create(
+ title="Django",
+ slug="django",
+ repo_url="https://github.com/django/django",
+ category=self.category,
+ )
+ self.github_handler = GitHubHandler()
+
+ self.invalid_package = Package.objects.create(
+ title="Invalid Package",
+ slug="invldpkg",
+ repo_url="https://example.com",
+ category=self.category,
+ )
+
+ def test_fetch_commits(self):
+ self.assertEqual(Commit.objects.count(), 0)
+ self.github_handler.fetch_commits(self.package)
+ self.assertTrue(Commit.objects.count() > 0)
+
+ def test_fetch_metadata(self):
+ # Currently a live tests that access github
+ package = self.github_handler.fetch_metadata(self.package)
+ self.assertEqual(package.repo_description, "The Web framework for perfectionists with deadlines.")
+ self.assertTrue(package.repo_watchers > 100)
+
+ def test_fetch_metadata_unsupported_repo(self):
+ # test what happens when setting up an unsupported repo
+ self.package.repo_url = "https://example.com"
+ package = self.github_handler.fetch_metadata(self.invalid_package)
+
+ self.assertEqual(package.repo_description, "")
+ self.assertEqual(package.repo_watchers, 0)
+ self.invalid_package.fetch_commits()
+ self.assertEqual(package.commit_set.count(), 0)
+
+
+class TestGitlabRepo(TestBaseHandler):
+ def setUp(self):
+ super().setUp()
+ self.package = Package.objects.create(
+ title="Django",
+ slug="django",
+ repo_url="https://gitlab.com/delta10/kees",
+ category=self.category,
+ )
+
+
+class TestRepos(TestBaseHandler):
+ def test_repo_registry(self):
+ from package.repos import get_repo, supported_repos
+
+ g = get_repo("github")
+ self.assertEqual(g.title, "GitHub")
+ self.assertEqual(g.url, "https://github.com")
+ self.assertTrue("github" in supported_repos())
+ self.assertRaises(ImportError, lambda: get_repo("xyzzy"))
|
Re-enable bitbucket test
Bitbucket repo tests are currently disabled (https://github.com/pydanny/djangopackages/commit/4ac30c5420404ae4f3aa6cac05cad161e43af0b6).
|
@jayfk Do you recall why it was disabled? Please share if you do :)
| 2022-02-25T20:56:09 |
djangopackages/djangopackages
| 800 |
djangopackages__djangopackages-800
|
[
"703"
] |
ad2930c4d2f541266ee60be0520bf593be6bd7b5
|
diff --git a/package/admin.py b/package/admin.py
--- a/package/admin.py
+++ b/package/admin.py
@@ -41,6 +41,7 @@ class PackageAdmin(VersionAdmin):
"score",
"created_by",
"last_modified_by",
+ "date_repo_archived",
"date_deprecated",
"deprecates_package",
"deprecated_by",
@@ -55,7 +56,6 @@ class PackageAdmin(VersionAdmin):
"repo_description",
"repo_watchers",
"repo_forks",
- "date_repo_archived",
"commit_list",
"pypi_downloads",
"pypi_classifiers",
diff --git a/package/management/commands/package_updater.py b/package/management/commands/package_updater.py
--- a/package/management/commands/package_updater.py
+++ b/package/management/commands/package_updater.py
@@ -41,7 +41,7 @@ def handle(self, *args, **options):
try:
try:
- package.fetch_metadata(fetch_pypi=False)
+ package.fetch_metadata(fetch_pypi=False, fetch_repo=True)
package.fetch_commits()
except Exception as e:
logger.error(
diff --git a/package/repos/github.py b/package/repos/github.py
--- a/package/repos/github.py
+++ b/package/repos/github.py
@@ -43,14 +43,12 @@ def fetch_metadata(self, package):
if repo is None:
return package
- # import pytest
- # pytest.set_trace()
-
# package.repo_watchers = repo.watchers
package.repo_watchers = repo.watchers_count
if repo.archived:
- package.date_repo_archived = timezone.now()
+ if not package.date_repo_archived:
+ package.date_repo_archived = timezone.now()
# package.repo_forks = repo.forks
package.repo_forks = repo.forks_count
|
🔍 Detect Archived GitHub Projects
Semi-related to #681
When we use the GitHub API to check the stats on projects, we should look for the archived bit so that we can flag packages. This could be an archived_date field, or maybe there's a good way to flag these packages for review with a note?
See https://github.com/berinhard/model_mommy for an example.
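A minimal sketch of the check, assuming a github3.py-style repo object that exposes `archived` (as the handler change in this PR does); only `date_repo_archived` comes from the actual model:
```python
# Hypothetical helper; in the real code this logic lives in GitHubHandler.fetch_metadata().
from django.utils import timezone


def flag_if_archived(package, repo):
    """Stamp the package the first time we see the repo's archived bit set."""
    if repo.archived and not package.date_repo_archived:
        package.date_repo_archived = timezone.now()
        package.save()
    return package
```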
|
Hi @jefftriplett ! I intend to work on this issue and will be submitting a PR soon :+1:
Sounds great. 👍
@jefftriplett and @cacoze it looks like this was fixed with PR #761. Is that correct?
@ryancheley The feature was added, but I noticed a few packages like https://djangopackages.org/packages/p/dj-paginator/ are not being flagged. I'm going to poke around and see if we missed something obvious.
| 2022-03-05T05:08:51 |
|
djangopackages/djangopackages
| 820 |
djangopackages__djangopackages-820
|
[
"749"
] |
5decbeb84769864f4c78305597acfd9c3a30b129
|
diff --git a/settings/base.py b/settings/base.py
--- a/settings/base.py
+++ b/settings/base.py
@@ -80,7 +80,6 @@
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
- "dj_pagination.middleware.PaginationMiddleware",
"waffle.middleware.WaffleMiddleware",
"django_structlog.middlewares.RequestMiddleware",
"maintenance_mode.middleware.MaintenanceModeMiddleware",
@@ -155,7 +154,6 @@
# external
"maintenance_mode",
"crispy_forms",
- "dj_pagination",
"django_better_admin_arrayfield",
"django_extensions",
"django_tables2",
|
Modernise tables / dj-pagination is archived
I was looking at some of the other dependencies of the project and noticed that dj-pagination had been [archived](https://github.com/pydanny/dj-pagination).
I'm assuming the project would therefore like to try and remove reliance on this package?
The package is used in a couple of [templates](https://github.com/djangopackages/djangopackages/blob/506184070e37bdcfad5658d8a1579f03db2593aa/templates/package/category.html#L54). I have had a look at what it would take to move to Django's own `Paginator` and this looks simple enough. However, it is used in conjunction with `webstack-django-sorting` (wds), which enables you to click on the table headers to sort the data.
Unfortunately the package [doesn't work](https://github.com/webstack/webstack-django-sorting/issues/12) with Django's Paginator. So the complexity here is much higher than "just" a Paginator.
So I guess there are a few options:
- Leave things alone until there is an incompatibility with Django (I've had a PR merged to wds to add Django 4.0 support)
- Use Django Paginator, drop the sort feature
- use Django Paginator, roll our own sort feature
- Use another package, `django-tables2` seems to do what we need, it looks like it covers the features of both of the above packages and seems to be well maintained (release on PyPI already declares django 4.0 and py 3.10 support)
https://github.com/jieter/django-tables2
What are your thoughts? There are certainly things I am missing, but to me it feels like django-tables2 is the front runner here (see the sketch below). I'm not sure what a patch to include it would actually look like, but it feels like it is worth investigating.
(On mobile, apologies for poor spelling/grammar, etc.)
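A minimal sketch of what a django-tables2 table for the package listings might look like; the column choices are assumptions, only the field names come from the existing model:
```python
# Hypothetical table definition; per-column header sorting and pagination come built in.
import django_tables2 as tables

from package.models import Package


class PackageTable(tables.Table):
    class Meta:
        model = Package
        fields = ("title", "repo_watchers", "repo_forks", "pypi_downloads")
        order_by = "-repo_watchers"


# In a view, something along these lines:
#   table = PackageTable(Package.objects.all())
#   tables.RequestConfig(request, paginate={"per_page": 25}).configure(table)
```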
|
Great catch. I'd either lean towards Django's built-in or `django-tables2` if it gives some compelling features.
My main goal this quarter is a pretty big grid refactor and to address some performance issues.
Not everything is covered yet, but I made a dent into it with #815
| 2022-03-25T20:43:57 |
|
djangopackages/djangopackages
| 851 |
djangopackages__djangopackages-851
|
[
"850"
] |
9951ae1f3ceff06e37af1e2c413dfc1e5552317d
|
diff --git a/grid/views.py b/grid/views.py
--- a/grid/views.py
+++ b/grid/views.py
@@ -304,7 +304,7 @@ def grid_detail(request, slug, template_name="grid/grid_detail.html"):
}
grid_packages = grid.grid_packages.select_related("package").filter(
- package__score__gt=max(0, settings.PACKAGE_SCORE_MIN)
+ package__score__gte=max(0, settings.PACKAGE_SCORE_MIN)
)
if filters.get("python3"):
|
diff --git a/package/tests/test_repos.py b/package/tests/test_repos.py
--- a/package/tests/test_repos.py
+++ b/package/tests/test_repos.py
@@ -151,7 +151,8 @@ def test_base_handler_get_repo_for_repo_url():
sebpiq/spiteat/
schinckel/django-timedelta-field/
http://projects.unbit.it/hg/uwsgi
-http://www.dataportal.it"""
+http://www.dataportal.it
+https://hg.code.netlandish.com/~petersanchez/django-impersonate"""
for sample in samples.split("\n"):
assert isinstance(get_repo_for_repo_url(sample), UnsupportedHandler)
|
Packages with custom git repos are not being scored
See this tweet: https://twitter.com/morenoh149/status/1580971411145125888
Package scoring should factor in packages that exist on PyPI, but might have a custom repo location. They appear to be scored as a 0 and won't show up in Grids.
|
I took a quick peek at this. It seems like the `Package.calculate_score()` method returns `0` if the `Package.repo_watchers` field is set to `0`.
https://github.com/djangopackages/djangopackages/blob/9951ae1f3ceff06e37af1e2c413dfc1e5552317d/package/models.py#L394-L416
And the `grid_detail` view is filtering out the packages that score `<1`. (This makes the package not show up in the grid)
https://github.com/djangopackages/djangopackages/blob/9951ae1f3ceff06e37af1e2c413dfc1e5552317d/grid/views.py#L306-L308
This happens when the `UnsupportedHandler` (Unsupported Git Service) is used.
https://github.com/djangopackages/djangopackages/blob/9951ae1f3ceff06e37af1e2c413dfc1e5552317d/package/repos/unsupported.py#L11
A quick fix would be either to update the `Package.calculate_score()` method to use the value `1` when `Package.repo_watchers` is set to `0`, or to set a default for `Package.repo_watchers` in the `UnsupportedHandler`; a sketch of the first option follows.
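A minimal sketch of the first option; this is a standalone illustration of the guard, not the full `calculate_score()` method:
```python
# Hypothetical guard; the real scoring math on Package.calculate_score() has more inputs.
def score_from_watchers(repo_watchers: int) -> int:
    # Treat packages on unsupported repo hosts (repo_watchers == 0) as having one
    # watcher so their score is never zeroed out and they still appear in grids.
    return max(1, repo_watchers)
```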
| 2022-10-21T00:26:09 |
djangopackages/djangopackages
| 865 |
djangopackages__djangopackages-865
|
[
"759"
] |
7d82426bfd0d47008be8bcc7dfc244c83b3b326e
|
diff --git a/fabfile.py b/fabfile.py
--- a/fabfile.py
+++ b/fabfile.py
@@ -152,6 +152,10 @@ def maintenance_mode_off(service):
docker_compose(f"exec {service} python manage.py maintenance_mode off")
+def purge_cache(service):
+ docker_compose(f"exec {service} cli4 --delete purge_everything=true /zones/:djangopackages.org/purge_cache")
+
+
def docker_compose(command):
"""
Run a docker-compose command
|
Invalidate Cloudflare cache when a static file changes
**Describe the bug**
In #719 the [grid css](https://github.com/djangopackages/djangopackages/blob/930c7565ea216e42380694d32f1311b6d64e78b9/static/css/grid.css) was improved, but in production the old version is still served.
I'm not sure if something is wrong with [`collectstatic`](https://github.com/djangopackages/djangopackages/blob/930c7565ea216e42380694d32f1311b6d64e78b9/compose/django/start.sh#L3), with some Caddy HTTP headers, or with a configuration on Cloudflare.
**To Reproduce**
Steps to reproduce the behavior:
1. Open the https://djangopackages.org/static/css/grid.css and check if it contains the [new css ](https://github.com/djangopackages/djangopackages/blob/930c7565ea216e42380694d32f1311b6d64e78b9/static/css/grid.css)
**Expected behavior**
Whenever a new deployment occurs, if a static file has changed, Cloudflare must invalidate the previous version and serve the new version.
**Desktop (please complete the following information):**
- OS: Ubuntu 20.04
- Browser: Google Chrome v95.0.4638.54
|
I found out I don't have access to the Cloudflare account
Should be doable with https://developers.cloudflare.com/cache/how-to/purge-cache
I transferred Cloudflare into my personal account, but I'll poke around to see if there's a team option or if we need to create a new account, etc.
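A minimal sketch of the purge call against Cloudflare's v4 API, e.g. as a post-deploy hook; the environment variable names are made up, and the deployed fix uses the `cli4` CLI instead:
```python
# Hypothetical deploy hook; needs a zone id and an API token with cache-purge permission.
import os

import requests


def purge_cloudflare_cache():
    zone_id = os.environ["CLOUDFLARE_ZONE_ID"]
    token = os.environ["CLOUDFLARE_API_TOKEN"]
    response = requests.post(
        f"https://api.cloudflare.com/client/v4/zones/{zone_id}/purge_cache",
        headers={"Authorization": f"Bearer {token}"},
        json={"purge_everything": True},
    )
    response.raise_for_status()
    return response.json()
```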
| 2022-10-30T00:28:19 |
|
djangopackages/djangopackages
| 948 |
djangopackages__djangopackages-948
|
[
"903"
] |
f57a2f291370b2dbccb585d89e2aa43f73cda28b
|
diff --git a/settings/base.py b/settings/base.py
--- a/settings/base.py
+++ b/settings/base.py
@@ -256,11 +256,12 @@
GITHUB_APP_ID = env("GITHUB_APP_ID", default="")
GITHUB_TOKEN = env("GITHUB_TOKEN", default="")
-SOCIAL_AUTH_GITHUB_KEY = GITHUB_APP_ID
-SOCIAL_AUTH_GITHUB_SECRET = GITHUB_API_SECRET
-SOCIAL_AUTH_ENABLED_BACKENDS = ("github",)
-SOCIAL_AUTH_COMPLETE_URL_NAME = "socialauth_complete"
+# GitHub OAuth for login settings
SOCIAL_AUTH_ASSOCIATE_URL_NAME = "associate_complete"
+SOCIAL_AUTH_COMPLETE_URL_NAME = "socialauth_complete"
+SOCIAL_AUTH_ENABLED_BACKENDS = ("github",)
+SOCIAL_AUTH_GITHUB_KEY = env("SOCIAL_AUTH_GITHUB_KEY", default=GITHUB_APP_ID)
+SOCIAL_AUTH_GITHUB_SECRET = env("SOCIAL_AUTH_GITHUB_SECRET", default=GITHUB_API_SECRET)
def SOCIAL_AUTH_DEFAULT_USERNAME(u):
|
:closed_lock_with_key: Auth Improvement
**Is your feature request related to a problem? Please describe.**
No.
**Describe the solution you'd like**
A better way to auth in order to use Django packages
**Describe alternatives you've considered**
N/A
**Additional context**

|
@daheats thanks. It's been a while since I logged out/back in. I'll look into what we need to match the names up with the org.
I think we need to transfer ownership of the OAuth App from personal account to the GitHub Organisation.
Ref: https://docs.github.com/en/apps/oauth-apps/maintaining-oauth-apps/transferring-ownership-of-an-oauth-app
| 2023-03-10T20:36:37 |
|
djangopackages/djangopackages
| 959 |
djangopackages__djangopackages-959
|
[
"958"
] |
85c947e6649543bb4c9bbc431152292ba4a30d52
|
diff --git a/package/management/commands/package_updater.py b/package/management/commands/package_updater.py
--- a/package/management/commands/package_updater.py
+++ b/package/management/commands/package_updater.py
@@ -24,7 +24,7 @@ def __init__(self, error, title):
@click.command()
@click.option("--limit", default=None, type=int)
-def command(all, limit):
+def command(limit):
"""Updates all the GitHub Packages in the database."""
github = github_login(token=settings.GITHUB_TOKEN)
|
diff --git a/package/tests/test_package_updater.py b/package/tests/test_package_updater.py
new file mode 100644
--- /dev/null
+++ b/package/tests/test_package_updater.py
@@ -0,0 +1,9 @@
+import pytest
+
+from click import exceptions
+from django.core.management import call_command
+
+
+def test_package_updater_command(db):
+ with pytest.raises(exceptions.Exit):
+ call_command("package_updater", "--help")
|
🐛 package_updater is missing the `all` argument
**Describe the bug**
The `package_updater` management command is missing the `all` argument. This means we should at least be testing that we can invoke `--help` on this command too.
**To Reproduce**
```
root@web2:~# /usr/bin/docker compose -f /code/djangopackages/docker-compose.prod.yml run --rm django-a python manage.py package_updater
[+] Running 1/0
⠿ Container djangopackages-redis-1 Running 0.0s
Postgres is up - continuing...
Traceback (most recent call last):
File "/app/manage.py", line 10, in <module>
execute_from_command_line(sys.argv)
File "/usr/local/lib/python3.9/site-packages/django/core/management/__init__.py", line 446, in execute_from_command_line
utility.execute()
File "/usr/local/lib/python3.9/site-packages/django/core/management/__init__.py", line 440, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/usr/local/lib/python3.9/site-packages/djclick/adapter.py", line 68, in run_from_argv
exit_code = self.main(
File "/usr/local/lib/python3.9/site-packages/click/core.py", line 1055, in main
rv = self.invoke(ctx)
File "/usr/local/lib/python3.9/site-packages/djclick/adapter.py", line 50, in invoke
return super(DjangoCommandMixin, self).invoke(ctx)
File "/usr/local/lib/python3.9/site-packages/click/core.py", line 1404, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/local/lib/python3.9/site-packages/click/core.py", line 760, in invoke
return __callback(*args, **kwargs)
TypeError: command() missing 1 required positional argument: 'all'
```
| 2023-03-27T17:17:45 |
|
djangopackages/djangopackages
| 967 |
djangopackages__djangopackages-967
|
[
"960"
] |
b3bfcd7ef0c998f001b8caa623912feaba3ed9c2
|
diff --git a/fabfile.py b/fabfile.py
--- a/fabfile.py
+++ b/fabfile.py
@@ -20,6 +20,7 @@
from fabric.colors import blue
from fabric.operations import local as lrun
from fabric.operations import put, run
+from rich import print
def local():
@@ -27,6 +28,7 @@ def local():
Work on the local environment
"""
env.compose_file = "docker-compose.yml"
+ env.compose_version = "v1"
env.project_dir = "."
env.run = lrun
env.cd = lcd
@@ -37,23 +39,35 @@ def production():
Work on the production environment
"""
env.hosts = [
- "159.203.191.135"
+ "165.22.184.193"
] # list the ip addresses or domain names of your production boxes here
- env.port = 56565 # ssh port
env.user = "root" # remote user, see `env.run` if you don't log in as root
env.compose_file = "docker-compose.prod.yml"
+ env.compose_version = "v2"
env.project_dir = "/code/djangopackages" # this is the project dir where your code lives on this machine
-
- # if you don't use key authentication, add your password here
- # env.password = "foobar"
- # if your machine has no bash installed, fall back to sh
- # env.shell = "/bin/sh -c"
-
env.run = run # if you don't log in as root, replace with 'env.run = sudo'
env.cd = cd
+def setup():
+ env.run("apt update")
+ env.run(
+ "apt install apt-transport-https ca-certificates curl software-properties-common"
+ )
+ env.run(
+ "curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg"
+ )
+ env.run(
+ 'echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null'
+ )
+ env.run("apt update")
+ env.run("apt-cache policy docker-ce")
+ env.run("apt install docker-ce")
+ env.run("systemctl status docker")
+ env.run("systemctl enable docker.service")
+
+
def copy_secrets():
"""
Copies secrets from local to remote.
@@ -94,6 +108,22 @@ def backup():
env.run("gzip /data/djangopackages/backups/*.sql")
+def build_and_restart(service):
+ docker_compose(f"build {service}")
+ docker_compose(f"create {service}")
+ docker_compose(f"stop {service}")
+ docker_compose(f"start {service}")
+
+
+def clearsessions():
+ """
+ Clear old database sessions
+ """
+
+ with env.cd(env.project_dir):
+ docker_compose("run django-a python manage.py clearsessions")
+
+
def cron():
with env.cd(env.project_dir):
docker_compose("run django-a python manage.py import_classifiers")
@@ -101,23 +131,32 @@ def cron():
docker_compose("run django-a python manage.py import_releases")
-def deploy():
+def deploy(clearsessions: bool = False, stash: bool = False):
"""
Pulls the latest changes from main, rebuilt and restarts the stack
"""
- # lrun("git push origin main")
# copy_secrets()
+
with env.cd(env.project_dir):
- # Manage Backups
- # docker_compose("run django-a python manage.py clearsessions")
+ # Clear old database sessions
+ if clearsessions:
+ docker_compose("run django-a python manage.py clearsessions")
# docker_compose("run postgres backup")
# env.run("gzip /data/djangopackages/backups/*.sql")
+ # stash existing changes
+ if stash:
+ env.run("git stash")
+
# Pull the latest code
env.run("git pull origin main")
+ # stash existing changes
+ if stash:
+ env.run("git stash pop")
+
# turn maintenance mode on
# maintenance_mode_on("django-a")
@@ -140,13 +179,6 @@ def deploy():
# maintenance_mode_off("django-a")
-def build_and_restart(service):
- docker_compose(f"build {service}")
- docker_compose(f"create {service}")
- docker_compose(f"stop {service}")
- docker_compose(f"start {service}")
-
-
def collectstatic(service):
docker_compose(f"exec {service} python manage.py collectstatic --no-input -v 1")
@@ -165,10 +197,12 @@ def purge_cache(service):
)
-def docker_compose(command):
+def docker_compose(command, old=True):
"""
Run a docker-compose command
:param command: Command you want to run
"""
with env.cd(env.project_dir):
+ if env.compose_version == "v2":
+ return env.run(f"docker compose -f {env.compose_file} {command}")
return env.run(f"docker-compose -f {env.compose_file} {command}")
diff --git a/homepage/views.py b/homepage/views.py
--- a/homepage/views.py
+++ b/homepage/views.py
@@ -1,4 +1,3 @@
-
from django.contrib.auth.models import User
from django.core.cache import cache
from django.db.models import Count, Q
|
:tractor: Test/migrate to web2.djangopackages.org server
This is a follow-up from #876
Over the past week, our primary server ran out of disk space which caused Redis to die and took everything down. Reported in #956
I created a new server (same resources but 2x the disk space) but need to do some load testing on it and wrap up the server bootstrap code into Fabric.
|
📆 I might move the floating IP for this on **2023-03-29** for a few hours and see how things look.
Currently testing, and everything looks solid so far. https://mastodon.social/@webology/110107688705044133
| 2023-03-31T20:16:18 |
|
djangopackages/djangopackages
| 1,027 |
djangopackages__djangopackages-1027
|
[
"1021"
] |
dcbb28199ea881659c6d110a7d23f0e8cc49d542
|
diff --git a/settings/base.py b/settings/base.py
--- a/settings/base.py
+++ b/settings/base.py
@@ -154,6 +154,7 @@
# external
"maintenance_mode",
"crispy_forms",
+ "crispy_bootstrap3",
"django_better_admin_arrayfield",
"django_extensions",
"django_htmx",
@@ -334,6 +335,7 @@ def SOCIAL_AUTH_DEFAULT_USERNAME(u):
########### end redis setup
########### crispy_forms setup
+CRISPY_ALLOWED_TEMPLATE_PACKS = "bootstrap3"
CRISPY_TEMPLATE_PACK = "bootstrap3"
########### end crispy_forms setup
|
all /grids/ views are broken
**Describe the bug**
Clicking on any of the grid listings at the top, for example [deployment](https://djangopackages.org/grids/g/deployment/) generates an error
**To Reproduce**
Steps to reproduce the behavior:
1. Go to Home Page
2. Click on `Deployment` (or any package listed at the top)
3. See error
**Expected behavior**
Clicking on deployment will list out the packages that are deployment related
**Screenshots**
<img width="1366" alt="image" src="https://github.com/djangopackages/djangopackages/assets/9857779/b70399ca-4d19-4f61-b645-afaa70fc840e">
|
also getting this issue
me too 😢😭 but thx for your job
+1
also getting this issue
| 2023-09-27T13:23:05 |
|
djangopackages/djangopackages
| 1,080 |
djangopackages__djangopackages-1080
|
[
"1077"
] |
5ea23fbf795c51c471ec08c8e3bc822fb22affef
|
diff --git a/fabfile.py b/fabfile.py
--- a/fabfile.py
+++ b/fabfile.py
@@ -100,14 +100,6 @@ def rollback(commit="HEAD~1"):
deploy()
-def backup():
- with env.cd(env.project_dir):
- # Manage Backups
- docker_compose("run django-a python manage.py clearsessions")
- docker_compose("run postgres backup")
- env.run("gzip /data/djangopackages/backups/*.sql")
-
-
def build_and_restart(service):
docker_compose(f"build {service} --parallel --progress plain")
docker_compose(f"create {service}")
@@ -143,9 +135,6 @@ def deploy(clearsessions: bool = False, stash: bool = False):
if clearsessions:
docker_compose("run django-a python manage.py clearsessions")
- # docker_compose("run postgres backup")
- # env.run("gzip /data/djangopackages/backups/*.sql")
-
# stash existing changes
if stash:
env.run("git stash")
|
Use Official PostgreSQL Docker Image for "postgres" Docker Service
I'd like to remove the `postgres/Dockerfile` at some point too since that feels like part of a different era and I don't see where that's useful anymore.
_Originally posted by @jefftriplett in https://github.com/djangopackages/djangopackages/pull/1076#discussion_r1421759886_
|
I'm 3 to 6 months into using `pgautoupgrade/pgautoupgrade:latest` for a dozen+ projects and I'm a fan. The project/image is designed to make postgres upgrades painless and so far so good for me.
I'm also happy to adopt one of the official images if that's preferred.
It's more of a convenience / safe choice than preferred. We can try out the one you suggested. :)
I like the official version most of the time because it's more likely to be stable/reliable, contain less bloat and have regular updates.
But pgautoupgrade sounds like an interesting project to try out.
I'm happy with either and I can also override it in compose override either way :) So whichever you prefer works for me.
| 2023-12-15T18:08:49 |
|
djangopackages/djangopackages
| 1,196 |
djangopackages__djangopackages-1196
|
[
"1195"
] |
3939fefca5bc0018949f63b1295666f34077a475
|
diff --git a/grid/forms.py b/grid/forms.py
--- a/grid/forms.py
+++ b/grid/forms.py
@@ -1,5 +1,6 @@
"""Forms for the :mod:`grid` app"""
+from crispy_forms.helper import FormHelper
from django.forms import BooleanField, ChoiceField, Form, ModelForm
from django.utils.translation import gettext_lazy as _
@@ -77,3 +78,9 @@ class GridPackageFilterForm(Form):
sort = ChoiceField(
choices=SORT_CHOICES, initial=SCORE, required=False, label=_("Sort by")
)
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.helper = FormHelper()
+ self.helper.field_template = "bootstrap3/layout/inline_field.html"
+ self.helper.form_class = "form-inline"
|
:tractor: Follow-up issue: Split out "Filter results" on grid view form template into individual fields
See previous issue:
@lipemorais please push this last template change, and we can complete it. I can update the form to use the form fields individually and skip having crispy forms process it all. (goal is a quick win for you not to create a never-ending issue)
_Originally posted by @jefftriplett in https://github.com/djangopackages/djangopackages/issues/1187#issuecomment-2222979425_
| 2024-07-12T20:12:57 |
||
wger-project/wger
| 170 |
wger-project__wger-170
|
[
"146"
] |
0d63f1105c10f9ba5fce1e0d45a8dfa79bb7d109
|
diff --git a/wger/nutrition/forms.py b/wger/nutrition/forms.py
--- a/wger/nutrition/forms.py
+++ b/wger/nutrition/forms.py
@@ -55,6 +55,9 @@ def __init__(self, *args, **kwargs):
class BmiForm(forms.ModelForm):
+ height = forms.DecimalField(widget=Html5NumberInput(),
+ max_value=999,
+ label=_('Height (cm)'))
weight = forms.DecimalField(widget=Html5NumberInput(),
max_value=999)
|
BMI And Calorie Calculator Not Working
Using this software in Linux Mint 13.
When I enter my data into either the BMI calculator or the calorie estimator nothing happens.
I have entered my height in cm and my weight in kgs.
The BMI calculator says my BMI = 0.
I'd be happy with 10.
|
I'll take a look
Hi!
I checked out the BMI calculator and I think that the problem could be with the BmiForm validation. It only accepts a decimal for the weight field and gets the integer field from the model.
@dfteam1 did you use a non-integer value in the height field?
@rolandgeider Do you think that we can show the form validation errors? I can do it if that is okay.
Yes, the decimal input seems to trigger this. Showing errors is always a good idea, but this form is a bit AJAXy and I would prefer something more general for this. I would suggest that we just fix this specific bug. @rlaszlo do you want to give it a try? If not, I can do it
Yes, I'm working on it now. Should I migrate the UserProfile.height field to Decimal or just add a new Decimal field for the BmiForm?
No, don't migrate the field, better solve it in the form
oh, BTW, if you solve this and make a pull request, send it to my "release" branch
| 2015-07-13T21:25:17 |
|
wger-project/wger
| 231 |
wger-project__wger-231
|
[
"210",
"210"
] |
2f8d1342fd8a97f90a714562985b2651df0453ca
|
diff --git a/wger/nutrition/models.py b/wger/nutrition/models.py
--- a/wger/nutrition/models.py
+++ b/wger/nutrition/models.py
@@ -37,6 +37,7 @@
from wger.utils.fields import Html5TimeField
from wger.utils.models import AbstractLicenseModel
from wger.utils.units import AbstractWeight
+from wger.weight.models import WeightEntry
MEALITEM_WEIGHT_GRAM = '1'
MEALITEM_WEIGHT_UNIT = '2'
@@ -121,7 +122,7 @@ def get_nutritional_values(self):
'fat': 0},
'per_kg': {'protein': 0,
'carbohydrates': 0,
- 'fat': 0}
+ 'fat': 0},
}
# Energy
@@ -139,10 +140,10 @@ def get_nutritional_values(self):
result['total'][key] * ENERGY_FACTOR[key][unit] / energy * 100
# Per body weight
- if self.user.userprofile.weight:
- weight = Decimal(self.user.userprofile.weight)
+ weight_entry = self.get_closest_weight_entry()
+ if weight_entry:
for key in result['per_kg'].keys():
- result['per_kg'][key] = result['total'][key] / weight
+ result['per_kg'][key] = result['total'][key] / weight_entry.weight
# Only 2 decimal places, anything else doesn't make sense
for key in result.keys():
@@ -151,6 +152,23 @@ def get_nutritional_values(self):
return result
+ def get_closest_weight_entry(self):
+ '''
+ Returns the closest weight entry for the nutrition plan.
+ Returns None if there are no entries.
+ '''
+ target = self.creation_date
+ closest_entry_gte = WeightEntry.objects.filter(user=self.user) \
+ .filter(date__gte=target).order_by('date').first()
+ closest_entry_lte = WeightEntry.objects.filter(user=self.user) \
+ .filter(date__lte=target).order_by('-date').first()
+ if closest_entry_gte is None or closest_entry_lte is None:
+ return closest_entry_gte or closest_entry_lte
+ if abs(closest_entry_gte.date - target) < abs(closest_entry_lte.date - target):
+ return closest_entry_gte
+ else:
+ return closest_entry_lte
+
def get_owner_object(self):
'''
Returns the object that has owner information
diff --git a/wger/nutrition/views/plan.py b/wger/nutrition/views/plan.py
--- a/wger/nutrition/views/plan.py
+++ b/wger/nutrition/views/plan.py
@@ -144,7 +144,11 @@ def view(request, id):
# Get the nutritional info
template_data['plan'] = plan
- template_data['nutritional_data'] = plan.get_nutritional_values()
+ template_data['nutritional_data'] = \
+ plan.get_nutritional_values()
+
+ # Get the weight entry used
+ template_data['weight_entry'] = plan.get_closest_weight_entry()
# Tokens for the links
template_data['uid'] = uid
|
Nutrition plan: pick correct body weight entry
In the `get_nutritional_values` method of `NutritionPlan` the per-kg-calculation uses the user's last weight entry. While this is correct when adding meals to the plan, it can be wrong when opening the plan later (the user's weight changed). This should be changed so that the weight entry _closest to the plan's date_ should be used.
Nutrition plan: pick correct body weight entry
In the `get_nutritional_values` method of `NutritionPlan` the per-kg-calculation uses the user's last weight entry. While this is correct when adding meals to the plan, it can be wrong when opening the plan later (the user's weight changed). This should be changed so that the weight entry _closest to the plan's date_ should be used.
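A minimal sketch of one way to pick the nearest entry, mirroring the approach in the patch above (it assumes the `WeightEntry` model with `user` and `date` fields):

```python
from wger.weight.models import WeightEntry


def closest_weight_entry(user, target_date):
    """Return the WeightEntry closest to target_date, or None if there are none."""
    after = (WeightEntry.objects.filter(user=user, date__gte=target_date)
             .order_by('date').first())
    before = (WeightEntry.objects.filter(user=user, date__lte=target_date)
              .order_by('-date').first())
    if after is None or before is None:
        return after or before
    # Both sides exist: keep whichever is nearer to the plan's date.
    if abs(after.date - target_date) < abs(before.date - target_date):
        return after
    return before
```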
|
I can take a look at this
~Avinash
:+1:
Hey @avinsrid, how is it going?
I can take a look at this
~Avinash
:+1:
Hey @avinsrid, how is it going?
| 2015-11-06T22:06:38 |
|
wger-project/wger
| 233 |
wger-project__wger-233
|
[
"208",
"236"
] |
795f7a74ff32ccaa4f54625e1b9382f2320c3dc2
|
diff --git a/wger/gym/views/gym.py b/wger/gym/views/gym.py
--- a/wger/gym/views/gym.py
+++ b/wger/gym/views/gym.py
@@ -138,7 +138,6 @@ class GymAddView(WgerFormMixin, CreateView):
model = Gym
fields = '__all__'
- success_url = reverse_lazy('gym:gym:list')
title = ugettext_lazy('Add new gym')
form_action = reverse_lazy('gym:gym:add')
permission_required = 'gym.add_gym'
diff --git a/wger/weight/helpers.py b/wger/weight/helpers.py
--- a/wger/weight/helpers.py
+++ b/wger/weight/helpers.py
@@ -47,6 +47,8 @@ def parse_weight_csv(request, cleaned_data):
entry_dates = set()
weight_list = []
error_list = []
+ MAX_ROW_COUNT = 1000
+ row_count = 0
# Process the CSV items first
for row in parsed_csv:
@@ -69,6 +71,9 @@ def parse_weight_csv(request, cleaned_data):
except (ValueError, IndexError, decimal.InvalidOperation):
error_list.append(row)
+ row_count += 1
+ if row_count > MAX_ROW_COUNT:
+ break
# Create the valid weight entries
for date, weight in distinct_weight_entries:
|
Warning when opening gym with no members
When opening a gym with no members (e.g. the "default gym" generated by bootstrap_wger), there is an ugly warning by the datatables library because there is no data in the members table

Deleting all gyms and then creating a new gym throws error.
To reproduce:
1) Delete all the gyms
2) Create a new gym
3) Try to view the gym members
---
NoReverseMatch at /en/gym/6/members
Reverse for 'edit' with arguments '('',)' and keyword arguments '{}' not found. 1 pattern(s) tried: ['en/gym/admin-config/(?P<pk>\d+)/edit$']
Request Method: GET
Request URL: http://127.0.0.1:8000/en/gym/6/members
Django Version: 1.8.6
Exception Type: NoReverseMatch
Exception Value:
Reverse for 'edit' with arguments '('',)' and keyword arguments '{}' not found. 1 pattern(s) tried: ['en/gym/admin-config/(?P<pk>\d+)/edit$']
| 2015-11-07T21:58:22 |
||
wger-project/wger
| 235 |
wger-project__wger-235
|
[
"204"
] |
c019c337c8642006a7a851c40bbedbb2c32fc5b5
|
diff --git a/wger/weight/helpers.py b/wger/weight/helpers.py
--- a/wger/weight/helpers.py
+++ b/wger/weight/helpers.py
@@ -44,6 +44,7 @@ def parse_weight_csv(request, cleaned_data):
parsed_csv = csv.reader(six.StringIO(cleaned_data['csv_input']),
dialect)
distinct_weight_entries = []
+ entry_dates = set()
weight_list = []
error_list = []
@@ -54,13 +55,15 @@ def parse_weight_csv(request, cleaned_data):
parsed_weight = decimal.Decimal(row[1].replace(',', '.'))
duplicate_date_in_db = WeightEntry.objects.filter(date=parsed_date,
user=request.user).exists()
- # within the list there are no duplicates
- unique_among_csv = (parsed_date, parsed_weight) not in distinct_weight_entries
+ # within the list there are no duplicate dates
+ unique_among_csv = parsed_date not in entry_dates
+
# there is no existing weight entry in the database for that date
unique_in_db = not duplicate_date_in_db
if unique_among_csv and unique_in_db:
distinct_weight_entries.append((parsed_date, parsed_weight))
+ entry_dates.add(parsed_date)
else:
error_list.append(row)
|
Duplicate weight entries in CSV import
It seems it's possible to trigger a uniqueness constraint error using the CSV import function for the weight entries. I could have sworn this was already fixed, but it looks like it isn't.
During import the view should make sure that duplicate entries are not saved.
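A small, self-contained sketch of the date-based deduplication the fix relies on (names and sample rows are illustrative):

```python
import datetime
import decimal


def dedupe_by_date(rows):
    """Keep only the first (date, weight) pair per date; reject the rest."""
    seen_dates = set()
    unique, rejected = [], []
    for entry_date, weight in rows:
        if entry_date in seen_dates:
            rejected.append((entry_date, weight))
            continue
        seen_dates.add(entry_date)
        unique.append((entry_date, weight))
    return unique, rejected


rows = [
    (datetime.date(2015, 11, 1), decimal.Decimal("80.5")),
    (datetime.date(2015, 11, 1), decimal.Decimal("80.7")),  # duplicate date
    (datetime.date(2015, 11, 2), decimal.Decimal("80.2")),
]
unique, rejected = dedupe_by_date(rows)
assert len(unique) == 2 and len(rejected) == 1
```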
| 2015-11-07T23:12:41 |
||
wger-project/wger
| 240 |
wger-project__wger-240
|
[
"237"
] |
2f8d1342fd8a97f90a714562985b2651df0453ca
|
diff --git a/wger/core/views/user.py b/wger/core/views/user.py
--- a/wger/core/views/user.py
+++ b/wger/core/views/user.py
@@ -73,8 +73,8 @@ def login(request):
context = {'hide_persona': check_request_amazon(request) or check_request_android(request),
'active_tab': USER_TAB}
- if request.REQUEST.get('next'):
- context['next'] = request.REQUEST.get('next')
+ if request.GET.get('next'):
+ context['next'] = request.GET.get('next')
return django_loginview(request,
template_name='user/login.html',
|
Deprecation warning in login function
Django currently throws a deprecation warning in the login function (`login` in `core/views/user.py`):
``` python
wger/core/views/user.py:77: RemovedInDjango19Warning: `request.REQUEST` is deprecated, use `request.GET` or `request.POST` instead.
context['next'] = request.REQUEST.get('next')
```
This should be fixed, but I'm not sure if only GET or also POST is used in the code.
| 2015-11-09T19:57:04 |
||
wger-project/wger
| 249 |
wger-project__wger-249
|
[
"186"
] |
901f5ef586d545d1c6f82d7d6d5cc586febac98a
|
diff --git a/wger/gym/urls.py b/wger/gym/urls.py
--- a/wger/gym/urls.py
+++ b/wger/gym/urls.py
@@ -63,6 +63,9 @@
url(r'^user/(?P<user_pk>\d+)/permission-edit$',
gym.gym_permissions_user_edit,
name='edit-user-permission'),
+ url(r'^user/(?P<user_pk>\d+)/reset-user-password$',
+ gym.reset_user_password,
+ name='reset-user-password'),
]
# 'sub patterns' for gym config
diff --git a/wger/gym/views/gym.py b/wger/gym/views/gym.py
--- a/wger/gym/views/gym.py
+++ b/wger/gym/views/gym.py
@@ -201,6 +201,33 @@ def gym_new_user_info_export(request):
return response
+def reset_user_password(request, user_pk):
+ '''
+ Resets the password of the selected user to random password
+ '''
+
+ user = get_object_or_404(User, pk=user_pk)
+
+ if not request.user.is_authenticated():
+ return HttpResponseForbidden()
+
+ if not request.user.has_perm('gym.manage_gyms') \
+ and not request.user.has_perm('gym.manage_gym'):
+ return HttpResponseForbidden()
+
+ if request.user.has_perm('gym.manage_gym') \
+ and request.user.userprofile.gym != user.userprofile.gym:
+ return HttpResponseForbidden()
+
+ password = password_generator()
+ user.set_password(password)
+ user.save()
+
+ context = {'mod_user': user,
+ 'password': password}
+ return render(request, 'gym/reset_user_password.html', context)
+
+
def gym_permissions_user_edit(request, user_pk):
'''
Edits the permissions of a gym member
|
Feature request: Password reset
After adding a new user to a Gym, I get a random password. I forgot to save/change the password...
I'm not able to reset it or delete the user again.
|
This makes sense. The gym manager (or whatever the role is called that can add users) should be able to change user's passwords to a random string again.
Looking into this.
I've got the reset password functionality coded, but I am still working on it. It has two problems at present:
1. There is no confirmation dialogue for resetting the user's password, so if an admin clicks the link by accident, it will reset the password. I am working on adding a dialogue to confirm their intention to reset the password.
2. It's not translatable. This is the first time I've worked with Django translations. Are there any tips/tricks to offer aside from reading their documentation, @rolandgeider? I took a brief look at the .po files in the project and it appears they may be generated by some sort of script. Is that true?
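On the translation question, the usual Django workflow is roughly the following (the locale code and example string are mine; the project may wrap these commands in its own scripts):

```python
# Mark user-facing strings so makemessages can extract them into the .po files.
# wger on Django 1.x used ugettext_lazy; on modern Django it is gettext_lazy.
from django.utils.translation import ugettext_lazy as _

PAGE_TITLE = _('Reset user password')

# Then, from the project root:
#   django-admin makemessages -l de    # (re)generate locale/de/LC_MESSAGES/django.po
#   django-admin compilemessages       # compile the .po files into .mo files
# makemessages is most likely the "script" that generates the .po files.
```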
| 2016-01-07T01:58:14 |
|
wger-project/wger
| 543 |
wger-project__wger-543
|
[
"478"
] |
1c59eb02367b75d27e033d02dc25d5e443f48195
|
diff --git a/wger/weight/urls.py b/wger/weight/urls.py
--- a/wger/weight/urls.py
+++ b/wger/weight/urls.py
@@ -33,6 +33,10 @@
login_required(views.WeightUpdateView.as_view()),
name='edit'),
+ url(r'^(?P<pk>\d+)/delete/$',
+ views.WeightDeleteView.as_view(),
+ name='delete'),
+
url(r'^export-csv/$',
views.export_csv,
name='export-csv'),
diff --git a/wger/weight/views.py b/wger/weight/views.py
--- a/wger/weight/views.py
+++ b/wger/weight/views.py
@@ -21,6 +21,7 @@
# Django
from django.contrib.auth.decorators import login_required
+from django.contrib.auth.mixins import LoginRequiredMixin
from django.db.models import (
Max,
Min
@@ -37,7 +38,8 @@
)
from django.views.generic import (
CreateView,
- UpdateView
+ UpdateView,
+ DeleteView
)
# Third Party
@@ -46,7 +48,10 @@
from rest_framework.response import Response
# wger
-from wger.utils.generic_views import WgerFormMixin
+from wger.utils.generic_views import (
+ WgerFormMixin,
+ WgerDeleteMixin
+)
from wger.utils.helpers import check_access
from wger.weight import helpers
from wger.weight.forms import WeightForm
@@ -88,7 +93,7 @@ def get_success_url(self):
return reverse('weight:overview', kwargs={'username': self.object.user.username})
-class WeightUpdateView(WgerFormMixin, UpdateView):
+class WeightUpdateView(WgerFormMixin, LoginRequiredMixin, UpdateView):
"""
Generic view to edit an existing weight entry
"""
@@ -108,6 +113,28 @@ def get_success_url(self):
return reverse('weight:overview', kwargs={'username': self.object.user.username})
+class WeightDeleteView(WgerDeleteMixin, LoginRequiredMixin, DeleteView):
+ """
+ Generic view to delete a weight entry
+ """
+
+ model = WeightEntry
+ fields = ('weight',)
+
+ messages = ugettext_lazy('Successfully deleted.')
+
+ def get_context_data(self, **kwargs):
+ context = super(WeightDeleteView, self).get_context_data(**kwargs)
+ context['title'] = _('Delete weight entry for the %s') % self.object.date
+ return context
+
+ def get_success_url(self):
+ """
+ Return to overview with username
+ """
+ return reverse('weight:overview', kwargs={'username': self.object.user.username})
+
+
@login_required
def export_csv(request):
"""
|
diff --git a/wger/weight/tests/test_entry.py b/wger/weight/tests/test_entry.py
--- a/wger/weight/tests/test_entry.py
+++ b/wger/weight/tests/test_entry.py
@@ -24,7 +24,8 @@
from wger.core.tests.base_testcase import (
WgerAddTestCase,
WgerEditTestCase,
- WgerTestCase
+ WgerTestCase,
+ WgerDeleteTestCase
)
from wger.utils.constants import TWOPLACES
from wger.weight.models import WeightEntry
@@ -112,6 +113,18 @@ class EditWeightEntryTestCase(WgerEditTestCase):
user_fail = 'admin'
+class DeleteWeightEntryTestCase(WgerDeleteTestCase):
+ """
+ Tests deleting a weight entry
+ """
+
+ object_class = WeightEntry
+ url = 'weight:delete'
+ pk = 1
+ user_success = 'test'
+ user_fail = 'admin'
+
+
class WeightEntryTestCase(api_base_test.ApiBaseResourceTestCase):
"""
Tests the weight entry overview resource
|
Feature Request: remove or edit weight entry
- [ ] Remove or edit weight entry
|
Yes! I'm not even sure if this is already possible via the API, but the entries need to be deletable via the website as well
- [ ] Add appropriate delete/edit views (and tests, but this is mostly copy&paste)
- [ ] Add dropdowns to the weight list in the overview page with the two options
| 2020-10-16T09:09:48 |
wger-project/wger
| 579 |
wger-project__wger-579
|
[
"547"
] |
86361277b7b50ac7a0a4c7eb44db5a26edba78bf
|
diff --git a/wger/core/templatetags/wger_extras.py b/wger/core/templatetags/wger_extras.py
--- a/wger/core/templatetags/wger_extras.py
+++ b/wger/core/templatetags/wger_extras.py
@@ -18,6 +18,7 @@
from django import template
from django.conf import settings
from django.db.models import QuerySet
+from django.templatetags.static import static
from django.utils.html import strip_spaces_between_tags
from django.utils.safestring import mark_safe
from django.utils.translation import (
@@ -131,9 +132,9 @@ def render_muscles(muscles=None, muscles_sec=None):
except IndexError:
front_back = "front" if out_sec[0].is_front else "back"
- backgrounds = [f"images/muscles/main/muscle-{i.id}.svg" for i in out_main] \
- + [f"images/muscles/secondary/muscle-{i.id}.svg" for i in out_sec] \
- + [f"images/muscles/muscular_system_{front_back}.svg"]
+ backgrounds = [i.image_url_main for i in out_main] \
+ + [i.image_url_secondary for i in out_sec] \
+ + [static(f"images/muscles/muscular_system_{front_back}.svg")]
return {"backgrounds": backgrounds,
"empty": False}
diff --git a/wger/exercises/api/serializers.py b/wger/exercises/api/serializers.py
--- a/wger/exercises/api/serializers.py
+++ b/wger/exercises/api/serializers.py
@@ -107,4 +107,4 @@ class MuscleSerializer(serializers.ModelSerializer):
"""
class Meta:
model = Muscle
- fields = '__all__'
+ fields = ['name', 'is_front', 'image_url_main', 'image_url_secondary']
diff --git a/wger/exercises/models.py b/wger/exercises/models.py
--- a/wger/exercises/models.py
+++ b/wger/exercises/models.py
@@ -27,6 +27,7 @@
from django.core.validators import MinLengthValidator
from django.db import models
from django.template.loader import render_to_string
+from django.templatetags.static import static
from django.urls import reverse
from django.utils import translation
from django.utils.text import slugify
@@ -68,6 +69,16 @@ class Muscle(models.Model):
class Meta:
ordering = ["name", ]
+ # Image to use when displaying this as a main muscle in an exercise
+ @property
+ def image_url_main(self):
+ return static(f"images/muscles/main/muscle-{self.id}.svg")
+
+ # Image to use when displaying this as a secondary muscle in an exercise
+ @property
+ def image_url_secondary(self):
+ return static(f"images/muscles/secondary/muscle-{self.id}.svg")
+
def __str__(self):
"""
Return a more human-readable representation
|
diff --git a/wger/exercises/tests/test_muscles.py b/wger/exercises/tests/test_muscles.py
--- a/wger/exercises/tests/test_muscles.py
+++ b/wger/exercises/tests/test_muscles.py
@@ -41,6 +41,12 @@ def test_representation(self):
"""
self.assertEqual("{0}".format(Muscle.objects.get(pk=1)), 'Anterior testoid')
+ # Check image URL properties
+ self.assertIn("images/muscles/main/muscle-2.svg",
+ Muscle.objects.get(pk=2).image_url_main)
+ self.assertIn("images/muscles/secondary/muscle-1.svg",
+ Muscle.objects.get(pk=1).image_url_secondary)
+
class MuscleAdminOverviewTest(WgerAccessTestCase):
"""
@@ -148,3 +154,14 @@ class MuscleApiTestCase(api_base_test.ApiBaseResourceTestCase):
private_resource = False
data = {'name': 'The name',
'is_front': True}
+
+ def test_get_detail(self):
+ super().test_get_detail()
+
+ # Check that image URLs are present in response
+ response = self.client.get(self.url_detail)
+ response_object = response.json()
+ self.assertIn("images/muscles/main/muscle-1.svg",
+ response_object["image_url_main"])
+ self.assertIn("images/muscles/secondary/muscle-1.svg",
+ response_object["image_url_secondary"])
|
Return image URL on muscle endpoint
The muscle endpoint should also return the URLs for the muscle images and the backgrounds (probably the exercise endpoint as well). We need to check whether the URLs remain accessible when the static files are hosted on AWS or similar.
|
To get the URL:
```python
url = request.build_absolute_uri(static('images/path/foo/bar.svg')),
```
A better place would be a property on the model itself that returns the URL; then it can be used in the endpoint and in templates, e.g.:
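A minimal sketch of such a property, which is essentially what the merged patch above adds to `Muscle`:

```python
from django.db import models
from django.templatetags.static import static


class Muscle(models.Model):
    # ... existing fields ...

    @property
    def image_url_main(self):
        """Static URL of the image used when this is a main muscle."""
        return static(f"images/muscles/main/muscle-{self.id}.svg")

    @property
    def image_url_secondary(self):
        """Static URL of the image used when this is a secondary muscle."""
        return static(f"images/muscles/secondary/muscle-{self.id}.svg")
```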
Hi, I would be interested in looking at this issue!
Hi! I have heard that @lydiaxing wanted to do this
Hi! I'm already working on another issue so Kevin can take this one!
| 2020-12-13T03:11:17 |
wger-project/wger
| 631 |
wger-project__wger-631
|
[
"585"
] |
4b79269a90ad93907238fbc09ce71519fbe59128
|
diff --git a/wger/core/views/user.py b/wger/core/views/user.py
--- a/wger/core/views/user.py
+++ b/wger/core/views/user.py
@@ -520,6 +520,12 @@ def get_context_data(self, **kwargs):
context['session'] = WorkoutSession.objects.filter(user=self.object).order_by('-date')[:10]
context['admin_notes'] = AdminUserNote.objects.filter(member=self.object)[:5]
context['contracts'] = Contract.objects.filter(member=self.object)[:5]
+
+ page_user = self.object # type: User
+ request_user = self.request.user # type: User
+ same_gym_id = request_user.userprofile.gym_id == page_user.userprofile.gym_id
+ context['enable_login_button'] = request_user.has_perm('gym.gym_trainer') and same_gym_id
+ context['gym_name'] = request_user.userprofile.gym.name
return context
|
Log in as user functionality produces 403 error
Hello,
when trying to log in from an administrator account as a user **without** a default gym set (which happens when you register through the site yourself), I get a 403 error.
#### Steps to reproduce
Use any of the current deployment methods (docker, docker-compose, VM installation with Debian 10). And try as an admin to use **log in as this user** (e.g. http://url/cs/user/5/trainer-login ).
Test user is guaranteed to produce this error if no gym is set:

#### Log
```
[Wed Dec 16 11:09:28.687239 2020] [wsgi:error] [pid 11615:tid 140290494953216] [remote 192.168.4.14:60186] Forbidden: /cs/user/5/trainer-login
```
|
With further testing this happens in two cases:
1. when the user is either **not** in the default gym
2. or **does not have** any gym at all.
So creating another gym and adding the user there results in the same error.
| 2021-03-13T05:12:43 |
|
wger-project/wger
| 637 |
wger-project__wger-637
|
[
"617"
] |
35f6aad3f6cd87894ad5ce6ad1052d2d0fab40f7
|
diff --git a/wger/exercises/models.py b/wger/exercises/models.py
--- a/wger/exercises/models.py
+++ b/wger/exercises/models.py
@@ -417,6 +417,7 @@ def set_author(self, request):
Set author and status
This is only used when creating exercises (via web or API)
"""
+
if request.user.has_perm('exercises.add_exercise'):
self.status = self.STATUS_ACCEPTED
if not self.license_author:
@@ -426,8 +427,9 @@ def set_author(self, request):
self.license_author = request.user.username
subject = _('New user submitted exercise')
+
message = _('The user {0} submitted a new exercise "{1}".').format(
- request.user.username, self.name)
+ request.user.username, self.name_original)
mail.mail_admins(str(subject),
str(message),
fail_silently=True)
|
Missing exercise name in notification emails
When a user submits a new exercise, the name is missing in the notification email, e.g.
``
The user <name> submitted a new exercise "".
``
It's probably an error in the template
|
Hi, I would like to try to solve this issue. Is it ok?
Sure, go ahead!
| 2021-03-20T22:32:29 |
|
wger-project/wger
| 1,133 |
wger-project__wger-1133
|
[
"1126"
] |
469ef714b9ab6754edc698a63df74d0fed108dcf
|
diff --git a/wger/core/forms.py b/wger/core/forms.py
--- a/wger/core/forms.py
+++ b/wger/core/forms.py
@@ -14,6 +14,7 @@
#
# You should have received a copy of the GNU Affero General Public License
+from datetime import date
# Django
from django import forms
from django.contrib.auth.forms import (
@@ -77,6 +78,17 @@ class UserPreferencesForm(forms.ModelForm):
help_text=_("Used for password resets and, optionally, e-mail reminders."),
required=False
)
+ birthdate = forms.DateField(
+ label=_("Date of Birth"),
+ required=False,
+ widget=forms.DateInput(
+ attrs={
+ 'type': 'date',
+ "max": str(date.today().replace(year=date.today().year - 10)),
+ "min": str(date.today().replace(year=date.today().year - 100))
+ },
+ )
+ )
class Meta:
model = UserProfile
@@ -104,7 +116,9 @@ def __init__(self, *args, **kwargs):
Column('first_name', css_class='form-group col-6 mb-0'),
Column('last_name', css_class='form-group col-6 mb-0'),
css_class='form-row'
- ), HTML("<hr>")
+ ),
+ 'birthdate',
+ HTML("<hr>")
),
Fieldset(
_("Workout reminders"),
@@ -121,7 +135,6 @@ def __init__(self, *args, **kwargs):
"show_comments",
"show_english_ingredients",
"num_days_weight_reminder",
- "birthdate",
), ButtonHolder(Submit('submit', _("Save"), css_class='btn-success btn-block'))
)
diff --git a/wger/core/models/profile.py b/wger/core/models/profile.py
--- a/wger/core/models/profile.py
+++ b/wger/core/models/profile.py
@@ -191,7 +191,7 @@ class UserProfile(models.Model):
"""The user's age"""
birthdate = models.DateField(
- verbose_name=('Date of Birth'),
+ verbose_name=_('Date of Birth'),
blank=False,
null=True,
validators=[birthdate_validator],
|
Rework the user preferences
The current user preferences need to be cleaned up somewhat
- Remove obsolete options
- Birthdate shouldn't be a required field
- Better error messages for birthdate
- Possibly: reimplement the settings page in react
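Regarding the birthdate point above, a stripped-down sketch of an optional date field with a native date picker, along the lines of the form change in the patch above (the form class here is a stand-in, not the project's actual ModelForm):

```python
from django import forms


class PreferencesSketchForm(forms.Form):
    # Optional birthdate rendered with the browser's native date input.
    birthdate = forms.DateField(
        label='Date of Birth',
        required=False,
        widget=forms.DateInput(attrs={'type': 'date'}),
    )
```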
|
Hi @rolandgeider, I would like to work on this issue. Can you please share which options need to be removed?
| 2022-09-30T20:33:53 |
|
wger-project/wger
| 1,134 |
wger-project__wger-1134
|
[
"1047"
] |
685fa200f55a4723f949695b4f745dc5ff0b091b
|
diff --git a/wger/core/api/views.py b/wger/core/api/views.py
--- a/wger/core/api/views.py
+++ b/wger/core/api/views.py
@@ -65,7 +65,6 @@
from wger.utils.api_token import create_token
from wger.utils.permissions import WgerPermission
-
logger = logging.getLogger(__name__)
@@ -155,7 +154,7 @@ class ApplicationVersionView(viewsets.ViewSet):
"""
Returns the application's version
"""
- permission_classes = (AllowAny, )
+ permission_classes = (AllowAny,)
@staticmethod
def get(request):
@@ -188,7 +187,7 @@ class RequiredApplicationVersionView(viewsets.ViewSet):
"""
Returns the minimum required version of flutter app to access this server
"""
- permission_classes = (AllowAny, )
+ permission_classes = (AllowAny,)
@staticmethod
def get(request):
@@ -198,6 +197,7 @@ def get(request):
class UserAPILoginView(viewsets.ViewSet):
"""
API endpoint for api user objects
+ .. warning:: This endpoint is deprecated
"""
permission_classes = (AllowAny, )
queryset = User.objects.all()
@@ -205,7 +205,15 @@ class UserAPILoginView(viewsets.ViewSet):
throttle_scope = 'login'
def get(self, request):
- return Response({'message': "You must send a 'username' and 'password' via POST"})
+ return Response(
+ data={
+ 'message': "You must send a 'username' and 'password' via POST",
+ 'warning': "This endpoint is deprecated."
+ },
+ headers={
+ "Deprecation": "Sat, 01 Oct 2022 23:59:59 GMT",
+ },
+ )
def post(self, request):
data = request.data
@@ -223,7 +231,13 @@ def post(self, request):
)
token = create_token(form.get_user())
- return Response({'token': token.key}, status=status.HTTP_200_OK)
+ return Response(
+ data={'token': token.key, 'message': "This endpoint is deprecated."},
+ status=status.HTTP_200_OK,
+ headers={
+ "Deprecation": "Sat, 01 Oct 2022 23:59:59 GMT",
+ }
+ )
class UserAPIRegistrationViewSet(viewsets.ViewSet):
@@ -281,7 +295,7 @@ class DaysOfWeekViewSet(viewsets.ReadOnlyModelViewSet):
queryset = DaysOfWeek.objects.all()
serializer_class = DaysOfWeekSerializer
ordering_fields = '__all__'
- filterset_fields = ('day_of_week', )
+ filterset_fields = ('day_of_week',)
class LicenseViewSet(viewsets.ReadOnlyModelViewSet):
diff --git a/wger/settings_global.py b/wger/settings_global.py
--- a/wger/settings_global.py
+++ b/wger/settings_global.py
@@ -18,7 +18,7 @@
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import re
-
+from datetime import timedelta
"""
This file contains the global settings that don't usually need to be changed.
@@ -82,6 +82,7 @@
'rest_framework',
'rest_framework.authtoken',
'django_filters',
+ 'rest_framework_simplejwt',
# Breadcrumbs
'django_bootstrap_breadcrumbs',
@@ -391,6 +392,7 @@
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
+ 'rest_framework_simplejwt.authentication.JWTAuthentication',
),
'DEFAULT_FILTER_BACKENDS': (
'django_filters.rest_framework.DjangoFilterBackend',
@@ -402,6 +404,17 @@
}
}
+#
+# Django Rest Framework SimpleJWT
+#
+SIMPLE_JWT = {
+ 'ACCESS_TOKEN_LIFETIME': timedelta(minutes=5),
+ 'REFRESH_TOKEN_LIFETIME': timedelta(days=1),
+ 'ROTATE_REFRESH_TOKENS': False,
+ 'BLACKLIST_AFTER_ROTATION': False,
+ 'UPDATE_LAST_LOGIN': False,
+}
+
#
# CORS headers: allow all hosts to access the API
#
diff --git a/wger/urls.py b/wger/urls.py
--- a/wger/urls.py
+++ b/wger/urls.py
@@ -30,6 +30,7 @@
# Third Party
from django_email_verification import urls as email_urls
from rest_framework import routers
+from rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView, TokenVerifyView
# wger
from wger.core.api import views as core_api_views
@@ -43,8 +44,7 @@
from wger.utils.generic_views import TextTemplateView
from wger.weight.api import views as weight_api_views
-
-#admin.autodiscover()
+# admin.autodiscover()
#
# REST API
@@ -204,7 +204,7 @@
# The actual URLs
#
urlpatterns = i18n_patterns(
- #url(r'^admin/', admin.site.urls),
+ # url(r'^admin/', admin.site.urls),
path('', include(('wger.core.urls', 'core'), namespace='core')),
path('workout/', include(('wger.manager.urls', 'manager'), namespace='manager')),
path('exercise/', include(('wger.exercises.urls', 'exercise'), namespace='exercise')),
@@ -244,6 +244,20 @@
core_api_views.UserAPIRegistrationViewSet.as_view({'post': 'post'}),
name='api_register'
),
+ path(
+ 'api/v2/token/',
+ TokenObtainPairView.as_view(),
+ name='token_obtain_pair'
+ ),
+ path(
+ 'api/v2/token/refresh/',
+ TokenRefreshView.as_view(),
+ name='token_refresh'
+ ),
+ path(
+ 'api/v2/token/verify/',
+ TokenVerifyView.as_view(),
+ name='token_verify'),
# Others
path(
|
Add support for JWT
We should add support for Json Web Tokens to access the API
https://github.com/jazzband/djangorestframework-simplejwt
|
@rolandgeider I would like to implement this
Hi @RohanKaran ! Sure go ahead
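Once the simplejwt endpoints from the patch above are in place, obtaining and using a token could look roughly like this (host, credentials, and the profile endpoint path are placeholders for illustration):

```python
# Sketch: obtain a JWT pair from /api/v2/token/ and call the API with it.
import requests

BASE = "https://wger.example.com"  # placeholder host

tokens = requests.post(
    f"{BASE}/api/v2/token/",
    json={"username": "demo", "password": "demo"},
    timeout=30,
).json()
# simplejwt returns {'access': '...', 'refresh': '...'}

profile = requests.get(
    f"{BASE}/api/v2/userprofile/",  # illustrative endpoint
    headers={"Authorization": f"Bearer {tokens['access']}"},
    timeout=30,
)
print(profile.status_code)

# When the short-lived access token expires, exchange the refresh token:
new_access = requests.post(
    f"{BASE}/api/v2/token/refresh/",
    json={"refresh": tokens["refresh"]},
    timeout=30,
).json()["access"]
```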
| 2022-09-30T21:57:55 |